[30388f42] Infrastructure Hardening: Repaired CE/Connector DB schema, fixed frontend styling build, implemented robust echo shield in worker v2.1.1, and integrated Lead Engine into gateway.
@@ -1 +1 @@
{"task_id": "2ff88f42-8544-8000-8314-c9013414d1d0", "token": "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", "session_start_time": "2026-02-19T08:32:53.260193"}
{"task_id": "30388f42-8544-8088-bc48-e59e9b973e91", "token": "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", "readme_path": null, "session_start_time": "2026-03-07T14:07:47.125445"}
40  ARCHIVE_legacy_scripts/check_benni.py  Normal file
@@ -0,0 +1,40 @@
import sqlite3
import os
import json

DB_PATH = "companies_v3_fixed_2.db"

def check_company_33():
    if not os.path.exists(DB_PATH):
        print(f"❌ Database not found at {DB_PATH}")
        return

    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()

        print(f"🔍 Checking Company ID 33 (Bennis Playland)...")
        # Check standard fields
        cursor.execute("SELECT id, name, city, street, zip_code FROM companies WHERE id = 33")
        row = cursor.fetchone()
        if row:
            print(f" Standard: City='{row[2]}', Street='{row[3]}', Zip='{row[4]}'")
        else:
            print(" ❌ Company 33 not found in DB.")

        # Check Enrichment
        cursor.execute("SELECT content FROM enrichment_data WHERE company_id = 33 AND source_type = 'website_scrape'")
        enrich_row = cursor.fetchone()
        if enrich_row:
            data = json.loads(enrich_row[0])
            imp = data.get("impressum")
            print(f" Impressum Data: {json.dumps(imp, indent=2) if imp else 'None'}")
        else:
            print(" ❌ No website_scrape found for Company 33.")

        conn.close()
    except Exception as e:
        print(f"❌ Error: {e}")

if __name__ == "__main__":
    check_company_33()
16  ARCHIVE_legacy_scripts/check_erding_openers.py  Normal file
@@ -0,0 +1,16 @@
import sqlite3

DB_PATH = "/app/companies_v3_fixed_2.db"
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()

cursor.execute("SELECT name, ai_opener, ai_opener_secondary, industry_ai FROM companies WHERE name LIKE '%Erding%'")
row = cursor.fetchone()
if row:
    print(f"Company: {row[0]}")
    print(f"Industry: {row[3]}")
    print(f"Opener Primary: {row[1]}")
    print(f"Opener Secondary: {row[2]}")
else:
    print("Company not found.")
conn.close()
16  ARCHIVE_legacy_scripts/check_klinikum_erding.py  Normal file
@@ -0,0 +1,16 @@
import sqlite3

DB_PATH = "/app/companies_v3_fixed_2.db"
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()

cursor.execute("SELECT name, ai_opener, ai_opener_secondary, industry_ai FROM companies WHERE name LIKE '%Klinikum Landkreis Erding%'")
row = cursor.fetchone()
if row:
    print(f"Company: {row[0]}")
    print(f"Industry: {row[3]}")
    print(f"Opener Primary: {row[1]}")
    print(f"Opener Secondary: {row[2]}")
else:
    print("Company not found.")
conn.close()
14  ARCHIVE_legacy_scripts/check_mappings.py  Normal file
@@ -0,0 +1,14 @@
import sqlite3

def check_mappings():
    conn = sqlite3.connect('/app/companies_v3_fixed_2.db')
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM job_role_mappings")
    rows = cursor.fetchall()
    print("--- Job Role Mappings ---")
    for row in rows:
        print(row)
    conn.close()

if __name__ == "__main__":
    check_mappings()
25  ARCHIVE_legacy_scripts/check_matrix.py  Normal file
@@ -0,0 +1,25 @@
import os
import sys

# Add the company-explorer directory to the Python path
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), 'company-explorer')))

from backend.database import SessionLocal, MarketingMatrix, Industry, Persona
import json

db = SessionLocal()
try:
    count = db.query(MarketingMatrix).count()
    print(f"MarketingMatrix count: {count}")

    if count > 0:
        first = db.query(MarketingMatrix).first()
        print(f"First entry: ID={first.id}, Industry={first.industry_id}, Persona={first.persona_id}")
    else:
        print("MarketingMatrix is empty.")
    # Check if we have industries and personas
    ind_count = db.query(Industry).count()
    pers_count = db.query(Persona).count()
    print(f"Industries: {ind_count}, Personas: {pers_count}")
finally:
    db.close()
23  ARCHIVE_legacy_scripts/check_matrix_indoor.py  Normal file
@@ -0,0 +1,23 @@
import sqlite3

DB_PATH = "/app/companies_v3_fixed_2.db"
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()

query = """
SELECT i.name, p.name, m.subject, m.intro, m.social_proof
FROM marketing_matrix m
JOIN industries i ON m.industry_id = i.id
JOIN personas p ON m.persona_id = p.id
WHERE i.name = 'Leisure - Indoor Active'
"""

cursor.execute(query)
rows = cursor.fetchall()
for row in rows:
    print(f"Industry: {row[0]} | Persona: {row[1]}")
    print(f"  Subject: {row[2]}")
    print(f"  Intro: {row[3]}")
    print(f"  Social Proof: {row[4]}")
    print("-" * 50)
conn.close()
24  ARCHIVE_legacy_scripts/check_matrix_results.py  Normal file
@@ -0,0 +1,24 @@
import sqlite3
import json

DB_PATH = "/app/companies_v3_fixed_2.db"
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()

query = """
SELECT i.name, p.name, m.subject, m.intro, m.social_proof
FROM marketing_matrix m
JOIN industries i ON m.industry_id = i.id
JOIN personas p ON m.persona_id = p.id
WHERE i.name = 'Healthcare - Hospital'
"""

cursor.execute(query)
rows = cursor.fetchall()
for row in rows:
    print(f"Industry: {row[0]} | Persona: {row[1]}")
    print(f"  Subject: {row[2]}")
    print(f"  Intro: {row[3]}")
    print(f"  Social Proof: {row[4]}")
    print("-" * 50)
conn.close()
53  ARCHIVE_legacy_scripts/check_silly_billy.py  Normal file
@@ -0,0 +1,53 @@
import sqlite3
import os

DB_PATH = "companies_v3_fixed_2.db"

def check_company():
    if not os.path.exists(DB_PATH):
        print(f"❌ Database not found at {DB_PATH}")
        return

    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()

        print(f"🔍 Searching for 'Silly Billy' in {DB_PATH}...")
        cursor.execute("SELECT id, name, crm_id, ai_opener, ai_opener_secondary, city, crm_vat, status FROM companies WHERE name LIKE '%Silly Billy%'")
        rows = cursor.fetchall()

        if not rows:
            print("❌ No company found matching 'Silly Billy'")
        else:
            for row in rows:
                company_id = row[0]
                print("\n✅ Company Found:")
                print(f"  ID: {company_id}")
                print(f"  Name: {row[1]}")
                print(f"  CRM ID: {row[2]}")
                print(f"  Status: {row[7]}")
                print(f"  City: {row[5]}")
                print(f"  VAT: {row[6]}")
                print(f"  Opener (Primary): {row[3][:50]}..." if row[3] else "  Opener (Primary): None")

                # Check Enrichment Data
                print(f"\n  🔍 Checking Enrichment Data for ID {company_id}...")
                cursor.execute("SELECT content FROM enrichment_data WHERE company_id = ? AND source_type = 'website_scrape'", (company_id,))
                enrich_row = cursor.fetchone()
                if enrich_row:
                    import json
                    try:
                        data = json.loads(enrich_row[0])
                        imp = data.get("impressum")
                        print(f"  Impressum Data in Scrape: {json.dumps(imp, indent=2) if imp else 'None'}")
                    except Exception as e:
                        print(f"  ❌ Error parsing JSON: {e}")
                else:
                    print("  ❌ No website_scrape enrichment data found.")

        conn.close()
    except Exception as e:
        print(f"❌ Error reading DB: {e}")

if __name__ == "__main__":
    check_company()
31  ARCHIVE_legacy_scripts/clear_zombies.py  Normal file
@@ -0,0 +1,31 @@
import sqlite3
from datetime import datetime, timedelta

DB_PATH = "/app/connector_queue.db"

def clear_all_zombies():
    print("🧹 Cleaning up Zombie Jobs (PROCESSING for too long)...")
    # A job that is PROCESSING for more than 10 minutes is likely dead
    threshold = (datetime.utcnow() - timedelta(minutes=10)).strftime('%Y-%m-%d %H:%M:%S')

    with sqlite3.connect(DB_PATH) as conn:
        cursor = conn.cursor()

        # 1. Identify Zombies
        cursor.execute("SELECT id, updated_at FROM jobs WHERE status = 'PROCESSING' AND updated_at < ?", (threshold,))
        zombies = cursor.fetchall()

        if not zombies:
            print("✅ No zombies found.")
            return

        print(f"🕵️ Found {len(zombies)} zombie jobs.")
        for zid, updated in zombies:
            print(f"  - Zombie ID {zid} (Last active: {updated})")

        # 2. Kill them
        cursor.execute("UPDATE jobs SET status = 'FAILED', error_msg = 'Zombie cleared: Process timed out' WHERE status = 'PROCESSING' AND updated_at < ?", (threshold,))
        print(f"✅ Successfully cleared {cursor.rowcount} zombie(s).")

if __name__ == "__main__":
    clear_all_zombies()
49  ARCHIVE_legacy_scripts/debug_connector_status.py  Normal file
@@ -0,0 +1,49 @@
import sqlite3
import json
import os

DB_PATH = "connector_queue.db"

def inspect_queue():
    if not os.path.exists(DB_PATH):
        print(f"❌ Database not found at {DB_PATH}")
        return

    print(f"🔍 Inspecting Queue: {DB_PATH}")
    try:
        conn = sqlite3.connect(DB_PATH)
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()

        # Get stats
        cursor.execute("SELECT status, COUNT(*) FROM jobs GROUP BY status")
        stats = dict(cursor.fetchall())
        print(f"\n📊 Stats: {stats}")

        # Get recent jobs
        print("\n📝 Last 10 Jobs:")
        cursor.execute("SELECT id, event_type, status, error_msg, updated_at, payload FROM jobs ORDER BY updated_at DESC LIMIT 10")
        rows = cursor.fetchall()

        for row in rows:
            payload = json.loads(row['payload'])
            # Try to identify entity
            entity = "Unknown"
            if "PrimaryKey" in payload: entity = f"ID {payload['PrimaryKey']}"
            if "ContactId" in payload: entity = f"Contact {payload['ContactId']}"

            print(f"  - Job #{row['id']} [{row['status']}] {row['event_type']} ({entity})")
            print(f"    Updated: {row['updated_at']}")
            if row['error_msg']:
                print(f"    ❌ ERROR: {row['error_msg']}")

            # Print payload details relevant to syncing
            if row['status'] == 'COMPLETED':
                pass  # Maybe less interesting if success, but user says it didn't sync

        conn.close()
    except Exception as e:
        print(f"❌ Error reading DB: {e}")

if __name__ == "__main__":
    inspect_queue()
13  ARCHIVE_legacy_scripts/debug_paths.py  Normal file
@@ -0,0 +1,13 @@
import os
static_path = "/frontend_static"
print(f"Path {static_path} exists: {os.path.exists(static_path)}")
if os.path.exists(static_path):
    for root, dirs, files in os.walk(static_path):
        for file in files:
            print(os.path.join(root, file))
else:
    print("Listing /app instead:")
    for root, dirs, files in os.walk("/app"):
        if "node_modules" in root: continue
        for file in files:
            print(os.path.join(root, file))
16  ARCHIVE_legacy_scripts/debug_zombie.py  Normal file
@@ -0,0 +1,16 @@
import sqlite3
import os

DB_PATH = "/app/connector_queue.db"

if __name__ == "__main__":
    print(f"📊 Accessing database at {DB_PATH}")
    print("📊 Listing last 20 jobs in database...")
    with sqlite3.connect(DB_PATH) as conn:
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()
        cursor.execute("SELECT id, status, event_type, updated_at FROM jobs ORDER BY id DESC LIMIT 20")
        rows = cursor.fetchall()
        for r in rows:
            print(f"  - Job {r['id']}: {r['status']} ({r['event_type']}) - Updated: {r['updated_at']}")
41  ARCHIVE_legacy_scripts/fix_benni_data.py  Normal file
@@ -0,0 +1,41 @@
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import json

# Setup DB
DB_PATH = "sqlite:///companies_v3_fixed_2.db"
engine = create_engine(DB_PATH)
SessionLocal = sessionmaker(bind=engine)
session = SessionLocal()

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Company(Base):
    __tablename__ = "companies"
    id = Column(Integer, primary_key=True)
    street = Column(String)
    zip_code = Column(String)

def fix_benni():
    company_id = 33
    print(f"🔧 Fixing Address for Company ID {company_id}...")

    company = session.query(Company).filter_by(id=company_id).first()
    if not company:
        print("❌ Company not found.")
        return

    # Hardcoded from previous check_benni.py output to be safe/fast
    # "street": "Eriagstraße 58", "zip": "85053"

    company.street = "Eriagstraße 58"
    company.zip_code = "85053"

    session.commit()
    print(f"✅ Database updated: Street='{company.street}', Zip='{company.zip_code}'")

if __name__ == "__main__":
    fix_benni()
70  ARCHIVE_legacy_scripts/fix_industry_units.py  Normal file
@@ -0,0 +1,70 @@
import sqlite3

DB_PATH = "companies_v3_fixed_2.db"

UNIT_MAPPING = {
    "Logistics - Warehouse": "m²",
    "Healthcare - Hospital": "Betten",
    "Infrastructure - Transport": "Passagiere",
    "Leisure - Indoor Active": "m²",
    "Retail - Food": "m²",
    "Retail - Shopping Center": "m²",
    "Hospitality - Gastronomy": "Sitzplätze",
    "Leisure - Outdoor Park": "Besucher",
    "Leisure - Wet & Spa": "Besucher",
    "Infrastructure - Public": "Kapazität",
    "Retail - Non-Food": "m²",
    "Hospitality - Hotel": "Zimmer",
    "Leisure - Entertainment": "Besucher",
    "Healthcare - Care Home": "Plätze",
    "Industry - Manufacturing": "Mitarbeiter",
    "Energy - Grid & Utilities": "Kunden",
    "Leisure - Fitness": "Mitglieder",
    "Corporate - Campus": "Mitarbeiter",
    "Energy - Solar/Wind": "MWp",
    "Tech - Data Center": "Racks",
    "Automotive - Dealer": "Fahrzeuge",
    "Infrastructure Parking": "Stellplätze",
    "Reinigungsdienstleister": "Mitarbeiter",
    "Infrastructure - Communities": "Einwohner"
}

def fix_units():
    print(f"Connecting to {DB_PATH}...")
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    try:
        cursor.execute("SELECT id, name, scraper_search_term, metric_type FROM industries")
        rows = cursor.fetchall()

        updated_count = 0

        for row in rows:
            ind_id, name, current_term, m_type = row

            new_term = UNIT_MAPPING.get(name)

            # Fallback Logic
            if not new_term:
                if m_type in ["AREA_IN", "AREA_OUT"]:
                    new_term = "m²"
                else:
                    new_term = "Anzahl"  # Generic fallback

            if current_term != new_term:
                print(f"Updating '{name}': '{current_term}' -> '{new_term}'")
                cursor.execute("UPDATE industries SET scraper_search_term = ? WHERE id = ?", (new_term, ind_id))
                updated_count += 1

        conn.commit()
        print(f"\n✅ Updated {updated_count} industries with correct units.")

    except Exception as e:
        print(f"❌ Error: {e}")
        conn.rollback()
    finally:
        conn.close()

if __name__ == "__main__":
    fix_units()
23  ARCHIVE_legacy_scripts/fix_mappings_v2.py  Normal file
@@ -0,0 +1,23 @@
import sqlite3

def fix_mappings():
    conn = sqlite3.connect('/app/companies_v3_fixed_2.db')
    cursor = conn.cursor()

    # New mappings for senior management roles and for generalization
    new_rules = [
        ('%leitung%', 'Wirtschaftlicher Entscheider'),
        ('%vorstand%', 'Wirtschaftlicher Entscheider'),
        ('%geschäftsleitung%', 'Wirtschaftlicher Entscheider'),
        ('%management%', 'Wirtschaftlicher Entscheider')
    ]

    for pattern, role in new_rules:
        cursor.execute("INSERT OR REPLACE INTO job_role_mappings (pattern, role, created_at) VALUES (?, ?, '2026-02-22T15:30:00')", (pattern, role))

    conn.commit()
    conn.close()
    print("Mappings updated for Geschäftsleitung, Vorstand, Management.")

if __name__ == "__main__":
    fix_mappings()
90  ARCHIVE_legacy_scripts/fix_silly_billy_data.py  Normal file
@@ -0,0 +1,90 @@
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import json
import logging

# Setup DB
DB_PATH = "sqlite:///companies_v3_fixed_2.db"
engine = create_engine(DB_PATH)
SessionLocal = sessionmaker(bind=engine)
session = SessionLocal()

# Import Models (Simplified for script)
from sqlalchemy import Column, Integer, String, Text, JSON
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Company(Base):
    __tablename__ = "companies"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    city = Column(String)
    country = Column(String)
    crm_vat = Column(String)
    street = Column(String)
    zip_code = Column(String)

class EnrichmentData(Base):
    __tablename__ = "enrichment_data"
    id = Column(Integer, primary_key=True)
    company_id = Column(Integer)
    source_type = Column(String)
    content = Column(JSON)

def fix_data():
    company_id = 32
    print(f"🔧 Fixing Data for Company ID {company_id}...")

    company = session.query(Company).filter_by(id=company_id).first()
    if not company:
        print("❌ Company not found.")
        return

    enrichment = session.query(EnrichmentData).filter_by(
        company_id=company_id, source_type="website_scrape"
    ).first()

    if enrichment and enrichment.content:
        imp = enrichment.content.get("impressum")
        if imp:
            print(f"📄 Found Impressum: {imp}")

            changed = False
            if imp.get("city"):
                company.city = imp.get("city")
                changed = True
                print(f"  -> Set City: {company.city}")

            if imp.get("vat_id"):
                company.crm_vat = imp.get("vat_id")
                changed = True
                print(f"  -> Set VAT: {company.crm_vat}")

            if imp.get("country_code"):
                company.country = imp.get("country_code")
                changed = True
                print(f"  -> Set Country: {company.country}")

            if imp.get("street"):
                company.street = imp.get("street")
                changed = True
                print(f"  -> Set Street: {company.street}")

            if imp.get("zip"):
                company.zip_code = imp.get("zip")
                changed = True
                print(f"  -> Set Zip: {company.zip_code}")

            if changed:
                session.commit()
                print("✅ Database updated.")
            else:
                print("ℹ️ No changes needed.")
        else:
            print("⚠️ No impressum data in enrichment.")
    else:
        print("⚠️ No enrichment data found.")

if __name__ == "__main__":
    fix_data()
30  ARCHIVE_legacy_scripts/list_all_companies.py  Normal file
@@ -0,0 +1,30 @@
import sqlite3
import os

DB_PATH = "companies_v3_fixed_2.db"

def list_companies():
    if not os.path.exists(DB_PATH):
        print(f"❌ Database not found at {DB_PATH}")
        return

    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()

        print(f"🔍 Listing companies in {DB_PATH}...")
        cursor.execute("SELECT id, name, crm_id, city, crm_vat FROM companies ORDER BY id DESC LIMIT 20")
        rows = cursor.fetchall()

        if not rows:
            print("❌ No companies found")
        else:
            for row in rows:
                print(f"  ID: {row[0]} | Name: {row[1]} | CRM ID: {row[2]} | City: {row[3]} | VAT: {row[4]}")

        conn.close()
    except Exception as e:
        print(f"❌ Error reading DB: {e}")

if __name__ == "__main__":
    list_companies()
18  ARCHIVE_legacy_scripts/list_industries.py  Normal file
@@ -0,0 +1,18 @@
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "company-explorer"))
from backend.database import SessionLocal, Industry

def list_industries():
    db = SessionLocal()
    try:
        industries = db.query(Industry.name).all()
        print("Available Industries:")
        for (name,) in industries:
            print(f"- {name}")
    finally:
        db.close()

if __name__ == "__main__":
    list_industries()
12  ARCHIVE_legacy_scripts/list_industries_db.py  Normal file
@@ -0,0 +1,12 @@
import sqlite3

DB_PATH = "/app/companies_v3_fixed_2.db"
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()

cursor.execute("SELECT name FROM industries")
industries = cursor.fetchall()
print("Available Industries:")
for ind in industries:
    print(f"- {ind[0]}")
conn.close()
120  ARCHIVE_legacy_scripts/market_db_manager.py  Normal file
@@ -0,0 +1,120 @@
import sqlite3
import json
import os
import uuid
from datetime import datetime

DB_PATH = os.environ.get("DB_PATH", "/app/market_intelligence.db")

def get_db_connection():
    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row
    return conn

def init_db():
    conn = get_db_connection()
    # Flexible schema: We store almost everything in a 'data' JSON column
    conn.execute('''
        CREATE TABLE IF NOT EXISTS projects (
            id TEXT PRIMARY KEY,
            name TEXT NOT NULL,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
            updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
            data JSON NOT NULL
        )
    ''')
    conn.commit()
    conn.close()

def save_project(project_data):
    """
    Saves a project. If 'id' exists in data, updates it. Otherwise creates new.
    """
    conn = get_db_connection()
    try:
        project_id = project_data.get('id')

        # Extract a name for the list view (e.g. from companyName or referenceUrl)
        # We assume the frontend passes a 'name' field, or we derive it.
        name = project_data.get('name') or project_data.get('companyName') or "Untitled Project"

        if not project_id:
            # Create New
            project_id = str(uuid.uuid4())
            project_data['id'] = project_id

            conn.execute(
                'INSERT INTO projects (id, name, data) VALUES (?, ?, ?)',
                (project_id, name, json.dumps(project_data))
            )
        else:
            # Update Existing
            conn.execute(
                '''UPDATE projects
                   SET name = ?, data = ?, updated_at = CURRENT_TIMESTAMP
                   WHERE id = ?''',
                (name, json.dumps(project_data), project_id)
            )

        conn.commit()
        return {"id": project_id, "status": "saved"}

    except Exception as e:
        return {"error": str(e)}
    finally:
        conn.close()

def get_all_projects():
    conn = get_db_connection()
    projects = conn.execute('SELECT id, name, created_at, updated_at FROM projects ORDER BY updated_at DESC').fetchall()
    conn.close()
    return [dict(ix) for ix in projects]

def load_project(project_id):
    conn = get_db_connection()
    project = conn.execute('SELECT data FROM projects WHERE id = ?', (project_id,)).fetchone()
    conn.close()
    if project:
        return json.loads(project['data'])
    return None

def delete_project(project_id):
    conn = get_db_connection()
    try:
        conn.execute('DELETE FROM projects WHERE id = ?', (project_id,))
        conn.commit()
        return {"status": "deleted", "id": project_id}
    except Exception as e:
        return {"error": str(e)}
    finally:
        conn.close()

if __name__ == "__main__":
    import sys
    # Simple CLI for Node.js bridge
    # Usage: python market_db_manager.py [init|list|save|load|delete] [args...]

    mode = sys.argv[1]

    if mode == "init":
        init_db()
        print(json.dumps({"status": "initialized"}))

    elif mode == "list":
        print(json.dumps(get_all_projects()))

    elif mode == "save":
        # Data is passed as a JSON string file path to avoid command line length limits
        data_file = sys.argv[2]
        with open(data_file, 'r') as f:
            data = json.load(f)
        print(json.dumps(save_project(data)))

    elif mode == "load":
        p_id = sys.argv[2]
        result = load_project(p_id)
        print(json.dumps(result if result else {"error": "Project not found"}))

    elif mode == "delete":
        p_id = sys.argv[2]
        print(json.dumps(delete_project(p_id)))
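Editor's note, not part of the commit: the `__main__` block above doubles as a small CLI for a Node.js bridge, with the `save` payload passed as a temporary JSON file path to avoid command-line length limits. A minimal sketch of how a caller could exercise that documented contract (emulated here in Python rather than Node.js; the script path and sample payload are assumptions):

```python
import json
import subprocess
import tempfile

project = {"name": "Demo Project", "companyName": "Robo-Planet GmbH"}

# Write the payload to a temp file and pass only its path to the CLI.
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump(project, tmp)
    tmp_path = tmp.name

# Assumed to run from the repository root next to ARCHIVE_legacy_scripts/.
result = subprocess.run(
    ["python3", "ARCHIVE_legacy_scripts/market_db_manager.py", "save", tmp_path],
    capture_output=True, text=True, check=True,
)
print(json.loads(result.stdout))  # e.g. {"id": "<uuid>", "status": "saved"}
```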
29  ARCHIVE_legacy_scripts/migrate_opener_native.py  Normal file
@@ -0,0 +1,29 @@
import sqlite3
import sys

DB_PATH = "/app/companies_v3_fixed_2.db"

def migrate():
    conn = None  # ensure the name exists even if connect() fails
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()

        print(f"Checking schema in {DB_PATH}...")
        cursor.execute("PRAGMA table_info(companies)")
        columns = [row[1] for row in cursor.fetchall()]

        if "ai_opener" in columns:
            print("Column 'ai_opener' already exists. Skipping.")
        else:
            print("Adding column 'ai_opener' to 'companies' table...")
            cursor.execute("ALTER TABLE companies ADD COLUMN ai_opener TEXT")
            conn.commit()
            print("✅ Migration successful.")

    except Exception as e:
        print(f"❌ Migration failed: {e}")
    finally:
        if conn: conn.close()

if __name__ == "__main__":
    migrate()
29  ARCHIVE_legacy_scripts/migrate_opener_secondary.py  Normal file
@@ -0,0 +1,29 @@
import sqlite3
import sys

DB_PATH = "/app/companies_v3_fixed_2.db"

def migrate():
    conn = None  # ensure the name exists even if connect() fails
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()

        print(f"Checking schema in {DB_PATH}...")
        cursor.execute("PRAGMA table_info(companies)")
        columns = [row[1] for row in cursor.fetchall()]

        if "ai_opener_secondary" in columns:
            print("Column 'ai_opener_secondary' already exists. Skipping.")
        else:
            print("Adding column 'ai_opener_secondary' to 'companies' table...")
            cursor.execute("ALTER TABLE companies ADD COLUMN ai_opener_secondary TEXT")
            conn.commit()
            print("✅ Migration successful.")

    except Exception as e:
        print(f"❌ Migration failed: {e}")
    finally:
        if conn: conn.close()

if __name__ == "__main__":
    migrate()
30  ARCHIVE_legacy_scripts/migrate_personas_v2.py  Normal file
@@ -0,0 +1,30 @@
import sqlite3
import os

DB_PATH = "/app/companies_v3_fixed_2.db"

def migrate_personas():
    print(f"Adding new columns to 'personas' table in {DB_PATH}...")
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    columns_to_add = [
        ("description", "TEXT"),
        ("convincing_arguments", "TEXT"),
        ("typical_positions", "TEXT"),
        ("kpis", "TEXT")
    ]

    for col_name, col_type in columns_to_add:
        try:
            cursor.execute(f"ALTER TABLE personas ADD COLUMN {col_name} {col_type}")
            print(f"  Added column: {col_name}")
        except sqlite3.OperationalError:
            print(f"  Column {col_name} already exists.")

    conn.commit()
    conn.close()
    print("Migration complete.")

if __name__ == "__main__":
    migrate_personas()
37  ARCHIVE_legacy_scripts/read_matrix_entry.py  Normal file
@@ -0,0 +1,37 @@
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "company-explorer"))
from backend.database import SessionLocal, Industry, Persona, MarketingMatrix

def read_specific_entry(industry_name: str, persona_name: str):
    db = SessionLocal()
    try:
        entry = (
            db.query(MarketingMatrix)
            .join(Industry)
            .join(Persona)
            .filter(Industry.name == industry_name, Persona.name == persona_name)
            .first()
        )

        if not entry:
            print(f"No entry found for {industry_name} and {persona_name}")
            return

        print("--- Generated Text ---")
        print(f"Industry: {industry_name}")
        print(f"Persona: {persona_name}")
        print("\n[Intro]")
        print(entry.intro)
        print("\n[Social Proof]")
        print(entry.social_proof)
        print("----------------------")

    finally:
        db.close()

if __name__ == "__main__":
    read_specific_entry("Healthcare - Hospital", "Infrastruktur-Verantwortlicher")
92  ARCHIVE_legacy_scripts/standalone_importer.py  Normal file
@@ -0,0 +1,92 @@
import csv
from collections import Counter
import os
import argparse
from sqlalchemy import create_engine, Column, Integer, String, Boolean, DateTime
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime
import logging

# --- Standalone Configuration ---
DATABASE_URL = "sqlite:////app/companies_v3_fixed_2.db"
LOG_FILE = "/app/Log_from_docker/standalone_importer.log"

# --- Logging Setup ---
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(LOG_FILE),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# --- SQLAlchemy Models (simplified, only what's needed) ---
Base = declarative_base()

class RawJobTitle(Base):
    __tablename__ = 'raw_job_titles'
    id = Column(Integer, primary_key=True)
    title = Column(String, unique=True, index=True)
    count = Column(Integer, default=1)
    source = Column(String, default="import")
    is_mapped = Column(Boolean, default=False)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

# --- Database Connection ---
engine = create_engine(DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

def import_job_titles_standalone(file_path: str):
    db = SessionLocal()
    try:
        logger.info(f"Starting standalone import of job titles from {file_path}")

        job_title_counts = Counter()
        total_rows = 0

        with open(file_path, 'r', encoding='utf-8') as f:
            reader = csv.reader(f)
            for row in reader:
                if row and row[0].strip():
                    title = row[0].strip()
                    job_title_counts[title] += 1
                    total_rows += 1

        logger.info(f"Read {total_rows} total job title entries. Found {len(job_title_counts)} unique titles.")

        added_count = 0
        updated_count = 0

        for title, count in job_title_counts.items():
            existing_title = db.query(RawJobTitle).filter(RawJobTitle.title == title).first()
            if existing_title:
                if existing_title.count != count:
                    existing_title.count = count
                    updated_count += 1
            else:
                new_title = RawJobTitle(title=title, count=count, source="csv_import", is_mapped=False)
                db.add(new_title)
                added_count += 1

        db.commit()
        logger.info(f"Standalone import complete. Added {added_count} new unique titles, updated {updated_count} existing titles.")

    except Exception as e:
        logger.error(f"Error during standalone job title import: {e}", exc_info=True)
        db.rollback()
    finally:
        db.close()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Standalone script to import job titles from a CSV file.")
    parser.add_argument("file_path", type=str, help="Path to the CSV file containing job titles.")
    args = parser.parse_args()

    # Ensure the log directory exists
    os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True)

    import_job_titles_standalone(args.file_path)
22  ARCHIVE_legacy_scripts/test_api_logic.py  Normal file
@@ -0,0 +1,22 @@
import os
import sys

# Add the company-explorer directory to the Python path
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), 'company-explorer')))

from backend.database import SessionLocal, MarketingMatrix, Industry, Persona
from sqlalchemy.orm import joinedload

db = SessionLocal()
try:
    query = db.query(MarketingMatrix).options(
        joinedload(MarketingMatrix.industry),
        joinedload(MarketingMatrix.persona)
    )
    entries = query.all()
    print(f"Total entries: {len(entries)}")
    for e in entries[:3]:
        print(f"ID={e.id}, Industry={e.industry.name if e.industry else 'N/A'}, Persona={e.persona.name if e.persona else 'N/A'}")
        print(f"  Subject: {e.subject}")
finally:
    db.close()
91  ARCHIVE_legacy_scripts/test_opener_api.py  Normal file
@@ -0,0 +1,91 @@
import requests
import os
import sys
import time

# Load credentials from .env
# Simple manual parser to avoid dependency on python-dotenv
def load_env(path):
    if not os.path.exists(path):
        print(f"Warning: .env file not found at {path}")
        return
    with open(path) as f:
        for line in f:
            if line.strip() and not line.startswith('#'):
                key, val = line.strip().split('=', 1)
                os.environ.setdefault(key, val)

load_env('/app/.env')

API_USER = os.getenv("API_USER", "admin")
API_PASS = os.getenv("API_PASSWORD", "gemini")
CE_URL = "http://127.0.0.1:8000"  # Target the local container (assuming port 8000 is mapped)
TEST_CONTACT_ID = 1  # Therme Erding

def run_test():
    print("🚀 STARTING API-LEVEL E2E TEXT GENERATION TEST\n")

    # --- Health Check ---
    print("Waiting for Company Explorer API to be ready...")
    for i in range(10):
        try:
            health_resp = requests.get(f"{CE_URL}/api/health", auth=(API_USER, API_PASS), timeout=2)
            if health_resp.status_code == 200:
                print("✅ API is ready.")
                break
        except requests.exceptions.RequestException:
            pass
        if i == 9:
            print("❌ API not ready after 20 seconds. Aborting.")
            return False
        time.sleep(2)

    scenarios = [
        {"name": "Infrastructure Role", "job_title": "Facility Manager", "opener_field": "opener", "keyword": "Sicherheit"},
        {"name": "Operational Role", "job_title": "Leiter Badbetrieb", "opener_field": "opener_secondary", "keyword": "Gäste"}
    ]

    all_passed = True
    for s in scenarios:
        print(f"--- Testing: {s['name']} ---")
        endpoint = f"{CE_URL}/api/provision/superoffice-contact"
        payload = {
            "so_contact_id": TEST_CONTACT_ID,
            "job_title": s['job_title']
        }

        try:
            resp = requests.post(endpoint, json=payload, auth=(API_USER, API_PASS))
            resp.raise_for_status()
            data = resp.json()

            # --- Assertions ---
            opener = data.get('opener')
            opener_sec = data.get('opener_secondary')

            assert opener, "❌ FAIL: Primary opener is missing!"
            print(f"  ✅ Primary Opener: '{opener}'")

            assert opener_sec, "❌ FAIL: Secondary opener is missing!"
            print(f"  ✅ Secondary Opener: '{opener_sec}'")

            target_opener_text = data.get(s['opener_field'])
            assert s['keyword'].lower() in target_opener_text.lower(), f"❌ FAIL: Keyword '{s['keyword']}' not in '{s['opener_field']}'!"
            print(f"  ✅ Keyword '{s['keyword']}' found in correct opener.")

            print(f"--- ✅ PASSED: {s['name']} ---\n")

        except Exception as e:
            print(f"  ❌ TEST FAILED: {e}")
            if hasattr(e, 'response') and e.response is not None:
                print(f"  Response: {e.response.text}")
            all_passed = False

    return all_passed

if __name__ == "__main__":
    if run_test():
        print("🏁 All scenarios passed successfully!")
    else:
        print("🔥 Some scenarios failed.")
        sys.exit(1)
12  ARCHIVE_legacy_scripts/test_provisioning_api.py  Normal file
@@ -0,0 +1,12 @@
import requests
import json

url = "http://company-explorer:8000/api/provision/superoffice-contact"
payload = {"so_contact_id": 4}
auth = ("admin", "gemini")

try:
    resp = requests.post(url, json=payload, auth=auth)
    print(json.dumps(resp.json(), indent=2))
except Exception as e:
    print(f"Error: {e}")
@@ -1,6 +1,16 @@
import json
import time
import os
import sys

# Ensure we can import from lead-engine
sys.path.append(os.path.join(os.path.dirname(__file__), 'lead-engine'))
try:
    from trading_twins_ingest import process_leads
except ImportError:
    print("Warning: Could not import trading_twins_ingest from lead-engine. Email ingestion disabled.")
    process_leads = None

from company_explorer_connector import handle_company_workflow

def run_trading_twins_process(target_company_name: str):
@@ -46,6 +56,14 @@ def run_trading_twins_process(target_company_name: str):
    print(f"Trading Twins Analyse für {target_company_name} abgeschlossen.")
    print(f"{'='*50}\n")

def run_email_ingest():
    """Starts the automated email ingestion process for Tradingtwins leads."""
    if process_leads:
        print("\nStarting automated email ingestion via Microsoft Graph...")
        process_leads()
        print("Email ingestion completed.")
    else:
        print("Error: Email ingestion module not available.")

if __name__ == "__main__":
    # Simulate the environment variables for this test run, if they are not set
@@ -54,26 +72,28 @@ if __name__ == "__main__":
    if "COMPANY_EXPLORER_API_PASSWORD" not in os.environ:
        os.environ["COMPANY_EXPLORER_API_PASSWORD"] = "gemini"

    print("Trading Twins Tool - Main Menu")
    print("1. Process specific company name")
    print("2. Ingest leads from Email (info@robo-planet.de)")
    print("3. Run demo sequence (Robo-Planet, Erding, etc.)")

    choice = input("\nSelect option (1-3): ").strip()

    if choice == "1":
        name = input("Enter company name: ").strip()
        if name:
            run_trading_twins_process(name)
    elif choice == "2":
        run_email_ingest()
    elif choice == "3":
        # Test case 1: a company that probably already exists
        # Since 'Robo-Planet GmbH' was created in earlier runs, it should be found now.
        run_trading_twins_process("Robo-Planet GmbH")

        # Short pause between test runs
        time.sleep(5)

        time.sleep(2)
        # Test case 1b: a well-known, real company
        run_trading_twins_process("Klinikum Landkreis Erding")

        # Short pause between test runs
        time.sleep(5)

        time.sleep(2)
        # Test case 2: a new, unique company
        new_unique_company_name = f"Trading Twins New Target {int(time.time())}"
        run_trading_twins_process(new_unique_company_name)

        # Short pause
        time.sleep(5)

        # Test case 3: another new company, to verify creation
        another_new_company_name = f"Another Demo Corp {int(time.time())}"
        run_trading_twins_process(another_new_company_name)
    else:
        print("Invalid choice.")
25  ARCHIVE_legacy_scripts/trigger_resync.py  Normal file
@@ -0,0 +1,25 @@
import sqlite3
import json
import time

DB_PATH = "connector_queue.db"

def trigger_resync(contact_id):
    print(f"🚀 Triggering manual resync for Contact {contact_id}...")

    payload = {
        "Event": "contact.changed",
        "PrimaryKey": contact_id,
        "ContactId": contact_id,
        "Changes": ["UserDefinedFields", "Name"]  # Dummy changes to pass filters
    }

    with sqlite3.connect(DB_PATH) as conn:
        conn.execute(
            "INSERT INTO jobs (event_type, payload, status) VALUES (?, ?, ?)",
            ("contact.changed", json.dumps(payload), 'PENDING')
        )
    print("✅ Job added to queue.")

if __name__ == "__main__":
    trigger_resync(6)  # Bennis Playland has CRM ID 6
13  ARCHIVE_legacy_scripts/verify_db.py  Normal file
@@ -0,0 +1,13 @@
import sqlite3

DB_PATH = "/app/companies_v3_fixed_2.db"
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
cursor.execute("SELECT name, description, convincing_arguments FROM personas")
rows = cursor.fetchall()
for row in rows:
    print(f"Persona: {row[0]}")
    print(f"  Description: {row[1][:100]}...")
    print(f"  Convincing: {row[2][:100]}...")
    print("-" * 20)
conn.close()
75  ARCHIVE_vor_migration/Fotograf.de/README.md  Normal file
@@ -0,0 +1,75 @@
# Archived Fotograf.de Tools

This directory (`ARCHIVE_vor_migration/Fotograf.de/`) contains two archived tools that were previously used to interact with `app.fotograf.de` and to generate Google Docs participant lists.

Both tools are isolated and documented here to make a later reuse and rework easier.

## 1. Fotograf.de Scraper

**Directory:** `./scraper/`

**Purpose:**
A Python-based script that automatically visits `app.fotograf.de`, logs in, and extracts data in two modes:
1. **Build e-mail list:** Collects contact data (buyer, e-mail, child names, login URLs) and stores it in a CSV file (`supermailer_fertige_liste.csv`).
2. **Evaluate statistics:** Produces a statistics CSV file (`job_statistik.csv`) about album purchases.

**Required files:**
* `./scraper/scrape_fotograf.py`: The main script containing all the logic.
* `./scraper/fotograf_credentials.json`: **(Must be created manually!)** This file must contain your login data for `app.fotograf.de` in the following JSON format:
  ```json
  {
    "PROFILNAME": {
      "username": "IHR_BENUTZERNAME",
      "password": "IHR_PASSWORT"
    }
  }
  ```

**Running via Docker:**
The tool is executed in a Docker environment that provides Google Chrome and Selenium.

1. **Build the image (once, or after changes to the Dockerfile/requirements.txt):**
   Navigate to the root directory of the main project (`/app`) and use the `Dockerfile.brancheneinstufung` located there:
   ```bash
   cd /app
   docker build -f Dockerfile.brancheneinstufung -t fotograf-scraper .
   ```
   *(Note: `Dockerfile.brancheneinstufung` uses the global `requirements.txt` in the root directory, which includes `selenium`.)*

2. **Start the container and run the script:**
   From the root directory of the main project (`/app`):
   ```bash
   cd /app
   docker run -it --rm -v "$(pwd):/app" fotograf-scraper python3 /app/ARCHIVE_vor_migration/Fotograf.de/scraper/scrape_fotograf.py
   ```
   The script interactively asks for the desired mode and the URL of the photo job.


## 2. Google Docs Participant List Generator

**Directory:** `./list_generator/`

**Purpose:**
A Python script that reads CSV files and creates formatted participant lists as a new Google Docs document in Google Drive. The tool is interactive and asks for details such as the event name, the type of institution, and the output mode at startup.

**Required files:**
* `./list_generator/list_generator.py`: The main script containing all the logic.
* `./list_generator/Namensliste.csv`: **(Must be created manually!)** A CSV file with the registrations for kindergartens/schools.
* `./list_generator/familien.csv`: **(Must be created manually!)** A CSV file with the registrations for family shootings.
* `./list_generator/service_account.json`: **(Must be created manually!)** The JSON file with the credentials for the Google Cloud service account. This file is required to access Google Docs and Google Drive.

**Execution:**
Navigate to the tool's directory and start it with Python:
```bash
cd /app/ARCHIVE_vor_migration/Fotograf.de/list_generator/
python3 list_generator.py
```
*(Make sure that all required Python libraries such as `google-api-python-client` etc. are installed in your environment. They are presumably available via the global `requirements.txt` in the root directory of the main project.)*

## Important Note on Credentials (Security)

The tools use `fotograf_credentials.json` and `service_account.json` for authentication. These files contain sensitive credentials and were **deliberately removed from the Git history**; they are not stored in the repository.

**To put the tools back into operation, these files must be created manually in the respective tool directory (`./scraper/` or `./list_generator/`) and filled with the correct credentials.**

**Priority for the rework:** When these tools are revised in the future, it is **mandatory** to improve the credential handling. Instead of static JSON files, environment variables (`.env`) or a secure secret management service should be used to raise the security standard (see the sketch below).
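Editor's note, not part of the commit: a minimal sketch of the environment-variable approach recommended in the paragraph above, assuming the scraper would be adapted accordingly. The variable names `FOTOGRAF_USERNAME` and `FOTOGRAF_PASSWORD` are hypothetical, not existing configuration keys.

```python
import os

def load_fotograf_credentials() -> dict:
    # Hypothetical env vars replacing fotograf_credentials.json on disk.
    username = os.environ.get("FOTOGRAF_USERNAME")
    password = os.environ.get("FOTOGRAF_PASSWORD")
    if not username or not password:
        # Fail early instead of silently falling back to a credentials file.
        raise RuntimeError("FOTOGRAF_USERNAME / FOTOGRAF_PASSWORD are not set")
    return {"username": username, "password": password}
```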
22  ARCHIVE_vor_migration/Fotograf.de/list_generator/README.md  Normal file
@@ -0,0 +1,22 @@
# Google Docs Participant List Generator (Archived)

This directory contains the archived files for the "Google Docs Participant List Generator".

**Purpose:**
A Python script that reads CSV files and creates formatted participant lists as a new Google Docs document in Google Drive. The tool is interactive and asks for details such as the event name at startup.

**Files in this folder:**
* `list_generator.py`: The main script containing all the logic.

**Files that must be created manually:**
These files are required as input by the script and must be located in the same directory:
* `Namensliste.csv`: A CSV file with the registrations for kindergartens/schools.
* `familien.csv`: A CSV file with the registrations for family shootings.
* `service_account.json`: The JSON file with the credentials for the Google Cloud service account that is authorized to access Google Docs and Google Drive.

**Note on execution:**
The script is run directly with Python, e.g.:
```bash
python3 list_generator.py
```
Make sure that all required libraries (such as `google-api-python-client`, `google-auth-httplib2`, `google-auth-oauthlib`) are installed in your Python environment. They are presumably included in the global `requirements.txt` in the root directory of the project.
32  ARCHIVE_vor_migration/Fotograf.de/scraper/README.md  Normal file
@@ -0,0 +1,32 @@
# Fotograf.de Scraper (Archived)

This directory contains the archived files for the "Fotograf.de Scraper".

**Purpose:**
A Python-based tool that automatically visits `app.fotograf.de`, logs in, and extracts data in two modes:
1. **Build e-mail list:** Collects contact data and stores it in a CSV file (`supermailer_fertige_liste.csv`).
2. **Evaluate statistics:** Produces a statistics CSV file (`job_statistik.csv`).

**Files in this folder:**
* `scrape_fotograf.py`: The main script containing all the logic.

**Files that must be created manually:**
* `fotograf_credentials.json`: This file is required by the script and must contain the login data for `app.fotograf.de` in the following JSON format:
  ```json
  {
    "PROFILNAME": {
      "username": "IHR_BENUTZERNAME",
      "password": "IHR_PASSWORT"
    }
  }
  ```

**External dependencies (located in the root directory of the project):**
* **Dockerfile:** `Dockerfile.brancheneinstufung` was probably used to build a Docker image for this tool. It installs Google Chrome and the required Python packages.
* **Python dependencies:** The global `requirements.txt` in the root directory contains `selenium` and the other required libraries.

**Example `docker run` commands:**
1. Build the image (only once): `docker build -f Dockerfile.brancheneinstufung -t fotograf-scraper .`
2. Run the container: `docker run -it --rm -v "$(pwd):/app" fotograf-scraper python3 /app/ARCHIVE_vor_migration/Fotograf.de/scraper/scrape_fotograf.py`

(Paths may need to be adjusted depending on where the command is executed from.)
0  ARCHIVE_vor_migration/Generating  Normal file
BIN  FRITZbox7530.pdf
Binary file not shown.
265  GEMINI.md
@@ -20,6 +20,41 @@ Dies ist in der Vergangenheit mehrfach passiert und hat zu massivem Datenverlust
- **Git repository:** This project is managed in a Git repository. All code changes are versioned. See the section "Git Workflow & Conventions" for our working rules.
- **IMPORTANT:** The AI agent can commit changes, but for security reasons it often cannot run `git push`. Please run `git push` manually when the agent reports this.

---

## ‼️ Current Project Focus (March 2026): Migration & Stabilization

**The system was fully stabilized on March 7, 2026, and prepared for the move to the Ubuntu VM (`docker1`).**

All current tasks for the move are centralized here:
➡️ **[`RELOCATION.md`](./RELOCATION.md)**

---

## ✅ Current Status (March 7, 2026) - STABLE

The system runs stably on the Synology development environment.

### 1. SuperOffice Connector (v2.1.1 - "Echo Shield")
* **Echo prevention (hardening):** On startup the worker (`worker.py`) identifies itself dynamically (`/Associate/Me`) and strictly ignores all events that were triggered by its own user (e.g. ID 528); a sketch of this check follows below this list.
* **Field filter:** Changes are only processed if relevant fields (Name, URL, JobTitle) are affected. Irrelevant updates (e.g. `lastUpdated`) are skipped.
* **Webhook:** Registered at `https://floke-ai.duckdns.org/connector/webhook` with token validation in the query string.

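Editor's sketch, not the actual `worker.py` implementation: an illustration of the echo-shield idea described above. The `/Associate/Me` endpoint, the filtered field names, and the self-user check are taken from the text; the `client` object, the event field names, and `RELEVANT_FIELDS` are assumptions.

```python
RELEVANT_FIELDS = {"Name", "URL", "JobTitle"}

def resolve_own_identity(client) -> int:
    # On startup, ask the API who we are (analogous to GET /Associate/Me),
    # so that later events caused by this user can be recognized as echoes.
    me = client.get("/Associate/Me")
    return me["AssociateId"]

def should_process(event: dict, own_associate_id: int) -> bool:
    # 1. Echo prevention: drop events triggered by the connector's own user.
    if event.get("AssociateId") == own_associate_id:
        return False
    # 2. Field filter: only act if at least one relevant field changed.
    changed = set(event.get("Changes", []))
    return bool(changed & RELEVANT_FIELDS)
```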
### 2. Company Explorer (v0.7.4)
* **Database:** Schema repaired (`fix_missing_columns.py` executed). The missing columns (`street`, `zip_code`, `unsubscribe_token`) are now present.
* **Frontend:** Build pipeline repaired. PostCSS/Tailwind generate correct styling again.
* **Persistence:** The database is stored safely in the Docker volume `explorer_db_data`.

### 3. Lead Engine (Trading Twins)
* **Integration:** Integrated into `docker-compose.yml` and reachable at `/lead/` via the gateway.
* **Persistence:** Uses the volume `lead_engine_data`.
* **Status:** The UI is running. Email ingestion via MS Graph still needs credentials.

### 4. Infrastructure
* **Secrets:** All API keys (OpenAI, Gemini, SO, DuckDNS) live centrally in the `.env` file.
* **DuckDNS:** The service is running and updates the IP successfully.

---

## Git Workflow & Conventions

### Closing out the workday with `#fertig`
@@ -37,6 +72,38 @@ Wenn Sie `#fertig` eingeben, führt der Agent folgende Schritte aus:
|
||||
- Der Status des Tasks in Notion wird auf "Done" (oder einen anderen passenden Status) gesetzt.
|
||||
4. **Commit & Push:** Wenn Code-Änderungen vorhanden sind, wird ein Commit erstellt und ein `git push` interaktiv angefragt.
|
||||
|
||||
### ⚠️ Troubleshooting: Git `push`/`pull` Errors inside Docker Containers
|
||||
|
||||
Occasionally, `git push` or `git pull` commands run from inside the `gemini-session` Docker container fail with errors such as `Could not resolve host` or `Failed to connect to <Gitea-Domain>`, even though the external Gitea URL (e.g. `floke-gitea.duckdns.org`) is reachable from the host system. The reason is that the Docker container may not share the host's DNS resolution mechanisms or have a direct route to the external address.
|
||||
|
||||
**Problem:** Standard DNS resolution and external hostnames fail inside the Docker container.
|
||||
|
||||
**Solution:** To establish a robust, direct connection to the Gitea container on the *same Docker host*, switch the Git remote URL to the **local IP address of the Docker host** and use **token-based authentication**.
|
||||
|
||||
**Configuration steps:**
|
||||
|
||||
1. **Determine the local IP of the Docker host:**
|
||||
* Find the local IP address of the server (e.g. your Diskstation) on which the Docker containers run. Example: `192.168.178.6`.
|
||||
2. **Determine the Gitea token from `.env`:**
|
||||
* Find the Gitea token (in the format `<Token>`, either in the `.env` file or in the previous `git remote -v` output). Example: `318c736205934dd066b6bbcb1d732931eaa7c8c4`.
|
||||
3. **Update the Git remote URL:**
|
||||
* Use the following command to update the remote URL. Replace `<Username>`, `<Token>`, and `<Local-IP-Adresse>` with your values.
|
||||
```bash
|
||||
git remote set-url origin http://<Username>:<Token>@<Local-IP-Adresse>:3000/Floke/Brancheneinstufung2.git
|
||||
```
|
||||
* **Example (with your values):**
|
||||
```bash
|
||||
git remote set-url origin http://Floke:318c736205934dd066b6bbcb1d732931eaa7c8c4@192.168.178.6:3000/Floke/Brancheneinstufung2.git
|
||||
```
|
||||
*(Note: For internal Docker communication, `http` instead of `https` is usually sufficient and avoids problems with SSL certificates.)*
|
||||
4. **Verification:**
|
||||
* Run `git fetch` to test the new configuration. It should now work without a password prompt:
|
||||
```bash
|
||||
git fetch
|
||||
```
|
||||
|
||||
This configuration ensures a stable Git connection within your Docker environment.
|
||||
|
||||
|
||||
## Project Overview
|
||||
|
||||
@@ -105,6 +172,17 @@ The system architecture has evolved from a CLI-based toolset to a modern web app
|
||||
* **Problem:** Users didn't see when a background job finished.
|
||||
* **Solution:** Implementing a polling mechanism (`setInterval`) tied to an `isProcessing` state is superior to static timeouts for long-running AI tasks.
|
||||
|
||||
7. **Hyper-Personalized Marketing Engine (v3.2) - "Deep Persona Injection":**
|
||||
* **Problem:** Marketing texts were too generic and did not reflect the specific psychological or operational profile of the different target roles (e.g., CFO vs. Facility Manager).
|
||||
* **Solution (Deep Sync & Prompt Hardening):**
|
||||
1. **Extended Schema:** Added `description`, `convincing_arguments`, and `kpis` to the `Persona` database model to store richer profile data.
|
||||
2. **Notion Master Sync:** Updated the synchronization logic to pull these deep insights directly from the Notion "Personas / Roles" database.
|
||||
3. **Role-Centric Prompts:** The `MarketingMatrix` generator was re-engineered to inject the persona's "Mindset" and "KPIs" into the prompt (see the sketch after this section).
|
||||
* **Example (Healthcare):**
|
||||
- **Infrastructure Lead:** Now focuses on "IT Security", "DSGVO Compliance", and "WLAN integration".
|
||||
- **Economic Buyer (CFO):** Focuses on "ROI Amortization", "Reduction of Overtime", and "Flexible Financing (RaaS)".
|
||||
* **Verification:** Verified that the transition from a company-specific **Opener** (e.g., observing staff shortages at Klinikum Erding) to the **Role-specific Intro** (e.g., pitching transport robots to reduce walking distances for nursing directors) is seamless and logical.
|
||||
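A simplified sketch of the role-centric prompt assembly (field names follow the extended schema above; the prompt wording is illustrative, not the production prompt):

```python
from dataclasses import dataclass

@dataclass
class Persona:
    # Extended schema synced from the Notion "Personas / Roles" database
    name: str
    description: str           # the role's mindset
    convincing_arguments: str
    kpis: str

def build_role_prompt(company_opener: str, persona: Persona) -> str:
    # Inject the persona's mindset and KPIs so the role-specific intro
    # continues the company-specific opener seamlessly
    return (
        f"Company-specific opener:\n{company_opener}\n\n"
        f"Target role: {persona.name}\n"
        f"Mindset: {persona.description}\n"
        f"Convincing arguments: {persona.convincing_arguments}\n"
        f"KPIs this role is measured on: {persona.kpis}\n\n"
        "Write a role-specific intro that follows logically from the opener."
    )
```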
|
||||
## Metric Parser - Regression Tests
|
||||
To ensure the stability and accuracy of the metric extraction logic, a dedicated test suite (`/company-explorer/backend/tests/test_metric_parser.py`) has been created. It covers the following critical, real-world bug fixes:
|
||||
|
||||
@@ -122,8 +200,185 @@ To ensure the stability and accuracy of the metric extraction logic, a dedicated
|
||||
|
||||
These tests are crucial for preventing regressions as the parser logic evolves.
|
||||
|
||||
## Next Steps
|
||||
* **Marketing Automation:** Implement the actual sending logic (or export) based on the contact status.
|
||||
* **Job Role Mapping Engine:** Connect the configured patterns to the contact import/creation process to auto-assign roles.
|
||||
* **Industry Classification Engine:** Connect the configured industries to the AI Analysis prompt to enforce the "Strict Mode" mapping.
|
||||
* **Export:** Generate Excel/CSV enriched reports (already partially implemented via JSON export).
|
||||
## Notion Maintenance & Data Sync
|
||||
|
||||
Since the "Golden Record" for Industry Verticals (Pains, Gains, Products) resides in Notion, specific tools are available to read and sync this data.
|
||||
|
||||
**Location:** `/app/company-explorer/backend/scripts/notion_maintenance/`
|
||||
|
||||
**Prerequisites:**
|
||||
- Ensure `.env` is loaded with `NOTION_API_KEY` and correct DB IDs.
|
||||
|
||||
**Key Scripts:**
|
||||
|
||||
1. **`check_relations.py` (Reader - Deep):**
|
||||
- **Purpose:** Reads Verticals and resolves linked Product Categories (Relation IDs -> Names). Essential for verifying the "Primary/Secondary Product" logic.
|
||||
- **Usage:** `python3 check_relations.py`
|
||||
|
||||
2. **`update_notion_full.py` (Writer - Batch):**
|
||||
- **Purpose:** Batch updates Pains and Gains for multiple verticals. Use this as a template when refining the messaging strategy.
|
||||
- **Usage:** Edit the dictionary in the script, then run `python3 update_notion_full.py`.
|
||||
|
||||
3. **`list_notion_structure.py` (Schema Discovery):**
|
||||
- **Purpose:** Lists all property keys and page titles. Use this to debug schema changes (e.g. if a column was renamed).
|
||||
- **Usage:** `python3 list_notion_structure.py`
|
||||
|
||||
## Next Steps (Updated Feb 27, 2026)
|
||||
|
||||
***NOTE:*** *This section is outdated. The current next steps concern the migration preparation and are documented in [`RELOCATION.md`](./RELOCATION.md).*
|
||||
|
||||
* **Notion Content:** Finalize "Pains" and "Gains" for all 25 verticals in the Notion master database.
|
||||
* **Intelligence:** Run `generate_matrix.py` in the Company Explorer backend to populate the matrix for all new English vertical names.
|
||||
* **Automation:** Register the production webhook (requires `admin-webhooks` rights) to enable real-time CRM sync without manual job injection.
|
||||
* **Execution:** Connect the "Sending Engine" (the actual email dispatch logic) to the SuperOffice fields.
|
||||
* **Monitoring:** Monitor the 'Atomic PATCH' logs in production for any 400 errors regarding field length or specific character sets.
|
||||
|
||||
|
||||
## Company Explorer Access & Debugging
|
||||
|
||||
The **Company Explorer** is the central intelligence engine.
|
||||
|
||||
**Core Paths:**
|
||||
* **Database:** `/app/companies_v3_fixed_2.db` (SQLite)
|
||||
* **Backend Code:** `/app/company-explorer/backend/`
|
||||
* **Logs:** `/app/logs_debug/company_explorer_debug.log`
|
||||
|
||||
**Accessing Data:**
|
||||
To inspect live data without starting the full stack, use `sqlite3` directly or the helper scripts (if environment permits).
|
||||
|
||||
* **Direct SQL:** `sqlite3 /app/companies_v3_fixed_2.db "SELECT * FROM companies WHERE name LIKE '%Firma%';" `
|
||||
* **Python (requires env):** The app runs in a Docker container. When debugging from outside (CLI agent), Python dependencies like `sqlalchemy` might be missing in the global scope. Prefer `sqlite3` for quick checks.
|
||||
|
||||
**Key Endpoints (Internal API :8000):**
|
||||
* `POST /api/provision/superoffice-contact`: Triggers the text generation logic.
|
||||
* `GET /api/companies/{id}`: Full company profile including enrichment data.
|
||||
|
||||
**Troubleshooting:**
|
||||
* **"BaseModel" Error:** Usually a mix-up between Pydantic and SQLAlchemy `Base`. Check imports in `database.py`.
|
||||
* **Missing Dependencies:** The CLI agent runs in `/app` but not necessarily inside the container's venv. Use standard tools (`grep`, `sqlite3`) where possible.
|
||||
|
||||
---
|
||||
|
||||
## Critical Debugging Session (Feb 21, 2026) - Re-Stabilizing the Analysis Engine
|
||||
|
||||
A critical session was required to fix a series of cascading failures in the `ClassificationService`. The key takeaways are documented here to prevent future issues.
|
||||
|
||||
1. **The "Phantom" `NameError`:**
|
||||
* **Symptom:** The application crashed with a `NameError: name 'joinedload' is not defined`, even though the import was correctly added to `classification.py`.
|
||||
* **Root Cause:** The `uvicorn` server's hot-reload mechanism within the Docker container did not reliably pick up file changes made from outside the container. A simple `docker-compose restart` was insufficient to clear the process's cached state.
|
||||
* **Solution:** After any significant code change, especially to imports or core logic, a forced-recreation of the container is **mandatory**.
|
||||
```bash
|
||||
# Correct Way to Apply Changes:
|
||||
docker-compose up -d --build --force-recreate company-explorer
|
||||
```
|
||||
|
||||
2. **The "Invisible" Logs:**
|
||||
* **Symptom:** No debug logs were being written, making it impossible to trace the execution flow.
|
||||
* **Root Cause:** The `LOG_DIR` path in `/company-explorer/backend/config.py` was misconfigured (`/app/logs_debug`) and did not point to the actual, historical log directory (`/app/Log_from_docker`).
|
||||
* **Solution:** Configuration paths must be treated as absolute and verified. Correcting the `LOG_DIR` path immediately resolved the issue.
|
||||
|
||||
3. **Inefficient Debugging Loop:**
|
||||
* **Symptom:** The cycle of triggering a background job via API, waiting, and then manually checking logs was slow and inefficient.
|
||||
* **Root Cause:** Lack of a tool to test the core application logic in isolation.
|
||||
* **Solution:** A dedicated, interactive test script was created (`/company-explorer/backend/scripts/debug_single_company.py`). It runs the entire analysis for a single company in the foreground, providing immediate and detailed feedback. This pattern is invaluable for complex, multi-step processes and should be standard for future development (a sketch of the pattern follows below).
|
||||
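A minimal sketch of that pattern (service and module names are assumptions; the real script lives at the path above):

```python
import argparse
import logging

# Assumed backend imports; the actual module layout may differ
from database import SessionLocal
from services.classification import ClassificationService

def main() -> None:
    parser = argparse.ArgumentParser(description="Run the full analysis for one company in the foreground")
    parser.add_argument("company_id", type=int)
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG)  # immediate, detailed feedback on stdout
    with SessionLocal() as session:
        service = ClassificationService(session)
        print(service.analyze_company(args.company_id))

if __name__ == "__main__":
    main()
```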
## Production Migration & Multi-Campaign Support (Feb 27, 2026)
|
||||
|
||||
The system has been fully migrated to the SuperOffice production environment (`online3.superoffice.com`, tenant `Cust26720`).
|
||||
|
||||
### 1. Final UDF Mappings (Production)
|
||||
These ProgIDs are verified and active for the production tenant:
|
||||
|
||||
| Field Purpose | Entity | ProgID | Notes |
|
||||
| :--- | :--- | :--- | :--- |
|
||||
| **MA Subject** | Person | `SuperOffice:19` | |
|
||||
| **MA Intro** | Person | `SuperOffice:20` | |
|
||||
| **MA Social Proof** | Person | `SuperOffice:21` | |
|
||||
| **MA Unsubscribe** | Person | `SuperOffice:22` | URL format |
|
||||
| **MA Campaign** | Person | `SuperOffice:23` | List field (uses `:DisplayText`) |
|
||||
| **Vertical** | Contact | `SuperOffice:83` | List field (mapped via JSON) |
|
||||
| **AI Summary** | Contact | `SuperOffice:84` | Truncated to 132 chars |
|
||||
| **AI Last Update** | Contact | `SuperOffice:85` | Format: `[D:MM/DD/YYYY HH:MM:SS]` |
|
||||
| **Opener Primary** | Contact | `SuperOffice:86` | |
|
||||
| **Opener Secondary**| Contact | `SuperOffice:87` | |
|
||||
| **Last Outreach** | Contact | `SuperOffice:88` | |
|
||||
|
||||
### 2. Vertical ID Mapping (Production)
|
||||
The full list of 25 verticals with their internal SuperOffice IDs (List `udlist331`):
|
||||
`Automotive - Dealer: 1613, Corporate - Campus: 1614, Energy - Grid & Utilities: 1615, Energy - Solar/Wind: 1616, Healthcare - Care Home: 1617, Healthcare - Hospital: 1618, Hospitality - Gastronomy: 1619, Hospitality - Hotel: 1620, Industry - Manufacturing: 1621, Infrastructure - Communities: 1622, Infrastructure - Public: 1623, Infrastructure - Transport: 1624, Infrastructure - Parking: 1625, Leisure - Entertainment: 1626, Leisure - Fitness: 1627, Leisure - Indoor Active: 1628, Leisure - Outdoor Park: 1629, Leisure - Wet & Spa: 1630, Logistics - Warehouse: 1631, Others: 1632, Reinigungsdienstleister: 1633, Retail - Food: 1634, Retail - Non-Food: 1635, Retail - Shopping Center: 1636, Tech - Data Center: 1637`.
|
||||
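For the JSON mapping referenced in the UDF table above, this list translates naturally into a lookup dictionary; an excerpt as a sketch:

```python
# Excerpt of the Vertical -> SuperOffice list ID mapping (list udlist331); see the full list above
VERTICAL_IDS = {
    "Automotive - Dealer": 1613,
    "Healthcare - Hospital": 1618,
    "Leisure - Indoor Active": 1628,
    "Retail - Food": 1634,
    "Tech - Data Center": 1637,
}

def vertical_to_list_id(vertical: str) -> int | None:
    # Returns None for unknown verticals so callers can fall back to "Others" (1632)
    return VERTICAL_IDS.get(vertical)
```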
|
||||
### 3. Technical Lessons Learned (SO REST API)
|
||||
|
||||
1. **Atomic PATCH (Stability):** Bundling all contact updates into a single `PATCH` request to the `/Contact/{id}` endpoint is far more stable than sequential UDF updates. If one field fails (e.g. an invalid property), the whole transaction might roll back or partially fail, so proactive validation is key (see the sketch after this list).
|
||||
2. **Website Sync (`Urls` Array):** Updating the website via REST requires manipulating the `Urls` array property. Simple field assignment to `UrlAddress` fails during `PATCH`.
|
||||
* *Correct Format:* `"Urls": [{"Value": "https://example.com", "Description": "AI Discovered"}]`.
|
||||
3. **List Resolution (`:DisplayText`):** To get the clean string value of a list field (like Campaign Name) without extra API calls, use the pseudo-field `ProgID:DisplayText` in the `$select` parameter.
|
||||
4. **Field Length Limits:** Standard SuperOffice text UDFs are limited to approx. 140-254 characters. AI-generated summaries must be truncated (e.g. 132 chars) to avoid 400 Bad Request errors.
|
||||
5. **Docker `env_file` Importance:** For production, mapping individual variables in `docker-compose.yml` is error-prone. Using `env_file: .env` ensures all services stay synchronized with the latest UDF IDs and mappings.
|
||||
6. **Production URL Schema:** The production API is strictly hosted on `online3.superoffice.com` (for this tenant), while OAuth remains at `online.superoffice.com`.
|
||||
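A hedged sketch combining points 1, 2, and 4 into a single atomic `PATCH` (ProgIDs from the table above; the payload shape and helper names are simplified assumptions, not the verified production format):

```python
import requests

def truncate(text: str, limit: int = 132) -> str:
    # Standard text UDFs reject long values with 400 Bad Request, so cut summaries first
    return text[:limit]

def patch_contact(base_url: str, headers: dict, contact_id: int, summary: str, website: str) -> None:
    # One bundled PATCH instead of sequential UDF updates; validate fields before sending
    payload = {
        "UserDefinedFields": {"SuperOffice:84": truncate(summary)},  # AI Summary
        # Websites must be written via the Urls array, not a plain UrlAddress assignment
        "Urls": [{"Value": website, "Description": "AI Discovered"}],
    }
    resp = requests.patch(f"{base_url}/api/v1/Contact/{contact_id}", json=payload, headers=headers, timeout=30)
    resp.raise_for_status()
```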
|
||||
### 4. Campaign Trigger Logic
|
||||
The `worker.py` (v1.8) now extracts the `campaign_tag` from `SuperOffice:23:DisplayText`. This tag is passed to the Company Explorer's provisioning API. If a matching entry exists in the `MarketingMatrix` for that tag, specific texts are used; otherwise, it falls back to the "standard" Kaltakquise texts.
|
||||
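Sketch of the fallback logic (the matrix structure and function name are assumptions):

```python
def resolve_campaign_texts(matrix: dict, vertical: str, campaign_tag: str | None) -> dict:
    # worker.py passes the campaign_tag read from SuperOffice:23:DisplayText.
    # If the MarketingMatrix has an entry for that tag, use it; otherwise fall back
    # to the "standard" cold-outreach (Kaltakquise) texts.
    vertical_entries = matrix.get(vertical, {})
    if campaign_tag and campaign_tag in vertical_entries:
        return vertical_entries[campaign_tag]
    return vertical_entries["standard"]
```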
|
||||
### 5. SuperOffice Authentication (Critical Update Feb 28, 2026)
|
||||
|
||||
**Problem:** Authentication failures ("Invalid refresh token" or "Invalid client_id") occurred because standard `load_dotenv()` did not override stale environment variables present in the shell process.
|
||||
|
||||
**Solution:** Always use `load_dotenv(override=True)` in Python scripts to force loading the actual values from the `.env` file.
|
||||
|
||||
**Correct Authentication Pattern (Python):**
|
||||
```python
|
||||
from dotenv import load_dotenv
|
||||
import os
|
||||
|
||||
# CRITICAL: override=True ensures we read from .env even if env vars are already set
|
||||
load_dotenv(override=True)
|
||||
|
||||
client_id = os.getenv("SO_CLIENT_ID")
|
||||
# ...
|
||||
```
|
||||
|
||||
**Known Working Config (Production):**
|
||||
* **Environment:** `online3`
|
||||
* **Tenant:** `Cust26720`
|
||||
* **Token Logic:** The `AuthHandler` implementation in `health_check_so.py` is the reference standard. Avoid using legacy `superoffice_client.py` without verifying it uses `override=True`.
|
||||
|
||||
### 6. Sales & Opportunities (Roboplanet Specifics)
|
||||
|
||||
When creating sales via API, specific constraints apply due to the shared tenant with Wackler:
|
||||
|
||||
* **SaleTypeId:** MUST be **14** (`GE:"Roboplanet Verkauf";`) to ensure the sale is assigned to the correct business unit (a sketch combining these constraints follows this list).
|
||||
* *Alternative:* ID 16 (`GE:"Roboplanet Teststellung";`) for trials.
|
||||
* **Mandatory Fields:**
|
||||
* `Saledate` (Estimated Date): Must be provided in ISO format (e.g., `YYYY-MM-DDTHH:MM:SSZ`).
|
||||
* `Person`: Highly recommended linking to a specific person, not just the company.
|
||||
* **Context:** Avoid creating sales on the parent company "Wackler Service Group" (ID 3). Always target the specific lead company.
|
||||
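A hedged sketch of creating a Roboplanet sale via REST (the endpoint path and exact payload casing are assumptions; the IDs and constraints come from the list above):

```python
import requests

def create_roboplanet_sale(base_url: str, headers: dict, contact_id: int, person_id: int, heading: str) -> dict:
    payload = {
        "SaleTypeId": 14,                    # "Roboplanet Verkauf"; use 16 for "Roboplanet Teststellung"
        "ContactId": contact_id,             # target the specific lead company, never "Wackler Service Group" (ID 3)
        "PersonId": person_id,               # linking a specific person is strongly recommended
        "Saledate": "2026-06-30T00:00:00Z",  # estimated date, ISO format is mandatory
        "Heading": heading,
    }
    resp = requests.post(f"{base_url}/api/v1/Sale", json=payload, headers=headers, timeout=30)
    resp.raise_for_status()
    return resp.json()
```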
|
||||
### Analysis of the SuperOffice `Sale` Entity (March 2026)
|
||||
|
||||
- **Goal:** Produce a report showing which customers were offered or bought which products. The initial assumption was that product information is often captured as free text rather than via the official product catalog.
|
||||
- **Problem:** Inspecting the data structure showed that the API endpoints for querying `Quote` objects (offers) and `QuoteLines` (offer line items) across `Sale`, `Contact`, or `Project` relations did not work reliably. Many queries resulted in `500 Internal Server Errors` or empty result sets, which made a direct sale-to-product link impossible.
|
||||
- **Key finding (data structure):**
|
||||
1. **Free text instead of structured data:** Analyzing a concrete `Sale` object (ID `342243`) confirmed the original hypothesis. Product information (e.g. `2xOmnie CD-01 mit Nachlass`) is entered directly as free text into the `Heading` field (subject) of the `Sale` object. Linked `Quote` or `QuoteLine` entities often do not exist.
|
||||
2. **Data quality of links:** A significant number of `Sale` objects in the system have no link to a `Contact` object (`Contact: null`). This makes automatically assigning sales to customers considerably harder.
|
||||
- **Next step / solution path:** A script (`/app/connector-superoffice/generate_customer_product_report.py`) was developed to address these problems. It queries only `Sale` objects that have a valid `Contact` link (`$filter=Contact ne null`), then extracts the customer name and the sale's `Heading` field and searches the latter for predefined product keywords. The results are written to a CSV file (`product_report.csv`) for manual analysis. This is the only reliable way to extract the desired information from the system; a condensed sketch follows below.
|
||||
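A condensed sketch of that approach (the keyword list, column selection, and response envelope are illustrative; the real implementation is `generate_customer_product_report.py`):

```python
import csv
import requests

PRODUCT_KEYWORDS = ["Omnie", "CD-01"]  # illustrative keyword list

def build_product_report(base_url: str, headers: dict, out_path: str = "product_report.csv") -> None:
    # Only sales with a valid Contact link are usable for the report
    params = {"$select": "contactName,heading", "$filter": "Contact ne null"}
    resp = requests.get(f"{base_url}/api/v1/Sale", headers=headers, params=params, timeout=30)
    resp.raise_for_status()
    with open(out_path, "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(["customer", "heading", "matched_products"])
        for sale in resp.json().get("value", []):
            heading = sale.get("heading", "")
            hits = [kw for kw in PRODUCT_KEYWORDS if kw.lower() in heading.lower()]
            writer.writerow([sale.get("contactName", ""), heading, ", ".join(hits)])
```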
|
||||
### 7. Service & Tickets (Anfragen)
|
||||
|
||||
SuperOffice Tickets represent the support and request system. Like Sales, they are organized to allow separation between Roboplanet and Wackler.
|
||||
|
||||
* **Entity Name:** `ticket`
|
||||
* **Roboplanet Specific Categories (CategoryId):**
|
||||
* **ID 46:** `GE:"Lead Roboplanet";`
|
||||
* **ID 47:** `GE:"Vertriebspartner Roboplanet";`
|
||||
* **ID 48:** `GE:"Weitergabe Roboplanet";`
|
||||
* **Hierarchical:** `Roboplanet/Support` (often used for technical issues).
|
||||
* **Key Fields:**
|
||||
* `ticketId`: Internal ID.
|
||||
* `title`: The subject of the request.
|
||||
* `contactId` / `personId`: Links to company and contact person.
|
||||
* `ticketStatusId`: 1 (Unbearbeitet / unprocessed), 2 (In Arbeit / in progress), 3 (Bearbeitet / done).
|
||||
* `ownedBy`: Often "ROBO" for Roboplanet staff.
|
||||
* **Cross-Links:** Tickets can be linked to `saleId` (to track support during a sale) or `projectId`. A query sketch using the category and status IDs above follows below.
|
||||
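A small hedged sketch for pulling open Roboplanet tickets (the endpoint path and filter syntax are assumptions; the category and status IDs are taken from the list above):

```python
import requests

def fetch_open_roboplanet_tickets(base_url: str, headers: dict) -> list[dict]:
    # CategoryIds 46-48 are the Roboplanet categories; ticketStatusId 3 means "Bearbeitet" (done)
    params = {
        "$select": "ticketId,title,contactId,personId,ticketStatusId,ownedBy",
        "$filter": "categoryId in (46, 47, 48) and ticketStatusId ne 3",
    }
    resp = requests.get(f"{base_url}/api/v1/Ticket", headers=headers, params=params, timeout=30)
    resp.raise_for_status()
    return resp.json().get("value", [])
```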
|
||||
---
|
||||
This is the core logic used to generate the company-specific opener.
|
||||
@@ -1,385 +0,0 @@
|
||||
Kinderhaus St. Martin Neuching Kinderfotos Erding
|
||||
02. - 05.06.2025
|
||||
|
||||
Nachname Vorname Gruppe
|
||||
----------------------------------------------------------------
|
||||
Bauer Emilie Christiana Bienengruppe
|
||||
Eberl Maximilian Bienengruppe
|
||||
Eckinger Charly Bienengruppe
|
||||
Ehrensberger Theo Bienengruppe
|
||||
Gugic Vanessa Bienengruppe
|
||||
Hermansdorfer Marina Bienengruppe
|
||||
Kaiser Theresa Bienengruppe
|
||||
Mehringer Quirin Bienengruppe
|
||||
Müller Helene Bienengruppe
|
||||
Nowak Juna Bienengruppe
|
||||
Root Jonas Bienengruppe
|
||||
Tress Anna Bienengruppe
|
||||
Vass Marcell Bienengruppe
|
||||
Vilgertshofer Clara Bienengruppe
|
||||
Wamprechtshammer Marie Bienengruppe
|
||||
|
||||
15 angemeldete Kinder
|
||||
|
||||
Dies ist die Liste der bereits angemeldeten Kinder. Bitte die Eltern der noch fehlenden
|
||||
Kinder an die Anmeldung erinnern.
|
||||
|
||||
Stand 26.05.2025 20:04 Uhr
|
||||
|
||||
|
||||
Kinderfotos Erding
|
||||
Gartenstr. 10 85445 Oberding
|
||||
www.kinderfotos-erding.de
|
||||
08122-8470867
|
||||
|
||||
--- SEITENWECHSEL / PAGE BREAK ---
|
||||
|
||||
Kinderhaus St. Martin Neuching Kinderfotos Erding
|
||||
02. - 05.06.2025
|
||||
|
||||
Nachname Vorname Gruppe
|
||||
---------------------------------------------------------------
|
||||
Arndt Alina Eulengruppe
|
||||
Ben Hiba Elias Eulengruppe
|
||||
Benke Bastian Eulengruppe
|
||||
Berg Tamara Eulengruppe
|
||||
Berndt Mariella Eulengruppe
|
||||
Brose Valentina Eulengruppe
|
||||
Drexler Lukas Eulengruppe
|
||||
Eisenhofer Kilian Eulengruppe
|
||||
Etterer Juna Eulengruppe
|
||||
Fink Sophia Eulengruppe
|
||||
Fleissner Sebastian Eulengruppe
|
||||
Gebhardt Anton Eulengruppe
|
||||
Götz Ludwig Eulengruppe
|
||||
Josef Maximilian Eulengruppe
|
||||
Klimaschewski Ben Eulengruppe
|
||||
Kroh Louis Eulengruppe
|
||||
Lavalie Alvin Eulengruppe
|
||||
Michalik Fabian Eulengruppe
|
||||
Multhammer Vincent Eulengruppe
|
||||
Multhammer Vincent Ludwig Eulengruppe
|
||||
Reck Sophia Eulengruppe
|
||||
Richter Oskar Eulengruppe
|
||||
Rocha Elias Lorenzo Eulengruppe
|
||||
Rudolph Anna Eulengruppe
|
||||
Ternavska Kira Eulengruppe
|
||||
Varadi Amelie Eulengruppe
|
||||
Varadi Clara Eulengruppe
|
||||
Zerr Oskar Eulengruppe
|
||||
|
||||
28 angemeldete Kinder
|
||||
|
||||
Dies ist die Liste der bereits angemeldeten Kinder. Bitte die Eltern der noch fehlenden
|
||||
Kinder an die Anmeldung erinnern.
|
||||
|
||||
Stand 26.05.2025 20:04 Uhr
|
||||
|
||||
|
||||
Kinderfotos Erding
|
||||
Gartenstr. 10 85445 Oberding
|
||||
www.kinderfotos-erding.de
|
||||
08122-8470867
|
||||
|
||||
--- SEITENWECHSEL / PAGE BREAK ---
|
||||
|
||||
Kinderhaus St. Martin Neuching Kinderfotos Erding
|
||||
02. - 05.06.2025
|
||||
|
||||
Nachname Vorname Gruppe
|
||||
-----------------------------------------------------------------
|
||||
Bauer Luka Maximilian Željko Fröschegruppe
|
||||
Birladeanu Ianis Andrei Fröschegruppe
|
||||
Eberhardt Valentin Fröschegruppe
|
||||
Gryczuk Oliver Fröschegruppe
|
||||
Kelly Ferdinand Fröschegruppe
|
||||
Koburger Leonie Fröschegruppe
|
||||
Kressirer Josephine Fröschegruppe
|
||||
Kunz Sophie Fröschegruppe
|
||||
Käsmeier Xaver Fröschegruppe
|
||||
Magin Lilly Fröschegruppe
|
||||
Mert Lukas Fröschegruppe
|
||||
Schmitz Juna Fröschegruppe
|
||||
Schütz Mirjam Fröschegruppe
|
||||
Wamprechtshammer Moritz Fröschegruppe
|
||||
Wimmer Annika Fröschegruppe
|
||||
Zerr Paul Fröschegruppe
|
||||
|
||||
16 angemeldete Kinder
|
||||
|
||||
Dies ist die Liste der bereits angemeldeten Kinder. Bitte die Eltern der noch fehlenden
|
||||
Kinder an die Anmeldung erinnern.
|
||||
|
||||
Stand 26.05.2025 20:04 Uhr
|
||||
|
||||
|
||||
Kinderfotos Erding
|
||||
Gartenstr. 10 85445 Oberding
|
||||
www.kinderfotos-erding.de
|
||||
08122-8470867
|
||||
|
||||
--- SEITENWECHSEL / PAGE BREAK ---
|
||||
|
||||
Kinderhaus St. Martin Neuching Kinderfotos Erding
|
||||
02. - 05.06.2025
|
||||
|
||||
Nachname Vorname Gruppe
|
||||
---------------------------------------------------------------
|
||||
Albano Felicitas Hasengruppe
|
||||
Ben Hiba Yassin Hasengruppe
|
||||
Daiser Greta Hasengruppe
|
||||
Fischer Alexander Hasengruppe
|
||||
Gerlsbeck Lena Hasengruppe
|
||||
Gottwalt Greta Hasengruppe
|
||||
Gruber Sophia Hasengruppe
|
||||
Hadzic Anelia Hasengruppe
|
||||
Kundt Patrick Hasengruppe
|
||||
Lechner Maximilian Hasengruppe
|
||||
Numberger Benedikt Hasengruppe
|
||||
Scherber Lukas Hasengruppe
|
||||
Tress Leo Hasengruppe
|
||||
Türe Emine Hasengruppe
|
||||
Wagner Isabell Hasengruppe
|
||||
|
||||
15 angemeldete Kinder
|
||||
|
||||
Dies ist die Liste der bereits angemeldeten Kinder. Bitte die Eltern der noch fehlenden
|
||||
Kinder an die Anmeldung erinnern.
|
||||
|
||||
Stand 26.05.2025 20:04 Uhr
|
||||
|
||||
|
||||
Kinderfotos Erding
|
||||
Gartenstr. 10 85445 Oberding
|
||||
www.kinderfotos-erding.de
|
||||
08122-8470867
|
||||
|
||||
--- SEITENWECHSEL / PAGE BREAK ---
|
||||
|
||||
Kinderhaus St. Martin Neuching Kinderfotos Erding
|
||||
02. - 05.06.2025
|
||||
|
||||
Nachname Vorname Gruppe
|
||||
-----------------------------------------------------------------
|
||||
Brandl Mila Koboldegruppe
|
||||
Engelking Melina Koboldegruppe
|
||||
Fink Anna Koboldegruppe
|
||||
Flügel Antonia Koboldegruppe
|
||||
Heindl Louis Koboldegruppe
|
||||
Kundt Philip Koboldegruppe
|
||||
Lechner Ben Koboldegruppe
|
||||
Müller Johanna Koboldegruppe
|
||||
Schütz Rafaela Koboldegruppe
|
||||
Seibold Marie Koboldegruppe
|
||||
Seibold Sophia Koboldegruppe
|
||||
Wenninger Rosa Koboldegruppe
|
||||
|
||||
12 angemeldete Kinder
|
||||
|
||||
Dies ist die Liste der bereits angemeldeten Kinder. Bitte die Eltern der noch fehlenden
|
||||
Kinder an die Anmeldung erinnern.
|
||||
|
||||
Stand 26.05.2025 20:04 Uhr
|
||||
|
||||
|
||||
Kinderfotos Erding
|
||||
Gartenstr. 10 85445 Oberding
|
||||
www.kinderfotos-erding.de
|
||||
08122-8470867
|
||||
|
||||
--- SEITENWECHSEL / PAGE BREAK ---
|
||||
|
||||
Kinderhaus St. Martin Neuching Kinderfotos Erding
|
||||
02. - 05.06.2025
|
||||
|
||||
Nachname Vorname Gruppe
|
||||
---------------------------------------------------------------------
|
||||
Brose Marie Marienkäfergruppe
|
||||
Burghardt Fanny Marienkäfergruppe
|
||||
Daberger Tobias Marienkäfergruppe
|
||||
Grobler Emelia Marienkäfergruppe
|
||||
Hamdard Faria Marienkäfergruppe
|
||||
Hermansdorfer Vincent Marienkäfergruppe
|
||||
Huber Helena Marienkäfergruppe
|
||||
Klaric Mia Marienkäfergruppe
|
||||
Kraus Viktoria Marienkäfergruppe
|
||||
Kroh Maximilian Marienkäfergruppe
|
||||
Schuster Korbinian Marienkäfergruppe
|
||||
Wenninger Valentin Marienkäfergruppe
|
||||
Zehetmaier Emma Marienkäfergruppe
|
||||
|
||||
13 angemeldete Kinder
|
||||
|
||||
Dies ist die Liste der bereits angemeldeten Kinder. Bitte die Eltern der noch fehlenden
|
||||
Kinder an die Anmeldung erinnern.
|
||||
|
||||
Stand 26.05.2025 20:04 Uhr
|
||||
|
||||
|
||||
Kinderfotos Erding
|
||||
Gartenstr. 10 85445 Oberding
|
||||
www.kinderfotos-erding.de
|
||||
08122-8470867
|
||||
|
||||
--- SEITENWECHSEL / PAGE BREAK ---
|
||||
|
||||
Kinderhaus St. Martin Neuching Kinderfotos Erding
|
||||
02. - 05.06.2025
|
||||
|
||||
Nachname Vorname Gruppe
|
||||
-------------------------------------------------------------------
|
||||
Arndt Felix Maulwürfegruppe
|
||||
Beck Josef Maulwürfegruppe
|
||||
Ehrensberger Zeno Maulwürfegruppe
|
||||
Eisenhofer Antonia Maulwürfegruppe
|
||||
Farin Constantin Maulwürfegruppe
|
||||
Fink Xaver Maulwürfegruppe
|
||||
Fischer Joshua Maulwürfegruppe
|
||||
Flügel Johann Maulwürfegruppe
|
||||
Hadzic Aylin Maulwürfegruppe
|
||||
Kressirer Simon Maulwürfegruppe
|
||||
Kroh Liana Maulwürfegruppe
|
||||
Kugler Milena Vaiana Maulwürfegruppe
|
||||
Magin Lotte Maulwürfegruppe
|
||||
Michalik Sarah Maulwürfegruppe
|
||||
Rocha Elias Matteo Maulwürfegruppe
|
||||
Schleier Valentin Maulwürfegruppe
|
||||
Sedlmeir Julia Maulwürfegruppe
|
||||
Suszczewicz Adam Maulwürfegruppe
|
||||
Tratnik Maja Maulwürfegruppe
|
||||
Wagner Mariella Maulwürfegruppe
|
||||
Winkler Clara Maulwürfegruppe
|
||||
|
||||
21 angemeldete Kinder
|
||||
|
||||
Dies ist die Liste der bereits angemeldeten Kinder. Bitte die Eltern der noch fehlenden
|
||||
Kinder an die Anmeldung erinnern.
|
||||
|
||||
Stand 26.05.2025 20:04 Uhr
|
||||
|
||||
|
||||
Kinderfotos Erding
|
||||
Gartenstr. 10 85445 Oberding
|
||||
www.kinderfotos-erding.de
|
||||
08122-8470867
|
||||
|
||||
--- SEITENWECHSEL / PAGE BREAK ---
|
||||
|
||||
Kinderhaus St. Martin Neuching Kinderfotos Erding
|
||||
02. - 05.06.2025
|
||||
|
||||
Nachname Vorname Gruppe
|
||||
------------------------------------------------------------------------
|
||||
Birladeanu Dima Matei Schmetterlingegruppe
|
||||
Eichner Hanna Schmetterlingegruppe
|
||||
Gruber Johanna Schmetterlingegruppe
|
||||
Gschwendtner Theresa Schmetterlingegruppe
|
||||
Haberthaler Moritz Schmetterlingegruppe
|
||||
Hamdard Avesta Schmetterlingegruppe
|
||||
Josef Konstantin Schmetterlingegruppe
|
||||
Mikołajczak Gabriela Schmetterlingegruppe
|
||||
Pfaus Leo Schmetterlingegruppe
|
||||
Sulejmanovic Elna Schmetterlingegruppe
|
||||
|
||||
10 angemeldete Kinder
|
||||
|
||||
Dies ist die Liste der bereits angemeldeten Kinder. Bitte die Eltern der noch fehlenden
|
||||
Kinder an die Anmeldung erinnern.
|
||||
|
||||
Stand 26.05.2025 20:04 Uhr
|
||||
|
||||
|
||||
Kinderfotos Erding
|
||||
Gartenstr. 10 85445 Oberding
|
||||
www.kinderfotos-erding.de
|
||||
08122-8470867
|
||||
|
||||
--- SEITENWECHSEL / PAGE BREAK ---
|
||||
|
||||
Kinderhaus St. Martin Neuching Kinderfotos Erding
|
||||
02. - 05.06.2025
|
||||
|
||||
Nachname Vorname Gruppe
|
||||
-----------------------------------------------------------------
|
||||
Daiser Nora Wichtelgruppe
|
||||
Fabri Jule Wichtelgruppe
|
||||
Fink Leonie Wichtelgruppe
|
||||
Gojanaj Simon Wichtelgruppe
|
||||
Haberthaler Anton Wichtelgruppe
|
||||
Hoffmann Eva Wichtelgruppe
|
||||
Hoffmann Eva Charlotte Wichtelgruppe
|
||||
Koburger Paula Wichtelgruppe
|
||||
Mair Magdalena Wichtelgruppe
|
||||
Sammer Emilie Wichtelgruppe
|
||||
|
||||
10 angemeldete Kinder
|
||||
|
||||
Dies ist die Liste der bereits angemeldeten Kinder. Bitte die Eltern der noch fehlenden
|
||||
Kinder an die Anmeldung erinnern.
|
||||
|
||||
Stand 26.05.2025 20:04 Uhr
|
||||
|
||||
|
||||
Kinderfotos Erding
|
||||
Gartenstr. 10 85445 Oberding
|
||||
www.kinderfotos-erding.de
|
||||
08122-8470867
|
||||
|
||||
--- SEITENWECHSEL / PAGE BREAK ---
|
||||
|
||||
Kinderhaus St. Martin Neuching Kinderfotos Erding
|
||||
02. - 05.06.2025
|
||||
|
||||
Nachname Vorname Gruppe
|
||||
---------------------------------------------------------------
|
||||
Berndt Milena Wölfegruppe
|
||||
Brose Sophia Wölfegruppe
|
||||
Dressel Lucy Jolie Wölfegruppe
|
||||
Etterer Nele Wölfegruppe
|
||||
Fischer Benjamin Wölfegruppe
|
||||
Götz Georg Wölfegruppe
|
||||
Hermansdorfer Ludwig Wölfegruppe
|
||||
Klimaschewski Leon Wölfegruppe
|
||||
Magin Frederik Wölfegruppe
|
||||
Müller Luisa Wölfegruppe
|
||||
Niedermeier Lukas Wölfegruppe
|
||||
|
||||
11 angemeldete Kinder
|
||||
|
||||
Dies ist die Liste der bereits angemeldeten Kinder. Bitte die Eltern der noch fehlenden
|
||||
Kinder an die Anmeldung erinnern.
|
||||
|
||||
Stand 26.05.2025 20:04 Uhr
|
||||
|
||||
|
||||
Kinderfotos Erding
|
||||
Gartenstr. 10 85445 Oberding
|
||||
www.kinderfotos-erding.de
|
||||
08122-8470867
|
||||
|
||||
--- SEITENWECHSEL / PAGE BREAK ---
|
||||
|
||||
Kinderhaus St. Martin Neuching Kinderfotos Erding
|
||||
02. - 05.06.2025
|
||||
|
||||
Nachname Vorname Gruppe
|
||||
------------------------------------------------------------------
|
||||
Baur Leon Malouis Zwergerlgruppe
|
||||
Grobler Eloise Zwergerlgruppe
|
||||
Hagn Maximilian Zwergerlgruppe
|
||||
Hren Jan Zwergerlgruppe
|
||||
Kugler Amaya Marina Zwergerlgruppe
|
||||
Schmidt Emilie Zwergerlgruppe
|
||||
|
||||
6 angemeldete Kinder
|
||||
|
||||
Dies ist die Liste der bereits angemeldeten Kinder. Bitte die Eltern der noch fehlenden
|
||||
Kinder an die Anmeldung erinnern.
|
||||
|
||||
Stand 26.05.2025 20:04 Uhr
|
||||
|
||||
|
||||
Kinderfotos Erding
|
||||
Gartenstr. 10 85445 Oberding
|
||||
www.kinderfotos-erding.de
|
||||
08122-8470867
|
||||
1757
HA_automations.yaml
1757
HA_automations.yaml
File diff suppressed because it is too large
@@ -1,667 +0,0 @@
|
||||
# Loads default set of integrations. Do not remove.
|
||||
default_config:
|
||||
|
||||
# Load frontend themes from the themes folder
|
||||
frontend:
|
||||
themes: !include_dir_merge_named themes
|
||||
|
||||
# Text to speech
|
||||
tts:
|
||||
- platform: google_translate
|
||||
|
||||
automation: !include automations.yaml
|
||||
script: !include scripts.yaml
|
||||
scene: !include scenes.yaml
|
||||
|
||||
#Anleitung: https://book.cryd.de/books/projekte/page/hausverbrauch-strom-messen-incl-dummy-sensoren
|
||||
|
||||
utility_meter:
|
||||
daily_upload_volume:
|
||||
source: sensor.fritzbox_upload_volumen
|
||||
cycle: daily
|
||||
daily_download_volume:
|
||||
source: sensor.fritzbox_download_volumen
|
||||
cycle: daily
|
||||
taeglicher_stromverbrauch:
|
||||
source: sensor.stromzahler_energieverbrauch
|
||||
cycle: daily
|
||||
taegliche_einspeisung:
|
||||
source: sensor.stromzahler_energieeinspeisung
|
||||
cycle: daily
|
||||
|
||||
input_boolean:
|
||||
manual_trigger:
|
||||
name: Manual Trigger
|
||||
initial: off
|
||||
|
||||
influxdb:
|
||||
host: 127.0.0.1
|
||||
#host: a0d7b954-influxdb
|
||||
port: 8086
|
||||
database: homeassistant
|
||||
username: !secret influxdb_user
|
||||
password: !secret influxdb_pw
|
||||
max_retries: 3
|
||||
default_measurement: state
|
||||
|
||||
alexa:
|
||||
smart_home:
|
||||
endpoint: https://api.eu.amazonalexa.com/v3/events
|
||||
filter:
|
||||
include_entities:
|
||||
- light.living_room
|
||||
- switch.kitchen
|
||||
entity_config:
|
||||
light.living_room:
|
||||
name: "Wohnzimmer Licht"
|
||||
switch.kitchen:
|
||||
name: "Küchenschalter"
|
||||
template:
|
||||
- sensor:
|
||||
- name: "Total Power3"
|
||||
unique_id: "total_power_sensor3"
|
||||
unit_of_measurement: "W"
|
||||
device_class: power
|
||||
state_class: measurement
|
||||
state: >
|
||||
{{
|
||||
states('sensor.shelly_em3_channel_a_power') | float(0) +
|
||||
states('sensor.shelly_em3_channel_b_power') | float(0) +
|
||||
states('sensor.shelly_em3_channel_c_power') | float(0)
|
||||
}}
|
||||
- name: "Prozent Nutzung"
|
||||
unique_id: "pv_prozent_nutzung"
|
||||
unit_of_measurement: "%"
|
||||
state: >
|
||||
{% set total_power = states('sensor.total_power_v2') | float(0) + states('sensor.solaranlage_power') | float(0) %}
|
||||
{% if total_power > 0 %}
|
||||
{{ (100 * states('sensor.solaranlage_power') | float(0) / total_power) | round(1) }}
|
||||
{% else %}
|
||||
0
|
||||
{% endif %}
|
||||
- name: "Total Energy Use1"
|
||||
unique_id: "total_energy_use1"
|
||||
device_class: energy
|
||||
state_class: total_increasing
|
||||
unit_of_measurement: "kWh"
|
||||
state: >
|
||||
{{
|
||||
states('sensor.shelly_em3_channel_a_energy') | float(0) +
|
||||
states('sensor.shelly_em3_channel_b_energy') | float(0) +
|
||||
states('sensor.shelly_em3_channel_c_energy') | float(0)
|
||||
}}
|
||||
- name: "Total Energy Returned1"
|
||||
unique_id: "total_energy_returned1"
|
||||
device_class: energy
|
||||
state_class: total_increasing
|
||||
unit_of_measurement: "kWh"
|
||||
state: >
|
||||
{{
|
||||
states('sensor.shelly_em3_channel_a_energy_returned') | float(0) +
|
||||
states('sensor.shelly_em3_channel_b_energy_returned') | float(0) +
|
||||
states('sensor.shelly_em3_channel_c_energy_returned') | float(0)
|
||||
}}
|
||||
- name: "Aktuelle Solarleistung1"
|
||||
unique_id: "aktuelle_solarleistung1"
|
||||
unit_of_measurement: "W"
|
||||
device_class: power
|
||||
state_class: measurement
|
||||
state: >
|
||||
{{
|
||||
max(0, states('sensor.esphome_web_39b3f0_charging_power_2') | float(0) -
|
||||
states('sensor.esphome_web_39b3f0_discharging_power_2') | float(0) +
|
||||
states('sensor.solaranlage_power') | float(0))
|
||||
}}
|
||||
- name: "Täglicher Stromverbrauch"
|
||||
unit_of_measurement: "kWh"
|
||||
state: >
|
||||
{% set aktueller_wert = states('sensor.stromzahler_energieverbrauch') | float %}
|
||||
{% set startwert = states('input_number.tagesstart_zaehlerstand') | float %}
|
||||
{{ (aktueller_wert - startwert) | round(2) }}
|
||||
- name: "Fritzbox Download Volumen"
|
||||
unit_of_measurement: "MB"
|
||||
state: >
|
||||
{% set rate_kbps = states('sensor.fritz_box_7530_download_durchsatz') | float %}
|
||||
{% set rate_kBps = rate_kbps / 8 %} # Kilobits pro Sekunde in Kilobytes umrechnen
|
||||
{{ (rate_kBps * 60) / 1024 }} # Datenvolumen pro Minute in Megabyte
|
||||
- name: "Fritzbox Upload Volumen"
|
||||
unit_of_measurement: "MB"
|
||||
state: >
|
||||
{% set rate_kbps = states('sensor.fritz_box_7530_upload_durchsatz') | float %}
|
||||
{% set rate_kBps = rate_kbps / 8 %} # Kilobits pro Sekunde in Kilobytes umrechnen
|
||||
{{ (rate_kBps * 60) / 1024 }} # Datenvolumen pro Minute in Megabyte
|
||||
- name: "Aktueller Strompreis"
|
||||
state: "{{ states('input_number.strompreis') }}"
|
||||
unit_of_measurement: "€/kWh"
|
||||
device_class: monetary
|
||||
- name: "Stromverbrauch Vortag"
|
||||
unique_id: "stromverbrauch_vortag"
|
||||
unit_of_measurement: "kWh"
|
||||
device_class: energy
|
||||
state: >
|
||||
{% set stats = state_attr('sensor.taeglicher_stromverbrauch', 'last_period') %}
|
||||
{{ stats | float(0) }}
|
||||
- name: "Einspeisung Vortag"
|
||||
unique_id: "einspeisung_vortag"
|
||||
unit_of_measurement: "kWh"
|
||||
device_class: energy
|
||||
state: >
|
||||
{% set stats = state_attr('sensor.taegliche_einspeisung', 'last_period') %}
|
||||
{{ stats | float(0) }}
|
||||
- name: "Generiert Vortag (Template)"
|
||||
unique_id: "generiert_vortag_template"
|
||||
unit_of_measurement: "kWh"
|
||||
device_class: energy
|
||||
state: >
|
||||
{% set stats = state_attr('sensor.komplett_solarlieferung', 'last_period') %}
|
||||
{{ stats | float(0) }}
|
||||
- name: "Nächste Müllabholung"
|
||||
state: >-
|
||||
{% set today = now().date().isoformat() %}
|
||||
{% for date in states.sensor.garbage.attributes.keys() | list | sort %}
|
||||
{% if date >= today %}
|
||||
{{ date }} - {{ states.sensor.garbage.attributes[date] }}
|
||||
{% break %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
- name: "Statistik Solarerzeugung Durchschnitt"
|
||||
state: "{{ now().year }}"
|
||||
attributes:
|
||||
data: >
|
||||
{{ states('sensor.gsheet_data') }}
|
||||
- name: "Solarertrag 2022"
|
||||
state: "OK"
|
||||
attributes:
|
||||
values: >
|
||||
{% set raw_data = state_attr('sensor.statistik_solarerzeugung_durchschnitt_mqtt', 'data')[1][1:] %}
|
||||
{{ raw_data | map('replace', ',', '.') | map('float') | list }}
|
||||
- name: "Solarertrag 2023"
|
||||
state: "OK"
|
||||
attributes:
|
||||
values: >
|
||||
{% set raw_data = state_attr('sensor.statistik_solarerzeugung_durchschnitt_mqtt', 'data')[2][1:] %}
|
||||
{{ raw_data | map('replace', ',', '.') | map('float') | list }}
|
||||
- name: "Solarertrag 2024"
|
||||
state: "OK"
|
||||
attributes:
|
||||
values: >
|
||||
{% set raw_data = state_attr('sensor.statistik_solarerzeugung_durchschnitt_mqtt', 'data')[3][1:] %}
|
||||
{{ raw_data | map('replace', ',', '.') | map('float') | list }}
|
||||
- name: "Solarertrag 2025"
|
||||
state: "OK"
|
||||
attributes:
|
||||
values: >
|
||||
{% set raw_data = state_attr('sensor.statistik_solarerzeugung_durchschnitt_mqtt', 'data')[4][1:] %}
|
||||
{{ raw_data | map('replace', ',', '.') | map('float') | list }}
|
||||
- name: "Solarertrag 2022 Werte"
|
||||
state: "{{ state_attr('sensor.solarertrag_2022', 'values')[-1] | float(0) }}"
|
||||
unit_of_measurement: "kWh" # Passen Sie die Einheit an
|
||||
state_class: measurement
|
||||
attributes:
|
||||
alle_werte: "{{ state_attr('sensor.solarertrag_2022', 'values') }}"
|
||||
|
||||
- name: "Kühlschrank Letzte Aktivzeit"
|
||||
unique_id: kuehlschrank_letzte_aktivzeit
|
||||
unit_of_measurement: "min"
|
||||
state: >
|
||||
{% set aktiv_start = states.binary_sensor.kuehlschrank_laeuft.last_changed %}
|
||||
{% if is_state('binary_sensor.kuehlschrank_laeuft', 'on') %}
|
||||
{{ ((now() - aktiv_start).total_seconds() / 60) | round(1) }}
|
||||
{% else %}
|
||||
0
|
||||
{% endif %}
|
||||
|
||||
- name: "Kühlschrank Letzte Pausezeit"
|
||||
unique_id: kuehlschrank_letzte_pausezeit
|
||||
unit_of_measurement: "min"
|
||||
state: >
|
||||
{% set pause_start = states.binary_sensor.kuehlschrank_laeuft.last_changed %}
|
||||
{% if is_state('binary_sensor.kuehlschrank_laeuft', 'off') %}
|
||||
{{ ((now() - pause_start).total_seconds() / 60) | round(1) }}
|
||||
{% else %}
|
||||
0
|
||||
{% endif %}
|
||||
sensor:
|
||||
- platform: average
|
||||
name: "Durchschnittsverbrauch"
|
||||
unique_id: "durchschnitt_verbrauch"
|
||||
duration: 60
|
||||
entities:
|
||||
- sensor.total_power_v2
|
||||
|
||||
- platform: average
|
||||
name: "Durchschnittsertrag"
|
||||
unique_id: "durchschnitt_ertrag"
|
||||
duration: 180
|
||||
entities:
|
||||
- sensor.aktuelle_solarleistung1
|
||||
|
||||
- platform: teamtracker
|
||||
league_id: "BUND"
|
||||
team_id: "MUC"
|
||||
name: "Bayern2"
|
||||
|
||||
- platform: integration
|
||||
name: Upload Volume
|
||||
source: sensor.fritz_box_7530_upload_durchsatz
|
||||
unit_prefix: k
|
||||
round: 2
|
||||
- platform: integration
|
||||
name: Download Volume
|
||||
source: sensor.fritz_box_7530_download_durchsatz
|
||||
unit_prefix: k
|
||||
round: 2
|
||||
- platform: statistics
|
||||
name: "Generiert Vortag (Statistik)"
|
||||
entity_id: sensor.solaranlage_energy
|
||||
state_characteristic: change
|
||||
max_age:
|
||||
days: 1
|
||||
- platform: rest
|
||||
name: "Google Sheets Daten"
|
||||
resource: "https://script.google.com/macros/s/AKfycbz4sAiMvufOqL-gv5o7YfjaL4V0eWu9dGren_xg6pV35dE8bMyzaQckKp5WCs6ex5bbdA/exec"
|
||||
scan_interval: 600 # Aktualisiert alle 10 Minuten
|
||||
value_template: "{{ value_json[0] }}" # Falls erforderlich, kann dies angepasst werden
|
||||
json_attributes:
|
||||
- "Gesamtwerte"
|
||||
- "Genergiert"
|
||||
- "Einspeisung"
|
||||
- "Netzverbrauch"
|
||||
- "Solarverbrauch"
|
||||
- "Gesamtverbrauch"
|
||||
- "Durchschnittl. Nutzung"
|
||||
- "Autarkiegrad"
|
||||
- "Ersparnis in € / Tag"
|
||||
- "Ersparnis gesamt"
|
||||
- "Prozent Abgezahlt"
|
||||
- "Gesamnt abgezahlt"
|
||||
- platform: history_stats
|
||||
name: "Kühlschrank Aktivzeit"
|
||||
entity_id: binary_sensor.kuehlschrank_laeuft
|
||||
state: "on"
|
||||
type: time
|
||||
start: "{{ now() - timedelta(hours=24) }}"
|
||||
end: "{{ now() }}"
|
||||
|
||||
- platform: history_stats
|
||||
name: "Kühlschrank Pausezeit"
|
||||
entity_id: binary_sensor.kuehlschrank_laeuft
|
||||
state: "off"
|
||||
type: time
|
||||
start: "{{ now() - timedelta(hours=24) }}"
|
||||
end: "{{ now() }}"
|
||||
- platform: statistics
|
||||
name: "Kühlschrank Durchschnitt Aktivzeit"
|
||||
entity_id: sensor.kuehlschrank_letzte_aktivzeit
|
||||
state_characteristic: mean
|
||||
max_age:
|
||||
hours: 24
|
||||
sampling_size: 10
|
||||
|
||||
- platform: statistics
|
||||
name: "Kühlschrank Durchschnitt Pausezeit"
|
||||
entity_id: sensor.kuehlschrank_letzte_pausezeit
|
||||
state_characteristic: mean
|
||||
max_age:
|
||||
hours: 24
|
||||
sampling_size: 10
|
||||
|
||||
input_datetime:
|
||||
kuehlschrank_ende_aktiv:
|
||||
name: "Ende aktive Phase"
|
||||
has_time: true
|
||||
kuehlschrank_ende_pause:
|
||||
name: "Ende Pause Phase"
|
||||
has_time: true
|
||||
|
||||
waste_collection_schedule:
|
||||
sources:
|
||||
- name: awido_de
|
||||
args:
|
||||
customer: Erding
|
||||
city: Oberding
|
||||
street: "Gartenstraße"
|
||||
|
||||
binary_sensor:
|
||||
- platform: template
|
||||
sensors:
|
||||
kuehlschrank_laeuft:
|
||||
friendly_name: "Kühlschrank läuft"
|
||||
value_template: "{{ states('sensor.kuehlschrank_power')|float > 50 }}"
|
||||
|
||||
mqtt:
|
||||
sensor:
|
||||
- name: "Balkonkraftwerk Leistung AC"
|
||||
state_topic: "inverter/hm600/ch0/P_AC"
|
||||
device_class: power
|
||||
unit_of_measurement: W
|
||||
state_class: measurement
|
||||
unique_id: "BalkonkraftwerkLeistungAC"
|
||||
- name: "Balkonkraftwerk Module 1 Leistung"
|
||||
state_topic: "inverter/hm600/ch1/P_DC"
|
||||
device_class: power
|
||||
unit_of_measurement: W
|
||||
state_class: measurement
|
||||
unique_id: "BalkonkraftwerkModule13Leistung"
|
||||
- name: "Balkonkraftwerk Module 2 Leistung"
|
||||
state_topic: "inverter/hm600/ch2/P_DC"
|
||||
device_class: power
|
||||
unit_of_measurement: W
|
||||
state_class: measurement
|
||||
unique_id: "BalkonkraftwerkModule24Leistung"
|
||||
- name: "Balkonkraftwerk Temperatur"
|
||||
state_topic: "inverter/hm600/ch0/Temp"
|
||||
device_class: temperature
|
||||
unit_of_measurement: °C
|
||||
state_class: measurement
|
||||
unique_id: "BalkonkraftwerkTemperatur"
|
||||
- name: "Balkonkraftwerk Arbeit Tag"
|
||||
state_topic: "inverter/hm600/ch0/YieldDay"
|
||||
device_class: energy
|
||||
unit_of_measurement: Wh
|
||||
state_class: total_increasing
|
||||
unique_id: "BalkonkraftwerkArbeitTag"
|
||||
- name: "Balkonkraftwerk Arbeit Gesamt"
|
||||
state_topic: "inverter/hm600/ch0/YieldTotal"
|
||||
device_class: energy
|
||||
unit_of_measurement: kWh
|
||||
state_class: total_increasing
|
||||
unique_id: "BalkonkraftwerkArbeitGesamt"
|
||||
- name: "version"
|
||||
state_topic: "inverter/version"
|
||||
unique_id: "version_dtu"
|
||||
- name: "Limit"
|
||||
state_topic: "inverter/hm600/ch0/active_PowerLimit"
|
||||
unique_id: "set_powerlimit"
|
||||
- name: "Energy Akkuentladung current"
|
||||
device_class: power
|
||||
unit_of_measurement: "W"
|
||||
state_topic: "esphome-web-39b3f0/sensor/esphome-web-39b3f0_discharging_power"
|
||||
unique_id: "energy_akkuentladung"
|
||||
- name: "Energy Akkuentladung total"
|
||||
device_class: energy
|
||||
unit_of_measurement: "kWh"
|
||||
state_topic: "esphome-web-39b3f0/sensor/esphome-web-39b3f0_discharging_power"
|
||||
- name: "Effizienz HM600"
|
||||
unit_of_measurement: "%"
|
||||
state_topic: "inverter/hm600/ch0/Efficiency"
|
||||
unique_id: "effizienz_hm600"
|
||||
- name: "HM600 Spannung"
|
||||
unit_of_measurement: "V"
|
||||
state_topic: "inverter/hm600/ch1/U_DC"
|
||||
- name: "Waschmaschine Leistung"
|
||||
state_topic: "shellyplus1pm-84cca8771670/status/switch:0"
|
||||
value_template: "{{ value_json.apower }}"
|
||||
unit_of_measurement: "W"
|
||||
device_class: power
|
||||
- name: "Waschmaschine Energieverbrauch"
|
||||
state_topic: "shellyplus1pm-84cca8771670/status/switch:0"
|
||||
value_template: "{{ value_json.aenergy.total }}"
|
||||
unit_of_measurement: "kWh"
|
||||
device_class: energy
|
||||
- name: "Statistik Solarerzeugung Durchschnitt mqtt"
|
||||
state_topic: "homeassistant/sensor/gsheet_data"
|
||||
value_template: "{{ value_json.state }}"
|
||||
json_attributes_topic: "homeassistant/sensor/gsheet_data"
|
||||
json_attributes_template: "{{ value_json.attributes | tojson }}"
|
||||
|
||||
logger:
|
||||
default: warning
|
||||
logs:
|
||||
custom_components.awtrix: warning
|
||||
homeassistant.components.sensor: warning
|
||||
|
||||
# Nächste Abholung Restmüll
|
||||
|
||||
# - name: "Restmüll"
|
||||
# state: '{{value.types|join(", ")}}{% if value.daysTo == 0 %} Heute{% elif value.daysTo == 1 %} Morgen{% else %} in {{value.daysTo}} Tagen{% endif %}'
|
||||
# attributes:
|
||||
# value_template: '{{value.types|join(", ")}}'
|
||||
# unique_id: "restmuell"
|
||||
# unit_of_measurement: "days"
|
||||
# device_class: "timestamp"
|
||||
# value_template: '{{(states.sensor.waste_collection_schedule.attributes.next_date)|as_timestamp | timestamp_local}}'
|
||||
|
||||
# Nächste Abholung Biotonne
|
||||
|
||||
# - name: "Biotonne"
|
||||
# state: '{{value.types|join(", ")}}{% if value.daysTo == 0 %} Heute{% elif value.daysTo == 1 %} Morgen{% else %} in {{value.daysTo}} Tagen{% endif %}'
|
||||
# attributes:
|
||||
# value_template: '{{value.types|join(", ")}}'
|
||||
# unique_id: "biotonne"
|
||||
# unit_of_measurement: "days"
|
||||
# device_class: "timestamp"
|
||||
# value_template: '{{(states.sensor.waste_collection_schedule.attributes.next_date)|as_timestamp | timestamp_local}}'
|
||||
|
||||
##sensor:
|
||||
# - platform: average
|
||||
# name: 'Durchschnittsverbrauch'
|
||||
# unique_id: 'durchschnitt_verbrauch'
|
||||
# duration: 60
|
||||
# entities:
|
||||
# - sensor.total_power
|
||||
# - platform: average
|
||||
# name: 'Durchschnittsertrag'
|
||||
# unique_id: 'durchschnitt_ertrag'
|
||||
# duration: 180
|
||||
# entities:
|
||||
# - sensor.aktuelle_solarleistung
|
||||
# - platform: teamtracker
|
||||
# league_id: "BUND"
|
||||
# team_id: "MUC"
|
||||
# name: "Bayern2"
|
||||
#
|
||||
# - platform: template
|
||||
# name: "Total Power"
|
||||
# unique_id: "Total_Energy"
|
||||
# device_class: power
|
||||
# state_class: total
|
||||
# unit_of_measurement: "W"
|
||||
# value_template: >
|
||||
# {{
|
||||
# states('sensor.shelly_em3_channel_a_power')| float(0) +
|
||||
# states('sensor.shelly_em3_channel_b_power')| float(0) +
|
||||
# states('sensor.shelly_em3_channel_c_power')| float(0)
|
||||
# }}
|
||||
#
|
||||
# - platform: template
|
||||
# name: "Total Energy Use1"
|
||||
# unique_id: "Total_Energy_Use1"
|
||||
# device_class: energy
|
||||
# state_class: total
|
||||
# unit_of_measurement: "kWh"
|
||||
# value_template: >
|
||||
# {{
|
||||
# states('sensor.shelly_em3_channel_a_energy')| float(0) +
|
||||
# states('sensor.shelly_em3_channel_b_energy')| float(0) +
|
||||
# states('sensor.shelly_em3_channel_c_energy')| float(0)
|
||||
# }}
|
||||
#
|
||||
# - name: "Total Energy Returned1"
|
||||
# unique_id: "Total_Energy_Returned1"
|
||||
# device_class: energy
|
||||
# state_class: total
|
||||
# unit_of_measurement: "kWh"
|
||||
# value_template: >
|
||||
# {{
|
||||
# states('sensor.shelly_em3_channel_a_energy_returned')| float(0) +
|
||||
# states('sensor.shelly_em3_channel_b_energy_returned')| float(0) +
|
||||
# states('sensor.shelly_em3_channel_c_energy_returned')| float(0)
|
||||
# }}
|
||||
#
|
||||
# - name: "PV Einspeisung"
|
||||
# unique_id: "pv_einspeisung"
|
||||
# unit_of_measurement: "W"
|
||||
# device_class: power
|
||||
# value_template: "{{ states('sensor.total_power')|float if states('sensor.total_power') | int < 1 else 0 }}"
|
||||
#
|
||||
# - name: "PV Einspeisung negiert"
|
||||
# unique_id: "pv_einspeisung_negiert"
|
||||
# unit_of_measurement: "W"
|
||||
# device_class: power
|
||||
# value_template: "{{ states('sensor.pv_einspeisung')|float * -1 }}"
|
||||
#
|
||||
# - name: "Wirkungsgrad"
|
||||
# unique_id: "wirkungsgrad_battery"
|
||||
# unit_of_measurement: "%"
|
||||
# device_class: power
|
||||
# value_template: >
|
||||
# {{(100 * states('sensor.solaranlage_power')| float(0) / states('sensor.esphome_web_39b3f0_discharging_power')| float(0)) | round(1) }}
|
||||
#
|
||||
# - name: "Prozent_Nutzung"
|
||||
# unique_id: "pv_prozent_nutzung"
|
||||
# unit_of_measurement: "%"
|
||||
# device_class: power
|
||||
# value_template: >
|
||||
# {{
|
||||
# (100 * states('sensor.solaranlage_power')| float(0) / (states('sensor.solaranlage_power')| float(0) + states('sensor.total_power_v2')| float(0))) | round(1)
|
||||
# }}
|
||||
#
|
||||
# - name: "Aktuelle_Solarleistung"
|
||||
# unique_id: "aktuelle-solarleistung"
|
||||
# unit_of_measurement: "W"
|
||||
# device_class: power
|
||||
# value_template: >
|
||||
# {{
|
||||
# max(0, states('sensor.esphome_web_39b3f0_charging_power_2')| float(0) -
|
||||
# states('sensor.esphome_web_39b3f0_discharging_power_2')| float(0) +
|
||||
# states('sensor.solaranlage_power')|float(0) +
|
||||
# }}
|
||||
#
|
||||
# //states('sensor.akku_power')|float(0)) removed from aktuelle solarleistung
|
||||
#
|
||||
# - name: "Summierter Ertrag"
|
||||
# unique_id: "summierter_ertrag"
|
||||
# unit_of_measurement: "W"
|
||||
# device_class: power
|
||||
# value_template: >
|
||||
# {{
|
||||
# states('sensor.akku_power')| float(0) +
|
||||
# states('sensor.solaranlage_power')|float(0)
|
||||
# }}
|
||||
#
|
||||
# - name: "Total Power"
|
||||
# unique_id: "Total_Energy"
|
||||
# device_class: power
|
||||
# state_class: total
|
||||
# unit_of_measurement: "W"
|
||||
# value_template: >
|
||||
# {{
|
||||
# states('sensor.shelly_em3_channel_a_power')| float(0) +
|
||||
# states('sensor.shelly_em3_channel_b_power')| float(0) +
|
||||
# states('sensor.shelly_em3_channel_c_power')| float(0)
|
||||
# }}
|
||||
#
|
||||
# - name: "Total Energy Use"
|
||||
# unique_id: "Total_Energy_Use"
|
||||
# device_class: energy
|
||||
# state_class: total
|
||||
# unit_of_measurement: "kWh"
|
||||
# value_template: >
|
||||
# {{
|
||||
# states('sensor.shelly_em3_channel_a_energy')| float(0) +
|
||||
# states('sensor.shelly_em3_channel_b_energy')| float(0) +
|
||||
# states('sensor.shelly_em3_channel_c_energy')| float(0)
|
||||
# }}
|
||||
#
|
||||
# - name: "Total Energy Returned"
|
||||
# unique_id: "Total_Energy_Returned"
|
||||
# device_class: energy
|
||||
# state_class: total
|
||||
# unit_of_measurement: "kWh"
|
||||
# value_template: >
|
||||
# {{
|
||||
# states('sensor.shelly_em3_channel_a_energy_returned')| float(0) +
|
||||
# states('sensor.shelly_em3_channel_b_energy_returned')| float(0) +
|
||||
# states('sensor.shelly_em3_channel_c_energy_returned')| float(0)
|
||||
# }}
|
||||
#
|
||||
# - name: "PV Einspeisung"
|
||||
# unique_id: "pv_einspeisung"
|
||||
# unit_of_measurement: "W"
|
||||
# device_class: power
|
||||
# value_template: "{{ states('sensor.total_power')|float if states('sensor.total_power') | int < 1 else 0 }}"
|
||||
#
|
||||
# - name: "PV Einspeisung negiert"
|
||||
# unique_id: "pv_einspeisung_negiert"
|
||||
# unit_of_measurement: "W"
|
||||
# device_class: power
|
||||
# value_template: "{{ states('sensor.pv_einspeisung')|float * -1 }}"
|
||||
#
|
||||
# - name: "Wirkungsgrad"
|
||||
# unique_id: "wirkungsgrad_battery"
|
||||
# unit_of_measurement: "%"
|
||||
# device_class: power
|
||||
# value_template: >
|
||||
# {{(100 * states('sensor.solaranlage_power')| float(0) / states('sensor.esphome_web_39b3f0_discharging_power')| float(0)) | round(1) }}###
|
||||
#
|
||||
# - name: "Prozent_Nutzung"
|
||||
# unique_id: "pv_prozent_nutzung"
|
||||
# unit_of_measurement: "%"
|
||||
# device_class: power
|
||||
# value_template: >
|
||||
# {{
|
||||
# (100 * states('sensor.solaranlage_power')| float(0) / (states('sensor.solaranlage_power')| float(0) + states('sensor.total_power')| float(0))) | round(1)
|
||||
# }}
|
||||
#
|
||||
# - name: "Aktuelle_Solarleistung"
|
||||
# unique_id: "aktuelle-solarleistung"
|
||||
# unit_of_measurement: "W"
|
||||
# device_class: power
|
||||
# value_template: >
|
||||
# {{
|
||||
# max(0, states('sensor.esphome_web_39b3f0_charging_power_2')| float(0) -
|
||||
# states('sensor.esphome_web_39b3f0_discharging_power_2')| float(0) +
|
||||
# states('sensor.solaranlage_power')|float(0) +
|
||||
# states('sensor.akku_power')|float(0))
|
||||
# }}
|
||||
#
|
||||
# - name: "Summierter Ertrag"
|
||||
# unique_id: "summierter_ertrag"
|
||||
# unit_of_measurement: "W"
|
||||
# device_class: power
|
||||
# value_template: >
|
||||
# {{
|
||||
# states('sensor.akku_power')| float(0) +
|
||||
# states('sensor.solaranlage_power')|float(0)
|
||||
# }}
|
||||
|
||||
# https://community.home-assistant.io/t/hoymiles-dtu-microinverters-pv/253674/21
|
||||
|
||||
# statistics
|
||||
#- platform: statistics
|
||||
# entity_id: sensor.total_power_av
|
||||
# sampling_size: 20
|
||||
|
||||
#powercalc:
|
||||
|
||||
#sensor:
|
||||
# - platform: powercalc
|
||||
# entity_id: light.esslicht
|
||||
# fixed:
|
||||
# states_power:
|
||||
# off: 0.4
|
||||
# on: 22
|
||||
|
||||
# platform: template
|
||||
# daily_solar_percent:
|
||||
# value_template: "{{ ( 100 * states('sensor.total_power')|float / states('sensor.solaranlage_power')|float )|round(1) }}"
|
||||
# unit_of_measurement: '%'
|
||||
# friendly_name: Daily Solar Percentage
|
||||
|
||||
# ssl Configuration
|
||||
# http:
|
||||
# ssl_certificate: /ssl/fullchain.pem
|
||||
# ssl_key: /ssl/privkey.pem
|
||||
|
||||
http:
|
||||
use_x_forwarded_for: true
|
||||
trusted_proxies:
|
||||
- 127.0.0.1
|
||||
- 192.168.178.6
|
||||
- 172.16.0.0/12
|
||||
- ::1
|
||||
#ssl_certificate: "/ssl/fullchain.pem"
|
||||
#ssl_key: "/ssl/privkey.pem"
|
||||
homeassistant:
|
||||
external_url: "https://floke-ha.duckdns.org"
|
||||
260
HA_jbd_bms.yaml
260
HA_jbd_bms.yaml
@@ -1,260 +0,0 @@
|
||||
substitutions:
|
||||
name: esphome-web-39b3f0
|
||||
device_description: "Monitor and control a Xiaoxiang Battery Management System (JBD-BMS) via BLE"
|
||||
external_components_source: github://syssi/esphome-jbd-bms@main
|
||||
mac_address: A4:C1:37:00:86:5A
|
||||
|
||||
esphome:
|
||||
name: ${name}
|
||||
comment: ${device_description}
|
||||
min_version: 2024.6.0
|
||||
project:
|
||||
name: "syssi.esphome-jbd-bms"
|
||||
version: 2.1.0
|
||||
|
||||
esp32:
|
||||
board: esp32dev
|
||||
framework:
|
||||
type: esp-idf
|
||||
|
||||
external_components:
|
||||
- source: ${external_components_source}
|
||||
refresh: 0s
|
||||
|
||||
wifi:
|
||||
ssid: !secret wifi_ssid
|
||||
password: !secret wifi_password
|
||||
|
||||
ota:
|
||||
platform: esphome
|
||||
|
||||
logger:
|
||||
level: DEBUG
|
||||
|
||||
# If you use Home Assistant please remove this `mqtt` section and uncomment the `api` component!
|
||||
# The native API has many advantages over MQTT: https://esphome.io/components/api.html#advantages-over-mqtt
|
||||
#mqtt:
|
||||
# broker: !secret mqtt_host
|
||||
# username: !secret mqtt_username
|
||||
# password: !secret mqtt_password
|
||||
# id: mqtt_client
|
||||
|
||||
# api:
|
||||
|
||||
esp32_ble_tracker:
|
||||
scan_parameters:
|
||||
active: false
|
||||
|
||||
ble_client:
|
||||
- id: client0
|
||||
mac_address: ${mac_address}
|
||||
|
||||
jbd_bms_ble:
|
||||
- id: bms0
|
||||
ble_client_id: client0
|
||||
# Some Liontron BMS models require an update interval of less than 8s
|
||||
update_interval: 2s
|
||||
|
||||
button:
|
||||
- platform: jbd_bms_ble
|
||||
jbd_bms_ble_id: bms0
|
||||
retrieve_hardware_version:
|
||||
name: "${name} retrieve hardware version"
|
||||
force_soc_reset:
|
||||
name: "${name} force soc reset"
|
||||
|
||||
binary_sensor:
|
||||
- platform: jbd_bms_ble
|
||||
jbd_bms_ble_id: bms0
|
||||
balancing:
|
||||
name: "${name} balancing"
|
||||
charging:
|
||||
name: "${name} charging"
|
||||
discharging:
|
||||
name: "${name} discharging"
|
||||
online_status:
|
||||
name: "${name} online status"
|
||||
|
||||
sensor:
|
||||
- platform: jbd_bms_ble
|
||||
jbd_bms_ble_id: bms0
|
||||
battery_strings:
|
||||
name: "${name} battery strings"
|
||||
current:
|
||||
name: "${name} current"
|
||||
power:
|
||||
name: "${name} power"
|
||||
charging_power:
|
||||
name: "${name} charging power"
|
||||
discharging_power:
|
||||
name: "${name} discharging power"
|
||||
state_of_charge:
|
||||
name: "${name} state of charge"
|
||||
nominal_capacity:
|
||||
name: "${name} nominal capacity"
|
||||
charging_cycles:
|
||||
name: "${name} charging cycles"
|
||||
capacity_remaining:
|
||||
name: "${name} capacity remaining"
|
||||
battery_cycle_capacity:
|
||||
name: "${name} battery cycle capacity"
|
||||
total_voltage:
|
||||
name: "${name} total voltage"
|
||||
average_cell_voltage:
|
||||
name: "${name} average cell voltage"
|
||||
delta_cell_voltage:
|
||||
name: "${name} delta cell voltage"
|
||||
min_cell_voltage:
|
||||
name: "${name} min cell voltage"
|
||||
max_cell_voltage:
|
||||
name: "${name} max cell voltage"
|
||||
min_voltage_cell:
|
||||
name: "${name} min voltage cell"
|
||||
max_voltage_cell:
|
||||
name: "${name} max voltage cell"
|
||||
temperature_1:
|
||||
name: "${name} temperature 1"
|
||||
temperature_2:
|
||||
name: "${name} temperature 2"
|
||||
temperature_3:
|
||||
name: "${name} temperature 3"
|
||||
temperature_4:
|
||||
name: "${name} temperature 4"
|
||||
temperature_5:
|
||||
name: "${name} temperature 5"
|
||||
temperature_6:
|
||||
name: "${name} temperature 6"
|
||||
cell_voltage_1:
|
||||
name: "${name} cell voltage 1"
|
||||
cell_voltage_2:
|
||||
name: "${name} cell voltage 2"
|
||||
cell_voltage_3:
|
||||
name: "${name} cell voltage 3"
|
||||
cell_voltage_4:
|
||||
name: "${name} cell voltage 4"
|
||||
cell_voltage_5:
|
||||
name: "${name} cell voltage 5"
|
||||
cell_voltage_6:
|
||||
name: "${name} cell voltage 6"
|
||||
cell_voltage_7:
|
||||
name: "${name} cell voltage 7"
|
||||
cell_voltage_8:
|
||||
name: "${name} cell voltage 8"
|
||||
cell_voltage_9:
|
||||
name: "${name} cell voltage 9"
|
||||
cell_voltage_10:
|
||||
name: "${name} cell voltage 10"
|
||||
cell_voltage_11:
|
||||
name: "${name} cell voltage 11"
|
||||
cell_voltage_12:
|
||||
name: "${name} cell voltage 12"
|
||||
cell_voltage_13:
|
||||
name: "${name} cell voltage 13"
|
||||
cell_voltage_14:
|
||||
name: "${name} cell voltage 14"
|
||||
cell_voltage_15:
|
||||
name: "${name} cell voltage 15"
|
||||
cell_voltage_16:
|
||||
name: "${name} cell voltage 16"
|
||||
cell_voltage_17:
|
||||
name: "${name} cell voltage 17"
|
||||
cell_voltage_18:
|
||||
name: "${name} cell voltage 18"
|
||||
cell_voltage_19:
|
||||
name: "${name} cell voltage 19"
|
||||
cell_voltage_20:
|
||||
name: "${name} cell voltage 20"
|
||||
cell_voltage_21:
|
||||
name: "${name} cell voltage 21"
|
||||
cell_voltage_22:
|
||||
name: "${name} cell voltage 22"
|
||||
cell_voltage_23:
|
||||
name: "${name} cell voltage 23"
|
||||
cell_voltage_24:
|
||||
name: "${name} cell voltage 24"
|
||||
cell_voltage_25:
|
||||
name: "${name} cell voltage 25"
|
||||
cell_voltage_26:
|
||||
name: "${name} cell voltage 26"
|
||||
cell_voltage_27:
|
||||
name: "${name} cell voltage 27"
|
||||
cell_voltage_28:
|
||||
name: "${name} cell voltage 28"
|
||||
cell_voltage_29:
|
||||
name: "${name} cell voltage 29"
|
||||
cell_voltage_30:
|
||||
name: "${name} cell voltage 30"
|
||||
cell_voltage_31:
|
||||
name: "${name} cell voltage 31"
|
||||
cell_voltage_32:
|
||||
name: "${name} cell voltage 32"
|
||||
operation_status_bitmask:
|
||||
name: "${name} operation status bitmask"
|
||||
errors_bitmask:
|
||||
name: "${name} errors bitmask"
|
||||
balancer_status_bitmask:
|
||||
name: "${name} balancer status bitmask"
|
||||
software_version:
|
||||
name: "${name} software version"
|
||||
short_circuit_error_count:
|
||||
name: "${name} short circuit error count"
|
||||
charge_overcurrent_error_count:
|
||||
name: "${name} charge overcurrent error count"
|
||||
discharge_overcurrent_error_count:
|
||||
name: "${name} discharge overcurrent error count"
|
||||
cell_overvoltage_error_count:
|
||||
name: "${name} cell overvoltage error count"
|
||||
cell_undervoltage_error_count:
|
||||
name: "${name} cell undervoltage error count"
|
||||
charge_overtemperature_error_count:
|
||||
name: "${name} charge overtemperature error count"
|
||||
charge_undertemperature_error_count:
|
||||
name: "${name} charge undertemperature error count"
|
||||
discharge_overtemperature_error_count:
|
||||
name: "${name} discharge overtemperature error count"
|
||||
discharge_undertemperature_error_count:
|
||||
name: "${name} discharge undertemperature error count"
|
||||
battery_overvoltage_error_count:
|
||||
name: "${name} battery overvoltage error count"
|
||||
battery_undervoltage_error_count:
|
||||
name: "${name} battery undervoltage error count"
|
||||
|
||||
text_sensor:
|
||||
- platform: jbd_bms_ble
|
||||
jbd_bms_ble_id: bms0
|
||||
errors:
|
||||
name: "${name} errors"
|
||||
operation_status:
|
||||
name: "${name} operation status"
|
||||
device_model:
|
||||
name: "${name} device model"
|
||||
|
||||
select:
|
||||
- platform: jbd_bms_ble
|
||||
jbd_bms_ble_id: bms0
|
||||
read_eeprom_register:
|
||||
name: "${name} read eeprom register"
|
||||
id: read_eeprom_register0
|
||||
optionsmap:
|
||||
0xAA: "Error Counts"
|
||||
|
||||
switch:
|
||||
- platform: ble_client
|
||||
ble_client_id: client0
|
||||
name: "${name} enable bluetooth connection"
|
||||
|
||||
- platform: jbd_bms_ble
|
||||
jbd_bms_ble_id: bms0
|
||||
charging:
|
||||
name: "${name} charging"
|
||||
discharging:
|
||||
name: "${name} discharging"
|
||||
|
||||
# Uncomment this section if you want to update the error count sensors periodically
|
||||
#
|
||||
# interval:
|
||||
# - interval: 30min
|
||||
# then:
|
||||
# - select.set:
|
||||
# id: read_eeprom_register0
|
||||
# option: "Error Counts"
|
||||
@@ -1,81 +0,0 @@
|
||||
# Konver.ai Integration: Strategy & Architecture

**Status:** Contract signed (focus: phone enrichment).
**Risk:** Loss of Dealfront (lead gen) without an adequate, automated replacement.
**Goal:** Use Konver.ai not just as a manual "phone book" but as a **scalable source** for the lead factory (Company Explorer).

## 1. The Target Scenario (The "Golden Flow")

We integrate Konver.ai via API directly into the Company Explorer. The CE acts as the gatekeeper to save credits and prevent duplicates.

```mermaid
flowchart TD
    subgraph "RoboPlanet Ecosystem"
        Notion[("Notion Strategy\n(Verticals/Pains)")]
        SO[("SuperOffice CRM\n(Existing base)")]
        CE["Company Explorer\n(The Brain)"]
    end

    subgraph "External Sources"
        Konver["Konver.ai API"]
        Web["Web / Google / Wiki"]
    end

    %% Data Flow
    Notion -->|1. Sync Strategy| CE
    SO -->|2. Import Existing (Blocklist)| CE

    CE -->|3. Search Query + Exclusion List| Konver
    KonverNote["Query: nursing homes > 10M\nExclude: domain list from SO"]
    Konver -.- KonverNote

    Konver -->|4. Net New Candidates| CE

    CE -->|5. Deep Dive (Robotics Check)| Web

    CE -->|6. Enrich Contact (Phone/Mail)| Konver
    CENote["Only for companies with\na high robotics score!"]
    CE -.- CENote

    CE -->|7. Export Qualified Lead| SO
```

## 2. The Critical Gap: "Exclusion List"

Since Dealfront (our previous "fishing net") is being switched off, we have to use Konver for **new-customer generation**.
Without an **exclusion list** applied at search time, we burn money and time:

1. **Cost:** We pay credits for companies/contacts we already have.
2. **Data hygiene:** We import duplicates that we then have to clean up manually.
3. **Blind flight:** Before buying, we do not know whether a record is "net new".

### Ask for Konver (Technical Onboarding)

*"To use Konver.ai as the strategic successor to Dealfront in our marketing automation, we strictly require API features for **deduplication BEFORE the data purchase**."*

**Concrete features:**
* **Domain exclusion:** Upload a list (e.g. 5,000 domains) that must *not* be returned by the API search.
* **Contact check:** A check (e.g. hash comparison) whether an e-mail address is already "known" before contact data is revealed (and billed).
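To make the ask concrete for the onboarding call, here is a minimal sketch of how the Company Explorer could call an exclusion-aware search and run a contact check before buying data. The base URL, the `/companies/search` endpoint, the `exclude_domains` parameter and the hash-check flow are assumptions for illustration, not documented Konver.ai API features.

```python
import hashlib

import requests

KONVER_API = "https://api.konver.example/v1"  # hypothetical base URL
API_KEY = "..."                               # Konver credential from the onboarding package

def search_net_new(query: str, exclude_domains: list[str]) -> list[dict]:
    """Ask Konver for candidates while excluding domains we already own in SuperOffice."""
    resp = requests.post(
        f"{KONVER_API}/companies/search",             # assumed endpoint
        headers={"Authorization": f"Bearer {API_KEY}"},
        json={"query": query, "exclude_domains": exclude_domains},  # assumed parameters
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json().get("companies", [])

def is_known_email(email: str, known_hashes: set[str]) -> bool:
    """Hash comparison before a contact is revealed (and billed)."""
    digest = hashlib.sha256(email.strip().lower().encode()).hexdigest()
    return digest in known_hashes
```

The exclusion list itself would simply be the deduplicated domain column of the SuperOffice snapshot already held in the Company Explorer.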
## 3. Workflow Variants

### A. The "Smart Enricher" (economical)
We use Konver only for companies that are **truly** relevant (a sketch follows the list below).

1. **Scraping:** Company Explorer finds 100 nursing homes (web search).
2. **Filtering:** AI checks the websites -> 40 of them are relevant (large floor areas).
3. **Enrichment:** Only for these 40 do we ask Konver via API: *"Give me the facility manager + mobile number"*.
4. **Result:** We pay 40 credits instead of 100. High efficiency.
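To illustrate the gate in step 3: the snippet below only spends credits on candidates above a robotics-score threshold. The `robotics_score` field, the injected `enrich_contact` callable and the threshold of 0.7 are assumptions used purely for illustration.

```python
from typing import Callable, Optional

def enrich_qualified(
    candidates: list[dict],
    enrich_contact: Callable[[str], Optional[dict]],
    score_threshold: float = 0.7,
) -> list[dict]:
    """Spend Konver credits only on candidates that passed the robotics check."""
    enriched = []
    for company in candidates:
        if company.get("robotics_score", 0.0) < score_threshold:
            continue  # below threshold: no credit spent
        contact = enrich_contact(company["domain"])  # e.g. a wrapper around the Konver enrichment call
        if contact:
            enriched.append({**company, "contact": contact})
    return enriched
```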
### B. The "Mass Loader" (expensive & dumb - to be avoided)
1. We load "all nursing homes" from Konver straight into SuperOffice.
2. We pay 100 credits.
3. Sales calls them -> 60 of them turn out to be unsuitable (too small, no need).
4. **Result:** 60 credits burned, sales frustrated.

## 4. Conclusion & Next Steps

We need to clarify in the onboarding call:
1. **API docs:** Where is the documentation for the `Search` and `Enrich` endpoints?
2. **Exclusion:** How do we filter out existing customers in the API call?
3. **Bulk enrichment:** Can we upload lists (domains) for enrichment?

Without these features, Konver is a step back to manual one-by-one processing.
@@ -1,161 +0,0 @@
|
||||
# Migration Plan: Legacy GSheets -> Company Explorer (Robotics Edition v0.8.5)

**Context:** Fresh start for the **Robotics & Facility Management** vertical.
**Goal:** Replace Google Sheets/CLI with a web app ("Company Explorer") backed by SQLite.

## 1. Strategic Realignment

| Area | Old (Legacy) | New (Robotics Edition) |
| :--- | :--- | :--- |
| **Data store** | Google Sheets | **SQLite** (local, fast, filterable). |
| **Target data** | Generic / customer service | **Quantifiable potential** (e.g. 4,500 m² floor area, 120 beds). |
| **Industries** | AI suggestion (free text) | **Strict mode:** mapping to a defined Notion list (e.g. "Hotellerie", "Automotive"). |
| **Scoring** | 0-100 score (vague) | **Data-driven:** raw value (scraper/search) -> standardization (formula) -> potential. |
| **Analytics** | Technician ML model | **Disabled**. Focus on hard facts. |
| **Operations** | D365 sync (broken) | **Excel import & deduplication**. Focus on matching external lists against the existing base. |

## 2. Architecture & Component Mapping

The system is rebuilt from scratch in `company-explorer/`. We remove the dependencies on the root `helpers.py`.

### A. Core Backend (`backend/`)

| Komponente | Aufgabe & Neue Logik | Prio |
| :--- | :--- | :--- |
| **Database** | Replaces `GoogleSheetHandler`. Stores companies & "enrichment blobs". | 1 |
| **Importer** | Replaces `SyncManager`. Imports Excel dumps (CRM) and event lists. | 1 |
| **Deduplicator** | Replaces `company_deduplicator.py`. **Core feature:** checks event lists against the DB. Must match "intelligently" (name + city + web). | 1 |
| **Scraper (Base)** | Extracts text from websites. Basis for all analyses. | 1 |
| **Classification Service** | **NEW (v0.7.0).** Two-stage logic: <br> 1. Strict industry classification. <br> 2. Metric extraction cascade (Web -> Wiki -> SerpAPI). | 1 |
| **Marketing Engine** | Replaces `generate_marketing_text.py`. Uses the new `marketing_wissen_robotics.yaml`. | 3 |

**Identified main file:** `company-explorer/backend/app.py`

### B. Frontend (`frontend/`) - React

* **View 1: The "Explorer":** DataGrid of all companies. Filterable by "robot potential" and status.
* **View 2: The "Inspector":** Detail view of a single company. Shows detected signals ("has spa area"). Allows manual correction.
* **Identified component:** `company-explorer/frontend/src/components/Inspector.tsx`
* **View 3: "List Matcher":** Upload an Excel list -> show duplicates -> button "Import new ones".
* **View 4: "Settings":** Configuration of industries, roles and robotics logic.
* **Frontend "Settings" component:** `company-explorer/frontend/src/components/RoboticsSettings.tsx`

### C. Architecture Pattern for Client Integration

To give external services (such as the `lead-engine`) a simple and robust way to connect to the `company-explorer`, a standardized client-connector pattern was implemented.

| Komponente | Aufgabe & Neue Logik |
| :--- | :--- |
| **`company_explorer_connector.py`** | **NEW:** A central Python script that acts as the "official" client wrapper for the Company Explorer API. It hides the complexity of the asynchronous enrichment processes. |
| **`handle_company_workflow()`** | The connector's core function. It implements the complete "find-or-create-and-enrich" workflow: <br> 1. **Check:** Determines whether a company already exists. <br> 2. **Create:** Creates the company if it is new. <br> 3. **Trigger:** Starts the asynchronous `discover` process. <br> 4. **Wait (polling):** Monitors the company's status until a website has been found. <br> 5. **Analyze:** Starts the asynchronous `analyze` process. <br> **Benefit:** Gives the calling service a simple, quasi-synchronous interface and guarantees that the steps run in the correct order. |
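A condensed sketch of what `handle_company_workflow()` could look like from the caller's side. The REST paths (`/companies`, `/discover`, `/analyze`), the port and the response fields are assumptions for illustration, not the documented Company Explorer API; the polling interval and timeout are arbitrary example values.

```python
import time

import requests

BASE_URL = "http://localhost:3002/api"  # assumed Company Explorer base URL

def handle_company_workflow(name: str, website: str = "", timeout_s: int = 300) -> dict:
    """Find-or-create a company, trigger discovery, poll until a website exists, then analyze."""
    # 1. Check: does the company already exist?
    found = requests.get(f"{BASE_URL}/companies", params={"name": name}, timeout=30).json()
    company = found[0] if found else None

    # 2. Create it if it is new
    if company is None:
        company = requests.post(
            f"{BASE_URL}/companies", json={"name": name, "website": website}, timeout=30
        ).json()
    cid = company["id"]

    # 3. Trigger the asynchronous discover process (website search)
    requests.post(f"{BASE_URL}/companies/{cid}/discover", timeout=30)

    # 4. Poll until a website has been found (or give up)
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        company = requests.get(f"{BASE_URL}/companies/{cid}", timeout=30).json()
        if company.get("website"):
            break
        time.sleep(5)
    else:
        raise TimeoutError(f"No website discovered for company {cid} within {timeout_s}s")

    # 5. Trigger the asynchronous analyze process (scrape + classification)
    requests.post(f"{BASE_URL}/companies/{cid}/analyze", timeout=30)
    return company
```

This keeps the caller (e.g. the `lead-engine`) free of any polling logic of its own: one blocking call returns a company record that is ready for analysis.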
## 3. Handling Shared Code (`helpers.py` & Co.)

We fully decouple the new project ("fork & clean").

* **Source:** `helpers.py` (root)
* **Target:** `company-explorer/backend/lib/core_utils.py`
* **Action:** We copy only the relevant parts and extend them (e.g. `safe_eval_math`, `run_serp_search`).

## 4. Data Structure (SQLite Schema)

### Table `companies` (master data & analysis)
* `id` (PK)
* `name` (String)
* `website` (String)
* `crm_id` (String, nullable - link to D365)
* `industry_crm` (String - the "allowed" industry from Notion)
* `city` (String)
* `country` (String - default: "DE" or taken from the Impressum)
* `status` (Enum: NEW, IMPORTED, ENRICHED, QUALIFIED)
* **NEW (v0.7.0):**
  * `calculated_metric_name` (String - e.g. "number of beds")
  * `calculated_metric_value` (Float - e.g. 180)
  * `calculated_metric_unit` (String - e.g. "beds")
  * `standardized_metric_value` (Float - e.g. 4500)
  * `standardized_metric_unit` (String - e.g. "m²")
  * `metric_source` (String - "website", "wikipedia", "serpapi")
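For reference, a minimal `sqlite3` sketch of the table described above. It covers an illustrative subset with types inferred from the field descriptions; it is not a dump of the live schema in `companies_v3_fixed_2.db`.

```python
import sqlite3

def create_companies_table(db_path: str = "companies_v3_fixed_2.db") -> None:
    """Create the companies table with the core columns and the v0.7.0 metric columns."""
    conn = sqlite3.connect(db_path)
    conn.execute(
        """
        CREATE TABLE IF NOT EXISTS companies (
            id INTEGER PRIMARY KEY,
            name TEXT NOT NULL,
            website TEXT,
            crm_id TEXT,                        -- link to D365, nullable
            industry_crm TEXT,                  -- the "allowed" industry from Notion
            city TEXT,
            country TEXT DEFAULT 'DE',
            status TEXT DEFAULT 'NEW',          -- NEW, IMPORTED, ENRICHED, QUALIFIED
            calculated_metric_name TEXT,        -- e.g. 'number of beds'
            calculated_metric_value REAL,       -- e.g. 180
            calculated_metric_unit TEXT,        -- e.g. 'beds'
            standardized_metric_value REAL,     -- e.g. 4500
            standardized_metric_unit TEXT,      -- e.g. 'm²'
            metric_source TEXT                  -- 'website', 'wikipedia', 'serpapi'
        )
        """
    )
    conn.commit()
    conn.close()
```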
### Table `signals` (deprecated)
* *Obsolete as of v0.7.0. Replaced by the quantitative metrics in `companies`.*

### Table `contacts` (contact persons)
* `id` (PK)
* `account_id` (FK -> companies.id)
* `gender`, `title`, `first_name`, `last_name`, `email`
* `job_title` (as printed on the business card)
* `role` (standardized role: "Operativer Entscheider", etc.)
* `status` (marketing status)

### Table `industries` (industry focus - synced from Notion)
* `id` (PK)
* `notion_id` (String, unique)
* `name` (String - "Vertical" in Notion)
* `description` (Text - "Definition" in Notion)
* `metric_type` (String - "Metric Type")
* `min_requirement` (Float - "Min. Requirement")
* `whale_threshold` (Float - "Whale Threshold")
* `proxy_factor` (Float - "Proxy Factor")
* `scraper_search_term` (String - "Scraper Search Term")
* `scraper_keywords` (Text - "Scraper Keywords")
* `standardization_logic` (String - "Standardization Logic")
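To make the interplay of `proxy_factor`, `min_requirement` and `whale_threshold` tangible, here is a small sketch of how a raw metric could be standardized and bucketed. The exact `standardization_logic` format is not specified in this plan, so a plain multiplication is assumed; the proxy factor of 25 m² per bed and the thresholds are invented example values.

```python
def standardize_metric(raw_value: float, proxy_factor: float) -> float:
    """Convert a raw metric (e.g. 180 beds) into the standardized unit (e.g. m²)."""
    return raw_value * proxy_factor

def classify_potential(standardized: float, min_requirement: float, whale_threshold: float) -> str:
    """Bucket a company by its standardized potential."""
    if standardized >= whale_threshold:
        return "WHALE"
    if standardized >= min_requirement:
        return "QUALIFIED"
    return "TOO_SMALL"

# Example: 180 beds with an assumed proxy factor of 25 m² per bed -> 4500 m²
area = standardize_metric(180, 25.0)
print(classify_potential(area, min_requirement=2000.0, whale_threshold=10000.0))  # QUALIFIED
```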
### Table `job_role_mappings` (role logic)
* `id` (PK)
* `pattern` (String - regex for job titles)
* `role` (String - target role)
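A sketch of how these mappings could be applied to raw job titles. The patterns and target roles below are illustrative examples, not the shipped contents of `job_role_mappings`.

```python
import re

# Illustrative (pattern, role) pairs as they might be stored in job_role_mappings
JOB_ROLE_MAPPINGS = [
    (r"(facility|geb[äa]ude).*(manage|leit)", "Operativer Entscheider"),
    (r"(gesch[äa]ftsf[üu]hr|ceo|inhaber)", "Strategischer Entscheider"),
    (r"(einkauf|procurement)", "Einkauf"),
]

def map_job_title(job_title: str) -> str:
    """Return the standardized role for a raw job title, or 'Unbekannt' if nothing matches."""
    title = job_title.lower()
    for pattern, role in JOB_ROLE_MAPPINGS:
        if re.search(pattern, title):
            return role
    return "Unbekannt"

print(map_job_title("Technischer Leiter Gebäudemanagement"))  # Operativer Entscheider
```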
## 7. History & Fixes (Jan 2026)

* **[CRITICAL] v0.7.4: Service Restoration & Logic Fix (Jan 24, 2026)**
* **[STABILITY] v0.7.3: Hardening Metric Parser & Regression Testing (Jan 23, 2026)**
* **[STABILITY] v0.7.2: Robust Metric Parsing (Jan 23, 2026)**
* **[STABILITY] v0.7.1: AI Robustness & UI Fixes (Jan 21, 2026)**
* **[MAJOR] v0.7.0: Quantitative Potential Analysis (Jan 20, 2026)**
* **[UPGRADE] v0.6.x: Notion Integration & UI Improvements**

## 14. Upgrade v2.0 (Feb 18, 2026): "Lead Factory" Extension

This upgrade turns the Company Explorer into the central brain of lead generation (the lead "pantry").

### 14.1 Detailed Logic of the New Data Fields

So that Gemini CLI (the "construction crew") can implement this, here is the semantic meaning of the new columns:

#### Table `companies` (quality & reconciliation metrics)

* **`confidence_score` (FLOAT, 0.0 - 1.0):** Indicator of how certain the AI classification is. `> 0.8` = green.
* **`data_mismatch_score` (FLOAT, 0.0 - 1.0):** Deviation between the CRM record and the web research (e.g. after a relocation).
* **`crm_name`, `crm_address`, `crm_website`, `crm_vat`:** Read-only snapshot from SuperOffice for comparison.
* **Status flags:** `website_scrape_status` and `wiki_search_status`.
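A rough sketch of how `data_mismatch_score` could be derived by comparing the CRM snapshot against the web research using simple string similarity. The actual scoring formula is not defined in this plan, and the non-`crm_` field names (`name`, `address`, `website`) are assumed column names.

```python
from difflib import SequenceMatcher

def field_mismatch(crm_value: str, web_value: str) -> float:
    """0.0 = identical, 1.0 = completely different (or one side missing)."""
    if not crm_value or not web_value:
        return 1.0
    ratio = SequenceMatcher(None, crm_value.lower().strip(), web_value.lower().strip()).ratio()
    return 1.0 - ratio

def data_mismatch_score(company: dict) -> float:
    """Average mismatch across the compared CRM snapshot fields."""
    pairs = [
        (company.get("crm_name", ""), company.get("name", "")),
        (company.get("crm_address", ""), company.get("address", "")),
        (company.get("crm_website", ""), company.get("website", "")),
    ]
    return round(sum(field_mismatch(a, b) for a, b in pairs) / len(pairs), 2)
```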
#### Table `industries` (strategy parameters)

* **`pains` / `gains`:** Structured text blocks (separated by `[Primary Product]` and `[Secondary Product]`).
* **`ops_focus_secondary` (BOOLEAN):** Controls role-specific product prioritization.

---

## 15. Open Work Packages (Site Management)

Instructions for the "construction crew" (Gemini CLI).

### Task 1: UI Adjustment - Side-by-Side CRM View & Settings
(In progress / partially completed by Gemini CLI)

### Task 2: Intelligent CRM Importer (existing data)

**Goal:** Import `demo_100.xlsx` into the SQLite database.

**Requirements** (see the sketch after this list):
1. **ZIP code handling:** Must be read as a **string** (preserve leading zeros).
2. **Normalization:** Clean the website (no `www.`, no `https://`).
3. **Matching:** Cascade over CRM ID, VAT, domain, fuzzy name.
4. **Isolation:** Update only the `crm_` columns; leave golden records untouched.
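A sketch of the import path for requirements 1-3, assuming pandas/openpyxl are available and illustrative column names in `demo_100.xlsx` (`CRM_ID`, `VAT`, `Website`, `Name`); the fuzzy step uses difflib so no extra dependency is needed.

```python
from difflib import SequenceMatcher
from typing import Optional
from urllib.parse import urlparse

import pandas as pd

def normalize_domain(url: str) -> str:
    """Strip protocol and www. so only the bare domain is compared."""
    if not url:
        return ""
    host = urlparse(url if "://" in url else f"http://{url}").netloc or url
    return host.lower().removeprefix("www.").strip("/")

def load_crm_dump(path: str = "demo_100.xlsx") -> pd.DataFrame:
    # Requirement 1: read everything as string so ZIP codes keep their leading zeros
    df = pd.read_excel(path, dtype=str).fillna("")
    # Requirement 2: normalized domain column for matching
    df["domain"] = df["Website"].map(normalize_domain)
    return df

def match_company(row: pd.Series, existing: list[dict]) -> Optional[dict]:
    """Requirement 3: matching cascade over CRM ID -> VAT -> domain -> fuzzy name."""
    if row["CRM_ID"]:
        for c in existing:
            if c.get("crm_id") == row["CRM_ID"]:
                return c
    if row["VAT"]:
        for c in existing:
            if c.get("crm_vat") == row["VAT"]:
                return c
    if row["domain"]:
        for c in existing:
            if normalize_domain(c.get("website", "")) == row["domain"]:
                return c
    for c in existing:
        if SequenceMatcher(None, row["Name"].lower(), c.get("name", "").lower()).ratio() > 0.9:
            return c
    return None
```

Requirement 4 (writing only the `crm_` columns) would then be a plain UPDATE on the matched record, leaving the golden-record fields untouched.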
---

## 16. Deployment Reference (NAS)
* **Path:** `/volume1/homes/Floke/python/brancheneinstufung/company-explorer`
* **DB:** `/app/companies_v3_fixed_2.db`
* **Sync:** `docker exec -it company-explorer python backend/scripts/sync_notion_to_ce_enhanced.py`
@@ -45,8 +45,6 @@ COPY --from=frontend-builder /app/dist ./dist
|
||||
|
||||
# Copy the main Python orchestrator script from the project root
|
||||
COPY b2b_marketing_orchestrator.py .
|
||||
# Copy Gemini API Key file if it exists in root
|
||||
COPY gemini_api_key.txt .
|
||||
|
||||
# Expose the port the Node.js server will run on
|
||||
EXPOSE 3002
|
||||
|
||||
@@ -333,12 +333,11 @@ PROMPTS = {
|
||||
# --- API & SCRAPING HELPERS ---
|
||||
|
||||
def load_api_key():
|
||||
try:
|
||||
with open("gemini_api_key.txt", "r") as f:
|
||||
return f.read().strip()
|
||||
except FileNotFoundError:
|
||||
logging.error("API key file 'gemini_api_key.txt' not found.")
|
||||
api_key = os.getenv("GEMINI_API_KEY")
|
||||
if not api_key:
|
||||
logging.error("GEMINI_API_KEY environment variable not found.")
|
||||
return None
|
||||
return api_key
|
||||
|
||||
def call_gemini_api(prompt, api_key, retries=3):
|
||||
url = f"https://generativelanguage.googleapis.com/v1/models/gemini-2.5-flash:generateContent?key={api_key}"
|
||||
Some files were not shown because too many files have changed in this diff.