diff --git a/lead-engine/app.py b/lead-engine/app.py index 13494fd8..5927b2e2 100644 --- a/lead-engine/app.py +++ b/lead-engine/app.py @@ -147,7 +147,11 @@ if not df.empty: except: pass - with st.expander(f"{date_str} | {row['company_name']}"): + # --- DYNAMIC TITLE --- + source_icon = "🌐" if row.get('source') == 'Website-Formular' else "🤝" + title = f"{source_icon} {row.get('source', 'Lead')} | {date_str} | {row['company_name']}" + + with st.expander(title): # Metadata Parsing meta = {} if row.get('lead_metadata'): @@ -155,8 +159,9 @@ if not df.empty: except: pass # --- TOP SECTION: QUALITY WARNING --- + # Now directly checks the metadata from DB, which is more reliable if meta.get('is_low_quality'): - st.warning("⚠️ **Low Quality Lead detected** (Free-mail or missing company).") + st.warning("⚠️ **Low Quality Lead detected** (Free-mail provider or missing company name). Please verify manually.") # --- SECTION 1: LEAD INFO & INTELLIGENCE --- col_lead, col_intel = st.columns(2) diff --git a/lead-engine/db.py b/lead-engine/db.py index f1196aa7..5e07f330 100644 --- a/lead-engine/db.py +++ b/lead-engine/db.py @@ -35,13 +35,18 @@ def init_db(): ) ''') - # Simple migration check: check if lead_metadata column exists - try: - c.execute('SELECT lead_metadata FROM leads LIMIT 1') - except sqlite3.OperationalError: + # Simple migration check: add 'lead_metadata' if not exists + c.execute("PRAGMA table_info(leads)") + columns = [row[1] for row in c.fetchall()] + + if 'lead_metadata' not in columns: print("Migrating DB: Adding lead_metadata column...") c.execute('ALTER TABLE leads ADD COLUMN lead_metadata TEXT') + if 'source' not in columns: + print("Migrating DB: Adding source column...") + c.execute('ALTER TABLE leads ADD COLUMN source TEXT') + conn.commit() conn.close() @@ -71,8 +76,8 @@ def insert_lead(lead_data): c = conn.cursor() try: c.execute(''' - INSERT INTO leads (source_id, received_at, company_name, contact_name, email, phone, raw_body, lead_metadata, status) - 
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) + INSERT INTO leads (source_id, received_at, company_name, contact_name, email, phone, raw_body, lead_metadata, status, source) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) ''', ( lead_data.get('id'), received_at, @@ -82,7 +87,8 @@ def insert_lead(lead_data): lead_data.get('phone'), lead_data.get('raw_body'), json.dumps(meta), - 'new' + 'new', + lead_data.get('source') # Added source )) conn.commit() return True diff --git a/lead-engine/ingest.py b/lead-engine/ingest.py index 153fa980..28e36c38 100644 --- a/lead-engine/ingest.py +++ b/lead-engine/ingest.py @@ -28,38 +28,44 @@ def parse_tradingtwins_email(body): data['raw_body'] = body return data -def parse_roboplanet_form(body): +def parse_roboplanet_form(html_body): """ Parses the Roboplanet website contact form (HTML format). - Example: Vorname: BÄKO
Nachname: eG<br>Email: Alexander.Grau@baeko-hr.de ... + Example: Vorname: Gordana<br>Nachname: Dumitrovic<br>
... """ data = {} - # Helper to strip HTML tags if needed, but we'll use regex on the content - patterns = { - 'contact_first': r'Vorname:\s*(.*?)\s*<br', - 'contact_last': r'Nachname:\s*(.*?)\s*<br', - 'email': r'Email:\s*(.*?)\s*<br', - 'phone': r'Telefon:\s*(.*?)\s*<br', - 'company': r'Firma:\s*(.*?)\s*<br', - 'zip': r'PLZ:\s*(.*?)\s*<br', - 'message': r'Nachricht:\s*(.*?)\s*(?:<br|--|$)' + # Map label names in HTML to our keys + field_map = { + 'Vorname': 'contact_first', + 'Nachname': 'contact_last', + 'Email': 'email', + 'Telefon': 'phone', + 'Firma': 'company', + 'PLZ': 'zip', + 'Nachricht': 'message' } - for key, pattern in patterns.items(): - # Use re.DOTALL for message if it spans lines, but usually it's one block - match = re.search(pattern, body, re.IGNORECASE | re.DOTALL) + for label, key in field_map.items(): + # Pattern: Label: Value<br>
+ pattern = fr'{re.escape(label)}:\s*(.*?)\s*<br
' + match = re.search(pattern, html_body, re.DOTALL | re.IGNORECASE) if match: - # Clean HTML tags from the captured value if any - val = re.sub(r'<.*?>', '', match.group(1)).strip() - data[key] = val + raw_val = match.group(1).strip() + clean_val = re.sub(r'<[^>]+>', '', raw_val).strip() # Clean any leftover HTML tags + data[key] = clean_val - # Combine names - if 'contact_first' in data and 'contact_last' in data: + # Composite fields + if data.get('contact_first') and data.get('contact_last'): data['contact'] = f"{data['contact_first']} {data['contact_last']}" - + # For Roboplanet forms, we use the timestamp as ID or a hash if missing - data['raw_body'] = body + # We need to ensure 'id' is present for db.py compatibility + if not data.get('source_id'): + data['source_id'] = f"rp_unknown_{int(datetime.now().timestamp())}" + data['id'] = data['source_id'] + + data['raw_body'] = html_body return data def ingest_mock_leads(): diff --git a/lead-engine/trading_twins_ingest.py b/lead-engine/trading_twins_ingest.py index 2b023117..d4897d7c 100644 --- a/lead-engine/trading_twins_ingest.py +++ b/lead-engine/trading_twins_ingest.py @@ -10,13 +10,15 @@ from dotenv import load_dotenv # Ensure we can import from root directory sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) -# Import db functions +# Import db functions and parsers try: from db import insert_lead, init_db + from ingest import parse_roboplanet_form, parse_tradingtwins_html, is_free_mail except ImportError: # Fallback for direct execution sys.path.append(os.path.dirname(__file__)) from db import insert_lead, init_db + from ingest import parse_roboplanet_form, parse_tradingtwins_html, is_free_mail # Configuration env_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.env')) @@ -43,15 +45,13 @@ def get_access_token(): response.raise_for_status() return response.json().get("access_token") -def fetch_tradingtwins_emails(token, limit=200): +def 
fetch_new_leads_emails(token, limit=200): url = f"https://graph.microsoft.com/v1.0/users/{USER_EMAIL}/messages" headers = { "Authorization": f"Bearer {token}", "Content-Type": "application/json" } - # Graph API restriction: 'contains' on subject is often blocked. - # Strategy: Fetch metadata + body for last 200 messages and filter client-side. params = { "$top": limit, "$select": "id,subject,receivedDateTime,body", @@ -65,9 +65,11 @@ def fetch_tradingtwins_emails(token, limit=200): all_msgs = response.json().get("value", []) - # Filter strictly for the subject pattern locally - # Handle case where subject might be None - filtered = [m for m in all_msgs if "Neue Anfrage zum Thema Roboter" in (m.get('subject') or '')] + # Filter client-side for both TradingTwins and Roboplanet contact forms + filtered = [m for m in all_msgs if ( + "Neue Anfrage zum Thema Roboter" in (m.get('subject') or '') or + "Kontaktformular Roboplanet" in (m.get('subject') or '') + )] return filtered def is_free_mail(email_addr): @@ -137,15 +139,14 @@ def process_leads(auto_sync=False): new_count = 0 try: token = get_access_token() - emails = fetch_tradingtwins_emails(token) - logger.info(f"Found {len(emails)} Tradingtwins emails.") + emails = fetch_new_leads_emails(token) # Use the new function + logger.info(f"Found {len(emails)} potential lead emails.") for email in emails: - # ... 
(parsing logic remains same) + subject = email.get('subject') or '' body = email.get('body', {}).get('content', '') received_at_str = email.get('receivedDateTime') - # Convert ISO string to datetime object received_at = None if received_at_str: try: @@ -153,22 +154,53 @@ def process_leads(auto_sync=False): except: pass - lead_data = parse_tradingtwins_html(body) + lead_data = {} + source_prefix = "unknown" + source_display_name = "Unknown" + + if "Neue Anfrage zum Thema Roboter" in subject: + lead_data = parse_tradingtwins_html(body) + source_prefix = "tt" + source_display_name = "TradingTwins" + elif "Kontaktformular Roboplanet" in subject: + lead_data = parse_roboplanet_form(body) + source_prefix = "rp" + source_display_name = "Website-Formular" + else: + # Should not happen with current filtering, but good for robustness + logger.warning(f"Skipping unknown email type: {subject}") + continue + + lead_data['source'] = source_display_name # Add the new source field for the DB lead_data['raw_body'] = body lead_data['received_at'] = received_at - + + # Apply general quality checks (if not already done by parser) + if 'is_free_mail' not in lead_data: + lead_data['is_free_mail'] = is_free_mail(lead_data.get('email', '')) + if 'is_low_quality' not in lead_data: + company_name_check = lead_data.get('company', '') + # Consider company name '-' as missing/invalid + if company_name_check == '-': company_name_check = '' + lead_data['is_low_quality'] = lead_data['is_free_mail'] or not company_name_check + company_name = lead_data.get('company') if not company_name or company_name == '-': + # Fallback: if company name is missing, use contact name as company company_name = lead_data.get('contact') lead_data['company'] = company_name if not company_name: + logger.warning(f"Skipping lead due to missing company and contact name: {subject}") continue - lead_data['id'] = lead_data.get('source_id') or f"tt_{int(datetime.now().timestamp())}" + # Ensure source_id and 'id' for db.py 
compatibility + if not lead_data.get('source_id'): + lead_data['source_id'] = f"{source_prefix}_unknown_{int(datetime.now().timestamp())}" + lead_data['id'] = lead_data['source_id'] # db.py expects 'id' for source_id column if insert_lead(lead_data): - logger.info(f" -> Ingested: {company_name}") + logger.info(f" -> Ingested ({source_prefix}): {company_name}") new_count += 1 if new_count > 0 and auto_sync: