[31388f42] Feature: Integrate Roboplanet Contact Forms into Lead Engine

This commit integrates the Roboplanet website contact form submissions into the Lead Engine, allowing them to be processed alongside TradingTwins leads.

Key changes:
- **Database Schema Update (db.py):** Added a new `source` column to the `leads` table for tracking lead origin (TradingTwins or Website-Formular). Includes a migration check to safely add the column to existing databases.
- **Improved HTML Parsing (ingest.py):** Refined the `parse_roboplanet_form` function to accurately extract data from the specific HTML structure of Roboplanet contact form emails.
- **Enhanced Ingestion Logic (trading_twins_ingest.py):**
    - Renamed `fetch_tradingtwins_emails` to `fetch_new_leads_emails` and updated it to fetch emails from both lead sources.
    - Modified `process_leads` to dynamically select the correct parser based on the email subject line.
    - Ensured the `source` field is correctly populated and that `is_low_quality` checks are applied for both lead types.
- **UI Enhancement (app.py):** Updated the Streamlit UI to visually distinguish lead types with icons and improved the "Low Quality Lead" warning message.

This feature enables a unified processing pipeline for different lead sources and provides better visibility in the Lead Engine dashboard.
This commit is contained in:
2026-03-02 19:19:01 +00:00
parent ee2dfd5b00
commit a43ae9b667
4 changed files with 94 additions and 45 deletions

View File

@@ -10,13 +10,15 @@ from dotenv import load_dotenv
# Ensure we can import from root directory
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# Import db functions
# Import db functions and parsers
try:
from db import insert_lead, init_db
from ingest import parse_roboplanet_form, parse_tradingtwins_html, is_free_mail
except ImportError:
# Fallback for direct execution
sys.path.append(os.path.dirname(__file__))
from db import insert_lead, init_db
from ingest import parse_roboplanet_form, parse_tradingtwins_html, is_free_mail
# Configuration
env_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.env'))
@@ -43,15 +45,13 @@ def get_access_token():
response.raise_for_status()
return response.json().get("access_token")
def fetch_tradingtwins_emails(token, limit=200):
def fetch_new_leads_emails(token, limit=200):
url = f"https://graph.microsoft.com/v1.0/users/{USER_EMAIL}/messages"
headers = {
"Authorization": f"Bearer {token}",
"Content-Type": "application/json"
}
# Graph API restriction: 'contains' on subject is often blocked.
# Strategy: Fetch metadata + body for last 200 messages and filter client-side.
params = {
"$top": limit,
"$select": "id,subject,receivedDateTime,body",
@@ -65,9 +65,11 @@ def fetch_tradingtwins_emails(token, limit=200):
all_msgs = response.json().get("value", [])
# Filter strictly for the subject pattern locally
# Handle case where subject might be None
filtered = [m for m in all_msgs if "Neue Anfrage zum Thema Roboter" in (m.get('subject') or '')]
# Filter client-side for both TradingTwins and Roboplanet contact forms
filtered = [m for m in all_msgs if (
"Neue Anfrage zum Thema Roboter" in (m.get('subject') or '') or
"Kontaktformular Roboplanet" in (m.get('subject') or '')
)]
return filtered
def is_free_mail(email_addr):
@@ -137,15 +139,14 @@ def process_leads(auto_sync=False):
new_count = 0
try:
token = get_access_token()
emails = fetch_tradingtwins_emails(token)
logger.info(f"Found {len(emails)} Tradingtwins emails.")
emails = fetch_new_leads_emails(token) # Use the new function
logger.info(f"Found {len(emails)} potential lead emails.")
for email in emails:
# ... (parsing logic remains same)
subject = email.get('subject') or ''
body = email.get('body', {}).get('content', '')
received_at_str = email.get('receivedDateTime')
# Convert ISO string to datetime object
received_at = None
if received_at_str:
try:
@@ -153,22 +154,53 @@ def process_leads(auto_sync=False):
except:
pass
lead_data = parse_tradingtwins_html(body)
lead_data = {}
source_prefix = "unknown"
source_display_name = "Unknown"
if "Neue Anfrage zum Thema Roboter" in subject:
lead_data = parse_tradingtwins_html(body)
source_prefix = "tt"
source_display_name = "TradingTwins"
elif "Kontaktformular Roboplanet" in subject:
lead_data = parse_roboplanet_form(body)
source_prefix = "rp"
source_display_name = "Website-Formular"
else:
# Should not happen with current filtering, but good for robustness
logger.warning(f"Skipping unknown email type: {subject}")
continue
lead_data['source'] = source_display_name # Add the new source field for the DB
lead_data['raw_body'] = body
lead_data['received_at'] = received_at
# Apply general quality checks (if not already done by parser)
if 'is_free_mail' not in lead_data:
lead_data['is_free_mail'] = is_free_mail(lead_data.get('email', ''))
if 'is_low_quality' not in lead_data:
company_name_check = lead_data.get('company', '')
# Consider company name '-' as missing/invalid
if company_name_check == '-': company_name_check = ''
lead_data['is_low_quality'] = lead_data['is_free_mail'] or not company_name_check
company_name = lead_data.get('company')
if not company_name or company_name == '-':
# Fallback: if company name is missing, use contact name as company
company_name = lead_data.get('contact')
lead_data['company'] = company_name
if not company_name:
logger.warning(f"Skipping lead due to missing company and contact name: {subject}")
continue
lead_data['id'] = lead_data.get('source_id') or f"tt_{int(datetime.now().timestamp())}"
# Ensure source_id and 'id' for db.py compatibility
if not lead_data.get('source_id'):
lead_data['source_id'] = f"{source_prefix}_unknown_{int(datetime.now().timestamp())}"
lead_data['id'] = lead_data['source_id'] # db.py expects 'id' for source_id column
if insert_lead(lead_data):
logger.info(f" -> Ingested: {company_name}")
logger.info(f" -> Ingested ({source_prefix}): {company_name}")
new_count += 1
if new_count > 0 and auto_sync: