From b446894c23a7691934892ef05c2a2de6315fb2cb Mon Sep 17 00:00:00 2001 From: Floke Date: Mon, 16 Feb 2026 14:35:21 +0000 Subject: [PATCH 1/4] [30988f42] feat(transcription-tool): stabilize transcription with plain text parsing and add retry feature --- debug_transcription_raw.py | 70 +++++++++++ docker-compose.yml | 2 +- transcription-tool/backend/app.py | 36 +++++- .../backend/services/orchestrator.py | 111 +++++++++++++++++- .../backend/services/transcription_service.py | 55 +++++++-- transcription-tool/frontend/src/App.tsx | 14 +++ 6 files changed, 270 insertions(+), 18 deletions(-) create mode 100644 debug_transcription_raw.py diff --git a/debug_transcription_raw.py b/debug_transcription_raw.py new file mode 100644 index 00000000..40fbe1e7 --- /dev/null +++ b/debug_transcription_raw.py @@ -0,0 +1,70 @@ +import sqlite3 +import json +import os + +DB_PATH = "transcripts.db" + +def inspect_latest_meeting(): + if not os.path.exists(DB_PATH): + print(f"Error: Database file '{DB_PATH}' not found.") + return + + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + # Get latest meeting + cursor.execute("SELECT id, title, created_at FROM meetings ORDER BY created_at DESC LIMIT 1") + meeting = cursor.fetchone() + + if not meeting: + print("No meetings found in DB.") + conn.close() + return + + meeting_id, title, created_at = meeting + print(f"--- Inspecting Latest Meeting: ID {meeting_id} ('{title}') created at {created_at} ---") + + # Get chunks for this meeting + cursor.execute("SELECT id, chunk_index, raw_text, json_content FROM transcript_chunks WHERE meeting_id = ? 
ORDER BY chunk_index", (meeting_id,)) + chunks = cursor.fetchall() + + if not chunks: + print("No chunks found for this meeting.") + + for chunk in chunks: + chunk_id, idx, raw_text, json_content = chunk + print(f"\n[Chunk {idx} (ID: {chunk_id})]") + + print(f"Stored JSON Content (Length): {len(json.loads(json_content)) if json_content else 'None/Empty'}") + + print("-" * 20 + " RAW TEXT START " + "-" * 20) + print(raw_text[:500]) # Print first 500 chars + print("..." if len(raw_text) > 500 else "") + print("-" * 20 + " RAW TEXT END " + "-" * 20) + + # Try to parse manually to see error + try: + # Simulate cleaning logic from orchestrator + cleaned = raw_text.strip() + if cleaned.startswith("```json"): + cleaned = cleaned[7:] + elif cleaned.startswith("```"): + cleaned = cleaned[3:] + if cleaned.endswith("```"): + cleaned = cleaned[:-3] + cleaned = cleaned.strip() + + parsed = json.loads(cleaned) + print("✅ Manual Parsing Successful!") + except json.JSONDecodeError as e: + print(f"❌ Manual Parsing Failed: {e}") + # Show context around error + if hasattr(e, 'pos'): + start = max(0, e.pos - 20) + end = min(len(cleaned), e.pos + 20) + print(f" Context at error: ...{cleaned[start:end]}...") + + conn.close() + +if __name__ == "__main__": + inspect_latest_meeting() diff --git a/docker-compose.yml b/docker-compose.yml index a34e7061..d9136bee 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -98,13 +98,13 @@ services: restart: unless-stopped volumes: - ./transcription-tool/backend:/app/backend + - ./transcription-tool/frontend/dist:/app/frontend/dist # Mount Frontend Build for Live Updates - ./transcripts.db:/app/transcripts.db - ./uploads_audio:/app/uploads_audio - ./gemini_api_key.txt:/app/gemini_api_key.txt environment: PYTHONUNBUFFERED: "1" DATABASE_URL: "sqlite:////app/transcripts.db" - GEMINI_API_KEY: "AIzaSyCFRmr1rOrkFKiEuh9GOCJNB2zfJsYmR68" # Placeholder, actual key is in file ports: - "8001:8001" diff --git a/transcription-tool/backend/app.py 
b/transcription-tool/backend/app.py index 162231fa..6890fc25 100644 --- a/transcription-tool/backend/app.py +++ b/transcription-tool/backend/app.py @@ -99,6 +99,31 @@ async def upload_audio( return meeting +@app.post("/api/meetings/{meeting_id}/retry") +def retry_meeting( + meeting_id: int, + background_tasks: BackgroundTasks, + db: Session = Depends(get_db) +): + meeting = db.query(Meeting).filter(Meeting.id == meeting_id).first() + if not meeting: + raise HTTPException(404, detail="Meeting not found") + + # Check if chunks directory exists + chunk_dir = os.path.join(settings.UPLOAD_DIR, "chunks", str(meeting_id)) + if not os.path.exists(chunk_dir) or not os.listdir(chunk_dir): + raise HTTPException(400, detail="Original audio chunks not found. Please re-upload.") + + # Reset status + meeting.status = "QUEUED" + db.commit() + + # Trigger Retry Task + from .services.orchestrator import retry_meeting_task + background_tasks.add_task(retry_meeting_task, meeting.id, SessionLocal) + + return {"status": "started", "message": "Retrying transcription..."} + from pydantic import BaseModel class InsightRequest(BaseModel): @@ -201,9 +226,16 @@ def delete_meeting(meeting_id: int, db: Session = Depends(get_db)): # Serve Frontend # This must be the last route definition to avoid catching API routes -static_path = "/frontend_static" + +# PRIORITY 1: Mounted Volume (Development / Live Update) +static_path = "/app/frontend/dist" + +# PRIORITY 2: Built-in Image Path (Production) +if not os.path.exists(static_path): + static_path = "/frontend_static" + +# PRIORITY 3: Local Development (running python directly) if not os.path.exists(static_path): - # Fallback for local development if not in Docker static_path = os.path.join(os.path.dirname(__file__), "../frontend/dist") if os.path.exists(static_path): diff --git a/transcription-tool/backend/services/orchestrator.py b/transcription-tool/backend/services/orchestrator.py index 4806febe..60ab336c 100644 --- 
a/transcription-tool/backend/services/orchestrator.py +++ b/transcription-tool/backend/services/orchestrator.py @@ -19,6 +19,16 @@ def parse_time_to_seconds(time_str): return 0 return 0 +def clean_json_string(text): + text = text.strip() + if text.startswith("```json"): + text = text[7:] + elif text.startswith("```"): + text = text[3:] + if text.endswith("```"): + text = text[:-3] + return text.strip() + def process_meeting_task(meeting_id: int, db_session_factory): db = db_session_factory() meeting = db.query(Meeting).filter(Meeting.id == meeting_id).first() @@ -50,7 +60,13 @@ def process_meeting_task(meeting_id: int, db_session_factory): # Parse JSON and Adjust Timestamps json_data = [] try: - raw_json = json.loads(result["raw_text"]) + cleaned_text = clean_json_string(result["raw_text"]) + raw_json = json.loads(cleaned_text) + + # Check for wrapped structure (e.g. {"items": [...]}) if schema enforced it + if isinstance(raw_json, dict) and "items" in raw_json: + raw_json = raw_json["items"] # Extract inner list + if isinstance(raw_json, list): for entry in raw_json: seconds = parse_time_to_seconds(entry.get("time", "00:00")) @@ -63,7 +79,7 @@ def process_meeting_task(meeting_id: int, db_session_factory): entry["display_time"] = f"{h:02}:{m:02}:{s:02}" json_data.append(entry) except Exception as e: - logger.error(f"JSON Parsing failed for chunk {i}: {e}") + logger.error(f"JSON Parsing failed for chunk {i}: {e}. Raw text start: {result['raw_text'][:100]}") # Save chunk result db_chunk = TranscriptChunk( @@ -89,3 +105,94 @@ def process_meeting_task(meeting_id: int, db_session_factory): db.commit() finally: db.close() + +def retry_meeting_task(meeting_id: int, db_session_factory): + """ + Retries transcription using existing chunks on disk. + Avoids re-splitting the original file. 
+ """ + db = db_session_factory() + meeting = db.query(Meeting).filter(Meeting.id == meeting_id).first() + if not meeting: + return + + try: + import os + transcriber = TranscriptionService() + + # 0. Validate Chunk Directory + chunk_dir = os.path.join(settings.UPLOAD_DIR, "chunks", str(meeting_id)) + if not os.path.exists(chunk_dir): + logger.error(f"Chunk directory not found for meeting {meeting_id}") + meeting.status = "ERROR" + db.commit() + return + + chunks = sorted([os.path.join(chunk_dir, f) for f in os.listdir(chunk_dir) if f.endswith(".mp3")]) + if not chunks: + logger.error(f"No chunks found for meeting {meeting_id}") + meeting.status = "ERROR" + db.commit() + return + + # Phase 1: Clear Old Chunks + meeting.status = "RETRYING" + db.query(TranscriptChunk).filter(TranscriptChunk.meeting_id == meeting_id).delete() + db.commit() + + # Phase 2: Transcribe + all_text = [] + for i, chunk_path in enumerate(chunks): + offset = i * settings.CHUNK_DURATION_SEC + logger.info(f"Retrying chunk {i+1}/{len(chunks)} with offset {offset}s") + + result = transcriber.transcribe_chunk(chunk_path, offset) + + # Parse JSON and Adjust Timestamps (Same logic as process_meeting_task) + json_data = [] + try: + # With response_schema, raw_text SHOULD be valid JSON directly + # But let's keep clean_json_string just in case specific models deviate + cleaned_text = clean_json_string(result["raw_text"]) + raw_json = json.loads(cleaned_text) + + # Check for wrapped structure (e.g. 
{"items": [...]}) if schema enforced it + if isinstance(raw_json, dict) and "items" in raw_json: + raw_json = raw_json["items"] # Extract inner list + + if isinstance(raw_json, list): + for entry in raw_json: + seconds = parse_time_to_seconds(entry.get("time", "00:00")) + absolute_seconds = seconds + offset + entry["absolute_seconds"] = absolute_seconds + + h = int(absolute_seconds // 3600) + m = int((absolute_seconds % 3600) // 60) + s = int(absolute_seconds % 60) + entry["display_time"] = f"{h:02}:{m:02}:{s:02}" + json_data.append(entry) + except Exception as e: + logger.error(f"JSON Parsing failed for chunk {i}: {e}. Raw: {result['raw_text'][:100]}") + + # Save chunk result + db_chunk = TranscriptChunk( + meeting_id=meeting.id, + chunk_index=i, + raw_text=result["raw_text"], + json_content=json_data + ) + db.add(db_chunk) + all_text.append(result["raw_text"]) + db.commit() + + # Phase 3: Finalize + meeting.status = "COMPLETED" + db.commit() + logger.info(f"Meeting {meeting.id} retry completed.") + + except Exception as e: + logger.error(f"Error retrying meeting {meeting_id}: {e}", exc_info=True) + meeting.status = "ERROR" + db.commit() + finally: + db.close() diff --git a/transcription-tool/backend/services/transcription_service.py b/transcription-tool/backend/services/transcription_service.py index f8c6e375..1775c4b7 100644 --- a/transcription-tool/backend/services/transcription_service.py +++ b/transcription-tool/backend/services/transcription_service.py @@ -30,20 +30,17 @@ class TranscriptionService: if media_file.state == "FAILED": raise Exception("File processing failed at Gemini.") - # 3. Transcribe with Diarization and Timestamps + # 3. Transcribe with Diarization and Timestamps (Plain Text Mode for Stability) prompt = """ Transkribiere dieses Audio wortgetreu. Identifiziere die Sprecher (Speaker A, Speaker B, etc.). - Gib das Ergebnis als JSON-Liste zurück. - Format: - [ - { - "time": "MM:SS", - "speaker": "Speaker A", - "text": "..." 
- } - ] + Gib das Ergebnis EXAKT in diesem Format zurück (pro Zeile ein Sprecherwechsel): + [MM:SS] Speaker Name: Gesprochener Text... + + Beispiel: + [00:00] Speaker A: Hallo zusammen. + [00:05] Speaker B: Guten Morgen. """ logger.info(f"Generating transcription for {file_path}...") @@ -52,14 +49,46 @@ class TranscriptionService: contents=[media_file, prompt], config=types.GenerateContentConfig( temperature=0.1, - response_mime_type="application/json" + max_output_tokens=8192 ) ) # Cleanup: Delete file from Gemini storage self.client.files.delete(name=media_file.name) - + + # Parse Plain Text to JSON + structured_data = self.parse_transcript(response.text) + import json return { - "raw_text": response.text, # This is now a JSON string + "raw_text": json.dumps(structured_data), # Return valid JSON string "offset": offset_seconds } + + def parse_transcript(self, text: str) -> list: + """ + Parses lines like '[00:12] Speaker A: Hello world' into structured JSON. + """ + import re + results = [] + # Regex to match: [MM:SS] Speaker: Text + # Flexible for MM:SS or H:MM:SS + pattern = re.compile(r"^\[(\d{1,2}:\d{2}(?::\d{2})?)\]\s*([^:]+):\s*(.+)$") + + for line in text.strip().split('\n'): + line = line.strip() + if not line: continue + + match = pattern.match(line) + if match: + time_str, speaker, content = match.groups() + results.append({ + "time": time_str, + "speaker": speaker.strip(), + "text": content.strip() + }) + else: + # Fallback: Append to previous if it looks like continuation + if results and not line.startswith("["): + results[-1]["text"] += " " + line + + return results diff --git a/transcription-tool/frontend/src/App.tsx b/transcription-tool/frontend/src/App.tsx index 863de20f..dd2d980a 100644 --- a/transcription-tool/frontend/src/App.tsx +++ b/transcription-tool/frontend/src/App.tsx @@ -394,6 +394,20 @@ export default function App() { > + From 00a524b2a9e63b147927c32cd5fed37c040ef9c0 Mon Sep 17 00:00:00 2001 From: Floke Date: Mon, 16 Feb 2026 14:37:29 
+0000 Subject: [PATCH 2/4] [30988f42] Ergebnis: M4A & Transcription Upgrade --- .dev_session/SESSION_INFO | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dev_session/SESSION_INFO b/.dev_session/SESSION_INFO index b16559b8..30c0af97 100644 --- a/.dev_session/SESSION_INFO +++ b/.dev_session/SESSION_INFO @@ -1 +1 @@ -{"task_id": "30988f42-8544-817a-a250-fddb7d72b4c6", "token": "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", "session_start_time": "2026-02-16T11:58:09.261049"} \ No newline at end of file +{"task_id": "30988f42-8544-817a-a250-fddb7d72b4c6", "token": "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", "session_start_time": "2026-02-16T14:37:19.780026"} \ No newline at end of file From 6144771329ed92b9f6f1fbfbf16d58cad0085617 Mon Sep 17 00:00:00 2001 From: Floke Date: Tue, 17 Feb 2026 07:14:51 +0000 Subject: [PATCH 3/4] [30a88f42] feat: Centralize API key handling and fix product enrichment\n\n- Centralized GEMINI_API_KEY loading from project root .env file.\n- Corrected product enrichment: added /enrich-product endpoint in Node.js, implemented mode in Python backend, and updated frontend to use new API for product analysis.\n- Fixed to pass product data correctly for enrichment.\n- Updated documentation ().
--- b2b-marketing-assistant/App.tsx | 48 +++++++++++++++++-- b2b-marketing-assistant/README.md | 2 +- .../components/StepDisplay.tsx | 9 +--- b2b-marketing-assistant/server.cjs | 9 ++++ b2b-marketing-assistant/vite.config.ts | 2 +- b2b_marketing_orchestrator.py | 37 ++++++++++++++ 6 files changed, 94 insertions(+), 13 deletions(-) diff --git a/b2b-marketing-assistant/App.tsx b/b2b-marketing-assistant/App.tsx index 1b2b7551..c37f0e89 100644 --- a/b2b-marketing-assistant/App.tsx +++ b/b2b-marketing-assistant/App.tsx @@ -57,6 +57,8 @@ const App: React.FC = () => { const [generationStep, setGenerationStep] = useState(0); // 0: idle, 1-6: step X is complete const [selectedIndustry, setSelectedIndustry] = useState(''); const [batchStatus, setBatchStatus] = useState<{ current: number; total: number; industry: string } | null>(null); + const [isEnriching, setIsEnriching] = useState(false); + // Project Persistence const [projectId, setProjectId] = useState(null); @@ -69,6 +71,43 @@ const App: React.FC = () => { const STEP_TITLES = t.stepTitles; const STEP_KEYS: (keyof AnalysisData)[] = ['offer', 'targetGroups', 'personas', 'painPoints', 'gains', 'messages', 'customerJourney']; + const handleEnrichRow = async (productName: string, productUrl?: string) => { + setIsEnriching(true); + setError(null); + try { + const response = await fetch(`${API_BASE_URL}/enrich-product`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + productName, + productUrl, + language: inputData.language + }), + }); + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.details || `HTTP error! 
status: ${response.status}`); + } + const newRow = await response.json(); + setAnalysisData(prev => { + const currentOffer = prev.offer || { headers: [], rows: [], summary: [] }; + return { + ...prev, + offer: { + ...currentOffer, + rows: [...currentOffer.rows, newRow] + } + }; + }); + } catch (e) { + console.error(e); + setError(e instanceof Error ? `Fehler beim Anreichern: ${e.message}` : 'Unbekannter Fehler beim Anreichern.'); + } finally { + setIsEnriching(false); + } + }; + + // --- AUTO-SAVE EFFECT --- useEffect(() => { if (generationStep === 0 || !inputData.companyUrl) return; @@ -507,9 +546,10 @@ const App: React.FC = () => { const canAdd = ['offer', 'targetGroups'].includes(stepKey); const canDelete = ['offer', 'targetGroups', 'personas'].includes(stepKey); - const handleManualAdd = (newRow: string[]) => { + const handleManualAdd = () => { + const newEmptyRow = Array(step.headers.length).fill(''); const currentRows = step.rows || []; - handleDataChange(stepKey, { ...step, rows: [...currentRows, newRow] }); + handleDataChange(stepKey, { ...step, rows: [...currentRows, newEmptyRow] }); }; return ( @@ -521,8 +561,8 @@ const App: React.FC = () => { rows={step.rows} onDataChange={(newRows) => handleDataChange(stepKey, { ...step, rows: newRows })} canAddRows={canAdd} - onEnrichRow={canAdd ? handleManualAdd : undefined} - isEnriching={false} + onEnrichRow={stepKey === 'offer' ? handleEnrichRow : handleManualAdd} + isEnriching={isEnriching} canDeleteRows={canDelete} onRestart={() => handleStepRestart(stepKey)} t={t} diff --git a/b2b-marketing-assistant/README.md b/b2b-marketing-assistant/README.md index 4cc79dc2..2ba133e2 100644 --- a/b2b-marketing-assistant/README.md +++ b/b2b-marketing-assistant/README.md @@ -15,6 +15,6 @@ View your app in AI Studio: https://ai.studio/apps/drive/1ZPnGbhaEnyhIyqs2rYhcPX 1. Install dependencies: `npm install` -2. Set the `GEMINI_API_KEY` in [.env.local](.env.local) to your Gemini API key +2. 
Set the `GEMINI_API_KEY` in the central `.env` file in the project's root directory. 3. Run the app: `npm run dev` diff --git a/b2b-marketing-assistant/components/StepDisplay.tsx b/b2b-marketing-assistant/components/StepDisplay.tsx index ce86baa6..df16e006 100644 --- a/b2b-marketing-assistant/components/StepDisplay.tsx +++ b/b2b-marketing-assistant/components/StepDisplay.tsx @@ -12,7 +12,7 @@ interface StepDisplayProps { onDataChange: (newRows: string[][]) => void; canAddRows?: boolean; canDeleteRows?: boolean; - onEnrichRow?: (productName: string, productUrl?: string) => Promise; + onEnrichRow?: (productName: string, productUrl?: string) => void; isEnriching?: boolean; onRestart?: () => void; t: typeof translations.de; @@ -106,12 +106,7 @@ export const StepDisplay: React.FC = ({ title, summary, header }; const handleAddRowClick = () => { - if (onEnrichRow) { - setIsAddingRow(true); - } else { - const newEmptyRow = Array(headers.length).fill(''); - onDataChange([...rows, newEmptyRow]); - } + setIsAddingRow(true); }; const handleConfirmAddRow = () => { diff --git a/b2b-marketing-assistant/server.cjs b/b2b-marketing-assistant/server.cjs index 5e246932..aefdf866 100644 --- a/b2b-marketing-assistant/server.cjs +++ b/b2b-marketing-assistant/server.cjs @@ -89,6 +89,15 @@ router.post('/next-step', (req, res) => { } catch (e) { res.status(500).json({ error: e.message }); } }); +router.post('/enrich-product', (req, res) => { + const { productName, productUrl, language } = req.body; + const args = [SCRIPT_PATH, '--mode', 'enrich_product', '--product_name', productName, '--language', language]; + if (productUrl) { + args.push('--product_url', productUrl); + } + runPythonScript(args, res); +}); + router.get('/projects', (req, res) => runPythonScript([dbScript, 'list'], res)); router.get('/projects/:id', (req, res) => runPythonScript([dbScript, 'load', req.params.id], res)); router.delete('/projects/:id', (req, res) => runPythonScript([dbScript, 'delete', req.params.id], res)); 
diff --git a/b2b-marketing-assistant/vite.config.ts b/b2b-marketing-assistant/vite.config.ts index 508c3760..1631dc5c 100644 --- a/b2b-marketing-assistant/vite.config.ts +++ b/b2b-marketing-assistant/vite.config.ts @@ -3,7 +3,7 @@ import { defineConfig, loadEnv } from 'vite'; import react from '@vitejs/plugin-react'; export default defineConfig(({ mode }) => { - const env = loadEnv(mode, '.', ''); + const env = loadEnv(mode, '../', ''); return { base: '/b2b/', server: { diff --git a/b2b_marketing_orchestrator.py b/b2b_marketing_orchestrator.py index 8641574b..450532b1 100644 --- a/b2b_marketing_orchestrator.py +++ b/b2b_marketing_orchestrator.py @@ -622,6 +622,40 @@ def next_step(language, context_file, generation_step, channels, focus_industry= summary = [re.sub(r'^\*\s*|^-\s*|^\d+\.\s*', '', s.strip()) for s in summary_match[1].split('\n') if s.strip()] if summary_match else [] return {step_key: {"summary": summary, "headers": table_data['headers'], "rows": table_data['rows']}} +def enrich_product(product_name, product_url, language): + logging.info(f"Enriching product: {product_name} ({product_url})") + api_key = load_api_key() + if not api_key: raise ValueError("Gemini API key is missing.") + + grounding_text = "" + if product_url: + grounding_text = get_text_from_url(product_url) + + prompt_text = f""" +# ANWEISUNG +Du bist ein B2B-Marketing-Analyst. Deine Aufgabe ist es, die Daten für EIN Produkt zu generieren. +Basierend auf dem Produktnamen und (optional) dem Inhalt der Produkt-URL, fülle die Spalten einer Markdown-Tabelle aus. +Die Ausgabe MUSS eine einzelne, kommaseparierte Zeile sein, die in eine Tabelle passt. KEINE Header, KEIN Markdown, nur die Werte. + +# PRODUKT +- Name: "{product_name}" +- URL-Inhalt: "{grounding_text[:3000]}..." 
+ +# SPALTEN +Produkt/Lösung | Beschreibung (1-2 Sätze) | Kernfunktionen | Differenzierung | Primäre Quelle (URL) + +# BEISPIEL-OUTPUT +Saugroboter NR1500,Ein professioneller Saugroboter für große Büroflächen.,Autonome Navigation;Intelligente Kartierung;Lange Akkulaufzeit,Fokus auf B2B-Markt;Datenschutzkonform,https://nexaro.com/products/nr1500 + +# DEINE AUFGABE +Erstelle jetzt die kommaseparierte Zeile für das Produkt "{product_name}". +""" + + response_text = call_gemini_api(prompt_text, api_key) + + # Return as a simple list of strings + return [cell.strip() for cell in response_text.split(',')] + def main(): parser = argparse.ArgumentParser() parser.add_argument('--mode', required=True) @@ -633,10 +667,13 @@ def main(): parser.add_argument('--channels') parser.add_argument('--language', required=True) parser.add_argument('--focus_industry') # New argument + parser.add_argument('--product_name') + parser.add_argument('--product_url') args = parser.parse_args() try: if args.mode == 'start_generation': result = start_generation(args.url, args.language, args.regions, args.focus) elif args.mode == 'next_step': result = next_step(args.language, args.context_file, args.generation_step, args.channels, args.focus_industry) + elif args.mode == 'enrich_product': result = enrich_product(args.product_name, args.product_url, args.language) sys.stdout.write(json.dumps(result, ensure_ascii=False)) except Exception as e: logging.error(f"Error: {e}", exc_info=True) From 6f558df8fa38ba2ad10b2a4eb687b776e61c9e20 Mon Sep 17 00:00:00 2001 From: Floke Date: Tue, 17 Feb 2026 07:16:13 +0000 Subject: [PATCH 4/4] [30a88f42] [30a88f42] feat: Centralize API key handling and fix product enrichment\n\n- Centralized GEMINI_API_KEY loading from project root .env file.\n- Corrected product enrichment: added /enrich-product endpoint in Node.js, implemented mode in Python backend, and updated frontend to use new API for product analysis.\n- Fixed to pass product data correctly for enrichment.\n- 
Updated documentation. --- .dev_session/SESSION_INFO | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dev_session/SESSION_INFO b/.dev_session/SESSION_INFO index 30c0af97..321a1b9a 100644 --- a/.dev_session/SESSION_INFO +++ b/.dev_session/SESSION_INFO @@ -1 +1 @@ -{"task_id": "30988f42-8544-817a-a250-fddb7d72b4c6", "token": "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", "session_start_time": "2026-02-16T14:37:19.780026"} \ No newline at end of file +{"task_id": "30a88f42-8544-819a-b5fc-c85cd80f43b7", "token": "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", "session_start_time": "2026-02-17T07:16:11.126812"} \ No newline at end of file