diff --git a/BUILDER_APPS_MIGRATION.md b/BUILDER_APPS_MIGRATION.md index 03305992..635271b2 100644 --- a/BUILDER_APPS_MIGRATION.md +++ b/BUILDER_APPS_MIGRATION.md @@ -60,13 +60,7 @@ Multi-Line Prompts können in Docker-Umgebungen zu **sehr hartnäckigen Syntaxfe 2. **Nutze die `.format()` Methode** zur Variablen-Injektion. Dies trennt die String-Definition komplett von der Variablen-Interpolation und ist die robusteste Methode. ```python - # Schlecht (fehleranfällig, auch wenn es manchmal funktioniert): - # prompt = f""" - # 1. Mache dies: {var1} - # 2. Mache das: {var2} - # """ - - # Gut (maximal robust): + # Beispiel: So ist es maximal robust (bevorzugte Methode) prompt_template = """ 1) Mache dies: {variable_1} 2) Mache das: {variable_2} @@ -76,6 +70,10 @@ Multi-Line Prompts können in Docker-Umgebungen zu **sehr hartnäckigen Syntaxfe full_prompt = sys_instr + "\n\n" + prompt ``` +* **Versionierung für Debugging:** Um sicherzustellen, dass die korrekte Version des Codes läuft, füge Versionsnummern in die Start-Logs des Node.js Servers (`server.cjs`) und des Python Orchestrators (`gtm_architect_orchestrator.py`) ein. + * `server.cjs`: ``console.log(`... (Version: ${VERSION})`);`` + * `gtm_architect_orchestrator.py`: `print(f"DEBUG: Orchestrator v{__version__} loaded ...")` + * **Signaturen prüfen:** Shared Libraries (`helpers.py`) haben oft ältere Signaturen. Immer die tatsächliche Definition prüfen! * Beispiel: `call_openai_chat` unterstützt oft kein `system_message` Argument. Stattdessen Prompt manuell zusammenbauen (`sys_instr + "\n\n" + prompt`). 
diff --git a/gtm-architect/server.cjs b/gtm-architect/server.cjs index ecad0cae..bc599382 100644 --- a/gtm-architect/server.cjs +++ b/gtm-architect/server.cjs @@ -73,8 +73,10 @@ app.get('*', (req, res) => { }); +const VERSION = "1.1.1_Fix"; // Add a version for debugging + const server = app.listen(port, () => { - console.log(`GTM Architect server listening at http://localhost:${port} (Timeout: 600s)`); + console.log(`GTM Architect server listening at http://localhost:${port} (Version: ${VERSION})`); }); // Prevent 502 Bad Gateway by increasing Node.js server timeouts to match Nginx diff --git a/gtm_architect_orchestrator.py b/gtm_architect_orchestrator.py index e81e456e..44e4653b 100644 --- a/gtm_architect_orchestrator.py +++ b/gtm_architect_orchestrator.py @@ -11,6 +11,8 @@ sys.path.append(str(project_root)) from helpers import call_openai_chat import market_db_manager +__version__ = "1.3.0_SyntaxFix" + # Ensure DB is ready market_db_manager.init_db() @@ -131,8 +133,9 @@ def analyze_product(data): lang = data.get('language', 'de') sys_instr = get_system_instruction(lang) + # Statische Prompt-Teile (keine f-Strings) if lang == 'en': - extraction_prompt_template = """ + extraction_prompt_static_part = """ PHASE 1-A: TECHNICAL EXTRACTION Input Product Description: "{product_description}" @@ -143,9 +146,8 @@ Task: Output JSON format ONLY. """ - extraction_prompt = extraction_prompt_template.format(product_description=product_input[:25000]) else: - extraction_prompt_template = """ + extraction_prompt_static_part = """ PHASE 1-A: TECHNICAL EXTRACTION Input Product Description: "{product_description}" @@ -156,7 +158,8 @@ Aufgabe: Output JSON format ONLY. 
""" - extraction_prompt = extraction_prompt_template.format(product_description=product_input[:25000]) + # Variablen separat formatieren + extraction_prompt = extraction_prompt_static_part.format(product_description=product_input[:25000]) full_extraction_prompt = sys_instr + "\n\n" + extraction_prompt extraction_response = call_openai_chat(full_extraction_prompt, response_format_json=True) @@ -166,11 +169,11 @@ Output JSON format ONLY. constraints_json = json.dumps(extraction_data.get('constraints')) if lang == 'en': - conflict_prompt_template = """ + conflict_prompt_static_part = """ PHASE 1-B: PORTFOLIO CONFLICT CHECK -New Product Features: {features_json} -New Product Constraints: {constraints_json} +New Product Features: {features_json_var} +New Product Constraints: {constraints_json_var} Existing Portfolio: 1. "Indoor Scrubber 50": Indoor cleaning, hard floor, supermarkets. @@ -181,13 +184,13 @@ Check if the new product overlaps significantly with existing ones (is it just a Output JSON format ONLY. """ - conflict_prompt = conflict_prompt_template.format(features_json=features_json, constraints_json=constraints_json) + conflict_prompt = conflict_prompt_static_part.format(features_json_var=features_json, constraints_json_var=constraints_json) else: - conflict_prompt_template = """ + conflict_prompt_static_part = """ PHASE 1-B: PORTFOLIO CONFLICT CHECK -Neue Produkt-Features: {features_json} -Neue Produkt-Constraints: {constraints_json} +Neue Produkt-Features: {features_json_var} +Neue Produkt-Constraints: {constraints_json_var} Existierendes Portfolio: 1. "Indoor Scrubber 50": Innenreinigung, Hartboden, Supermärkte. @@ -198,7 +201,7 @@ Prüfe, ob das neue Produkt signifikant mit bestehenden Produkten überlappt (Is Output JSON format ONLY. 
""" - conflict_prompt = conflict_prompt_template.format(features_json=features_json, constraints_json=constraints_json) + conflict_prompt = conflict_prompt_static_part.format(features_json_var=features_json, constraints_json_var=constraints_json) full_conflict_prompt = sys_instr + "\n\n" + conflict_prompt conflict_response = call_openai_chat(full_conflict_prompt, response_format_json=True) @@ -218,8 +221,8 @@ def discover_icps(data): if lang == 'en': prompt_template = """ PHASE 2: ICP DISCOVERY & DATA PROXIES -Based on the product features: {features_json} -And constraints: {constraints_json} +Based on the product features: {features_json_var} +And constraints: {constraints_json_var} Output JSON format ONLY: {{ @@ -227,12 +230,12 @@ Output JSON format ONLY: "dataProxies": [ {{ "target": "Criteria", "method": "How" }} ] }} """ - prompt = prompt_template.format(features_json=features_json, constraints_json=constraints_json) + prompt = prompt_template.format(features_json_var=features_json, constraints_json_var=constraints_json) else: prompt_template = """ PHASE 2: ICP DISCOVERY & DATA PROXIES -Basierend auf Features: {features_json} -Und Constraints: {constraints_json} +Basierend auf Features: {features_json_var} +Und Constraints: {constraints_json_var} Output JSON format ONLY: {{ @@ -240,7 +243,7 @@ Output JSON format ONLY: "dataProxies": [ {{ "target": "Kriterium", "method": "Methode" }} ] }} """ - prompt = prompt_template.format(features_json=features_json, constraints_json=constraints_json) + prompt = prompt_template.format(features_json_var=features_json, constraints_json_var=constraints_json) full_prompt = sys_instr + "\n\n" + prompt response = call_openai_chat(full_prompt, response_format_json=True) @@ -251,7 +254,10 @@ def hunt_whales(data): lang = data.get('language', 'de') sys_instr = get_system_instruction(lang) icps_json = json.dumps(phase2_result.get('icps')) - prompt = f"PHASE 3: WHALE HUNTING for {icps_json}. 
Identify 3-5 concrete DACH companies per industry and buying center roles. Output JSON ONLY." + prompt_template = """ +PHASE 3: WHALE HUNTING for {icps_json_var}. Identify 3-5 concrete DACH companies per industry and buying center roles. Output JSON ONLY. +""" + prompt = prompt_template.format(icps_json_var=icps_json) full_prompt = sys_instr + "\n\n" + prompt response = call_openai_chat(full_prompt, response_format_json=True) print(response) @@ -262,7 +268,10 @@ def develop_strategy(data): lang = data.get('language', 'de') sys_instr = get_system_instruction(lang) phase3_json = json.dumps(phase3_result) - prompt = f"PHASE 4: STRATEGY Matrix for {phase3_json}. Apply Hybrid logic. Output JSON ONLY." + prompt_template = """ +PHASE 4: STRATEGY Matrix for {phase3_json_var}. Apply Hybrid logic. Output JSON ONLY. +""" + prompt = prompt_template.format(phase3_json_var=phase3_json) full_prompt = sys_instr + "\n\n" + prompt response = call_openai_chat(full_prompt, response_format_json=True) print(response) @@ -271,7 +280,10 @@ def generate_assets(data): lang = data.get('language', 'de') sys_instr = get_system_instruction(lang) data_json = json.dumps(data) - prompt = f"PHASE 5: GTM STRATEGY REPORT Markdown. Use facts, TCO, ROI. Data: {data_json}" + prompt_template = """ +PHASE 5: GTM STRATEGY REPORT Markdown. Use facts, TCO, ROI. Data: {data_json_var} +""" + prompt = prompt_template.format(data_json_var=data_json) full_prompt = sys_instr + "\n\n" + prompt response = call_openai_chat(full_prompt) print(json.dumps(response)) @@ -280,7 +292,10 @@ def generate_sales_enablement(data): lang = data.get('language', 'de') sys_instr = get_system_instruction(lang) data_json = json.dumps(data) - prompt = f"PHASE 6: Battlecards and MJ Prompts. Data: {data_json}. Output JSON ONLY." + prompt_template = """ +PHASE 6: Battlecards and MJ Prompts. Data: {data_json_var}. Output JSON ONLY. 
+""" + prompt = prompt_template.format(data_json_var=data_json) full_prompt = sys_instr + "\n\n" + prompt response = call_openai_chat(full_prompt, response_format_json=True) print(response) @@ -290,7 +305,7 @@ def main(): parser.add_argument("--mode", type=str, required=True, help="Execution mode") args = parser.parse_args() - print(f"DEBUG: Orchestrator v1.2 loaded (Mode: {args.mode})", file=sys.stderr) + print(f"DEBUG: Orchestrator v{__version__} loaded (Mode: {args.mode})", file=sys.stderr) if not sys.stdin.isatty(): try: @@ -319,4 +334,4 @@ def main(): print(json.dumps({"error": f"Unknown mode: {args.mode}"})) if __name__ == "__main__": - main() \ No newline at end of file + main()