fix(gtm-architect): Stabilize Docker environment and fix frontend
- Refactor Docker build process for gtm-app to ensure reliability
- Correct volume mounts and build context in docker-compose.yml to prevent stale code
- Fix frontend 404 error by using relative paths in index.html
- Ensure API key is correctly mounted into the container
- Stabilize Node.js to Python data passing via explicit --data argument
- Update gtm_architect_documentation.md with extensive troubleshooting guide
This commit is contained in:
@@ -11,6 +11,8 @@ sys.path.append(str(project_root))
|
||||
from helpers import call_openai_chat
|
||||
import market_db_manager
|
||||
|
||||
print("--- PYTHON ORCHESTRATOR V_20260101_1200_FINAL ---", file=sys.stderr)
|
||||
|
||||
__version__ = "1.4.0_UltimatePromptFix"
|
||||
|
||||
# Ensure DB is ready
|
||||
@@ -56,7 +58,7 @@ Unsere Strategie ist NICHT "Roboter ersetzen Menschen", sondern "Hybrid-Reinigun
|
||||
Behalte immer im Hinterkopf, dass wir bereits folgende Produkte im Portfolio haben:
|
||||
1. "Indoor Scrubber 50": Innenreinigung, Hartboden, Fokus: Supermärkte. Message: "Sauberkeit im laufenden Betrieb."
|
||||
2. "Service Bot Bella": Service/Gastro, Indoor. Fokus: Restaurants. Message: "Entlastung für Servicekräfte."
|
||||
""
|
||||
"""
|
||||
else:
|
||||
return """
|
||||
# IDENTITY & PURPOSE
|
||||
@@ -163,6 +165,7 @@ def analyze_product(data):
|
||||
full_extraction_prompt = sys_instr + "\n\n" + extraction_prompt
|
||||
print(f"DEBUG: Full Extraction Prompt: \n{full_extraction_prompt[:1000]}...\n", file=sys.stderr)
|
||||
extraction_response = call_openai_chat(full_extraction_prompt, response_format_json=True)
|
||||
print(f"DEBUG: Raw Extraction Response from API: {extraction_response}", file=sys.stderr)
|
||||
extraction_data = json.loads(extraction_response)
|
||||
|
||||
features_json = json.dumps(extraction_data.get('features'))
|
||||
@@ -175,4 +178,82 @@ def analyze_product(data):
|
||||
f"New Product Features: {features_json}",
|
||||
f"New Product Constraints: {constraints_json}",
|
||||
"",
|
||||
"Existing Portfolio:",
|
||||
"1. 'Indoor Scrubber 50': Indoor, Hard Floor, Supermarkets.",
|
||||
"2. 'Service Bot Bella': Indoor, Service/Hospitality, Restaurants.",
|
||||
"",
|
||||
"Task: Check for cannibalization. Does the new product make an existing one obsolete? Explain briefly.",
|
||||
"Output JSON format ONLY."
|
||||
]
|
||||
else:
|
||||
conflict_prompt_parts = [
|
||||
"PHASE 1-B: PORTFOLIO-KONFLIKT-CHECK",
|
||||
"",
|
||||
f"Neue Produktmerkmale: {features_json}",
|
||||
f"Neue Produkt-Constraints: {constraints_json}",
|
||||
"",
|
||||
"Bestehendes Portfolio:",
|
||||
"1. 'Indoor Scrubber 50': Innenreinigung, Hartboden, Supermärkte.",
|
||||
"2. 'Service Bot Bella': Indoor, Service/Gastro, Restaurants.",
|
||||
"",
|
||||
"Aufgabe: Prüfe auf Kannibalisierung. Macht das neue Produkt ein bestehendes obsolet? Begründe kurz.",
|
||||
"Output JSON format ONLY."
|
||||
]
|
||||
conflict_prompt = "\n".join(conflict_prompt_parts)
|
||||
|
||||
full_conflict_prompt = sys_instr + "\n\n" + conflict_prompt
|
||||
print(f"DEBUG: Full Conflict Prompt: \n{full_conflict_prompt[:1000]}...\n", file=sys.stderr)
|
||||
conflict_response = call_openai_chat(full_conflict_prompt, response_format_json=True)
|
||||
conflict_data = json.loads(conflict_response)
|
||||
|
||||
# --- Final Response Assembly ---
|
||||
final_response = {
|
||||
"phase1_technical_extraction": extraction_data,
|
||||
"phase2_portfolio_conflict": conflict_data,
|
||||
}
|
||||
print(json.dumps(final_response, indent=2))
|
||||
|
||||
|
||||
# --- Main Execution Logic ---
|
||||
|
||||
def main():
    """Parse CLI arguments and dispatch to the matching command handler.

    The command name comes from the required ``--mode`` argument. The
    command payload comes from ``--data`` (a JSON string) or, as a
    fallback, from stdin. Failures are reported as JSON error objects on
    stdout (stderr is reserved for debug output) so the Node.js caller
    can always parse the response.
    """
    parser = argparse.ArgumentParser(description='GTM Architect main entry point.')
    parser.add_argument('--data', help='JSON data for the command.')
    parser.add_argument('--mode', required=True, help='The command to execute.')

    args = parser.parse_args()

    command = args.mode

    # Get data from --data arg or stdin
    if args.data:
        try:
            data = json.loads(args.data)
        except json.JSONDecodeError as e:
            # Bug fix: a malformed --data payload previously escaped as a
            # raw traceback; emit a JSON error object like every other
            # failure path so the caller can parse it.
            print(json.dumps({"error": f"Invalid JSON in --data: {e}"}))
            sys.exit(1)
    else:
        # Read from stdin if no data arg provided. Best effort: an empty
        # or unreadable stream simply yields an empty payload.
        try:
            input_data = sys.stdin.read()
            data = json.loads(input_data) if input_data.strip() else {}
        except Exception:
            data = {}

    # NOTE: argparse already enforces --mode (required=True), so this
    # guard cannot normally trigger; kept as defense in depth.
    if not command:
        print(json.dumps({"error": "No command or mode provided"}))
        sys.exit(1)

    # Dispatch table: command name -> handler taking the decoded payload.
    command_handlers = {
        'save': save_project_handler,
        'list': list_projects_handler,
        'load': load_project_handler,
        'delete': delete_project_handler,
        'analyze': analyze_product,
    }

    handler = command_handlers.get(command)
    if handler:
        handler(data)
    else:
        print(json.dumps({"error": f"Unknown command: {command}"}))
|
||||
|
||||
if __name__ == '__main__':
    # Startup banner goes to stderr so stdout stays clean JSON for the
    # Node.js caller; the version tag helps confirm which build is live.
    banner = "GTM Architect Engine v" + __version__ + "_FORCE_RELOAD_2 running."
    print(banner, file=sys.stderr)
    main()
|
||||
Reference in New Issue
Block a user