Docs: Update MIGRATION_PLAN.md to v0.4.0 with new features (Company Explorer)

2026-01-09 10:18:45 +00:00
parent c4fcc990b0
commit 89b8f75375
5 changed files with 179 additions and 33 deletions


@@ -261,6 +261,45 @@ def override_website_url(company_id: int, url: str = Query(...), db: Session = D
    db.commit()
    return {"status": "updated", "website": url}


@app.post("/api/companies/{company_id}/override/impressum")
def override_impressum_url(company_id: int, url: str = Query(...), db: Session = Depends(get_db)):
    """
    Manually sets the Impressum URL for a company and triggers re-extraction.
    """
    company = db.query(Company).filter(Company.id == company_id).first()
    if not company:
        raise HTTPException(404, "Company not found")

    logger.info(f"Manual Override for {company.name}: Setting Impressum URL to {url}")

    # 1. Scrape Impressum immediately
    impressum_data = scraper._scrape_impressum_data(url)
    if not impressum_data:
        raise HTTPException(status_code=400, detail="Failed to extract data from provided URL")

    # 2. Find existing scrape data or create new
    existing_scrape = db.query(EnrichmentData).filter(
        EnrichmentData.company_id == company.id,
        EnrichmentData.source_type == "website_scrape"
    ).first()

    if not existing_scrape:
        # Create minimal scrape entry
        db.add(EnrichmentData(
            company_id=company.id,
            source_type="website_scrape",
            content={"impressum": impressum_data, "text": "", "title": "Manual Impressum", "url": url}
        ))
    else:
        # Update existing entry; copy the JSON content so the change is detected
        content = dict(existing_scrape.content) if existing_scrape.content else {}
        content["impressum"] = impressum_data
        existing_scrape.content = content
        existing_scrape.updated_at = datetime.utcnow()

    db.commit()
    return {"status": "updated", "data": impressum_data}
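
(For context, not part of this diff: a minimal sketch of how the new override endpoint might be called from a client, assuming the API is reachable at http://localhost:8000 and that a company with id 42 exists; host, port, id, and URL are hypothetical.)

    import requests

    # Illustrative only: the Impressum URL is passed as a query parameter, as the endpoint expects.
    resp = requests.post(
        "http://localhost:8000/api/companies/42/override/impressum",
        params={"url": "https://example.com/impressum"},  # hypothetical Impressum URL
    )
    resp.raise_for_status()
    print(resp.json())  # expected shape: {"status": "updated", "data": {...}}
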

def run_discovery_task(company_id: int):
    # New Session for Background Task
    from .database import SessionLocal