duplicate_checker.py updated

2025-08-18 12:11:35 +00:00
parent af2e60f93d
commit 721cb39cb0

@@ -234,28 +234,32 @@ def choose_rarest_token(norm_name: str, token_freq: Counter):
# --- Main function ---
def main(job_id=None):
    logger.info("Starte Duplikats-Check v2.15 (Quality-first++)")
    # NEW: status update
    update_status(job_id, "Läuft", "Initialisiere GoogleSheetHandler...")
    try:
        sheet = GoogleSheetHandler()
        logger.info("GoogleSheetHandler initialisiert")
    except Exception as e:
        logger.critical(f"Init GoogleSheetHandler fehlgeschlagen: {e}")
        # NEW: status update on failure
        update_status(job_id, "Fehlgeschlagen", f"Init GoogleSheetHandler fehlgeschlagen: {e}")
        sys.exit(1)

    # Load data
    # NEW: status update
    update_status(job_id, "Läuft", "Lade CRM- und Matching-Daten...")
    crm_df = sheet.get_sheet_as_dataframe(CRM_SHEET_NAME)
    match_df = sheet.get_sheet_as_dataframe(MATCHING_SHEET_NAME)
    total = len(match_df) if match_df is not None else 0  # NEW: define total up front
    logger.info(f"{0 if crm_df is None else len(crm_df)} CRM-Datensätze | {total} Matching-Datensätze")
    if crm_df is None or crm_df.empty or match_df is None or match_df.empty:
        logger.critical("Leere Daten in einem der Sheets. Abbruch.")
        # NEW: status update on failure
        update_status(job_id, "Fehlgeschlagen", "Leere Daten in einem der Sheets.")
        return

    # SerpAPI only for matching rows where columns B and E are empty
    # Assumption: serp_key is loaded globally, e.g. via Config.load_api_keys()
    if Config.API_KEYS.get('serpapi'):  # safe access to the key
        if 'Gefundene Website' not in match_df.columns:
            match_df['Gefundene Website'] = ''
        b_empty = match_df['CRM Website'].fillna('').astype(str).str.strip().str.lower().isin(['', 'k.a.', 'k.a', 'n/a', 'na'])
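`update_status` is defined outside the hunks shown here. A minimal sketch consistent with the calls above, assuming the status is persisted to a per-job JSON file that another process can poll (`STATUS_DIR` and the file layout are assumptions, not part of this commit):

import json
import os
import time

STATUS_DIR = "job_status"  # assumption: directory polled by a web UI or API

def update_status(job_id, status, message):
    """Persist the current job state so another process can poll it."""
    if not job_id:  # main() also runs standalone with job_id=None
        return
    os.makedirs(STATUS_DIR, exist_ok=True)
    payload = {"status": status, "message": message, "updated_at": time.time()}
    with open(os.path.join(STATUS_DIR, f"{job_id}.json"), "w", encoding="utf-8") as fh:
        json.dump(payload, fh, ensure_ascii=False)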
@@ -263,6 +267,7 @@ def main(job_id=None):
        empty_mask = b_empty & e_empty
        empty_count = int(empty_mask.sum())
        if empty_count > 0:
            # NEW: status update
            update_status(job_id, "Läuft", f"Suche Websites für {empty_count} Firmen via SerpAPI...")
            logger.info(f"Serp-Fallback für Matching: {empty_count} Firmen ohne URL in B/E")
            found_cnt = 0
@@ -289,7 +294,7 @@ def main(job_id=None):
logger.info("Serp-Fallback übersprungen: B oder E bereits befüllt (keine fehlenden Matching-URLs)")
# Normalisierung CRM
update_status(job_id, "Läuft", "Normalisiere CRM-Daten...")
update_status(job_id, "Läuft", "Normalisiere Daten...")
crm_df['normalized_name'] = crm_df['CRM Name'].astype(str).apply(normalize_company_name)
crm_df['normalized_domain'] = crm_df['CRM Website'].astype(str).apply(simple_normalize_url)
crm_df['CRM Ort'] = crm_df['CRM Ort'].astype(str).str.lower().str.strip()
@@ -298,7 +303,6 @@ def main(job_id=None):
    crm_df['domain_use_flag'] = 1

    # Normalize matching data
    update_status(job_id, "Läuft", "Normalisiere Matching-Daten...")
    match_df['Gefundene Website'] = match_df.get('Gefundene Website', pd.Series(index=match_df.index, dtype=object))
    match_df['Serp Vertrauen'] = match_df.get('Serp Vertrauen', pd.Series(index=match_df.index, dtype=object))
    match_df['Effektive Website'] = match_df['CRM Website'].fillna('').astype(str).str.strip()
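The two normalization helpers are defined earlier in the file and not shown in this diff. A rough sketch of the assumed behavior (lowercase, strip German legal forms, reduce URLs to bare domains):

import re

def normalize_company_name(name: str) -> str:
    # Assumption: lowercase, drop common German legal forms, squeeze punctuation.
    name = str(name).lower().strip()
    name = re.sub(r"\b(gmbh|ag|kg|ohg|ug|se|e\.?\s?v\.?)\b\.?", " ", name)
    return re.sub(r"[^a-z0-9äöüß]+", " ", name).strip()

def simple_normalize_url(url: str) -> str:
    # Assumption: reduce any URL to its bare domain.
    url = str(url).lower().strip()
    url = re.sub(r"^https?://", "", url)
    url = re.sub(r"^www\.", "", url)
    return url.split("/")[0]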
@@ -312,17 +316,18 @@ def main(job_id=None):
    match_df['block_key'] = match_df['normalized_name'].apply(lambda x: x.split()[0] if x else None)

    def _domain_use(row):
        if str(row.get('CRM Website', '')).strip():
            return 1
        trust = str(row.get('Serp Vertrauen', '')).lower()
        return 1 if trust == 'hoch' else 0

    match_df['domain_use_flag'] = match_df.apply(_domain_use, axis=1)

    # Build city tokens dynamically
    def build_city_tokens(crm_df, match_df):
        cities = set()
        for s in pd.concat([crm_df['CRM Ort'], match_df['CRM Ort']], ignore_index=True).dropna().unique():
            for t in _tokenize(s):
                if len(t) >= 3:
                    cities.add(t)
        return cities

    global CITY_TOKENS
    CITY_TOKENS = build_city_tokens(crm_df, match_df)
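`_tokenize` is likewise defined elsewhere in the module; a plausible minimal version:

import re

def _tokenize(s):
    # Assumption: simple lowercased word tokens.
    return re.findall(r"[a-zäöüß0-9]+", str(s).lower())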
@@ -336,14 +341,14 @@ def main(job_id=None):
    # Matching
    results = []
    metrics = Counter()
    logger.info("Starte Matching-Prozess…")
    for idx, mrow in match_df.to_dict('index').items():
        processed = idx + 1  # assumes a 0-based RangeIndex
        progress_message = f"Prüfe {processed}/{total}: '{mrow.get('CRM Name','')}'"
        logger.info(progress_message)
        # NEW: status update inside the loop, every 5 rows and on the last row
        if processed % 5 == 0 or processed == total:
            update_status(job_id, "Läuft", progress_message)
        candidates = []
@@ -365,8 +370,10 @@ def main(job_id=None):
        for r in crm_records:
            n2 = r.get('normalized_name','')
            clean2, toks2 = clean_name_for_scoring(n2)
            if not clean2:
                continue
            if rtok and rtok not in toks2:
                continue
            pr = fuzz.partial_ratio(clean1, clean2)
            if pr >= PREFILTER_MIN_PARTIAL:
                pf.append((pr, r))
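The prefilter admits a CRM record only if `fuzz.partial_ratio` of the two cleaned names reaches `PREFILTER_MIN_PARTIAL`. `partial_ratio` scores the best substring alignment on a 0-100 scale, so a short name embedded in a longer one still scores 100. A quick illustration, assuming `rapidfuzz` (the `fuzzywuzzy` API is call-compatible):

from rapidfuzz import fuzz

# Best-substring alignment: the shorter string matches fully inside the longer one.
print(fuzz.partial_ratio("mueller logistik", "mueller logistik gmbh koeln"))  # 100.0
# Unrelated names score far lower and are pre-filtered out.
print(fuzz.partial_ratio("mueller logistik", "schmidt hochbau"))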
@@ -409,6 +416,7 @@ def main(job_id=None):
    # Write results back
    update_status(job_id, "Läuft", "Schreibe Ergebnisse zurück ins Sheet...")
    logger.info("Schreibe Ergebnisse ins Sheet (SAFE in-place, keine Spaltenverluste)…")
    res_df = pd.DataFrame(results, index=match_df.index)
    write_df = match_df.copy()
    write_df['Match'] = res_df['Match']
@@ -444,8 +452,12 @@ def main(job_id=None):
logger.info(f"Serp Vertrauen: {dict(serp_counts)}")
logger.info(f"Config: TH={SCORE_THRESHOLD}, TH_WEAK={SCORE_THRESHOLD_WEAK}, MIN_NAME_FOR_DOMAIN={MIN_NAME_FOR_DOMAIN}, Penalties(city={CITY_MISMATCH_PENALTY},country={COUNTRY_MISMATCH_PENALTY}), Prefilter(partial>={PREFILTER_MIN_PARTIAL}, limit={PREFILTER_LIMIT})")
if __name__ == "__main__":
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--job-id", type=str, help="Eindeutige ID für den Job-Status.")
args = parser.parse_args()
# Lade API-Keys, bevor die main-Funktion startet
Config.load_api_keys()
main(job_id=args.job_id)
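With the new argument, a status-reporting run can be started as, e.g., `python duplicate_checker.py --job-id <some-unique-id>`; without `--job-id`, argparse leaves `args.job_id` as `None`, so `update_status` receives no job id (a no-op in the sketch above).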