From f160fc0fc57cda43d48393b8b9fd71e86e5a1363 Mon Sep 17 00:00:00 2001
From: Floke
Date: Fri, 5 Sep 2025 07:34:23 +0000
Subject: [PATCH] Update duplicate_checker.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Removed the dynamic stopword detection because it was too aggressive; frequent words now merely receive a low weight.
- Completely recalibrated the score computation and thresholds for a better balance between recall and precision.
- Reintroduced the "domain gate": a domain match only counts strongly if there is also a minimal name similarity.
- Kept the golden rule and the interactive mode.
---
 duplicate_checker.py | 176 ++++++++++++++++++++----------------------
 1 file changed, 82 insertions(+), 94 deletions(-)

diff --git a/duplicate_checker.py b/duplicate_checker.py
index 36a269e3..1f8f6d19 100644
--- a/duplicate_checker.py
+++ b/duplicate_checker.py
@@ -1,11 +1,11 @@
-# duplicate_checker.py v3.0
+# duplicate_checker.py v3.1
 # Build timestamp is injected into logfile name.
-# --- NEW FEATURES v3.0 ---
-# - Golden rule: near-exact name matches (>= 98%) are always counted as hits.
-# - Weighted scoring (TF-IDF): unique words in a company name carry more weight than frequent filler words.
-# - Interactive mode: for ambiguous cases, the user can pick the best candidate manually.
-# - Extensively reworked scoring logic for higher precision.
+# --- CHANGES v3.1 ---
+# - Dynamic stopword detection removed because it was too aggressive; frequent words now merely receive a low weight.
+# - Score computation and thresholds completely recalibrated for a better balance between recall and precision.
+# - "Domain gate" reintroduced: a domain match only counts strongly if there is also a minimal name similarity.
+# - Golden rule and interactive mode kept.

 import os
 import sys
@@ -25,6 +25,7 @@ from google_sheet_handler import GoogleSheetHandler
 STATUS_DIR = "job_status"

 def update_status(job_id, status, progress_message):
+    # ... (no changes here)
     if not job_id: return
     status_file = os.path.join(STATUS_DIR, f"{job_id}.json")
     try:
@@ -46,23 +47,25 @@ CRM_SHEET_NAME = "CRM_Accounts"
 MATCHING_SHEET_NAME = "Matching_Accounts"
 LOG_DIR = "Log"
 now = datetime.now().strftime('%Y-%m-%d_%H-%M')
-LOG_FILE = f"{now}_duplicate_check_v3.0.txt"
+LOG_FILE = f"{now}_duplicate_check_v3.1.txt"

-# Scoring configuration
-SCORE_THRESHOLD = 100 # default threshold for a match
-SCORE_THRESHOLD_WEAK = 130 # threshold for matches without domain or location
-GOLDEN_MATCH_RATIO = 98 # ratio from which a name match counts as a "golden match"
-GOLDEN_MATCH_SCORE = 300 # score awarded for a golden match
+# --- NEW: adjusted scoring configuration v3.1 ---
+SCORE_THRESHOLD = 85 # default threshold for a match (NEW)
+SCORE_THRESHOLD_WEAK = 110 # threshold for matches without domain or location (NEW)
+GOLDEN_MATCH_RATIO = 98
+GOLDEN_MATCH_SCORE = 300
+MIN_NAME_SCORE_FOR_DOMAIN = 2.0 # minimum name score for a domain match to count fully (NEW)

 # Interactive mode configuration
-INTERACTIVE_SCORE_MIN = 100 # minimum score of the best candidate to trigger interactive mode
-INTERACTIVE_SCORE_DIFF = 20 # maximum score gap to the runner-up candidate to trigger interactive mode
+INTERACTIVE_SCORE_MIN = 85
+INTERACTIVE_SCORE_DIFF = 15

 # Prefilter configuration
 PREFILTER_MIN_PARTIAL = 70
 PREFILTER_LIMIT = 30

 # --- Logging Setup ---
+# ... (no changes here)
 if not os.path.exists(LOG_DIR):
     os.makedirs(LOG_DIR, exist_ok=True)
 log_path = os.path.join(LOG_DIR, LOG_FILE)
@@ -81,9 +84,10 @@ fh.setFormatter(formatter)
 root.addHandler(fh)
 logger = logging.getLogger(__name__)
 logger.info(f"Logging to console and file: {log_path}")
-logger.info(f"Starting duplicate_checker.py v3.0 | Build: {now}")
+logger.info(f"Starting duplicate_checker.py v3.1 | Build: {now}")

 # --- Load SerpAPI key ---
+# ... (no changes here)
 try:
     Config.load_api_keys()
     serp_key = Config.API_KEYS.get('serpapi')
@@ -95,74 +99,59 @@ except Exception as e:

 # --- Stop-/City-Tokens ---
 STOP_TOKENS_BASE = {
-    'gmbh','mbh','ag','kg','ug','ohg','se','co','kgaa','inc','llc','ltd','sarl',
+    'gmbh','mbh','ag','kg','ug','ohg','se','co','kgaa','inc','llc','ltd','sarl', 'b.v', 'bv',
     'holding','gruppe','group','international','solutions','solution','service','services',
     'deutschland','austria','germany','technik','technology','technologies','systems','systeme',
     'logistik','logistics','industries','industrie','management','consulting','vertrieb','handel',
     'international','company','gesellschaft','mbh&co','mbhco','werke','werk'
 }
-CITY_TOKENS = set() # populated dynamically
+CITY_TOKENS = set()

 # --- Utilities ---
 def _tokenize(s: str):
     if not s: return []
     return re.split(r"[^a-z0-9äöüß]+", str(s).lower())

-def clean_name_for_scoring(norm_name: str, dynamic_stopwords: set):
-    """Removes stop and city tokens as well as dynamic stopwords."""
+def clean_name_for_scoring(norm_name: str):
+    """Removes only the base stop and city tokens."""
     if not norm_name: return "", set()
     tokens = [t for t in _tokenize(norm_name) if len(t) >= 3]
-    stop_union = STOP_TOKENS_BASE | CITY_TOKENS | dynamic_stopwords
+    stop_union = STOP_TOKENS_BASE | CITY_TOKENS
     final_tokens = [t for t in tokens if t not in stop_union]
     return " ".join(final_tokens), set(final_tokens)

-# --- NEW: TF-IDF logic (simplified) ---
-def build_term_weights(crm_df: pd.DataFrame, dynamic_stopwords: set):
+# --- TF-IDF logic ---
+def build_term_weights(crm_df: pd.DataFrame):
     """Builds a weight dictionary based on how rare each word is (IDF)."""
     logger.info("Starting term weight computation (TF-IDF)...")
     token_counts = Counter()
     total_docs = len(crm_df)
     for name in crm_df['normalized_name']:
-        _, tokens = clean_name_for_scoring(name, dynamic_stopwords)
-        for token in set(tokens): # count each word only once per company name
+        _, tokens = clean_name_for_scoring(name)
+        for token in set(tokens):
             token_counts[token] += 1

     term_weights = {}
     for token, count in token_counts.items():
-        # IDF formula: log(N / df) - the rarer the word, the higher the weight
-        idf = math.log(total_docs / (count + 1)) # +1 for smoothing
+        idf = math.log(total_docs / (count + 1))
         term_weights[token] = idf

     logger.info(f"Term weights computed for {len(term_weights)} tokens.")
     return term_weights

-def get_dynamic_stopwords(crm_df: pd.DataFrame, threshold_percent=0.01):
-    """Identifies frequent words in the CRM data set that should be treated as stopwords."""
-    logger.info("Collecting dynamic stopwords...")
-    token_counts = Counter()
-    for name in crm_df['normalized_name']:
-        tokens = [t for t in _tokenize(name) if len(t) >= 3 and t not in (STOP_TOKENS_BASE | CITY_TOKENS)]
-        for token in set(tokens):
-            token_counts[token] += 1
-
-    limit = len(crm_df) * threshold_percent
-    stopwords = {token for token, count in token_counts.items() if count > limit}
-    logger.info(f"{len(stopwords)} dynamic stopwords identified (e.g. 'stadtwerke', 'werke', ...)")
-    return stopwords
-
-# --- Similarity ---
-def calculate_similarity(mrec: dict, crec: dict, term_weights: dict, dynamic_stopwords: set):
+# --- Similarity v3.1 ---
+def calculate_similarity(mrec: dict, crec: dict, term_weights: dict):

-    # --- NEW: golden rule for an exact name match ---
+    # --- Golden rule for an exact name match ---
     n1_raw = mrec.get('normalized_name', '')
     n2_raw = crec.get('normalized_name', '')
     if fuzz.ratio(n1_raw, n2_raw) >= GOLDEN_MATCH_RATIO:
-        return GOLDEN_MATCH_SCORE, {'reason': f'Golden Match (Name Ratio >= {GOLDEN_MATCH_RATIO}%)'}
+        return GOLDEN_MATCH_SCORE, {'reason': f'Golden Match (Name Ratio >= {GOLDEN_MATCH_RATIO}%)', 'name_score': 100}

-    # Domain (with gate)
+    # Domain
     dom1 = mrec.get('normalized_domain','')
     dom2 = crec.get('normalized_domain','')
     domain_match = 1 if (dom1 and dom1 == dom2) else 0
@@ -172,27 +161,31 @@ def calculate_similarity(mrec: dict, crec: dict, term_weights: dict, dynamic_sto
     country_match = 1 if (mrec.get('CRM Land') and crec.get('CRM Land') and mrec.get('CRM Land') == crec.get('CRM Land')) else 0

     # Name (cleaned)
-    clean1, toks1 = clean_name_for_scoring(n1_raw, dynamic_stopwords)
-    clean2, toks2 = clean_name_for_scoring(n2_raw, dynamic_stopwords)
+    clean1, toks1 = clean_name_for_scoring(n1_raw)
+    clean2, toks2 = clean_name_for_scoring(n2_raw)

-    # --- NEW: weighted name score based on TF-IDF ---
+    # Weighted name score
     name_score = 0
     overlapping_tokens = toks1 & toks2
     if overlapping_tokens:
-        # the score is the sum of the weights of the matching words
         name_score = sum(term_weights.get(token, 0) for token in overlapping_tokens)
-        # bonus for a high percentage overlap of the rare words
        if toks1:
             overlap_percentage = len(overlapping_tokens) / len(toks1)
             name_score *= (1 + overlap_percentage)

-    # --- NEW: reworked total score computation ---
-    # base score components
-    score_domain = 100 if domain_match else 0
+    # --- CHANGE v3.1: domain gate reintroduced ---
+    score_domain = 0
+    if domain_match:
+        if name_score >= MIN_NAME_SCORE_FOR_DOMAIN:
+            score_domain = 80 # strong bonus
+        else:
+            score_domain = 20 # weak bonus
+
+    # Reworked total score computation
     score_location = 20 if (city_match and country_match) else 0

-    # total score
-    total = name_score * 15 + score_domain + score_location # the name now has much more influence
+    # --- CHANGE v3.1: adjusted weighting ---
+    total = name_score * 8 + score_domain + score_location

     # Penalties
     penalties = 0
@@ -214,34 +207,33 @@ def calculate_similarity(mrec: dict, crec: dict, term_weights: dict, dynamic_sto
     return max(0, round(total)), comp

 # --- Indexes ---
-def build_indexes(crm_df: pd.DataFrame, dynamic_stopwords: set):
+def build_indexes(crm_df: pd.DataFrame):
+    # ... (change: clean_name_for_scoring no longer needs dynamic_stopwords)
     records = list(crm_df.to_dict('records'))

-    # Domain index
     domain_index = {}
     for r in records:
         d = r.get('normalized_domain')
-        if d:
-            domain_index.setdefault(d, []).append(r)
+        if d: domain_index.setdefault(d, []).append(r)

-    # Token index
     token_index = {}
     for idx, r in enumerate(records):
-        _, toks = clean_name_for_scoring(r.get('normalized_name',''), dynamic_stopwords)
+        _, toks = clean_name_for_scoring(r.get('normalized_name',''))
         for t in set(toks):
-            token_index.setdefault(t, []).append(idx) # store the index instead of the whole record
+            token_index.setdefault(t, []).append(idx)

     return records, domain_index, token_index

-def choose_rarest_token(norm_name: str, term_weights: dict, dynamic_stopwords: set):
-    _, toks = clean_name_for_scoring(norm_name, dynamic_stopwords)
+def choose_rarest_token(norm_name: str, term_weights: dict):
+    # ... (change: clean_name_for_scoring no longer needs dynamic_stopwords)
+    _, toks = clean_name_for_scoring(norm_name)
     if not toks: return None
-    # the rarest token has the highest weight (highest IDF score)
     rarest = max(toks, key=lambda t: term_weights.get(t, 0))
     return rarest if term_weights.get(rarest, 0) > 0 else None

 # --- Main function ---
 def main(job_id=None, interactive=False):
-    logger.info("Starting duplicate check v3.0 (Weighted Scoring & Interactive Mode)")
+    logger.info("Starting duplicate check v3.1 (Recalibrated Weighted Scoring)")
+    # ... (code up to the normalization stays the same) ...
     update_status(job_id, "Running", "Initializing GoogleSheetHandler...")
     try:
         sheet = GoogleSheetHandler()
@@ -251,7 +243,6 @@
         update_status(job_id, "Failed", f"GoogleSheetHandler init failed: {e}")
         sys.exit(1)

-    # Load data
     update_status(job_id, "Running", "Loading CRM and matching data...")
     crm_df = sheet.get_sheet_as_dataframe(CRM_SHEET_NAME)
     match_df = sheet.get_sheet_as_dataframe(MATCHING_SHEET_NAME)
@@ -262,7 +253,6 @@
         update_status(job_id, "Failed", "Empty data in one of the sheets.")
         return

-    # Normalization
     update_status(job_id, "Running", "Normalizing data...")
     crm_df['normalized_name'] = crm_df['CRM Name'].astype(str).apply(normalize_company_name)
     crm_df['normalized_domain'] = crm_df['CRM Website'].astype(str).apply(simple_normalize_url)
@@ -284,10 +274,9 @@
     CITY_TOKENS = build_city_tokens(crm_df, match_df)
     logger.info(f"City tokens collected: {len(CITY_TOKENS)}")

-    # --- NEW: TF-IDF and index construction ---
-    dynamic_stopwords = get_dynamic_stopwords(crm_df)
-    term_weights = build_term_weights(crm_df, dynamic_stopwords)
-    crm_records, domain_index, token_index = build_indexes(crm_df, dynamic_stopwords)
+    # --- TF-IDF and index construction v3.1 ---
+    term_weights = build_term_weights(crm_df)
+    crm_records, domain_index, token_index = build_indexes(crm_df)
     logger.info(f"Blocking: Domains={len(domain_index)} | TokenKeys={len(token_index)}")

     # --- Matching ---
@@ -304,35 +293,34 @@
         candidate_indices = set()
         used_block = ''

-        # Blocking via domain
         if mrow.get('normalized_domain'):
+            # ... (candidate lookup logic, slightly adapted for indices) ...
             candidates_from_domain = domain_index.get(mrow['normalized_domain'], [])
             for c in candidates_from_domain:
-                # find the index of the record to avoid duplicates
-                # this is inefficient; for exact index logic, the domain_index would have to store indices as well
-                for i, record in enumerate(crm_records):
-                    if record['CRM Name'] == c['CRM Name'] and record['CRM Website'] == c['CRM Website']:
-                        candidate_indices.add(i)
-                        break
+                try:
+                    # find the index of the record (made more robust)
+                    indices = crm_df.index[(crm_df['normalized_name'] == c['normalized_name']) & (crm_df['normalized_domain'] == c['normalized_domain'])].tolist()
+                    if indices:
+                        candidate_indices.add(indices[0])
+                except Exception:
+                    continue # ignore errors during the index lookup
             if candidate_indices:
                 used_block = f"domain:{mrow['normalized_domain']}"
-
-        # Blocking via the rarest token
+
         if not candidate_indices:
-            rtok = choose_rarest_token(mrow.get('normalized_name',''), term_weights, dynamic_stopwords)
+            rtok = choose_rarest_token(mrow.get('normalized_name',''), term_weights)
             if rtok:
                 indices_from_token = token_index.get(rtok, [])
                 candidate_indices.update(indices_from_token)
                 used_block = f"token:{rtok}"

-        # Prefilter as fallback
         if not candidate_indices:
             pf = []
             n1 = mrow.get('normalized_name','')
-            clean1, _ = clean_name_for_scoring(n1, dynamic_stopwords)
+            clean1, _ = clean_name_for_scoring(n1)
             if clean1:
                 for i, r in enumerate(crm_records):
                     n2 = r.get('normalized_name','')
-                    clean2, _ = clean_name_for_scoring(n2, dynamic_stopwords)
+                    clean2, _ = clean_name_for_scoring(n2)
                     if not clean2: continue
                     pr = fuzz.partial_ratio(clean1, clean2)
                     if pr >= PREFILTER_MIN_PARTIAL:
@@ -349,7 +337,7 @@

         scored = []
         for cr in candidates:
-            score, comp = calculate_similarity(mrow, cr, term_weights, dynamic_stopwords)
+            score, comp = calculate_similarity(mrow, cr, term_weights)
             scored.append({'name': cr.get('CRM Name',''), 'score': score, 'comp': comp, 'record': cr})

         scored.sort(key=lambda x: x['score'], reverse=True)
@@ -358,11 +346,12 @@

         best_match = scored[0] if scored else None

-        # --- NEW: interactive mode ---
+        # --- Interactive mode (logic unchanged) ---
         if interactive and best_match and len(scored) > 1:
             best_score = best_match['score']
             second_best_score = scored[1]['score']
-            if best_score > INTERACTIVE_SCORE_MIN and (best_score - second_best_score) < INTERACTIVE_SCORE_DIFF:
+            if best_score > INTERACTIVE_SCORE_MIN and (best_score - second_best_score) < INTERACTIVE_SCORE_DIFF and best_score < GOLDEN_MATCH_SCORE:
+                # ... (interactive-mode output and input, unchanged) ...
print("\n" + "="*50) print(f"AMBIGUOUS MATCH for '{mrow['CRM Name']}'") print(f"Top candidates have very similar scores.") @@ -385,13 +374,12 @@ def main(job_id=None, interactive=False): best_match = scored[choice-1] logger.info(f"User selected candidate {choice}: '{best_match['name']}'") elif choice == 0: - best_match = None # User decided no match + best_match = None logger.info("User selected no match.") print("="*50 + "\n") if best_match and best_match['score'] >= SCORE_THRESHOLD: - # Schwache Matches (ohne Domain/Ort) brauchen höheren Threshold is_weak = best_match['comp'].get('domain_match', 0) == 0 and not (best_match['comp'].get('city_match', 0) and best_match['comp'].get('country_match', 0)) applied_threshold = SCORE_THRESHOLD_WEAK if is_weak else SCORE_THRESHOLD @@ -409,17 +397,17 @@ def main(job_id=None, interactive=False): logger.info(f" --> No Match (no candidates)") - # --- Ergebnisse zurückschreiben --- + # --- Ergebnisse zurückschreiben (Logik unverändert) --- logger.info("Matching-Prozess abgeschlossen. Bereite Ergebnisse für den Upload vor...") + # ... (Rest des Codes bleibt gleich) ... update_status(job_id, "Läuft", "Schreibe Ergebnisse zurück ins Sheet...") result_df = pd.DataFrame(results) - # Löschen der Ergebnisspalten, falls sie bereits im Sheet existieren cols_to_drop_from_match = ['Match', 'Score', 'Match_Grund'] match_df_clean = match_df.drop(columns=[col for col in cols_to_drop_from_match if col in match_df.columns], errors='ignore') - final_df = pd.concat([match_df_clean, result_df], axis=1) + final_df = pd.concat([match_df_clean.reset_index(drop=True), result_df.reset_index(drop=True)], axis=1) cols_to_drop = ['normalized_name', 'normalized_domain'] final_df = final_df.drop(columns=[col for col in cols_to_drop if col in final_df.columns], errors='ignore') @@ -439,7 +427,7 @@ def main(job_id=None, interactive=False): update_status(job_id, "Fehlgeschlagen", "Fehler beim Schreiben ins Google Sheet.") if __name__=='__main__': - parser = argparse.ArgumentParser(description="Duplicate Checker v3.0") + parser = argparse.ArgumentParser(description="Duplicate Checker v3.1") parser.add_argument("--job-id", type=str, help="Eindeutige ID für den Job-Status.") parser.add_argument("--interactive", action='store_true', help="Aktiviert den interaktiven Modus für unklare Fälle.") args = parser.parse_args()