From cfd1e8b5365292cf0c9bed59100e7440d788b5a1 Mon Sep 17 00:00:00 2001
From: Floke
Date: Wed, 6 Aug 2025 12:50:12 +0000
Subject: [PATCH] duplicate_checker.py updated

---
 duplicate_checker.py | 115 +++++++++++++++++++++----------------------
 1 file changed, 55 insertions(+), 60 deletions(-)

diff --git a/duplicate_checker.py b/duplicate_checker.py
index 394701e3..d472ce2e 100644
--- a/duplicate_checker.py
+++ b/duplicate_checker.py
@@ -7,125 +7,120 @@ from thefuzz import fuzz
 from helpers import normalize_company_name, simple_normalize_url
 from google_sheet_handler import GoogleSheetHandler
 
-# duplicate_checker.py v2.1 (with extended logging and date in the file name)
-# Version: 2025-08-06_12-33
+# duplicate_checker.py v2.2 (domain fallback & name partial bonus for better matches)
+# Version: 2025-08-06_15-20
 
 # --- Configuration ---
-CRM_SHEET_NAME      = "CRM_Accounts"
+CRM_SHEET_NAME = "CRM_Accounts"
 MATCHING_SHEET_NAME = "Matching_Accounts"
-SCORE_THRESHOLD     = 80  # scores at or above this count as a match
-LOG_DIR             = "Log"
+SCORE_THRESHOLD = 80
+LOG_DIR = "Log"
 
-# --- Logging setup with date in the file name ---
+# --- Logging setup ---
 if not os.path.exists(LOG_DIR):
     os.makedirs(LOG_DIR, exist_ok=True)
 now = datetime.now().strftime('%Y-%m-%d_%H-%M')
 log_path = os.path.join(LOG_DIR, f"{now}_Duplicate.log")
-
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)
-
-# Console handler (INFO+)
+# Console
 ch = logging.StreamHandler()
 ch.setLevel(logging.INFO)
 ch.setFormatter(logging.Formatter("%(asctime)s - %(levelname)-8s - %(message)s"))
 logger.addHandler(ch)
-
-# File handler (DEBUG+)
+# File
 fh = logging.FileHandler(log_path, mode='a', encoding='utf-8')
 fh.setLevel(logging.DEBUG)
 fh.setFormatter(logging.Formatter("%(asctime)s - %(levelname)-8s - %(name)s - %(message)s"))
 logger.addHandler(fh)
-
 logger.info(f"Logging to file: {log_path}")
+logger.info("Version: duplicate_checker.py v2.2 (domain fallback & partial bonus) | Build: 2025-08-06_15-20")
 
 def calculate_similarity(record1, record2):
-    """Compute a weighted similarity score between two records."""
+    """Weighted score with domain fallback and partial-name bonus."""
     total = 0
-    # Exact match on the normalized domain
-    dom1 = record1.get('normalized_domain', '')
-    dom2 = record2.get('normalized_domain', '')
+    # Domain exact
+    dom1 = record1.get('normalized_domain','')
+    dom2 = record2.get('normalized_domain','')
     if dom1 and dom1 == dom2:
         total += 100
-    # Name fuzzy (token-set ratio)
-    name1 = record1.get('normalized_name', '')
-    name2 = record2.get('normalized_name', '')
+    # Domain fallback: substring
+    elif dom1 and dom2 and (dom1 in dom2 or dom2 in dom1):
+        total += 50
+    # Name fuzzy, token-set
+    name1 = record1.get('normalized_name','')
+    name2 = record2.get('normalized_name','')
     if name1 and name2:
-        name_score = fuzz.token_set_ratio(name1, name2)
-        total += name_score * 0.7
-    # City + country exact
+        ts = fuzz.token_set_ratio(name1, name2)
+        total += ts * 0.7
+        # Partial-match bonus for short/divergent names
+        pr = fuzz.partial_ratio(name1, name2)
+        if pr >= 85:
+            total += 20
+    # City + country exact
     if record1.get('CRM Ort') == record2.get('CRM Ort') and record1.get('CRM Land') == record2.get('CRM Land'):
         total += 20
     return round(total)
 
 def main():
-    logger.info("Starting duplicate check (v2.0, core syntax as drafted)")
+    logger.info("Starting duplicate check v2.2 with domain fallback & partial bonus")
     try:
         sheet = GoogleSheetHandler()
         logger.info("GoogleSheetHandler initialized")
     except Exception as e:
logger.critical(f"FEHLER beim Init GoogleSheetHandler: {e}") + logger.critical(f"FEHLER Init GoogleSheetHandler: {e}") sys.exit(1) - # Daten einlesen crm_df = sheet.get_sheet_as_dataframe(CRM_SHEET_NAME) match_df = sheet.get_sheet_as_dataframe(MATCHING_SHEET_NAME) if crm_df is None or crm_df.empty or match_df is None or match_df.empty: - logger.critical("CRM- oder Matching-Daten fehlen. Abbruch.") + logger.critical("Daten fehlen. Abbruch.") return - logger.info(f"{len(crm_df)} CRM-Datensätze, {len(match_df)} Matching-Datensätze geladen") + logger.info(f"{len(crm_df)} CRM-Zeilen, {len(match_df)} Matching-Zeilen geladen") - # Normalisierung und Blocking-Key - for df, label in [(crm_df, 'CRM'), (match_df, 'Matching')]: - df['normalized_name'] = df['CRM Name'].astype(str).apply(normalize_company_name) + # Norm & Block-Key + for df, label in [(crm_df,'CRM'),(match_df,'Matching')]: + df['normalized_name'] = df['CRM Name'].astype(str).apply(normalize_company_name) df['normalized_domain'] = df['CRM Website'].astype(str).apply(simple_normalize_url) - df['CRM Ort'] = df['CRM Ort'].astype(str).str.lower().str.strip() - df['CRM Land'] = df['CRM Land'].astype(str).str.lower().str.strip() - df['block_key'] = df['normalized_name'].apply(lambda x: x.split()[0] if x else None) - logger.debug(f"{label}-Beispiel nach Normalisierung: {df.iloc[0][['normalized_name','normalized_domain','block_key']].to_dict()}") + df['CRM Ort'] = df['CRM Ort'].astype(str).str.lower().str.strip() + df['CRM Land'] = df['CRM Land'].astype(str).str.lower().str.strip() + df['block_key'] = df['normalized_name'].apply(lambda x: x.split()[0] if x else None) + logger.debug(f"{label}-Sample: {df.iloc[0][['normalized_name','normalized_domain','block_key']].to_dict()}") - # Blocking-Index + # Build index crm_index = {} for idx, row in crm_df.iterrows(): key = row['block_key'] if key: crm_index.setdefault(key, []).append(row) - logger.info(f"Blocking-Index erstellt: {len(crm_index)} Keys") + logger.info(f"Blocking-Index mit {len(crm_index)} Keys erstellt") - # Matching mit Log relevanter Kandidaten results = [] total = len(match_df) for i, mrow in match_df.iterrows(): key = mrow['block_key'] - candidates = crm_index.get(key, []) - logger.info(f"Prüfe {i+1}/{total}: '{mrow['CRM Name']}' (Key='{key}') -> {len(candidates)} Kandidaten") - if not candidates: - results.append({'Match': '', 'Score': 0}) - continue - # Scores sammeln - scored = [(crow['CRM Name'], calculate_similarity(mrow, crow)) for crow in candidates] - # Top 3 relevante Kandidaten loggen - top3 = sorted(scored, key=lambda x: x[1], reverse=True)[:3] - logger.debug(f" Top 3 Kandidaten: {top3}") - # Besten Treffer wählen - best_name, best_score = max(scored, key=lambda x: x[1]) - if best_score >= SCORE_THRESHOLD: - results.append({'Match': best_name, 'Score': best_score}) - logger.info(f" --> Match: '{best_name}' mit Score {best_score}") + cands = crm_index.get(key, []) + logger.info(f"Prüfe {i+1}/{total}: '{mrow['CRM Name']}' (Key='{key}') -> {len(cands)} Kandidaten") + if not cands: + results.append({'Match':'','Score':0}); continue + scored = [(crow['CRM Name'], calculate_similarity(mrow,crow)) for crow in cands] + top3 = sorted(scored, key=lambda x:x[1], reverse=True)[:3] + logger.debug(f" Top3: {top3}") + best, score = max(scored, key=lambda x:x[1]) + if score >= SCORE_THRESHOLD: + results.append({'Match':best,'Score':score}) + logger.info(f" --> Match: '{best}' ({score})") else: - results.append({'Match': '', 'Score': best_score}) - logger.info(f" --> Kein Match (höchster 
+            results.append({'Match':'','Score':score})
+            logger.info(f"  --> No match (score {score})")
 
-    # Write results back to the sheet
     out = pd.DataFrame(results)
     output = pd.concat([match_df[['CRM Name','CRM Website','CRM Ort','CRM Land']].reset_index(drop=True), out], axis=1)
     data = [output.columns.tolist()] + output.values.tolist()
     ok = sheet.clear_and_write_data(MATCHING_SHEET_NAME, data)
-    if ok:
-        logger.info("Results written successfully")
-    else:
-        logger.error("Error writing to the Google Sheet")
+    if ok: logger.info("Results written")
+    else: logger.error("Error writing to the sheet")
 
-if __name__ == '__main__':
+if __name__=='__main__':
     main()
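
A quick sketch of how the new v2.2 scoring behaves, for reviewers. The
calculate_similarity body restates the patched function above; the two
records are invented for illustration and mirror the normalized fields
that main() builds ('normalized_name', 'normalized_domain', 'CRM Ort',
'CRM Land'):

    # Worked example for the v2.2 scoring (records are invented).
    from thefuzz import fuzz

    def calculate_similarity(record1, record2):
        """Weighted score with domain fallback and partial-name bonus."""
        total = 0
        dom1 = record1.get('normalized_domain','')
        dom2 = record2.get('normalized_domain','')
        if dom1 and dom1 == dom2:
            total += 100  # exact domain match
        elif dom1 and dom2 and (dom1 in dom2 or dom2 in dom1):
            total += 50   # new in v2.2: substring fallback
        name1 = record1.get('normalized_name','')
        name2 = record2.get('normalized_name','')
        if name1 and name2:
            total += fuzz.token_set_ratio(name1, name2) * 0.7
            if fuzz.partial_ratio(name1, name2) >= 85:
                total += 20  # new in v2.2: partial-name bonus
        if record1.get('CRM Ort') == record2.get('CRM Ort') and record1.get('CRM Land') == record2.get('CRM Land'):
            total += 20
        return round(total)

    crm_row  = {'normalized_name': 'acme werkzeugbau', 'normalized_domain': 'acme.de',
                'CRM Ort': 'berlin', 'CRM Land': 'deutschland'}
    lead_row = {'normalized_name': 'acme', 'normalized_domain': 'shop.acme.de',
                'CRM Ort': 'hamburg', 'CRM Land': 'deutschland'}

    print(calculate_similarity(crm_row, lead_row))
    # -> 140: the substring fallback fires (+50, 'acme.de' is contained in
    # 'shop.acme.de'), token_set_ratio is 100 (one name is a token subset
    # of the other) weighted to +70, and partial_ratio is 100 so the +20
    # bonus applies; the cities differ, so no location points. Under v2.1
    # this pair scored only 70 and fell below SCORE_THRESHOLD = 80.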
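
Likewise a sketch of the blocking step, which this patch leaves unchanged
but which decides which pairs ever reach calculate_similarity: the block
key is the first token of the normalized name, so a matching row is only
scored against CRM rows whose normalized names start with the same word.
Rows are invented:

    # Blocking sketch (names are invented).
    crm_rows = [
        {'CRM Name': 'Acme Werkzeugbau GmbH', 'normalized_name': 'acme werkzeugbau'},
        {'CRM Name': 'Acme GmbH',             'normalized_name': 'acme'},
        {'CRM Name': 'Beta Logistik AG',      'normalized_name': 'beta logistik'},
    ]

    crm_index = {}
    for row in crm_rows:
        key = row['normalized_name'].split()[0] if row['normalized_name'] else None
        if key:
            crm_index.setdefault(key, []).append(row)

    print({k: len(v) for k, v in crm_index.items()})  # {'acme': 2, 'beta': 1}
    # A matching row with block_key 'acme' sees two candidates; 'beta
    # logistik' is never scored against it. The flip side: pairs whose
    # first tokens differ ('the acme' vs. 'acme') are never compared at
    # all -- that is the speed/recall trade-off blocking makes.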