From cb76d4592ac522a1d39b5eebdff05a41272d231f Mon Sep 17 00:00:00 2001
From: Floke
Date: Mon, 4 Aug 2025 05:44:23 +0000
Subject: [PATCH] duplicate_checker.py updated

---
 duplicate_checker.py | 118 ++++++++++++++++++++-----------------------
 1 file changed, 54 insertions(+), 64 deletions(-)

diff --git a/duplicate_checker.py b/duplicate_checker.py
index 7bc38e25..3e0de2f7 100644
--- a/duplicate_checker.py
+++ b/duplicate_checker.py
@@ -1,4 +1,4 @@
-# duplicate_checker.py (v2.0 - maximum logging)
+# duplicate_checker.py (v1.1 - run 1 logic + match reason)
 
 import logging
 import pandas as pd
@@ -11,7 +11,7 @@ import time
 # --- Configuration ---
 CRM_SHEET_NAME = "CRM_Accounts"
 MATCHING_SHEET_NAME = "Matching_Accounts"
-SCORE_THRESHOLD = 80
+SCORE_THRESHOLD = 80  # hits below this value are not reported as a "potential match"
 
 # --- COMPLETE LOGGING SETUP ---
 LOG_LEVEL = logging.DEBUG if Config.DEBUG else logging.INFO
@@ -20,38 +20,54 @@ LOG_FORMAT = '%(asctime)s - %(levelname)-8s - %(name)s - %(message)s'
 root_logger = logging.getLogger()
 root_logger.setLevel(LOG_LEVEL)
 
-for handler in root_logger.handlers[:]:
-    root_logger.removeHandler(handler)
+# Only add handlers if none have been configured yet
+if not root_logger.handlers:
+    stream_handler = logging.StreamHandler()
+    stream_handler.setFormatter(logging.Formatter(LOG_FORMAT))
+    root_logger.addHandler(stream_handler)
 
-stream_handler = logging.StreamHandler()
-stream_handler.setFormatter(logging.Formatter(LOG_FORMAT))
-root_logger.addHandler(stream_handler)
-
-log_file_path = create_log_filename("duplicate_check_v2_0")
-if log_file_path:
-    file_handler = logging.FileHandler(log_file_path, mode='a', encoding='utf-8')
-    file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
-    root_logger.addHandler(file_handler)
+    log_file_path = create_log_filename("duplicate_check_final")
+    if log_file_path:
+        file_handler = logging.FileHandler(log_file_path, mode='a', encoding='utf-8')
+        file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
+        root_logger.addHandler(file_handler)
+else:
+    # Recover the file path from the already-configured handler
+    log_file_path = None
+    for handler in root_logger.handlers:
+        if isinstance(handler, logging.FileHandler):
+            log_file_path = handler.baseFilename
+            break
 
 logger = logging.getLogger(__name__)
 
 def calculate_similarity_with_details(record1, record2):
     """
-    Calculates a weighted similarity score between two records
-    and returns the details for the reasoning.
+    Calculates a weighted similarity score and returns the score and the reason.
+    This is the original scoring logic from run 1.
     """
     scores = {'name': 0, 'location': 0, 'domain': 0}
 
-    if record1['normalized_domain'] and record1['normalized_domain'] != 'k.a.' and record1['normalized_domain'] == record2['normalized_domain']:
+    # Domain match (100 points)
+    domain1 = record1.get('normalized_domain')
+    domain2 = record2.get('normalized_domain')
+    if domain1 and domain1 != 'k.a.' and domain1 == domain2:
         scores['domain'] = 100
 
-    if record1['normalized_name'] and record2['normalized_name']:
-        name_similarity = fuzz.token_set_ratio(record1['normalized_name'], record2['normalized_name'])
+    # Name similarity (70% weighting)
+    name1 = record1.get('normalized_name')
+    name2 = record2.get('normalized_name')
+    if name1 and name2:
+        name_similarity = fuzz.token_set_ratio(name1, name2)
         scores['name'] = round(name_similarity * 0.7)
 
-    if record1['CRM Ort'] and record1['CRM Ort'] == record2['CRM Ort']:
-        if record1['CRM Land'] and record1['CRM Land'] == record2['CRM Land']:
-            scores['location'] = 20
+    # Location bonus (20 points)
+    ort1 = record1.get('CRM Ort')
+    ort2 = record2.get('CRM Ort')
+    land1 = record1.get('CRM Land')
+    land2 = record2.get('CRM Land')
+    if ort1 and ort1 == ort2 and land1 and land1 == land2:
+        scores['location'] = 20
 
     total_score = sum(scores.values())
 
@@ -64,26 +80,23 @@ def calculate_similarity_with_details(record1, record2):
     return round(total_score), reason_text
 
 def main():
-    """Main function for loading, comparing, and writing the data."""
     start_time = time.time()
-    logger.info("Starting the duplicate check (v2.0 with blocking and maximum logging)...")
+    logger.info("Starting the duplicate check (v1.1 - brute force with match reason)...")
     logger.info(f"Log file: {log_file_path}")
 
     try:
         sheet_handler = GoogleSheetHandler()
     except Exception as e:
-        logger.critical(f"ERROR initializing the GoogleSheetHandler: {e}")
+        logger.critical(f"ERROR during initialization: {e}")
         return
 
     logger.info(f"Loading master data from '{CRM_SHEET_NAME}'...")
     crm_df = sheet_handler.get_sheet_as_dataframe(CRM_SHEET_NAME)
-    if crm_df is None or crm_df.empty:
-        return
+    if crm_df is None or crm_df.empty: return
 
     logger.info(f"Loading the data to check from '{MATCHING_SHEET_NAME}'...")
     matching_df = sheet_handler.get_sheet_as_dataframe(MATCHING_SHEET_NAME)
-    if matching_df is None or matching_df.empty:
-        return
+    if matching_df is None or matching_df.empty: return
 
     original_matching_df = matching_df.copy()
     logger.info("Normalizing data for the comparison...")
@@ -92,52 +105,29 @@ def main():
         df['normalized_domain'] = df['CRM Website'].astype(str).apply(simple_normalize_url)
         df['CRM Ort'] = df['CRM Ort'].astype(str).str.lower().str.strip()
         df['CRM Land'] = df['CRM Land'].astype(str).str.lower().str.strip()
-        df['block_key'] = df['normalized_name'].apply(lambda x: x.split()[0] if x and x.split() else None)
 
-    logger.info("Building an index over the CRM data to speed things up...")
-    crm_index = {}
     crm_records = crm_df.to_dict('records')
-    for record in crm_records:
-        key = record['block_key']
-        if key:
-            if key not in crm_index:
-                crm_index[key] = []
-            crm_index[key].append(record)
-
-    logger.info("Starting the matching process...")
+    matching_records = matching_df.to_dict('records')
+
+    logger.info(f"Starting the matching process: {len(matching_records)} entries will be compared against {len(crm_records)} CRM entries...")
     results = []
 
-    for match_record in matching_df.to_dict('records'):
+    for i, match_record in enumerate(matching_records):
         best_score = -1
         best_match_name = ""
         best_reason = ""
 
-        logger.info(f"--- Checking: '{match_record.get('CRM Name', 'N/A')}' ---")
-        logger.debug(f"  [Normalized: '{match_record.get('normalized_name')}', domain: '{match_record.get('normalized_domain')}', key: '{match_record.get('block_key')}']")
-
-        block_key = match_record.get('block_key')
-        candidates = crm_index.get(block_key, [])
+        logger.info(f"--- Checking {i + 1}/{len(matching_records)}: '{match_record.get('CRM Name', 'N/A')}' ---")
 
-        if not candidates:
-            logger.debug("  -> No candidates found in the index. Skipping comparison.")
-            results.append({
-                'Potenzieller Treffer im CRM': "", 'Ähnlichkeits-Score': 0, 'Matching-Grund': "No candidates"
-            })
-            continue
-
-        logger.debug(f"  -> Comparing against {len(candidates)} candidates from block '{block_key}'.")
-
-        for crm_row in candidates:
-            score, reason = calculate_similarity_with_details(match_record, crm_row)
-
-            if score > 0:  # Log every comparison that yields a score > 0
-                logger.debug(f"  - Candidate: '{crm_row.get('CRM Name', 'N/A')}' -> score: {score} (reason: {reason})")
-
+        # Brute-force comparison: every row is compared against every CRM row
+        for crm_record in crm_records:
+            score, reason = calculate_similarity_with_details(match_record, crm_record)
             if score > best_score:
                 best_score = score
-                best_match_name = crm_row.get('CRM Name', 'N/A')
+                best_match_name = crm_record.get('CRM Name', 'N/A')
                 best_reason = reason
-                logger.info(f"  --> New best match: '{best_match_name}' with score {best_score}")
+
+        logger.info(f"  --> Best match: '{best_match_name}' with score {best_score} (reason: {best_reason})")
 
         results.append({
             'Potenzieller Treffer im CRM': best_match_name if best_score >= SCORE_THRESHOLD else "",
@@ -154,9 +144,9 @@ def main():
     success = sheet_handler.clear_and_write_data(MATCHING_SHEET_NAME, data_to_write)
 
     if success:
-        logging.info(f"Results successfully written to '{MATCHING_SHEET_NAME}'.")
+        logger.info(f"Results successfully written to '{MATCHING_SHEET_NAME}'.")
     else:
-        logging.error("ERROR writing the results to the Google Sheet.")
+        logger.error("ERROR writing the results to the Google Sheet.")
 
     end_time = time.time()
     logger.info(f"Total duration of the duplicate check: {end_time - start_time:.2f} seconds.")
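
A minimal worked example of the v1.1 scoring above, for reference. The two records and the file name are hypothetical; the sketch assumes it runs next to duplicate_checker.py (so the module and its helpers import cleanly) and that `fuzz` is rapidfuzz's or fuzzywuzzy's token_set_ratio, both of which return 100 when one token set is a subset of the other, as below:

    # sanity_check.py (hypothetical) - worked example of calculate_similarity_with_details
    from duplicate_checker import calculate_similarity_with_details

    # Hypothetical, already-normalized records; 'k.a.' would mark a missing domain.
    record_a = {'normalized_name': 'acme gmbh', 'normalized_domain': 'acme.de',
                'CRM Ort': 'berlin', 'CRM Land': 'deutschland'}
    record_b = {'normalized_name': 'acme', 'normalized_domain': 'acme.de',
                'CRM Ort': 'berlin', 'CRM Land': 'deutschland'}

    score, reason = calculate_similarity_with_details(record_a, record_b)
    # Expected: domain match (100) + round(token_set_ratio * 0.7) = 70 + location bonus (20) = 190,
    # comfortably above SCORE_THRESHOLD (80), so this pair would surface as a potential match.
    print(score, reason)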