diff --git a/duplicate_checker.py b/duplicate_checker.py
index 31955527..1bf95a45 100644
--- a/duplicate_checker.py
+++ b/duplicate_checker.py
@@ -1,4 +1,4 @@
-# duplicate_checker.py (v2.6 - Final Optimized Brute-Force)
+# duplicate_checker.py (v2.0 - Enhanced Transparency)
 import logging
 import pandas as pd
 
@@ -6,53 +6,68 @@ from thefuzz import fuzz
 from config import Config
 from helpers import normalize_company_name, simple_normalize_url
 from google_sheet_handler import GoogleSheetHandler
-import time
 
 # --- Configuration ---
 CRM_SHEET_NAME = "CRM_Accounts"
 MATCHING_SHEET_NAME = "Matching_Accounts"
-SCORE_THRESHOLD = 85  # Hits below this value are not shown as a "potential match"
+SCORE_THRESHOLD = 80
 
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
-def calculate_similarity_details(record1, record2):
-    """Computes a weighted similarity score and returns the details."""
+def calculate_similarity_with_details(record1, record2):
+    """
+    Computes a weighted similarity score between two records
+    and returns the details used to justify the match.
+    """
     scores = {'name': 0, 'location': 0, 'domain': 0}
 
-    # Domain match (highest priority)
-    if record1.get('normalized_domain') and record1['normalized_domain'] != 'k.a.' and record1['normalized_domain'] == record2.get('normalized_domain'):
+    # 1. Website domain (strongest signal)
+    if record1['normalized_domain'] and record1['normalized_domain'] != 'k.a.' and record1['normalized_domain'] == record2['normalized_domain']:
         scores['domain'] = 100
 
-    # Name similarity (high weight)
-    if record1.get('normalized_name') and record2.get('normalized_name'):
-        # token_set_ratio copes well with different word order and extra words
-        scores['name'] = round(fuzz.token_set_ratio(record1['normalized_name'], record2['normalized_name']) * 0.85)
+    # 2. Company name (fuzzy signal)
+    if record1['normalized_name'] and record2['normalized_name']:
+        name_similarity = fuzz.token_set_ratio(record1['normalized_name'], record2['normalized_name'])
+        scores['name'] = round(name_similarity * 0.7)
 
-    # Location bonus
-    if record1.get('CRM Ort') and record1['CRM Ort'] == record2.get('CRM Ort'):
-        if record1.get('CRM Land') and record1['CRM Land'] == record2.get('CRM Land'):
+    # 3. Location (confirmation signal)
+    if record1['CRM Ort'] and record1['CRM Ort'] == record2['CRM Ort']:
+        if record1['CRM Land'] and record1['CRM Land'] == record2['CRM Land']:
             scores['location'] = 20
-    
+
     total_score = sum(scores.values())
-    return {'total': total_score, 'details': scores}
+
+    # Build the justification text
+    reasons = []
+    if scores['domain'] > 0: reasons.append(f"Domain({scores['domain']})")
+    if scores['name'] > 0: reasons.append(f"Name({scores['name']})")
+    if scores['location'] > 0: reasons.append(f"Location({scores['location']})")
+    reason_text = " + ".join(reasons) if reasons else "No match"
+
+    return round(total_score), reason_text
 
 def main():
-    start_time = time.time()
-    logging.info("Starting duplicate check (v2.6 - Final Optimized Brute-Force)...")
+    """Main function for loading, comparing, and writing the data."""
+    logging.info("Starting duplicate check (v2.0 with extended output)...")
     try:
         sheet_handler = GoogleSheetHandler()
     except Exception as e:
-        logging.critical(f"ERROR during initialization: {e}")
+        logging.critical(f"ERROR during initialization of the GoogleSheetHandler: {e}")
         return
 
     logging.info(f"Loading master data from '{CRM_SHEET_NAME}'...")
     crm_df = sheet_handler.get_sheet_as_dataframe(CRM_SHEET_NAME)
-    if crm_df is None or crm_df.empty: return
+    if crm_df is None or crm_df.empty:
+        logging.critical(f"Could not load data from '{CRM_SHEET_NAME}'. Aborting.")
+        return
 
     logging.info(f"Loading data to be checked from '{MATCHING_SHEET_NAME}'...")
     matching_df = sheet_handler.get_sheet_as_dataframe(MATCHING_SHEET_NAME)
-    if matching_df is None or matching_df.empty: return
+    if matching_df is None or matching_df.empty:
+        logging.critical(f"Could not load data from '{MATCHING_SHEET_NAME}'. Aborting.")
+        return
 
+    # Keep a copy for the final output
     original_matching_df = matching_df.copy()
 
     logging.info("Normalizing data for the comparison...")
@@ -61,49 +76,58 @@ def main():
         df['normalized_domain'] = df['CRM Website'].astype(str).apply(simple_normalize_url)
         df['CRM Ort'] = df['CRM Ort'].astype(str).str.lower().str.strip()
         df['CRM Land'] = df['CRM Land'].astype(str).str.lower().str.strip()
+        df['block_key'] = df['normalized_name'].apply(lambda x: x.split()[0] if x and x.split() else None)
 
+    logging.info("Building an index over the CRM data for speed...")
+    crm_index = {}
+    # Convert to records for faster access
     crm_records = crm_df.to_dict('records')
-    matching_records = matching_df.to_dict('records')
-
-    logging.info(f"Starting matching process: comparing {len(matching_records)} entries against {len(crm_records)} CRM entries...")
+    for record in crm_records:
+        key = record['block_key']
+        if key:
+            if key not in crm_index:
+                crm_index[key] = []
+            crm_index[key].append(record)
+
+    logging.info("Starting matching process...")
     results = []
-    for i, match_record in enumerate(matching_records):
-        best_score_info = {'total': 0, 'details': {'name': 0, 'location': 0, 'domain': 0}}
+    for match_record in matching_df.to_dict('records'):
+        best_score = -1
         best_match_name = ""
+        best_reason = ""
 
-        logging.info(f"Checking {i + 1}/{len(matching_records)}: {match_record.get('CRM Name', 'N/A')}...")
+        logging.info(f"Checking: {match_record.get('CRM Name', 'N/A')}...")
+
+        block_key = match_record.get('block_key')
+        candidates = crm_index.get(block_key, [])
 
-        # Brute-force comparison: every row is compared against every CRM row
-        for crm_record in crm_records:
-            score_info = calculate_similarity_details(match_record, crm_record)
-            if score_info['total'] > best_score_info['total']:
-                best_score_info = score_info
-                best_match_name = crm_record.get('CRM Name', 'N/A')
+        for crm_row in candidates:
+            score, reason = calculate_similarity_with_details(match_record, crm_row)
+            if score > best_score:
+                best_score = score
+                best_match_name = crm_row.get('CRM Name', 'N/A')
+                best_reason = reason
 
         results.append({
-            'Potential CRM Match': best_match_name if best_score_info['total'] >= SCORE_THRESHOLD else "",
-            'Score (Total)': best_score_info['total'],
-            'Score (Name)': best_score_info['details']['name'],
-            'Bonus (Location)': best_score_info['details']['location'],
-            'Bonus (Domain)': best_score_info['details']['domain']
+            'Potential CRM Match': best_match_name if best_score >= SCORE_THRESHOLD else "",
+            'Similarity Score': best_score,
+            'Match Reason': best_reason
        })
 
    logging.info("Matching finished. Writing results back to the sheet...")
    result_df = pd.DataFrame(results)
+    # Add the new columns to the original data
    output_df = pd.concat([original_matching_df.reset_index(drop=True), result_df], axis=1)
-    
+
    data_to_write = [output_df.columns.values.tolist()] + output_df.values.tolist()
-    
+
    success = sheet_handler.clear_and_write_data(MATCHING_SHEET_NAME, data_to_write)
    if success:
-        logging.info(f"Results successfully written to '{MATCHING_SHEET_NAME}'.")
+        logging.info(f"Results successfully written to the worksheet '{MATCHING_SHEET_NAME}'.")
    else:
        logging.error("ERROR while writing the results to the Google Sheet.")
 
-    end_time = time.time()
-    logging.info(f"Total duration of the duplicate check: {end_time - start_time:.2f} seconds.")
-
 if __name__ == "__main__":
     main()
\ No newline at end of file
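
Two properties of `fuzz.token_set_ratio` carry the entire name signal and are worth a quick illustration. A minimal sketch with invented company names (requires only `thefuzz`):

```python
from thefuzz import fuzz

# Same tokens in a different order: the plain ratio drops,
# the token-set ratio still scores a perfect match.
print(fuzz.ratio("acme holding gmbh", "gmbh acme holding"))            # well below 100
print(fuzz.token_set_ratio("acme holding gmbh", "gmbh acme holding"))  # 100

# One name is a token subset of the other: also a perfect match.
# This is deliberately generous -- "acme" matches any "acme <suffix>".
print(fuzz.token_set_ratio("acme", "acme holding gmbh"))               # 100
```

The subset behavior is also why damping the name score (the 0.7 factor) matters: undamped, every short name would clear the threshold against every longer name sharing its tokens.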
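A quick smoke test for `calculate_similarity_with_details`, using two hypothetical, already-normalized records (all field values invented; assumes `duplicate_checker`'s own imports resolve so the module can be imported):

```python
from duplicate_checker import calculate_similarity_with_details

a = {'normalized_name': 'acme robotics', 'normalized_domain': 'acme.de',
     'CRM Ort': 'berlin', 'CRM Land': 'deutschland'}
b = {'normalized_name': 'acme robotics gmbh', 'normalized_domain': 'acme.de',
     'CRM Ort': 'berlin', 'CRM Land': 'deutschland'}

# token_set_ratio is 100 here (subset match), so the parts are
# 100 (domain) + round(100 * 0.7) = 70 (name) + 20 (location).
score, reason = calculate_similarity_with_details(a, b)
print(score)   # 190
print(reason)  # Domain(100) + Name(70) + Location(20)
```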
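Note the resulting scale: the three parts sum to at most 100 + 70 + 20 = 190, not 100. Against SCORE_THRESHOLD = 80 that means a domain match alone is always enough, while the name signal alone never is (it is capped at round(100 * 0.7) = 70). Name plus location only clears the bar from a token_set_ratio of roughly 86 upwards (round(86 * 0.7) + 20 = 80), so for records without a usable domain the effective gatekeeper is a near-identical name in the same city and country.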
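The speed-up over the brute-force version comes entirely from the blocking index: only CRM records whose normalized name shares the same first token as the incoming record are ever scored. The trade-off is recall: a true duplicate whose name starts with a different token is never considered at all, and such records end up with a Similarity Score of -1 and an empty reason in the output. A toy sketch of the bucketing (names invented):

```python
from collections import defaultdict

crm_names = ["acme robotics gmbh", "acme systems", "globex ag"]

# Mirror of the crm_index construction: bucket by first name token.
index = defaultdict(list)
for name in crm_names:
    index[name.split()[0]].append(name)

print(index["acme"])              # ['acme robotics gmbh', 'acme systems']
print(index.get("robotics", []))  # [] -- a reordered "robotics acme" sees no candidates
```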