duplicate_checker.py updated

2025-08-01 12:49:34 +00:00
parent a136050dc2
commit 6fd5c486bf


@@ -1,4 +1,4 @@
-# duplicate_checker.py (v2.0 - Enhanced Transparency)
+# duplicate_checker.py (v2.2 - Multi-Key Blocking & Optimized Scoring)
 import logging
 import pandas as pd
@@ -6,68 +6,69 @@ from thefuzz import fuzz
 from config import Config
 from helpers import normalize_company_name, simple_normalize_url
 from google_sheet_handler import GoogleSheetHandler
+from collections import defaultdict

 # --- Configuration ---
 CRM_SHEET_NAME = "CRM_Accounts"
 MATCHING_SHEET_NAME = "Matching_Accounts"
-SCORE_THRESHOLD = 80
+SCORE_THRESHOLD = 85  # Slightly higher threshold for better precision

 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

-def calculate_similarity_with_details(record1, record2):
-    """
-    Computes a weighted similarity score between two records
-    and returns the details for the explanation text.
-    """
+def calculate_similarity_details(record1, record2):
+    """Computes a weighted similarity score and returns the details."""
     scores = {'name': 0, 'location': 0, 'domain': 0}

-    # 1. Website domain (strongest signal)
-    if record1['normalized_domain'] and record1['normalized_domain'] != 'k.a.' and record1['normalized_domain'] == record2['normalized_domain']:
+    if record1.get('normalized_domain') and record1['normalized_domain'] != 'k.a.' and record1['normalized_domain'] == record2.get('normalized_domain'):
         scores['domain'] = 100

-    # 2. Company name (fuzzy signal)
-    if record1['normalized_name'] and record2['normalized_name']:
-        name_similarity = fuzz.token_set_ratio(record1['normalized_name'], record2['normalized_name'])
-        scores['name'] = round(name_similarity * 0.7)
+    # Weight the name higher, since the website is often missing
+    if record1.get('normalized_name') and record2.get('normalized_name'):
+        scores['name'] = round(fuzz.token_set_ratio(record1['normalized_name'], record2['normalized_name']) * 0.85)

-    # 3. Location (confirmation signal)
-    if record1['CRM Ort'] and record1['CRM Ort'] == record2['CRM Ort']:
-        if record1['CRM Land'] and record1['CRM Land'] == record2['CRM Land']:
+    if record1.get('CRM Ort') and record1['CRM Ort'] == record2.get('CRM Ort'):
+        if record1.get('CRM Land') and record1['CRM Land'] == record2.get('CRM Land'):
             scores['location'] = 20

-    total_score = sum(scores.values())
-
-    # Build the explanation text
-    reasons = []
-    if scores['domain'] > 0: reasons.append(f"Domain({scores['domain']})")
-    if scores['name'] > 0: reasons.append(f"Name({scores['name']})")
-    if scores['location'] > 0: reasons.append(f"Location({scores['location']})")
-    reason_text = " + ".join(reasons) if reasons else "No match"
-
-    return round(total_score), reason_text
+    total_score = sum(scores.values())
+    return {'total': total_score, 'details': scores}
+
+def create_blocking_keys(name):
+    """Creates several blocking keys for one name to increase sensitivity."""
+    if not name:
+        return []
+    words = name.split()
+    keys = set()
+    # 1. First word
+    if len(words) > 0:
+        keys.add(words[0])
+    # 2. Second word (if present)
+    if len(words) > 1:
+        keys.add(words[1])
+    # 3. First 4 letters of the first word
+    if len(words) > 0 and len(words[0]) >= 4:
+        keys.add(words[0][:4])
+    return list(keys)

 def main():
-    """Main function for loading, comparing, and writing the data."""
-    logging.info("Starting the duplicate check (v2.0 with extended output)...")
+    logging.info("Starting the duplicate check (v2.2 with multi-key blocking)...")
     try:
         sheet_handler = GoogleSheetHandler()
     except Exception as e:
-        logging.critical(f"ERROR while initializing the GoogleSheetHandler: {e}")
+        logging.critical(f"ERROR during initialization: {e}")
         return

     logging.info(f"Loading master data from '{CRM_SHEET_NAME}'...")
     crm_df = sheet_handler.get_sheet_as_dataframe(CRM_SHEET_NAME)
-    if crm_df is None or crm_df.empty:
-        logging.critical(f"Could not load any data from '{CRM_SHEET_NAME}'. Aborting.")
-        return
+    if crm_df is None or crm_df.empty: return

     logging.info(f"Loading the data to check from '{MATCHING_SHEET_NAME}'...")
     matching_df = sheet_handler.get_sheet_as_dataframe(MATCHING_SHEET_NAME)
-    if matching_df is None or matching_df.empty:
-        logging.critical(f"Could not load any data from '{MATCHING_SHEET_NAME}'. Aborting.")
-        return
+    if matching_df is None or matching_df.empty: return

-    # Keep a copy for the final output
     original_matching_df = matching_df.copy()

     logging.info("Normalizing data for comparison...")
@@ -76,56 +77,55 @@ def main():
         df['normalized_domain'] = df['CRM Website'].astype(str).apply(simple_normalize_url)
         df['CRM Ort'] = df['CRM Ort'].astype(str).str.lower().str.strip()
         df['CRM Land'] = df['CRM Land'].astype(str).str.lower().str.strip()
-        df['block_key'] = df['normalized_name'].apply(lambda x: x.split()[0] if x and x.split() else None)
+        df['block_keys'] = df['normalized_name'].apply(create_blocking_keys)

     logging.info("Building an index over the CRM data to speed things up...")
-    crm_index = {}
-    # Convert to records for faster access
-    crm_records = crm_df.to_dict('records')
-    for record in crm_records:
-        key = record['block_key']
-        if key:
-            if key not in crm_index:
-                crm_index[key] = []
-            crm_index[key].append(record)
+    crm_index = defaultdict(list)
+    for record in crm_df.to_dict('records'):
+        for key in record['block_keys']:
+            crm_index[key].append(record)

     logging.info("Starting the matching process...")
     results = []
     for match_record in matching_df.to_dict('records'):
-        best_score = -1
+        best_score_info = {'total': 0, 'details': {'name': 0, 'location': 0, 'domain': 0}}
         best_match_name = ""
-        best_reason = ""
-        logging.info(f"Checking: {match_record.get('CRM Name', 'N/A')}...")
+        logging.info(f"Checking: {match_record['CRM Name']}...")

-        block_key = match_record.get('block_key')
-        candidates = crm_index.get(block_key, [])
+        candidate_pool = {}
+        for key in match_record['block_keys']:
+            for crm_record in crm_index.get(key, []):
+                candidate_pool[crm_record['CRM Name']] = crm_record

-        for crm_row in candidates:
-            score, reason = calculate_similarity_with_details(match_record, crm_row)
-            if score > best_score:
-                best_score = score
-                best_match_name = crm_row.get('CRM Name', 'N/A')
-                best_reason = reason
+        if not candidate_pool:
+            logging.debug(" -> No candidates found in the index.")
+
+        for crm_record in candidate_pool.values():
+            score_info = calculate_similarity_details(match_record, crm_record)
+            if score_info['total'] > best_score_info['total']:
+                best_score_info = score_info
+                best_match_name = crm_record['CRM Name']

         results.append({
-            'Potenzieller Treffer im CRM': best_match_name if best_score >= SCORE_THRESHOLD else "",
-            'Ähnlichkeits-Score': best_score,
-            'Matching-Grund': best_reason
+            'Potenzieller Treffer im CRM': best_match_name if best_score_info['total'] >= SCORE_THRESHOLD else "",
+            'Score (Gesamt)': best_score_info['total'],
+            'Score (Name)': best_score_info['details']['name'],
+            'Bonus (Standort)': best_score_info['details']['location'],
+            'Bonus (Domain)': best_score_info['details']['domain']
         })

     logging.info("Matching complete. Writing results back to the sheet...")
     result_df = pd.DataFrame(results)
-    # Append the new columns to the original data
     output_df = pd.concat([original_matching_df.reset_index(drop=True), result_df], axis=1)
     data_to_write = [output_df.columns.values.tolist()] + output_df.values.tolist()

     success = sheet_handler.clear_and_write_data(MATCHING_SHEET_NAME, data_to_write)
     if success:
-        logging.info(f"Results successfully written to the worksheet '{MATCHING_SHEET_NAME}'.")
+        logging.info(f"Results successfully written to '{MATCHING_SHEET_NAME}'.")
     else:
         logging.error("ERROR writing the results to the Google Sheet.")