duplicate_checker.py updated
@@ -6,15 +6,15 @@ from thefuzz import fuzz
from helpers import normalize_company_name, simple_normalize_url
from google_sheet_handler import GoogleSheetHandler
# duplicate_checker.py v2.8 (match components in the log)
# Version: 2025-08-06_17-50
# duplicate_checker.py v2.9 (Bulletproof Name-Partial/SORT/SET + Bonus)
# Version: 2025-08-06_18-10

# --- Configuration ---
CRM_SHEET_NAME = "CRM_Accounts"
MATCHING_SHEET_NAME = "Matching_Accounts"
SCORE_THRESHOLD = 80
SCORE_THRESHOLD = 80  # score threshold
LOG_DIR = "Log"
LOG_FILE = "duplicate_check_v2.8.log"
LOG_FILE = "duplicate_check_v2.9.txt"

# --- Logging Setup ---
if not os.path.exists(LOG_DIR):
@@ -22,46 +22,60 @@ if not os.path.exists(LOG_DIR):
log_path = os.path.join(LOG_DIR, LOG_FILE)
root = logging.getLogger()
root.setLevel(logging.DEBUG)
# Remove old handlers
# Remove existing handlers
for h in list(root.handlers):
    root.removeHandler(h)
formatter = logging.Formatter("%(asctime)s - %(levelname)-8s - %(message)s")
# Console
# Console handler (INFO+)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
root.addHandler(ch)
# File
# File handler (DEBUG+)
fh = logging.FileHandler(log_path, mode='a', encoding='utf-8')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
root.addHandler(fh)
logger = logging.getLogger(__name__)
logger.info(f"Logging to console and file: {log_path}")
logger.info("Starting duplicate_checker.py v2.8 | Version: 2025-08-06_17-50")
logger.info("Starting duplicate_checker.py v2.9 | Version: 2025-08-06_18-10")

# --- Similarity calculation ---
def calculate_similarity(record1, record2):
    """Computes the score components: domain, name (SET, PARTIAL, SORT), location, and bonus."""
    # Domain exact match
    dom1 = record1.get('normalized_domain', '')
    dom2 = record2.get('normalized_domain', '')
    domain_flag = 1 if dom1 and dom1 == dom2 else 0
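    # Assumption: 'normalized_domain' and 'normalized_name' are filled in upstream
    # (presumably via simple_normalize_url / normalize_company_name from helpers)
    # before records reach this function.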

def calculate_similarity_components(r1, r2):
    """Returns the individual components and the total score."""
    # Domain
    dom1 = r1.get('normalized_domain', '')
    dom2 = r2.get('normalized_domain', '')
    domain_match = 1 if dom1 and dom1 == dom2 else 0
    # Name
    name1 = r1.get('normalized_name', '')
    name2 = r2.get('normalized_name', '')
    name_score = fuzz.token_set_ratio(name1, name2) if name1 and name2 else 0
    # City + country
    loc_match = 1 if (r1.get('CRM Ort') == r2.get('CRM Ort') and r1.get('CRM Land') == r2.get('CRM Land')) else 0
    # Weights
    total = domain_match * 100 + name_score * 0.7 + loc_match * 20
    return round(total), domain_match, round(name_score,1), loc_match
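    # Rough check of the v2.8 weighting (illustrative numbers): a perfect name-only match
    # tops out at 100 * 0.7 = 70, below SCORE_THRESHOLD = 80, so it can only match together
    # with a domain or city/country hit; this seems to be what the v2.9 weighting and bonus address.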
    # Location exact match
    loc_flag = 1 if (record1.get('CRM Ort') == record2.get('CRM Ort') and
                     record1.get('CRM Land') == record2.get('CRM Land')) else 0

    # Name scores
    n1 = record1.get('normalized_name', '')
    n2 = record2.get('normalized_name', '')
    if n1 and n2:
        ts = fuzz.token_set_ratio(n1, n2)
        pr = fuzz.partial_ratio(n1, n2)
        ss = fuzz.token_sort_ratio(n1, n2)
        name_score = max(ts, pr, ss)
    else:
        name_score = 0
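    # Illustrative (not measured): for "acme gmbh" vs. "acme", token_set_ratio and
    # partial_ratio both return 100 while token_sort_ratio is noticeably lower;
    # taking the max keeps short-vs-long variants of the same name from being punished.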

    # Bonus for pure name-only matches
    bonus_flag = 1 if domain_flag == 0 and loc_flag == 0 and name_score >= 85 else 0

    # Total score
    total = domain_flag * 100 + name_score * 1.0 + loc_flag * 20 + bonus_flag * 20
    return round(total), domain_flag, name_score, loc_flag, bonus_flag
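    # Worked example with assumed values: same domain, name score 90, matching city/country
    # gives 1*100 + 90*1.0 + 1*20 + 0*20 = 210; a name-only pair scoring 90 gets
    # 0 + 90 + 0 + 20 (bonus) = 110; both clear SCORE_THRESHOLD = 80.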

# --- Main function ---
def main():
    logger.info("Starte Duplikats-Check v2.8 (Match-Komponenten im Log)")
    logger.info("Starte Duplikats-Check v2.9 (Bulletproof)")
    # GoogleSheetHandler init
    try:
        sheet_handler = GoogleSheetHandler()
        sheet = GoogleSheetHandler()
        logger.info("GoogleSheetHandler initialisiert")
    except Exception as e:
        logger.critical(f"Init GoogleSheetHandler fehlgeschlagen: {e}")
@@ -69,18 +83,15 @@ def main():
    # Load data
    logger.info(f"Lade CRM-Daten aus '{CRM_SHEET_NAME}'...")
    crm_df = sheet_handler.get_sheet_as_dataframe(CRM_SHEET_NAME)
    if crm_df is None or crm_df.empty:
        logger.critical("CRM-Tab leer. Abbruch.")
        return
    logger.info(f"{len(crm_df)} CRM-Datensätze geladen")
    crm_df = sheet.get_sheet_as_dataframe(CRM_SHEET_NAME)
    logger.info(f"{0 if crm_df is None else len(crm_df)} CRM-Datensätze geladen")
    logger.info(f"Lade Matching-Daten aus '{MATCHING_SHEET_NAME}'...")
    match_df = sheet_handler.get_sheet_as_dataframe(MATCHING_SHEET_NAME)
    if match_df is None or match_df.empty:
        logger.critical("Matching-Tab leer. Abbruch.")
    match_df = sheet.get_sheet_as_dataframe(MATCHING_SHEET_NAME)
    logger.info(f"{0 if match_df is None else len(match_df)} Matching-Datensätze geladen")

    if crm_df is None or crm_df.empty or match_df is None or match_df.empty:
        logger.critical("Leere Daten in einem der Sheets. Abbruch.")
        return
    logger.info(f"{len(match_df)} Matching-Datensätze geladen")

    # Normalization & blocking key
    for df, label in [(crm_df, 'CRM'), (match_df, 'Matching')]:
@@ -91,50 +102,49 @@ def main():
        df['block_key'] = df['normalized_name'].apply(lambda x: x.split()[0] if x else None)
        logger.debug(f"{label}-Sample: {df.iloc[0][['normalized_name','normalized_domain','block_key']].to_dict()}")

    # Build blocking index
    logger.info("Erstelle Blocking-Index...")
    # Create the blocking index
    crm_index = {}
    for idx, row in crm_df.iterrows():
    for _, row in crm_df.iterrows():
        key = row['block_key']
        if key:
            crm_index.setdefault(key, []).append(row)
    logger.info(f"Blocking-Index mit {len(crm_index)} Keys erstellt")

    # Matching
    logger.info("Starte Matching...")
    results = []
    total = len(match_df)
    logger.info("Starte Matching-Prozess...")
    for i, mrow in match_df.iterrows():
        key = mrow['block_key']
        candidates = crm_index.get(key, [])
        logger.info(f"Prüfe {i+1}/{total}: '{mrow['CRM Name']}' -> {len(candidates)} Kandidaten")
        if not candidates:
            results.append({'Potenzieller Treffer im CRM':'', 'Ähnlichkeits-Score':0})
        cands = crm_index.get(key, [])
        logger.info(f"Prüfe {i+1}/{total}: '{mrow['CRM Name']}' -> {len(cands)} Kandidaten")
        if not cands:
            results.append({'Match':'', 'Score':0})
            continue
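        # Rows without candidates still get an (empty) result entry above, so the results
        # list stays aligned row-for-row with match_df for the concat further down.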
        scored = []
        for crow in candidates:
            score, dm, ns, lm = calculate_similarity_components(mrow, crow)
            scored.append((crow['CRM Name'], score, dm, ns, lm))
        top3 = sorted(scored, key=lambda x: x[1], reverse=True)[:3]
        # Log top 3 with components
        for name, sc, dm, ns, lm in top3:
            logger.debug(f" Kandidat: {name}, Score={sc}, Domain={dm}, Name={ns}, Ort={lm}")
        best_name, best_score, best_dm, best_ns, best_lm = max(scored, key=lambda x: x[1])
        if best_score >= SCORE_THRESHOLD:
            results.append({'Potenzieller Treffer im CRM':best_name, 'Ähnlichkeits-Score':best_score})
            logger.info(f" --> Match: '{best_name}' Score={best_score} (Dom={best_dm}, Name={best_ns}, Ort={best_lm})")
        else:
            results.append({'Potenzieller Treffer im CRM':'', 'Ähnlichkeits-Score':best_score})
            logger.info(f" --> Kein Match (Score={best_score}, Dom={best_dm}, Name={best_ns}, Ort={best_lm})")

    # Write back
    logger.info("Schreibe Ergebnisse zurück ins Sheet...")
    out_df = pd.DataFrame(results)
        scored = []
        for crow in cands:
            sc, dm, ns, lm, bf = calculate_similarity(mrow, crow)
            scored.append((crow['CRM Name'], sc, dm, ns, lm, bf))
        # Log the top 3
        for name, sc, dm, ns, lm, bf in sorted(scored, key=lambda x: x[1], reverse=True)[:3]:
            logger.debug(f" Kandidat: {name}, Score={sc}, Dom={dm}, Name={ns}, Ort={lm}, Bonus={bf}")

        best_name, best_score, dm, ns, lm, bf = max(scored, key=lambda x: x[1])
        if best_score >= SCORE_THRESHOLD:
            results.append({'Match':best_name, 'Score':best_score})
            logger.info(f" --> Match: '{best_name}' ({best_score}) [Dom={dm}, Name={ns}, Ort={lm}, Bonus={bf}]")
        else:
            results.append({'Match':'', 'Score':best_score})
            logger.info(f" --> Kein Match (Score={best_score}) [Dom={dm}, Name={ns}, Ort={lm}, Bonus={bf}]")

    # Write results back
    logger.info("Schreibe Ergebnisse ins Sheet...")
    out = pd.DataFrame(results)
    output = match_df[['CRM Name','CRM Website','CRM Ort','CRM Land']].copy()
    output = pd.concat([output.reset_index(drop=True), out_df], axis=1)
    output = pd.concat([output.reset_index(drop=True), out], axis=1)
    data = [output.columns.tolist()] + output.values.tolist()
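    # 'data' is a header row followed by one list per result row, presumably the
    # list-of-lists layout that clear_and_write_data() expects for a full-range write.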
    success = sheet_handler.clear_and_write_data(MATCHING_SHEET_NAME, data)
    if success:
    if sheet.clear_and_write_data(MATCHING_SHEET_NAME, data):
        logger.info("Ergebnisse erfolgreich geschrieben")
    else:
        logger.error("Fehler beim Schreiben ins Google Sheet")