duplicate_checker.py updated

commit 9b1a869c74
parent 1a84e10c6c
2025-08-06 13:11:09 +00:00


@@ -7,64 +7,55 @@ from thefuzz import fuzz
 from helpers import normalize_company_name, simple_normalize_url
 from google_sheet_handler import GoogleSheetHandler
 
-# duplicate_checker.py v2.2 (Domain-Fallback & Name-Partial-Bonus für bessere Matches)
-# Version: 2025-08-06_15-20
+# duplicate_checker.py v2.3 (Rückkehr zum v2.0-Scoring, erweitert mit Logging)
+# Version: 2025-08-06_16-00
 
 # --- Konfiguration ---
 CRM_SHEET_NAME = "CRM_Accounts"
 MATCHING_SHEET_NAME = "Matching_Accounts"
-SCORE_THRESHOLD = 80
+SCORE_THRESHOLD = 80  # v2.0 Schwelle (0-190 Skala)
 LOG_DIR = "Log"
 
-# --- Logging Setup ---
+# --- Logging Setup mit Datum im Dateinamen ---
 if not os.path.exists(LOG_DIR):
     os.makedirs(LOG_DIR, exist_ok=True)
 now = datetime.now().strftime('%Y-%m-%d_%H-%M')
-log_path = os.path.join(LOG_DIR, f"{now}_Duplicate.log")
+log_path = os.path.join(LOG_DIR, f"{now}_Duplicate_v2.3.log")
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)
 
-# Console
+# Console Handler (INFO+)
 ch = logging.StreamHandler()
 ch.setLevel(logging.INFO)
 ch.setFormatter(logging.Formatter("%(asctime)s - %(levelname)-8s - %(message)s"))
 logger.addHandler(ch)
 
-# File
+# File Handler (DEBUG+)
 fh = logging.FileHandler(log_path, mode='a', encoding='utf-8')
 fh.setLevel(logging.DEBUG)
 fh.setFormatter(logging.Formatter("%(asctime)s - %(levelname)-8s - %(name)s - %(message)s"))
 logger.addHandler(fh)
 logger.info(f"Logging in Datei: {log_path}")
-logger.info("Version: duplicate_checker.py v2.2 (Domain-Fallback & Partial-Bonus) | Build: 2025-08-06_15-20")
+logger.info("Version: duplicate_checker.py v2.3 (v2.0-Scoring mit Logging) | Build: 2025-08-06_16-00")
 
 def calculate_similarity(record1, record2):
-    """Gewichteter Score mit Domain-Fallback und Partial-Name-Bonus."""
-    total = 0
-    # Domain-Exact
-    dom1 = record1.get('normalized_domain','')
-    dom2 = record2.get('normalized_domain','')
-    if dom1 and dom1 == dom2:
-        total += 100
-    # Domain-Fallback: Substring
-    elif dom1 and dom2 and (dom1 in dom2 or dom2 in dom1):
-        total += 50
-    # Name-Fuzzy Token-Set
+    """Berechnet den v2.0-Score: Domain=100, Name*0.7, Ort+Land=20."""
+    total_score = 0
+    # Domain(exakt)
+    if record1.get('normalized_domain') and record1['normalized_domain'] == record2.get('normalized_domain'):
+        total_score += 100
+    # Name fuzzy
     name1 = record1.get('normalized_name','')
     name2 = record2.get('normalized_name','')
     if name1 and name2:
-        ts = fuzz.token_set_ratio(name1, name2)
-        total += ts * 0.7
-        # Partial match bonus für kurze/abweichende Namen
-        pr = fuzz.partial_ratio(name1, name2)
-        if pr >= 85:
-            total += 20
+        sim = fuzz.token_set_ratio(name1, name2)
+        total_score += sim * 0.7
     # Ort+Land exakt
     if record1.get('CRM Ort') == record2.get('CRM Ort') and record1.get('CRM Land') == record2.get('CRM Land'):
-        total += 20
-    return round(total)
+        total_score += 20
+    return round(total_score)
 
 def main():
-    logger.info("Starte Duplikats-Check v2.2 mit Domain-Fallback & Partial-Bonus")
+    logger.info("Starte Duplikats-Check v2.3 (v2.0-Scoring mit Logging)")
     try:
         sheet = GoogleSheetHandler()
         logger.info("GoogleSheetHandler initialisiert")
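As a quick sanity check of the restored v2.0 scoring (domain 100, name 0.7x, Ort+Land 20, threshold 80 on a 0-190 scale), the snippet below scores two hand-made records. The record values are hypothetical, and it assumes duplicate_checker.py and its helpers are importable from the working directory:

# Hypothetical records, shaped like the normalized rows built in main().
from duplicate_checker import calculate_similarity, SCORE_THRESHOLD

r1 = {'normalized_domain': 'acme.de', 'normalized_name': 'acme gmbh',
      'CRM Ort': 'berlin', 'CRM Land': 'deutschland'}
r2 = {'normalized_domain': 'acme.de', 'normalized_name': 'acme',
      'CRM Ort': 'berlin', 'CRM Land': 'deutschland'}

# Domain exakt -> +100; token_set_ratio('acme gmbh', 'acme') == 100 -> +70; Ort+Land -> +20
score = calculate_similarity(r1, r2)
print(score, score >= SCORE_THRESHOLD)  # 190 True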
@@ -72,15 +63,16 @@ def main():
         logger.critical(f"FEHLER Init GoogleSheetHandler: {e}")
         sys.exit(1)
 
+    # Daten laden
     crm_df = sheet.get_sheet_as_dataframe(CRM_SHEET_NAME)
     match_df = sheet.get_sheet_as_dataframe(MATCHING_SHEET_NAME)
     if crm_df is None or crm_df.empty or match_df is None or match_df.empty:
         logger.critical("Daten fehlen. Abbruch.")
         return
-    logger.info(f"{len(crm_df)} CRM-Zeilen, {len(match_df)} Matching-Zeilen geladen")
+    logger.info(f"{len(crm_df)} CRM- und {len(match_df)} Matching-Zeilen geladen")
 
-    # Norm & Block-Key
-    for df, label in [(crm_df,'CRM'),(match_df,'Matching')]:
+    # Normalisierung & Blocking-Key
+    for df, label in [(crm_df,'CRM'), (match_df,'Matching')]:
         df['normalized_name'] = df['CRM Name'].astype(str).apply(normalize_company_name)
         df['normalized_domain'] = df['CRM Website'].astype(str).apply(simple_normalize_url)
         df['CRM Ort'] = df['CRM Ort'].astype(str).str.lower().str.strip()
@@ -88,7 +80,7 @@ def main():
         df['block_key'] = df['normalized_name'].apply(lambda x: x.split()[0] if x else None)
         logger.debug(f"{label}-Sample: {df.iloc[0][['normalized_name','normalized_domain','block_key']].to_dict()}")
 
-    # Build index
+    # Build Blocking-Index
     crm_index = {}
     for idx, row in crm_df.iterrows():
         key = row['block_key']
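The blocking key is simply the first token of the normalized name, so each Matching row is only scored against CRM rows that share that token. A minimal standalone sketch of the idea, using made-up names and plain lists instead of DataFrame rows:

# Standalone illustration of the blocking step; company names are made up.
crm_names = ["acme gmbh", "acme holding", "beta logistik", "gamma software"]

crm_index = {}
for name in crm_names:
    key = name.split()[0] if name else None  # first token = block key
    if key:
        crm_index.setdefault(key, []).append(name)

# A record with block key 'acme' is compared against its block only,
# i.e. 2 candidates instead of all 4 CRM rows.
print(crm_index.get("acme", []))  # ['acme gmbh', 'acme holding']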
@@ -96,31 +88,38 @@ def main():
             crm_index.setdefault(key, []).append(row)
     logger.info(f"Blocking-Index mit {len(crm_index)} Keys erstellt")
 
+    # Matching mit relevanten Kandidaten im Log
     results = []
     total = len(match_df)
     for i, mrow in match_df.iterrows():
         key = mrow['block_key']
-        cands = crm_index.get(key, [])
-        logger.info(f"Prüfe {i+1}/{total}: '{mrow['CRM Name']}' (Key='{key}') -> {len(cands)} Kandidaten")
-        if not cands:
-            results.append({'Match':'','Score':0}); continue
-        scored = [(crow['CRM Name'], calculate_similarity(mrow,crow)) for crow in cands]
-        top3 = sorted(scored, key=lambda x:x[1], reverse=True)[:3]
-        logger.debug(f"  Top3: {top3}")
-        best, score = max(scored, key=lambda x:x[1])
-        if score >= SCORE_THRESHOLD:
-            results.append({'Match':best,'Score':score})
-            logger.info(f"  --> Match: '{best}' ({score})")
+        candidates = crm_index.get(key, [])
+        logger.info(f"Prüfe {i+1}/{total}: '{mrow['CRM Name']}' (Key='{key}') -> {len(candidates)} Kandidaten")
+        if not candidates:
+            results.append({'Potenzieller Treffer im CRM':'','Ähnlichkeits-Score':0})
+            continue
+        # Score for each candidate
+        scored = [(crow['CRM Name'], calculate_similarity(mrow,crow)) for crow in candidates]
+        # Top 3 candidates logged
+        top3 = sorted(scored, key=lambda x: x[1], reverse=True)[:3]
+        logger.debug(f"  Top3 Kandidaten: {top3}")
+        best_name, best_score = max(scored, key=lambda x: x[1])
+        if best_score >= SCORE_THRESHOLD:
+            results.append({'Potenzieller Treffer im CRM':best_name,'Ähnlichkeits-Score':best_score})
+            logger.info(f"  --> Match: '{best_name}' mit Score {best_score}")
         else:
-            results.append({'Match':'','Score':score})
-            logger.info(f"  --> Kein Match (Score {score})")
+            results.append({'Potenzieller Treffer im CRM':'','Ähnlichkeits-Score':best_score})
+            logger.info(f"  --> Kein Match (höchster Score {best_score})")
 
+    # Write results back
     out = pd.DataFrame(results)
     output = pd.concat([match_df[['CRM Name','CRM Website','CRM Ort','CRM Land']].reset_index(drop=True), out], axis=1)
     data = [output.columns.tolist()] + output.values.tolist()
     ok = sheet.clear_and_write_data(MATCHING_SHEET_NAME, data)
-    if ok: logger.info("Ergebnisse geschrieben")
-    else: logger.error("Fehler beim Schreiben ins Sheet")
+    if ok:
+        logger.info("Ergebnisse erfolgreich geschrieben")
+    else:
+        logger.error("Fehler beim Schreiben ins Google Sheet")
 
 if __name__=='__main__':
     main()
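The script imports normalize_company_name and simple_normalize_url from helpers, which are not part of this diff. A rough sketch of what such helpers might look like, as an assumption for illustration rather than the actual implementation in helpers.py:

# helpers.py (sketch) -- assumed behaviour, the real module may differ.
import re

LEGAL_SUFFIXES = {"gmbh", "ag", "kg", "ug", "inc", "ltd", "llc", "co"}  # assumed suffix list

def normalize_company_name(name: str) -> str:
    """Lowercase, drop punctuation and common legal-form suffixes."""
    tokens = re.sub(r"[^\w\s]", " ", name.lower()).split()
    return " ".join(t for t in tokens if t not in LEGAL_SUFFIXES)

def simple_normalize_url(url: str) -> str:
    """Reduce a URL to its bare domain (no scheme, no 'www.', no path)."""
    url = url.strip().lower()
    url = re.sub(r"^https?://", "", url)
    url = re.sub(r"^www\.", "", url)
    return url.split("/")[0]

print(normalize_company_name("ACME GmbH & Co. KG"))        # 'acme'
print(simple_normalize_url("https://www.acme.de/kontakt"))  # 'acme.de'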