import os
import sys
import logging

import pandas as pd

from datetime import datetime
from thefuzz import fuzz

from helpers import normalize_company_name, simple_normalize_url, serp_website_lookup
from config import Config
from google_sheet_handler import GoogleSheetHandler

# duplicate_checker.py v2.12 (SERP URL stored in a new column; matching uses it only when the original value is empty)
# Version: 2025-08-08_10-20
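#
# Pipeline overview:
#   1. Load CRM and matching accounts from Google Sheets.
#   2. Look up websites via SerpAPI for matching rows that lack one (optional).
#   3. Normalize names, domains, and locations; derive a blocking key.
#   4. Score each matching row against the CRM rows that share its block key.
#   5. Write matches and scores back to the sheet, plus a local CSV backup.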

# --- Configuration ---
CRM_SHEET_NAME = "CRM_Accounts"
MATCHING_SHEET_NAME = "Matching_Accounts"
SCORE_THRESHOLD = 80  # minimum total score for a match
LOG_DIR = "Log"
now = datetime.now().strftime('%Y-%m-%d_%H-%M')
LOG_FILE = f"{now}_duplicate_check_v2.12.txt"
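# The log file name embeds the start timestamp (minute resolution), so each run
# normally writes its own file even though the handler opens it in append mode.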

# --- Logging setup ---
os.makedirs(LOG_DIR, exist_ok=True)
log_path = os.path.join(LOG_DIR, LOG_FILE)

# Reset the root logger so repeated runs do not stack duplicate handlers.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
for h in list(root.handlers):
    root.removeHandler(h)

formatter = logging.Formatter("%(asctime)s - %(levelname)-8s - %(message)s")

# Console: INFO and above.
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
root.addHandler(ch)

# Log file: everything down to DEBUG.
fh = logging.FileHandler(log_path, mode='a', encoding='utf-8')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
root.addHandler(fh)

logger = logging.getLogger(__name__)
logger.info(f"Logging to console and file: {log_path}")
logger.info(f"Starting duplicate_checker.py v2.12 | Version: {now}")

# --- Load SerpAPI key ---
try:
    Config.load_api_keys()
    serp_key = Config.API_KEYS.get('serpapi')
    if not serp_key:
        logger.warning("SerpAPI key not found; SERP fallback disabled.")
except Exception as e:
    logger.warning(f"Failed to load API keys: {e}")
    serp_key = None
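# If serp_key is falsy here (missing key or failed load), the SERP fallback in
# main() is skipped and rows without a website are matched on name and location alone.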

# --- Similarity computation ---
def calculate_similarity(record1, record2):
    """Score two records; returns (total, domain_flag, name_score, loc_flag, bonus_flag).

    total = 100 * domain match + fuzzy name score (0-100)
          + 20 * location match + 20 * high-name bonus
    """
    dom1 = record1.get('normalized_domain', '')
    dom2 = record2.get('normalized_domain', '')
    domain_flag = 1 if dom1 and dom1 == dom2 else 0
    loc_flag = 1 if (record1.get('CRM Ort') == record2.get('CRM Ort')
                     and record1.get('CRM Land') == record2.get('CRM Land')) else 0
    n1, n2 = record1.get('normalized_name', ''), record2.get('normalized_name', '')
    if n1 and n2:
        # Take the best of three fuzzy measures so word order, subsets,
        # and partial overlaps are all tolerated.
        ts = fuzz.token_set_ratio(n1, n2)
        pr = fuzz.partial_ratio(n1, n2)
        ss = fuzz.token_sort_ratio(n1, n2)
        name_score = max(ts, pr, ss)
    else:
        name_score = 0
    # Bonus: a very strong name match compensates when neither domain
    # nor location confirms it.
    bonus_flag = 1 if domain_flag == 0 and loc_flag == 0 and name_score >= 85 else 0
    total = domain_flag * 100 + name_score * 1.0 + loc_flag * 20 + bonus_flag * 20
    return round(total), domain_flag, name_score, loc_flag, bonus_flag
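
# Worked example (hedged; assumes normalize_company_name lowercases names and
# strips legal-form suffixes, which is defined in helpers and not shown here):
#   r1 = {'normalized_domain': 'acme.de', 'normalized_name': 'acme',
#         'CRM Ort': 'berlin', 'CRM Land': 'de'}
#   r2 = {'normalized_domain': 'acme.de', 'normalized_name': 'acme gmbh',
#         'CRM Ort': 'berlin', 'CRM Land': 'de'}
#   token_set_ratio('acme', 'acme gmbh') == 100, so
#   calculate_similarity(r1, r2) -> (220, 1, 100, 1, 0).
#   With SCORE_THRESHOLD = 80, a shared domain alone (100 points) or a fuzzy
#   name score of 80+ already clears the bar.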

# --- Main function ---
def main():
    logger.info("Starting duplicate check v2.12 (SERP URL as a new column)")
    try:
        sheet = GoogleSheetHandler()
        logger.info("GoogleSheetHandler initialized")
    except Exception as e:
        logger.critical(f"Failed to initialize GoogleSheetHandler: {e}")
        sys.exit(1)

    logger.info(f"Loading CRM data from '{CRM_SHEET_NAME}'...")
    crm_df = sheet.get_sheet_as_dataframe(CRM_SHEET_NAME)
    logger.info(f"{0 if crm_df is None else len(crm_df)} CRM records loaded")
    logger.info(f"Loading matching data from '{MATCHING_SHEET_NAME}'...")
    match_df = sheet.get_sheet_as_dataframe(MATCHING_SHEET_NAME)
    logger.info(f"{0 if match_df is None else len(match_df)} matching records loaded")
    if crm_df is None or crm_df.empty or match_df is None or match_df.empty:
        logger.critical("Empty data in one of the sheets. Aborting.")
        return

    # --- SerpAPI fallback for empty domains (MATCHING sheet only) ---
    if serp_key:
        empty_mask = match_df['CRM Website'].fillna('').astype(str).str.strip() == ''
        empty_count = int(empty_mask.sum())
        if empty_count > 0:
            logger.info(f"SERP fallback for matching sheet: {empty_count} companies without a URL")
            found_cnt = 0
            for idx, row in match_df[empty_mask].iterrows():
                company = row['CRM Name']
                try:
                    url = serp_website_lookup(company)
                    # 'k.A.' ("keine Angabe", i.e. n/a) marks a lookup without a usable result.
                    if url and 'k.A.' not in url:
                        # Add a scheme if it is missing
                        if not str(url).startswith(('http://', 'https://')):
                            url = 'https://' + str(url).lstrip()
                        match_df.at[idx, 'Gefundene Website'] = url
                        logger.info(f"  ✓ URL found: '{company}' -> {url}")
                        found_cnt += 1
                    else:
                        logger.debug(f"  ✗ No unambiguous URL: '{company}' -> {url}")
                except Exception as e:
                    logger.warning(f"  ! SERP error for '{company}': {e}")
            logger.info(f"SERP fallback finished: {found_cnt}/{empty_count} URLs added")
        else:
            logger.info("SERP fallback skipped: no missing matching URLs")
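
    # Note: looked-up URLs go into the separate 'Gefundene Website' column; the
    # original 'CRM Website' is never overwritten. The normalization below only
    # falls back to the looked-up URL when 'CRM Website' is empty.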

    # --- Normalization ---
    # Normalize CRM data (uses only the CRM Website column)
    crm_df['normalized_name'] = crm_df['CRM Name'].astype(str).apply(normalize_company_name)
    crm_df['normalized_domain'] = crm_df['CRM Website'].astype(str).apply(simple_normalize_url)
    crm_df['CRM Ort'] = crm_df['CRM Ort'].astype(str).str.lower().str.strip()
    crm_df['CRM Land'] = crm_df['CRM Land'].astype(str).str.lower().str.strip()
    crm_df['block_key'] = crm_df['normalized_name'].apply(lambda x: x.split()[0] if x else None)

    # Normalize matching data (effective website = CRM Website, falling back to Gefundene Website)
    match_df['Gefundene Website'] = match_df.get('Gefundene Website', pd.Series(index=match_df.index, dtype=object))
    match_df['Effektive Website'] = match_df['CRM Website'].fillna('').astype(str).str.strip()
    mask_eff = match_df['Effektive Website'] == ''
    match_df.loc[mask_eff, 'Effektive Website'] = match_df['Gefundene Website'].fillna('').astype(str).str.strip()

    match_df['normalized_name'] = match_df['CRM Name'].astype(str).apply(normalize_company_name)
    match_df['normalized_domain'] = match_df['Effektive Website'].astype(str).apply(simple_normalize_url)
    match_df['CRM Ort'] = match_df['CRM Ort'].astype(str).str.lower().str.strip()
    match_df['CRM Land'] = match_df['CRM Land'].astype(str).str.lower().str.strip()
    match_df['block_key'] = match_df['normalized_name'].apply(lambda x: x.split()[0] if x else None)

    # Debug samples
    logger.debug(f"CRM sample: {crm_df.iloc[0][['normalized_name', 'normalized_domain', 'block_key']].to_dict()}")
    logger.debug(f"Matching sample: {match_df.iloc[0][['normalized_name', 'normalized_domain', 'block_key', 'Effektive Website', 'Gefundene Website']].to_dict()}")

    # Build the blocking index: first token of the normalized name -> CRM rows
    crm_index = {}
    for _, row in crm_df.iterrows():
        key = row['block_key']
        if key:
            crm_index.setdefault(key, []).append(row)
    logger.info(f"Blocking index built with {len(crm_index)} keys")
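
    # Blocking keeps the comparison roughly linear instead of all-pairs: each
    # matching row is scored only against CRM rows sharing its first name token
    # (e.g. 'acme gmbh' and 'acme corp' both block under 'acme'). Trade-off: a
    # duplicate whose normalized name starts with a different token is missed.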

    # Matching
    results = []
    total = len(match_df)
    logger.info("Starting matching process...")
    for pos, (_, mrow) in enumerate(match_df.iterrows(), start=1):
        key = mrow['block_key']
        cands = crm_index.get(key, [])
        used_src = ('researched' if (str(mrow.get('CRM Website', '')).strip() == ''
                                     and str(mrow.get('Gefundene Website', '')).strip() != '')
                    else 'original')
        logger.info(f"Checking {pos}/{total}: '{mrow['CRM Name']}' -> {len(cands)} candidates (website source: {used_src})")
        if not cands:
            results.append({'Match': '', 'Score': 0})
            continue
        scored = []
        for crow in cands:
            sc, dm, ns, lm, bf = calculate_similarity(mrow, crow)
            scored.append((crow['CRM Name'], sc, dm, ns, lm, bf))
        # Log the top three candidates for debugging.
        for name, sc, dm, ns, lm, bf in sorted(scored, key=lambda x: x[1], reverse=True)[:3]:
            logger.debug(f"  Candidate: {name}, Score={sc}, Dom={dm}, Name={ns}, Loc={lm}, Bonus={bf}")
        best_name, best_score, dm, ns, lm, bf = max(scored, key=lambda x: x[1])
        if best_score >= SCORE_THRESHOLD:
            results.append({'Match': best_name, 'Score': best_score})
            logger.info(f"  --> Match: '{best_name}' ({best_score}) [Dom={dm},Name={ns},Loc={lm},Bonus={bf}]")
        else:
            results.append({'Match': '', 'Score': best_score})
            logger.info(f"  --> No match (Score={best_score}) [Dom={dm},Name={ns},Loc={lm},Bonus={bf}]")
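
    # 'results' is built in iteration order, so pd.DataFrame(results,
    # index=match_df.index) below realigns each result positionally with
    # its original matching row.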

    # Write results back (SAFE: all original columns plus the new ones, without internal normalization fields)
    logger.info("Writing results to the sheet (SAFE in-place, no column loss)...")
    res_df = pd.DataFrame(results, index=match_df.index)
    write_df = match_df.copy()
    # Attach results
    write_df['Match'] = res_df['Match']
    write_df['Score'] = res_df['Score']
    # Drop internal working columns
    drop_cols = ['normalized_name', 'normalized_domain', 'block_key', 'Effektive Website']
    for c in drop_cols:
        if c in write_df.columns:
            write_df.drop(columns=[c], inplace=True)
    # Local backup of the original matching data, including the new columns
    backup_path = os.path.join(LOG_DIR, f"{now}_backup_{MATCHING_SHEET_NAME}.csv")
    try:
        write_df.to_csv(backup_path, index=False, encoding='utf-8')
        logger.info(f"Local backup written: {backup_path}")
    except Exception as e:
        logger.warning(f"Backup failed: {e}")
    # Write: header row plus all data rows
    data = [write_df.columns.tolist()] + write_df.fillna('').values.tolist()
    ok = sheet.clear_and_write_data(MATCHING_SHEET_NAME, data)
    if ok:
        logger.info("Results written successfully")
    else:
        logger.error("Failed to write to the Google Sheet")

if __name__ == '__main__':
    main()
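
# Usage: run directly, e.g. `python duplicate_checker.py`. Progress is logged
# to the console at INFO level; the full DEBUG trace and a timestamped CSV
# backup of the matching sheet land in the Log/ directory.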