duplicate_checker.py updated

2025-09-10 11:26:28 +00:00
parent 601d1ab848
commit 5fa5a29250


@@ -1,10 +1,10 @@
# duplicate_checker.py v5.0
# duplicate_checker_v5.1.py
# Build timestamp is injected into logfile name.
# --- FEATURES v5.0 ---
# - Integration of a trained machine-learning model (XGBoost) for the match decision.
# - The old heuristic scoring logic has been completely replaced by the ML model.
# - The result is data-driven, high-precision duplicate detection.
# --- FEATURES v5.1 ---
# - NEW: More robust, multi-stage blocking to ensure that relevant candidates are found.
# - Now uses the top 3 rarest tokens when the primary search returns too few results.
# - Loads the CRM dataset from a local .pkl file to guarantee consistency between training and prediction.
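# Minimal sketch of that train/predict consistency contract (the export line is an
# assumption about the training script, not part of this file):
#     crm_df.to_pickle('crm_for_prediction.pkl')        # assumed export step during training
#     crm_df = pd.read_pickle('crm_for_prediction.pkl')  # what main() does below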
import os
import sys
@@ -37,18 +37,19 @@ def update_status(job_id, status, progress_message):
except Exception as e:
logging.error(f"Konnte Statusdatei für Job {job_id} nicht schreiben: {e}")
# --- Configuration v5.0 ---
CRM_SHEET_NAME = "CRM_Accounts"
# --- Configuration v5.1 ---
CRM_SHEET_NAME = "CRM_Accounts" # Only used as a fallback if the .pkl file is missing
MATCHING_SHEET_NAME = "Matching_Accounts"
LOG_DIR = "Log"
now = datetime.now().strftime('%Y-%m-%d_%H-%M')
LOG_FILE = f"{now}_duplicate_check_v5.0.txt"
LOG_FILE = f"{now}_duplicate_check_v5.1.txt"
MODEL_FILE = 'xgb_model.json'
TERM_WEIGHTS_FILE = 'term_weights.joblib'
PREDICTION_THRESHOLD = 0.5 # Probability above which a match is considered "certain"
CRM_DATA_FILE = 'crm_for_prediction.pkl' # IMPORTANT
PREDICTION_THRESHOLD = 0.5
PREFILTER_MIN_PARTIAL = 65
PREFILTER_MIN_PARTIAL = 70
PREFILTER_LIMIT = 50
# --- Logging Setup ---
@@ -68,7 +69,7 @@ fh.setFormatter(formatter)
root.addHandler(fh)
logger = logging.getLogger(__name__)
logger.info(f"Logging to console and file: {log_path}")
logger.info(f"Starting duplicate_checker.py v5.0 | Build: {now}")
logger.info(f"Starting duplicate_checker.py v5.1 | Build: {now}")
# --- Stop-/City-Tokens ---
STOP_TOKENS_BASE = {
@@ -92,13 +93,14 @@ def clean_name_for_scoring(norm_name: str):
final_tokens = [t for t in tokens if t not in stop_union]
return " ".join(final_tokens), set(final_tokens)
def choose_rarest_token(norm_name: str, term_weights: dict):
def get_rarest_tokens(norm_name: str, term_weights: dict, count=3):
_, toks = clean_name_for_scoring(norm_name)
if not toks: return None
return max(toks, key=lambda t: term_weights.get(t, 0))
if not toks: return []
return sorted(list(toks), key=lambda t: term_weights.get(t, 0), reverse=True)[:count]
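# Usage sketch (hypothetical weights; assumes none of the tokens are stop or city tokens,
# and that higher weight means rarer, matching the descending sort above):
#   get_rarest_tokens("acme solar systems", {"acme": 9.1, "solar": 3.2, "systems": 1.0})
#   -> ["acme", "solar", "systems"]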
# --- Feature engineering function ---
def create_features(mrec: dict, crec: dict, term_weights: dict):
# ... (this function is exactly identical to the previous version)
features = {}
n1_raw = mrec.get('normalized_name', '')
n2_raw = crec.get('normalized_name', '')
@@ -116,7 +118,7 @@ def create_features(mrec: dict, crec: dict, term_weights: dict):
features['country_mismatch'] = 1 if (mrec.get('CRM Land') and crec.get('CRM Land') and mrec.get('CRM Land') != crec.get('CRM Land')) else 0
overlapping_tokens = toks1 & toks2
rarest_token_mrec = choose_rarest_token(n1_raw, term_weights)
rarest_token_mrec = get_rarest_tokens(n1_raw, term_weights, 1)[0] if get_rarest_tokens(n1_raw, term_weights, 1) else None
features['rarest_token_overlap'] = 1 if rarest_token_mrec and rarest_token_mrec in toks2 else 0
features['weighted_token_score'] = sum(term_weights.get(t, 0) for t in overlapping_tokens)
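# Illustrative feature row (hypothetical values) as it ends up in the DataFrame that is
# passed to model.predict_proba() in main():
#   {'country_mismatch': 0, 'rarest_token_overlap': 1, 'weighted_token_score': 14.7, ...}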
@@ -141,39 +143,37 @@ def build_indexes(crm_df: pd.DataFrame):
return records, domain_index, token_index
def main(job_id=None):
logger.info("Starte Duplikats-Check v5.0 (Machine Learning Model)")
logger.info("Starte Duplikats-Check v5.1 (ML Model with Robust Blocking)")
try:
model = xgb.XGBClassifier()
model.load_model(MODEL_FILE)
term_weights = joblib.load(TERM_WEIGHTS_FILE)
logger.info("Machine-Learning-Modell und Wortgewichte erfolgreich geladen.")
crm_df = pd.read_pickle(CRM_DATA_FILE)
logger.info("ML-Modell, Wortgewichte und lokaler CRM-Datensatz erfolgreich geladen.")
except Exception as e:
logger.critical(f"Konnte Modelldateien nicht laden. Fehler: {e}")
update_status(job_id, "Fehlgeschlagen", f"Modelldateien nicht gefunden: {e}")
logger.critical(f"Konnte Modelldateien/CRM-Daten nicht laden. Fehler: {e}")
update_status(job_id, "Fehlgeschlagen", f"Modelldateien/CRM-Daten nicht gefunden: {e}")
sys.exit(1)
try:
sheet = GoogleSheetHandler()
crm_df = sheet.get_sheet_as_dataframe(CRM_SHEET_NAME)
match_df = sheet.get_sheet_as_dataframe(MATCHING_SHEET_NAME)
except Exception as e:
logger.critical(f"Fehler beim Laden der Daten aus Google Sheets: {e}")
update_status(job_id, "Fehlgeschlagen", f"Fehler beim Datenladen: {e}")
logger.critical(f"Fehler beim Laden der Matching-Daten aus Google Sheets: {e}")
update_status(job_id, "Fehlgeschlagen", f"Fehler beim Matching-Datenladen: {e}")
sys.exit(1)
total = len(match_df) if match_df is not None else 0
if crm_df is None or crm_df.empty or match_df is None or match_df.empty:
logger.critical("Leere Daten in einem der Sheets. Abbruch.")
update_status(job_id, "Fehlgeschlagen", "Leere Daten in einem der Sheets.")
if match_df is None or match_df.empty:
logger.critical("Leere Daten im Matching-Sheet. Abbruch.")
return
logger.info(f"{len(crm_df)} CRM-Datensätze | {total} Matching-Datensätze")
logger.info(f"{len(crm_df)} CRM-Datensätze (lokal) | {total} Matching-Datensätze")
for df in [crm_df, match_df]:
df['normalized_name'] = df['CRM Name'].astype(str).apply(normalize_company_name)
df['normalized_domain'] = df['CRM Website'].astype(str).apply(simple_normalize_url)
df['CRM Ort'] = df['CRM Ort'].astype(str).str.lower().str.strip()
df['CRM Land'] = df['CRM Land'].astype(str).str.lower().str.strip()
match_df['normalized_name'] = match_df['CRM Name'].astype(str).apply(normalize_company_name)
match_df['normalized_domain'] = match_df['CRM Website'].astype(str).apply(simple_normalize_url)
match_df['CRM Ort'] = match_df['CRM Ort'].astype(str).str.lower().str.strip()
match_df['CRM Land'] = match_df['CRM Land'].astype(str).str.lower().str.strip()
global CITY_TOKENS
CITY_TOKENS = {t for s in pd.concat([crm_df['CRM Ort'], match_df['CRM Ort']]).dropna().unique() for t in _tokenize(s) if len(t) >= 3}
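# CITY_TOKENS: every token of length >= 3 occurring in a city ('CRM Ort') value of either
# dataset; clean_name_for_scoring presumably folds these into its stop-token union so that
# city names do not inflate the name-similarity features.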
@@ -189,19 +189,27 @@ def main(job_id=None):
logger.info(progress_message)
if processed % 10 == 0 or processed == total: update_status(job_id, "Läuft", progress_message)
# --- NEW: More robust, multi-stage blocking ---
candidate_indices = set()
# Stage 1: Precise blocking
if mrow.get('normalized_domain'):
# Here we use the records directly, since building a dedicated index would be more complex
candidates_from_domain = domain_index.get(mrow['normalized_domain'], [])
for c in candidates_from_domain:
try:
indices = crm_df.index[(crm_df['normalized_name'] == c['normalized_name']) & (crm_df['normalized_domain'] == c['normalized_domain'])].tolist()
# Find the index of the record
indices = crm_df.index[crm_df['normalized_name'] == c['normalized_name']].tolist()
if indices: candidate_indices.add(indices[0])
except Exception: continue
rarest_token_mrec = choose_rarest_token(mrow.get('normalized_name',''), term_weights)
if len(candidate_indices) < 5 and rarest_token_mrec:
candidate_indices.update(token_index.get(rarest_token_mrec, []))
# Stage 2: Generous token blocking
if len(candidate_indices) < 5:
top_tokens = get_rarest_tokens(mrow.get('normalized_name',''), term_weights, count=3)
for token in top_tokens:
candidate_indices.update(token_index.get(token, []))
# Stage 3: Fallback prefilter
if len(candidate_indices) < 5:
clean1, _ = clean_name_for_scoring(mrow.get('normalized_name',''))
pf = sorted([(fuzz.partial_ratio(clean1, clean_name_for_scoring(r.get('normalized_name',''))[0]), i) for i, r in enumerate(crm_records)], key=lambda x: x[0], reverse=True)
@@ -212,21 +220,15 @@ def main(job_id=None):
results.append({'Match':'', 'Score':0, 'Match_Grund':'keine Kandidaten'})
continue
feature_list = []
for cr in candidates:
features = create_features(mrow, cr, term_weights)
feature_list.append(features)
# Feature engineering and prediction
feature_list = [create_features(mrow, cr, term_weights) for cr in candidates]
feature_df = pd.DataFrame(feature_list)
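# Restrict and reorder the columns to the exact feature order the classifier saw during
# training (exposed as feature_names_in_), so positions line up for predict_proba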
feature_df = feature_df[model.feature_names_in_]
probabilities = model.predict_proba(feature_df)[:, 1]
scored_candidates = []
for i, prob in enumerate(probabilities):
scored_candidates.append({'name': candidates[i].get('CRM Name', ''), 'score': prob, 'record': candidates[i]})
scored_candidates = sorted([{'name': candidates[i].get('CRM Name', ''), 'score': prob} for i, prob in enumerate(probabilities)], key=lambda x: x['score'], reverse=True)
scored_candidates.sort(key=lambda x: x['score'], reverse=True)
best_match = scored_candidates[0] if scored_candidates else None
if best_match and best_match['score'] >= PREDICTION_THRESHOLD:
@@ -254,11 +256,7 @@ def main(job_id=None):
if job_id: update_status(job_id, "Fehlgeschlagen", "Fehler beim Schreiben ins Google Sheet.")
if __name__=='__main__':
parser = argparse.ArgumentParser(description="Duplicate Checker v5.0 (ML Model)")
parser = argparse.ArgumentParser(description="Duplicate Checker v5.1 (ML Model)")
parser.add_argument("--job-id", type=str, help="Eindeutige ID für den Job-Status.")
args = parser.parse_args()
# The Config class is no longer needed here, since API keys are not used
# Config.load_api_keys()
main(job_id=args.job_id)
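# Example invocation (assuming the script is saved under the filename from the header
# comment; the job id is a placeholder):
#   python duplicate_checker_v5.1.py --job-id job_42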