import logging
import math
import re
import sys
from collections import Counter

import joblib
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from thefuzz import fuzz

# Logging setup
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- Configuration ---
GOLD_STANDARD_FILE = 'erweitertes_matching.csv'
CRM_ACCOUNTS_FILE = 'CRM_Accounts.csv'  # Assumption: a CSV export of the CRM sheet exists
MODEL_OUTPUT_FILE = 'xgb_model.json'
TERM_WEIGHTS_OUTPUT_FILE = 'term_weights.joblib'

# --- Stop/city tokens ---
STOP_TOKENS_BASE = {
    'gmbh', 'mbh', 'ag', 'kg', 'ug', 'ohg', 'se', 'co', 'kgaa', 'inc', 'llc', 'ltd', 'sarl',
    'b.v', 'bv', 'holding', 'gruppe', 'group', 'international', 'solutions', 'solution',
    'service', 'services', 'deutschland', 'austria', 'germany', 'technik', 'technology',
    'technologies', 'systems', 'systeme', 'logistik', 'logistics', 'industries', 'industrie',
    'management', 'consulting', 'vertrieb', 'handel', 'company', 'gesellschaft', 'mbh&co',
    'mbhco', 'werke', 'werk'
}
CITY_TOKENS = set()

# --- Helper functions (carried over from the original script) ---

def _tokenize(s: str):
    """Split a lowercased string on anything that is not a letter, digit, or umlaut."""
    if not s:
        return []
    return re.split(r"[^a-z0-9äöüß]+", str(s).lower())

def normalize_company_name(name: str):
    """Lowercase, strip bracketed additions and punctuation, collapse whitespace."""
    if not isinstance(name, str):
        return ''
    name = name.lower()
    name = re.sub(r'\(.*?\)', '', name)
    name = re.sub(r'\[.*?\]', '', name)
    name = re.sub(r'[^a-z0-9äöüß\s]', ' ', name)
    name = re.sub(r'\s+', ' ', name).strip()
    return name

def clean_name_for_scoring(norm_name: str):
    """Drop short tokens and stop/city tokens; return the cleaned string and its token set."""
    if not norm_name:
        return "", set()
    tokens = [t for t in _tokenize(norm_name) if len(t) >= 3]
    stop_union = STOP_TOKENS_BASE | CITY_TOKENS
    final_tokens = [t for t in tokens if t not in stop_union]
    return " ".join(final_tokens), set(final_tokens)

def choose_rarest_token(norm_name: str, term_weights: dict):
    """Return the token with the highest IDF weight, i.e. the most distinctive one."""
    _, toks = clean_name_for_scoring(norm_name)
    if not toks:
        return None
    return max(toks, key=lambda t: term_weights.get(t, 0))

def create_features(mrec: dict, crec: dict, term_weights: dict):
    """Build the feature vector for one (CRM record, candidate record) pair."""
    features = {}
    n1_raw = mrec.get('normalized_name', '')
    n2_raw = crec.get('normalized_name', '')
    clean1, toks1 = clean_name_for_scoring(n1_raw)
    clean2, toks2 = clean_name_for_scoring(n2_raw)

    # Fuzzy string similarities on raw and cleaned names
    features['fuzz_ratio'] = fuzz.ratio(n1_raw, n2_raw)
    features['fuzz_partial_ratio'] = fuzz.partial_ratio(n1_raw, n2_raw)
    features['fuzz_token_set_ratio'] = fuzz.token_set_ratio(clean1, clean2)
    features['fuzz_token_sort_ratio'] = fuzz.token_sort_ratio(clean1, clean2)

    # Exact matches on website, city, and country (only when the CRM field is non-empty)
    features['domain_match'] = 1 if mrec.get('CRM Website') and str(mrec.get('CRM Website')).strip() != '' and mrec.get('CRM Website') == crec.get('Kandidat Website') else 0
    features['city_match'] = 1 if mrec.get('CRM Ort') and str(mrec.get('CRM Ort')).strip() != '' and mrec.get('CRM Ort') == crec.get('Kandidat Ort') else 0
    features['country_match'] = 1 if mrec.get('CRM Land') and str(mrec.get('CRM Land')).strip() != '' and mrec.get('CRM Land') == crec.get('Kandidat Land') else 0
    features['country_mismatch'] = 1 if (mrec.get('CRM Land') and crec.get('Kandidat Land') and mrec.get('CRM Land') != crec.get('Kandidat Land')) else 0

    # Token-overlap features weighted by term rarity
    overlapping_tokens = toks1 & toks2
    rarest_token_mrec = choose_rarest_token(n1_raw, term_weights)
    features['rarest_token_overlap'] = 1 if rarest_token_mrec and rarest_token_mrec in toks2 else 0
    features['weighted_token_score'] = sum(term_weights.get(t, 0) for t in overlapping_tokens)
    features['jaccard_similarity'] = len(overlapping_tokens) / len(toks1 | toks2) if len(toks1 | toks2) > 0 else 0

    # Length-based features
    features['name_len_diff'] = abs(len(n1_raw) - len(n2_raw))
    features['candidate_is_shorter'] = 1 if len(n2_raw) < len(n1_raw) else 0
    return features
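
# Illustrative example (not part of the original script): how the two main
# helpers transform a sample name. The sample company name is an assumption.
#
#   normalize_company_name('Müller & Söhne GmbH (Berlin)')
#       -> 'müller söhne gmbh'
#   clean_name_for_scoring('müller söhne gmbh')
#       -> ('müller söhne', {'müller', 'söhne'})   # 'gmbh' removed as a stop token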
# --- Main training script ---
if __name__ == "__main__":
    logging.info("Starting training process for duplicate checker v5.0")

    # 1. Load data
    try:
        gold_df = pd.read_csv(GOLD_STANDARD_FILE, sep=';')
        # Load CRM data to compute the term weights.
        # Ideally this would be a fresh export from the Google Sheet; better still,
        # load all 22,000 CRM accounts here. As a fallback, the gold-standard data is used.
        try:
            crm_df = pd.read_csv(CRM_ACCOUNTS_FILE, sep=',')  # adjust the separator if needed
            logging.info(f"Loaded {len(crm_df)} CRM accounts for the weight computation.")
        except FileNotFoundError:
            logging.warning(f"'{CRM_ACCOUNTS_FILE}' not found. Using data from '{GOLD_STANDARD_FILE}' for the weights.")
            # Select the single column before renaming; renaming on the full frame
            # would create a duplicate 'CRM Name' column in gold_df.
            crm_df = gold_df[['Kandidat']].rename(columns={'Kandidat': 'CRM Name'})
    except Exception as e:
        logging.critical(f"Error while loading the CSV files: {e}")
        sys.exit(1)

    # 2. Normalize data
    for col in ['CRM Name', 'Kandidat']:
        gold_df[f'normalized_{col}'] = gold_df[col].astype(str).apply(normalize_company_name)
    for col in ['CRM Ort', 'Kandidat Ort', 'CRM Land', 'Kandidat Land']:
        gold_df[col] = gold_df[col].astype(str).str.lower().str.strip()
    crm_df['normalized_name'] = crm_df['CRM Name'].astype(str).apply(normalize_company_name)

    # 3. Compute term weights (IDF) over the full CRM dataset
    token_counts = Counter(
        t for n in crm_df['normalized_name'] for t in set(clean_name_for_scoring(n)[1])
    )
    term_weights = {
        token: math.log(len(crm_df) / (count + 1)) for token, count in token_counts.items()
    }
    logging.info(f"Computed {len(term_weights)} term weights.")

    # 4. Build the feature table and labels
    logging.info("Creating features for the training dataset...")
    features_list = []
    labels = []
    for _, row in gold_df.iterrows():
        mrec = {
            'normalized_name': row['normalized_CRM Name'],
            'CRM Website': row['CRM Website'],
            'CRM Ort': row['CRM Ort'],
            'CRM Land': row['CRM Land'],
        }
        crec = {
            'normalized_name': row['normalized_Kandidat'],
            'Kandidat Website': row['Kandidat Website'],
            'Kandidat Ort': row['Kandidat Ort'],
            'Kandidat Land': row['Kandidat Land'],
        }
        # Only process rows that actually have a candidate
        if pd.notna(row['Kandidat']):
            features = create_features(mrec, crec, term_weights)
            features_list.append(features)
            # Label: 1 if the candidate matches the gold standard, else 0
            # (assumes column G is now named 'Best Match Option')
            is_correct_match = 1 if row['Kandidat'] == row.get('Best Match Option', '') else 0
            labels.append(is_correct_match)

    X = pd.DataFrame(features_list)
    y = np.array(labels)
    logging.info(f"Training dataset built with {X.shape[0]} examples and {X.shape[1]} features.")
    logging.info(f"Class distribution: {Counter(y)}")

    # 5. Train the model
    logging.info("Training the XGBoost model...")
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
    # 'use_label_encoder' was dropped: recent XGBoost versions no longer accept it
    model = xgb.XGBClassifier(eval_metric='logloss')
    model.fit(X_train, y_train)
    logging.info("Model trained successfully.")

    # 6. Validate the model
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    logging.info("\n--- Validation result ---")
    logging.info(f"Accuracy on test data: {accuracy:.2%}")
    logging.info("Detailed report:")
    logging.info("\n" + classification_report(y_test, y_pred))
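
    # Illustrative addition (assumption, not in the original pipeline): log the
    # learned feature importances so the most informative signals, e.g.
    # domain_match vs. the fuzzy scores, are visible in the training log.
    importances = sorted(zip(X.columns, model.feature_importances_), key=lambda p: p[1], reverse=True)
    for feat, imp in importances:
        logging.info(f"Feature importance: {feat} = {imp:.3f}")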
    # 7. Save the final model and weights
    model.save_model(MODEL_OUTPUT_FILE)
    joblib.dump(term_weights, TERM_WEIGHTS_OUTPUT_FILE)
    logging.info(f"Saved model to '{MODEL_OUTPUT_FILE}' and weights to '{TERM_WEIGHTS_OUTPUT_FILE}'.")
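
    # --- Optional sanity check (assumption, not part of the original script) ---
    # A minimal sketch that reloads the persisted artifacts and verifies they
    # reproduce the in-memory predictions, then scores one hypothetical pair to
    # illustrate downstream use. The demo names, cities, and websites are
    # illustrative assumptions.
    loaded_model = xgb.XGBClassifier()
    loaded_model.load_model(MODEL_OUTPUT_FILE)
    loaded_weights = joblib.load(TERM_WEIGHTS_OUTPUT_FILE)
    assert (loaded_model.predict(X_test) == y_pred).all(), "Reloaded model diverges from trained model"
    assert loaded_weights == term_weights, "Reloaded term weights diverge"
    logging.info("Round-trip check passed: saved artifacts reproduce the in-memory results.")

    # Hypothetical example pair: feature dict keys match the training columns,
    # so the one-row DataFrame can be fed straight into predict_proba.
    demo_mrec = {'normalized_name': normalize_company_name('Musterfirma GmbH'),
                 'CRM Website': 'musterfirma.de', 'CRM Ort': 'berlin', 'CRM Land': 'deutschland'}
    demo_crec = {'normalized_name': normalize_company_name('Musterfirma Holding'),
                 'Kandidat Website': 'musterfirma.de', 'Kandidat Ort': 'berlin', 'Kandidat Land': 'deutschland'}
    demo_X = pd.DataFrame([create_features(demo_mrec, demo_crec, loaded_weights)])
    match_probability = loaded_model.predict_proba(demo_X)[0][1]
    logging.info(f"Demo pair match probability: {match_probability:.2%}")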