# Brancheneinstufung2/train_model.py
import pandas as pd
import numpy as np
import re
import math
import joblib
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
from thefuzz import fuzz
from collections import Counter
import logging
import sys
# Import the existing project helpers
from google_sheet_handler import GoogleSheetHandler
from helpers import normalize_company_name
# Logging Setup
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# --- Configuration ---
# NOTE: Make sure this file is your final comparison CSV, i.e. the one that
# contains the 'Best Match Option' column and the suggestions from the previous runs.
GOLD_STANDARD_FILE = 'matches4.csv'
CRM_SHEET_NAME = "CRM_Accounts"
MODEL_OUTPUT_FILE = 'xgb_model.json'
TERM_WEIGHTS_OUTPUT_FILE = 'term_weights.joblib'
# Column names from the comparison CSV
BEST_MATCH_COL = 'Best Match Option'
# Columns holding the suggestions produced by the previous matching runs
SUGGESTION_COLS = ['Ergebnis Lauf 1', 'Ergebnis Lauf 2', 'Ergebnis Lauf 3']  # Adjust these names to match your CSV!
# --- ... (all helper functions are unchanged from the previous version) ... ---
# --- Stop/city tokens ---
# (entries shorter than three characters are already removed by the length
# filter in clean_name_for_scoring, so they are listed here only for completeness)
STOP_TOKENS_BASE = {
    'gmbh', 'mbh', 'ag', 'kg', 'ug', 'ohg', 'se', 'co', 'kgaa', 'inc', 'llc', 'ltd', 'sarl', 'b.v', 'bv',
    'holding', 'gruppe', 'group', 'international', 'solutions', 'solution', 'service', 'services',
    # ... (remaining stop words)
}
CITY_TOKENS = set()
def _tokenize(s: str):
    if not s: return []
    return re.split(r"[^a-z0-9äöüß]+", str(s).lower())
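# Illustrative example:
#   _tokenize("Müller & Söhne GmbH") -> ['müller', 'söhne', 'gmbh']
# (leading/trailing separators would yield empty strings; those are dropped
# by the length filter in clean_name_for_scoring)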
def clean_name_for_scoring(norm_name: str):
    if not norm_name: return "", set()
    tokens = [t for t in _tokenize(norm_name) if len(t) >= 3]
    stop_union = STOP_TOKENS_BASE | CITY_TOKENS
    final_tokens = [t for t in tokens if t not in stop_union]
    return " ".join(final_tokens), set(final_tokens)
def choose_rarest_token(norm_name: str, term_weights: dict):
    # The rarest token is the one with the highest IDF-style weight.
    _, toks = clean_name_for_scoring(norm_name)
    if not toks: return None
    return max(toks, key=lambda t: term_weights.get(t, 0))
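# Illustrative example with hypothetical weights {'acme': 5.2, 'industrie': 0.8}:
#   choose_rarest_token("acme industrie", term_weights) -> 'acme'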
def create_features(mrec: dict, crec: dict, term_weights: dict):
    features = {}
    n1_raw = mrec.get('normalized_CRM Name', '')
    n2_raw = crec.get('normalized_name', '')  # the candidate name is now generic
    clean1, toks1 = clean_name_for_scoring(n1_raw)
    clean2, toks2 = clean_name_for_scoring(n2_raw)
    # Fuzzy string similarities on the raw and the cleaned names
    features['fuzz_ratio'] = fuzz.ratio(n1_raw, n2_raw)
    features['fuzz_partial_ratio'] = fuzz.partial_ratio(n1_raw, n2_raw)
    features['fuzz_token_set_ratio'] = fuzz.token_set_ratio(clean1, clean2)
    features['fuzz_token_sort_ratio'] = fuzz.token_sort_ratio(clean1, clean2)
    # Domain comparison: strip 'www.' and anything after the first slash
    domain1_raw = str(mrec.get('CRM Website', '')).lower()
    domain2_raw = str(crec.get('CRM Website', '')).lower()
    domain1 = domain1_raw.replace('www.', '').split('/')[0].strip()
    domain2 = domain2_raw.replace('www.', '').split('/')[0].strip()
    features['domain_match'] = 1 if domain1 and domain1 == domain2 else 0
    # Location agreement
    features['city_match'] = 1 if mrec.get('CRM Ort') and crec.get('CRM Ort') and mrec['CRM Ort'] == crec['CRM Ort'] else 0
    features['country_match'] = 1 if mrec.get('CRM Land') and crec.get('CRM Land') and mrec['CRM Land'] == crec['CRM Land'] else 0
    features['country_mismatch'] = 1 if (mrec.get('CRM Land') and crec.get('CRM Land') and mrec['CRM Land'] != crec['CRM Land']) else 0
    # Token-level overlap, weighted by rarity
    overlapping_tokens = toks1 & toks2
    rarest_token_mrec = choose_rarest_token(n1_raw, term_weights)
    features['rarest_token_overlap'] = 1 if rarest_token_mrec and rarest_token_mrec in toks2 else 0
    features['weighted_token_score'] = sum(term_weights.get(t, 0) for t in overlapping_tokens)
    features['jaccard_similarity'] = len(overlapping_tokens) / len(toks1 | toks2) if len(toks1 | toks2) > 0 else 0
    # Simple length-based signals
    features['name_len_diff'] = abs(len(n1_raw) - len(n2_raw))
    features['candidate_is_shorter'] = 1 if len(n2_raw) < len(n1_raw) else 0
    return features
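# Illustrative example for a hypothetical pair (fuzzy scores elided, empty weights):
#   create_features({'normalized_CRM Name': 'acme industrie'},
#                   {'normalized_name': 'acme industrie holding'},
#                   term_weights={})
#   -> {..., 'domain_match': 0, 'jaccard_similarity': 1.0, 'candidate_is_shorter': 0, ...}
# ('holding' is removed as a stop token, so the cleaned token sets are identical)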
# --- Main training script ---
if __name__ == "__main__":
    logging.info("Starting training process for duplicate checker v5.0")
    try:
        gold_df = pd.read_csv(GOLD_STANDARD_FILE, sep=';', encoding='utf-8')
        logging.info(f"Loaded {len(gold_df)} rows from gold-standard file '{GOLD_STANDARD_FILE}'.")
        logging.info("Connecting to Google Sheets to load the CRM data...")
        sheet_handler = GoogleSheetHandler()
        crm_df = sheet_handler.get_sheet_as_dataframe(CRM_SHEET_NAME)
        logging.info(f"Loaded {len(crm_df)} CRM accounts from Google Sheets.")
    except Exception as e:
        logging.critical(f"Error while loading the data: {e}")
        sys.exit(1)
    crm_df['normalized_name'] = crm_df['CRM Name'].astype(str).apply(normalize_company_name)
    gold_df['normalized_CRM Name'] = gold_df['CRM Name'].astype(str).apply(normalize_company_name)
    # IDF-style term weights: tokens that are rare across the CRM corpus get high weights
    token_counts = Counter(t for n in crm_df['normalized_name'] for t in clean_name_for_scoring(n)[1])
    term_weights = {token: math.log(len(crm_df) / (count + 1)) for token, count in token_counts.items()}
    logging.info(f"Computed {len(term_weights)} term weights.")
logging.info("Erstelle Features für den Trainingsdatensatz...")
features_list = []
labels = []
# Mache das CRM für schnelle Lookups verfügbar
crm_lookup = crm_df.set_index('CRM Name').to_dict('index')
for _, row in gold_df.iterrows():
mrec = row.to_dict() # Die zu prüfende Firma
# 1. Positives Beispiel: Der von dir definierte "Best Match"
best_match_name = row[BEST_MATCH_COL]
if pd.notna(best_match_name) and str(best_match_name).strip() != '' and best_match_name in crm_lookup:
crec_positive = crm_lookup[best_match_name]
features = create_features(mrec, crec_positive, term_weights)
features_list.append(features)
labels.append(1)
# 2. Negative Beispiele: Die falschen Vorschläge der alten Algorithmen
for col_name in SUGGESTION_COLS:
if col_name in row and pd.notna(row[col_name]):
suggestion_name = row[col_name]
# Wenn der Vorschlag nicht der korrekte ist UND im CRM existiert
if suggestion_name != best_match_name and suggestion_name in crm_lookup:
crec_negative = crm_lookup[suggestion_name]
features = create_features(mrec, crec_negative, term_weights)
features_list.append(features)
labels.append(0)
    X = pd.DataFrame(features_list)
    y = np.array(labels)
    if len(X) == 0:
        logging.critical("No valid training data found.")
        sys.exit(1)
    logging.info(f"Built training set with {X.shape[0]} examples and {X.shape[1]} features.")
    logging.info(f"Class distribution: {Counter(y)}")
    logging.info("Training the XGBoost model...")
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
    # Compensate for class imbalance: ratio of negatives to positives
    scale_pos_weight = (len(y_train) - sum(y_train)) / sum(y_train) if sum(y_train) > 0 else 1
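    # Worked example: 300 negatives and 100 positives in the training split give
    # scale_pos_weight = 300 / 100 = 3.0, i.e. positive examples are upweighted 3x.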
    # NOTE: use_label_encoder was deprecated and later removed from XGBoost,
    # so it is no longer passed here.
    model = xgb.XGBClassifier(eval_metric='logloss', scale_pos_weight=scale_pos_weight)
    model.fit(X_train, y_train)
    logging.info("Model trained successfully.")
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    logging.info("\n--- Validation results ---")
    logging.info(f"Accuracy on test data: {accuracy:.2%}")
    logging.info("Detailed report:")
    logging.info("\n" + classification_report(y_test, y_pred, zero_division=0))
    model.save_model(MODEL_OUTPUT_FILE)
    joblib.dump(term_weights, TERM_WEIGHTS_OUTPUT_FILE)
    logging.info(f"Saved model to '{MODEL_OUTPUT_FILE}' and term weights to '{TERM_WEIGHTS_OUTPUT_FILE}'.")