diff --git a/train_model.py b/train_model.py
index 77351026..cc23d7e0 100644
--- a/train_model.py
+++ b/train_model.py
@@ -4,43 +4,33 @@
 import re
 import math
 import joblib
 import xgboost as xgb
-import treelite
-import treelite_runtime
 from sklearn.model_selection import train_test_split
 from sklearn.metrics import accuracy_score, classification_report
 from thefuzz import fuzz
 from collections import Counter
 import logging
 import sys
-import os
 
-# Import your existing helpers
 from google_sheet_handler import GoogleSheetHandler
 from helpers import normalize_company_name
 
-# Logging setup
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[logging.StreamHandler(sys.stdout)])
 log = logging.getLogger()
 
-# --- Configuration ---
 GOLD_STANDARD_FILE = 'erweitertes_matching.csv'
 CRM_SHEET_NAME = "CRM_Accounts"
 MODEL_OUTPUT_FILE = 'xgb_model.json'
 TERM_WEIGHTS_OUTPUT_FILE = 'term_weights.joblib'
 CRM_PREDICTION_FILE = 'crm_for_prediction.pkl'
-TREELITE_MODEL_FILE = 'xgb_model.treelite'
-
 BEST_MATCH_COL = 'Best Match Option'
 SUGGESTION_COLS = ['V2_Match_Suggestion', 'V3_Match_Suggestion', 'V4_Match_Suggestion']
 
-# --- Stop/city tokens ---
 STOP_TOKENS_BASE = {
     'gmbh','mbh','ag','kg','ug','ohg','se','co','kgaa','inc','llc','ltd','sarl', 'b.v', 'bv',
     'holding','gruppe','group','international','solutions','solution','service','services',
 }
 CITY_TOKENS = set()
 
-# --- Helper functions ---
 def _tokenize(s: str):
     if not s: return []
     return re.split(r"[^a-z0-9äöüß]+", str(s).lower())
@@ -91,35 +81,22 @@ def create_features(mrec: dict, crec: dict, term_weights: dict):
     return features
 
-# --- Main training script ---
 if __name__ == "__main__":
     log.info("Starting training process for duplicate checker v5.0")
 
     try:
-        log.info(f"Attempting to load gold standard file: '{GOLD_STANDARD_FILE}'")
-        if not os.path.exists(GOLD_STANDARD_FILE):
-            log.critical(f"ERROR: File '{GOLD_STANDARD_FILE}' was not found.")
-            sys.exit(1)
         gold_df = pd.read_csv(GOLD_STANDARD_FILE, sep=';', encoding='utf-8')
-        log.info(f"Loaded {len(gold_df)} rows from '{GOLD_STANDARD_FILE}'.")
-
-        log.info("Connecting to Google Sheets to load CRM data...")
         sheet_handler = GoogleSheetHandler()
         crm_df = sheet_handler.get_sheet_as_dataframe(CRM_SHEET_NAME)
-        log.info(f"Loaded {len(crm_df)} CRM accounts from Google Sheets.")
     except Exception as e:
         log.critical(f"Error while loading data: {e}")
         sys.exit(1)
 
-    log.info("Removing duplicates from CRM data for training...")
     crm_df.drop_duplicates(subset=['CRM Name'], keep='first', inplace=True)
-    log.info(f"Reduced CRM data to {len(crm_df)} unique company names.")
 
     crm_df['normalized_name'] = crm_df['CRM Name'].astype(str).apply(normalize_company_name)
     gold_df['normalized_CRM Name'] = gold_df['CRM Name'].astype(str).apply(normalize_company_name)
 
-    log.info("Computing term weights (TF-IDF)...")
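+    # IDF-style weight per token: log(N / (doc_count + 1)), where N is the number of unique
+    # CRM names and doc_count is how many normalized names contain the token, so rare,
+    # distinctive tokens contribute more to the match score.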
     term_weights = {token: math.log(len(crm_df) / (count + 1)) for token, count in Counter(t for n in crm_df['normalized_name'] for t in set(clean_name_for_scoring(n)[1])).items()}
 
-    log.info("Building features for the training dataset...")
     features_list, labels = [], []
     crm_lookup = crm_df.set_index('CRM Name').to_dict('index')
@@ -140,29 +117,16 @@ if __name__ == "__main__":
     X, y = pd.DataFrame(features_list), np.array(labels)
     log.info(f"Training dataset built with {X.shape[0]} examples. Class distribution: {Counter(y)}")
 
-    log.info("Training the XGBoost model...")
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
     scale_pos_weight = sum(y_train == 0) / sum(y_train) if sum(y_train) > 0 else 1
     model = xgb.XGBClassifier(use_label_encoder=False, eval_metric='logloss', scale_pos_weight=scale_pos_weight)
     model.fit(X_train, y_train)
     log.info("Model trained successfully.")
-
-    log.info("Validating model on test data...")
+
     y_pred = model.predict(X_test)
     log.info(f"\n--- Validation results ---\nAccuracy: {accuracy_score(y_test, y_pred):.2%}\n" + classification_report(y_test, y_pred, zero_division=0))
 
-    try:
-        model.save_model(MODEL_OUTPUT_FILE)
-        log.info(f"Model saved to '{MODEL_OUTPUT_FILE}'.")
-
-        # FIX: use the internal booster for Treelite
-        treelite_model = treelite.Model.from_xgboost(model.get_booster())
-        treelite_model.export_lib(toolchain='gcc', libpath=TREELITE_MODEL_FILE, params={'parallel_comp': 4}, verbose=True)
-        log.info(f"Lightweight model saved to '{TREELITE_MODEL_FILE}'.")
-
-        joblib.dump(term_weights, TERM_WEIGHTS_OUTPUT_FILE)
-        log.info(f"Term weights saved to '{TERM_WEIGHTS_OUTPUT_FILE}'.")
-        crm_df.to_pickle(CRM_PREDICTION_FILE)
-        log.info(f"CRM data saved to '{CRM_PREDICTION_FILE}'.")
-    except Exception as e:
-        log.critical(f"ERROR WHILE SAVING FILES: {e}")
\ No newline at end of file
+    model.save_model(MODEL_OUTPUT_FILE)
+    joblib.dump(term_weights, TERM_WEIGHTS_OUTPUT_FILE)
+    crm_df.to_pickle(CRM_PREDICTION_FILE)
+    log.info("All model files created successfully.")
\ No newline at end of file