# Source: Brancheneinstufung2/train_model.py
# Retrieved: 2025-09-24 15:47:22 +00:00 (115 lines, 5.0 KiB, Python)
import pandas as pd
import numpy as np
import re
import math
import joblib
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
from thefuzz import fuzz
from collections import Counter
import logging
import sys
from google_sheet_handler import GoogleSheetHandler
from helpers import normalize_company_name
# Log to stdout so container/CI output captures everything.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[logging.StreamHandler(sys.stdout)])
log = logging.getLogger()

# Input/output artefacts of the training run.
GOLD_STANDARD_FILE = 'erweitertes_matching.csv'  # labelled match pairs (gold standard)
CRM_SHEET_NAME = "CRM_Accounts"
MODEL_OUTPUT_FILE = 'xgb_model.json'             # native XGBoost model dump
TERM_WEIGHTS_OUTPUT_FILE = 'term_weights.joblib' # per-token rarity weights
CRM_PREDICTION_FILE = 'crm_for_prediction.pkl'   # pickled CRM frame for inference

# Column names expected in the gold-standard sheet.
BEST_MATCH_COL = 'Best Match Option'
SUGGESTION_COLS = ['V2_Match_Suggestion', 'V3_Match_Suggestion', 'V4_Match_Suggestion']

# Tokens ignored when comparing company names: legal forms and generic
# business words that carry no discriminative signal.
# NOTE(review): 'b.v' contains a dot, but _tokenize splits on every
# non-alphanumeric character, so that entry can never match a token --
# only the dot-free 'bv' is effective. Confirm intent.
STOP_TOKENS_BASE = {
'gmbh','mbh','ag','kg','ug','ohg','se','co','kgaa','inc','llc','ltd','sarl', 'b.v', 'bv',
'holding','gruppe','group','international','solutions','solution','service','services',
}
# City names to additionally suppress; empty here -- presumably populated
# elsewhere at runtime. TODO confirm.
CITY_TOKENS = set()
def _tokenize(s: str):
if not s: return []
return re.split(r"[^a-z0-9äöüß]+", str(s).lower())
def clean_name_for_scoring(norm_name: str):
    """Prepare a normalized company name for fuzzy scoring.

    Tokenizes *norm_name*, drops tokens shorter than three characters and
    any token found in the stop-word sets (legal forms, generic business
    words, city names), and returns both a space-joined string and the set
    of surviving tokens.
    """
    if not norm_name:
        return "", set()
    blocked = STOP_TOKENS_BASE | CITY_TOKENS
    kept = []
    for tok in _tokenize(norm_name):
        if len(tok) >= 3 and tok not in blocked:
            kept.append(tok)
    return " ".join(kept), set(kept)
def choose_rarest_token(norm_name: str, term_weights: dict):
    """Return the token of *norm_name* with the highest term weight.

    Tokens absent from *term_weights* count as weight 0; returns None when
    cleaning leaves no tokens at all.
    """
    _, token_set = clean_name_for_scoring(norm_name)
    if not token_set:
        return None
    best_token = None
    best_weight = float("-inf")
    for tok in token_set:
        weight = term_weights.get(tok, 0)
        if weight > best_weight:
            best_token, best_weight = tok, weight
    return best_token
def _extract_domain(url: str) -> str:
    """Reduce a URL-ish string to its bare host name for equality checks."""
    host = str(url).lower().strip()
    # Guard against pandas NaN/None stringified values: previously
    # str(nan) -> 'nan' on both sides produced a spurious domain match.
    if host in ('nan', 'none'):
        return ''
    # FIX: strip the scheme. The old code only did .split('/')[0], so any
    # two scheme-prefixed URLs both reduced to "https:" and always matched.
    if '://' in host:
        host = host.split('://', 1)[1]
    host = host.split('/')[0].strip()
    # FIX: remove 'www.' only as a prefix; .replace() removed it anywhere.
    if host.startswith('www.'):
        host = host[len('www.'):]
    return host

def create_features(mrec: dict, crec: dict, term_weights: dict):
    """Build the feature vector for one (master record, candidate) pair.

    Parameters
    ----------
    mrec : dict
        Master record; read keys: 'normalized_CRM Name', 'CRM Website',
        'CRM Ort', 'CRM Land'.
    crec : dict
        Candidate record; read keys: 'normalized_name', 'CRM Website',
        'CRM Ort', 'CRM Land'.
    term_weights : dict
        Token -> rarity weight mapping (missing tokens count as 0).

    Returns
    -------
    dict mapping feature name -> numeric value.
    """
    features = {}
    n1_raw = mrec.get('normalized_CRM Name', '')
    n2_raw = crec.get('normalized_name', '')
    clean1, toks1 = clean_name_for_scoring(n1_raw)
    clean2, toks2 = clean_name_for_scoring(n2_raw)

    # Fuzzy-string similarities: raw names for character-level ratios,
    # stop-word-cleaned names for token-based ratios.
    features['fuzz_ratio'] = fuzz.ratio(n1_raw, n2_raw)
    features['fuzz_partial_ratio'] = fuzz.partial_ratio(n1_raw, n2_raw)
    features['fuzz_token_set_ratio'] = fuzz.token_set_ratio(clean1, clean2)
    features['fuzz_token_sort_ratio'] = fuzz.token_sort_ratio(clean1, clean2)

    # Exact host-name match on the website fields (empty hosts never match).
    domain1 = _extract_domain(mrec.get('CRM Website', ''))
    domain2 = _extract_domain(crec.get('CRM Website', ''))
    features['domain_match'] = 1 if domain1 and domain1 == domain2 else 0

    # Location agreement flags; only computed when both sides are present.
    features['city_match'] = 1 if mrec.get('CRM Ort') and crec.get('CRM Ort') and mrec['CRM Ort'] == crec['CRM Ort'] else 0
    features['country_match'] = 1 if mrec.get('CRM Land') and crec.get('CRM Land') and mrec['CRM Land'] == crec['CRM Land'] else 0
    features['country_mismatch'] = 1 if (mrec.get('CRM Land') and crec.get('CRM Land') and mrec['CRM Land'] != crec['CRM Land']) else 0

    # Token-overlap features weighted by term rarity.
    overlapping_tokens = toks1 & toks2
    rarest_token_mrec = choose_rarest_token(n1_raw, term_weights)
    features['rarest_token_overlap'] = 1 if rarest_token_mrec and rarest_token_mrec in toks2 else 0
    features['weighted_token_score'] = sum(term_weights.get(t, 0) for t in overlapping_tokens)
    features['jaccard_similarity'] = len(overlapping_tokens) / len(toks1 | toks2) if len(toks1 | toks2) > 0 else 0

    # Length-based heuristics on the raw normalized names.
    features['name_len_diff'] = abs(len(n1_raw) - len(n2_raw))
    features['candidate_is_shorter'] = 1 if len(n2_raw) < len(n1_raw) else 0
    return features
if __name__ == "__main__":
    # ... (the entire training process up to saving) ...
    # NOTE(review): the elided section above must define `model`, `X_test`,
    # `y_test`, `term_weights` and `crm_df`, and import `treelite` --
    # `treelite` is NOT imported at the top of this file, so the export
    # below raises NameError at runtime unless the elided code imports it.
    log.info("Modell erfolgreich trainiert.")
    # Evaluate on the held-out split and log accuracy plus a per-class report.
    y_pred = model.predict(X_test)
    log.info(f"\n--- Validierungsergebnis ---\nGenauigkeit: {accuracy_score(y_test, y_pred):.2%}\n" + classification_report(y_test, y_pred, zero_division=0))
    try:
        # Persist the native XGBoost model.
        model.save_model(MODEL_OUTPUT_FILE)
        log.info(f"Modell in '{MODEL_OUTPUT_FILE}' gespeichert.")
        # <<< FIX: we now export in the correct format (.so for Linux) >>>
        TREELITE_MODEL_SO_FILE = 'xgb_model.so'
        treelite_model = treelite.Model.from_xgboost(model.get_booster())
        # This call compiles the model into a native shared library.
        treelite_model.export_lib(
            toolchain='gcc',
            libpath=TREELITE_MODEL_SO_FILE,
            params={'parallel_comp': 4},  # use multiple CPU cores
            verbose=True
        )
        log.info(f"Kompiliertes Modell in '{TREELITE_MODEL_SO_FILE}' gespeichert.")
        # Persist the term-rarity weights and the CRM frame used at inference.
        joblib.dump(term_weights, TERM_WEIGHTS_OUTPUT_FILE)
        log.info(f"Wortgewichte in '{TERM_WEIGHTS_OUTPUT_FILE}' gespeichert.")
        crm_df.to_pickle(CRM_PREDICTION_FILE)
        log.info(f"CRM-Daten in '{CRM_PREDICTION_FILE}' gespeichert.")
        log.info("Alle Dateien wurden erfolgreich erstellt.")
    except Exception as e:
        # Broad catch is acceptable here: this is the top-level script
        # boundary and the failure is logged as critical.
        log.critical(f"FEHLER BEIM SPEICHERN DER DATEIEN: {e}")