train_model.py updated
NOTE: this file is a flattened copy of a commit-diff web page, not clean source.
@@ -19,27 +19,30 @@ from helpers import normalize_company_name
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- Configuration ---
# NOTE: make sure this file is your final comparison CSV — it must contain the
# 'Best Match Option' column plus the suggestion columns of the older runs.
# Adjust the name if your file is called differently (e.g. 'matches4.csv').
# (The diff previously carried a dead earlier assignment 'matches4.csv';
#  only the value below is effective.)
GOLD_STANDARD_FILE = 'erweitertes_matching.csv'
CRM_SHEET_NAME = "CRM_Accounts"
MODEL_OUTPUT_FILE = 'xgb_model.json'
TERM_WEIGHTS_OUTPUT_FILE = 'term_weights.joblib'

# Column names from the comparison CSV.
# IMPORTANT: these must match the CSV headers exactly!
BEST_MATCH_COL = 'Best Match Option'
# Columns holding match suggestions produced by older algorithm runs.
# Add every column you want to use as a source of "old suggestions"
# (negative training examples).
SUGGESTION_COLS = ['V2_Match_Suggestion', 'V3_Match_Suggestion', 'V4_Match_Suggestion']

# --- Stop-/city tokens ---
# Legal forms and generic business words stripped from company names before
# fuzzy scoring. ('international' appeared twice in the original literal —
# set literals dedupe silently, so it is listed once here.)
STOP_TOKENS_BASE = {
    'gmbh', 'mbh', 'ag', 'kg', 'ug', 'ohg', 'se', 'co', 'kgaa', 'inc', 'llc',
    'ltd', 'sarl', 'b.v', 'bv',
    'holding', 'gruppe', 'group', 'international', 'solutions', 'solution',
    'service', 'services',
    'deutschland', 'austria', 'germany', 'technik', 'technology',
    'technologies', 'systems', 'systeme',
    'logistik', 'logistics', 'industries', 'industrie', 'management',
    'consulting', 'vertrieb', 'handel',
    'company', 'gesellschaft', 'mbh&co', 'mbhco', 'werke', 'werk',
}
# City names may be added here to strip them from company names as well;
# empty by default.
CITY_TOKENS = set()
|
||||
|
||||
# --- Hilfsfunktionen ---
|
||||
def _tokenize(s: str):
|
||||
if not s: return []
|
||||
return re.split(r"[^a-z0-9äöüß]+", str(s).lower())
|
||||
@@ -50,7 +53,7 @@ def clean_name_for_scoring(norm_name: str):
|
||||
stop_union = STOP_TOKENS_BASE | CITY_TOKENS
|
||||
final_tokens = [t for t in tokens if t not in stop_union]
|
||||
return " ".join(final_tokens), set(final_tokens)
|
||||
|
||||
|
||||
def choose_rarest_token(norm_name: str, term_weights: dict):
|
||||
_, toks = clean_name_for_scoring(norm_name)
|
||||
if not toks: return None
|
||||
@@ -59,7 +62,7 @@ def choose_rarest_token(norm_name: str, term_weights: dict):
|
||||
def create_features(mrec: dict, crec: dict, term_weights: dict):
|
||||
features = {}
|
||||
n1_raw = mrec.get('normalized_CRM Name', '')
|
||||
n2_raw = crec.get('normalized_name', '') # Kandidaten-Name ist jetzt generisch
|
||||
n2_raw = crec.get('normalized_name', '')
|
||||
clean1, toks1 = clean_name_for_scoring(n1_raw)
|
||||
clean2, toks2 = clean_name_for_scoring(n2_raw)
|
||||
|
||||
@@ -69,7 +72,7 @@ def create_features(mrec: dict, crec: dict, term_weights: dict):
|
||||
features['fuzz_token_sort_ratio'] = fuzz.token_sort_ratio(clean1, clean2)
|
||||
|
||||
domain1_raw = str(mrec.get('CRM Website', '')).lower()
|
||||
domain2_raw = str(crec.get('CRM Website', '')).lower()
|
||||
domain2_raw = str(crec.get('CRM Website', '')).lower() # crec ist jetzt ein dict aus dem CRM
|
||||
domain1 = domain1_raw.replace('www.', '').split('/')[0].strip()
|
||||
domain2 = domain2_raw.replace('www.', '').split('/')[0].strip()
|
||||
features['domain_match'] = 1 if domain1 and domain1 == domain2 else 0
|
||||
@@ -102,6 +105,7 @@ if __name__ == "__main__":
|
||||
sheet_handler = GoogleSheetHandler()
|
||||
crm_df = sheet_handler.get_sheet_as_dataframe(CRM_SHEET_NAME)
|
||||
logging.info(f"{len(crm_df)} CRM Accounts aus Google Sheets geladen.")
|
||||
|
||||
except Exception as e:
|
||||
logging.critical(f"Fehler beim Laden der Daten: {e}")
|
||||
sys.exit(1)
|
||||
@@ -116,14 +120,13 @@ if __name__ == "__main__":
|
||||
features_list = []
|
||||
labels = []
|
||||
|
||||
# Mache das CRM für schnelle Lookups verfügbar
|
||||
crm_lookup = crm_df.set_index('CRM Name').to_dict('index')
|
||||
|
||||
for _, row in gold_df.iterrows():
|
||||
mrec = row.to_dict() # Die zu prüfende Firma
|
||||
mrec = row.to_dict()
|
||||
|
||||
# 1. Positives Beispiel: Der von dir definierte "Best Match"
|
||||
best_match_name = row[BEST_MATCH_COL]
|
||||
best_match_name = row.get(BEST_MATCH_COL)
|
||||
if pd.notna(best_match_name) and str(best_match_name).strip() != '' and best_match_name in crm_lookup:
|
||||
crec_positive = crm_lookup[best_match_name]
|
||||
features = create_features(mrec, crec_positive, term_weights)
|
||||
@@ -134,7 +137,6 @@ if __name__ == "__main__":
|
||||
for col_name in SUGGESTION_COLS:
|
||||
if col_name in row and pd.notna(row[col_name]):
|
||||
suggestion_name = row[col_name]
|
||||
# Wenn der Vorschlag nicht der korrekte ist UND im CRM existiert
|
||||
if suggestion_name != best_match_name and suggestion_name in crm_lookup:
|
||||
crec_negative = crm_lookup[suggestion_name]
|
||||
features = create_features(mrec, crec_negative, term_weights)
|
||||
@@ -145,7 +147,7 @@ if __name__ == "__main__":
|
||||
y = np.array(labels)
|
||||
|
||||
if len(X) == 0:
|
||||
logging.critical("Keine gültigen Trainingsdaten gefunden.")
|
||||
logging.critical("Keine gültigen Trainingsdaten gefunden. Überprüfe die Spaltennamen in der Konfiguration.")
|
||||
sys.exit(1)
|
||||
|
||||
logging.info(f"Trainingsdatensatz erstellt mit {X.shape[0]} Beispielen und {X.shape[1]} Features.")
|
||||
|
||||
(End of pasted commit-diff page; the "Reference in New Issue / Block a user"
footer was web-UI chrome, not part of train_model.py.)