duplicate_checker.py updated

2025-09-10 08:13:02 +00:00
parent 710fe9b973
commit 2fcc7d3a06


@@ -2,11 +2,9 @@
# Build timestamp is injected into logfile name.
# --- FEATURES v5.0 ---
# - NEW: Integration of a trained machine learning model (XGBoost) for the match decision.
# - The model was trained on the "gold standard" dataset provided by the user.
# - Feature engineering: ~15 features are computed for each comparison and serve as the model's input.
# - Integration of a trained machine learning model (XGBoost) for the match decision.
# - The old heuristic scoring logic has been fully replaced by the ML model.
# - The result is data-driven, high-precision duplicate detection with a >80% hit rate.
# - The result is data-driven, high-precision duplicate detection.
import os
import sys
@@ -25,11 +23,6 @@ from helpers import normalize_company_name, simple_normalize_url
from config import Config
from google_sheet_handler import GoogleSheetHandler
# Important note: this script requires the trained model files:
# - 'xgb_model.json' (the XGBoost model)
# - 'term_weights.joblib' (the learned term weights)
# These files must be located in the same directory as the script.
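# A minimal sketch (not part of this commit) of one plausible way to produce the
# two files above. The labeled-pairs CSV, its column names, and the IDF-style
# token weighting are assumptions, not the project's actual training code.
import math
from collections import Counter
import joblib
import pandas as pd
import xgboost as xgb

def train_and_export(pairs_csv='gold_standard_pairs.csv', company_names=()):
    df = pd.read_csv(pairs_csv)                      # assumed: feature columns + 'label' (0/1)
    X, y = df.drop(columns=['label']), df['label']
    clf = xgb.XGBClassifier(n_estimators=300, max_depth=4, eval_metric='logloss')
    clf.fit(X, y)
    clf.save_model('xgb_model.json')                 # loaded below via model.load_model(MODEL_FILE)
    if company_names:                                # IDF-style weights: rare tokens score high
        doc_freq = Counter(t for name in company_names for t in set(name.split()))
        weights = {t: math.log(len(company_names) / c) for t, c in doc_freq.items()}
        joblib.dump(weights, 'term_weights.joblib')  # loaded below via joblib.load(TERM_WEIGHTS_FILE)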
STATUS_DIR = "job_status"
def update_status(job_id, status, progress_message):
@@ -51,12 +44,10 @@ LOG_DIR = "Log"
now = datetime.now().strftime('%Y-%m-%d_%H-%M')
LOG_FILE = f"{now}_duplicate_check_v5.0.txt"
# ML model configuration
MODEL_FILE = 'xgb_model.json'
TERM_WEIGHTS_FILE = 'term_weights.joblib'
PREDICTION_THRESHOLD = 0.6 # probability at or above which a match counts as "confident"
PREDICTION_THRESHOLD = 0.5 # probability at or above which a match counts as "confident"
# Prefilter
PREFILTER_MIN_PARTIAL = 65
PREFILTER_LIMIT = 50
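# Illustrative numbers only (not from this commit): lowering PREDICTION_THRESHOLD
# from 0.6 to 0.5 accepts more borderline candidates, trading some precision for recall.
_example_probs = [0.45, 0.55, 0.62]
_matches_at_0_60 = [p for p in _example_probs if p >= 0.60]  # [0.62]
_matches_at_0_50 = [p for p in _example_probs if p >= 0.50]  # [0.55, 0.62]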
@@ -79,7 +70,6 @@ logger = logging.getLogger(__name__)
logger.info(f"Logging to console and file: {log_path}")
logger.info(f"Starting duplicate_checker.py v5.0 | Build: {now}")
# --- Stop/city tokens ---
STOP_TOKENS_BASE = {
'gmbh','mbh','ag','kg','ug','ohg','se','co','kgaa','inc','llc','ltd','sarl', 'b.v', 'bv',
@@ -107,29 +97,24 @@ def choose_rarest_token(norm_name: str, term_weights: dict):
if not toks: return None
return max(toks, key=lambda t: term_weights.get(t, 0))
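# Illustrative usage with made-up weights (higher weight = rarer token): the
# distinctive token should win over generic legal-form tokens with near-zero weight.
_rarest = choose_rarest_token('acme widgets gmbh', {'acme': 8.2, 'widgets': 3.1, 'gmbh': 0.0})
# expected: 'acme'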
# --- NEW: Feature engineering function ---
# --- Feature engineering function ---
def create_features(mrec: dict, crec: dict, term_weights: dict):
"""Berechnet alle Merkmale für das ML-Modell für ein gegebenes Paar."""
features = {}
n1_raw = mrec.get('normalized_name', '')
n2_raw = crec.get('normalized_name', '')
clean1, toks1 = clean_name_for_scoring(n1_raw)
clean2, toks2 = clean_name_for_scoring(n2_raw)
# Name features
features['fuzz_ratio'] = fuzz.ratio(n1_raw, n2_raw)
features['fuzz_partial_ratio'] = fuzz.partial_ratio(n1_raw, n2_raw)
features['fuzz_token_set_ratio'] = fuzz.token_set_ratio(clean1, clean2)
features['fuzz_token_sort_ratio'] = fuzz.token_sort_ratio(clean1, clean2)
# Domain & location features
features['domain_match'] = 1 if mrec.get('normalized_domain') and mrec.get('normalized_domain') == crec.get('normalized_domain') else 0
features['city_match'] = 1 if mrec.get('CRM Ort') and mrec.get('CRM Ort') == crec.get('CRM Ort') else 0
features['country_match'] = 1 if mrec.get('CRM Land') and mrec.get('CRM Land') == crec.get('CRM Land') else 0
features['city_match'] = 1 if mrec.get('CRM Ort') and crec.get('CRM Ort') and mrec.get('CRM Ort') == crec.get('CRM Ort') else 0
features['country_match'] = 1 if mrec.get('CRM Land') and crec.get('CRM Land') and mrec.get('CRM Land') == crec.get('CRM Land') else 0
features['country_mismatch'] = 1 if (mrec.get('CRM Land') and crec.get('CRM Land') and mrec.get('CRM Land') != crec.get('CRM Land')) else 0
# Token-based features
overlapping_tokens = toks1 & toks2
rarest_token_mrec = choose_rarest_token(n1_raw, term_weights)
@@ -137,7 +122,6 @@ def create_features(mrec: dict, crec: dict, term_weights: dict):
features['weighted_token_score'] = sum(term_weights.get(t, 0) for t in overlapping_tokens)
features['jaccard_similarity'] = len(overlapping_tokens) / len(toks1 | toks2) if len(toks1 | toks2) > 0 else 0
# Length features
features['name_len_diff'] = abs(len(n1_raw) - len(n2_raw))
features['candidate_is_shorter'] = 1 if len(n2_raw) < len(n1_raw) else 0
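# Illustrative call with made-up records (real ones come from the sheets loaded
# in main() below); the keys mirror the fields accessed above:
_example = create_features(
    {'normalized_name': 'acme widgets', 'normalized_domain': 'acme.de',
     'CRM Ort': 'Berlin', 'CRM Land': 'DE'},
    {'normalized_name': 'acme widgets gmbh', 'normalized_domain': 'acme.de',
     'CRM Ort': 'Berlin', 'CRM Land': 'DE'},
    {'acme': 8.2, 'widgets': 3.1},
)  # -> dict of ~15 numeric features, e.g. fuzz_ratio, domain_match=1, city_match=1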
@@ -159,18 +143,16 @@ def build_indexes(crm_df: pd.DataFrame):
def main(job_id=None):
logger.info("Starte Duplikats-Check v5.0 (Machine Learning Model)")
# --- NEU: Lade das trainierte Modell und die Wortgewichte ---
try:
model = xgb.XGBClassifier()
model.load_model(MODEL_FILE)
term_weights = joblib.load(TERM_WEIGHTS_FILE)
logger.info("Machine-Learning-Modell und Wortgewichte erfolgreich geladen.")
except Exception as e:
logger.critical(f"Konnte Modelldateien nicht laden. Stelle sicher, dass '{MODEL_FILE}' und '{TERM_WEIGHTS_FILE}' vorhanden sind. Fehler: {e}")
logger.critical(f"Konnte Modelldateien nicht laden. Fehler: {e}")
update_status(job_id, "Fehlgeschlagen", f"Modelldateien nicht gefunden: {e}")
sys.exit(1)
# Load and prepare data
try:
sheet = GoogleSheetHandler()
crm_df = sheet.get_sheet_as_dataframe(CRM_SHEET_NAME)
@@ -207,7 +189,6 @@ def main(job_id=None):
logger.info(progress_message)
if processed % 10 == 0 or processed == total: update_status(job_id, "Running", progress_message)
# Candidate search (blocking)
candidate_indices = set()
if mrow.get('normalized_domain'):
candidates_from_domain = domain_index.get(mrow['normalized_domain'], [])
@@ -218,11 +199,12 @@ def main(job_id=None):
except Exception: continue
rarest_token_mrec = choose_rarest_token(mrow.get('normalized_name',''), term_weights)
if not candidate_indices and rarest_token_mrec:
if len(candidate_indices) < 5 and rarest_token_mrec:
candidate_indices.update(token_index.get(rarest_token_mrec, []))
if len(candidate_indices) < 5: # prefilter if there are too few candidates
pf = sorted([(fuzz.partial_ratio(clean_name_for_scoring(mrow.get('normalized_name',''))[0], clean_name_for_scoring(r.get('normalized_name',''))[0]), i) for i, r in enumerate(crm_records)], key=lambda x: x[0], reverse=True)
if len(candidate_indices) < 5:
clean1, _ = clean_name_for_scoring(mrow.get('normalized_name',''))
pf = sorted([(fuzz.partial_ratio(clean1, clean_name_for_scoring(r.get('normalized_name',''))[0]), i) for i, r in enumerate(crm_records)], key=lambda x: x[0], reverse=True)
candidate_indices.update([i for score, i in pf if score >= PREFILTER_MIN_PARTIAL][:PREFILTER_LIMIT])
candidates = [crm_records[i] for i in candidate_indices]
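# For context (a sketch only; build_indexes' body lies outside this diff): the
# blocking step above only needs two inverted indexes over crm_records, roughly:
from collections import defaultdict

def build_indexes_sketch(crm_records):
    domain_index, token_index = defaultdict(list), defaultdict(list)
    for i, rec in enumerate(crm_records):
        if rec.get('normalized_domain'):
            domain_index[rec['normalized_domain']].append(i)      # normalized_domain -> row indices
        for tok in clean_name_for_scoring(rec.get('normalized_name', ''))[1]:
            token_index[tok].append(i)                            # name token -> row indices
    return domain_index, token_index
# Lookups in the loop above are then O(1) per key instead of a scan over all records.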
@@ -230,45 +212,31 @@ def main(job_id=None):
results.append({'Match':'', 'Score':0, 'Match_Grund':'no candidates'})
continue
# --- NEW: Prediction with the ML model ---
feature_list = []
for cr in candidates:
features = create_features(mrow, cr, term_weights)
feature_list.append(features)
feature_df = pd.DataFrame(feature_list)
# Make sure the column order matches the one used during training
feature_df = feature_df[model.feature_names_in_]
# Predict the probability of a match (class 1)
probabilities = model.predict_proba(feature_df)[:, 1]
scored_candidates = []
for i, prob in enumerate(probabilities):
scored_candidates.append({
'name': candidates[i].get('CRM Name', ''),
'score': prob, # the score is now the probability
'record': candidates[i]
})
scored_candidates.append({'name': candidates[i].get('CRM Name', ''), 'score': prob, 'record': candidates[i]})
scored_candidates.sort(key=lambda x: x['score'], reverse=True)
best_match = scored_candidates[0] if scored_candidates else None
# Final decision based on the threshold
if best_match and best_match['score'] >= PREDICTION_THRESHOLD:
results.append({
'Match': best_match['name'],
'Score': round(best_match['score'] * 100), # display as a percentage
'Match_Grund': f"ML Prediction: {round(best_match['score']*100)}%"
})
results.append({'Match': best_match['name'], 'Score': round(best_match['score'] * 100), 'Match_Grund': f"ML Confidence: {round(best_match['score']*100)}%"})
logger.info(f" --> Match: '{best_match['name']}' (Confidence: {round(best_match['score']*100)}%)")
else:
score_val = round(best_match['score'] * 100) if best_match else 0
results.append({'Match':'', 'Score': score_val, 'Match_Grund': f"Below Threshold ({PREDICTION_THRESHOLD*100}%)"})
results.append({'Match':'', 'Score': score_val, 'Match_Grund': f"Below Threshold ({int(PREDICTION_THRESHOLD*100)}%)"})
logger.info(f" --> No Match (Confidence: {score_val}%)")
# Write the results back
logger.info("Matching process finished. Writing results...")
result_df = pd.DataFrame(results)
final_df = pd.concat([match_df.reset_index(drop=True), result_df.reset_index(drop=True)], axis=1)
@@ -290,7 +258,7 @@ if __name__=='__main__':
parser.add_argument("--job-id", type=str, help="Eindeutige ID für den Job-Status.")
args = parser.parse_args()
# Load API keys etc.
Config.load_api_keys()
# The Config class is no longer needed here if API keys are not used
# Config.load_api_keys()
main(job_id=args.job_id)
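# Example invocation (hypothetical job id), assuming the two model files sit next
# to the script:
#   python duplicate_checker.py --job-id 2025-09-10-check
# Progress is then written under job_status/ via update_status(), and the run log under Log/.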