From da9d97dae7bd1b2f264e86c9af25a2ec826c7b3a Mon Sep 17 00:00:00 2001
From: Floke
Date: Wed, 24 Sep 2025 15:37:12 +0000
Subject: [PATCH] duplicate_checker.py updated

---
 duplicate_checker.py | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/duplicate_checker.py b/duplicate_checker.py
index c3b584c0..36ecbc86 100644
--- a/duplicate_checker.py
+++ b/duplicate_checker.py
@@ -1,4 +1,4 @@
-# duplicate_checker_v6.0.py
+# duplicate_checker_v6.1.py
 
 import os
 import sys
@@ -17,19 +17,23 @@ from helpers import normalize_company_name, simple_normalize_url
 from config import Config
 from google_sheet_handler import GoogleSheetHandler
 
+# --- Configuration ---
+SCRIPT_VERSION = "v6.1 (Treelite ML Model)"
 STATUS_DIR = "job_status"
 LOG_DIR = "Log"
-MODEL_FILE = 'xgb_model.json' # No longer used directly, kept as a reference
+MODEL_FILE = 'xgb_model.json'
 TERM_WEIGHTS_FILE = 'term_weights.joblib'
 CRM_DATA_FILE = 'crm_for_prediction.pkl'
 TREELITE_MODEL_FILE = 'xgb_model.treelite'
 PREDICTION_THRESHOLD = 0.5
 PREFILTER_MIN_PARTIAL = 65
 PREFILTER_LIMIT = 50
+CRM_SHEET_NAME = "CRM_Accounts"
+MATCHING_SHEET_NAME = "Matching_Accounts"
 
 # --- Logging Setup ---
 now = datetime.now().strftime('%Y-%m-%d_%H-%M')
-LOG_FILE = f"{now}_duplicate_check_v6.0.txt"
+LOG_FILE = f"{now}_duplicate_check_{SCRIPT_VERSION.split(' ')[0]}.txt"
 if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR, exist_ok=True)
 log_path = os.path.join(LOG_DIR, LOG_FILE)
 root = logging.getLogger()
@@ -109,7 +113,6 @@ def create_features(mrec: dict, crec: dict, term_weights: dict, feature_names: l
     features['name_len_diff'] = abs(len(n1_raw) - len(n2_raw))
     features['candidate_is_shorter'] = 1 if len(n2_raw) < len(n1_raw) else 0
 
-    # Make sure the array has the same order as during training
     return [features.get(name, 0) for name in feature_names]
 
 def build_indexes(crm_df: pd.DataFrame):
@@ -125,7 +128,10 @@ def build_indexes(crm_df: pd.DataFrame):
     return records, domain_index, token_index
 
 def main(job_id=None):
-    logger.info(f"Starting duplicate_checker.py v6.0 | Build: {now}")
+    # <<< NEW: Unambiguous log output right at the start >>>
+    logger.info(f"############################################################")
+    logger.info(f"### DUPLICATE CHECKER {SCRIPT_VERSION} IS RUNNING ###")
+    logger.info(f"############################################################")
 
     try:
         predictor = treelite_runtime.Predictor(TREELITE_MODEL_FILE, nthread=4)
@@ -144,6 +150,9 @@ def main(job_id=None):
         sys.exit(1)
 
     total = len(match_df) if match_df is not None else 0
+    if match_df is None or match_df.empty:
+        logger.critical("Empty data in the matching sheet. Aborting.")
+        return
     logger.info(f"{len(crm_df)} CRM records (local) | {total} matching records")
 
     match_df['normalized_name'] = match_df['CRM Name'].astype(str).apply(normalize_company_name)
@@ -162,7 +171,7 @@ def main(job_id=None):
     for idx, mrow in match_df.to_dict('index').items():
         processed = idx + 1
         progress_message = f"Checking {processed}/{total}: '{mrow.get('CRM Name','')}'"
-        logger.info(progress_message)
+        if processed % 100 == 0: logger.info(progress_message) # log less frequently
         if processed % 10 == 0 or processed == total: update_status(job_id, "Running", progress_message)
 
         candidate_indices = set()
@@ -184,7 +193,7 @@ def main(job_id=None):
             pf = sorted([(fuzz.partial_ratio(clean1, clean_name_for_scoring(r.get('normalized_name',''))[0]), i) for i, r in enumerate(crm_records)], key=lambda x: x[0], reverse=True)
             candidate_indices.update([i for score, i in pf if score >= PREFILTER_MIN_PARTIAL][:PREFILTER_LIMIT])
 
-        candidates = [crm_records[i] for i in candidate_indices]
+        candidates = [crm_records[i] for i in list(candidate_indices)[:PREFILTER_LIMIT]] # cap the candidate count
         if not candidates:
             results.append({'Match':'', 'Score':0, 'Match_Grund':'no candidates'})
             continue
@@ -199,11 +208,9 @@ def main(job_id=None):
 
         if best_match and best_match['score'] >= PREDICTION_THRESHOLD:
             results.append({'Match': best_match['name'], 'Score': round(best_match['score'] * 100), 'Match_Grund': f"ML Confidence: {round(best_match['score']*100)}%"})
-            logger.info(f" --> Match: '{best_match['name']}' (Confidence: {round(best_match['score']*100)}%)")
         else:
             score_val = round(best_match['score'] * 100) if best_match else 0
             results.append({'Match':'', 'Score': score_val, 'Match_Grund': f"Below Threshold ({int(PREDICTION_THRESHOLD*100)}%)"})
-            logger.info(f" --> No Match (Confidence: {score_val}%)")
 
     logger.info("Matching process finished. Writing results...")
     result_df = pd.DataFrame(results)
@@ -222,7 +229,7 @@ def main(job_id=None):
         if job_id: update_status(job_id, "Failed", "Error writing to the Google Sheet.")
 
 if __name__=='__main__':
-    parser = argparse.ArgumentParser(description="Duplicate Checker v6.0 (Treelite Model)")
+    parser = argparse.ArgumentParser(description=f"Duplicate Checker {SCRIPT_VERSION}")
    parser.add_argument("--job-id", type=str, help="Unique ID for the job status.")
     args = parser.parse_args()
     main(job_id=args.job_id)
\ No newline at end of file
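
--
Reviewer note (a sketch, not part of the patch): the new cap
"candidates = [crm_records[i] for i in list(candidate_indices)[:PREFILTER_LIMIT]]"
slices a set, and set iteration order is unspecified in Python, so which
candidates survive the cap is arbitrary from run to run. Below is a minimal
deterministic alternative that ranks by fuzzy score before truncating. It
assumes rapidfuzz as the fuzz backend (the script's actual fuzz import is not
visible in this diff); cap_candidates is a hypothetical helper, not a function
from duplicate_checker.py, and it scores normalized_name directly, skipping
the script's clean_name_for_scoring() step.

from rapidfuzz import fuzz

def cap_candidates(candidate_indices, crm_records, query, limit):
    # Rank candidate indices by partial-ratio similarity to the query,
    # then keep the top `limit`; deterministic, unlike slicing a set.
    ranked = sorted(
        candidate_indices,
        key=lambda i: fuzz.partial_ratio(query, crm_records[i].get("normalized_name", "")),
        reverse=True,
    )
    return [crm_records[i] for i in ranked[:limit]]

# Usage: with a cap of 2, the two names closest to "acme" survive
# regardless of set iteration order.
records = [
    {"normalized_name": "acme gmbh"},
    {"normalized_name": "acme"},
    {"normalized_name": "zeta ag"},
]
print(cap_candidates({0, 1, 2}, records, "acme", limit=2))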
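
Reviewer note (a sketch, not part of the patch): the diff shows the compiled
Treelite model being loaded via treelite_runtime.Predictor(TREELITE_MODEL_FILE,
nthread=4) but not the scoring call itself. The snippet below shows how batch
prediction typically looks under the treelite_runtime 1.x/2.x API that this
constructor belongs to; the model path, the 8x12 feature matrix, and the
column count are illustrative stand-ins (in the script, each row would come
from create_features(), and the width must match the trained model).

import numpy as np
import treelite_runtime

predictor = treelite_runtime.Predictor("xgb_model.treelite", nthread=4)
X = np.random.rand(8, 12).astype(np.float32)  # 8 candidate rows, 12 features
dmat = treelite_runtime.DMatrix(X)
probs = predictor.predict(dmat)  # one confidence score per candidate row
best = int(np.argmax(probs))
print(f"best candidate row: {best}, confidence: {float(probs[best]):.3f}")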