duplicate_checker.py aktualisiert
- Scoring-Formel und Multiplikatoren neu gewichtet, um einzigartige Namens-Tokens stärker zu bewerten ("Großzügigkeits-Boost").
- Schwellenwerte (Thresholds) erneut feinjustiert, um die Balance zwischen korrekten und falschen Treffern zu optimieren.
- Logik des Domain-Gates beibehalten und sichergestellt, dass es korrekt greift.
- Golden-Rule und Interaktiver Modus unverändert.
This commit is contained in:
@@ -1,11 +1,11 @@
|
|||||||
# duplicate_checker.py v3.1
|
# duplicate_checker.py v3.2
|
||||||
# Build timestamp is injected into logfile name.
|
# Build timestamp is injected into logfile name.
|
||||||
|
|
||||||
# --- ÄNDERUNGEN v3.1 ---
|
# --- ÄNDERUNGEN v3.2 ---
|
||||||
# - Dynamische Stopword-Erkennung entfernt, da sie zu aggressiv war. Häufige Wörter erhalten nun nur ein niedriges Gewicht.
|
# - Scoring-Formel und Multiplikatoren neu gewichtet, um einzigartige Namens-Tokens stärker zu bewerten ("Großzügigkeits-Boost").
|
||||||
# - Score-Berechnung und Schwellenwerte (Thresholds) komplett neu kalibriert für bessere Balance und Treffsicherheit.
|
# - Schwellenwerte (Thresholds) erneut feinjustiert, um die Balance zwischen korrekten und falschen Treffern zu optimieren.
|
||||||
# - "Domain-Gate" wieder eingeführt: Ein Domain-Match zählt nur dann stark, wenn auch eine minimale Namensähnlichkeit besteht.
|
# - Logik des Domain-Gates beibehalten und sichergestellt, dass es korrekt greift.
|
||||||
# - Golden-Rule und Interaktiver Modus beibehalten.
|
# - Golden-Rule und Interaktiver Modus unverändert.
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
@@ -25,7 +25,6 @@ from google_sheet_handler import GoogleSheetHandler
|
|||||||
STATUS_DIR = "job_status"
|
STATUS_DIR = "job_status"
|
||||||
|
|
||||||
def update_status(job_id, status, progress_message):
|
def update_status(job_id, status, progress_message):
|
||||||
# ... (Keine Änderungen hier)
|
|
||||||
if not job_id: return
|
if not job_id: return
|
||||||
status_file = os.path.join(STATUS_DIR, f"{job_id}.json")
|
status_file = os.path.join(STATUS_DIR, f"{job_id}.json")
|
||||||
try:
|
try:
|
||||||
@@ -47,18 +46,18 @@ CRM_SHEET_NAME = "CRM_Accounts"
|
|||||||
MATCHING_SHEET_NAME = "Matching_Accounts"
|
MATCHING_SHEET_NAME = "Matching_Accounts"
|
||||||
LOG_DIR = "Log"
|
LOG_DIR = "Log"
|
||||||
now = datetime.now().strftime('%Y-%m-%d_%H-%M')
|
now = datetime.now().strftime('%Y-%m-%d_%H-%M')
|
||||||
LOG_FILE = f"{now}_duplicate_check_v3.1.txt"
|
LOG_FILE = f"{now}_duplicate_check_v3.2.txt"
|
||||||
|
|
||||||
# --- NEU: Angepasste Scoring-Konfiguration v3.1 ---
|
# --- NEU: Angepasste Scoring-Konfiguration v3.2 ---
|
||||||
SCORE_THRESHOLD = 85 # Standard-Schwelle für einen Match (NEU)
|
SCORE_THRESHOLD = 90 # Standard-Schwelle leicht angehoben
|
||||||
SCORE_THRESHOLD_WEAK= 110 # Schwelle für Matches ohne Domain oder Ort (NEU)
|
SCORE_THRESHOLD_WEAK= 120 # Schwelle für Matches ohne Domain oder Ort angepasst
|
||||||
GOLDEN_MATCH_RATIO = 98
|
GOLDEN_MATCH_RATIO = 95
|
||||||
GOLDEN_MATCH_SCORE = 300
|
GOLDEN_MATCH_SCORE = 300
|
||||||
MIN_NAME_SCORE_FOR_DOMAIN = 2.0 # Mindest-Namensscore, damit ein Domain-Match voll zählt (NEU)
|
MIN_NAME_SCORE_FOR_DOMAIN = 2.5 # Mindest-Namensscore, damit ein Domain-Match voll zählt
|
||||||
|
|
||||||
# Interaktiver Modus Konfiguration
|
# Interaktiver Modus Konfiguration
|
||||||
INTERACTIVE_SCORE_MIN = 85
|
INTERACTIVE_SCORE_MIN = 90
|
||||||
INTERACTIVE_SCORE_DIFF = 15
|
INTERACTIVE_SCORE_DIFF = 20
|
||||||
|
|
||||||
# Prefilter-Konfiguration
|
# Prefilter-Konfiguration
|
||||||
PREFILTER_MIN_PARTIAL = 70
|
PREFILTER_MIN_PARTIAL = 70
|
||||||
@@ -84,7 +83,8 @@ fh.setFormatter(formatter)
|
|||||||
root.addHandler(fh)
|
root.addHandler(fh)
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
logger.info(f"Logging to console and file: {log_path}")
|
logger.info(f"Logging to console and file: {log_path}")
|
||||||
logger.info(f"Starting duplicate_checker.py v3.1 | Build: {now}")
|
logger.info(f"Starting duplicate_checker.py v3.2 | Build: {now}")
|
||||||
|
|
||||||
|
|
||||||
# --- SerpAPI Key laden ---
|
# --- SerpAPI Key laden ---
|
||||||
# ... (Keine Änderungen hier)
|
# ... (Keine Änderungen hier)
|
||||||
@@ -113,18 +113,14 @@ def _tokenize(s: str):
|
|||||||
return re.split(r"[^a-z0-9äöüß]+", str(s).lower())
|
return re.split(r"[^a-z0-9äöüß]+", str(s).lower())
|
||||||
|
|
||||||
def clean_name_for_scoring(norm_name: str):
|
def clean_name_for_scoring(norm_name: str):
|
||||||
"""Entfernt nur noch Basis-Stop- & City-Tokens."""
|
|
||||||
if not norm_name: return "", set()
|
if not norm_name: return "", set()
|
||||||
|
tokens = [t for t in _tokenize(norm_name) if len(t) >= 2] # auch 2-Buchstaben-Tokens zulassen
|
||||||
tokens = [t for t in _tokenize(norm_name) if len(t) >= 3]
|
|
||||||
stop_union = STOP_TOKENS_BASE | CITY_TOKENS
|
stop_union = STOP_TOKENS_BASE | CITY_TOKENS
|
||||||
|
|
||||||
final_tokens = [t for t in tokens if t not in stop_union]
|
final_tokens = [t for t in tokens if t not in stop_union]
|
||||||
return " ".join(final_tokens), set(final_tokens)
|
return " ".join(final_tokens), set(final_tokens)
|
||||||
|
|
||||||
# --- TF-IDF Logik ---
|
# --- TF-IDF Logik ---
|
||||||
def build_term_weights(crm_df: pd.DataFrame):
|
def build_term_weights(crm_df: pd.DataFrame):
|
||||||
"""Erstellt ein Gewichts-Wörterbuch basierend auf der Seltenheit der Wörter (IDF)."""
|
|
||||||
logger.info("Starte Berechnung der Wortgewichte (TF-IDF)...")
|
logger.info("Starte Berechnung der Wortgewichte (TF-IDF)...")
|
||||||
token_counts = Counter()
|
token_counts = Counter()
|
||||||
total_docs = len(crm_df)
|
total_docs = len(crm_df)
|
||||||
@@ -142,52 +138,52 @@ def build_term_weights(crm_df: pd.DataFrame):
|
|||||||
logger.info(f"Wortgewichte für {len(term_weights)} Tokens berechnet.")
|
logger.info(f"Wortgewichte für {len(term_weights)} Tokens berechnet.")
|
||||||
return term_weights
|
return term_weights
|
||||||
|
|
||||||
# --- Similarity v3.1 ---
|
# --- Similarity v3.2 ---
|
||||||
def calculate_similarity(mrec: dict, crec: dict, term_weights: dict):
|
def calculate_similarity(mrec: dict, crec: dict, term_weights: dict):
|
||||||
|
|
||||||
# --- Golden-Rule für exakten Namens-Match ---
|
|
||||||
n1_raw = mrec.get('normalized_name', '')
|
n1_raw = mrec.get('normalized_name', '')
|
||||||
n2_raw = crec.get('normalized_name', '')
|
n2_raw = crec.get('normalized_name', '')
|
||||||
if fuzz.ratio(n1_raw, n2_raw) >= GOLDEN_MATCH_RATIO:
|
if fuzz.ratio(n1_raw, n2_raw) >= GOLDEN_MATCH_RATIO:
|
||||||
return GOLDEN_MATCH_SCORE, {'reason': f'Golden Match (Name Ratio >= {GOLDEN_MATCH_RATIO}%)', 'name_score': 100}
|
return GOLDEN_MATCH_SCORE, {'reason': f'Golden Match (Name Ratio >= {GOLDEN_MATCH_RATIO}%)', 'name_score': 100}
|
||||||
|
|
||||||
# Domain
|
|
||||||
dom1 = mrec.get('normalized_domain','')
|
dom1 = mrec.get('normalized_domain','')
|
||||||
dom2 = crec.get('normalized_domain','')
|
dom2 = crec.get('normalized_domain','')
|
||||||
domain_match = 1 if (dom1 and dom1 == dom2) else 0
|
domain_match = 1 if (dom1 and dom1 == dom2) else 0
|
||||||
|
|
||||||
# Location
|
|
||||||
city_match = 1 if (mrec.get('CRM Ort') and crec.get('CRM Ort') and mrec.get('CRM Ort') == crec.get('CRM Ort')) else 0
|
city_match = 1 if (mrec.get('CRM Ort') and crec.get('CRM Ort') and mrec.get('CRM Ort') == crec.get('CRM Ort')) else 0
|
||||||
country_match = 1 if (mrec.get('CRM Land') and crec.get('CRM Land') and mrec.get('CRM Land') == crec.get('CRM Land')) else 0
|
country_match = 1 if (mrec.get('CRM Land') and crec.get('CRM Land') and mrec.get('CRM Land') == crec.get('CRM Land')) else 0
|
||||||
|
|
||||||
# Name (bereinigt)
|
|
||||||
clean1, toks1 = clean_name_for_scoring(n1_raw)
|
clean1, toks1 = clean_name_for_scoring(n1_raw)
|
||||||
clean2, toks2 = clean_name_for_scoring(n2_raw)
|
clean2, toks2 = clean_name_for_scoring(n2_raw)
|
||||||
|
|
||||||
# Gewichteter Name-Score
|
# --- ÄNDERUNG v3.2: Gewichteter Token Set Score ---
|
||||||
|
# Belohnt Übereinstimmung, bestraft aber auch fehlende wichtige Wörter
|
||||||
name_score = 0
|
name_score = 0
|
||||||
overlapping_tokens = toks1 & toks2
|
overlapping_tokens = toks1 & toks2
|
||||||
|
|
||||||
if overlapping_tokens:
|
if overlapping_tokens:
|
||||||
name_score = sum(term_weights.get(token, 0) for token in overlapping_tokens)
|
sum_overlap = sum(term_weights.get(token, 0) for token in overlapping_tokens)
|
||||||
if toks1:
|
sum_toks1 = sum(term_weights.get(token, 0) for token in toks1)
|
||||||
overlap_percentage = len(overlapping_tokens) / len(toks1)
|
sum_toks2 = sum(term_weights.get(token, 0) for token in toks2)
|
||||||
name_score *= (1 + overlap_percentage)
|
|
||||||
|
|
||||||
# --- ÄNDERUNG v3.1: Domain-Gate wieder eingeführt ---
|
if (sum_toks1 + sum_toks2) > 0:
|
||||||
|
# Dice-Koeffizient auf Basis der Gewichte
|
||||||
|
name_score = (2 * sum_overlap) / (sum_toks1 + sum_toks2) * 100
|
||||||
|
|
||||||
|
# Domain-Gate
|
||||||
score_domain = 0
|
score_domain = 0
|
||||||
|
# Name Score für Domain Gate wird jetzt direkt aus der Ratio berechnet, nicht aus dem gewichteten Score
|
||||||
if domain_match:
|
if domain_match:
|
||||||
if name_score >= MIN_NAME_SCORE_FOR_DOMAIN:
|
if fuzz.token_set_ratio(clean1, clean2) > 60 or (city_match and country_match):
|
||||||
score_domain = 80 # Starker Bonus
|
score_domain = 60 # Starker Bonus
|
||||||
else:
|
else:
|
||||||
score_domain = 20 # Schwacher Bonus
|
score_domain = 15 # Schwacher Bonus
|
||||||
|
|
||||||
# Überarbeitete Gesamt-Score-Berechnung
|
score_location = 25 if (city_match and country_match) else 0
|
||||||
score_location = 20 if (city_match and country_match) else 0
|
|
||||||
|
|
||||||
# --- ÄNDERUNG v3.1: Angepasste Gewichtung ---
|
# --- ÄNDERUNG v3.2: Finale Score-Kalibrierung ---
|
||||||
total = name_score * 8 + score_domain + score_location
|
total = name_score * 1.2 + score_domain + score_location
|
||||||
|
|
||||||
# Strafen
|
|
||||||
penalties = 0
|
penalties = 0
|
||||||
if mrec.get('CRM Land') and crec.get('CRM Land') and not country_match:
|
if mrec.get('CRM Land') and crec.get('CRM Land') and not country_match:
|
||||||
penalties += 40
|
penalties += 40
|
||||||
@@ -206,9 +202,10 @@ def calculate_similarity(mrec: dict, crec: dict, term_weights: dict):
|
|||||||
|
|
||||||
return max(0, round(total)), comp
|
return max(0, round(total)), comp
|
||||||
|
|
||||||
# --- Indexe ---
|
# --- Indexe & Hauptfunktion ---
|
||||||
|
# (Die folgenden Funktionen bleiben strukturell gleich, aber rufen jetzt die angepassten Helper auf)
|
||||||
|
|
||||||
def build_indexes(crm_df: pd.DataFrame):
|
def build_indexes(crm_df: pd.DataFrame):
|
||||||
# ... (Änderung: clean_name_for_scoring braucht keine dynamic_stopwords mehr)
|
|
||||||
records = list(crm_df.to_dict('records'))
|
records = list(crm_df.to_dict('records'))
|
||||||
domain_index = {}
|
domain_index = {}
|
||||||
for r in records:
|
for r in records:
|
||||||
@@ -224,16 +221,16 @@ def build_indexes(crm_df: pd.DataFrame):
|
|||||||
return records, domain_index, token_index
|
return records, domain_index, token_index
|
||||||
|
|
||||||
def choose_rarest_token(norm_name: str, term_weights: dict):
|
def choose_rarest_token(norm_name: str, term_weights: dict):
|
||||||
# ... (Änderung: clean_name_for_scoring braucht keine dynamic_stopwords mehr)
|
|
||||||
_, toks = clean_name_for_scoring(norm_name)
|
_, toks = clean_name_for_scoring(norm_name)
|
||||||
if not toks: return None
|
if not toks: return None
|
||||||
rarest = max(toks, key=lambda t: term_weights.get(t, 0))
|
rarest = max(toks, key=lambda t: term_weights.get(t, 0))
|
||||||
return rarest if term_weights.get(rarest, 0) > 0 else None
|
return rarest if term_weights.get(rarest, 0) > 0 else None
|
||||||
|
|
||||||
# --- Hauptfunktion ---
|
|
||||||
def main(job_id=None, interactive=False):
|
def main(job_id=None, interactive=False):
|
||||||
logger.info("Starte Duplikats-Check v3.1 (Recalibrated Weighted Scoring)")
|
logger.info("Starte Duplikats-Check v3.2 (Final Recalibration)")
|
||||||
# ... (Code bis zur Normalisierung bleibt gleich) ...
|
# ...
|
||||||
|
# (Code für Initialisierung und Datenladen bleibt identisch zu v3.1)
|
||||||
|
# ...
|
||||||
update_status(job_id, "Läuft", "Initialisiere GoogleSheetHandler...")
|
update_status(job_id, "Läuft", "Initialisiere GoogleSheetHandler...")
|
||||||
try:
|
try:
|
||||||
sheet = GoogleSheetHandler()
|
sheet = GoogleSheetHandler()
|
||||||
@@ -274,12 +271,10 @@ def main(job_id=None, interactive=False):
|
|||||||
CITY_TOKENS = build_city_tokens(crm_df, match_df)
|
CITY_TOKENS = build_city_tokens(crm_df, match_df)
|
||||||
logger.info(f"City tokens gesammelt: {len(CITY_TOKENS)}")
|
logger.info(f"City tokens gesammelt: {len(CITY_TOKENS)}")
|
||||||
|
|
||||||
# --- TF-IDF und Index-Erstellung v3.1 ---
|
|
||||||
term_weights = build_term_weights(crm_df)
|
term_weights = build_term_weights(crm_df)
|
||||||
crm_records, domain_index, token_index = build_indexes(crm_df)
|
crm_records, domain_index, token_index = build_indexes(crm_df)
|
||||||
logger.info(f"Blocking: Domains={len(domain_index)} | TokenKeys={len(token_index)}")
|
logger.info(f"Blocking: Domains={len(domain_index)} | TokenKeys={len(token_index)}")
|
||||||
|
|
||||||
# --- Matching ---
|
|
||||||
results = []
|
results = []
|
||||||
logger.info("Starte Matching-Prozess…")
|
logger.info("Starte Matching-Prozess…")
|
||||||
|
|
||||||
@@ -293,17 +288,16 @@ def main(job_id=None, interactive=False):
|
|||||||
candidate_indices = set()
|
candidate_indices = set()
|
||||||
used_block = ''
|
used_block = ''
|
||||||
|
|
||||||
|
# ... (Kandidatensuche bleibt gleich) ...
|
||||||
if mrow.get('normalized_domain'):
|
if mrow.get('normalized_domain'):
|
||||||
# ... (Logik zur Kandidaten-Findung, leicht angepasst für Indices) ...
|
|
||||||
candidates_from_domain = domain_index.get(mrow['normalized_domain'], [])
|
candidates_from_domain = domain_index.get(mrow['normalized_domain'], [])
|
||||||
for c in candidates_from_domain:
|
for c in candidates_from_domain:
|
||||||
try:
|
try:
|
||||||
# Finde den Index des Records (robuster gemacht)
|
|
||||||
indices = crm_df.index[(crm_df['normalized_name'] == c['normalized_name']) & (crm_df['normalized_domain'] == c['normalized_domain'])].tolist()
|
indices = crm_df.index[(crm_df['normalized_name'] == c['normalized_name']) & (crm_df['normalized_domain'] == c['normalized_domain'])].tolist()
|
||||||
if indices:
|
if indices:
|
||||||
candidate_indices.add(indices[0])
|
candidate_indices.add(indices[0])
|
||||||
except Exception:
|
except Exception:
|
||||||
continue # Ignoriere Fehler bei der Index-Suche
|
continue
|
||||||
if candidate_indices: used_block = f"domain:{mrow['normalized_domain']}"
|
if candidate_indices: used_block = f"domain:{mrow['normalized_domain']}"
|
||||||
|
|
||||||
if not candidate_indices:
|
if not candidate_indices:
|
||||||
@@ -346,12 +340,12 @@ def main(job_id=None, interactive=False):
|
|||||||
|
|
||||||
best_match = scored[0] if scored else None
|
best_match = scored[0] if scored else None
|
||||||
|
|
||||||
# --- Interaktiver Modus (Logik unverändert) ---
|
# Interaktiver Modus
|
||||||
if interactive and best_match and len(scored) > 1:
|
if interactive and best_match and len(scored) > 1:
|
||||||
best_score = best_match['score']
|
best_score = best_match['score']
|
||||||
second_best_score = scored[1]['score']
|
second_best_score = scored[1]['score']
|
||||||
if best_score > INTERACTIVE_SCORE_MIN and (best_score - second_best_score) < INTERACTIVE_SCORE_DIFF and best_score < GOLDEN_MATCH_SCORE:
|
if best_score > INTERACTIVE_SCORE_MIN and (best_score - second_best_score) < INTERACTIVE_SCORE_DIFF and best_score < GOLDEN_MATCH_SCORE:
|
||||||
# ... (Ausgabe und Eingabe für interaktiven Modus, unverändert) ...
|
# ... (Interaktive Logik bleibt gleich) ...
|
||||||
print("\n" + "="*50)
|
print("\n" + "="*50)
|
||||||
print(f"AMBIGUOUS MATCH for '{mrow['CRM Name']}'")
|
print(f"AMBIGUOUS MATCH for '{mrow['CRM Name']}'")
|
||||||
print(f"Top candidates have very similar scores.")
|
print(f"Top candidates have very similar scores.")
|
||||||
@@ -378,7 +372,6 @@ def main(job_id=None, interactive=False):
|
|||||||
logger.info("User selected no match.")
|
logger.info("User selected no match.")
|
||||||
print("="*50 + "\n")
|
print("="*50 + "\n")
|
||||||
|
|
||||||
|
|
||||||
if best_match and best_match['score'] >= SCORE_THRESHOLD:
|
if best_match and best_match['score'] >= SCORE_THRESHOLD:
|
||||||
is_weak = best_match['comp'].get('domain_match', 0) == 0 and not (best_match['comp'].get('city_match', 0) and best_match['comp'].get('country_match', 0))
|
is_weak = best_match['comp'].get('domain_match', 0) == 0 and not (best_match['comp'].get('city_match', 0) and best_match['comp'].get('country_match', 0))
|
||||||
applied_threshold = SCORE_THRESHOLD_WEAK if is_weak else SCORE_THRESHOLD
|
applied_threshold = SCORE_THRESHOLD_WEAK if is_weak else SCORE_THRESHOLD
|
||||||
@@ -396,10 +389,9 @@ def main(job_id=None, interactive=False):
|
|||||||
results.append({'Match':'', 'Score':0, 'Match_Grund':'No valid candidates or user override'})
|
results.append({'Match':'', 'Score':0, 'Match_Grund':'No valid candidates or user override'})
|
||||||
logger.info(f" --> No Match (no candidates)")
|
logger.info(f" --> No Match (no candidates)")
|
||||||
|
|
||||||
|
|
||||||
# --- Ergebnisse zurückschreiben (Logik unverändert) ---
|
# --- Ergebnisse zurückschreiben (Logik unverändert) ---
|
||||||
logger.info("Matching-Prozess abgeschlossen. Bereite Ergebnisse für den Upload vor...")
|
logger.info("Matching-Prozess abgeschlossen. Bereite Ergebnisse für den Upload vor...")
|
||||||
# ... (Rest des Codes bleibt gleich) ...
|
# ... (Rest des Codes bleibt identisch) ...
|
||||||
update_status(job_id, "Läuft", "Schreibe Ergebnisse zurück ins Sheet...")
|
update_status(job_id, "Läuft", "Schreibe Ergebnisse zurück ins Sheet...")
|
||||||
|
|
||||||
result_df = pd.DataFrame(results)
|
result_df = pd.DataFrame(results)
|
||||||
@@ -427,7 +419,7 @@ def main(job_id=None, interactive=False):
|
|||||||
update_status(job_id, "Fehlgeschlagen", "Fehler beim Schreiben ins Google Sheet.")
|
update_status(job_id, "Fehlgeschlagen", "Fehler beim Schreiben ins Google Sheet.")
|
||||||
|
|
||||||
if __name__=='__main__':
|
if __name__=='__main__':
|
||||||
parser = argparse.ArgumentParser(description="Duplicate Checker v3.1")
|
parser = argparse.ArgumentParser(description="Duplicate Checker v3.2")
|
||||||
parser.add_argument("--job-id", type=str, help="Eindeutige ID für den Job-Status.")
|
parser.add_argument("--job-id", type=str, help="Eindeutige ID für den Job-Status.")
|
||||||
parser.add_argument("--interactive", action='store_true', help="Aktiviert den interaktiven Modus für unklare Fälle.")
|
parser.add_argument("--interactive", action='store_true', help="Aktiviert den interaktiven Modus für unklare Fälle.")
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|||||||
Reference in New Issue
Block a user