duplicate_checker.py updated

- Removed dynamic stopword detection, since it was too aggressive. Frequent words now merely receive a low weight.
- Completely recalibrated the score calculation and thresholds for a better balance between recall and precision.
- Reintroduced the "domain gate": a domain match only counts strongly if there is also minimal name similarity (see the sketch below the commit metadata).
- Kept the golden rule and the interactive mode.
2025-09-05 07:34:23 +00:00
parent d245d43182
commit 15019e5326
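
As orientation before the diff: a minimal sketch of the recalibrated v3.1 scoring path, condensed from the hunks below. Penalties and the golden rule are left out for brevity; the constants are the new v3.1 values.

```python
SCORE_THRESHOLD = 85             # standard match threshold (v3.1)
SCORE_THRESHOLD_WEAK = 110       # threshold without domain/location signals (v3.1)
MIN_NAME_SCORE_FOR_DOMAIN = 2.0  # the "domain gate" (v3.1)

def sketch_total(name_score: float, domain_match: bool,
                 city_match: bool, country_match: bool) -> int:
    """Condensed v3.1 total score (penalties and golden rule omitted)."""
    score_domain = 0
    if domain_match:
        # Domain gate: the strong bonus requires minimal name similarity.
        score_domain = 80 if name_score >= MIN_NAME_SCORE_FOR_DOMAIN else 20
    score_location = 20 if (city_match and country_match) else 0
    return round(name_score * 8 + score_domain + score_location)
```

With a weighted name score of 9.0 and a gated domain match this gives 9.0 * 8 + 80 = 152, comfortably above the standard threshold of 85. Without domain and location signals, a pair needs a name score of at least 110 / 8 = 13.75 to clear the weak threshold.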


```diff
@@ -1,11 +1,11 @@
-# duplicate_checker.py v3.0
+# duplicate_checker.py v3.1
 # Build timestamp is injected into logfile name.
-# --- NEW FEATURES v3.0 ---
-# - Golden rule: near-exact name matches (>98%) are always counted as a hit.
-# - Weighted scoring (TF-IDF): unique words in the company name get more weight than frequent filler words.
-# - Interactive mode: in unclear cases the user can manually pick the best candidate.
-# - Comprehensively reworked scoring logic for higher precision.
+# --- CHANGES v3.1 ---
+# - Removed dynamic stopword detection, since it was too aggressive. Frequent words now merely receive a low weight.
+# - Completely recalibrated the score calculation and thresholds for a better balance between recall and precision.
+# - Reintroduced the "domain gate": a domain match only counts strongly if there is also minimal name similarity.
+# - Kept the golden rule and the interactive mode.
 import os
 import sys
```
```diff
@@ -25,6 +25,7 @@ from google_sheet_handler import GoogleSheetHandler
 STATUS_DIR = "job_status"
 def update_status(job_id, status, progress_message):
+    # ... (no changes here)
     if not job_id: return
     status_file = os.path.join(STATUS_DIR, f"{job_id}.json")
     try:
```
```diff
@@ -46,23 +47,25 @@ CRM_SHEET_NAME = "CRM_Accounts"
 MATCHING_SHEET_NAME = "Matching_Accounts"
 LOG_DIR = "Log"
 now = datetime.now().strftime('%Y-%m-%d_%H-%M')
-LOG_FILE = f"{now}_duplicate_check_v3.0.txt"
+LOG_FILE = f"{now}_duplicate_check_v3.1.txt"
-# Scoring configuration
-SCORE_THRESHOLD = 100       # standard threshold for a match
-SCORE_THRESHOLD_WEAK= 130   # threshold for matches without domain or location
-GOLDEN_MATCH_RATIO = 98     # ratio from which a name match counts as a "golden match"
-GOLDEN_MATCH_SCORE = 300    # score awarded for a golden match
+# --- NEW: adjusted scoring configuration v3.1 ---
+SCORE_THRESHOLD = 85        # standard threshold for a match (NEW)
+SCORE_THRESHOLD_WEAK= 110   # threshold for matches without domain or location (NEW)
+GOLDEN_MATCH_RATIO = 98
+GOLDEN_MATCH_SCORE = 300
+MIN_NAME_SCORE_FOR_DOMAIN = 2.0  # minimum name score for a domain match to count fully (NEW)
 # Interactive mode configuration
-INTERACTIVE_SCORE_MIN = 100  # minimum score of the best candidate to trigger interactive mode
-INTERACTIVE_SCORE_DIFF = 20  # maximum score gap to the runner-up to trigger the mode
+INTERACTIVE_SCORE_MIN = 85
+INTERACTIVE_SCORE_DIFF = 15
 # Prefilter configuration
 PREFILTER_MIN_PARTIAL = 70
 PREFILTER_LIMIT = 30
 # --- Logging setup ---
+# ... (no changes here)
 if not os.path.exists(LOG_DIR):
     os.makedirs(LOG_DIR, exist_ok=True)
 log_path = os.path.join(LOG_DIR, LOG_FILE)
```
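
A quick sanity check of the log-file naming with the timestamp format above; the date is just the commit date, purely for illustration:

```python
from datetime import datetime

now = datetime(2025, 9, 5, 7, 34).strftime('%Y-%m-%d_%H-%M')
print(f"{now}_duplicate_check_v3.1.txt")  # 2025-09-05_07-34_duplicate_check_v3.1.txt
```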
```diff
@@ -81,9 +84,10 @@ fh.setFormatter(formatter)
 root.addHandler(fh)
 logger = logging.getLogger(__name__)
 logger.info(f"Logging to console and file: {log_path}")
-logger.info(f"Starting duplicate_checker.py v3.0 | Build: {now}")
+logger.info(f"Starting duplicate_checker.py v3.1 | Build: {now}")
 # --- Load SerpAPI key ---
+# ... (no changes here)
 try:
     Config.load_api_keys()
     serp_key = Config.API_KEYS.get('serpapi')
```
```diff
@@ -95,74 +99,59 @@ except Exception as e:
 # --- Stop/city tokens ---
 STOP_TOKENS_BASE = {
-    'gmbh','mbh','ag','kg','ug','ohg','se','co','kgaa','inc','llc','ltd','sarl',
+    'gmbh','mbh','ag','kg','ug','ohg','se','co','kgaa','inc','llc','ltd','sarl', 'b.v', 'bv',
     'holding','gruppe','group','international','solutions','solution','service','services',
     'deutschland','austria','germany','technik','technology','technologies','systems','systeme',
     'logistik','logistics','industries','industrie','management','consulting','vertrieb','handel',
     'international','company','gesellschaft','mbh&co','mbhco','werke','werk'
 }
-CITY_TOKENS = set()  # filled dynamically
+CITY_TOKENS = set()
 # --- Utilities ---
 def _tokenize(s: str):
     if not s: return []
     return re.split(r"[^a-z0-9äöüß]+", str(s).lower())
-def clean_name_for_scoring(norm_name: str, dynamic_stopwords: set):
-    """Removes stop and city tokens as well as dynamic stopwords."""
+def clean_name_for_scoring(norm_name: str):
+    """Now removes only the base stop and city tokens."""
     if not norm_name: return "", set()
     tokens = [t for t in _tokenize(norm_name) if len(t) >= 3]
-    stop_union = STOP_TOKENS_BASE | CITY_TOKENS | dynamic_stopwords
+    stop_union = STOP_TOKENS_BASE | CITY_TOKENS
     final_tokens = [t for t in tokens if t not in stop_union]
     return " ".join(final_tokens), set(final_tokens)
-# --- NEW: TF-IDF logic (simplified) ---
-def build_term_weights(crm_df: pd.DataFrame, dynamic_stopwords: set):
+# --- TF-IDF logic ---
+def build_term_weights(crm_df: pd.DataFrame):
     """Builds a weight dictionary based on word rarity (IDF)."""
     logger.info("Starte Berechnung der Wortgewichte (TF-IDF)...")
     token_counts = Counter()
     total_docs = len(crm_df)
     for name in crm_df['normalized_name']:
-        _, tokens = clean_name_for_scoring(name, dynamic_stopwords)
-        for token in set(tokens):  # count each word only once per company name
+        _, tokens = clean_name_for_scoring(name)
+        for token in set(tokens):
             token_counts[token] += 1
     term_weights = {}
     for token, count in token_counts.items():
-        # IDF formula: log(N / df), the rarer the word, the higher the weight
-        idf = math.log(total_docs / (count + 1))  # +1 for smoothing
+        idf = math.log(total_docs / (count + 1))
         term_weights[token] = idf
     logger.info(f"Wortgewichte für {len(term_weights)} Tokens berechnet.")
     return term_weights
-def get_dynamic_stopwords(crm_df: pd.DataFrame, threshold_percent=0.01):
-    """Identifies frequent words in the CRM data set that should be treated as stopwords."""
-    logger.info("Sammle dynamische Stopwords...")
-    token_counts = Counter()
-    for name in crm_df['normalized_name']:
-        tokens = [t for t in _tokenize(name) if len(t) >= 3 and t not in (STOP_TOKENS_BASE | CITY_TOKENS)]
-        for token in set(tokens):
-            token_counts[token] += 1
-    limit = len(crm_df) * threshold_percent
-    stopwords = {token for token, count in token_counts.items() if count > limit}
-    logger.info(f"{len(stopwords)} dynamische Stopwords identifiziert (z.B. 'stadtwerke', 'werke', ...)")
-    return stopwords
-# --- Similarity ---
-def calculate_similarity(mrec: dict, crec: dict, term_weights: dict, dynamic_stopwords: set):
+# --- Similarity v3.1 ---
+def calculate_similarity(mrec: dict, crec: dict, term_weights: dict):
-    # --- NEW: golden rule for exact name matches ---
+    # --- Golden rule for exact name matches ---
     n1_raw = mrec.get('normalized_name', '')
     n2_raw = crec.get('normalized_name', '')
     if fuzz.ratio(n1_raw, n2_raw) >= GOLDEN_MATCH_RATIO:
-        return GOLDEN_MATCH_SCORE, {'reason': f'Golden Match (Name Ratio >= {GOLDEN_MATCH_RATIO}%)'}
+        return GOLDEN_MATCH_SCORE, {'reason': f'Golden Match (Name Ratio >= {GOLDEN_MATCH_RATIO}%)', 'name_score': 100}
-    # Domain (with gate)
+    # Domain
     dom1 = mrec.get('normalized_domain','')
     dom2 = crec.get('normalized_domain','')
     domain_match = 1 if (dom1 and dom1 == dom2) else 0
```
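
The v3.1 idea is visible directly in the IDF formula kept above: frequent tokens are no longer deleted as dynamic stopwords, they simply end up with a small weight. A quick check with assumed document frequencies ('stadtwerke' is the example the removed log line itself mentioned; all counts here are invented for illustration):

```python
import math

total_docs = 10_000  # assumed CRM size
for token, df in [('stadtwerke', 400), ('solartechnik', 25), ('zwiesel', 2)]:
    idf = math.log(total_docs / (df + 1))  # same smoothing as build_term_weights
    print(f"{token:<12} df={df:>4}  weight={idf:.2f}")
# stadtwerke   df= 400  weight=3.22  -> common, contributes little
# solartechnik df=  25  weight=5.95  -> moderately rare
# zwiesel      df=   2  weight=8.11  -> rare, dominates the name score
```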
```diff
@@ -172,27 +161,31 @@ def calculate_similarity(mrec: dict, crec: dict, term_weights: dict, dynamic_stopwords: set):
     country_match = 1 if (mrec.get('CRM Land') and crec.get('CRM Land') and mrec.get('CRM Land') == crec.get('CRM Land')) else 0
     # Name (cleaned)
-    clean1, toks1 = clean_name_for_scoring(n1_raw, dynamic_stopwords)
-    clean2, toks2 = clean_name_for_scoring(n2_raw, dynamic_stopwords)
+    clean1, toks1 = clean_name_for_scoring(n1_raw)
+    clean2, toks2 = clean_name_for_scoring(n2_raw)
-    # --- NEW: weighted name score based on TF-IDF ---
+    # Weighted name score
     name_score = 0
     overlapping_tokens = toks1 & toks2
     if overlapping_tokens:
-        # Score is the sum of the weights of the overlapping words
         name_score = sum(term_weights.get(token, 0) for token in overlapping_tokens)
-        # Bonus for a high percentage overlap of the rare words
         if toks1:
             overlap_percentage = len(overlapping_tokens) / len(toks1)
             name_score *= (1 + overlap_percentage)
-    # --- NEW: reworked total-score calculation ---
-    # Base score components
-    score_domain = 100 if domain_match else 0
+    # --- CHANGE v3.1: domain gate reintroduced ---
+    score_domain = 0
+    if domain_match:
+        if name_score >= MIN_NAME_SCORE_FOR_DOMAIN:
+            score_domain = 80  # strong bonus
+        else:
+            score_domain = 20  # weak bonus
+    # Reworked total-score calculation
     score_location = 20 if (city_match and country_match) else 0
-    # Total score
-    total = name_score * 15 + score_domain + score_location  # name now has far more influence
+    # --- CHANGE v3.1: adjusted weighting ---
+    total = name_score * 8 + score_domain + score_location
     # Penalties
     penalties = 0
```
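
Why the gate matters: in v3.0 the domain bonus was 100 points, which was exactly the old match threshold, so a shared domain alone could produce a match, for instance two otherwise unrelated records on one group domain (an assumed scenario). Under v3.1 the same pair stays well below the threshold:

```python
# Assumed pair: identical domain, nearly disjoint names.
name_score = 0.8                              # below MIN_NAME_SCORE_FOR_DOMAIN = 2.0
score_domain = 20                             # only the weak domain bonus applies
total = round(name_score * 8 + score_domain)  # = 26, far below SCORE_THRESHOLD = 85
print(total)
```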
```diff
@@ -214,34 +207,33 @@ def calculate_similarity(mrec: dict, crec: dict, term_weights: dict, dynamic_stopwords: set):
     return max(0, round(total)), comp
 # --- Indexes ---
-def build_indexes(crm_df: pd.DataFrame, dynamic_stopwords: set):
+def build_indexes(crm_df: pd.DataFrame):
+    # ... (change: clean_name_for_scoring no longer needs dynamic_stopwords)
     records = list(crm_df.to_dict('records'))
-    # Domain index
     domain_index = {}
     for r in records:
         d = r.get('normalized_domain')
-        if d:
-            domain_index.setdefault(d, []).append(r)
+        if d: domain_index.setdefault(d, []).append(r)
-    # Token index
     token_index = {}
     for idx, r in enumerate(records):
-        _, toks = clean_name_for_scoring(r.get('normalized_name',''), dynamic_stopwords)
+        _, toks = clean_name_for_scoring(r.get('normalized_name',''))
         for t in set(toks):
-            token_index.setdefault(t, []).append(idx)  # store the index instead of the whole record
+            token_index.setdefault(t, []).append(idx)
     return records, domain_index, token_index
-def choose_rarest_token(norm_name: str, term_weights: dict, dynamic_stopwords: set):
-    _, toks = clean_name_for_scoring(norm_name, dynamic_stopwords)
+def choose_rarest_token(norm_name: str, term_weights: dict):
+    # ... (change: clean_name_for_scoring no longer needs dynamic_stopwords)
+    _, toks = clean_name_for_scoring(norm_name)
     if not toks: return None
-    # The rarest token has the highest weight (highest IDF score)
     rarest = max(toks, key=lambda t: term_weights.get(t, 0))
     return rarest if term_weights.get(rarest, 0) > 0 else None
 # --- Main function ---
 def main(job_id=None, interactive=False):
-    logger.info("Starte Duplikats-Check v3.0 (Weighted Scoring & Interactive Mode)")
+    logger.info("Starte Duplikats-Check v3.1 (Recalibrated Weighted Scoring)")
+    # ... (code up to the normalization step is unchanged) ...
     update_status(job_id, "Läuft", "Initialisiere GoogleSheetHandler...")
     try:
         sheet = GoogleSheetHandler()
```
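
Blocking keys on the output of choose_rarest_token, i.e. the highest-IDF token of the cleaned name. A tiny self-contained example with assumed weights (city-token filtering ignored):

```python
term_weights = {'stadtwerke': 3.22, 'zwiesel': 8.11}  # assumed IDF weights
toks = {'stadtwerke', 'zwiesel'}                      # cleaned name tokens
rarest = max(toks, key=lambda t: term_weights.get(t, 0))
print(rarest)  # 'zwiesel' -- the token-index key, keeping candidate lists short
```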
```diff
@@ -251,7 +243,6 @@ def main(job_id=None, interactive=False):
         update_status(job_id, "Fehlgeschlagen", f"Init GoogleSheetHandler fehlgeschlagen: {e}")
         sys.exit(1)
-    # Load data
     update_status(job_id, "Läuft", "Lade CRM- und Matching-Daten...")
     crm_df = sheet.get_sheet_as_dataframe(CRM_SHEET_NAME)
     match_df = sheet.get_sheet_as_dataframe(MATCHING_SHEET_NAME)
```
```diff
@@ -262,7 +253,6 @@ def main(job_id=None, interactive=False):
         update_status(job_id, "Fehlgeschlagen", "Leere Daten in einem der Sheets.")
         return
-    # Normalization
     update_status(job_id, "Läuft", "Normalisiere Daten...")
     crm_df['normalized_name'] = crm_df['CRM Name'].astype(str).apply(normalize_company_name)
     crm_df['normalized_domain'] = crm_df['CRM Website'].astype(str).apply(simple_normalize_url)
```
```diff
@@ -284,10 +274,9 @@ def main(job_id=None, interactive=False):
     CITY_TOKENS = build_city_tokens(crm_df, match_df)
     logger.info(f"City tokens gesammelt: {len(CITY_TOKENS)}")
-    # --- NEW: TF-IDF and index building ---
-    dynamic_stopwords = get_dynamic_stopwords(crm_df)
-    term_weights = build_term_weights(crm_df, dynamic_stopwords)
-    crm_records, domain_index, token_index = build_indexes(crm_df, dynamic_stopwords)
+    # --- TF-IDF and index building v3.1 ---
+    term_weights = build_term_weights(crm_df)
+    crm_records, domain_index, token_index = build_indexes(crm_df)
     logger.info(f"Blocking: Domains={len(domain_index)} | TokenKeys={len(token_index)}")
     # --- Matching ---
```
```diff
@@ -304,35 +293,34 @@ def main(job_id=None, interactive=False):
         candidate_indices = set()
         used_block = ''
-        # Blocking via domain
         if mrow.get('normalized_domain'):
+            # ... (candidate lookup logic, slightly adjusted for indices) ...
             candidates_from_domain = domain_index.get(mrow['normalized_domain'], [])
             for c in candidates_from_domain:
-                # Find the record's index to avoid duplicates
-                # This is inefficient; for exact index logic the domain_index would have to store indices too
-                for i, record in enumerate(crm_records):
-                    if record['CRM Name'] == c['CRM Name'] and record['CRM Website'] == c['CRM Website']:
-                        candidate_indices.add(i)
-                        break
+                try:
+                    # Find the record's index (made more robust)
+                    indices = crm_df.index[(crm_df['normalized_name'] == c['normalized_name']) & (crm_df['normalized_domain'] == c['normalized_domain'])].tolist()
+                    if indices:
+                        candidate_indices.add(indices[0])
+                except Exception:
+                    continue  # ignore errors during the index lookup
             if candidate_indices: used_block = f"domain:{mrow['normalized_domain']}"
-        # Blocking via rarest token
         if not candidate_indices:
-            rtok = choose_rarest_token(mrow.get('normalized_name',''), term_weights, dynamic_stopwords)
+            rtok = choose_rarest_token(mrow.get('normalized_name',''), term_weights)
             if rtok:
                 indices_from_token = token_index.get(rtok, [])
                 candidate_indices.update(indices_from_token)
                 used_block = f"token:{rtok}"
-        # Prefilter as fallback
         if not candidate_indices:
             pf = []
             n1 = mrow.get('normalized_name','')
-            clean1, _ = clean_name_for_scoring(n1, dynamic_stopwords)
+            clean1, _ = clean_name_for_scoring(n1)
             if clean1:
                 for i, r in enumerate(crm_records):
                     n2 = r.get('normalized_name','')
-                    clean2, _ = clean_name_for_scoring(n2, dynamic_stopwords)
+                    clean2, _ = clean_name_for_scoring(n2)
                     if not clean2: continue
                     pr = fuzz.partial_ratio(clean1, clean2)
                     if pr >= PREFILTER_MIN_PARTIAL:
```
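
The removed v3.0 comment already pointed at the cleaner fix: let the domain index store row indices at build time, so blocking needs no per-candidate DataFrame lookup at all. A sketch of that variant (not what this commit does, just the direction the old comment named):

```python
# build_indexes variant: store positions instead of whole records.
records = [{'normalized_domain': 'acme.de'}, {'normalized_domain': ''},
           {'normalized_domain': 'acme.de'}]
domain_index = {}
for idx, r in enumerate(records):
    d = r.get('normalized_domain')
    if d:
        domain_index.setdefault(d, []).append(idx)  # index, not record
print(domain_index)  # {'acme.de': [0, 2]}
# Blocking then collapses to one lookup:
# candidate_indices.update(domain_index.get(mrow['normalized_domain'], []))
```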
```diff
@@ -349,7 +337,7 @@ def main(job_id=None, interactive=False):
         scored = []
         for cr in candidates:
-            score, comp = calculate_similarity(mrow, cr, term_weights, dynamic_stopwords)
+            score, comp = calculate_similarity(mrow, cr, term_weights)
             scored.append({'name': cr.get('CRM Name',''), 'score': score, 'comp': comp, 'record': cr})
         scored.sort(key=lambda x: x['score'], reverse=True)
```
```diff
@@ -358,11 +346,12 @@ def main(job_id=None, interactive=False):
         best_match = scored[0] if scored else None
-        # --- NEW: interactive mode ---
+        # --- Interactive mode (logic unchanged) ---
        if interactive and best_match and len(scored) > 1:
             best_score = best_match['score']
             second_best_score = scored[1]['score']
-            if best_score > INTERACTIVE_SCORE_MIN and (best_score - second_best_score) < INTERACTIVE_SCORE_DIFF:
+            if best_score > INTERACTIVE_SCORE_MIN and (best_score - second_best_score) < INTERACTIVE_SCORE_DIFF and best_score < GOLDEN_MATCH_SCORE:
+                # ... (interactive-mode output and input, unchanged) ...
                 print("\n" + "="*50)
                 print(f"AMBIGUOUS MATCH for '{mrow['CRM Name']}'")
                 print(f"Top candidates have very similar scores.")
```
```diff
@@ -385,13 +374,12 @@ def main(job_id=None, interactive=False):
                     best_match = scored[choice-1]
                     logger.info(f"User selected candidate {choice}: '{best_match['name']}'")
                 elif choice == 0:
-                    best_match = None  # user decided no match
+                    best_match = None
                     logger.info("User selected no match.")
                 print("="*50 + "\n")
         if best_match and best_match['score'] >= SCORE_THRESHOLD:
-            # Weak matches (without domain/location) need a higher threshold
             is_weak = best_match['comp'].get('domain_match', 0) == 0 and not (best_match['comp'].get('city_match', 0) and best_match['comp'].get('country_match', 0))
             applied_threshold = SCORE_THRESHOLD_WEAK if is_weak else SCORE_THRESHOLD
```
```diff
@@ -409,17 +397,17 @@ def main(job_id=None, interactive=False):
             logger.info(f"  --> No Match (no candidates)")
-    # --- Write results back ---
+    # --- Write results back (logic unchanged) ---
     logger.info("Matching-Prozess abgeschlossen. Bereite Ergebnisse für den Upload vor...")
+    # ... (rest of the code stays the same) ...
     update_status(job_id, "Läuft", "Schreibe Ergebnisse zurück ins Sheet...")
     result_df = pd.DataFrame(results)
-    # Drop the result columns if they already exist in the sheet
     cols_to_drop_from_match = ['Match', 'Score', 'Match_Grund']
     match_df_clean = match_df.drop(columns=[col for col in cols_to_drop_from_match if col in match_df.columns], errors='ignore')
-    final_df = pd.concat([match_df_clean, result_df], axis=1)
+    final_df = pd.concat([match_df_clean.reset_index(drop=True), result_df.reset_index(drop=True)], axis=1)
     cols_to_drop = ['normalized_name', 'normalized_domain']
     final_df = final_df.drop(columns=[col for col in cols_to_drop if col in final_df.columns], errors='ignore')
```
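
The reset_index change is the actual bug fix in this hunk: pd.concat(..., axis=1) aligns on the index, not on row position, so frames with mismatched indices get NaN-padded instead of joined row by row. A minimal demonstration:

```python
import pandas as pd

a = pd.DataFrame({'x': [1, 2]}, index=[0, 1])
b = pd.DataFrame({'y': [3, 4]}, index=[5, 6])
print(pd.concat([a, b], axis=1).shape)  # (4, 2): NaN-padded union of indices
aligned = pd.concat([a.reset_index(drop=True),
                     b.reset_index(drop=True)], axis=1)
print(aligned.shape)                    # (2, 2): rows joined positionally
```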
```diff
@@ -439,7 +427,7 @@ def main(job_id=None, interactive=False):
         update_status(job_id, "Fehlgeschlagen", "Fehler beim Schreiben ins Google Sheet.")
 if __name__=='__main__':
-    parser = argparse.ArgumentParser(description="Duplicate Checker v3.0")
+    parser = argparse.ArgumentParser(description="Duplicate Checker v3.1")
     parser.add_argument("--job-id", type=str, help="Eindeutige ID für den Job-Status.")
     parser.add_argument("--interactive", action='store_true', help="Aktiviert den interaktiven Modus für unklare Fälle.")
     args = parser.parse_args()
```
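
For completeness, the two entry points the argparse setup exposes (the job ID is hypothetical; flags as defined above):

```sh
python duplicate_checker.py --job-id nightly-2025-09-05
python duplicate_checker.py --interactive
```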