duplicate_checker.py updated

2025-08-08 06:00:19 +00:00
parent be3f48aceb
commit ec56daa9ea


@@ -7,15 +7,15 @@ from helpers import normalize_company_name, simple_normalize_url, serp_website_lookup
from config import Config
from google_sheet_handler import GoogleSheetHandler
# duplicate_checker.py v2.11 (SerpAPI only for Matching accounts)
# Version: 2025-08-08_10-00
# duplicate_checker.py v2.12 (Serp URL as a new column; matching uses it only for blank values)
# Version: 2025-08-08_10-20
# --- Configuration ---
CRM_SHEET_NAME = "CRM_Accounts"
MATCHING_SHEET_NAME = "Matching_Accounts"
SCORE_THRESHOLD = 80 # score threshold
LOG_DIR = "Log"
LOG_FILE = "duplicate_check_v2.11.txt"
LOG_FILE = "duplicate_check_v2.12.txt"
# --- Logging Setup ---
if not os.path.exists(LOG_DIR):
@@ -23,7 +23,8 @@ if not os.path.exists(LOG_DIR):
log_path = os.path.join(LOG_DIR, LOG_FILE)
root = logging.getLogger()
root.setLevel(logging.DEBUG)
for h in list(root.handlers): root.removeHandler(h)
for h in list(root.handlers):
root.removeHandler(h)
formatter = logging.Formatter("%(asctime)s - %(levelname)-8s - %(message)s")
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
@@ -35,7 +36,7 @@ fh.setFormatter(formatter)
root.addHandler(fh)
logger = logging.getLogger(__name__)
logger.info(f"Logging to console and file: {log_path}")
logger.info("Starting duplicate_checker.py v2.11 | Version: 2025-08-08_10-00")
logger.info("Starting duplicate_checker.py v2.12 | Version: 2025-08-08_10-20")
# --- Load SerpAPI key ---
try:
@@ -67,7 +68,7 @@ def calculate_similarity(record1, record2):
# --- Main function ---
def main():
logger.info("Starte Duplikats-Check v2.11 mit SerpAPI-Fallback (nur Matching)")
logger.info("Starte Duplikats-Check v2.12 (Serp-URL als neue Spalte)")
try:
sheet = GoogleSheetHandler()
logger.info("GoogleSheetHandler initialisiert")
@@ -96,8 +97,11 @@ def main():
company = row['CRM Name']
try:
url = serp_website_lookup(company)
if url and 'http' in url and 'k.A.' not in url:
match_df.at[idx, 'CRM Website'] = url
if url and 'k.A.' not in url:
# add the URL scheme if missing
if not str(url).startswith(('http://','https://')):
url = 'https://' + str(url).lstrip()
match_df.at[idx, 'Gefundene Website'] = url
logger.info(f" ✓ URL gefunden: '{company}' -> {url}")
found_cnt += 1
else:
@@ -108,14 +112,29 @@ def main():
else:
logger.info("Serp-Fallback übersprungen: keine fehlenden Matching-URLs")
# Normalisierung & Blocking-Key
for df, label in [(crm_df,'CRM'), (match_df,'Matching')]:
df['normalized_name'] = df['CRM Name'].astype(str).apply(normalize_company_name)
df['normalized_domain'] = df['CRM Website'].astype(str).apply(simple_normalize_url)
df['CRM Ort'] = df['CRM Ort'].astype(str).str.lower().str.strip()
df['CRM Land'] = df['CRM Land'].astype(str).str.lower().str.strip()
df['block_key'] = df['normalized_name'].apply(lambda x: x.split()[0] if x else None)
logger.debug(f"{label}-Sample: {df.iloc[0][['normalized_name','normalized_domain','block_key']].to_dict()}")
# --- Normalization ---
# normalize CRM data (uses CRM Website exclusively)
crm_df['normalized_name'] = crm_df['CRM Name'].astype(str).apply(normalize_company_name)
crm_df['normalized_domain'] = crm_df['CRM Website'].astype(str).apply(simple_normalize_url)
crm_df['CRM Ort'] = crm_df['CRM Ort'].astype(str).str.lower().str.strip()
crm_df['CRM Land'] = crm_df['CRM Land'].astype(str).str.lower().str.strip()
crm_df['block_key'] = crm_df['normalized_name'].apply(lambda x: x.split()[0] if x else None)
# normalize Matching data (effective website = CRM Website, or Gefundene Website when blank)
match_df['Gefundene Website'] = match_df.get('Gefundene Website', pd.Series(index=match_df.index, dtype=object))
match_df['Effektive Website'] = match_df['CRM Website'].fillna('').astype(str).str.strip()
mask_eff = match_df['Effektive Website'] == ''
match_df.loc[mask_eff, 'Effektive Website'] = match_df['Gefundene Website'].fillna('').astype(str).str.strip()
match_df['normalized_name'] = match_df['CRM Name'].astype(str).apply(normalize_company_name)
match_df['normalized_domain'] = match_df['Effektive Website'].astype(str).apply(simple_normalize_url)
match_df['CRM Ort'] = match_df['CRM Ort'].astype(str).str.lower().str.strip()
match_df['CRM Land'] = match_df['CRM Land'].astype(str).str.lower().str.strip()
match_df['block_key'] = match_df['normalized_name'].apply(lambda x: x.split()[0] if x else None)
# debug samples
logger.debug(f"CRM sample: {crm_df.iloc[0][['normalized_name','normalized_domain','block_key']].to_dict()}")
logger.debug(f"Matching sample: {match_df.iloc[0][['normalized_name','normalized_domain','block_key','Effektive Website','Gefundene Website']].to_dict()}")
# build the blocking index
crm_index = {}
@@ -130,10 +149,13 @@ def main():
total=len(match_df)
logger.info("Starte Matching-Prozess...")
for i,mrow in match_df.iterrows():
key = mrow['block_key']; cands=crm_index.get(key,[])
logger.info(f"Prüfe {i+1}/{total}: '{mrow['CRM Name']}' -> {len(cands)} Kandidaten")
key = mrow['block_key']
cands = crm_index.get(key,[])
used_src = 'looked up' if (str(mrow.get('CRM Website','')).strip()=='' and str(mrow.get('Gefundene Website','')).strip()!='') else 'original'
logger.info(f"Checking {i+1}/{total}: '{mrow['CRM Name']}' -> {len(cands)} candidates (website source: {used_src})")
if not cands:
results.append({'Match':'','Score':0}); continue
results.append({'Match':'','Score':0})
continue
scored=[]
for crow in cands:
sc,dm,ns,lm,bf=calculate_similarity(mrow,crow)
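calculate_similarity is called here but defined outside the visible hunks; the call site shows a 5-tuple of score, domain match, name score, location match and bonus. A minimal sketch matching that shape, assuming rapidfuzz for the fuzzy name comparison; the weights are invented:

from rapidfuzz import fuzz

def calculate_similarity(record1, record2):
    # compare normalized domain, name and location; weights below are assumptions
    dm = int(bool(record1['normalized_domain'])
             and record1['normalized_domain'] == record2['normalized_domain'])
    ns = int(fuzz.token_sort_ratio(record1['normalized_name'], record2['normalized_name']))
    lm = int(record1['CRM Ort'] == record2['CRM Ort'] != '')
    bf = 10 if dm and lm else 0  # small bonus when domain and location both agree
    score = min(100, int(0.5 * ns) + 40 * dm + 10 * lm + bf)
    return score, dm, ns, lm, bf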
@@ -148,16 +170,16 @@ def main():
results.append({'Match':'','Score':best_score})
logger.info(f" --> Kein Match (Score={best_score}) [Dom={dm},Name={ns},Ort={lm},Bonus={bf}]")
# write results back
# write results back (incl. Gefundene Website)
logger.info("Schreibe Ergebnisse ins Sheet...")
out=pd.DataFrame(results)
output=match_df[['CRM Name','CRM Website','CRM Ort','CRM Land']].copy()
output=pd.concat([output.reset_index(drop=True),out],axis=1)
data=[output.columns.tolist()]+output.values.tolist()
if sheet.clear_and_write_data(MATCHING_SHEET_NAME,data):
out = pd.DataFrame(results)
output = match_df[['CRM Name','CRM Website','Gefundene Website','CRM Ort','CRM Land']].copy()
output = pd.concat([output.reset_index(drop=True), out], axis=1)
data = [output.columns.tolist()] + output.values.tolist()
if sheet.clear_and_write_data(MATCHING_SHEET_NAME, data):
logger.info("Ergebnisse erfolgreich geschrieben")
else:
logger.error("Fehler beim Schreiben ins Google Sheet")
if __name__ == '__main__':
main()
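clear_and_write_data is the only GoogleSheetHandler method this diff exercises. A minimal sketch of what it plausibly does with gspread; everything except the method name and call shape is an assumption:

import gspread

class GoogleSheetHandler:
    def __init__(self):
        self.gc = gspread.service_account()  # assumes service-account credentials
        self.book = self.gc.open("CRM-Workbook")  # workbook title is a placeholder

    def clear_and_write_data(self, sheet_name, data):
        # wipe the worksheet, then write header + rows in one batch; True on success
        try:
            ws = self.book.worksheet(sheet_name)
            ws.clear()
            ws.append_rows(data)  # data = [header, row1, row2, ...]
            return True
        except gspread.exceptions.GSpreadException:
            return False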