data_processor.py updated
@@ -817,9 +817,9 @@ class DataProcessor:
         """
         Batch process ONLY for website scraping (raw text & meta details).
         This version is stabilized, uses a robust worker (_scrape_website_task_batch)
-        and processes structured dictionary results in order to avoid errors.
+        and processes structured dictionary results, including the URL check status.
         """
-        self.logger.info(f"Starting website scraping (batch, v2.0.1). Range: {start_sheet_row or 'start'}-{end_sheet_row or 'end'}, limit: {limit or 'unlimited'}")
+        self.logger.info(f"Starting website scraping (batch, v2.0.7). Range: {start_sheet_row or 'start'}-{end_sheet_row or 'end'}, limit: {limit or 'unlimited'}")

         # --- 1. Load data and determine the start row ---
         if start_sheet_row is None:
@@ -848,6 +848,7 @@ class DataProcessor:
         # --- 2. Prepare column indices and letters ---
         rohtext_col_letter = self.sheet_handler._get_col_letter(get_col_idx("Website Rohtext") + 1)
         metadetails_col_letter = self.sheet_handler._get_col_letter(get_col_idx("Website Meta-Details") + 1)
+        pruefstatus_col_letter = self.sheet_handler._get_col_letter(get_col_idx("URL Prüfstatus") + 1)  # NEW
         version_col_letter = self.sheet_handler._get_col_letter(get_col_idx("Version") + 1)
         timestamp_col_letter = self.sheet_handler._get_col_letter(get_col_idx("Website Scrape Timestamp") + 1)

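The "+ 1" in these lookups suggests that get_col_idx returns a 0-based index while _get_col_letter expects a 1-based one. A minimal standalone sketch of such a conversion, assuming exactly that convention (the real helper is not part of this commit):

def col_letter(index_1based: int) -> str:
    """Convert a 1-based column index to its A1 letter, e.g. 1 -> 'A', 27 -> 'AA'."""
    letters = ""
    n = index_1based
    while n > 0:
        n, rem = divmod(n - 1, 26)   # shift to 0-based, peel off one base-26 digit
        letters = chr(ord('A') + rem) + letters
    return letters

assert col_letter(1) == "A" and col_letter(26) == "Z" and col_letter(27) == "AA"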
@@ -872,14 +873,12 @@ class DataProcessor:

             if self._needs_website_processing(row, force_reeval=False):
                 website_url = self._get_cell_value_safe(row, "CRM Website").strip()
-                company_name = self._get_cell_value_safe(row, "CRM Name").strip()
-
                 if website_url and website_url.lower() not in ["k.a.", "http:"]:
                     if limit is not None and processed_count >= limit:
                         self.logger.info(f"Processing limit ({limit}) reached.")
                         break

-                    tasks_for_processing_batch.append({"row_num": i, "url": website_url, "company_name": company_name})
+                    tasks_for_processing_batch.append({"row_num": i, "url": website_url})
                     processed_count += 1
                 else:
                     skipped_count += 1
@@ -890,33 +889,28 @@ class DataProcessor:
             if len(tasks_for_processing_batch) >= processing_batch_size or (i == effective_end_row and tasks_for_processing_batch):
                 self.logger.info(f"--- Starting website scraping batch for {len(tasks_for_processing_batch)} tasks (max. {max_scraping_workers} workers) ---")

                 # Dictionary for storing the complete result dicts
                 scraping_results = {}
                 batch_error_count = 0

                 with ThreadPoolExecutor(max_workers=max_scraping_workers) as executor:
                     # The new, robust worker function is called here
                     future_to_task = {executor.submit(self._scrape_website_task_batch, task): task for task in tasks_for_processing_batch}

                     for future in as_completed(future_to_task):
                         task = future_to_task[future]
                         try:
                             # The result is guaranteed to be a dictionary
                             result_dict = future.result()

                             if isinstance(result_dict, dict) and 'row_num' in result_dict:
                                 scraping_results[result_dict['row_num']] = result_dict
                                 if result_dict.get('error'):
                                     batch_error_count += 1
                                     self.logger.warning(f"Worker reported an error for row {result_dict['row_num']}: {result_dict.get('status_message')}")
                             else:
                                 # Fallback in case something unexpected happens after all
                                 self.logger.error(f"Inconsistent result for row {task['row_num']}: expected a dict with 'row_num', got {type(result_dict)}. Skipping.")
-                                scraping_results[task['row_num']] = {'raw_text': "FEHLER (Inkonsistenter Rückgabetyp)", 'meta_details': 'k.A.', 'error': True}
+                                scraping_results[task['row_num']] = {'raw_text': "FEHLER (Inkonsistenter Rückgabetyp)", 'meta_details': 'k.A.', 'url_pruefstatus': 'URL_SCRAPE_ERROR', 'error': True}
                                 batch_error_count += 1
                         except Exception as exc:
                             self.logger.error(f"Unexpected error while fetching the result for row {task['row_num']}: {exc}", exc_info=True)
-                            scraping_results[task['row_num']] = {'raw_text': "FEHLER (Task Exception)", 'meta_details': 'k.A.', 'error': True}
+                            scraping_results[task['row_num']] = {'raw_text': "FEHLER (Task Exception)", 'meta_details': 'k.A.', 'url_pruefstatus': 'URL_SCRAPE_ERROR', 'error': True}
                             batch_error_count += 1

                 self.logger.info(f" -> Scraping for this batch finished. {len(scraping_results)} results received ({batch_error_count} of them with errors).")
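The loop above leans on the worker contract that every future resolves to a dict keyed by 'row_num'. A self-contained sketch of that collection pattern with a stubbed worker (fake_worker is a placeholder, not part of the codebase):

from concurrent.futures import ThreadPoolExecutor, as_completed

def fake_worker(task):
    # Stand-in for _scrape_website_task_batch: always returns a structured dict.
    return {'row_num': task['row_num'], 'raw_text': 'ok', 'error': False}

tasks = [{'row_num': n, 'url': f'https://example.com/{n}'} for n in (2, 3, 4)]
results = {}
with ThreadPoolExecutor(max_workers=2) as pool:
    future_to_task = {pool.submit(fake_worker, t): t for t in tasks}
    for future in as_completed(future_to_task):
        res = future.result()
        # Mirror the defensive check: only trust dicts that carry 'row_num'.
        if isinstance(res, dict) and 'row_num' in res:
            results[res['row_num']] = res

print(sorted(results))  # [2, 3, 4]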
@@ -927,25 +921,24 @@ class DataProcessor:
                 current_version = getattr(Config, 'VERSION', 'unknown')

                 for row_num, res_dict in scraping_results.items():
-                    # Raw text, meta details, timestamp and version are added to the update
                     all_sheet_updates.append({'range': f'{rohtext_col_letter}{row_num}', 'values': [[res_dict.get('raw_text', 'k.A.')]]})
                     all_sheet_updates.append({'range': f'{metadetails_col_letter}{row_num}', 'values': [[res_dict.get('meta_details', 'k.A.')]]})
+                    all_sheet_updates.append({'range': f'{pruefstatus_col_letter}{row_num}', 'values': [[res_dict.get('url_pruefstatus', 'URL_SCRAPE_ERROR')]]})  # NEW
                     all_sheet_updates.append({'range': f'{timestamp_col_letter}{row_num}', 'values': [[current_timestamp]]})
                     all_sheet_updates.append({'range': f'{version_col_letter}{row_num}', 'values': [[current_version]]})

-                tasks_for_processing_batch = []  # clear the batch
+                tasks_for_processing_batch = []

             # --- 6. Trigger the sheet update when the update batch is full ---
-            # There are 4 updates per row (raw text, meta, TS, version)
-            if len(all_sheet_updates) >= (update_batch_row_limit * 4):
-                self.logger.info(f"Sending collected sheet updates ({len(all_sheet_updates) // 4} rows)...")
+            if len(all_sheet_updates) >= (update_batch_row_limit * 5):  # NEW: 5 updates per row
+                self.logger.info(f"Sending collected sheet updates ({len(all_sheet_updates) // 5} rows)...")
                 self.sheet_handler.batch_update_cells(all_sheet_updates)
                 all_sheet_updates = []
-                time.sleep(1)  # short pause after a large update
+                time.sleep(1)

         # --- 7. Send the final updates ---
         if all_sheet_updates:
-            self.logger.info(f"Sending final collected sheet updates ({len(all_sheet_updates) // 4} rows)...")
+            self.logger.info(f"Sending final collected sheet updates ({len(all_sheet_updates) // 5} rows)...")
             self.sheet_handler.batch_update_cells(all_sheet_updates)

         self.logger.info(f"Website scraping (batch) finished. {processed_count} rows selected for processing, {skipped_count} rows skipped.")
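Since each processed row now yields five cell updates (raw text, meta details, check status, timestamp, version), both the flush threshold and the logged row count use the factor 5. A worked example of the arithmetic, with update_batch_row_limit assumed to be 100 (the actual value is not visible in this diff):

UPDATES_PER_ROW = 5            # raw text, meta, status, timestamp, version
update_batch_row_limit = 100   # assumed config value, not shown in the diff

pending_updates = 505          # e.g. len(all_sheet_updates) mid-run
if pending_updates >= update_batch_row_limit * UPDATES_PER_ROW:  # 505 >= 500 -> flush
    print(f"Sending collected sheet updates ({pending_updates // UPDATES_PER_ROW} rows)...")  # 101 rows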
@@ -954,12 +947,10 @@ class DataProcessor:
         """
         Robust worker function for scraping websites in parallel in batch mode.
         This function fetches raw text as well as meta details and ALWAYS returns a structured
-        dictionary, to guarantee consistent processing in the main thread.
-        It encapsulates the error logic that originally lived in `get_website_raw`.
+        dictionary, which also contains the programmatic URL check status.
         """
         row_num = task_info['row_num']
         url = task_info['url']
-        company_name = task_info.get('company_name', 'einem Unternehmen')
         self.logger.debug(f" -> Batch scrape task started for row {row_num}: {url}")

         result = {
@@ -967,7 +958,8 @@ class DataProcessor:
             'raw_text': 'k.A. (Fehler im Task)',
             'meta_details': 'k.A. (Fehler im Task)',
             'error': True,
-            'status_message': 'Unknown task error'
+            'status_message': 'Unknown task error',
+            'url_pruefstatus': 'URL_SCRAPE_ERROR'  # default error status
         }

         try:
@@ -975,30 +967,44 @@ class DataProcessor:
             raw_text_result = get_website_raw(url)

             # 2. Evaluate the result of the raw text fetch
-            if raw_text_result and not str(raw_text_result).strip().lower().startswith('k.a.'):
+            if raw_text_result == URL_CHECK_MARKER:
+                result['raw_text'] = "k.A. (URL prüfen)"
+                result['meta_details'] = "k.A."
+                result['error'] = True
+                result['status_message'] = "URL flagged as unreachable"
+                result['url_pruefstatus'] = URL_CHECK_MARKER
+
+            elif raw_text_result and not str(raw_text_result).strip().lower().startswith('k.a.'):
                 result['raw_text'] = raw_text_result
                 result['error'] = False
                 result['status_message'] = 'Successfully scraped'
+                result['url_pruefstatus'] = 'URL_OK_SCRAPED'

-                # 3. On success, also fetch the meta details
+                # On success, also fetch the meta details
                 meta_details_result = scrape_website_details(url)
                 result['meta_details'] = meta_details_result if meta_details_result else "k.A. (Keine Meta-Details)"

             # 4. Handle specific error strings from get_website_raw
             elif "k.A. (Nur Cookie-Banner erkannt)" in raw_text_result:
                 result['raw_text'] = raw_text_result
                 result['meta_details'] = "k.A."
                 result['error'] = True
                 result['status_message'] = "Only cookie banner detected"
+                result['url_pruefstatus'] = "URL_SCRAPE_EMPTY_OR_BANNER"

             elif str(raw_text_result).strip().lower().startswith('k.a.'):
                 result['raw_text'] = raw_text_result  # keep the error string
                 result['meta_details'] = "k.A."
                 result['error'] = True
                 # Extract the reason from the string, e.g. "Timeout"
                 match = re.search(r'\((.*?)\)', raw_text_result)
                 result['status_message'] = match.group(1) if match else "Scraping failed"
+                result['url_pruefstatus'] = "URL_SCRAPE_ERROR"

-            # 5. Fallback for unexpectedly empty results
-            else:
+            else:  # Fallback for unexpectedly empty results
                 result['raw_text'] = 'k.A. (Extraktion leer)'
                 result['meta_details'] = 'k.A.'
                 result['error'] = True
                 result['status_message'] = 'Extraction returned empty text'
+                result['url_pruefstatus'] = "URL_SCRAPE_EMPTY_OR_BANNER"

         return result
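For reference, the check statuses this worker can write to the "URL Prüfstatus" column, as read from the hunks above (a sketch; URL_CHECK_MARKER is an imported constant whose value is not visible in this diff):

URL_PRUEFSTATUS_VALUES = {
    'URL_OK_SCRAPED': "raw text extracted successfully",
    'URL_SCRAPE_EMPTY_OR_BANNER': "empty extraction or cookie banner only",
    'URL_SCRAPE_ERROR': "default on any scrape failure or task exception",
    # plus URL_CHECK_MARKER itself, passed through when get_website_raw
    # has flagged the URL as unreachable.
}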