dealfront_enrichment.py aktualisiert
This commit is contained in:
@@ -22,7 +22,10 @@ class TempConfig:
|
||||
# ==============================================================================
# Module configuration: output directory and logging setup.
# ==============================================================================

OUTPUT_DIR = "/app/output"

# Logging configuration. A single basicConfig call; force=True replaces any
# handlers that imported libraries may already have installed.
LOG_LEVEL = logging.INFO
LOG_FORMAT = '%(asctime)s - %(levelname)-8s - %(name)-25s - %(message)s'

logging.basicConfig(level=LOG_LEVEL, format=LOG_FORMAT, force=True, handlers=[logging.StreamHandler()])

# Quieten the very chatty selenium logger.
logging.getLogger("selenium").setLevel(logging.WARNING)

logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -111,28 +114,41 @@ class DealfrontScraper:
|
||||
return False
|
||||
|
||||
def extract_current_page_results(self):
    """
    Extract company names and websites from the current results page
    using direct (verified) CSS selectors.

    Pairs the i-th company-name element with the i-th website element;
    entries where either value is empty are skipped.

    Returns:
        list[dict]: one ``{'name': ..., 'website': ...}`` dict per company;
        an empty list when nothing could be extracted or on any error
        (debug artifacts are saved in those cases).
    """
    try:
        logger.info("Extrahiere Ergebnisse mit direkten CSS-Selektoren...")
        results = []

        # Wait until the result table has at least one data row.
        rows_selector = (By.CSS_SELECTOR, "table#t-result-table tbody tr[id]")
        self.wait.until(EC.presence_of_element_located(rows_selector))

        # Wait until the first company-name element is present.
        company_name_selector = ".sticky-column a.t-highlight-text"
        logger.info(f"Warte auf das erste Firmenelement mit Selektor: '{company_name_selector}'")
        self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, company_name_selector)))
        # Grace period for the table to finish rendering all rows.
        time.sleep(3)

        rows = self.driver.find_elements(*rows_selector)
        if not rows:
            logger.warning("Keine Ergebniszeilen (tr[id]) gefunden.")

        # Extract all name and website elements in one pass each.
        company_elements = self.driver.find_elements(By.CSS_SELECTOR, company_name_selector)
        website_elements = self.driver.find_elements(By.CSS_SELECTOR, "a.text-gray-400.t-highlight-text")

        logger.info(f"{len(company_elements)} Firmennamen und {len(website_elements)} Webseiten-Elemente gefunden.")

        # Pair names and websites positionally; stop at the shorter list.
        num_results = min(len(company_elements), len(website_elements))
        if num_results == 0:
            logger.warning("Keine Firmen oder Webseiten gefunden.")
            self._save_debug_artifacts()
            return []

        for i in range(num_results):
            company_name = company_elements[i].get_attribute("title").strip()
            website = website_elements[i].text.strip()
            if company_name and website:
                results.append({'name': company_name, 'website': website})

        logger.info(f"Extraktion abgeschlossen. {len(results)} Firmen erfolgreich zugeordnet.")
        return results

    except Exception as e:
        logger.error(f"Schwerwiegender Fehler bei der Extraktion: {type(e).__name__}", exc_info=True)
        self._save_debug_artifacts()
        # Explicit empty result on failure so callers always get a list.
        return []
|
||||
|
||||
Reference in New Issue
Block a user