From aa178e57ba46b62a2b1d46e004bb14e1a4b125c4 Mon Sep 17 00:00:00 2001
From: Floke
Date: Sun, 13 Jul 2025 10:57:14 +0000
Subject: [PATCH] dealfront_enrichment.py aktualisiert

---
 dealfront_enrichment.py | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/dealfront_enrichment.py b/dealfront_enrichment.py
index cde40314..72e6182b 100644
--- a/dealfront_enrichment.py
+++ b/dealfront_enrichment.py
@@ -111,24 +111,21 @@ class DealfrontScraper:
 
     def extract_current_page_results(self):
         results = []
-        rows_selector = (By.XPATH, "//table[@id='t-result-table']/tbody/tr[.//a[contains(@class, 't-highlight-text')]]")
+        rows_selector = (By.CSS_SELECTOR, "table#t-result-table tbody tr[id]")
+        data_rows = self.driver.find_elements(*rows_selector)
+        logger.info(f"{len(data_rows)} Datenzeilen zur Extraktion gefunden.")
 
-        try:
-            data_rows = self.driver.find_elements(*rows_selector)
-            for row in data_rows:
+        for row in data_rows:
+            try:
+                name = row.find_element(By.CSS_SELECTOR, ".sticky-column a.t-highlight-text").get_attribute("title").strip()
+                website = "N/A"
                 try:
-                    name = row.find_element(By.CSS_SELECTOR, ".sticky-column a.t-highlight-text").get_attribute("title").strip()
-                    website = "N/A"
-                    try:
-                        website = row.find_element(By.CSS_SELECTOR, "a.text-gray-400.t-highlight-text").text.strip()
-                    except NoSuchElementException:
-                        pass
-                    results.append({'name': name, 'website': website})
+                    website = row.find_element(By.CSS_SELECTOR, "a.text-gray-400.t-highlight-text").text.strip()
                 except NoSuchElementException:
-                    continue
-        except Exception as e:
-            logger.error(f"Fehler bei der Extraktion auf der aktuellen Seite: {e}")
-
+                    pass
+                results.append({'name': name, 'website': website})
+            except NoSuchElementException:
+                continue
         return results
 
     def scrape_all_pages(self, max_pages=10):