dealfront_enrichment.py aktualisiert

This commit is contained in:
2025-07-13 10:57:14 +00:00
parent c1782b24aa
commit aa178e57ba

View File

@@ -111,24 +111,21 @@ class DealfrontScraper:
def extract_current_page_results(self):
    """Extract company name/website pairs from the currently visible results table.

    Scans every row of the ``#t-result-table`` table that carries an ``id``
    attribute (data rows) and collects one dict per row of the form
    ``{'name': <title of the highlighted name link>, 'website': <link text or 'N/A'>}``.

    Returns:
        list[dict]: One entry per successfully parsed row; empty list when
        no data rows are found on the page.
    """
    results = []
    # Only rows with an id are data rows; header/filler rows are skipped.
    rows_selector = (By.CSS_SELECTOR, "table#t-result-table tbody tr[id]")
    data_rows = self.driver.find_elements(*rows_selector)
    logger.info(f"{len(data_rows)} Datenzeilen zur Extraktion gefunden.")
    for row in data_rows:
        try:
            name_link = row.find_element(
                By.CSS_SELECTOR, ".sticky-column a.t-highlight-text"
            )
            # Selenium's get_attribute returns None when the attribute is
            # missing; calling .strip() on None raised AttributeError and
            # aborted the whole extraction. Skip such rows instead.
            title = name_link.get_attribute("title")
            if title is None:
                continue
            name = title.strip()
            website = "N/A"  # default when the row has no website link
            try:
                website = row.find_element(
                    By.CSS_SELECTOR, "a.text-gray-400.t-highlight-text"
                ).text.strip()
            except NoSuchElementException:
                pass  # website column is optional; keep the 'N/A' default
            results.append({'name': name, 'website': website})
        except NoSuchElementException:
            # Row without the expected name link is not a data row; skip it.
            continue
    return results
def scrape_all_pages(self, max_pages=10): def scrape_all_pages(self, max_pages=10):