dealfront_enrichment.py aktualisiert

This commit is contained in:
2025-07-13 10:57:14 +00:00
parent c1782b24aa
commit aa178e57ba

View File

@@ -111,24 +111,21 @@ class DealfrontScraper:
def extract_current_page_results(self):
    """Extract company name and website from each row of the current result page.

    Reads the visible result table via ``self.driver`` and returns a list of
    dicts with keys ``'name'`` and ``'website'``; ``'website'`` is ``"N/A"``
    when a row has no website link. Rows without a name anchor are skipped.
    """
    results = []
    # Select data rows by their id attribute — presumably more stable than
    # the previous XPath on the highlight-text anchor (TODO confirm against
    # the live page markup).
    rows_selector = (By.CSS_SELECTOR, "table#t-result-table tbody tr[id]")
    data_rows = self.driver.find_elements(*rows_selector)
    logger.info(f"{len(data_rows)} Datenzeilen zur Extraktion gefunden.")
    for row in data_rows:
        try:
            name = row.find_element(
                By.CSS_SELECTOR, ".sticky-column a.t-highlight-text"
            ).get_attribute("title").strip()
            website = "N/A"
            try:
                website = row.find_element(
                    By.CSS_SELECTOR, "a.text-gray-400.t-highlight-text"
                ).text.strip()
            except NoSuchElementException:
                # Row has no website link — keep the "N/A" default.
                pass
            results.append({'name': name, 'website': website})
        except NoSuchElementException:
            # Row has no name anchor (e.g. filler/header row) — skip it.
            continue
    return results
def scrape_all_pages(self, max_pages=10):