diff --git a/dealfront_enrichment.py b/dealfront_enrichment.py
index 6e90e72d..b752ac7e 100644
--- a/dealfront_enrichment.py
+++ b/dealfront_enrichment.py
@@ -132,24 +132,29 @@ class DealfrontScraper:
         rows = self.driver.find_elements(*rows_selector)
         logger.info(f"{len(rows)} Firmen-Datenzeilen zur Verarbeitung gefunden.")
 
+        # NOTE(review): if an unexpected WebDriverException escapes the loop below,
+        # the 10s implicit wait is never restored — consider wrapping in try/finally.
+        self.driver.implicitly_wait(1)
         for i, row in enumerate(rows, 1):
             try:
-                # Firmennamen holen
+                # Firmennamen holen...
                 name_elem = row.find_element(By.CSS_SELECTOR, ".sticky-column a.t-highlight-text")
                 company_name = (name_elem.get_attribute("title") or name_elem.text).strip()
 
-                # Website aus der zweiten Spalte: erst href, dann Text-Fallback
-                try:
-                    website_elem = row.find_element(By.CSS_SELECTOR, "td:nth-of-type(2) a")
-                    # HREF bereinigen (ohne https://)
-                    website = website_elem.get_attribute("href").split("://")[-1].strip("/")
-                except NoSuchElementException:
-                    website = row.find_element(By.CSS_SELECTOR, "td:nth-of-type(2)").text.strip()
+                # Website aus der dritten Spalte: erst href, dann Text-Fallback
+                elems = row.find_elements(By.CSS_SELECTOR, "td:nth-of-type(3) a")
+                if elems:
+                    # [-1] instead of [1]: no IndexError when the href lacks "://"
+                    # (e.g. mailto: or relative links); matches old behavior.
+                    website = elems[0].get_attribute("href").split("://", 1)[-1].rstrip("/")
+                else:
+                    website = row.find_element(By.CSS_SELECTOR, "td:nth-of-type(3)").text.strip()
 
                 results.append({'name': company_name, 'website': website})
             except NoSuchElementException:
                 logger.warning(f"Zeile {i}: Name oder Webseite nicht extrahierbar. Überspringe.")
                 continue
 
+        # Implicit-Wait wiederherstellen (z. B. 10 Sekunden)
+        self.driver.implicitly_wait(10)
+
         logger.info(f"Extraktion abgeschlossen. {len(results)} Firmen gefunden.")
         return results