dealfront_enrichment.py updated

2025-07-13 11:05:48 +00:00
parent a48e4f0075
commit 707c87f9d6


@@ -14,7 +14,7 @@ from selenium.common.exceptions import TimeoutException, NoSuchElementException
 class Config:
     LOGIN_URL = "https://app.dealfront.com/login"
     TARGET_URL = "https://app.dealfront.com/t/prospector/companies"
-    SEARCH_NAME = "Facility Management"  # <-- ADJUST THIS TO MATCH YOUR SAVED SEARCH
+    SEARCH_NAME = "Facility Management"
     CREDENTIALS_FILE = "/app/dealfront_credentials.json"
     OUTPUT_DIR = "/app/output"
@@ -34,7 +34,6 @@ class DealfrontScraper:
     def __init__(self):
         logger.info("Initializing WebDriver...")
         chrome_options = ChromeOptions()
-        chrome_options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})
         chrome_options.add_argument("--headless=new")
         chrome_options.add_argument("--no-sandbox")
         chrome_options.add_argument("--disable-dev-shm-usage")
@@ -49,7 +48,7 @@ class DealfrontScraper:
         self.wait = WebDriverWait(self.driver, 30)
         self.username, self.password = self._load_credentials()
         if not self.username or not self.password:
-            raise ValueError("Could not load credentials. Aborting.")
+            raise ValueError("Could not load credentials.")
         logger.info("WebDriver initialized successfully.")

     def _load_credentials(self):
@@ -62,15 +61,7 @@ class DealfrontScraper:
         return None, None

     def _save_debug_artifacts(self, suffix=""):
-        try:
-            timestamp = time.strftime("%Y%m%d-%H%M%S")
-            filename_base = os.path.join(Config.OUTPUT_DIR, f"error_{suffix}_{timestamp}")
-            self.driver.save_screenshot(f"{filename_base}.png")
-            with open(f"{filename_base}.html", "w", encoding="utf-8") as f:
-                f.write(self.driver.page_source)
-            logger.error(f"Debug artifacts saved: {filename_base}.*")
-        except Exception as e:
-            logger.error(f"Could not save debug artifacts: {e}")
+        pass  # Disabled for now to keep the logs clean

     def login(self):
         try:
@@ -79,14 +70,12 @@ class DealfrontScraper:
             self.wait.until(EC.visibility_of_element_located((By.NAME, "email"))).send_keys(self.username)
             self.driver.find_element(By.CSS_SELECTOR, "input[type='password']").send_keys(self.password)
             self.driver.find_element(By.XPATH, "//button[normalize-space()='Log in']").click()
-            logger.info("Login command sent. Waiting 5 seconds for the session to be established.")
+            logger.info("Login command sent. Waiting for session...")
             time.sleep(5)
-            # Verify that we are no longer on the login page
-            if "login" not in self.driver.current_url:
-                logger.info("Login successful, URL has changed.")
-                return True
-            self._save_debug_artifacts("login_stuck")
-            return False
+            if "login" in self.driver.current_url:
+                raise Exception("Redirect after login failed.")
+            logger.info("Login successful.")
+            return True
         except Exception as e:
             logger.critical("Login process failed.", exc_info=True)
             self._save_debug_artifacts("login_exception")
@@ -94,14 +83,14 @@ class DealfrontScraper:
     def navigate_and_load_search(self, search_name):
         try:
-            logger.info(f"Navigating directly to the target page and loading the search...")
+            logger.info(f"Navigating directly to the target page and loading search...")
             self.driver.get(Config.TARGET_URL)
             self.wait.until(EC.url_contains("/t/prospector/"))
             search_item_selector = (By.XPATH, f"//div[contains(@class, 'truncate') and normalize-space()='{search_name}']")
             self.wait.until(EC.element_to_be_clickable(search_item_selector)).click()
-            logger.info("Search loaded. Waiting for the result table to render.")
+            logger.info("Search loaded. Waiting for result table.")
             self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "table#t-result-table tbody tr")))
             return True
         except Exception as e:
@@ -109,77 +98,49 @@ class DealfrontScraper:
             self._save_debug_artifacts("navigation_or_search_load")
             return False

-    def extract_current_page_results(self):
-        results = []
-        rows_selector = (By.CSS_SELECTOR, "table#t-result-table tbody tr[id]")
-        data_rows = self.driver.find_elements(*rows_selector)
-        logger.info(f"Found {len(data_rows)} data rows to extract.")
-        for row in data_rows:
-            try:
-                name = row.find_element(By.CSS_SELECTOR, ".sticky-column a.t-highlight-text").get_attribute("title").strip()
-                website = "N/A"
-                try:
-                    website = row.find_element(By.CSS_SELECTOR, "a.text-gray-400.t-highlight-text").text.strip()
-                except NoSuchElementException:
-                    pass
-                results.append({'name': name, 'website': website})
-            except NoSuchElementException:
-                continue
-        return results
-
-    def scrape_all_pages(self, max_pages=10):
-        """
-        Iterates over all result pages, scrolls each page to the bottom,
-        extracts the data, and clicks the correct "Next" button.
-        """
-        all_companies = []
+    def scrape_all_pages(self, max_pages=6):
+        all_companies = {}
         for page_number in range(1, max_pages + 1):
             logger.info(f"--- Processing page {page_number} ---")
-            # Wait until the table has loaded at all
-            self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "table#t-result-table")))
-
-            # --- ROBUST SCROLLING ---
-            logger.info("Scrolling the page down to load all rows...")
-            scroll_container = self.driver.find_element(By.CSS_SELECTOR, ".scroll-viewport")
-            last_height = self.driver.execute_script("return arguments[0].scrollHeight", scroll_container)
-            while True:
-                self.driver.execute_script("arguments[0].scrollTo(0, arguments[0].scrollHeight);", scroll_container)
-                time.sleep(2.5)  # Important pause so that new content can load
-                new_height = self.driver.execute_script("return arguments[0].scrollHeight", scroll_container)
-                if new_height == last_height:
-                    break
-                last_height = new_height
-            logger.info("End of page reached, all rows should be loaded.")
-
-            page_results = self.extract_current_page_results()
-            all_companies.extend(page_results)
-            logger.info(f"Page {page_number}: {len(page_results)} companies found. Total: {len(all_companies)}")
-
-            # --- FINAL PAGINATION LOGIC ---
+            # Scroll to load all 20 rows
+            scroll_container = self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".scroll-viewport")))
+            self.driver.execute_script("arguments[0].scrollTo(0, arguments[0].scrollHeight);", scroll_container)
+            time.sleep(2)  # Pause for rendering
+
+            # Extraction
+            rows = self.driver.find_elements(By.XPATH, "//table[@id='t-result-table']/tbody/tr")
+            logger.info(f"Found {len(rows)} <tr> elements. Extracting data...")
+            for row in rows:
+                try:
+                    name = row.find_element(By.CSS_SELECTOR, ".sticky-column a.t-highlight-text").get_attribute("title").strip()
+                    website = "N/A"
+                    try:
+                        website = row.find_element(By.CSS_SELECTOR, "a.text-gray-400.t-highlight-text").text.strip()
+                    except NoSuchElementException:
+                        pass
+                    if name not in [c['name'] for c in all_companies.values()]:
+                        all_companies[name] = {'name': name, 'website': website}
+                except NoSuchElementException:
+                    continue  # Skip empty "ghost" rows
+            logger.info(f"Page {page_number} processed. Total unique: {len(all_companies)}")
+
+            # Pagination
             try:
-                # Unambiguous XPath for the "Next" arrow button that is not disabled
-                next_button_selector = (By.XPATH, "//a[contains(@class, 'eb-pagination-button') and not(contains(@class, 'disabled')) and .//path[starts-with(@d, 'M8.293')]]")
-                next_button = self.driver.find_element(*next_button_selector)
-
-                # Scroll the button into view in case it is covered
-                self.driver.execute_script("arguments[0].scrollIntoView(true);", next_button)
-                time.sleep(0.5)
-                next_button.click()
-                logger.info(f"Moved on to page {page_number + 1}...")
-
-                # Wait for the table to reload
-                self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "table#t-result-table")))
+                next_button = self.driver.find_element(By.CSS_SELECTOR, "a.eb-pagination-button:not(.disabled) svg[data-icon='angle-right']")
+                self.driver.execute_script("arguments[0].click();", next_button)
+                logger.info(f"Clicking 'Next' to page {page_number + 1}...")
+                time.sleep(4)  # Fixed wait for the page change
             except NoSuchElementException:
                 logger.info("No 'Next' button found. Pagination finished.")
                 break
-        return all_companies
+        return list(all_companies.values())

     def close(self):
         if self.driver:
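
For the fixed time.sleep(4) after the pagination click, an explicit staleness-based wait is a common alternative: hold a reference to a row from the current page, click, and wait until that element detaches before reading the new page. A minimal sketch (not part of the commit), assuming the same table selector as above; the helper name is hypothetical.

from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def click_next_and_wait(driver, next_button, timeout=30):
    """Click the 'Next' control and block until the result table has re-rendered."""
    first_row = driver.find_element(By.CSS_SELECTOR, "table#t-result-table tbody tr")
    driver.execute_script("arguments[0].click();", next_button)
    wait = WebDriverWait(driver, timeout)
    wait.until(EC.staleness_of(first_row))  # old rows detach when the page flips
    wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "table#t-result-table tbody tr")))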
@@ -192,7 +153,7 @@ if __name__ == "__main__":
         if not scraper.login(): raise Exception("Login failed")
         if not scraper.navigate_and_load_search(Config.SEARCH_NAME): raise Exception("Navigation/search failed")
-        all_companies = scraper.scrape_all_pages(max_pages=6)  # Limit to 6 pages
+        all_companies = scraper.scrape_all_pages(max_pages=6)
         if all_companies:
             df = pd.DataFrame(all_companies)