import os
import json
import time
import logging

import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import (
    TimeoutException,
    NoSuchElementException,
    StaleElementReferenceException,
)


# --- Configuration ---
class Config:
    LOGIN_URL = "https://app.dealfront.com/login"
    TARGET_URL = "https://app.dealfront.com/t/prospector/companies"
    SEARCH_NAME = "Facility Management"
    CREDENTIALS_FILE = "/app/dealfront_credentials.json"
    OUTPUT_DIR = "/app/output"


# --- Logging setup ---
LOG_FORMAT = '%(asctime)s - %(levelname)-8s - %(name)-25s - %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, force=True)
logging.getLogger("selenium.webdriver.remote").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

os.makedirs(Config.OUTPUT_DIR, exist_ok=True)
log_filepath = os.path.join(
    Config.OUTPUT_DIR, f"dealfront_run_{time.strftime('%Y%m%d-%H%M%S')}.txt"
)
file_handler = logging.FileHandler(log_filepath, mode='w', encoding='utf-8')
file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logging.getLogger().addHandler(file_handler)


class DealfrontScraper:
    def __init__(self):
        logger.info("Initializing WebDriver...")
        chrome_options = ChromeOptions()
        chrome_options.add_argument("--headless=new")
        chrome_options.add_argument("--no-sandbox")
        chrome_options.add_argument("--disable-dev-shm-usage")
        chrome_options.add_argument("--window-size=1920,1200")
        try:
            self.driver = webdriver.Chrome(options=chrome_options)
        except Exception:
            logger.critical("WebDriver could not be initialized.", exc_info=True)
            raise
        self.wait = WebDriverWait(self.driver, 30)
        self.username, self.password = self._load_credentials()
        if not self.username or not self.password:
            raise ValueError("Credentials could not be loaded.")
        logger.info("WebDriver initialized successfully.")

    def _load_credentials(self):
        try:
            with open(Config.CREDENTIALS_FILE, 'r', encoding='utf-8') as f:
                creds = json.load(f)
            return creds.get("username"), creds.get("password")
        except Exception as e:
            logger.error(f"Could not load credentials file {Config.CREDENTIALS_FILE}: {e}")
            return None, None

    def _save_debug_artifacts(self, suffix=""):
        pass  # Disabled for now to keep the logs clean

    def login(self):
        try:
            logger.info(f"Navigating to login page: {Config.LOGIN_URL}")
            self.driver.get(Config.LOGIN_URL)
            self.wait.until(
                EC.visibility_of_element_located((By.NAME, "email"))
            ).send_keys(self.username)
            self.driver.find_element(By.CSS_SELECTOR, "input[type='password']").send_keys(self.password)
            self.driver.find_element(By.XPATH, "//button[normalize-space()='Log in']").click()
            logger.info("Login command sent. Waiting for session...")
            time.sleep(5)
            if "login" in self.driver.current_url:
                raise Exception("Redirect after login failed.")
            logger.info("Login successful.")
            return True
        except Exception:
            logger.critical("Login process failed.", exc_info=True)
            self._save_debug_artifacts("login_exception")
            return False

    def navigate_and_load_search(self, search_name):
        try:
            logger.info("Navigating directly to the target page and loading the saved search...")
            self.driver.get(Config.TARGET_URL)
            self.wait.until(EC.url_contains("/t/prospector/"))
            search_item_selector = (
                By.XPATH,
                f"//div[contains(@class, 'truncate') and normalize-space()='{search_name}']",
            )
            self.wait.until(EC.element_to_be_clickable(search_item_selector)).click()
            logger.info("Search loaded. Waiting for results table.")
            self.wait.until(
                EC.visibility_of_element_located((By.CSS_SELECTOR, "table#t-result-table tbody tr"))
            )
            return True
        except Exception:
            logger.critical("Navigation or loading the search failed.", exc_info=True)
            self._save_debug_artifacts("navigation_or_search_load")
            return False
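    # scrape_all_pages() below depends on this row-extraction step. This is a
    # minimal sketch: the table selector matches the one waited on in
    # navigate_and_load_search(), but the column layout (company name in the
    # first cell, website as the row's first link) is an assumption about
    # Dealfront's result-table markup and may need adjusting against the live DOM.
    def extract_current_page_results(self):
        """Read all currently rendered result rows into a list of dicts."""
        companies = []
        rows = self.driver.find_elements(By.CSS_SELECTOR, "table#t-result-table tbody tr")
        for row in rows:
            try:
                cells = row.find_elements(By.TAG_NAME, "td")
                if not cells:
                    continue
                name = cells[0].text.strip()
                # Assumption: the first link in the row points to the company website.
                links = row.find_elements(By.CSS_SELECTOR, "a[href]")
                website = links[0].get_attribute("href") if links else None
                if name:
                    companies.append({"name": name, "website": website})
            except StaleElementReferenceException:
                continue  # Row was re-rendered while scrolling; skip it
        return companies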
    def scrape_all_pages(self, max_pages=6):
        """
        Scrolls the page down step by step and extracts new data until no new
        companies are loaded or the page limit is reached.
        """
        logger.info("Starting the final scroll-and-extract process...")
        all_companies = {}
        last_found_count = 0
        for _ in range(max_pages * 5):  # Allow at most 5 scroll attempts per page
            page_results = self.extract_current_page_results()
            for company in page_results:
                unique_key = (company.get('name'), company.get('website'))
                all_companies[unique_key] = company
            # Check whether the scroll produced any new companies
            if len(all_companies) > last_found_count:
                logger.info(f"Scrolling... {len(all_companies)} unique companies found.")
                last_found_count = len(all_companies)
                self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(3)  # Give the page time to lazy-load the next batch
            else:
                logger.info("No new companies found after scrolling. Pagination appears complete.")
                break  # Stop once scrolling no longer yields new results
        return list(all_companies.values())

    def close(self):
        if self.driver:
            self.driver.quit()


if __name__ == "__main__":
    scraper = None
    try:
        scraper = DealfrontScraper()
        if not scraper.login():
            raise Exception("Login failed")
        if not scraper.navigate_and_load_search(Config.SEARCH_NAME):
            raise Exception("Navigation/search failed")
        all_companies = scraper.scrape_all_pages(max_pages=6)
        if all_companies:
            df = pd.DataFrame(all_companies)
            output_csv_path = os.path.join(
                Config.OUTPUT_DIR, f"dealfront_results_{time.strftime('%Y%m%d-%H%M%S')}.csv"
            )
            df.to_csv(output_csv_path, index=False, sep=';', encoding='utf-8-sig')
            logger.info(f"Results ({len(df)} companies) saved to '{output_csv_path}'.")
        else:
            logger.warning("No companies could be extracted.")
    except Exception as e:
        logger.critical(f"A critical error occurred in the main process: {e}", exc_info=True)
    finally:
        if scraper:
            scraper.close()
        logger.info("Dealfront automation finished.")
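# Expected shape of the credentials file read by _load_credentials(); the keys
# mirror the creds.get("username") / creds.get("password") lookups above.
# Example /app/dealfront_credentials.json (values are placeholders):
#   {
#       "username": "user@example.com",
#       "password": "your-password"
#   }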