dealfront_enrichment.py updated

2025-07-11 09:29:22 +00:00
parent 4cb2ca387d
commit 6df8a5732b


@@ -8,106 +8,195 @@ from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException

# --- Configuration ---
class Config:
    LOGIN_URL = "https://app.dealfront.com/login"
    TARGET_URL = "https://app.dealfront.com/t/prospector/companies"
    SEARCH_NAME = "Facility Management"  # <-- adjust this to match your saved search
    CREDENTIALS_FILE = "/app/dealfront_credentials.json"
    OUTPUT_DIR = "/app/output"
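
# _load_credentials below reads the "username" and "password" keys, so the
# credentials file is presumably shaped like this (illustrative values only):
# {"username": "user@example.com", "password": "secret"}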

# --- Logging Setup ---
LOG_FORMAT = '%(asctime)s - %(levelname)-8s - %(name)-25s - %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, force=True)
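# force=True (Python 3.8+) replaces any handlers configured earlier, so this
# basicConfig call takes effect even if an imported module already set up logging.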
logging.getLogger("selenium.webdriver.remote").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
os.makedirs(Config.OUTPUT_DIR, exist_ok=True)
log_filepath = os.path.join(Config.OUTPUT_DIR, f"dealfront_run_{time.strftime('%Y%m%d-%H%M%S')}.log")
file_handler = logging.FileHandler(log_filepath, mode='w', encoding='utf-8')
file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logging.getLogger().addHandler(file_handler)

class DealfrontScraper:
    def __init__(self):
        logger.info("Initializing WebDriver...")
        chrome_options = ChromeOptions()
        # Do not load images; this speeds up page loads in headless mode
        chrome_options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})
        chrome_options.add_argument("--headless=new")
        # --no-sandbox and --disable-dev-shm-usage are the usual flags for running
        # Chrome inside a container
        chrome_options.add_argument("--no-sandbox")
        chrome_options.add_argument("--disable-dev-shm-usage")
        chrome_options.add_argument("--window-size=1920,1200")
        try:
            self.driver = webdriver.Chrome(options=chrome_options)
        except Exception:
            logger.critical("WebDriver could not be initialized.", exc_info=True)
            raise
        self.wait = WebDriverWait(self.driver, 30)
        self.username, self.password = self._load_credentials()
        if not self.username or not self.password:
            raise ValueError("Credentials could not be loaded. Aborting.")
        logger.info("WebDriver initialized successfully.")
    def _load_credentials(self):
        try:
            with open(Config.CREDENTIALS_FILE, 'r', encoding='utf-8') as f:
                creds = json.load(f)
            return creds.get("username"), creds.get("password")
        except Exception as e:
            logger.error(f"Credentials file {Config.CREDENTIALS_FILE} could not be loaded: {e}")
            return None, None

    def _save_debug_artifacts(self, suffix=""):
        try:
            timestamp = time.strftime("%Y%m%d-%H%M%S")
            filename_base = os.path.join(Config.OUTPUT_DIR, f"error_{suffix}_{timestamp}")
            self.driver.save_screenshot(f"{filename_base}.png")
            with open(f"{filename_base}.html", "w", encoding="utf-8") as f:
                f.write(self.driver.page_source)
            logger.error(f"Debug artifacts saved: {filename_base}.*")
        except Exception as e:
            logger.error(f"Could not save debug artifacts: {e}")
    def login(self):
        try:
            logger.info(f"Navigating to login page: {Config.LOGIN_URL}")
            self.driver.get(Config.LOGIN_URL)
            self.wait.until(EC.visibility_of_element_located((By.NAME, "email"))).send_keys(self.username)
            self.driver.find_element(By.CSS_SELECTOR, "input[type='password']").send_keys(self.password)
            self.driver.find_element(By.XPATH, "//button[normalize-space()='Log in']").click()
            logger.info("Login submitted. Waiting 5 seconds for the session to be established.")
            time.sleep(5)
            # Verify that we are no longer on the login page
            if "login" not in self.driver.current_url:
                logger.info("Login successful, the URL has changed.")
                return True
            self._save_debug_artifacts("login_stuck")
            return False
        except Exception:
            logger.critical("Login process failed.", exc_info=True)
            self._save_debug_artifacts("login_exception")
            return False
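
    # Note: the fixed 5-second sleep after submitting the form is a heuristic;
    # waiting for a known post-login element would be a more robust check.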
    def navigate_and_load_search(self, search_name):
        try:
            logger.info("Navigating directly to the target page and loading the saved search...")
            self.driver.get(Config.TARGET_URL)
            self.wait.until(EC.url_contains("/t/prospector/"))
            search_item_selector = (By.XPATH, f"//div[contains(@class, 'truncate') and normalize-space()='{search_name}']")
            self.wait.until(EC.element_to_be_clickable(search_item_selector)).click()
            logger.info("Search loaded. Waiting for the result table to render.")
            self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "table#t-result-table tbody tr")))
            return True
        except Exception:
            logger.critical("Navigating to or loading the search failed.", exc_info=True)
            self._save_debug_artifacts("navigation_or_search_load")
            return False
    def extract_current_page_results(self):
        results = []
        # Only rows that contain a highlighted company link count as data rows
        rows_selector = (By.XPATH, "//table[@id='t-result-table']/tbody/tr[.//a[contains(@class, 't-highlight-text')]]")
        try:
            data_rows = self.driver.find_elements(*rows_selector)
            for row in data_rows:
                try:
                    name_link = row.find_element(By.CSS_SELECTOR, ".sticky-column a.t-highlight-text")
                    # Fall back to the link text when the title attribute is missing
                    name = (name_link.get_attribute("title") or name_link.text).strip()
                    website = "N/A"
                    try:
                        website = row.find_element(By.CSS_SELECTOR, "a.text-gray-400.t-highlight-text").text.strip()
                    except NoSuchElementException:
                        pass
                    results.append({'name': name, 'website': website})
                except NoSuchElementException:
                    continue
        except Exception as e:
            logger.error(f"Error extracting results from the current page: {e}")
        return results
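
    # Note: the CSS/XPath selectors above are tied to Dealfront's current table
    # markup ("t-result-table", "t-highlight-text", "sticky-column") and will
    # need updating if the UI changes.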
    def scrape_all_pages(self, max_pages=10):
        all_companies = {}
        for page_number in range(1, max_pages + 1):
            logger.info(f"--- Processing page {page_number} ---")
            try:
                # Remember the first data row so the page change can be detected later
                first_row_id_element = self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "table#t-result-table tbody tr[id]")))
            except TimeoutException:
                self._save_debug_artifacts(f"page_{page_number}")
                logger.warning("Could not find any data rows on the page. Stopping pagination.")
                break
            page_results = self.extract_current_page_results()
            if not page_results and page_number > 1:
                logger.info("No further results extracted on this page. Pagination finished.")
                break
            for company in page_results:
                # Deduplicate across pages by (name, website)
                unique_key = (company.get('name'), company.get('website'))
                if unique_key not in all_companies:
                    all_companies[unique_key] = company
            logger.info(f"Page {page_number}: {len(page_results)} companies found. Unique total: {len(all_companies)}")
            try:
                # Robust selector for the "next" button
                next_button = self.wait.until(EC.element_to_be_clickable((By.XPATH, "//a[not(contains(@class, 'disabled')) and .//svg[contains(@class, 'fa-angle-right')]]")))
                self.driver.execute_script("arguments[0].click();", next_button)
                # Wait until the old first row has gone stale; re-finding it by id
                # after the click would be racy, so the element reference captured
                # above is used directly.
                self.wait.until(EC.staleness_of(first_row_id_element))
                logger.info("New page loaded.")
            except TimeoutException:
                logger.info("No clickable 'next' button found. Pagination finished.")
                break
        return list(all_companies.values())
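
    # Design note: keying the wait on staleness of the previous page's first row
    # tracks the actual DOM replacement when the next page renders, which is more
    # reliable than a fixed sleep.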

    def close(self):
        if self.driver:
            self.driver.quit()
if __name__ == "__main__":
scraper = None
try:
scraper = DealfrontScraper()
all_companies = scraper.run_full_extraction(max_pages=6) # Setzen Sie hier die maximale Seitenzahl
if not scraper.login(): raise Exception("Login fehlgeschlagen")
if not scraper.navigate_and_load_search(Config.SEARCH_NAME): raise Exception("Navigation/Suche fehlgeschlagen")
all_companies = scraper.scrape_all_pages(max_pages=6) # Limitiere auf 6 Seiten
if all_companies:
df = pd.DataFrame(all_companies)
output_csv_path = os.path.join(Config.OUTPUT_DIR, f"dealfront_results_{time.strftime('%Y%m%d-%H%M%S')}.csv")
df.to_csv(output_csv_path, index=False, sep=';', encoding='utf-8-sig')
logger.info(f"Alle Ergebnisse ({len(df)} Firmen) erfolgreich gespeichert: {output_csv_path}")
logger.info(f"Ergebnisse ({len(df)} Firmen) erfolgreich in '{output_csv_path}' gespeichert.")
else:
logger.warning("Keine Firmen konnten extrahiert werden.")
except Exception as e:
logger.critical(f"Ein kritischer Fehler ist im Hauptprozess aufgetreten.", exc_info=True)
logger.critical(f"Ein kritischer Fehler ist im Hauptprozess aufgetreten: {e}", exc_info=True)
finally:
if scraper:
scraper.close()
scraper.close()
logger.info("Dealfront Automatisierung beendet.")