dealfront_enrichment.py updated
@@ -5,32 +5,30 @@ import logging
 import pandas as pd
 from selenium import webdriver
 from selenium.webdriver.common.by import By
-from selenium.webdriver.chrome.service import Service
 from selenium.webdriver.chrome.options import Options as ChromeOptions
 from selenium.webdriver.support.ui import WebDriverWait
 from selenium.webdriver.support import expected_conditions as EC
 from selenium.common.exceptions import TimeoutException, NoSuchElementException
 
-# ==============================================================================
-# TEMPORARY, SELF-CONTAINED CONFIGURATION
-# ==============================================================================
+# Temporary configuration to keep this script self-contained
 class TempConfig:
-    # --- Values defined directly here to bypass config.py ---
     DEALFRONT_LOGIN_URL = "https://app.dealfront.com/login"
-    TARGET_SEARCH_NAME = "Facility Management"  # <-- ADJUST THIS TO YOUR SEARCH
+    TARGET_SEARCH_NAME = "Facility Management"  # adjust this as needed
     DEALFRONT_CREDENTIALS_FILE = "/app/dealfront_credentials.json"
-# ==============================================================================
+    DEALFRONT_TARGET_URL = "https://app.dealfront.com/t?products=target%2Cconnect%2Cpromote%2Cdatacare"
 
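The credentials JSON referenced by DEALFRONT_CREDENTIALS_FILE is consumed by `_load_credentials()`, of which only the `return None, None` fallback appears in the hunks below. A minimal sketch of such a loader, assuming the file holds `username` and `password` keys (the key names are an assumption, not confirmed by this diff):

```python
import json
import logging

logger = logging.getLogger(__name__)

def load_credentials(path="/app/dealfront_credentials.json"):
    """Hypothetical loader: returns (username, password), or (None, None) on failure."""
    try:
        with open(path, encoding="utf-8") as f:
            data = json.load(f)
        # "username"/"password" are assumed key names; adjust to the real file format.
        return data["username"], data["password"]
    except (OSError, KeyError, ValueError) as e:
        logger.error("Could not load credentials from %s: %s", path, e)
        return None, None
```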
+# Logging configuration
 OUTPUT_DIR = "/app/output"
-LOG_FORMAT = '%(asctime)s - %(levelname)-8s - %(name)-25s - %(message)s'  # <-- DEFINED HERE
+LOG_FORMAT = '%(asctime)s - %(levelname)-8s - %(name)-25s - %(message)s'
 logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, force=True)
 logging.getLogger("selenium").setLevel(logging.WARNING)
 logger = logging.getLogger(__name__)
 
+# Set up the log file
-log_filename = f"dealfront_run_{time.strftime('%Y%m%d-%H%M%S')}.txt"
+log_filename = f"dealfront_run_{time.strftime('%Y%m%d-%H%M%S')}.log"
 log_filepath = os.path.join(OUTPUT_DIR, log_filename)
 try:
+    os.makedirs(OUTPUT_DIR, exist_ok=True)
     file_handler = logging.FileHandler(log_filepath, mode='w', encoding='utf-8')
     file_handler.setLevel(logging.DEBUG)
     file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
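The hunk breaks off here, before the handler is attached to a logger. Presumably the next (elided) lines register it, roughly along these lines (a sketch of the assumed continuation, not shown in the diff):

```python
# Assumed continuation: attach the file handler to the root logger so that
# all modules write into the per-run log file as well as to the console.
logging.getLogger().addHandler(file_handler)
```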
@@ -49,13 +47,13 @@ class DealfrontScraper:
         chrome_options.add_argument("--no-sandbox")
         chrome_options.add_argument("--disable-dev-shm-usage")
         chrome_options.add_argument("--window-size=1920,1080")
-        service = Service(executable_path='/usr/bin/chromedriver')
         try:
-            self.driver = webdriver.Chrome(service=service, options=chrome_options)
+            self.driver = webdriver.Chrome(options=chrome_options)
         except Exception as e:
             logger.critical("WebDriver could not be initialized.", exc_info=True)
             raise
-        self.wait = WebDriverWait(self.driver, 30)
+        self.wait = WebDriverWait(self.driver, 45)  # longer timeout for more stability
         self.username, self.password = self._load_credentials()
         logger.info("WebDriver initialized successfully.")
 
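Dropping the hard-coded Service path delegates driver resolution to Selenium Manager, which is bundled with Selenium 4.6 and later. A minimal standalone sketch of the same setup (the `--headless=new` flag is an assumption; the hunk starts after the options object is created):

```python
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions

chrome_options = ChromeOptions()
chrome_options.add_argument("--headless=new")  # assumed; not visible in the hunk
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--window-size=1920,1080")

# Selenium Manager (Selenium >= 4.6) locates or downloads a matching
# chromedriver automatically, so no Service(executable_path=...) is needed.
driver = webdriver.Chrome(options=chrome_options)
```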
@@ -69,82 +67,76 @@ class DealfrontScraper:
         return None, None
 
     def _save_debug_artifacts(self):
-        # ... (this method is unchanged) ...
         try:
-            os.makedirs(OUTPUT_DIR, exist_ok=True)
             timestamp = time.strftime("%Y%m%d-%H%M%S")
             screenshot_filepath = os.path.join(OUTPUT_DIR, f"error_{timestamp}.png")
             html_filepath = os.path.join(OUTPUT_DIR, f"error_{timestamp}.html")
             self.driver.save_screenshot(screenshot_filepath)
-            logger.error(f"Screenshot '{screenshot_filepath}' was saved for analysis.")
+            logger.error(f"Screenshot '{screenshot_filepath}' was saved.")
             with open(html_filepath, "w", encoding="utf-8") as f:
                 f.write(self.driver.page_source)
-            logger.error(f"HTML source '{html_filepath}' was saved for analysis.")
+            logger.error(f"HTML source '{html_filepath}' was saved.")
         except Exception as e:
             logger.error(f"Could not save debug artifacts: {e}")
 
-    def login_and_find_list(self, search_name):
-        """
-        Runs the whole process from login through locating the search result list.
-        """
+    def login(self):
         try:
             logger.info(f"Navigating to the login page: {TempConfig.DEALFRONT_LOGIN_URL}")
             self.driver.get(TempConfig.DEALFRONT_LOGIN_URL)
             self.wait.until(EC.visibility_of_element_located((By.NAME, "email"))).send_keys(self.username)
             self.driver.find_element(By.CSS_SELECTOR, "input[type='password']").send_keys(self.password)
             self.driver.find_element(By.XPATH, "//button[normalize-space()='Log in']").click()
-            logger.info("Login command sent.")
-            logger.info("Waiting for the dashboard and the 'Find prospects' quick link...")
-            prospects_link_selector = (By.XPATH, "//a[@data-test-target-product-tile]")
-            prospects_link = self.wait.until(EC.element_to_be_clickable(prospects_link_selector))
-            prospects_link.click()
-            logger.info("'Find prospects' clicked.")
-
-            logger.info(f"Waiting for the list of searches and clicking '{search_name}'...")
-            search_item_selector = (By.XPATH, f"//div[contains(@class, 'truncate') and normalize-space()='{search_name}']")
-            self.wait.until(EC.element_to_be_clickable(search_item_selector)).click()
-
-            logger.info(f"Search '{search_name}' loaded. Waiting for results.")
-            first_row_locator = (By.CSS_SELECTOR, "td.sticky-after-checkbox a.t-highlight-text")
-            self.wait.until(EC.visibility_of_element_located(first_row_locator))
-
-            logger.info("Target page with results reached successfully.")
+            logger.info("Login command sent. Waiting for the redirect...")
+            # Wait for an element that is reliably present after login (e.g. a dashboard link)
+            self.wait.until(EC.visibility_of_element_located((By.XPATH, "//a[contains(@href, '/dashboard')]")))
+            logger.info("Login successful, dashboard reached.")
             return True
         except Exception as e:
-            logger.critical(f"Process up to loading the list failed: {type(e).__name__}", exc_info=True)
+            logger.critical("Login process failed.", exc_info=True)
             self._save_debug_artifacts()
             return False
 
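An alternative success check inside `login()` would be to wait for the browser to leave the login URL instead of pinning a specific dashboard element; a sketch using Selenium's built-in condition:

```python
# Inside login(), after clicking the "Log in" button: consider the login
# successful once the URL changes away from the login page.
self.wait.until(EC.url_changes(TempConfig.DEALFRONT_LOGIN_URL))
```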
-        # THIS IS THE CORRECT INDENTATION
-    def extract_current_page_results(self):
-        """
-        Extracts data with a fast, direct approach that ignores ghost rows.
-        """
+    def navigate_and_load_search(self, search_name):
         try:
-            logger.info("Extracting results with the final, direct selector approach...")
-            results = []
-
-            # 1. We know the page has loaded and can query it directly.
-            company_elements = self.driver.find_elements(By.CSS_SELECTOR, "td.sticky-column a.t-highlight-text")
-            website_elements = self.driver.find_elements(By.CSS_SELECTOR, "a.text-gray-400.t-highlight-text")
-
-            logger.info(f"Found {len(company_elements)} company names and {len(website_elements)} website elements.")
-
-            if not company_elements:
-                logger.warning("No companies found with the given selector.")
-                return []
-
-            for i in range(len(company_elements)):
-                company_name = company_elements[i].get_attribute("title").strip()
-                website = website_elements[i].text.strip() if i < len(website_elements) else "N/A"
-                results.append({'name': company_name, 'website': website})
-
-            logger.info(f"Extraction finished. Processed {len(results)} companies.")
-            return results
+            logger.info(f"Navigating directly to the Target page and loading search '{search_name}'...")
+            self.driver.get(TempConfig.DEALFRONT_TARGET_URL)
+            self.wait.until(EC.url_contains("/t/prospector/"))
+
+            search_item_selector = (By.XPATH, f"//div[contains(@class, 'truncate') and normalize-space()='{search_name}']")
+            self.wait.until(EC.element_to_be_clickable(search_item_selector)).click()
+
+            logger.info(f"Search '{search_name}' loaded. Waiting for the results to render.")
+            first_row_locator = (By.CSS_SELECTOR, "table#t-result-table tbody tr[id]")
+            self.wait.until(EC.visibility_of_element_located(first_row_locator))
+            logger.info("Results page loaded successfully.")
+            return True
         except Exception as e:
-            logger.error(f"Fatal error during extraction: {type(e).__name__}", exc_info=True)
+            logger.critical("Navigating to or loading the search failed.", exc_info=True)
+            self._save_debug_artifacts()
+            return False
 
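DEALFRONT_TARGET_URL hard-codes the percent-encoded `products` parameter. Should the product list ever need to vary, the same URL can be built with the standard library (a sketch; base path and parameter name are taken from the URL in TempConfig):

```python
from urllib.parse import urlencode

products = ["target", "connect", "promote", "datacare"]
target_url = "https://app.dealfront.com/t?" + urlencode({"products": ",".join(products)})
# -> https://app.dealfront.com/t?products=target%2Cconnect%2Cpromote%2Cdatacare
```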
+    def extract_current_page_results(self):
+        try:
+            logger.info("Extracting results from the current page...")
+            results = []
+            rows_with_data_selector = (By.XPATH, "//table[@id='t-result-table']/tbody/tr[.//a[contains(@class, 't-highlight-text')]]")
+            rows = self.wait.until(EC.presence_of_all_elements_located(rows_with_data_selector))
+            logger.info(f"Found {len(rows)} valid company data rows.")
+
+            for row in rows:
+                try:
+                    company_name = row.find_element(By.CSS_SELECTOR, ".sticky-column a.t-highlight-text").get_attribute("title").strip()
+                    website = "N/A"
+                    try:
+                        website = row.find_element(By.CSS_SELECTOR, "a.text-gray-400.t-highlight-text").text.strip()
+                    except NoSuchElementException:
+                        pass  # stays "N/A" if no website is found
+                    results.append({'name': company_name, 'website': website})
+                except Exception as e:
+                    logger.warning(f"Could not process a row: {e}")
+            return results
+        except Exception as e:
+            logger.error("Error during extraction.", exc_info=True)
             self._save_debug_artifacts()
             return []
 
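`extract_current_page_results()` only reads the page currently displayed. A pagination loop built on top of it could look like this sketch; the next-page selector is purely an assumption, since the diff never shows Dealfront's pagination control:

```python
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By

def extract_all_pages(scraper, max_pages=10):
    """Hypothetical helper: walk through result pages and collect all rows."""
    all_results = []
    for _ in range(max_pages):
        all_results.extend(scraper.extract_current_page_results())
        try:
            # Assumed selector; the real pagination control may differ.
            next_button = scraper.driver.find_element(By.CSS_SELECTOR, "button[aria-label='Next page']")
        except NoSuchElementException:
            break  # no pagination control found
        if not next_button.is_enabled():
            break  # last page reached
        next_button.click()
    return all_results
```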
@@ -154,18 +146,18 @@ class DealfrontScraper:
         self.driver.quit()
 
 if __name__ == "__main__":
-    logger.info("Starting Dealfront automation - DEBUG MODE")
+    logger.info("Starting Dealfront automation - final workflow")
     scraper = None
     try:
         scraper = DealfrontScraper()
         if not scraper.driver:
             raise Exception("WebDriver could not be initialized.")
 
-        if not scraper.login_and_find_list(TempConfig.TARGET_SEARCH_NAME):
-            raise Exception("The process from login through loading the list failed.")
+        if not scraper.login():
+            raise Exception("Login failed.")
 
-        # This version no longer has a handle_overlays method
-        # scraper.handle_overlays()
+        if not scraper.navigate_and_load_search(TempConfig.TARGET_SEARCH_NAME):
+            raise Exception("Navigating to and loading the search failed.")
 
         companies = scraper.extract_current_page_results()
         if companies:
@@ -173,20 +165,19 @@ if __name__ == "__main__":
             pd.set_option('display.max_rows', None)
             pd.set_option('display.max_columns', None)
             pd.set_option('display.width', 1000)
             pd.set_option('display.max_colwidth', None)
             print("\n" + "="*80)
             print(" EXTRACTED COMPANIES (FIRST PAGE) ".center(80, "="))
             print("="*80)
             print(df.to_string(index=False))
             print("="*80 + "\n")
         else:
-            logger.warning("Although the page loaded, no companies could be extracted.")
+            logger.warning("No companies could be extracted.")
 
-        logger.info("Test completed successfully. Waiting before closing...")
-        time.sleep(10)
+        logger.info("Test completed successfully.")
 
     except Exception as e:
-        logger.critical(f"A critical error occurred in the main process: {e}", exc_info=False)
+        logger.critical(f"A critical error occurred in the main process: {e}")
     finally:
         if scraper:
             scraper.close()
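Nothing in this workflow persists the extracted companies; they are only printed. A sketch of writing them out next to the run log (the CSV filename pattern is an assumption, mirroring the log naming above):

```python
import os
import time

import pandas as pd

# Sketch: after extraction, persist the results alongside the run log.
# `companies` is the list of dicts returned by extract_current_page_results().
csv_path = os.path.join("/app/output", f"dealfront_companies_{time.strftime('%Y%m%d-%H%M%S')}.csv")
pd.DataFrame(companies).to_csv(csv_path, index=False, encoding="utf-8")
```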