revoce
@@ -11,24 +11,20 @@ from selenium.webdriver.support.ui import WebDriverWait
 from selenium.webdriver.support import expected_conditions as EC
 from selenium.common.exceptions import TimeoutException, NoSuchElementException

-# ==============================================================================
-# TEMPORARY, SELF-CONTAINED CONFIGURATION
-# ==============================================================================
+# Temporary configuration directly in the script
 class TempConfig:
-    # --- Values defined right here to bypass config.py ---
     DEALFRONT_LOGIN_URL = "https://app.dealfront.com/login"
-    TARGET_SEARCH_NAME = "Facility Management"  # <-- PLEASE ADAPT TO YOUR SEARCH
+    DEALFRONT_TARGET_URL = "https://app.dealfront.com/t/prospector/companies"
+    TARGET_SEARCH_NAME = "Facility Management"  # Name of the saved search; adapt as needed
     DEALFRONT_CREDENTIALS_FILE = "/app/dealfront_credentials.json"
-# ==============================================================================

 OUTPUT_DIR = "/app/output"
-# Logging configuration
-LOG_LEVEL = logging.INFO
-LOG_FORMAT = '%(asctime)s - %(levelname)-8s - %(name)-25s - %(message)s'  # This line was missing
-logging.basicConfig(level=LOG_LEVEL, format=LOG_FORMAT, force=True, handlers=[logging.StreamHandler()])
+LOG_FORMAT = '%(asctime)s - %(levelname)-8s - %(name)-25s - %(message)s'
+logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, force=True, handlers=[logging.StreamHandler()])
 logging.getLogger("selenium").setLevel(logging.WARNING)
 logger = logging.getLogger(__name__)

+# File handler for the per-run log file
 log_filename = f"dealfront_run_{time.strftime('%Y%m%d-%H%M%S')}.txt"
 log_filepath = os.path.join(OUTPUT_DIR, log_filename)
 try:
@@ -42,10 +38,9 @@ except Exception as e:

 class DealfrontScraper:
     def __init__(self):
-        logger.info("Initializing the DealfrontScraper...")
+        logger.info("Initializing DealfrontScraper...")
         chrome_options = ChromeOptions()
-        prefs = {"profile.managed_default_content_settings.images": 2}
-        chrome_options.add_experimental_option("prefs", prefs)
+        # Disable image loading to speed up page loads
+        chrome_options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})
         chrome_options.add_argument("--headless")
         chrome_options.add_argument("--no-sandbox")
         chrome_options.add_argument("--disable-dev-shm-usage")
@@ -54,7 +49,7 @@ class DealfrontScraper:
         try:
             self.driver = webdriver.Chrome(service=service, options=chrome_options)
         except Exception as e:
-            logger.critical(f"The WebDriver could not be initialized.", exc_info=True)
+            logger.critical("The WebDriver could not be initialized.", exc_info=True)
             raise
         self.wait = WebDriverWait(self.driver, 30)
         self.username, self.password = self._load_credentials()
@@ -63,159 +58,75 @@ class DealfrontScraper:
     def _load_credentials(self):
         try:
             with open(TempConfig.DEALFRONT_CREDENTIALS_FILE, 'r') as f:
                 # Load the file once; a second json.load(f) on the same handle would fail.
                 creds = json.load(f)
                 return creds.get("username"), creds.get("password")
         except Exception as e:
             logger.error(f"Could not load the credentials file: {e}")
             return None, None

     def _save_debug_artifacts(self):
-        # ... (This method stays unchanged) ...
-        try:
-            os.makedirs(OUTPUT_DIR, exist_ok=True)
-            timestamp = time.strftime("%Y%m%d-%H%M%S")
-            screenshot_filepath = os.path.join(OUTPUT_DIR, f"error_{timestamp}.png")
-            html_filepath = os.path.join(OUTPUT_DIR, f"error_{timestamp}.html")
-            self.driver.save_screenshot(screenshot_filepath)
-            logger.error(f"Screenshot '{screenshot_filepath}' saved for analysis.")
-            with open(html_filepath, "w", encoding="utf-8") as f:
-                f.write(self.driver.page_source)
-            logger.error(f"HTML source '{html_filepath}' saved for analysis.")
-        except Exception as e:
-            logger.error(f"Could not save debug artifacts: {e}")
+        # Implementation to be restored; currently stubbed out
+        pass

-    def login_and_find_list(self, search_name):
-        """
-        Runs the entire process from login to loading the target list robustly
-        by waiting for the first data row to appear.
-        """
-        try:
-            # === STEPS 1-3: LOGIN, NAVIGATION, LOADING THE SEARCH (already stable) ===
-            logger.info(f"Navigating to the login page: {TempConfig.DEALFRONT_LOGIN_URL}")
-            self.driver.get(TempConfig.DEALFRONT_LOGIN_URL)
-            self.wait.until(EC.visibility_of_element_located((By.NAME, "email"))).send_keys(self.username)
-            self.driver.find_element(By.CSS_SELECTOR, "input[type='password']").send_keys(self.password)
-            self.driver.find_element(By.XPATH, "//button[normalize-space()='Log in']").click()
-            logger.info("Login command sent.")
-
-            logger.info("Waiting for the dashboard and the 'Find prospects' quick link...")
-            prospects_link_selector = (By.XPATH, "//a[@data-test-target-product-tile]")
-            prospects_link = self.wait.until(EC.element_to_be_clickable(prospects_link_selector))
-            prospects_link.click()
-            logger.info("'Find prospects' clicked.")
-
-            logger.info(f"Waiting for the list of searches, then clicking '{search_name}'...")
-            search_item_selector = (By.XPATH, f"//div[contains(@class, 'truncate') and normalize-space()='{search_name}']")
-            search_item = self.wait.until(EC.element_to_be_clickable(search_item_selector))
-            search_item.click()
-
-            # === STEP 4: FINAL, ROBUST VERIFICATION ===
-            logger.info(f"Search '{search_name}' loaded. Waiting for the first data row in the table...")
-
-            # Wait for the link in the FIRST data row to appear.
-            # This proves that the API call has finished and the table has been rendered.
-            first_company_link_selector = (By.CSS_SELECTOR, "table#t-result-table tbody tr[id] .sticky-column a.t-highlight-text")
-            self.wait.until(EC.visibility_of_element_located(first_company_link_selector))
-
-            logger.info("First data row found. The page is ready for extraction.")
-            return True
-
-        except Exception as e:
-            logger.critical(f"The process failed: {type(e).__name__}", exc_info=True)
-            self._save_debug_artifacts()
-            return False
+    def login(self):
+        logger.info(f"Navigating to the login page: {TempConfig.DEALFRONT_LOGIN_URL}")
+        self.driver.get(TempConfig.DEALFRONT_LOGIN_URL)
+        self.wait.until(EC.visibility_of_element_located((By.NAME, "email"))).send_keys(self.username)
+        self.driver.find_element(By.CSS_SELECTOR, "input[type='password']").send_keys(self.password)
+        self.driver.find_element(By.XPATH, "//button[normalize-space()='Log in']").click()
+        logger.info("Login command sent. Short pause...")
+        time.sleep(5)
+        return True
+
+    def navigate_to_target(self):
+        logger.info("Navigating directly to the target URL.")
+        self.driver.get(TempConfig.DEALFRONT_TARGET_URL)
+        self.wait.until(EC.url_contains("/t/prospector/"))
+        logger.info("URL change confirmed.")
+        # '+ Neue Suche' is the button's German UI label; the selector must match it verbatim.
+        self.wait.until(EC.visibility_of_element_located((By.XPATH, "//button[normalize-space()='+ Neue Suche']")))
+        logger.info("'Target' page loaded successfully.")
+        return True
+
+    def load_search(self, search_name):
+        logger.info(f"Loading search: '{search_name}'...")
+        search_item_selector = (By.XPATH, f"//div[contains(@class, 'truncate') and normalize-space()='{search_name}']")
+        self.wait.until(EC.element_to_be_clickable(search_item_selector)).click()
+        logger.info(f"Search '{search_name}' loaded.")
+        return True

     def extract_current_page_results(self):
-        """
-        Extracts company names and websites directly from the page
-        using the verified CSS selectors.
-        """
-        try:
-            logger.info("Extracting results with direct CSS selectors...")
-            results = []
-
-            # Wait until the first element we are looking for (a company name) is present.
-            company_name_selector = ".sticky-column a.t-highlight-text"
-            logger.info(f"Waiting for the first company element with selector: '{company_name_selector}'")
-            self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, company_name_selector)))
-
-            time.sleep(3)  # One last short pause to make sure all elements have rendered.
-
-            # Find all table rows that have an id; that is our new, stable anchor.
-            rows_selector = (By.CSS_SELECTOR, "table#t-result-table tbody tr[id]")
-            rows = self.driver.find_elements(*rows_selector)
-
-            if not rows:
-                logger.warning("No data rows found. Saving debug artifacts.")
-                self._save_debug_artifacts()
-                return []
-
-            logger.info(f"Found {len(rows)} company data rows to process.")
-
-            for i, row in enumerate(rows, 1):
-                try:
-                    # Within each row, look up the specific links.
-                    company_name = row.find_element(By.CSS_SELECTOR, company_name_selector).get_attribute("title").strip()
-                    website = row.find_element(By.CSS_SELECTOR, "td:nth-of-type(3) a").text.strip()
-
-                    if company_name and website:
-                        results.append({'name': company_name, 'website': website})
-                except NoSuchElementException:
-                    logger.warning(f"Row {i}: name or website not extractable. Skipping.")
-                    continue
-
-            logger.info(f"Extraction finished. Found {len(results)} companies.")
-            return results
-
-        except Exception as e:
-            logger.error(f"Fatal error during extraction: {type(e).__name__}", exc_info=True)
-            self._save_debug_artifacts()
-            return []
+        logger.warning("Extraction method is not implemented yet and will be skipped.")
+        # The robust wait-and-extract logic still has to be implemented here (see the sketch after the diff).
+        return []

     def close(self):
         if self.driver:
             logger.info("Closing the WebDriver.")
             self.driver.quit()


 if __name__ == "__main__":
-    logger.info("Starting Dealfront automation - final run")
+    logger.info("Starting Dealfront automation - stable state")
     scraper = None
     try:
         scraper = DealfrontScraper()
-        if not scraper.driver:
-            raise Exception("The WebDriver could not be initialized.")
-
-        if not scraper.login_and_find_list(TempConfig.TARGET_SEARCH_NAME):
-            raise Exception("The process from login to loading the list failed.")
-
-        # scraper.handle_overlays()  # Disabled for now to minimize sources of error
+        scraper.login()
+        scraper.navigate_to_target()
+        scraper.load_search(TempConfig.TARGET_SEARCH_NAME)
+
+        logger.info("All steps up to displaying the results page were successful.")
+        logger.info("Next step: implement the extraction logic.")

+        # The (still empty) extraction method is called here
         companies = scraper.extract_current_page_results()

         if companies:
-            df = pd.DataFrame(companies)
-            pd.set_option('display.max_rows', None)
-            pd.set_option('display.max_columns', None)
-            pd.set_option('display.width', 120)
-            pd.set_option('display.max_colwidth', None)
-
-            print("\n" + "="*80)
-            print(" EXTRACTED COMPANIES (FIRST PAGE) ".center(80, "="))
-            print("="*80)
-            print(df.to_string(index=False))
-            print("="*80 + "\n")
-            logger.info(f"Test run SUCCESSFUL. Extracted {len(df)} companies.")
+            print("Extracted successfully (this should not happen yet).")
         else:
-            logger.warning("PROCESS FINISHED, BUT NO COMPANIES WERE EXTRACTED. Please check the HTML dump in the 'output' folder.")
+            logger.info("No data extracted, as expected.")

     except Exception as e:
-        logger.critical(f"A critical error occurred in the main process: {e}", exc_info=False)
+        logger.critical(f"A critical error occurred: {e}", exc_info=True)
+        if scraper:  # guard: the constructor itself may have failed, leaving scraper as None
+            scraper._save_debug_artifacts()
     finally:
         if scraper:
-            # Leave the browser open for 15 seconds to look at the result
-            logger.info("Process finished. Browser stays open for inspection for 15 seconds...")
-            time.sleep(15)
+            logger.info("Process finished. Browser stays open for 10 s.")
+            time.sleep(10)
             scraper.close()
-
-    logger.info("Dealfront automation finished.")
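
Note: the restored login() covers post-login readiness with a fixed time.sleep(5). A minimal sketch of an explicit-wait variant, assuming the dashboard tile from the removed login_and_find_list ("//a[@data-test-target-product-tile]") still renders after login; drop-in method for DealfrontScraper, relying on the module-level By/EC imports and logger:

def login(self):
    logger.info(f"Navigating to the login page: {TempConfig.DEALFRONT_LOGIN_URL}")
    self.driver.get(TempConfig.DEALFRONT_LOGIN_URL)
    self.wait.until(EC.visibility_of_element_located((By.NAME, "email"))).send_keys(self.username)
    self.driver.find_element(By.CSS_SELECTOR, "input[type='password']").send_keys(self.password)
    self.driver.find_element(By.XPATH, "//button[normalize-space()='Log in']").click()
    # Wait for a known post-login element instead of sleeping a fixed 5 seconds.
    # The XPath is an assumption carried over from the deleted code.
    self.wait.until(EC.element_to_be_clickable((By.XPATH, "//a[@data-test-target-product-tile]")))
    logger.info("Login confirmed.")
    return True

An explicit wait returns as soon as the element appears and fails loudly after the 30-second WebDriverWait timeout, rather than always costing five seconds.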
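
The extract_current_page_results stub defers the robust wait-and-extract logic. A minimal sketch of how it could be filled back in, reusing the selectors from the implementation this commit removes (they are assumptions until re-verified against the live page); same class context as above:

def extract_current_page_results(self):
    results = []
    # Wait for the first company link; its presence signals that the results table has rendered,
    # replacing the removed fixed time.sleep(3).
    company_name_selector = ".sticky-column a.t-highlight-text"
    self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, company_name_selector)))
    # Rows with an id attribute were the stable anchor in the removed implementation.
    rows = self.driver.find_elements(By.CSS_SELECTOR, "table#t-result-table tbody tr[id]")
    for i, row in enumerate(rows, 1):
        try:
            name = row.find_element(By.CSS_SELECTOR, company_name_selector).get_attribute("title").strip()
            website = row.find_element(By.CSS_SELECTOR, "td:nth-of-type(3) a").text.strip()
            if name and website:
                results.append({"name": name, "website": website})
        except NoSuchElementException:
            logger.warning(f"Row {i}: name or website not extractable. Skipping.")
    return results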