Brancheneinstufung2/brancheneinstufung.py
Floke eecbf383b5 deepseek V7
Complete implementation of the _extract_infobox_value method

Extended keywords for German infoboxes

Improved text cleanup for industry values

Tolerance for different number formats

Debug output for every processing step
2025-03-31 16:45:02 +00:00

import os
import time
import re
import csv  # currently unused; presumably intended for the LOG_CSV answer log
import gspread
import wikipedia
import requests
from bs4 import BeautifulSoup
from functools import wraps
from oauth2client.service_account import ServiceAccountCredentials
from datetime import datetime
from difflib import SequenceMatcher

# ==================== CONFIGURATION ====================
class Config:
    VERSION = "1.0.13"
    LANG = "de"
    CREDENTIALS_FILE = "service_account.json"
    SHEET_URL = "https://docs.google.com/spreadsheets/d/1u_gHr9JUfmV1-iviRzbSe3575QEp7KLhK5jFV_gJcgo"
    MAX_RETRIES = 3
    RETRY_DELAY = 5  # seconds; used between retries and between processed rows
    LOG_CSV = "gpt_antworten_log.csv"
    SIMILARITY_THRESHOLD = 0.65
    DEBUG = True
    WIKIPEDIA_SEARCH_RESULTS = 10
    HTML_PARSER = "html.parser"

# ==================== HELPER FUNCTIONS ====================
def retry_on_failure(func):
    """Decorator that retries failed calls; returns None once all attempts are exhausted."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        for attempt in range(Config.MAX_RETRIES):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                print(f"⚠️ Fehler bei {func.__name__} (Versuch {attempt+1}): {str(e)[:100]}")
                time.sleep(Config.RETRY_DELAY)
        return None
    return wrapper
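
# Usage sketch (hypothetical function, assuming transient network errors):
#
#     @retry_on_failure
#     def fetch_page(url):
#         return requests.get(url, timeout=10).text
#
# fetch_page(...) is attempted up to Config.MAX_RETRIES times and yields None
# if every attempt raises, so callers must handle a None result.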

def debug_print(message):
    """Prints a debug message when Config.DEBUG is True."""
    if Config.DEBUG:
        print(f"[DEBUG] {message}")

def clean_text(text):
    """Strips unwanted characters from a text value; empty input becomes "k.A."."""
    if not text:
        return "k.A."
    text = str(text)
    text = re.sub(r'\[\d+\]', '', text)  # drop footnote markers like [1]
    text = re.sub(r'\s+', ' ', text).strip()
    return text if text else "k.A."
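
# Worked examples (hypothetical inputs):
#   clean_text("Automobil[1]\n[2]")  -> "Automobil"
#   clean_text(None)                 -> "k.A."  ("keine Angabe" / not specified)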

# ==================== GOOGLE SHEET HANDLER ====================
class GoogleSheetHandler:
    """Wraps all Google Sheets interaction."""
    def __init__(self):
        self.sheet = None
        self.sheet_values = []
        self._connect()

    def _connect(self):
        """Opens the configured Google Sheet via a service account."""
        scope = ["https://www.googleapis.com/auth/spreadsheets"]
        creds = ServiceAccountCredentials.from_json_keyfile_name(
            Config.CREDENTIALS_FILE, scope
        )
        self.sheet = gspread.authorize(creds).open_by_url(Config.SHEET_URL).sheet1
        self.sheet_values = self.sheet.get_all_values()

    def get_start_index(self):
        """Returns the 0-based index into sheet_values of the first row whose
        column N (index 13) is still empty."""
        filled_n = [row[13] if len(row) > 13 else '' for row in self.sheet_values[1:]]
        # enumerate starts at 1 so the yielded index already points past the
        # header entry of sheet_values and lands on the matching row itself
        return next(
            (i for i, v in enumerate(filled_n, start=1) if not str(v).strip()),
            len(filled_n) + 1
        )
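
    # Indexing example (assuming one header row plus data rows):
    #   sheet_values[0] -> sheet row 1 (header), sheet_values[k] -> sheet row k + 1.
    # If the first empty column-N cell is in sheet row 5, filled_n[3] is empty,
    # enumerate(start=1) yields i = 4, and sheet_values[4] is exactly that row.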

    def update_row(self, row_num, values):
        """Writes one result row into columns G..Q of the given sheet row."""
        self.sheet.update(
            range_name=f"G{row_num}:Q{row_num}",
            values=[values]
        )
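
    # Column layout inferred from update_row and _update_sheet (an assumption,
    # not documented in the sheet itself):
    #   A = company name, B = website,
    #   G = industry, I = revenue, M = Wikipedia URL,
    #   N = processing timestamp (doubles as the "already processed" marker
    #       that get_start_index checks), Q = script version.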

# ==================== WIKIPEDIA SCRAPER ====================
class WikipediaScraper:
    """Handles the Wikipedia search and data extraction."""
    def __init__(self):
        wikipedia.set_lang(Config.LANG)

    def _extract_domain_hint(self, website):
        """Extracts the domain keyword from a URL."""
        if not website:
            return ""
        clean_url = re.sub(r'https?://(www\.)?', '', website.lower()).split('.')[0]
        return clean_url if clean_url not in ["de", "com", "org"] else ""
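
    # Worked examples (hypothetical URLs):
    #   _extract_domain_hint("https://www.bosch.de")     -> "bosch"
    #   _extract_domain_hint("https://de.wikipedia.org") -> ""  (generic "de" is filtered)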

    def _generate_search_terms(self, company_name, website):
        """Generates search terms with improved name analysis."""
        terms = []
        # base term: company name without the legal-form suffix
        base_name = re.sub(r'\s+(GmbH|AG|KG|Co\. KG).*$', '', company_name).strip()
        terms.append(base_name)
        # domain hint from the website URL
        domain_hint = self._extract_domain_hint(website)
        if domain_hint:
            terms.append(domain_hint)
        # keyword extraction: first two longer words of the name
        name_parts = [p for p in re.split(r'\W+', base_name) if p and len(p) > 3]
        if len(name_parts) >= 2:
            terms.append(" ".join(name_parts[:2]))
        return list(set(terms))
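
    # Worked example (hypothetical company):
    #   _generate_search_terms("Musterfirma Maschinenbau GmbH", "https://www.musterfirma.de")
    #   -> ["Musterfirma Maschinenbau", "musterfirma"]  (set-deduplicated, order varies)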

    def _validate_article(self, page, company_name, domain_hint):
        """Validates an article with extended checks."""
        # title cleanup: drop parentheses and dash suffixes before comparing
        clean_title = re.sub(r'\(.*?\)|\s-\s.*', '', page.title).lower()
        clean_company = re.sub(r'[^a-zäöüß ]', '', company_name.lower())
        similarity = SequenceMatcher(None, clean_title, clean_company).ratio()
        debug_print(f"Ähnlichkeitscheck: {clean_title} vs {clean_company} = {similarity:.2f}")
        # domain check: the article should mention the company's domain
        if domain_hint:
            try:
                response = requests.get(page.url, timeout=10)  # timeout so a hung request cannot stall the run
                if domain_hint not in response.text.lower():
                    return False
            except Exception as e:
                debug_print(f"Domain-Check fehlgeschlagen: {str(e)}")
        return similarity >= Config.SIMILARITY_THRESHOLD
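
    # Similarity example: page title "Siemens" vs. company "Siemens AG" compares
    # "siemens" with "siemens ag"; SequenceMatcher yields 2*7/17 ≈ 0.82, which
    # passes the 0.65 threshold.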

    @retry_on_failure
    def search_company_article(self, company_name, website):
        """Main entry point for the article search."""
        search_terms = self._generate_search_terms(company_name, website)
        domain_hint = self._extract_domain_hint(website)
        for term in search_terms:
            try:
                results = wikipedia.search(term, results=Config.WIKIPEDIA_SEARCH_RESULTS)
                debug_print(f"Suche '{term}': {results}")
                for title in results:
                    try:
                        page = wikipedia.page(title, auto_suggest=False)
                        if self._validate_article(page, company_name, domain_hint):
                            return page
                    except (wikipedia.exceptions.DisambiguationError,
                            wikipedia.exceptions.PageError) as e:
                        debug_print(f"Seitenfehler: {str(e)}")
                        continue
            except Exception as e:
                debug_print(f"Suchfehler: {str(e)}")
                continue
        return None
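
    # Flow sketch: for a hypothetical "Musterfirma Maschinenbau GmbH" the scraper
    # queries each generated term, opens up to WIKIPEDIA_SEARCH_RESULTS candidate
    # pages per term, and returns the first page that passes _validate_article;
    # None means no candidate survived validation.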

    def extract_company_data(self, page_url):
        """Detailed infobox extraction from the article HTML."""
        try:
            response = requests.get(page_url, timeout=10)
            soup = BeautifulSoup(response.text, Config.HTML_PARSER)
            return {
                'branche': self._extract_infobox_value(soup, 'branche'),
                'umsatz': self._extract_infobox_value(soup, 'umsatz'),
                'url': page_url
            }
        except Exception as e:
            debug_print(f"Extraktionsfehler: {str(e)}")
            return {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': page_url}

    def _extract_infobox_value(self, soup, target):
        """Robust infobox extraction with extended patterns."""
        debug_print(f"Starte Extraktion für: {target}")
        # extended infobox detection: match several common class names
        infobox = soup.find('table', class_=lambda c: c and any(
            kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen', 'firmendaten']
        ))
        if not infobox:
            debug_print("Keine Infobox gefunden")
            return "k.A."
        # extended keywords for German infobox headers
        keywords = {
            'branche': [
                'branche', 'industrie', 'tätigkeitsfeld',
                'geschäftsfeld', 'sektor', 'branchen',
                'wirtschaftszweig', 'tätigkeitsbereich',
                'produkte', 'leistungen', 'aktivität'
            ],
            'umsatz': [
                'umsatz', 'jahresumsatz', 'konzernumsatz',
                'gesamtumsatz', 'umsatzerlöse', 'erlöse',
                'umsatzentwicklung', 'ergebnis',
                'einnahmen', 'jahresergebnis'
            ]
        }[target]
        # scan every table row for a matching header cell
        for row in infobox.find_all('tr'):
            header = row.find('th')
            if header:
                header_text = clean_text(header.get_text()).lower()
                debug_print(f"Prüfe Header: {header_text}")
                if any(kw in header_text for kw in keywords):
                    value_cell = row.find('td')
                    if value_cell:
                        value = clean_text(value_cell.get_text())
                        # industry cleanup
                        if target == 'branche':
                            # drop bracketed references and parenthesized additions
                            value = re.sub(r'\[.*?\]|\(.*?\)', '', value)
                            return ' '.join(value.split()).strip()
                        # revenue cleanup
                        if target == 'umsatz':
                            # normalize the German number format ("1.234,5" -> "1234.5")
                            # before matching, so thousands separators cannot
                            # truncate the captured number
                            normalized = value.replace('.', '').replace(',', '.')
                            match = re.search(r'(\d+(?:\.\d+)?)', normalized)
                            if match:
                                num_value = float(match.group(1))
                                if 'mrd' in value.lower() or 'milliarden' in value.lower():
                                    num_value *= 1000
                                return f"{num_value:.1f} Mio €"
                            return value.strip()
        debug_print(f"{target} nicht gefunden")
        return "k.A."

# ==================== DATA PROCESSOR ====================
class DataProcessor:
    """Drives the overall process."""
    def __init__(self):
        self.sheet_handler = GoogleSheetHandler()
        self.wiki_scraper = WikipediaScraper()

    def process_rows(self, num_rows):
        """Processes the given number of rows."""
        start_index = self.sheet_handler.get_start_index()
        print(f"Starte bei Zeile {start_index+1}")
        for i in range(start_index, min(start_index + num_rows, len(self.sheet_handler.sheet_values))):
            row = self.sheet_handler.sheet_values[i]
            self._process_single_row(i+1, row)

    def _process_single_row(self, row_num, row_data):
        """Processes a single row."""
        company_name = row_data[0] if len(row_data) > 0 else ""
        website = row_data[1] if len(row_data) > 1 else ""
        print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Verarbeite Zeile {row_num}: {company_name}")
        # Wikipedia search
        article = self.wiki_scraper.search_company_article(company_name, website)
        # data extraction (fall back to placeholders when no article was found)
        company_data = self.wiki_scraper.extract_company_data(article.url) if article else {
            'branche': 'k.A.',
            'umsatz': 'k.A.',
            'url': row_data[12] if len(row_data) > 12 else ""
        }
        # sheet update, then throttle before the next row
        self._update_sheet(row_num, company_data)
        time.sleep(Config.RETRY_DELAY)

    def _update_sheet(self, row_num, data):
        """Writes the row data back, keeping existing cell values as fallback."""
        current_values = self.sheet_handler.sheet.row_values(row_num)
        new_values = [
            data['branche'] or (current_values[6] if len(current_values) > 6 else "k.A."),
            "k.A.",  # LinkedIn industry
            data['umsatz'] or (current_values[8] if len(current_values) > 8 else "k.A."),
            "k.A.", "k.A.", "k.A.",
            data['url'] or (current_values[12] if len(current_values) > 12 else ""),
            datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "k.A.", "k.A.",
            Config.VERSION
        ]
        self.sheet_handler.update_row(row_num, new_values)
        print(f"✅ Aktualisiert: Branche: {new_values[0]}, Umsatz: {new_values[2]}, URL: {new_values[6]}")

# ==================== MAIN ====================
if __name__ == "__main__":
    num_rows = int(input("Wieviele Zeilen sollen überprüft werden? "))
    processor = DataProcessor()
    processor.process_rows(num_rows)
    print("\n✅ Wikipedia-Auswertung abgeschlossen")