diff --git a/brancheneinstufung.py b/brancheneinstufung.py
index 22c8dba0..5a849198 100644
--- a/brancheneinstufung.py
+++ b/brancheneinstufung.py
@@ -1,254 +1,277 @@
 import os
 import time
 import re
 import gspread
 import wikipedia
 import requests
 from bs4 import BeautifulSoup
 from oauth2client.service_account import ServiceAccountCredentials
 from datetime import datetime
 from difflib import SequenceMatcher
 import csv
 
 # ==================== CONFIGURATION ====================
 class Config:
-    VERSION = "1.1.1"
+    VERSION = "1.0.13"
     LANG = "de"
     CREDENTIALS_FILE = "service_account.json"
     SHEET_URL = "https://docs.google.com/spreadsheets/d/1u_gHr9JUfmV1-iviRzbSe3575QEp7KLhK5jFV_gJcgo"
     MAX_RETRIES = 3
     RETRY_DELAY = 5
     LOG_CSV = "gpt_antworten_log.csv"
-    SIMILARITY_THRESHOLD = 0.6
+    SIMILARITY_THRESHOLD = 0.65
     DEBUG = True
-    WIKIPEDIA_SEARCH_RESULTS = 8
+    WIKIPEDIA_SEARCH_RESULTS = 10
     HTML_PARSER = "html.parser"
 
 # ==================== HELPER FUNCTIONS ====================
 def retry_on_failure(func):
     """Decorator that retries a function call on errors"""
     def wrapper(*args, **kwargs):
         for attempt in range(Config.MAX_RETRIES):
             try:
                 return func(*args, **kwargs)
             except Exception as e:
                 print(f"⚠️ Error in {func.__name__} (attempt {attempt+1}): {str(e)[:100]}")
                 time.sleep(Config.RETRY_DELAY)
         return None
     return wrapper
 
 def debug_print(message):
     """Prints debug output when Config.DEBUG is True"""
     if Config.DEBUG:
         print(f"[DEBUG] {message}")
 
 def clean_text(text):
-    """Strips HTML entities and excess whitespace from text"""
+    """Strips unwanted characters from text"""
     if not text:
         return "k.A."
-    # conversion and cleanup
     text = str(text)
-    text = re.sub(r'\[.*?\]', '', text)  # remove square brackets with content
-    text = re.sub(r'\(.*?\)', '', text)  # remove round brackets with content
-    text = re.sub(r'<.*?>', '', text)    # remove HTML tags
+    text = re.sub(r'\[\d+\]', '', text)  # remove reference markers such as [1]
     text = re.sub(r'\s+', ' ', text).strip()
     return text if text else "k.A."
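+
+# Illustrative example (assumed input): clean_text("Automobil[1]  Zulieferer")
+# returns "Automobil Zulieferer". Unlike the previous version, parenthesized
+# notes such as "(Deutschland)" are now preserved.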
 
 # ==================== GOOGLE SHEET HANDLER ====================
 class GoogleSheetHandler:
-    """Class that handles the Google Sheets interaction"""
+    """Handles the Google Sheets interaction"""
+
     def __init__(self):
         self.sheet = None
         self.sheet_values = []
         self._connect()
 
     def _connect(self):
         """Connects to the Google Sheet"""
         scope = ["https://www.googleapis.com/auth/spreadsheets"]
         creds = ServiceAccountCredentials.from_json_keyfile_name(
             Config.CREDENTIALS_FILE, scope
         )
         self.sheet = gspread.authorize(creds).open_by_url(Config.SHEET_URL).sheet1
         self.sheet_values = self.sheet.get_all_values()
 
     def get_start_index(self):
-        """Finds the first empty row in column N (index 13)"""
+        """Finds the first empty row in column N"""
         filled_n = [row[13] if len(row) > 13 else '' for row in self.sheet_values[1:]]
         return next(
             (i + 1 for i, v in enumerate(filled_n, start=1) if not str(v).strip()),
             len(filled_n) + 1
         )
 
     def update_row(self, row_num, values):
         """Updates a single row in the sheet"""
         self.sheet.update(
             range_name=f"G{row_num}:Q{row_num}",
             values=[values]
         )
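+
+    # Illustrative payload sketch (assumption based on the G..Q range above):
+    # update_row expects exactly eleven values, one per column G through Q, e.g.
+    #   handler.update_row(5, ["Automobil", "k.A.", "1234", "k.A.", "k.A.", "k.A.",
+    #                          "https://de.wikipedia.org/wiki/Beispiel",
+    #                          "2025-01-01 12:00:00", "k.A.", "k.A.", Config.VERSION])
+    # where "handler" is a hypothetical GoogleSheetHandler instance.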
 
-# ==================== WIKIPEDIA SCRAPER ====================
-class WikipediaScraper:
+# ==================== WIKIPEDIA SCRAPER ====================
+class WikipediaScraper:
+    """Handles the Wikipedia search and data extraction"""
+
     def __init__(self):
         wikipedia.set_lang(Config.LANG)
-    def _extract_domain_hint(self, website):
-        """Extracts the domain key from the website URL"""
-        if not website:
-            return ""
-        # strip protocol and www, split into parts
-        clean_url = website.lower().replace("https://", "").replace("http://", "").replace("www.", "")
-        domain_parts = clean_url.split(".")
-        return domain_parts[0] if domain_parts else ""
-    def _generate_search_terms(self, company_name, website_hint=""):
-        """Generates search terms from company name and website"""
-        search_terms = [company_name.strip()]
-        # strip legal forms and special characters
-        clean_name = re.sub(
-            r'\s+(?:GmbH|AG|KG|OHG|e\.V\.|mbH|& Co\. KG| GmbH & Co\. KG).*$',
-            '',
-            company_name
-        ).strip()
-        # add the cleaned name if it differs
-        if clean_name and clean_name != company_name:
-            search_terms.append(clean_name)
-        # take the first two relevant words
-        name_words = [w for w in re.split(r'\W+', clean_name) if w]
-        if len(name_words) >= 2:
-            search_terms.append(" ".join(name_words[:2]))
-        # add the domain hint
-        domain_hint = self._extract_domain_hint(website_hint)
-        if domain_hint and domain_hint not in ["de", "com", "org", "net"]:
-            search_terms.append(domain_hint)
-        debug_print(f"Generated search terms: {search_terms}")
-        return list(set(search_terms))  # drop duplicates
-    def _validate_article(self, page, company_name, domain_hint=""):
-        """Checks whether the article matches the company"""
-        # normalize both names
-        page_title = re.sub(r'\(.*?\)', '', page.title).strip().lower()
-        search_name = re.sub(r'[^a-zA-Z0-9äöüß ]', '', company_name).strip().lower()
-        # similarity check
-        similarity = SequenceMatcher(None, page_title, search_name).ratio()
-        debug_print(f"Similarity '{page_title}' vs '{search_name}': {similarity:.2f}")
-        # additional domain check
-        if domain_hint:
-            html_content = requests.get(page.url).text.lower()
-            if domain_hint not in html_content:
-                debug_print(f"Domain hint '{domain_hint}' not found in the article")
-                return False
-        return similarity >= Config.SIMILARITY_THRESHOLD
-    @retry_on_failure
-    def search_company_article(self, company_name, website_hint=""):
+
+    def _extract_domain_hint(self, website):
+        """Extracts the domain stem from a URL"""
+        if not website:
+            return ""
+        clean_url = re.sub(r'https?://(www\.)?', '', website.lower()).split('.')[0]
+        return clean_url if clean_url not in ["de", "com", "org"] else ""
+
+    def _generate_search_terms(self, company_name, website):
+        """Generates search terms with improved name analysis"""
+        terms = []
+
+        # base terms
+        base_name = re.sub(r'\s+(GmbH|AG|KG|Co\. KG).*$', '', company_name).strip()
+        terms.append(base_name)
+
+        # domain hint
+        domain_hint = self._extract_domain_hint(website)
+        if domain_hint:
+            terms.append(domain_hint)
+
+        # extract keywords
+        name_parts = [p for p in re.split(r'\W+', base_name) if p and len(p) > 3]
+        if len(name_parts) >= 2:
+            terms.append(" ".join(name_parts[:2]))
+
+        return list(set(terms))
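+
+    # Illustrative example (assumed inputs): for company_name
+    # "Musterfirma GmbH & Co. KG" and website "https://www.musterfirma.de",
+    # this returns the two terms "Musterfirma" and "musterfirma" in
+    # unspecified order: the legal-form suffix is stripped and the domain
+    # stem is added; set() deduplicates case-sensitively.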
+
+    def _validate_article(self, page, company_name, domain_hint):
+        """Validates an article with extended checks"""
+        # title cleanup
+        clean_title = re.sub(r'\(.*?\)|\s-\s.*', '', page.title).lower()
+        clean_company = re.sub(r'[^a-zäöüß ]', '', company_name.lower())
+
+        similarity = SequenceMatcher(None, clean_title, clean_company).ratio()
+        debug_print(f"Similarity check: {clean_title} vs {clean_company} = {similarity:.2f}")
+
+        # domain check
+        if domain_hint:
+            try:
+                response = requests.get(page.url)
+                if domain_hint not in response.text.lower():
+                    return False
+            except Exception as e:
+                debug_print(f"Domain check failed: {str(e)}")
+
+        return similarity >= Config.SIMILARITY_THRESHOLD
+
+    @retry_on_failure
+    def search_company_article(self, company_name, website):
         """Main entry point for the article search"""
-        search_terms = self._generate_search_terms(company_name, website_hint)
-        domain_hint = self._extract_domain_hint(website_hint)
+        search_terms = self._generate_search_terms(company_name, website)
+        domain_hint = self._extract_domain_hint(website)
+
         for term in search_terms:
-            try:
-                results = wikipedia.search(term, results=Config.WIKIPEDIA_SEARCH_RESULTS)
-                debug_print(f"Search results for '{term}': {results}")
-                for title in results:
-                    try:
-                        page = wikipedia.page(title, auto_suggest=False)
-                        if self._validate_article(page, company_name, domain_hint):
-                            return page
-                    except wikipedia.exceptions.DisambiguationError:
-                        continue
-            except Exception as e:
-                debug_print(f"Error while searching for {term}: {str(e)}")
-                continue
+            try:
+                results = wikipedia.search(term, results=Config.WIKIPEDIA_SEARCH_RESULTS)
+                debug_print(f"Search '{term}': {results}")
+
+                for title in results:
+                    try:
+                        page = wikipedia.page(title, auto_suggest=False)
+                        if self._validate_article(page, company_name, domain_hint):
+                            return page
+                    except (wikipedia.exceptions.DisambiguationError,
+                            wikipedia.exceptions.PageError) as e:
+                        debug_print(f"Page error: {str(e)}")
+                        continue
+            except Exception as e:
+                debug_print(f"Search error: {str(e)}")
+                continue
         return None
 
-    def extract_company_data(self, page_url):
-        """Extracts industry and revenue from the Wikipedia article"""
-        response = requests.get(page_url)
-        soup = BeautifulSoup(response.text, Config.HTML_PARSER)
-        return {
-            'branche': self._extract_infobox_value(soup, 'branche'),
-            'umsatz': self._extract_infobox_value(soup, 'umsatz'),
-            'url': page_url
-        }
+    def extract_company_data(self, page_url):
+        """Detailed infobox extraction"""
+        try:
+            response = requests.get(page_url)
+            soup = BeautifulSoup(response.text, Config.HTML_PARSER)
+
+            return {
+                'branche': self._extract_infobox_value(soup, 'branche'),
+                'umsatz': self._extract_infobox_value(soup, 'umsatz'),
+                'url': page_url
+            }
+        except Exception as e:
+            debug_print(f"Extraction error: {str(e)}")
+            return {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': page_url}
 
-# ==================== WIKIPEDIA SCRAPER ====================
-class WikipediaScraper:
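+    # NOTE: requests.get is called without an explicit timeout here and in
+    # _validate_article, so a stalled connection can block the whole run;
+    # passing e.g. requests.get(page_url, timeout=10) would be a defensive
+    # option.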
     def _extract_infobox_value(self, soup, target):
-        """Extracts a specific value from the infobox using extended search patterns"""
-        # extended infobox detection
-        infobox = soup.find('table',
-            class_=lambda c: c and any(
-                kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']
-            ))
-        if not infobox:
-            return "k.A."
-        keywords = {
-            'branche': [
-                'branche', 'industrie', 'tätigkeitsfeld', 'geschäftsfeld',
-                'sektor', 'produkte', 'leistungen', 'geschäftsbereich'
-            ],
-            'umsatz': [
-                'umsatz', 'jahresumsatz', 'umsatzerlöse', 'gesamtumsatz',
-                'konzernumsatz', 'umsatzentwicklung', 'ergebnis'
-            ]
-        }.get(target, [])
-        # scan every row and cell
-        value = "k.A."
-        for row in infobox.find_all('tr'):
-            # extended header detection in th/td with colspan
-            header_cells = row.find_all(['th', 'td'], attrs={'colspan': False})
-            for header in header_cells:
-                header_text = clean_text(header.get_text()).lower()
-                if any(kw in header_text for kw in keywords):
-                    # take the next cell, ignore nested tables
-                    value_cell = header.find_next_sibling(['td', 'th'])
-                    if value_cell:
-                        # handle lists and multi-line content
-                        list_items = value_cell.find_all('li')
-                        if list_items:
-                            value = ', '.join(clean_text(li.get_text()) for li in list_items)
-                        else:
-                            value = clean_text(value_cell.get_text())
-                        # extract numeric revenue values via regex
-                        if target == 'umsatz':
-                            match = re.search(
-                                r'(\d{1,3}(?:[.,]\d{3})*(?:[.,]\d{2})?)\s*(?:Mio\.?|Millionen|Mrd\.?|Milliarden)?\s*(?:€|Euro|EUR)',
-                                value
-                            )
-                            if match:
-                                value = match.group(1).replace('.', '').replace(',', '.')
-                        return value
-        return "k.A."
+        """Robust infobox analysis"""
+        infobox = soup.find('table', class_=lambda c: c and any(
+            kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']
+        ))
+
+        if not infobox:
+            return "k.A."
+
+        keywords = {
+            'branche': ['branche', 'industrie', 'tätigkeitsfeld', 'geschäftsfeld'],
+            'umsatz': ['umsatz', 'jahresumsatz', 'konzernumsatz', 'erlöse']
+        }[target]
+
+        for row in infobox.find_all('tr'):
+            header = row.find('th')
+            if header:
+                header_text = clean_text(header.get_text()).lower()
+                if any(kw in header_text for kw in keywords):
+                    value = row.find('td')
+                    if value:
+                        raw_value = clean_text(value.get_text())
+
+                        # revenue normalization: keep only the leading number
+                        if target == 'umsatz':
+                            match = re.search(
+                                r'(\d{1,3}(?:[.,]\d{3})*)\s*(?:Mio\.?|Millionen|Mrd\.?|Milliarden)?',
+                                raw_value
+                            )
+                            if match:
+                                return match.group(1).replace('.', '').replace(',', '.')
+                        return raw_value
+        return "k.A."
 
 # ==================== DATA PROCESSOR ====================
 class DataProcessor:
-    """Class that controls the overall process"""
+    """Controls the overall process"""
+
     def __init__(self):
         self.sheet_handler = GoogleSheetHandler()
         self.wiki_scraper = WikipediaScraper()
 
     def process_rows(self, num_rows):
         """Processes the given number of rows"""
         start_index = self.sheet_handler.get_start_index()
         print(f"Starting at row {start_index+1}")
 
         for i in range(start_index, min(start_index + num_rows, len(self.sheet_handler.sheet_values))):
             row = self.sheet_handler.sheet_values[i]
             self._process_single_row(i+1, row)
 
     def _process_single_row(self, row_num, row_data):
         """Processes a single row"""
         company_name = row_data[0] if len(row_data) > 0 else ""
         website = row_data[1] if len(row_data) > 1 else ""
         print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Processing row {row_num}: {company_name}")
 
-        # step 1: find the Wikipedia article
+        # Wikipedia search
         article = self.wiki_scraper.search_company_article(company_name, website)
 
-        # step 2: extract the data
-        if article:
-            company_data = self.wiki_scraper.extract_company_data(article.url)
-        else:
-            company_data = {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': ''}
-        # write the data back to the sheet
+        # data extraction
+        company_data = self.wiki_scraper.extract_company_data(article.url) if article else {
+            'branche': 'k.A.',
+            'umsatz': 'k.A.',
+            'url': row_data[12] if len(row_data) > 12 else ""
+        }
+
+        # sheet update
         self._update_sheet(row_num, company_data)
         time.sleep(Config.RETRY_DELAY)
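+
+    # NOTE: the time.sleep(Config.RETRY_DELAY) above doubles as a per-row rate
+    # limit (5 s per row). A dedicated setting, e.g. a hypothetical
+    # Config.ROW_DELAY, would decouple throttling from the retry back-off.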
 
     def _update_sheet(self, row_num, data):
         """Updates the row data"""
         current_values = self.sheet_handler.sheet.row_values(row_num)
         new_values = [
-            data['branche'] if data['branche'] != "k.A." else current_values[6] if len(current_values) > 6 else "k.A.",
-            "k.A.",  # LinkedIn industry stays unchanged
-            data['umsatz'] if data['umsatz'] != "k.A." else current_values[8] if len(current_values) > 8 else "k.A.",
-            "k.A.", "k.A.", "k.A.",
-            data['url'] if data['url'] else current_values[12] if len(current_values) > 12 else "",
-            datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-            "k.A.", "k.A.",
-            Config.VERSION
+            # keep the existing cell when the scrape returned "k.A."; a plain
+            # "or" check would never fall through, because "k.A." is truthy
+            data['branche'] if data['branche'] != "k.A." else (current_values[6] if len(current_values) > 6 else "k.A."),
+            "k.A.",  # LinkedIn industry stays unchanged
+            data['umsatz'] if data['umsatz'] != "k.A." else (current_values[8] if len(current_values) > 8 else "k.A."),
+            "k.A.", "k.A.", "k.A.",
+            data['url'] or (current_values[12] if len(current_values) > 12 else ""),
+            datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+            "k.A.", "k.A.",
+            Config.VERSION
         ]
         self.sheet_handler.update_row(row_num, new_values)
         print(f"✅ Updated: Branche: {new_values[0]}, Umsatz: {new_values[2]}, URL: {new_values[6]}")
 
-# ==================== MAIN EXECUTION ====================
+# ==================== MAIN ====================
 if __name__ == "__main__":
     num_rows = int(input("How many rows should be checked? "))
     processor = DataProcessor()
     processor.process_rows(num_rows)
     print("\n✅ Wikipedia evaluation finished")
\ No newline at end of file