diff --git a/brancheneinstufung.py b/brancheneinstufung.py
index 8a666188..de3d0e32 100644
--- a/brancheneinstufung.py
+++ b/brancheneinstufung.py
@@ -46,7 +46,6 @@ def clean_text(text):
     """Bereinigt Text von unerwünschten Zeichen"""
     if not text:
         return "k.A."
-
     text = str(text)
     text = re.sub(r'\[\d+\]', '', text)  # Entferne Referenznummern
     text = re.sub(r'\s+', ' ', text).strip()
@@ -80,6 +79,7 @@ class GoogleSheetHandler:
     def update_row(self, row_num, values):
         """Aktualisiert eine Zeile im Sheet"""
+        # ACHTUNG: Bereich auf G bis R erweitern, um 12 Spalten zu umfassen.
         self.sheet.update(
             range_name=f"G{row_num}:R{row_num}",
             values=[values]
         )
@@ -96,7 +96,6 @@ class WikipediaScraper:
         """Normalisiert URLs zu reinen Domainnamen"""
         if not website:
             return ""
-
         domain = re.sub(r'^https?:\/\/(www\.)?', '', website.lower())
         domain = re.sub(r'\/.*$', '', domain)
         domain = domain.split('.')[0]
@@ -106,22 +105,18 @@ class WikipediaScraper:
     def _generate_search_terms(self, company_name, website):
         """Generiert Suchbegriffe mit optimierter URL-Verarbeitung"""
         terms = []
-
         clean_name = re.sub(
             r'\s+(GmbH|AG|KG|Co\. KG|e\.V\.|mbH|& Co).*$', '',
             company_name
         ).strip()
         terms.extend([company_name.strip(), clean_name])
-
         domain = self._normalize_domain(website)
         if domain and domain not in ["de", "com", "org"]:
             terms.append(domain)
-
         name_parts = [p for p in re.split(r'\W+', clean_name) if p and len(p) > 3]
         if len(name_parts) >= 2:
             terms.append(" ".join(name_parts[:2]))
-
         debug_print(f"Generierte Suchbegriffe: {list(set(terms))}")
         return list(set(terms))
@@ -131,7 +126,6 @@ class WikipediaScraper:
         clean_company = re.sub(r'[^a-zäöüß ]', '', company_name.lower())
         similarity = SequenceMatcher(None, clean_title, clean_company).ratio()
         debug_print(f"Ähnlichkeit: {similarity:.2f} ({clean_title} vs {clean_company})")
-
         if domain_hint:
             try:
                 html_content = requests.get(page.url).text.lower()
                 if domain_hint not in html_content:
                     return False
             except Exception as e:
                 debug_print(f"Domain-Check fehlgeschlagen: {str(e)}")
-
         return similarity >= Config.SIMILARITY_THRESHOLD
 
     @retry_on_failure
@@ -148,12 +141,10 @@ def search_company_article(self, company_name, website):
         """Hauptfunktion zur Artikelsuche"""
         search_terms = self._generate_search_terms(company_name, website)
         domain_hint = self._normalize_domain(website)
-
         for term in search_terms:
             try:
                 results = wikipedia.search(term, results=Config.WIKIPEDIA_SEARCH_RESULTS)
                 debug_print(f"Suchergebnisse für '{term}': {results}")
-
                 for title in results:
                     try:
                         page = wikipedia.page(title, auto_suggest=False)
@@ -168,35 +159,13 @@ class WikipediaScraper:
                         continue
         return None
 
-    def extract_company_data(self, page_url):
-        """Extrahiert Daten aus dem Wikipedia-Artikel"""
-        if not page_url:
-            return {
-                'full_infobox': self.extract_full_infobox(soup),'branche': 'k.A.', 'umsatz': 'k.A.', 'url': ''}
-
-        try:
-            response = requests.get(page_url)
-            soup = BeautifulSoup(response.text, Config.HTML_PARSER)
-            return {
-                'full_infobox': self.extract_full_infobox(soup),
-                'branche': self._extract_infobox_value(soup, 'branche'),
-                'umsatz': self._extract_infobox_value(soup, 'umsatz'),
-                'url': page_url
-            }
-        except Exception as e:
-            debug_print(f"Extraktionsfehler: {str(e)}")
-            return {
-                'full_infobox': self.extract_full_infobox(soup),'branche': 'k.A.', 'umsatz': 'k.A.', 'url': page_url}
-
     def _extract_infobox_value(self, soup, target):
-        """Extrahiert Werte aus der Infobox"""
+        """Extrahiert Werte aus der Infobox (Fallback-Methode)"""
         infobox = soup.find('table', class_=lambda c: c and any(
             kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']
         ))
-
         if not infobox:
             return "k.A."
-
         keywords = {
             'branche': [
                 'branche', 'industrie', 'tätigkeit',
@@ -209,21 +178,17 @@ class WikipediaScraper:
                 'einnahmen', 'ergebnis', 'jahresergebnis'
             ]
         }[target]
-
         for row in infobox.find_all('tr'):
             header = row.find('th')
             if header:
                 header_text = clean_text(header.get_text()).lower()
-
                 if any(kw in header_text for kw in keywords):
                     value = row.find('td')
                     if value:
                         raw_value = clean_text(value.get_text())
-
                         if target == 'branche':
-                            clean = re.sub(r'\[.*?\]|\(.*?\)', '', raw_value)
-                            return ' '.join(clean.split()).strip()
-
+                            clean_val = re.sub(r'\[.*?\]|\(.*?\)', '', raw_value)
+                            return ' '.join(clean_val.split()).strip()
                         if target == 'umsatz':
                             match = re.search(
                                 r'(\d{1,3}(?:[.,]\d{3})*)\s*'
@@ -238,20 +203,55 @@ class WikipediaScraper:
                                 num *= 1000
                             return f"{num:.1f} Mio €"
                         return raw_value.strip()
-
         return "k.A."
-
-
+
     def extract_full_infobox(self, soup):
         """Extrahiert die komplette Infobox als Text"""
         infobox = soup.find('table', class_=lambda c: c and any(
             kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']
         ))
-
         if not infobox:
             return "k.A."
-
         return clean_text(infobox.get_text(separator=' | '))
+
+    def extract_fields_from_infobox_text(self, infobox_text, field_names):
+        """Extrahiert die gewünschten Felder (z.B. Branche, Umsatz) aus dem Infobox-Text.
+        Es wird angenommen, dass die Felder durch ' | ' getrennt sind."""
+        result = {}
+        tokens = [token.strip() for token in infobox_text.split("|") if token.strip()]
+        for i, token in enumerate(tokens):
+            for field in field_names:
+                if token.lower() == field.lower():
+                    # Nächstes nicht-leeres Token als Wert
+                    j = i + 1
+                    while j < len(tokens) and not tokens[j]:
+                        j += 1
+                    result[field] = tokens[j] if j < len(tokens) else "k.A."
+        return result
+
+    def extract_company_data(self, page_url):
+        """Extrahiert Daten aus dem Wikipedia-Artikel"""
+        if not page_url:
+            return {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': '', 'full_infobox': 'k.A.'}
+        try:
+            response = requests.get(page_url)
+            soup = BeautifulSoup(response.text, Config.HTML_PARSER)
+            # Gesamte Infobox extrahieren
+            full_infobox = self.extract_full_infobox(soup)
+            # Versuch, Felder aus dem kompletten Infobox-Text herauszulesen
+            extracted_fields = self.extract_fields_from_infobox_text(full_infobox, ['Branche', 'Umsatz'])
+            # Fallback: Falls kein Wert gefunden wird, dann nutze die alte Methode
+            branche_val = extracted_fields.get('Branche', self._extract_infobox_value(soup, 'branche'))
+            umsatz_val = extracted_fields.get('Umsatz', self._extract_infobox_value(soup, 'umsatz'))
+            return {
+                'full_infobox': full_infobox,
+                'branche': branche_val,
+                'umsatz': umsatz_val,
+                'url': page_url
+            }
+        except Exception as e:
+            debug_print(f"Extraktionsfehler: {str(e)}")
+            return {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': page_url, 'full_infobox': 'k.A.'}
 
 # ==================== DATA PROCESSOR ====================
 class DataProcessor:
@@ -265,7 +265,6 @@ class DataProcessor:
         """Verarbeitet die angegebene Anzahl an Zeilen"""
         start_index = self.sheet_handler.get_start_index()
         print(f"Starte bei Zeile {start_index+1}")
-
         for i in range(start_index, min(start_index + num_rows, len(self.sheet_handler.sheet_values))):
             row = self.sheet_handler.sheet_values[i]
             self._process_single_row(i+1, row)
@@ -275,9 +274,7 @@ class DataProcessor:
         company_name = row_data[0] if len(row_data) > 0 else ""
         website = row_data[1] if len(row_data) > 1 else ""
         print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Verarbeite Zeile {row_num}: {company_name}")
-
         article = self.wiki_scraper.search_company_article(company_name, website)
-
         if article:
             company_data = self.wiki_scraper.extract_company_data(article.url)
         else:
@@ -285,7 +282,7 @@ class DataProcessor:
 
         current_values = self.sheet_handler.sheet.row_values(row_num)
         new_values = [
-            company_data.get('full_infobox', 'k.A.'),
+            company_data.get('full_infobox', 'k.A.'),  # Spalte G: kompletter Infobox-Text
             company_data['branche'] if company_data['branche'] != "k.A." else current_values[6] if len(current_values) > 6 else "k.A.",
             "k.A.",
             company_data['umsatz'] if company_data['umsatz'] != "k.A." else current_values[8] if len(current_values) > 8 else "k.A.",
@@ -296,12 +293,16 @@ class DataProcessor:
             Config.VERSION
         ]
         self.sheet_handler.update_row(row_num, new_values)
-        print(f"✅ Aktualisiert: Branche: {new_values[0]}, Umsatz: {new_values[2]}, URL: {new_values[6]}")
+        print(f"✅ Aktualisiert: Branche: {new_values[1]}, Umsatz: {new_values[3]}, URL: {new_values[7]}")
         time.sleep(Config.RETRY_DELAY)
 
 # ==================== MAIN ====================
 if __name__ == "__main__":
-    num_rows = int(input("Wieviele Zeilen sollen überprüft werden? "))
+    try:
+        num_rows = int(input("Wieviele Zeilen sollen überprüft werden? "))
+    except ValueError:
+        print("Ungültige Eingabe. Bitte eine Zahl eingeben.")
+        exit(1)
     processor = DataProcessor()
     processor.process_rows(num_rows)
-    print("\n✅ Wikipedia-Auswertung abgeschlossen")
\ No newline at end of file
+    print("\n✅ Wikipedia-Auswertung abgeschlossen")
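
---- Review notes ----

First, the new token-based parser. Below is a minimal, self-contained sketch of what extract_fields_from_infobox_text does with the ' | '-joined text produced by extract_full_infobox; the sample string is invented for illustration. One simplification: because the patched method pre-filters empty tokens, its inner while-skip can never advance, so the value is effectively just the next token after the label.

def extract_fields_from_infobox_text(infobox_text, field_names):
    # Standalone sketch of the method added above: find a label token
    # and return the token that follows it as the field's value.
    result = {}
    tokens = [t.strip() for t in infobox_text.split("|") if t.strip()]
    for i, token in enumerate(tokens):
        for field in field_names:
            if token.lower() == field.lower():
                result[field] = tokens[i + 1] if i + 1 < len(tokens) else "k.A."
    return result

# Invented sample resembling a cleaned company infobox:
sample = "Logo | Rechtsform | AG | Branche | Maschinenbau | Umsatz | 1,2 Mrd. EUR"
print(extract_fields_from_infobox_text(sample, ["Branche", "Umsatz"]))
# -> {'Branche': 'Maschinenbau', 'Umsatz': '1,2 Mrd. EUR'}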
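
Second, a subtle point in the new extract_company_data: dict.get(key, default) evaluates its default eagerly, so _extract_infobox_value still performs a full infobox scan for every page even when the token parser already found the field. The behavior is correct; only the fallback work is wasted. A quick demonstration:

def fallback():
    # Stands in for _extract_infobox_value; prints to show it always runs.
    print("fallback evaluated")
    return "k.A."

d = {"Branche": "Maschinenbau"}
print(d.get("Branche", fallback()))  # prints "fallback evaluated", then "Maschinenbau"

If that cost matters, an explicit `if 'Branche' in extracted_fields:` branch would keep the old soup scan as a true fallback.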
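
Finally, the corrected indices in the status print follow directly from the new row layout: inserting the infobox text at position 0 shifts every later field by one. Assuming new_values fills all twelve columns of the G{row}:R{row} range (the middle entries are elided in this diff), the visible fields map as:

# new_values index -> sheet column
#  0 -> G: full_infobox   (new)
#  1 -> H: branche        (previously new_values[0])
#  3 -> J: umsatz         (previously new_values[2])
#  7 -> N: url            (previously new_values[6])
# 11 -> R: Config.VERSION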