Bugfixing (ChatGPT)
GoogleSheetHandler: The update range was widened to G{row_num}:R{row_num} so that it covers 12 columns.
WikipediaScraper:
The extract_full_infobox method fetches the entire infobox text, using | as the separator.
extract_fields_from_infobox_text then searches that text specifically for the "Branche" and "Umsatz" fields.
extract_company_data first tries to extract the values from the complete infobox text before using the fallback.
DataProcessor: The sheet output now contains the full infobox text as its first column.
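
A minimal sketch of the new lookup on the pipe-separated infobox text, simplified from extract_fields_from_infobox_text below; the extract_fields helper name and the sample infobox string are made up for illustration:

# Simplified sketch of extract_fields_from_infobox_text. The sample string
# is invented; real infobox text comes from extract_full_infobox, which
# joins the table cells with get_text(separator=' | ').
def extract_fields(infobox_text, field_names):
    result = {}
    # split on the '|' separator and drop empty tokens
    tokens = [t.strip() for t in infobox_text.split("|") if t.strip()]
    for i, token in enumerate(tokens):
        for field in field_names:
            if token.lower() == field.lower():
                # the value is the token right after the field label
                result[field] = tokens[i + 1] if i + 1 < len(tokens) else "k.A."
    return result

sample = "Beispiel AG | Branche | Maschinenbau | Umsatz | 1,2 Mrd. EUR | Sitz | Berlin"
print(extract_fields(sample, ["Branche", "Umsatz"]))
# -> {'Branche': 'Maschinenbau', 'Umsatz': '1,2 Mrd. EUR'}

Because empty tokens are filtered out while splitting, the value is simply the next token after a matching label; a label that appears as the last token falls back to "k.A.".
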
@@ -46,7 +46,6 @@ def clean_text(text):
     """Cleans text of unwanted characters"""
     if not text:
         return "k.A."
-
     text = str(text)
     text = re.sub(r'\[\d+\]', '', text)  # remove reference numbers
     text = re.sub(r'\s+', ' ', text).strip()
@@ -80,6 +79,7 @@ class GoogleSheetHandler:

     def update_row(self, row_num, values):
         """Updates a row in the sheet"""
+        # NOTE: extend the range to G through R to cover 12 columns.
         self.sheet.update(
             range_name=f"G{row_num}:R{row_num}",
             values=[values]
@@ -96,7 +96,6 @@ class WikipediaScraper:
         """Normalizes URLs to bare domain names"""
         if not website:
             return ""
-
         domain = re.sub(r'^https?:\/\/(www\.)?', '', website.lower())
         domain = re.sub(r'\/.*$', '', domain)
         domain = domain.split('.')[0]
@@ -106,22 +105,18 @@ class WikipediaScraper:
     def _generate_search_terms(self, company_name, website):
         """Generates search terms with optimized URL processing"""
         terms = []

         clean_name = re.sub(
             r'\s+(GmbH|AG|KG|Co\. KG|e\.V\.|mbH|& Co).*$',
             '',
             company_name
         ).strip()
         terms.extend([company_name.strip(), clean_name])

         domain = self._normalize_domain(website)
         if domain and domain not in ["de", "com", "org"]:
             terms.append(domain)

         name_parts = [p for p in re.split(r'\W+', clean_name) if p and len(p) > 3]
         if len(name_parts) >= 2:
             terms.append(" ".join(name_parts[:2]))

         debug_print(f"Generierte Suchbegriffe: {list(set(terms))}")
         return list(set(terms))

@@ -131,7 +126,6 @@ class WikipediaScraper:
         clean_company = re.sub(r'[^a-zäöüß ]', '', company_name.lower())
         similarity = SequenceMatcher(None, clean_title, clean_company).ratio()
         debug_print(f"Ähnlichkeit: {similarity:.2f} ({clean_title} vs {clean_company})")
-
         if domain_hint:
             try:
                 html_content = requests.get(page.url).text.lower()
@@ -140,7 +134,6 @@ class WikipediaScraper:
                 return False
             except Exception as e:
                 debug_print(f"Domain-Check fehlgeschlagen: {str(e)}")

         return similarity >= Config.SIMILARITY_THRESHOLD

     @retry_on_failure
@@ -148,12 +141,10 @@ class WikipediaScraper:
         """Main function for the article search"""
         search_terms = self._generate_search_terms(company_name, website)
         domain_hint = self._normalize_domain(website)
-
         for term in search_terms:
             try:
                 results = wikipedia.search(term, results=Config.WIKIPEDIA_SEARCH_RESULTS)
                 debug_print(f"Suchergebnisse für '{term}': {results}")
-
                 for title in results:
                     try:
                         page = wikipedia.page(title, auto_suggest=False)
@@ -168,35 +159,13 @@ class WikipediaScraper:
                         continue
         return None

-    def extract_company_data(self, page_url):
-        """Extracts data from the Wikipedia article"""
-        if not page_url:
-            return {
-                'full_infobox': self.extract_full_infobox(soup),'branche': 'k.A.', 'umsatz': 'k.A.', 'url': ''}
-
-        try:
-            response = requests.get(page_url)
-            soup = BeautifulSoup(response.text, Config.HTML_PARSER)
-            return {
-                'full_infobox': self.extract_full_infobox(soup),
-                'branche': self._extract_infobox_value(soup, 'branche'),
-                'umsatz': self._extract_infobox_value(soup, 'umsatz'),
-                'url': page_url
-            }
-        except Exception as e:
-            debug_print(f"Extraktionsfehler: {str(e)}")
-            return {
-                'full_infobox': self.extract_full_infobox(soup),'branche': 'k.A.', 'umsatz': 'k.A.', 'url': page_url}

     def _extract_infobox_value(self, soup, target):
-        """Extracts values from the infobox"""
+        """Extracts values from the infobox (fallback method)"""
         infobox = soup.find('table', class_=lambda c: c and any(
             kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']
         ))

         if not infobox:
             return "k.A."

         keywords = {
             'branche': [
                 'branche', 'industrie', 'tätigkeit',
@@ -209,21 +178,17 @@ class WikipediaScraper:
                 'einnahmen', 'ergebnis', 'jahresergebnis'
             ]
         }[target]
-
         for row in infobox.find_all('tr'):
             header = row.find('th')
             if header:
                 header_text = clean_text(header.get_text()).lower()
-
                 if any(kw in header_text for kw in keywords):
                     value = row.find('td')
                     if value:
                         raw_value = clean_text(value.get_text())
-
                         if target == 'branche':
-                            clean = re.sub(r'\[.*?\]|\(.*?\)', '', raw_value)
-                            return ' '.join(clean.split()).strip()
+                            clean_val = re.sub(r'\[.*?\]|\(.*?\)', '', raw_value)
+                            return ' '.join(clean_val.split()).strip()
-
                         if target == 'umsatz':
                             match = re.search(
                                 r'(\d{1,3}(?:[.,]\d{3})*)\s*'
@@ -238,21 +203,56 @@ class WikipediaScraper:
                                     num *= 1000
                                 return f"{num:.1f} Mio €"
                             return raw_value.strip()

         return "k.A."


     def extract_full_infobox(self, soup):
         """Extracts the complete infobox as text"""
         infobox = soup.find('table', class_=lambda c: c and any(
             kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']
         ))

         if not infobox:
             return "k.A."

         return clean_text(infobox.get_text(separator=' | '))

+    def extract_fields_from_infobox_text(self, infobox_text, field_names):
+        """Extracts the requested fields (e.g. Branche, Umsatz) from the infobox text.
+        The fields are assumed to be separated by ' | '."""
+        result = {}
+        tokens = [token.strip() for token in infobox_text.split("|") if token.strip()]
+        for i, token in enumerate(tokens):
+            for field in field_names:
+                if token.lower() == field.lower():
+                    # take the next non-empty token as the value
+                    j = i + 1
+                    while j < len(tokens) and not tokens[j]:
+                        j += 1
+                    result[field] = tokens[j] if j < len(tokens) else "k.A."
+        return result
+
+    def extract_company_data(self, page_url):
+        """Extracts data from the Wikipedia article"""
+        if not page_url:
+            return {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': '', 'full_infobox': 'k.A.'}
+        try:
+            response = requests.get(page_url)
+            soup = BeautifulSoup(response.text, Config.HTML_PARSER)
+            # extract the entire infobox
+            full_infobox = self.extract_full_infobox(soup)
+            # try to read the fields from the complete infobox text first
+            extracted_fields = self.extract_fields_from_infobox_text(full_infobox, ['Branche', 'Umsatz'])
+            # fallback: if no value was found, use the old method
+            branche_val = extracted_fields.get('Branche', self._extract_infobox_value(soup, 'branche'))
+            umsatz_val = extracted_fields.get('Umsatz', self._extract_infobox_value(soup, 'umsatz'))
+            return {
+                'full_infobox': full_infobox,
+                'branche': branche_val,
+                'umsatz': umsatz_val,
+                'url': page_url
+            }
+        except Exception as e:
+            debug_print(f"Extraktionsfehler: {str(e)}")
+            return {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': page_url, 'full_infobox': 'k.A.'}

 # ==================== DATA PROCESSOR ====================
 class DataProcessor:
     """Controls the overall process"""
@@ -265,7 +265,6 @@ class DataProcessor:
         """Processes the specified number of rows"""
         start_index = self.sheet_handler.get_start_index()
         print(f"Starte bei Zeile {start_index+1}")
-
         for i in range(start_index, min(start_index + num_rows, len(self.sheet_handler.sheet_values))):
             row = self.sheet_handler.sheet_values[i]
             self._process_single_row(i+1, row)
@@ -275,9 +274,7 @@ class DataProcessor:
         company_name = row_data[0] if len(row_data) > 0 else ""
         website = row_data[1] if len(row_data) > 1 else ""
         print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Verarbeite Zeile {row_num}: {company_name}")
-
         article = self.wiki_scraper.search_company_article(company_name, website)
-
         if article:
             company_data = self.wiki_scraper.extract_company_data(article.url)
         else:
@@ -285,7 +282,7 @@ class DataProcessor:

         current_values = self.sheet_handler.sheet.row_values(row_num)
         new_values = [
-            company_data.get('full_infobox', 'k.A.'),
+            company_data.get('full_infobox', 'k.A.'),  # column G: complete infobox text
            company_data['branche'] if company_data['branche'] != "k.A." else current_values[6] if len(current_values) > 6 else "k.A.",
            "k.A.",
            company_data['umsatz'] if company_data['umsatz'] != "k.A." else current_values[8] if len(current_values) > 8 else "k.A.",
@@ -296,12 +293,16 @@ class DataProcessor:
             Config.VERSION
         ]
         self.sheet_handler.update_row(row_num, new_values)
-        print(f"✅ Aktualisiert: Branche: {new_values[0]}, Umsatz: {new_values[2]}, URL: {new_values[6]}")
+        print(f"✅ Aktualisiert: Branche: {new_values[1]}, Umsatz: {new_values[3]}, URL: {new_values[7]}")
         time.sleep(Config.RETRY_DELAY)

 # ==================== MAIN ====================
 if __name__ == "__main__":
-    num_rows = int(input("Wieviele Zeilen sollen überprüft werden? "))
+    try:
+        num_rows = int(input("Wieviele Zeilen sollen überprüft werden? "))
+    except Exception as e:
+        print("Ungültige Eingabe. Bitte eine Zahl eingeben.")
+        exit(1)
     processor = DataProcessor()
     processor.process_rows(num_rows)
     print("\n✅ Wikipedia-Auswertung abgeschlossen")