Optimize Wikipedia evaluation: new column order and extended extraction (v1.1.3)

- Column order adjusted:
  G: Wikipedia URL
  H: first paragraph of the Wikipedia article
  I: industry (from the infobox)
  J: revenue (as a number in EUR million, e.g. "159")
  K: number of employees (from the infobox)
- The full infobox output has been removed.
- First-paragraph extraction: the new method extract_first_paragraph returns the first meaningful paragraph.
- Extended infobox extraction: "Mitarbeiter" (employees) is now also extracted from the infobox.
- Revenue formatting: revenue is written as a plain number (in EUR million).
- Google Sheet update: updates now cover columns G through K (5 columns).
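As a quick illustration of the revenue normalization described above, here is a minimal, self-contained sketch that mirrors the regex heuristic of the new _extract_infobox_value in the diff below (the function name revenue_as_number and the sample string are hypothetical):

import re

def revenue_as_number(raw_value):
    """Reduce an infobox revenue string to a plain number string (EUR million)."""
    # Drop German thousands separators, turn decimal commas into dots,
    # then take the first number and round it -- same heuristic as the diff below.
    cleaned = raw_value.replace('.', '').replace(',', '.')
    match = re.search(r'(\d{1,3}(?:[.,]\d{3})*|\d+)', cleaned)
    if match:
        return str(int(round(float(match.group(1)))))
    return raw_value.strip()  # fall back to the raw text if no number is found

print(revenue_as_number("159,4 Mio. EUR"))  # -> "159" (hypothetical sample)

Note that the first numeric match wins: unit words such as Mio or Mrd are not used for scaling here, whereas the previous implementation multiplied Mrd values by 1000.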
@@ -55,7 +55,7 @@ def normalize_company_name(name):
     """Removes common legal forms from the company name and normalizes it."""
     if not name:
         return ""
-    # List of common legal forms, including "Gruppe"
+    # List of common legal forms (including "Gruppe")
    pattern = r'\b(gmbh|ag|aktiengesellschaft|co\.?\s*kg|mbh|&\s*co\.?\s*kg|e\.v\.|limited|ltd|inc|corp|corporation|gruppe)\b'
     normalized = re.sub(pattern, '', name, flags=re.IGNORECASE)
     normalized = re.sub(r'[\-–]', ' ', normalized)  # Replace hyphens and dashes with spaces
@@ -84,8 +84,8 @@ class GoogleSheetHandler:
         return next((i + 1 for i, v in enumerate(filled_n, start=1) if not str(v).strip()), len(filled_n) + 1)

     def update_row(self, row_num, values):
-        """Updates a row in the sheet (columns G through R, i.e. 12 columns)"""
-        self.sheet.update(range_name=f"G{row_num}:R{row_num}", values=[values])
+        """Updates a row in the sheet (columns G through K, i.e. 5 columns)"""
+        self.sheet.update(range_name=f"G{row_num}:K{row_num}", values=[values])

 # ==================== WIKIPEDIA SCRAPER ====================
 class WikipediaScraper:
@@ -143,25 +143,22 @@ class WikipediaScraper:
             links = infobox.find_all('a', href=True)
             for link in links:
                 href = link.get('href').lower()
-                # Skip file links
                 if href.startswith('/wiki/datei:'):
                     continue
                 if full_domain in href:
                     debug_print(f"Definitive link match found in infobox: {href}")
                     domain_found = True
                     break
-            # Search in the external links, if present
+            # Search external links, if present
             if not domain_found and hasattr(page, 'externallinks'):
                 for ext_link in page.externallinks:
-                    ext_link = ext_link.lower()
-                    if full_domain in ext_link:
+                    if full_domain in ext_link.lower():
                         debug_print(f"Definitive link match found in external links: {ext_link}")
                         domain_found = True
                         break
         except Exception as e:
             debug_print(f"Error extracting links: {str(e)}")

-        # Normalized names for comparison
         normalized_title = normalize_company_name(page.title)
         normalized_company = normalize_company_name(company_name)
         similarity = SequenceMatcher(None, normalized_title, normalized_company).ratio()
@@ -170,6 +167,101 @@ class WikipediaScraper:
         threshold = 0.60 if domain_found else Config.SIMILARITY_THRESHOLD
         return similarity >= threshold

+    def extract_first_paragraph(self, page_url):
+        """Extracts the first meaningful paragraph from the Wikipedia article."""
+        try:
+            response = requests.get(page_url)
+            soup = BeautifulSoup(response.text, Config.HTML_PARSER)
+            paragraphs = soup.find_all('p')
+            for p in paragraphs:
+                text = clean_text(p.get_text())
+                if len(text) > 50:
+                    return text
+            return "k.A."
+        except Exception as e:
+            debug_print(f"Error extracting first paragraph: {e}")
+            return "k.A."
+
+    def _extract_infobox_value(self, soup, target):
+        """Extracts values from the infobox (fallback method) for 'branche', 'umsatz' and 'mitarbeiter'."""
+        infobox = soup.find('table', class_=lambda c: c and any(kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']))
+        if not infobox:
+            return "k.A."
+        keywords_map = {
+            'branche': ['branche', 'industrie', 'tätigkeit', 'geschäftsfeld', 'sektor', 'produkte', 'leistungen', 'aktivitäten', 'wirtschaftszweig'],
+            'umsatz': ['umsatz', 'jahresumsatz', 'konzernumsatz', 'gesamtumsatz', 'erlöse', 'umsatzerlöse', 'einnahmen', 'ergebnis', 'jahresergebnis'],
+            'mitarbeiter': ['mitarbeiter', 'beschäftigte', 'personal', 'anzahl mitarbeiter']
+        }
+        keywords = keywords_map.get(target, [])
+        for row in infobox.find_all('tr'):
+            header = row.find('th')
+            if header:
+                header_text = clean_text(header.get_text()).lower()
+                if any(kw in header_text for kw in keywords):
+                    value = row.find('td')
+                    if value:
+                        raw_value = clean_text(value.get_text())
+                        if target == 'branche':
+                            clean_val = re.sub(r'\[.*?\]|\(.*?\)', '', raw_value)
+                            return ' '.join(clean_val.split()).strip()
+                        if target == 'umsatz':
+                            match = re.search(r'(\d{1,3}(?:[.,]\d{3})*|\d+)', raw_value.replace('.', '').replace(',', '.'))
+                            if match:
+                                num = float(match.group(1))
+                                # Revenue as a number in EUR million (rounded)
+                                return str(int(round(num)))
+                            return raw_value.strip()
+                        if target == 'mitarbeiter':
+                            match = re.search(r'(\d{1,3}(?:[.,]\d{3})*|\d+)', raw_value.replace('.', '').replace(',', '.'))
+                            if match:
+                                return match.group(1)
+                            return raw_value.strip()
+        return "k.A."
+
+    def extract_full_infobox(self, soup):
+        """Extracts the complete infobox as text (no longer written to the sheet)"""
+        infobox = soup.find('table', class_=lambda c: c and any(kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']))
+        if not infobox:
+            return "k.A."
+        return clean_text(infobox.get_text(separator=' | '))
+
+    def extract_fields_from_infobox_text(self, infobox_text, field_names):
+        """Extracts the requested fields from the infobox text (separated by ' | ')"""
+        result = {}
+        tokens = [token.strip() for token in infobox_text.split("|") if token.strip()]
+        for i, token in enumerate(tokens):
+            for field in field_names:
+                if token.lower() == field.lower():
+                    j = i + 1
+                    while j < len(tokens) and not tokens[j]:
+                        j += 1
+                    result[field] = tokens[j] if j < len(tokens) else "k.A."
+        return result
+
+    def extract_company_data(self, page_url):
+        """Extracts data from the Wikipedia article (first paragraph, industry, revenue, employees)"""
+        if not page_url:
+            return {'url': 'k.A.', 'first_paragraph': 'k.A.', 'branche': 'k.A.', 'umsatz': 'k.A.', 'mitarbeiter': 'k.A.'}
+        try:
+            response = requests.get(page_url)
+            soup = BeautifulSoup(response.text, Config.HTML_PARSER)
+            full_infobox = self.extract_full_infobox(soup)
+            extracted_fields = self.extract_fields_from_infobox_text(full_infobox, ['Branche', 'Umsatz', 'Mitarbeiter'])
+            branche_val = extracted_fields.get('Branche', self._extract_infobox_value(soup, 'branche'))
+            umsatz_val = extracted_fields.get('Umsatz', self._extract_infobox_value(soup, 'umsatz'))
+            mitarbeiter_val = extracted_fields.get('Mitarbeiter', self._extract_infobox_value(soup, 'mitarbeiter'))
+            first_paragraph = self.extract_first_paragraph(page_url)
+            return {
+                'url': page_url,
+                'first_paragraph': first_paragraph,
+                'branche': branche_val,
+                'umsatz': umsatz_val,
+                'mitarbeiter': mitarbeiter_val
+            }
+        except Exception as e:
+            debug_print(f"Extraction error: {str(e)}")
+            return {'url': 'k.A.', 'first_paragraph': 'k.A.', 'branche': 'k.A.', 'umsatz': 'k.A.', 'mitarbeiter': 'k.A.'}
+
     @retry_on_failure
     def search_company_article(self, company_name, website):
         """Searches for the Wikipedia article using optimized search terms (full domain name, candidate, normalized name)."""
@@ -191,72 +283,6 @@ class WikipediaScraper:
                 continue
         return None

-    def _extract_infobox_value(self, soup, target):
-        """Extracts values from the infobox (fallback method)"""
-        infobox = soup.find('table', class_=lambda c: c and any(kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']))
-        if not infobox:
-            return "k.A."
-        keywords = {
-            'branche': ['branche', 'industrie', 'tätigkeit', 'geschäftsfeld', 'sektor', 'produkte', 'leistungen', 'aktivitäten', 'wirtschaftszweig'],
-            'umsatz': ['umsatz', 'jahresumsatz', 'konzernumsatz', 'gesamtumsatz', 'erlöse', 'umsatzerlöse', 'einnahmen', 'ergebnis', 'jahresergebnis']
-        }[target]
-        for row in infobox.find_all('tr'):
-            header = row.find('th')
-            if header:
-                header_text = clean_text(header.get_text()).lower()
-                if any(kw in header_text for kw in keywords):
-                    value = row.find('td')
-                    if value:
-                        raw_value = clean_text(value.get_text())
-                        if target == 'branche':
-                            clean_val = re.sub(r'\[.*?\]|\(.*?\)', '', raw_value)
-                            return ' '.join(clean_val.split()).strip()
-                        if target == 'umsatz':
-                            match = re.search(r'(\d{1,3}(?:[.,]\d{3})*)\s*(?:Mio\.?|Millionen|Mrd\.?|Milliarden)?\s*€?', raw_value.replace('.', '').replace(',', '.'), re.IGNORECASE)
-                            if match:
-                                num = float(match.group(1))
-                                if 'mrd' in raw_value.lower():
-                                    num *= 1000
-                                return f"{num:.1f} Mio €"
-                            return raw_value.strip()
-        return "k.A."
-
-    def extract_full_infobox(self, soup):
-        """Extracts the complete infobox as text"""
-        infobox = soup.find('table', class_=lambda c: c and any(kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']))
-        if not infobox:
-            return "k.A."
-        return clean_text(infobox.get_text(separator=' | '))
-
-    def extract_fields_from_infobox_text(self, infobox_text, field_names):
-        """Extracts the requested fields from the infobox text (separated by ' | ')"""
-        result = {}
-        tokens = [token.strip() for token in infobox_text.split("|") if token.strip()]
-        for i, token in enumerate(tokens):
-            for field in field_names:
-                if token.lower() == field.lower():
-                    j = i + 1
-                    while j < len(tokens) and not tokens[j]:
-                        j += 1
-                    result[field] = tokens[j] if j < len(tokens) else "k.A."
-        return result
-
-    def extract_company_data(self, page_url):
-        """Extracts data from the Wikipedia article (infobox, industry, revenue)"""
-        if not page_url:
-            return {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': '', 'full_infobox': 'k.A.'}
-        try:
-            response = requests.get(page_url)
-            soup = BeautifulSoup(response.text, Config.HTML_PARSER)
-            full_infobox = self.extract_full_infobox(soup)
-            extracted_fields = self.extract_fields_from_infobox_text(full_infobox, ['Branche', 'Umsatz'])
-            branche_val = extracted_fields.get('Branche', self._extract_infobox_value(soup, 'branche'))
-            umsatz_val = extracted_fields.get('Umsatz', self._extract_infobox_value(soup, 'umsatz'))
-            return {'full_infobox': full_infobox, 'branche': branche_val, 'umsatz': umsatz_val, 'url': page_url}
-        except Exception as e:
-            debug_print(f"Extraction error: {str(e)}")
-            return {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': page_url, 'full_infobox': 'k.A.'}
-
 # ==================== DATA PROCESSOR ====================
 class DataProcessor:
     """Controls the overall process"""
@@ -282,22 +308,18 @@ class DataProcessor:
         if article:
             company_data = self.wiki_scraper.extract_company_data(article.url)
         else:
-            company_data = {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': '', 'full_infobox': 'k.A.'}
+            company_data = {'url': 'k.A.', 'first_paragraph': 'k.A.', 'branche': 'k.A.', 'umsatz': 'k.A.', 'mitarbeiter': 'k.A.'}

         current_values = self.sheet_handler.sheet.row_values(row_num)
         new_values = [
-            company_data.get('full_infobox', 'k.A.'),  # Column G: complete infobox text
-            company_data['branche'] if company_data['branche'] != "k.A." else current_values[6] if len(current_values) > 6 else "k.A.",
-            "k.A.",
-            company_data['umsatz'] if company_data['umsatz'] != "k.A." else current_values[8] if len(current_values) > 8 else "k.A.",
-            "k.A.", "k.A.", "k.A.",
-            company_data['url'] if company_data['url'] else current_values[12] if len(current_values) > 12 else "",
-            datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-            "k.A.", "k.A.",
-            Config.VERSION
+            company_data.get('url', 'k.A.'),              # Column G: Wikipedia URL
+            company_data.get('first_paragraph', 'k.A.'),  # Column H: first paragraph
+            company_data.get('branche', 'k.A.'),          # Column I: industry
+            company_data.get('umsatz', 'k.A.'),           # Column J: revenue (as a number in EUR million)
+            company_data.get('mitarbeiter', 'k.A.')       # Column K: employee count
         ]
         self.sheet_handler.update_row(row_num, new_values)
-        print(f"✅ Updated: industry: {new_values[1]}, revenue: {new_values[3]}, URL: {new_values[7]}")
+        print(f"✅ Updated: URL: {new_values[0]}, first paragraph: {new_values[1][:30]}..., industry: {new_values[2]}, revenue: {new_values[3]}, employees: {new_values[4]}")
         time.sleep(Config.RETRY_DELAY)

 # ==================== MAIN ====================
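Taken together, a hedged end-to-end sketch of the updated flow (class and method names come from the diff above; the constructor calls, row number, and article URL are hypothetical stand-ins, and in the real pipeline the URL would come from search_company_article):

scraper = WikipediaScraper()        # hypothetical: constructor arguments omitted
handler = GoogleSheetHandler()      # hypothetical: assumes a wrapped gspread worksheet

row_num = 2                                             # hypothetical target row
page_url = "https://de.wikipedia.org/wiki/Beispiel_AG"  # hypothetical article URL

data = scraper.extract_company_data(page_url)
handler.update_row(row_num, [
    data['url'],              # column G: Wikipedia URL
    data['first_paragraph'],  # column H: first paragraph
    data['branche'],          # column I: industry
    data['umsatz'],           # column J: revenue (number, EUR million)
    data['mitarbeiter'],      # column K: employee count
])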