feat(wikipedia): refactor Wikipedia extraction logic in v1.0.9-wiki-refined

- Restores the proven BeautifulSoup-based infobox search (see the sketch after this list)
- Improves title validation via name splitting and substring matching
- Tunes revenue (Umsatz) parsing via regex for German number formatting
- Falls back to page categories only when the infobox is missing
- The version string (e.g. 1.0.9-wiki-refined) is now also written to column Q
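
For context, here is a minimal, self-contained sketch of the restored extraction path. It is illustrative only: the helper name extract_infobox_fields and the SAMPLE markup are assumptions, not part of the commit (the committed code inlines this logic inside get_wikipedia_data and runs it against live Wikipedia HTML).

    import re
    from bs4 import BeautifulSoup

    # Illustrative infobox markup in the style of de.wikipedia.org (not real page content).
    SAMPLE = """
    <table class="infobox">
      <tr><th>Branche</th><td>Maschinenbau</td></tr>
      <tr><th>Umsatz</th><td>1,5 Mrd. EUR (2023)</td></tr>
    </table>
    """

    # Hypothetical helper; the commit inlines this logic rather than factoring it out.
    def extract_infobox_fields(html_text):
        soup = BeautifulSoup(html_text, "html.parser")
        infobox = soup.find("table", {"class": "infobox"})
        branche = umsatz = ""
        if infobox is None:
            return branche, umsatz
        for row in infobox.find_all("tr"):
            th, td = row.find("th"), row.find("td")
            if not th or not td:
                continue
            if "Branche" in th.text:
                branche = td.text.strip()
            if "Umsatz" in th.text:
                # German decimal comma ("1,5") is normalized to a dot; the regex
                # keeps only the leading number and drops unit and year.
                match = re.search(r"(\d+[.,]?\d*)", td.text)
                if match:
                    umsatz = match.group(1).replace(",", ".")
        return branche, umsatz

    print(extract_infobox_fields(SAMPLE))  # -> ('Maschinenbau', '1.5')

The column-Q write itself is outside the diff excerpt below; with gspread it would typically be a call along the lines of ws.update_acell(f"Q{i + 2}", VERSION), where the exact cell addressing is an assumption.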
2025-03-31 07:57:40 +00:00
parent 16f5f77967
commit 42316cd66a

@@ -15,7 +15,7 @@ from difflib import SequenceMatcher
 from lxml import html as lh
 # === KONFIGURATION ===
-VERSION = "1.0.8-wiki-api"
+VERSION = "1.0.9-wiki-refined"
 LANG = "de"
 CREDENTIALS = "service_account.json"
 SHEET_URL = "https://docs.google.com/spreadsheets/d/1u_gHr9JUfmV1-iviRzbSe3575QEp7KLhK5jFV_gJcgo"
@@ -49,93 +49,60 @@ def extract_domain_key(url):
     parts = clean_url.split(".")
     return parts[0] if len(parts) > 1 else ""
-# === INFOBOX-PARSING MIT XPATH ===
-def parse_infobox_xpath(html_text):
-    doc = lh.fromstring(html_text)
-    branche = "k.A."
-    umsatz = "k.A."
-    try:
-        branche_xpath = doc.xpath("//table[contains(@class, 'infobox')]//tr[th[contains(normalize-space(), 'Branche') or contains(normalize-space(), 'Tätigkeitsfeld')]]/td/text()")
-        umsatz_xpath = doc.xpath("//table[contains(@class, 'infobox')]//tr[th[contains(translate(normalize-space(), 'UMSATZ', 'umsatz'), 'umsatz')]]/td/text()")
-        if branche_xpath:
-            branche = branche_xpath[0].strip()
-        if umsatz_xpath:
-            umsatz_raw = umsatz_xpath[0].strip()
-            if "mio" in umsatz_raw.lower() or "millionen" in umsatz_raw.lower():
-                match = re.search(r"(\d+[.,]?\d*)", umsatz_raw)
-                if match:
-                    umsatz = match.group(1).replace(",", ".")
-    except:
-        pass
-    return branche, umsatz
-# === WIKIPEDIA DATEN ===
-WHITELIST_KATEGORIEN = [
-    "unternehmen", "hersteller", "produktion", "industrie",
-    "maschinenbau", "technik", "dienstleistung", "chemie",
-    "pharma", "elektro", "medizin", "bau", "energie",
-    "logistik", "automobil"
-]
+# === ÄHNLICHKEITSPRÜFUNG ===
 def similarity(a, b):
     return SequenceMatcher(None, a.lower(), b.lower()).ratio()
-def validate_wikipedia_page(content, title, name, domain_key):
-    name_fragments = name.lower().split()[:2]
-    title_check = any(frag in title.lower() for frag in name_fragments)
-    content_check = any(frag in content.lower() for frag in name_fragments)
-    domain_check = domain_key and domain_key.lower() in content.lower()
-    sim_check = similarity(name, title) > 0.5
-    return (title_check or content_check or domain_check or sim_check)
+# === WIKIPEDIA DATEN LADEN ===
 def get_wikipedia_data(name, website_hint=""):
     begriffe = [name.strip(), " ".join(name.split()[:2])]
-    domain_key = extract_domain_key(website_hint)
-    if domain_key:
-        begriffe.append(domain_key)
-    best_score = 0
-    best_result = ("", "k.A.", "k.A.")
+    if website_hint:
+        parts = website_hint.replace("https://", "").replace("http://", "").split(".")
+        if len(parts) > 1:
+            begriffe.append(parts[0])
     for suchbegriff in begriffe:
-        if not suchbegriff:
-            continue
-        for attempt in range(MAX_RETRIES):
-            try:
-                results = wikipedia.search(suchbegriff, results=5)
-                for title in results:
-                    try:
-                        page = wikipedia.page(title, auto_suggest=False)
-                        html_text = requests.get(page.url, timeout=10).text
-                        if not validate_wikipedia_page(page.content, title, name, domain_key):
-                            continue
-                        branche, umsatz = parse_infobox_xpath(html_text)
-                        score = similarity(name, title)
-                        if branche != "k.A.":
-                            score += 0.1
-                        if domain_key and domain_key in page.content.lower():
-                            score += 0.1
-                        if score > best_score:
-                            best_score = score
-                            best_result = (page.url, branche or "k.A.", umsatz or "k.A.")
-                    except:
-                        continue
-            except Exception as e:
-                print(f"⚠️ Wikipedia-Fehler ({suchbegriff}, Versuch {attempt+1}): {str(e)[:100]}")
-                time.sleep(RETRY_DELAY)
-    return best_result
+        results = wikipedia.search(suchbegriff, results=3)
+        for title in results:
+            try:
+                page = wikipedia.page(title)
+                if name.lower().split()[0] not in page.title.lower():
+                    continue
+                url = page.url
+                html_content = requests.get(url, timeout=10).text
+                soup = BeautifulSoup(html_content, 'html.parser')
+                infobox = soup.find("table", {"class": "infobox"})
+                branche = umsatz = ""
+                if infobox:
+                    for row in infobox.find_all("tr"):
+                        th, td = row.find("th"), row.find("td")
+                        if not th or not td:
+                            continue
+                        if "Branche" in th.text:
+                            branche = td.text.strip()
+                        if "Umsatz" in th.text:
+                            umsatz_raw = td.text.strip()
+                            match = re.search(r"(\d+[.,]?\d*)", umsatz_raw)
+                            if match:
+                                umsatz = match.group(1).replace(",", ".")
+                if not branche:
+                    cats = page.categories
+                    branche = cats[0] if cats else "k.A."
+                return url, branche or "k.A.", umsatz or "k.A."
+            except:
+                continue
+    return "", "k.A.", "k.A."
-# === SCHRITT 1: WIKIPEDIA VERARBEITUNG ===
+# === VERARBEITUNG ===
 for i in range(start, min(start + DURCHLÄUFE, len(sheet_values))):
     row = sheet_values[i]
     print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Verarbeite Zeile {i+1}: {row[0]}")
-    url, wiki_branche, umsatz = get_wikipedia_data(row[0], row[1])
-    wiki_final = wiki_branche if url else "k.A."
+    url, branche, umsatz = get_wikipedia_data(row[0], row[1])
+    branche_final = branche if url else "k.A."
     umsatz_final = umsatz if url else "k.A."
     values = [
-        wiki_final,
-        "k.A.",  # LinkedIn-Branche leer
+        branche_final,
+        "k.A.",
         umsatz_final,
         "k.A.", "k.A.", "k.A.",
         url,