Wiki Search Optimization
Optimized search terms:
– Only the original company name, its first two words, and the domain key (the first segment of the URL) are used.
– This avoids irrelevant terms such as "www".
Validation:
– Before an article is accepted, the code checks that the domain key occurs in the HTML and that the article title is sufficiently similar to the company name.
Structure:
– The code is consolidated in a new file named anpassungen.py and ready to use.
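A minimal usage sketch of the resulting flow, assuming the module name anpassungen.py from the note above; the class and method names come from the diff below, while the company name and URL are invented examples:

from anpassungen import WikipediaScraper  # module name per the commit note (an assumption)

scraper = WikipediaScraper()
# "https://www.musterfirma.de/kontakt" yields the domain key "musterfirma"
page = scraper.search_company_article("Musterfirma GmbH", "https://www.musterfirma.de/kontakt")
if page:
    data = scraper.extract_company_data(page.url)
    print(data['branche'], data['umsatz'])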
@@ -12,7 +12,7 @@ import csv
 # ==================== CONFIGURATION ====================
 
 class Config:
-    VERSION = "1.0.14"
+    VERSION = "1.1.1"
     LANG = "de"
     CREDENTIALS_FILE = "service_account.json"
     SHEET_URL = "https://docs.google.com/spreadsheets/d/1u_gHr9JUfmV1-iviRzbSe3575QEp7KLhK5jFV_gJcgo"
@@ -21,7 +21,7 @@ class Config:
     LOG_CSV = "gpt_antworten_log.csv"
     SIMILARITY_THRESHOLD = 0.65
     DEBUG = True
-    WIKIPEDIA_SEARCH_RESULTS = 10
+    WIKIPEDIA_SEARCH_RESULTS = 5
     HTML_PARSER = "html.parser"
 
 # ==================== HELPER FUNCTIONS ====================
@@ -73,8 +73,7 @@ class GoogleSheetHandler:
         return next((i + 1 for i, v in enumerate(filled_n, start=1) if not str(v).strip()), len(filled_n) + 1)
 
     def update_row(self, row_num, values):
-        """Updates a row in the sheet"""
-        # The range G to R spans 12 columns
+        """Updates a row in the sheet (columns G to R, i.e. 12 columns)"""
         self.sheet.update(range_name=f"G{row_num}:R{row_num}", values=[values])
 
 # ==================== WIKIPEDIA SCRAPER ====================
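For reference, a hedged sketch of what a call to update_row writes, assuming the GoogleSheetHandler above can be constructed without arguments (an assumption; the constructor is not shown in this hunk) and using placeholder values:

handler = GoogleSheetHandler()  # assumption: no-arg construction, credentials via Config
values = ["k.A."] * 12          # one value per column G, H, ..., R
handler.update_row(42, values)  # -> self.sheet.update(range_name="G42:R42", values=[values])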
@@ -84,47 +83,53 @@ class WikipediaScraper:
     def __init__(self):
         wikipedia.set_lang(Config.LANG)
 
-    def _normalize_domain(self, website):
-        """Normalizes URLs to bare domain names without protocol and www"""
+    def _get_domain_key(self, website):
+        """Extracts the domain key from the URL (first part, without protocol and www)"""
         if not website:
             return ""
         website = website.lower().strip()
-        website = re.sub(r'^https?:\/\/', '', website)  # Remove http/https
-        website = re.sub(r'^www\.', '', website)  # Remove leading www.
-        website = re.sub(r'\/.*$', '', website)  # Remove path
-        debug_print(f"Normalized domain: {website}")
+        website = re.sub(r'^https?:\/\/', '', website)
+        website = re.sub(r'^www\.', '', website)
+        parts = website.split(".")
+        if len(parts) > 1:
+            return parts[0]
         return website
 
     def _generate_search_terms(self, company_name, website):
-        """Generates search terms, first based on the URL, then the company name"""
+        """
+        Generates search terms based on:
+        1. The original company name
+        2. The first two words of the company name
+        3. The domain key of the website (if available)
+        """
         terms = []
-        normalized_url = self._normalize_domain(website)
-        if normalized_url:
-            terms.append(normalized_url)
-        clean_name = re.sub(r'\s+(GmbH|AG|KG|Co\. KG|e\.V\.|mbH|& Co).*$', '', company_name).strip()
-        if clean_name and clean_name not in terms:
-            terms.append(clean_name)
-        if company_name.strip() and company_name.strip() not in terms:
-            terms.append(company_name.strip())
-        name_parts = [p for p in re.split(r'\W+', clean_name) if p and len(p) > 3]
-        if len(name_parts) >= 2:
-            candidate = " ".join(name_parts[:2])
-            if candidate not in terms:
-                terms.append(candidate)
+        original_name = company_name.strip()
+        candidate = " ".join(company_name.split()[:2])
+        if original_name:
+            terms.append(original_name)
+        if candidate and candidate not in terms:
+            terms.append(candidate)
+        domain_key = self._get_domain_key(website)
+        if domain_key and domain_key not in terms:
+            terms.append(domain_key)
         debug_print(f"Generated search terms: {terms}")
         return terms
 
-    def _validate_article(self, page, company_name, domain_hint):
-        """Checks article relevance based on similarity and domain hint"""
+    def _validate_article(self, page, company_name, domain_key):
+        """
+        Validates the article:
+        - Checks whether the domain key occurs in the HTML content (if available)
+        - Compares the Wikipedia title with the company name via a similarity check
+        """
         clean_title = re.sub(r'\(.*?\)', '', page.title).lower()
-        clean_company = re.sub(r'[^a-zäöüß ]', '', company_name.lower())
+        clean_company = company_name.lower().strip()
         similarity = SequenceMatcher(None, clean_title, clean_company).ratio()
         debug_print(f"Similarity: {similarity:.2f} ({clean_title} vs {clean_company})")
-        if domain_hint:
+        if domain_key:
             try:
-                html_content = requests.get(page.url).text.lower()
-                if domain_hint not in html_content:
-                    debug_print(f"Domain hint '{domain_hint}' not found")
+                html_raw = requests.get(page.url).text.lower()
+                if domain_key not in html_raw:
+                    debug_print(f"Domain hint '{domain_key}' not found")
                     return False
             except Exception as e:
                 debug_print(f"Domain check failed: {str(e)}")
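To make the new domain-key behavior concrete, a standalone sketch that replays the _get_domain_key steps from the hunk above on invented URLs:

import re

def get_domain_key(website):
    # Mirrors WikipediaScraper._get_domain_key above.
    if not website:
        return ""
    website = website.lower().strip()
    website = re.sub(r'^https?:\/\/', '', website)
    website = re.sub(r'^www\.', '', website)
    parts = website.split(".")
    return parts[0] if len(parts) > 1 else website

assert get_domain_key("https://www.example.com/about") == "example"
assert get_domain_key("shop.example.de") == "shop"  # a subdomain becomes the key
assert get_domain_key("") == ""

Note that the path no longer needs to be stripped explicitly: everything after the first dot ends up in parts[1:] and is discarded.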
@@ -132,9 +137,9 @@ class WikipediaScraper:
 
     @retry_on_failure
     def search_company_article(self, company_name, website):
-        """Searches first for the URL, then for the company name"""
+        """Searches for the article with the optimized search terms (name, candidate, domain key)."""
         search_terms = self._generate_search_terms(company_name, website)
-        domain_hint = self._normalize_domain(website)
+        domain_key = self._get_domain_key(website)
         for term in search_terms:
             try:
                 results = wikipedia.search(term, results=Config.WIKIPEDIA_SEARCH_RESULTS)
@@ -142,7 +147,7 @@ class WikipediaScraper:
                 for title in results:
                     try:
                         page = wikipedia.page(title, auto_suggest=False)
-                        if self._validate_article(page, company_name, domain_hint):
+                        if self._validate_article(page, company_name, domain_key):
                             return page
                     except (wikipedia.exceptions.DisambiguationError, wikipedia.exceptions.PageError) as e:
                         debug_print(f"Page error: {str(e)}")
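The similarity gate in _validate_article compares the cleaned title and company name against Config.SIMILARITY_THRESHOLD (0.65). A quick standalone check with invented strings:

import re
from difflib import SequenceMatcher

title = "Musterfirma (Unternehmen)"   # invented article title
company = "Musterfirma GmbH"          # invented company name
clean_title = re.sub(r'\(.*?\)', '', title).lower()  # "musterfirma "
clean_company = company.lower().strip()              # "musterfirma gmbh"
similarity = SequenceMatcher(None, clean_title, clean_company).ratio()
print(similarity)          # ~0.86 for this pair
print(similarity >= 0.65)  # True: this title would pass the gate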
@@ -153,7 +158,7 @@ class WikipediaScraper:
         return None
 
     def _extract_infobox_value(self, soup, target):
-        """Extracts values from the infobox (fallback)"""
+        """Extracts values from the infobox (fallback method)"""
         infobox = soup.find('table', class_=lambda c: c and any(kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']))
         if not infobox:
             return "k.A."
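The fallback lookup above matches the infobox table by CSS class; BeautifulSoup calls a class_ callable once per individual class value. A minimal sketch on hand-written HTML:

from bs4 import BeautifulSoup

html = '<table class="infobox vcard"><tr><th>Branche</th><td>Maschinenbau</td></tr></table>'
soup = BeautifulSoup(html, "html.parser")
# Same matcher as in the diff: any keyword contained in a class value.
infobox = soup.find('table', class_=lambda c: c and any(
    kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']))
print(infobox is not None)  # True: both "infobox" and "vcard" match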
@@ -203,7 +208,7 @@ class WikipediaScraper:
         return result
 
     def extract_company_data(self, page_url):
-        """Extracts data from the Wikipedia article"""
+        """Extracts data from the Wikipedia article (infobox, industry, revenue)"""
         if not page_url:
             return {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': '', 'full_infobox': 'k.A.'}
         try:
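Per the early return above, callers get a stable dict shape even when no article was found. A hedged check, again assuming the module name anpassungen.py from the commit note:

from anpassungen import WikipediaScraper  # assumed module name

scraper = WikipediaScraper()
data = scraper.extract_company_data(None)  # no article URL available
assert data == {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': '', 'full_infobox': 'k.A.'}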