Deepseek bugfix v10
Key changes:
- Added search_company_article method
- Added extract_company_data method
- Consistent error handling
- Domain validation in the article search
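For orientation, a minimal usage sketch of the two new methods, based only on what this diff shows; the bare WikipediaScraper constructor call and the example company are assumptions, not part of the commit:

    # Hypothetical usage sketch; the diff does not show WikipediaScraper.__init__,
    # so constructing it without arguments is an assumption.
    scraper = WikipediaScraper()

    # search_company_article derives search terms from the name and website,
    # queries Wikipedia, and validates each hit via title similarity plus an
    # optional domain check against the article HTML.
    article = scraper.search_company_article("Beispiel GmbH", "https://www.beispiel.de")

    # extract_company_data reads 'branche' and 'umsatz' from the article
    # infobox, falling back to 'k.A.' placeholders on any failure.
    if article:
        data = scraper.extract_company_data(article.url)
    else:
        data = {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': ''}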
@@ -12,7 +12,7 @@ import csv
 
 # ==================== KONFIGURATION ====================
 class Config:
-    VERSION = "1.0.13"
+    VERSION = "1.0.14"
     LANG = "de"
     CREDENTIALS_FILE = "service_account.json"
     SHEET_URL = "https://docs.google.com/spreadsheets/d/1u_gHr9JUfmV1-iviRzbSe3575QEp7KLhK5jFV_gJcgo"
@@ -97,11 +97,9 @@ class WikipediaScraper:
         if not website:
             return ""
 
-        # Entferne Protokoll, Pfad und Query-Parameter
-
         domain = re.sub(r'^https?:\/\/(www\.)?', '', website.lower())
         domain = re.sub(r'\/.*$', '', domain)
-        domain = domain.split('.')[0] # Nur den Subdomain-Teil
+        domain = domain.split('.')[0]
 
         debug_print(f"Normalisierte Domain: {domain}")
         return domain
 
@@ -109,7 +107,6 @@ class WikipediaScraper:
         """Generiert Suchbegriffe mit optimierter URL-Verarbeitung"""
         terms = []
 
-        # 1. Originalname mit und ohne Rechtsform
         clean_name = re.sub(
             r'\s+(GmbH|AG|KG|Co\. KG|e\.V\.|mbH|& Co).*$',
             '',
@@ -117,12 +114,10 @@ class WikipediaScraper:
         ).strip()
         terms.extend([company_name.strip(), clean_name])
 
-        # 2. Domain-Name aus URL
         domain = self._normalize_domain(website)
         if domain and domain not in ["de", "com", "org"]:
             terms.append(domain)
 
-        # 3. Erste zwei relevanten Wörter
         name_parts = [p for p in re.split(r'\W+', clean_name) if p and len(p) > 3]
         if len(name_parts) >= 2:
             terms.append(" ".join(name_parts[:2]))
@@ -130,8 +125,68 @@ class WikipediaScraper:
         debug_print(f"Generierte Suchbegriffe: {list(set(terms))}")
         return list(set(terms))
 
+    def _validate_article(self, page, company_name, domain_hint):
+        """Überprüft Artikelrelevanz"""
+        clean_title = re.sub(r'\(.*?\)', '', page.title).lower()
+        clean_company = re.sub(r'[^a-zäöüß ]', '', company_name.lower())
+        similarity = SequenceMatcher(None, clean_title, clean_company).ratio()
+        debug_print(f"Ähnlichkeit: {similarity:.2f} ({clean_title} vs {clean_company})")
+
+        if domain_hint:
+            try:
+                html_content = requests.get(page.url).text.lower()
+                if domain_hint not in html_content:
+                    debug_print(f"Domain-Hint '{domain_hint}' nicht gefunden")
+                    return False
+            except Exception as e:
+                debug_print(f"Domain-Check fehlgeschlagen: {str(e)}")
+
+        return similarity >= Config.SIMILARITY_THRESHOLD
+
+    @retry_on_failure
+    def search_company_article(self, company_name, website):
+        """Hauptfunktion zur Artikelsuche"""
+        search_terms = self._generate_search_terms(company_name, website)
+        domain_hint = self._normalize_domain(website)
+
+        for term in search_terms:
+            try:
+                results = wikipedia.search(term, results=Config.WIKIPEDIA_SEARCH_RESULTS)
+                debug_print(f"Suchergebnisse für '{term}': {results}")
+
+                for title in results:
+                    try:
+                        page = wikipedia.page(title, auto_suggest=False)
+                        if self._validate_article(page, company_name, domain_hint):
+                            return page
+                    except (wikipedia.exceptions.DisambiguationError,
+                            wikipedia.exceptions.PageError) as e:
+                        debug_print(f"Seitenfehler: {str(e)}")
+                        continue
+            except Exception as e:
+                debug_print(f"Suchfehler: {str(e)}")
+                continue
+        return None
+
+    def extract_company_data(self, page_url):
+        """Extrahiert Daten aus dem Wikipedia-Artikel"""
+        if not page_url:
+            return {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': ''}
+
+        try:
+            response = requests.get(page_url)
+            soup = BeautifulSoup(response.text, Config.HTML_PARSER)
+            return {
+                'branche': self._extract_infobox_value(soup, 'branche'),
+                'umsatz': self._extract_infobox_value(soup, 'umsatz'),
+                'url': page_url
+            }
+        except Exception as e:
+            debug_print(f"Extraktionsfehler: {str(e)}")
+            return {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': page_url}
+
     def _extract_infobox_value(self, soup, target):
-        """Robuste Infobox-Extraktion mit erweiterten Mustern"""
+        """Extrahiert Werte aus der Infobox"""
         infobox = soup.find('table', class_=lambda c: c and any(
             kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']
         ))
@@ -139,23 +194,19 @@ class WikipediaScraper:
         if not infobox:
             return "k.A."
 
-        # Erweiterte Keywords für Deutsch
         keywords = {
             'branche': [
                 'branche', 'industrie', 'tätigkeit',
                 'geschäftsfeld', 'sektor', 'produkte',
-                'leistungen', 'aktivitäten', 'wirtschaftszweig',
-                'geschäftsbereich', 'tätigkeitsbereich'
+                'leistungen', 'aktivitäten', 'wirtschaftszweig'
             ],
             'umsatz': [
                 'umsatz', 'jahresumsatz', 'konzernumsatz',
                 'gesamtumsatz', 'erlöse', 'umsatzerlöse',
-                'einnahmen', 'ergebnis', 'betriebsergebnis',
-                'jahresergebnis', 'gewinn'
+                'einnahmen', 'ergebnis', 'jahresergebnis'
             ]
         }[target]
 
-        # Durchsuche alle Tabellenzellen
         for row in infobox.find_all('tr'):
             header = row.find('th')
             if header:
@@ -163,33 +214,27 @@ class WikipediaScraper:
 
                 if any(kw in header_text for kw in keywords):
                     value = row.find('td')
-                    if not value:
-                        continue
-
-                    raw_value = clean_text(value.get_text())
-
-                    # Branchenbereinigung
-                    if target == 'branche':
-                        # Entferne Klammern und Sonderzeichen
-                        clean = re.sub(r'\[.*?\]|\(.*?\)', '', raw_value)
-                        return ' '.join(clean.split()).strip()
-
-                    # Umsatzbereinigung
-                    if target == 'umsatz':
-                        # Finde numerische Werte mit optionaler Einheit
-                        match = re.search(
-                            r'(\d{1,3}(?:[.,]\d{3})*)\s*'
-                            r'(?:Mio\.?|Millionen|Mrd\.?|Milliarden)?\s*'
-                            r'€?',
-                            raw_value.replace('.', '').replace(',', '.'),
-                            re.IGNORECASE
-                        )
-                        if match:
-                            num = float(match.group(1))
-                            if 'mrd' in raw_value.lower() or 'milliarden' in raw_value.lower():
-                                num *= 1000
-                            return f"{num:.1f} Mio €"
-                        return raw_value.strip()
+                    if value:
+                        raw_value = clean_text(value.get_text())
+
+                        if target == 'branche':
+                            clean = re.sub(r'\[.*?\]|\(.*?\)', '', raw_value)
+                            return ' '.join(clean.split()).strip()
+
+                        if target == 'umsatz':
+                            match = re.search(
+                                r'(\d{1,3}(?:[.,]\d{3})*)\s*'
+                                r'(?:Mio\.?|Millionen|Mrd\.?|Milliarden)?\s*'
+                                r'€?',
+                                raw_value.replace('.', '').replace(',', '.'),
+                                re.IGNORECASE
+                            )
+                            if match:
+                                num = float(match.group(1))
+                                if 'mrd' in raw_value.lower():
+                                    num *= 1000
+                                return f"{num:.1f} Mio €"
+                            return raw_value.strip()
 
         return "k.A."
 
@@ -216,35 +261,27 @@ class DataProcessor:
             website = row_data[1] if len(row_data) > 1 else ""
             print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Verarbeite Zeile {row_num}: {company_name}")
 
-            # Wikipedia-Suche
             article = self.wiki_scraper.search_company_article(company_name, website)
 
-            # Datenextraktion
-            company_data = self.wiki_scraper.extract_company_data(article.url) if article else {
-                'branche': 'k.A.',
-                'umsatz': 'k.A.',
-                'url': row_data[12] if len(row_data) > 12 else ""
-            }
-
-            # Sheet-Update
-            self._update_sheet(row_num, company_data)
-            time.sleep(Config.RETRY_DELAY)
-
-    def _update_sheet(self, row_num, data):
-        """Aktualisiert die Zeilendaten"""
-        current_values = self.sheet_handler.sheet.row_values(row_num)
-        new_values = [
-            data['branche'] or (current_values[6] if len(current_values) > 6 else "k.A."),
-            "k.A.", # LinkedIn-Branche
-            data['umsatz'] or (current_values[8] if len(current_values) > 8 else "k.A."),
-            "k.A.", "k.A.", "k.A.",
-            data['url'] or (current_values[12] if len(current_values) > 12 else ""),
-            datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-            "k.A.", "k.A.",
-            Config.VERSION
-        ]
-        self.sheet_handler.update_row(row_num, new_values)
-        print(f"✅ Aktualisiert: Branche: {new_values[0]}, Umsatz: {new_values[2]}, URL: {new_values[6]}")
+            if article:
+                company_data = self.wiki_scraper.extract_company_data(article.url)
+            else:
+                company_data = {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': ''}
+
+            current_values = self.sheet_handler.sheet.row_values(row_num)
+            new_values = [
+                company_data['branche'] if company_data['branche'] != "k.A." else current_values[6] if len(current_values) > 6 else "k.A.",
+                "k.A.",
+                company_data['umsatz'] if company_data['umsatz'] != "k.A." else current_values[8] if len(current_values) > 8 else "k.A.",
+                "k.A.", "k.A.", "k.A.",
+                company_data['url'] if company_data['url'] else current_values[12] if len(current_values) > 12 else "",
+                datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+                "k.A.", "k.A.",
+                Config.VERSION
+            ]
+            self.sheet_handler.update_row(row_num, new_values)
+            print(f"✅ Aktualisiert: Branche: {new_values[0]}, Umsatz: {new_values[2]}, URL: {new_values[6]}")
+            time.sleep(Config.RETRY_DELAY)
 
 # ==================== MAIN ====================
 if __name__ == "__main__":