Chat GPT Bugfix für Deepseek
This commit is contained in:
@@ -89,9 +89,56 @@ class GoogleSheetHandler:
|
|||||||
)
|
)
|
||||||
|
|
||||||
# ==================== WIKIPEDIA SCRAPER ====================
class WikipediaScraper:
    """Handles the Wikipedia search and company data extraction."""

    def __init__(self):
        # Configure the wikipedia client language once per scraper instance.
        # NOTE(review): relies on module-level `wikipedia` import and project
        # `Config.LANG` — both defined outside this region.
        wikipedia.set_lang(Config.LANG)
@@ -152,6 +199,7 @@ class WikipediaScraper:
|
|||||||
return similarity >= Config.SIMILARITY_THRESHOLD
|
return similarity >= Config.SIMILARITY_THRESHOLD
|
||||||
|
|
||||||
@retry_on_failure
|
@retry_on_failure
|
||||||
|
|
||||||
def search_company_article(self, company_name, website_hint=""):
|
def search_company_article(self, company_name, website_hint=""):
|
||||||
"""Hauptfunktion zur Artikelsuche"""
|
"""Hauptfunktion zur Artikelsuche"""
|
||||||
search_terms = self._generate_search_terms(company_name, website_hint)
|
search_terms = self._generate_search_terms(company_name, website_hint)
|
||||||
@@ -185,59 +233,14 @@ class WikipediaScraper:
|
|||||||
'url': page_url
|
'url': page_url
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
# ==================== WIKIPEDIA SCRAPER ====================
class WikipediaScraper:

    def _extract_infobox_value(self, soup, target):
        """Extract a specific value from the article infobox using extended
        search patterns.

        Args:
            soup: BeautifulSoup document of a Wikipedia article page.
            target: which field to extract — 'branche' (industry) or
                'umsatz' (revenue); any other key yields no keywords.

        Returns:
            The cleaned cell text (German list items joined by ', '), a
            normalized numeric string for revenue values, or "k.A."
            ("keine Angabe" / not available) when nothing matches.
        """
        # Extended infobox detection: match any table whose CSS class
        # contains one of the typical German company-infobox markers.
        infobox = soup.find('table', class_=lambda c: c and any(
            kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']
        ))

        if not infobox:
            return "k.A."

        # Header keywords per target field; unknown targets get an empty
        # list, so the row scan below finds nothing and "k.A." is returned.
        keywords = {
            'branche': [
                'branche', 'industrie', 'tätigkeitsfeld', 'geschäftsfeld',
                'sektor', 'produkte', 'leistungen', 'geschäftsbereich'
            ],
            'umsatz': [
                'umsatz', 'jahresumsatz', 'umsatzerlöse', 'gesamtumsatz',
                'konzernumsatz', 'umsatzentwicklung', 'ergebnis'
            ]
        }.get(target, [])

        # Scan every infobox row for a header cell matching a keyword.
        value = "k.A."
        for row in infobox.find_all('tr'):
            # Only cells WITHOUT a colspan attribute are treated as headers
            # (full-width colspan cells are section titles, not label cells).
            header_cells = row.find_all(['th', 'td'], attrs={'colspan': False})
            for header in header_cells:
                header_text = clean_text(header.get_text()).lower()

                if any(kw in header_text for kw in keywords):
                    # Take the next sibling cell as the value cell.
                    value_cell = header.find_next_sibling(['td', 'th'])
                    if value_cell:
                        # Lists become a comma-joined string; otherwise use
                        # the cell's full cleaned text.
                        list_items = value_cell.find_all('li')
                        if list_items:
                            value = ', '.join(clean_text(li.get_text()) for li in list_items)
                        else:
                            value = clean_text(value_cell.get_text())

                        # For revenue, pull the numeric amount via regex.
                        # NOTE(review): the Mio./Mrd. magnitude suffix is
                        # matched but discarded — confirm that callers expect
                        # the bare figure.
                        if target == 'umsatz':
                            match = re.search(
                                r'(\d{1,3}(?:[.,]\d{3})*(?:[.,]\d{2})?)\s*(?:Mio\.?|Millionen|Mrd\.?|Milliarden)?\s*(?:€|Euro|EUR)',
                                value
                            )
                            if match:
                                # German number format -> dotless with '.'
                                # as the decimal separator.
                                value = match.group(1).replace('.', '').replace(',', '.')
                        return value

        return "k.A."
# ==================== DATA PROCESSOR ====================
|
# ==================== DATA PROCESSOR ====================
|
||||||
class DataProcessor:
|
class DataProcessor:
|
||||||
|
|||||||
Reference in New Issue
Block a user