Extend re-evaluation via flag column and optimize revenue and employee extraction (v1.1.6)
- Flag column A: only rows with an "x" in column A are processed.
- Shifted column mapping: company name now in column B, website in column C.
- Output is written to columns H through L (H: Wikipedia URL, I: first paragraph, J: industry, K: revenue in € million, L: employee count); date and time go to column O, the version to column R.
- Revenue extraction: extended regex logic to detect thousands separators and convert amounts to € million.
- Employee extraction: switched to re.findall to robustly capture the first number fragment (a standalone sketch of both parsing rules follows below).
- Further changes: fixed the deprecation warnings in the update calls by using named arguments (see the note after the diff).
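A minimal standalone sketch of the two new parsing rules, assuming German-style number formatting in the infobox values. The function names parse_revenue_mio_eur and parse_employee_count are illustrative only; the bodies mirror the 'umsatz' and 'mitarbeiter' branches of _extract_infobox_value in the diff below:

    import re

    def parse_revenue_mio_eur(raw_value):
        # Mirror of the new 'umsatz' branch: normalize separators, convert to EUR million.
        raw = raw_value.lower()
        match = re.search(r'(\d{1,3}(?:[.,]\d{3})*|\d+)', raw)
        if not match:
            return raw_value.strip()
        num_str = match.group(1)
        if ',' in num_str:
            # Comma present: drop dots (thousands), turn the comma into a decimal point
            num_str = num_str.replace('.', '').replace(',', '.')
        else:
            # Only dots present: treat them as thousands separators
            num_str = num_str.replace('.', '')
        try:
            num = float(num_str)
        except ValueError:
            return raw_value.strip()
        if 'mrd' in raw or 'milliarden' in raw:
            num *= 1000      # billions -> millions
        elif 'mio' in raw or 'millionen' in raw:
            pass             # already in millions
        else:
            num /= 1e6       # plain euro amount -> millions
        return str(int(round(num)))

    def parse_employee_count(raw_value):
        # Mirror of the new 'mitarbeiter' branch: return the first digit run.
        numbers = re.findall(r'\d+', raw_value.lower())
        return numbers[0] if numbers else raw_value.strip()

    print(parse_revenue_mio_eur("2 Mrd. Euro"))        # -> 2000
    print(parse_revenue_mio_eur("250 Millionen Euro")) # -> 250
    print(parse_revenue_mio_eur("750.000.000 Euro"))   # -> 750
    print(parse_employee_count("12000 (2023)"))        # -> 12000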
@@ -12,7 +12,7 @@ import csv
 
 # ==================== CONFIGURATION ====================
 class Config:
-    VERSION = "1.1.5"
+    VERSION = "1.1.6"  # new version
     LANG = "de"
     CREDENTIALS_FILE = "service_account.json"
     SHEET_URL = "https://docs.google.com/spreadsheets/d/1u_gHr9JUfmV1-iviRzbSe3575QEp7KLhK5jFV_gJcgo"
@@ -26,7 +26,6 @@ class Config:
 
 # ==================== HELPER FUNCTIONS ====================
 def retry_on_failure(func):
-    """Decorator for retrying on errors"""
     def wrapper(*args, **kwargs):
         for attempt in range(Config.MAX_RETRIES):
             try:
@@ -38,12 +37,10 @@ def retry_on_failure(func):
     return wrapper
 
 def debug_print(message):
-    """Debug output when Config.DEBUG=True"""
     if Config.DEBUG:
         print(f"[DEBUG] {message}")
 
 def clean_text(text):
-    """Cleans text of unwanted characters"""
     if not text:
         return "k.A."
     text = str(text)
@@ -52,7 +49,6 @@ def clean_text(text):
     return text if text else "k.A."
 
 def normalize_company_name(name):
-    """Removes common legal-form suffixes and normalizes the name."""
     if not name:
         return ""
     forms = [
@@ -75,44 +71,32 @@ def normalize_company_name(name):
 
 # ==================== GOOGLE SHEET HANDLER ====================
 class GoogleSheetHandler:
-    """Handles the Google Sheets interaction"""
     def __init__(self):
         self.sheet = None
         self.sheet_values = []
         self._connect()
     def _connect(self):
-        """Establishes the connection to the Google Sheet"""
         scope = ["https://www.googleapis.com/auth/spreadsheets"]
         creds = ServiceAccountCredentials.from_json_keyfile_name(Config.CREDENTIALS_FILE, scope)
         self.sheet = gspread.authorize(creds).open_by_url(Config.SHEET_URL).sheet1
         self.sheet_values = self.sheet.get_all_values()
     def get_start_index(self):
-        """Determines the first empty row in column N (index 14)"""
         filled_n = [row[13] if len(row) > 13 else '' for row in self.sheet_values[1:]]
         return next((i + 1 for i, v in enumerate(filled_n, start=1) if not str(v).strip()), len(filled_n) + 1)
-    # Update calls are made separately for different columns.
+    # Update calls are made separately.
 
 # ==================== WIKIPEDIA SCRAPER ====================
 class WikipediaScraper:
-    """Class handling the Wikipedia search and data extraction"""
     def __init__(self):
         wikipedia.set_lang(Config.LANG)
     def _get_full_domain(self, website):
-        """Extracts the full domain name (including top-level domain) from the URL."""
         if not website:
             return ""
         website = website.lower().strip()
         website = re.sub(r'^https?:\/\/', '', website)
         website = re.sub(r'^www\.', '', website)
-        website = website.split('/')[0]
-        return website
+        return website.split('/')[0]
     def _generate_search_terms(self, company_name, website):
-        """
-        Generates search terms in the following order:
-        1. Full domain name (e.g. "heimbach.com")
-        2. The first two words of the normalized company name
-        3. The full normalized company name
-        """
         terms = []
         full_domain = self._get_full_domain(website)
         if full_domain:
@@ -126,12 +110,6 @@ class WikipediaScraper:
         debug_print(f"Generierte Suchbegriffe: {terms}")
         return terms
     def _validate_article(self, page, company_name, website):
-        """
-        Validates the article:
-        - Searches the infobox and external links for links containing the full domain name.
-          If such a link is found, a lower threshold (0.60) is applied.
-        - Otherwise the normalized Wikipedia title and the normalized company name are compared.
-        """
         full_domain = self._get_full_domain(website)
         domain_found = False
         if full_domain:
@@ -164,7 +142,6 @@ class WikipediaScraper:
         threshold = 0.60 if domain_found else Config.SIMILARITY_THRESHOLD
         return similarity >= threshold
     def extract_first_paragraph(self, page_url):
-        """Extracts the first meaningful paragraph from the Wikipedia article."""
         try:
             response = requests.get(page_url)
             soup = BeautifulSoup(response.text, Config.HTML_PARSER)
@@ -178,7 +155,6 @@ class WikipediaScraper:
             debug_print(f"Fehler beim Extrahieren des ersten Absatzes: {e}")
             return "k.A."
     def _extract_infobox_value(self, soup, target):
-        """Extracts values from the infobox (fallback method) for 'branche', 'umsatz' and 'mitarbeiter'."""
         infobox = soup.find('table', class_=lambda c: c and any(kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']))
         if not infobox:
             return "k.A."
@@ -203,31 +179,37 @@ class WikipediaScraper:
             raw = raw_value.lower()
             match = re.search(r'(\d{1,3}(?:[.,]\d{3})*|\d+)', raw)
             if match:
-                num = float(match.group(1).replace(',', '.'))
+                num_str = match.group(1)
+                if ',' in num_str:
+                    num_str = num_str.replace('.', '').replace(',', '.')
+                else:
+                    num_str = num_str.replace('.', '')
+                try:
+                    num = float(num_str)
+                except Exception as e:
+                    debug_print(f"Umsatz-Umwandlungsfehler: {e} für {num_str}")
+                    return raw_value.strip()
                 if 'mrd' in raw or 'milliarden' in raw:
                     num *= 1000
                 elif 'mio' in raw or 'millionen' in raw:
-                    num = num
+                    pass
                 else:
                     num /= 1e6
                 return str(int(round(num)))
             return raw_value.strip()
         if target == 'mitarbeiter':
             raw = raw_value.lower()
-            match = re.search(r'(\d[\d\s,\.]*)', raw)
-            if match:
-                num_str = re.sub(r'\s+', '', match.group(1))
-                return num_str
+            numbers = re.findall(r'\d+', raw)
+            if numbers:
+                return numbers[0]
             return raw_value.strip()
         return "k.A."
     def extract_full_infobox(self, soup):
-        """Extracts the complete infobox as text (no longer written to the sheet)"""
         infobox = soup.find('table', class_=lambda c: c and any(kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']))
         if not infobox:
             return "k.A."
         return clean_text(infobox.get_text(separator=' | '))
     def extract_fields_from_infobox_text(self, infobox_text, field_names):
-        """Extracts the requested fields from the infobox text (separated by ' | ')"""
         result = {}
         tokens = [token.strip() for token in infobox_text.split("|") if token.strip()]
         for i, token in enumerate(tokens):
@@ -239,7 +221,6 @@ class WikipediaScraper:
                 result[field] = tokens[j] if j < len(tokens) else "k.A."
         return result
     def extract_company_data(self, page_url):
-        """Extracts data from the Wikipedia article (first paragraph, industry, revenue, employees)"""
         if not page_url:
             return {'url': 'k.A.', 'first_paragraph': 'k.A.', 'branche': 'k.A.', 'umsatz': 'k.A.', 'mitarbeiter': 'k.A.'}
         try:
@@ -263,7 +244,6 @@ class WikipediaScraper:
             return {'url': 'k.A.', 'first_paragraph': 'k.A.', 'branche': 'k.A.', 'umsatz': 'k.A.', 'mitarbeiter': 'k.A.'}
     @retry_on_failure
     def search_company_article(self, company_name, website):
-        """Searches for the Wikipedia article using optimized search terms (full domain name, candidate, normalized name)."""
        search_terms = self._generate_search_terms(company_name, website)
        for term in search_terms:
            try:
@@ -284,40 +264,43 @@
 
 # ==================== DATA PROCESSOR ====================
 class DataProcessor:
-    """Controls the overall process"""
     def __init__(self):
         self.sheet_handler = GoogleSheetHandler()
         self.wiki_scraper = WikipediaScraper()
     def process_rows(self, num_rows):
-        """Processes the given number of rows"""
         start_index = self.sheet_handler.get_start_index()
         print(f"Starte bei Zeile {start_index+1}")
         for i in range(start_index, min(start_index + num_rows, len(self.sheet_handler.sheet_values))):
             row = self.sheet_handler.sheet_values[i]
             self._process_single_row(i+1, row)
     def _process_single_row(self, row_num, row_data):
-        """Processes a single row"""
-        company_name = row_data[0] if len(row_data) > 0 else ""
-        website = row_data[1] if len(row_data) > 1 else ""
+        # Only process the row if column A contains an "x"
+        if not row_data or row_data[0].strip().lower() != "x":
+            print(f"[{datetime.now().strftime('%H:%M:%S')}] Überspringe Zeile {row_num}, kein 'x' in Spalte A.")
+            return
+        # Company name in column B, website in column C
+        company_name = row_data[1] if len(row_data) > 1 else ""
+        website = row_data[2] if len(row_data) > 2 else ""
         print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Verarbeite Zeile {row_num}: {company_name}")
         article = self.wiki_scraper.search_company_article(company_name, website)
         if article:
             company_data = self.wiki_scraper.extract_company_data(article.url)
         else:
             company_data = {'url': 'k.A.', 'first_paragraph': 'k.A.', 'branche': 'k.A.', 'umsatz': 'k.A.', 'mitarbeiter': 'k.A.'}
-        # Update columns G to K: URL, first paragraph, industry, revenue, employees
+        # Update columns: with flag column A in place, all output columns shift one column to the right.
+        # Column H: URL, I: first paragraph, J: industry, K: revenue, L: employees
         self.sheet_handler.sheet.update(values=[[
             company_data.get('url', 'k.A.'),
             company_data.get('first_paragraph', 'k.A.'),
             company_data.get('branche', 'k.A.'),
             company_data.get('umsatz', 'k.A.'),
             company_data.get('mitarbeiter', 'k.A.')
-        ]], range_name=f"G{row_num}:K{row_num}")
-        # Column N: date and current time
+        ]], range_name=f"H{row_num}:L{row_num}")
+        # Column O: date and current time
         current_dt = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        self.sheet_handler.sheet.update(values=[[current_dt]], range_name=f"N{row_num}")
-        # Column Q: version
-        self.sheet_handler.sheet.update(values=[[Config.VERSION]], range_name=f"Q{row_num}")
+        self.sheet_handler.sheet.update(values=[[current_dt]], range_name=f"O{row_num}")
+        # Column R: version
+        self.sheet_handler.sheet.update(values=[[Config.VERSION]], range_name=f"R{row_num}")
         print(f"✅ Aktualisiert: URL: {company_data.get('url', 'k.A.')}, Erster Absatz: {company_data.get('first_paragraph', 'k.A.')[:30]}..., Branche: {company_data.get('branche', 'k.A.')}, Umsatz: {company_data.get('umsatz', 'k.A.')}, Mitarbeiter: {company_data.get('mitarbeiter', 'k.A.')}")
         time.sleep(Config.RETRY_DELAY)
 
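A note on the deprecation fix: recent gspread releases changed Worksheet.update() so that the values come before the target range, and calls using the old positional order emit a DeprecationWarning. Passing both arguments by name, as the updated calls above do, is unambiguous regardless of the installed version. A minimal sketch (write_company_row is an illustrative helper, not part of the script):

    import gspread

    def write_company_row(sheet: gspread.Worksheet, row_num: int, data: list) -> None:
        # Old style (positional, range first) -- deprecated in newer gspread:
        #   sheet.update(f"H{row_num}:L{row_num}", [data])
        # New style (named arguments, as in this commit):
        sheet.update(values=[data], range_name=f"H{row_num}:L{row_num}")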