v1.3.7: Complete version with all features, Mode 4: Wikipedia search only

- Retains all features from earlier versions (modes 1-3, LinkedIn Contacts)
- New mode 4 implemented, which runs only Wikipedia searches and makes no ChatGPT requests (see the dispatch sketch below)
- Debug output improved so that the processing flow and the results are easier to follow
- The start index is determined correctly from the last timestamp in column AH, so existing records are not overwritten
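A minimal sketch of how mode 4 might be selected in the main block (the dispatch itself is not part of this diff, so the prompt text and branch layout are assumptions; only the function and class names come from the script below):

MODE = input("Modus (1-5): ").strip()
if MODE == "4":
    process_wikipedia_only()        # Wikipedia lookups only, no ChatGPT requests
elif MODE == "5":
    process_contacts()              # LinkedIn contact search
else:
    DataProcessor().process_rows()  # modes 1-3 are handled inside process_rows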
2025-04-03 06:22:12 +00:00
parent ade03f2206
commit 1e658922ba


@@ -14,7 +14,7 @@ import csv
# ==================== KONFIGURATION ====================
class Config:
VERSION = "v1.3.7" # v1.3.7: Alle bisherigen Funktionen beibehalten, neuer Modus 4: Nur Wikipedia-Suche.
VERSION = "v1.3.7" # v1.3.7: Alle bisherigen Funktionen bleiben erhalten; neuer Modus 4: Nur Wikipedia-Suche.
LANG = "de"
CREDENTIALS_FILE = "service_account.json"
SHEET_URL = "https://docs.google.com/spreadsheets/d/1u_gHr9JUfmV1-iviRzbSe3575QEp7KLhK5jFV_gJcgo"
@@ -417,7 +417,7 @@ def process_contacts():
else:
debug_print("Keine Kontakte gefunden in der Haupttabelle.")
# ==================== NEUE FUNKTION: NUR WIKIPEDIA-SUCHE (MODUS 4) ====================
# ==================== NEUER MODUS 4: NUR WIKIPEDIA-SUCHE ====================
def process_wikipedia_only():
debug_print("Starte ausschließlich Wikipedia-Suche (Modus 4)...")
gc = gspread.authorize(ServiceAccountCredentials.from_json_keyfile_name(
@@ -522,6 +522,508 @@ def alignment_demo(sheet):
sheet.update(values=[new_headers], range_name=header_range)
print("Alignment-Demo abgeschlossen: Neue Spaltenüberschriften in Zeile 11200 geschrieben.")
# ==================== WIKIPEDIA SCRAPER ====================
class WikipediaScraper:
def __init__(self):
wikipedia.set_lang(Config.LANG)
def _get_full_domain(self, website):
if not website:
return ""
website = website.lower().strip()
website = re.sub(r'^https?:\/\/', '', website)
website = re.sub(r'^www\.', '', website)
return website.split('/')[0]
def _generate_search_terms(self, company_name, website):
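        """Build the search terms in order of specificity: the bare website
        domain, the first two words of the normalized company name, and the
        full normalized name (duplicates are skipped)."""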
terms = []
full_domain = self._get_full_domain(website)
if full_domain:
terms.append(full_domain)
normalized_name = normalize_company_name(company_name)
candidate = " ".join(normalized_name.split()[:2]).strip()
if candidate and candidate not in terms:
terms.append(candidate)
if normalized_name and normalized_name not in terms:
terms.append(normalized_name)
debug_print(f"Generierte Suchbegriffe: {terms}")
return terms
def _validate_article(self, page, company_name, website):
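        """Check whether a Wikipedia page plausibly belongs to the company.

        If the company's domain appears in the article's infobox links or
        external links, the title-similarity threshold is relaxed to 0.60;
        otherwise Config.SIMILARITY_THRESHOLD must be met."""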
full_domain = self._get_full_domain(website)
domain_found = False
if full_domain:
try:
html_raw = requests.get(page.url).text
soup = BeautifulSoup(html_raw, Config.HTML_PARSER)
infobox = soup.find('table', class_=lambda c: c and 'infobox' in c.lower())
if infobox:
links = infobox.find_all('a', href=True)
for link in links:
href = link.get('href').lower()
if href.startswith('/wiki/datei:'):
continue
if full_domain in href:
debug_print(f"Definitiver Link-Match in Infobox gefunden: {href}")
domain_found = True
break
if not domain_found and hasattr(page, 'externallinks'):
for ext_link in page.externallinks:
if full_domain in ext_link.lower():
debug_print(f"Definitiver Link-Match in externen Links gefunden: {ext_link}")
domain_found = True
break
except Exception as e:
debug_print(f"Fehler beim Extrahieren von Links: {str(e)}")
normalized_title = normalize_company_name(page.title)
normalized_company = normalize_company_name(company_name)
similarity = SequenceMatcher(None, normalized_title, normalized_company).ratio()
debug_print(f"Ähnlichkeit (normalisiert): {similarity:.2f} ({normalized_title} vs {normalized_company})")
threshold = 0.60 if domain_found else Config.SIMILARITY_THRESHOLD
return similarity >= threshold
def extract_first_paragraph(self, page_url):
try:
response = requests.get(page_url)
soup = BeautifulSoup(response.text, Config.HTML_PARSER)
paragraphs = soup.find_all('p')
for p in paragraphs:
text = clean_text(p.get_text())
if len(text) > 50:
return text
return "k.A."
except Exception as e:
debug_print(f"Fehler beim Extrahieren des ersten Absatzes: {e}")
return "k.A."
def extract_categories(self, soup):
cat_div = soup.find('div', id="mw-normal-catlinks")
if cat_div:
ul = cat_div.find('ul')
if ul:
cats = [clean_text(li.get_text()) for li in ul.find_all('li')]
return ", ".join(cats)
return "k.A."
def _extract_infobox_value(self, soup, target):
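        """Look up one field ('branche', 'umsatz' or 'mitarbeiter') in the
        infobox by matching row headers against a keyword list; revenue and
        employee counts are normalized via extract_numeric_value, and 'k.A.'
        is returned when nothing matches."""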
infobox = soup.find('table', class_=lambda c: c and any(kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']))
if not infobox:
return "k.A."
keywords_map = {
'branche': ['branche', 'industrie', 'tätigkeit', 'geschäftsfeld', 'sektor', 'produkte', 'leistungen', 'aktivitäten', 'wirtschaftszweig'],
'umsatz': ['umsatz', 'jahresumsatz', 'konzernumsatz', 'gesamtumsatz', 'erlöse', 'umsatzerlöse', 'einnahmen', 'ergebnis', 'jahresergebnis'],
'mitarbeiter': ['mitarbeiter', 'beschäftigte', 'personal', 'mitarbeiterzahl', 'angestellte', 'belegschaft', 'personalstärke']
}
keywords = keywords_map.get(target, [])
for row in infobox.find_all('tr'):
header = row.find('th')
if header:
header_text = clean_text(header.get_text()).lower()
if any(kw in header_text for kw in keywords):
value = row.find('td')
if value:
raw_value = clean_text(value.get_text())
if target == 'branche':
clean_val = re.sub(r'\[.*?\]|\(.*?\)', '', raw_value)
return ' '.join(clean_val.split()).strip()
if target == 'umsatz':
return extract_numeric_value(raw_value, is_umsatz=True)
if target == 'mitarbeiter':
return extract_numeric_value(raw_value, is_umsatz=False)
return "k.A."
def extract_full_infobox(self, soup):
infobox = soup.find('table', class_=lambda c: c and any(kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']))
if not infobox:
return "k.A."
return clean_text(infobox.get_text(separator=' | '))
def extract_fields_from_infobox_text(self, infobox_text, field_names):
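        """Split the flattened infobox text on '|' and, for every requested
        field name, return the token that follows the first token containing
        that name."""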
result = {}
tokens = [token.strip() for token in infobox_text.split("|") if token.strip()]
for i, token in enumerate(tokens):
for field in field_names:
if field.lower() in token.lower():
j = i + 1
while j < len(tokens) and not tokens[j]:
j += 1
result[field] = tokens[j] if j < len(tokens) else "k.A."
return result
def extract_company_data(self, page_url):
if not page_url:
return {
'url': 'k.A.',
'first_paragraph': 'k.A.',
'branche': 'k.A.',
'umsatz': 'k.A.',
'mitarbeiter': 'k.A.',
'categories': 'k.A.',
'full_infobox': 'k.A.'
}
try:
response = requests.get(page_url)
soup = BeautifulSoup(response.text, Config.HTML_PARSER)
full_infobox = self.extract_full_infobox(soup)
extracted_fields = self.extract_fields_from_infobox_text(full_infobox, ['Branche', 'Umsatz', 'Mitarbeiter'])
raw_branche = extracted_fields.get('Branche', self._extract_infobox_value(soup, 'branche'))
raw_umsatz = extracted_fields.get('Umsatz', self._extract_infobox_value(soup, 'umsatz'))
raw_mitarbeiter = extracted_fields.get('Mitarbeiter', self._extract_infobox_value(soup, 'mitarbeiter'))
umsatz_val = extract_numeric_value(raw_umsatz, is_umsatz=True)
mitarbeiter_val = extract_numeric_value(raw_mitarbeiter, is_umsatz=False)
categories_val = self.extract_categories(soup)
first_paragraph = self.extract_first_paragraph(page_url)
return {
'url': page_url,
'first_paragraph': first_paragraph,
'branche': raw_branche,
'umsatz': umsatz_val,
'mitarbeiter': mitarbeiter_val,
'categories': categories_val,
'full_infobox': full_infobox
}
except Exception as e:
debug_print(f"Extraktionsfehler: {str(e)}")
return {
'url': 'k.A.',
'first_paragraph': 'k.A.',
'branche': 'k.A.',
'umsatz': 'k.A.',
'mitarbeiter': 'k.A.',
'categories': 'k.A.',
'full_infobox': 'k.A.'
}
@retry_on_failure
def search_company_article(self, company_name, website):
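        """Try each generated search term against the Wikipedia search API and
        return the first result page that passes _validate_article, or None if
        no term yields a valid article."""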
search_terms = self._generate_search_terms(company_name, website)
for term in search_terms:
try:
results = wikipedia.search(term, results=Config.WIKIPEDIA_SEARCH_RESULTS)
debug_print(f"Suchergebnisse für '{term}': {results}")
for title in results:
try:
page = wikipedia.page(title, auto_suggest=False)
if self._validate_article(page, company_name, website):
return page
except (wikipedia.exceptions.DisambiguationError, wikipedia.exceptions.PageError) as e:
debug_print(f"Seitenfehler: {str(e)}")
continue
except Exception as e:
debug_print(f"Suchfehler: {str(e)}")
continue
return None
# ==================== GOOGLE SHEET HANDLER (für Hauptdaten) ====================
class GoogleSheetHandler:
def __init__(self):
self.sheet = None
self.sheet_values = []
self._connect()
def _connect(self):
scope = ["https://www.googleapis.com/auth/spreadsheets"]
creds = ServiceAccountCredentials.from_json_keyfile_name(Config.CREDENTIALS_FILE, scope)
self.sheet = gspread.authorize(creds).open_by_url(Config.SHEET_URL).sheet1
self.sheet_values = self.sheet.get_all_values()
def get_start_index(self):
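        """Return the sheet row number of the first data row whose column N
        (index 13) is still empty, i.e. the row at which processing resumes."""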
filled_n = [row[13] if len(row) > 13 else '' for row in self.sheet_values[1:]]
return next((i + 1 for i, v in enumerate(filled_n, start=1) if not str(v).strip()), len(filled_n) + 1)
# ==================== ALIGNMENT DEMO (Modus 3) ====================
def alignment_demo(sheet):
new_headers = [
"Spalte A (ReEval Flag)",
"Spalte B (Firmenname)",
"Spalte C (Website)",
"Spalte D (Ort)",
"Spalte E (Beschreibung)",
"Spalte F (Aktuelle Branche)",
"Spalte G (Beschreibung Branche extern)",
"Spalte H (Anzahl Techniker CRM)",
"Spalte I (Umsatz CRM)",
"Spalte J (Anzahl Mitarbeiter CRM)",
"Spalte K (Vorschlag Wiki URL)",
"Spalte L (Wikipedia URL)",
"Spalte M (Wikipedia Absatz)",
"Spalte N (Wikipedia Branche)",
"Spalte O (Wikipedia Umsatz)",
"Spalte P (Wikipedia Mitarbeiter)",
"Spalte Q (Wikipedia Kategorien)",
"Spalte R (Konsistenzprüfung)",
"Spalte S (Begründung bei Inkonsistenz)",
"Spalte T (Vorschlag Wiki Artikel ChatGPT)",
"Spalte U (Begründung bei Abweichung)",
"Spalte V (Vorschlag neue Branche)",
"Spalte W (Konsistenzprüfung Branche)",
"Spalte X (Begründung Abweichung Branche)",
"Spalte Y (FSM Relevanz Ja / Nein)",
"Spalte Z (Begründung für FSM Relevanz)",
"Spalte AA (Schätzung Anzahl Mitarbeiter)",
"Spalte AB (Konsistenzprüfung Mitarbeiterzahl)",
"Spalte AC (Begründung für Abweichung Mitarbeiterzahl)",
"Spalte AD (Einschätzung Anzahl Servicetechniker)",
"Spalte AE (Begründung bei Abweichung Anzahl Servicetechniker)",
"Spalte AF (Schätzung Umsatz ChatGPT)",
"Spalte AG (Begründung für Abweichung Umsatz)",
"Spalte AH (Timestamp letzte Prüfung)",
"Spalte AI (Version)"
]
header_range = "A11200:AI11200"
sheet.update(values=[new_headers], range_name=header_range)
print("Alignment-Demo abgeschlossen: Neue Spaltenüberschriften in Zeile 11200 geschrieben.")
# ==================== NEUER MODUS 4: NUR WIKIPEDIA-SUCHE ====================
def process_wikipedia_only():
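    """Mode 4: write Wikipedia results into columns K:Q plus a timestamp (AH)
    and the version (AI) for each row; no ChatGPT requests are made."""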
debug_print("Starte ausschließlich Wikipedia-Suche (Modus 4)...")
gc = gspread.authorize(ServiceAccountCredentials.from_json_keyfile_name(
Config.CREDENTIALS_FILE, ["https://www.googleapis.com/auth/spreadsheets"]))
sh = gc.open_by_url(Config.SHEET_URL)
main_sheet = sh.sheet1
data = main_sheet.get_all_values()
start_index = GoogleSheetHandler().get_start_index()
debug_print(f"Starte bei Zeile {start_index+1}")
for i, row in enumerate(data[1:], start=2):
if i < start_index:
continue
company_name = row[1] if len(row) > 1 else ""
website = row[2] if len(row) > 2 else ""
debug_print(f"Verarbeite Zeile {i}: {company_name}")
article = WikipediaScraper().search_company_article(company_name, website)
if article:
company_data = WikipediaScraper().extract_company_data(article.url)
else:
company_data = {
'url': 'k.A.',
'first_paragraph': 'k.A.',
'branche': 'k.A.',
'umsatz': 'k.A.',
'mitarbeiter': 'k.A.',
'categories': 'k.A.',
'full_infobox': 'k.A.'
}
wiki_values = [
row[10] if len(row) > 10 and row[10].strip() not in ["", "k.A."] else "k.A.",
company_data.get('url', 'k.A.'),
company_data.get('first_paragraph', 'k.A.'),
company_data.get('branche', 'k.A.'),
company_data.get('umsatz', 'k.A.'),
company_data.get('mitarbeiter', 'k.A.'),
company_data.get('categories', 'k.A.')
]
wiki_range = f"K{i}:Q{i}"
main_sheet.update(values=[wiki_values], range_name=wiki_range)
debug_print(f"Zeile {i} mit Wikipedia-Daten aktualisiert.")
current_dt = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
main_sheet.update(values=[[current_dt]], range_name=f"AH{i}")
main_sheet.update(values=[[Config.VERSION]], range_name=f"AI{i}")
time.sleep(Config.RETRY_DELAY)
debug_print("Wikipedia-Suche abgeschlossen.")
# ==================== NEUER MODUS: CONTACTS (LinkedIn) ====================
def process_contacts():
debug_print("Starte LinkedIn-Kontaktsuche...")
gc = gspread.authorize(ServiceAccountCredentials.from_json_keyfile_name(
Config.CREDENTIALS_FILE, ["https://www.googleapis.com/auth/spreadsheets"]))
sh = gc.open_by_url(Config.SHEET_URL)
try:
contacts_sheet = sh.worksheet("Contacts")
except gspread.exceptions.WorksheetNotFound:
contacts_sheet = sh.add_worksheet(title="Contacts", rows="1000", cols="10")
header = ["Firmenname", "Website", "Vorname", "Nachname", "Position", "Anrede", "E-Mail"]
contacts_sheet.update("A1:G1", [header])
debug_print("Neues Blatt 'Contacts' erstellt und Header eingetragen.")
main_sheet = sh.sheet1
data = main_sheet.get_all_values()
positions = ["Serviceleiter", "IT-Leiter", "Leiter After Sales", "Leiter Einsatzplanung"]
new_rows = []
for idx, row in enumerate(data[1:], start=2):
company_name = row[1] if len(row) > 1 else ""
website = row[2] if len(row) > 2 else ""
debug_print(f"Verarbeite Firma: '{company_name}' (Zeile {idx}), Website: '{website}'")
if not company_name or not website:
debug_print("Überspringe, da Firmenname oder Website fehlt.")
continue
for pos in positions:
debug_print(f"Suche nach Position: '{pos}' bei '{company_name}'")
contact = search_linkedin_contact(company_name, website, pos)
if contact:
debug_print(f"Kontakt gefunden: {contact}")
new_rows.append([contact["Firmenname"], contact["Website"], contact["Vorname"], contact["Nachname"], contact["Position"], "", ""])
else:
debug_print(f"Kein Kontakt für Position '{pos}' bei '{company_name}' gefunden.")
if new_rows:
last_row = len(contacts_sheet.get_all_values()) + 1
range_str = f"A{last_row}:G{last_row + len(new_rows) - 1}"
        contacts_sheet.update(values=new_rows, range_name=range_str)
debug_print(f"{len(new_rows)} Kontakte in 'Contacts' hinzugefügt.")
else:
debug_print("Keine Kontakte gefunden in der Haupttabelle.")
# ==================== DATA PROCESSOR ====================
class DataProcessor:
def __init__(self):
self.sheet_handler = GoogleSheetHandler()
self.wiki_scraper = WikipediaScraper()
def process_rows(self, num_rows=None):
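        """Dispatch by MODE: mode 2 re-evaluates every row flagged with 'x' in
        column A, mode 3 runs the alignment demo, and the default mode starts
        at the first incomplete row and processes up to num_rows rows."""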
if MODE == "2":
print("Re-Evaluierungsmodus: Verarbeitung aller Zeilen mit 'x' in Spalte A.")
for i, row in enumerate(self.sheet_handler.sheet_values[1:], start=2):
if row[0].strip().lower() == "x":
self._process_single_row(i, row)
elif MODE == "3":
print("Alignment-Demo-Modus: Schreibe neue Spaltenüberschriften in Zeile 11200.")
alignment_demo(self.sheet_handler.sheet)
else:
start_index = self.sheet_handler.get_start_index()
print(f"Starte bei Zeile {start_index+1}")
rows_processed = 0
for i, row in enumerate(self.sheet_handler.sheet_values[1:], start=2):
if i < start_index:
continue
if num_rows is not None and rows_processed >= num_rows:
break
self._process_single_row(i, row)
rows_processed += 1
def _process_single_row(self, row_num, row_data):
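        """Full pipeline for one sheet row: write the Wikipedia data to K:Q,
        run the ChatGPT-based checks (revenue comparison, article validation,
        branch suggestion, FSM relevance, service-technician estimate) into
        columns R through AG, and stamp AH/AI with timestamp and version."""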
company_name = row_data[1] if len(row_data) > 1 else ""
website = row_data[2] if len(row_data) > 2 else ""
wiki_update_range = f"K{row_num}:Q{row_num}"
chatgpt_range = f"AF{row_num}"
abgleich_range = f"AG{row_num}"
valid_range = f"R{row_num}"
dt_range = f"AH{row_num}"
ver_range = f"AI{row_num}"
print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Verarbeite Zeile {row_num}: {company_name}")
if len(row_data) > 10 and row_data[10].strip() not in ["", "k.A."]:
wiki_url = row_data[10].strip()
try:
company_data = self.wiki_scraper.extract_company_data(wiki_url)
except Exception as e:
debug_print(f"Fehler beim Laden des vorgeschlagenen Wikipedia-Artikels: {e}")
article = self.wiki_scraper.search_company_article(company_name, website)
company_data = self.wiki_scraper.extract_company_data(article.url) if article else {
'url': 'k.A.', 'first_paragraph': 'k.A.', 'branche': 'k.A.',
'umsatz': 'k.A.', 'mitarbeiter': 'k.A.', 'categories': 'k.A.',
'full_infobox': 'k.A.'
}
else:
article = self.wiki_scraper.search_company_article(company_name, website)
company_data = self.wiki_scraper.extract_company_data(article.url) if article else {
'url': 'k.A.', 'first_paragraph': 'k.A.', 'branche': 'k.A.',
'umsatz': 'k.A.', 'mitarbeiter': 'k.A.', 'categories': 'k.A.',
'full_infobox': 'k.A.'
}
wiki_values = [
row_data[10] if len(row_data) > 10 and row_data[10].strip() not in ["", "k.A."] else "k.A.",
company_data.get('url', 'k.A.'),
company_data.get('first_paragraph', 'k.A.'),
company_data.get('branche', 'k.A.'),
company_data.get('umsatz', 'k.A.'),
company_data.get('mitarbeiter', 'k.A.'),
company_data.get('categories', 'k.A.')
]
self.sheet_handler.sheet.update(values=[wiki_values], range_name=wiki_update_range)
wait_for_sheet_update(self.sheet_handler.sheet, f"K{row_num}", wiki_values[0])
time.sleep(3)
self.sheet_handler.sheet.update(values=[["XX"]], range_name=chatgpt_range)
crm_umsatz = row_data[8] if len(row_data) > 8 else "k.A."
abgleich_result = compare_umsatz_values(crm_umsatz, company_data.get('umsatz', 'k.A.'))
self.sheet_handler.sheet.update(values=[[abgleich_result]], range_name=abgleich_range)
crm_data = ";".join(row_data[1:10])
wiki_data = ";".join(row_data[11:17])
valid_result = validate_article_with_chatgpt(crm_data, wiki_data)
self.sheet_handler.sheet.update(values=[[valid_result]], range_name=valid_range)
crm_branche = row_data[5] if len(row_data) > 5 else "k.A."
beschreibung_branche = row_data[6] if len(row_data) > 6 else "k.A."
wiki_branche = company_data.get('branche', 'k.A.')
wiki_kategorien = company_data.get('categories', 'k.A.')
branche_result = evaluate_branche_chatgpt(crm_branche, beschreibung_branche, wiki_branche, wiki_kategorien)
branche_v_range = f"V{row_num}"
branche_w_range = f"W{row_num}"
branche_x_range = f"X{row_num}"
self.sheet_handler.sheet.update(values=[[branche_result["branch"]]], range_name=branche_v_range)
self.sheet_handler.sheet.update(values=[[branche_result["consistency"]]], range_name=branche_w_range)
self.sheet_handler.sheet.update(values=[[branche_result["justification"]]], range_name=branche_x_range)
fsm_result = evaluate_fsm_suitability(company_name, company_data)
self.sheet_handler.sheet.update(values=[[fsm_result["suitability"]]], range_name=f"Y{row_num}")
self.sheet_handler.sheet.update(values=[[fsm_result["justification"]]], range_name=f"Z{row_num}")
st_estimate = evaluate_servicetechnicians_estimate(company_name, company_data)
self.sheet_handler.sheet.update(values=[[st_estimate]], range_name=f"AD{row_num}")
internal_value = row_data[7] if len(row_data) > 7 else "k.A."
internal_category = map_internal_technicians(internal_value) if internal_value != "k.A." else "k.A."
if internal_category != "k.A." and st_estimate != internal_category:
explanation = evaluate_servicetechnicians_explanation(company_name, st_estimate, company_data)
discrepancy = explanation
else:
discrepancy = "ok"
self.sheet_handler.sheet.update(values=[[discrepancy]], range_name=f"AE{row_num}")
self.sheet_handler.sheet.update(values=[["XX"]], range_name="AF" + str(row_num))
self.sheet_handler.sheet.update(values=[["XX"]], range_name="AG" + str(row_num))
current_dt = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.sheet_handler.sheet.update(values=[[current_dt]], range_name=dt_range)
self.sheet_handler.sheet.update(values=[[Config.VERSION]], range_name=ver_range)
debug_print(f"✅ Aktualisiert: URL: {company_data.get('url', 'k.A.')}, "
f"Branche: {company_data.get('branche', 'k.A.')}, Umsatz-Abgleich: {abgleich_result}, "
f"Validierung: {valid_result}, Branchenvorschlag: {branche_result['branch']}, "
f"FSM: {fsm_result['suitability']}, Servicetechniker-Schätzung: {st_estimate}")
time.sleep(Config.RETRY_DELAY)
# ==================== MAIN PROGRAMM ====================
if __name__ == "__main__":
print("Modi: 1 = regulärer Modus, 2 = Re-Evaluierungsmodus, 3 = Alignment-Demo, 4 = Nur Wikipedia-Suche, 5 = LinkedIn Contacts")