syntax fix
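
# Dependency note (not part of the original commit): the imports below map to
# these PyPI packages, assuming the standard package names:
#   pip install gspread oauth2client wikipedia requests beautifulsoup4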
import os
import time
import re
import gspread
import wikipedia
import requests
from bs4 import BeautifulSoup
from oauth2client.service_account import ServiceAccountCredentials
from datetime import datetime
from difflib import SequenceMatcher
import csv


# ==================== CONFIGURATION ====================
class Config:
    VERSION = "1.1.1"
    LANG = "de"
    # The diff hunk elides the middle of this class. The attributes below are
    # referenced elsewhere in the script; the values given here are assumed
    # placeholders, not the originals.
    CREDENTIALS_FILE = "credentials.json"  # assumed: service-account key file
    SHEET_URL = ""                         # assumed: fill in the sheet URL
    MAX_RETRIES = 3                        # assumed default
    RETRY_DELAY = 2                        # seconds; assumed default
    SIMILARITY_THRESHOLD = 0.6             # assumed default
    DEBUG = True
    WIKIPEDIA_SEARCH_RESULTS = 8
    HTML_PARSER = "html.parser"

# ==================== HELPER FUNCTIONS ====================
def retry_on_failure(func):
    """Decorator that retries a call when it raises."""
    def wrapper(*args, **kwargs):
        for attempt in range(Config.MAX_RETRIES):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                print(f"⚠️ Error in {func.__name__} (attempt {attempt+1}): {str(e)[:100]}")
                time.sleep(Config.RETRY_DELAY)
        return None
    return wrapper


def debug_print(message):
    """Debug output when Config.DEBUG is True."""
    if Config.DEBUG:
        print(f"[DEBUG] {message}")


def clean_text(text):
    """Strips HTML entities and excess whitespace from text.

    "k.A." (German "keine Angabe", i.e. n/a) is the sentinel this script
    writes for missing values.
    """
    if not text:
        return "k.A."

    # Convert and sanitize
    text = str(text)
    text = re.sub(r'\[.*?\]', '', text)  # drop square brackets and contents
    text = re.sub(r'\(.*?\)', '', text)  # drop parentheses and contents
    text = re.sub(r'<.*?>', '', text)    # drop HTML tags
    text = re.sub(r'\s+', ' ', text).strip()
    return text if text else "k.A."
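
# Illustrative check of clean_text (hypothetical input, traced by hand):
#   clean_text("Automobile<br> [1] (est. 1900)")  ->  "Automobile"
# Bracketed notes, parenthesized asides, and tags are stripped, then the
# remaining whitespace collapses.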
# ==================== GOOGLE SHEET HANDLER ====================
class GoogleSheetHandler:
    """Handles all interaction with the Google Sheet."""

    def __init__(self):
        self.sheet = None
        self.sheet_values = []
        self._connect()

    def _connect(self):
        """Connects to the Google Sheet."""
        scope = ["https://www.googleapis.com/auth/spreadsheets"]
        creds = ServiceAccountCredentials.from_json_keyfile_name(
            Config.CREDENTIALS_FILE, scope
        )
        self.sheet = gspread.authorize(creds).open_by_url(Config.SHEET_URL).sheet1
        self.sheet_values = self.sheet.get_all_values()

    def get_start_index(self):
        """Finds the first empty row in column N (index 13)."""
        filled_n = [row[13] if len(row) > 13 else '' for row in self.sheet_values[1:]]
        return next(
            (i + 1 for i, v in enumerate(filled_n, start=1) if not str(v).strip()),
            len(filled_n) + 1
        )

    def update_row(self, row_num, values):
        """Updates columns G..Q of one row."""
        self.sheet.update(
            range_name=f"G{row_num}:Q{row_num}",
            values=[values]
        )
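
# Usage sketch (assumes Config.CREDENTIALS_FILE and Config.SHEET_URL point at a
# reachable sheet; not part of the original commit):
#   handler = GoogleSheetHandler()
#   print(handler.get_start_index())  # first row whose column N is still empty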
# ==================== WIKIPEDIA SCRAPER ====================
class WikipediaScraper:

    def __init__(self):
        wikipedia.set_lang(Config.LANG)

    def _extract_domain_hint(self, website):
        """Extracts the domain keyword from the website URL."""
        if not website:
            return ""
        # Strip protocol and www, then split into parts
        clean_url = website.lower().replace("https://", "").replace("http://", "").replace("www.", "")
        domain_parts = clean_url.split(".")
        return domain_parts[0] if domain_parts else ""

    def _generate_search_terms(self, company_name, website_hint=""):
        """Generates search terms from the company name and website."""
        search_terms = [company_name.strip()]

        # Strip legal forms (GmbH, AG, ...) and anything after them
        clean_name = re.sub(
            r'\s+(?:GmbH|AG|KG|OHG|e\.V\.|mbH|& Co\. KG| GmbH & Co\. KG).*$',
            '',
            company_name
        ).strip()

        # Add the cleaned name if it differs
        if clean_name and clean_name != company_name:
            search_terms.append(clean_name)

        # Add the first two meaningful words
        name_words = [w for w in re.split(r'\W+', clean_name) if w]
        if len(name_words) >= 2:
            search_terms.append(" ".join(name_words[:2]))

        # Add the domain hint
        domain_hint = self._extract_domain_hint(website_hint)
        if domain_hint and domain_hint not in ["de", "com", "org", "net"]:
            search_terms.append(domain_hint)

        debug_print(f"Generated search terms: {search_terms}")
        return list(set(search_terms))  # remove duplicates

    def _validate_article(self, page, company_name, domain_hint=""):
        """Checks whether the article matches the company."""
        # Normalize both names
        page_title = re.sub(r'\(.*?\)', '', page.title).strip().lower()
        search_name = re.sub(r'[^a-zA-Z0-9äöüß ]', '', company_name).strip().lower()

        # Similarity check
        similarity = SequenceMatcher(None, page_title, search_name).ratio()
        debug_print(f"Similarity '{page_title}' vs '{search_name}': {similarity:.2f}")

        # Additional domain check
        if domain_hint:
            html_content = requests.get(page.url).text.lower()
            if domain_hint not in html_content:
                debug_print(f"Domain hint '{domain_hint}' not found in article")
                return False

        return similarity >= Config.SIMILARITY_THRESHOLD

    @retry_on_failure
    def search_company_article(self, company_name, website_hint=""):
        """Main entry point for the article search."""
        search_terms = self._generate_search_terms(company_name, website_hint)
        domain_hint = self._extract_domain_hint(website_hint)

        for term in search_terms:
            try:
                results = wikipedia.search(term, results=Config.WIKIPEDIA_SEARCH_RESULTS)
                debug_print(f"Search results for '{term}': {results}")

                for title in results:
                    try:
                        page = wikipedia.page(title, auto_suggest=False)
                        if self._validate_article(page, company_name, domain_hint):
                            return page
                    except wikipedia.exceptions.DisambiguationError:
                        continue
            except Exception as e:
                debug_print(f"Error while searching for {term}: {str(e)}")
                continue
        return None

    def extract_company_data(self, page_url):
        """Extracts industry ('branche') and revenue ('umsatz') from the article."""
        response = requests.get(page_url)
        soup = BeautifulSoup(response.text, Config.HTML_PARSER)

        return {
            'branche': self._extract_infobox_value(soup, 'branche'),
            'umsatz': self._extract_infobox_value(soup, 'umsatz'),
            'url': page_url
        }

    def _extract_infobox_value(self, soup, target):
        """Extracts a specific value from the infobox using extended search patterns."""
        # Extended infobox detection
        infobox = soup.find('table',
            class_=lambda c: c and any(
                kw in c.lower() for kw in ['infobox', 'vcard', 'unternehmen']
            ))
        if not infobox:
            return "k.A."

        # The German keyword lists stay as-is: they match de.wikipedia.org labels
        keywords = {
            'branche': [
                'branche', 'industrie', 'tätigkeitsfeld', 'geschäftsfeld',
                'sektor', 'produkte', 'leistungen', 'geschäftsbereich'
            ],
            'umsatz': [
                'umsatz', 'jahresumsatz', 'umsatzerlöse', 'gesamtumsatz',
                'konzernumsatz', 'umsatzentwicklung', 'ergebnis'
            ]
        }.get(target, [])

        # Scan every row and cell
        value = "k.A."
        for row in infobox.find_all('tr'):
            # Header detection in th/td cells without a colspan attribute
            header_cells = row.find_all(['th', 'td'], attrs={'colspan': False})
            for header in header_cells:
                header_text = clean_text(header.get_text()).lower()

                if any(kw in header_text for kw in keywords):
                    # Take the next cell, ignoring nested tables
                    value_cell = header.find_next_sibling(['td', 'th'])
                    if value_cell:
                        # Handle lists and multi-line content
                        list_items = value_cell.find_all('li')
                        if list_items:
                            value = ', '.join(clean_text(li.get_text()) for li in list_items)
                        else:
                            value = clean_text(value_cell.get_text())

                        # Extract numeric revenue values via regex
                        if target == 'umsatz':
                            match = re.search(
                                r'(\d{1,3}(?:[.,]\d{3})*(?:[.,]\d{2})?)\s*(?:Mio\.?|Millionen|Mrd\.?|Milliarden)?\s*(?:€|Euro|EUR)',
                                value
                            )
                            if match:
                                value = match.group(1).replace('.', '').replace(',', '.')
                        return value

        return "k.A."
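
# Worked example for the revenue regex above (hypothetical infobox text):
#   "120.000.000 Euro" matches with group(1) == "120.000.000",
#   which the replace() calls then normalize to "120000000".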
# ==================== DATA PROCESSOR ====================
class DataProcessor:
    """Drives the overall process."""

    def __init__(self):
        self.sheet_handler = GoogleSheetHandler()
        self.wiki_scraper = WikipediaScraper()

    def process_rows(self, num_rows):
        """Processes the given number of rows."""
        start_index = self.sheet_handler.get_start_index()
        print(f"Starting at row {start_index+1}")

        for i in range(start_index, min(start_index + num_rows, len(self.sheet_handler.sheet_values))):
            row = self.sheet_handler.sheet_values[i]
            self._process_single_row(i+1, row)

    def _process_single_row(self, row_num, row_data):
        """Processes a single row."""
        company_name = row_data[0] if len(row_data) > 0 else ""
        website = row_data[1] if len(row_data) > 1 else ""
        print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Processing row {row_num}: {company_name}")

        # Step 1: find the Wikipedia article
        article = self.wiki_scraper.search_company_article(company_name, website)

        # Step 2: extract the data
        if article:
            company_data = self.wiki_scraper.extract_company_data(article.url)
        else:
            company_data = {'branche': 'k.A.', 'umsatz': 'k.A.', 'url': ''}

        # Write the data back to the sheet
        self._update_sheet(row_num, company_data)
        time.sleep(Config.RETRY_DELAY)

    def _update_sheet(self, row_num, data):
        """Updates the row with the new data."""
        current_values = self.sheet_handler.sheet.row_values(row_num)
        # Eleven entries, one per column in the G..Q range written by update_row
        new_values = [
            data['branche'] if data['branche'] != "k.A." else current_values[6] if len(current_values) > 6 else "k.A.",
            "k.A.",  # LinkedIn industry remains unchanged
            data['umsatz'] if data['umsatz'] != "k.A." else current_values[8] if len(current_values) > 8 else "k.A.",
            "k.A.", "k.A.", "k.A.",
            data['url'] if data['url'] else current_values[12] if len(current_values) > 12 else "",
            datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "k.A.", "k.A.",
            Config.VERSION
        ]
        self.sheet_handler.update_row(row_num, new_values)
        print(f"✅ Updated: industry: {new_values[0]}, revenue: {new_values[2]}, URL: {new_values[6]}")
# ==================== MAIN EXECUTION ====================
if __name__ == "__main__":
    num_rows = int(input("How many rows should be checked? "))
    processor = DataProcessor()
    processor.process_rows(num_rows)
    print("\n✅ Wikipedia lookup finished")