refactor(ai): Migrate to google-genai Client API

- Rewrites `call_gemini_flash` and `call_openai_chat` in helpers.py to use the new `genai.Client` and `client.models.generate_content` syntax.
- Removes the deprecated `genai.configure` / `GenerativeModel` calls which were causing AttributeError.
- Updates parameter handling to use `genai.types.GenerateContentConfig`.
- This change ensures full compatibility with the `google-genai` library version 1.0+.
This commit is contained in:
2026-01-03 10:15:05 +00:00
parent 511dbdf780
commit 0aad604339

View File

@@ -324,37 +324,47 @@ def _get_gemini_api_key():
def call_gemini_flash(prompt, system_instruction=None, temperature=0.3, json_mode=False):
"""
Spezifische Funktion für Gemini 1.5 Flash Aufrufe mit System-Instruction Support.
Wird vom GTM Architect Orchestrator verwendet.
Aktualisiert für die neue 'google-genai' Bibliothek (v1.0+).
"""
logger = logging.getLogger(__name__)
if not HAS_GEMINI:
logger.error("Fehler: google-genai Bibliothek fehlt.")
raise ImportError("google-genai not installed.")
api_key = _get_gemini_api_key()
genai.configure(api_key=api_key)
model_name = "gemini-1.5-flash"
generation_config = {
"temperature": temperature,
"top_p": 0.95,
"top_k": 40,
"max_output_tokens": 8192,
}
if json_mode:
generation_config["response_mime_type"] = "application/json"
try:
# Pass system_instruction if provided
model_instance = genai.GenerativeModel(
model_name=model_name,
generation_config=generation_config,
system_instruction=system_instruction
# NEU: Client-basierte Instanziierung für google-genai v1.0+
client = genai.Client(api_key=api_key)
# Konfiguration für den Aufruf
config = {
"temperature": temperature,
"top_p": 0.95,
"top_k": 40,
"max_output_tokens": 8192,
}
if json_mode:
config["response_mime_type"] = "application/json"
# Der Modell-Name muss oft explizit sein, 'gemini-1.5-flash' ist Standard
model_id = "gemini-1.5-flash"
response = client.models.generate_content(
model=model_id,
contents=prompt,
config=genai.types.GenerateContentConfig(
temperature=temperature,
top_p=0.95,
top_k=40,
max_output_tokens=8192,
response_mime_type="application/json" if json_mode else "text/plain",
system_instruction=system_instruction
)
)
# generate_content is stateless and cleaner for this use case than start_chat
response = model_instance.generate_content(prompt)
return response.text.strip()
except Exception as e:
@@ -366,9 +376,7 @@ def call_gemini_flash(prompt, system_instruction=None, temperature=0.3, json_mod
@retry_on_failure
def call_openai_chat(prompt, temperature=0.3, model=None, response_format_json=False):
"""
Zentrale Funktion fuer KI API Aufrufe.
Wurde umgebaut auf Google Gemini (generativeai), behält aber den Namen 'call_openai_chat'
aus Kompatibilitätsgründen mit dem Rest des Codes bei.
Zentrale Funktion fuer KI API Aufrufe (jetzt Gemini via google-genai Client).
"""
logger = logging.getLogger(__name__)
@@ -379,33 +387,23 @@ def call_openai_chat(prompt, temperature=0.3, model=None, response_format_json=F
logger.error("Fehler: google-genai Bibliothek fehlt.")
raise ImportError("google-genai not installed.")
# Konfiguriere Gemini
genai.configure(api_key=api_key)
# Wähle Modell (Standard auf Flash für Speed/Kosten)
model_name = "gemini-1.5-flash"
generation_config = {
"temperature": temperature,
"top_p": 0.95,
"top_k": 40,
"max_output_tokens": 8192,
}
if response_format_json:
generation_config["response_mime_type"] = "application/json"
try:
model_instance = genai.GenerativeModel(
model_name=model_name,
generation_config=generation_config,
)
# NEU: Client Instanziierung
client = genai.Client(api_key=api_key)
chat_session = model_instance.start_chat(
history=[]
)
model_id = "gemini-1.5-flash"
response = chat_session.send_message(prompt)
response = client.models.generate_content(
model=model_id,
contents=prompt,
config=genai.types.GenerateContentConfig(
temperature=temperature,
top_p=0.95,
top_k=40,
max_output_tokens=8192,
response_mime_type="application/json" if response_format_json else "text/plain"
)
)
return response.text.strip()
except Exception as e: