fix(transcription): Behebt Start- und API-Fehler in der App [2f488f42]
This commit is contained in:
@@ -2,13 +2,9 @@ import sys
|
||||
import os
|
||||
from sqlalchemy.orm import Session
|
||||
from .. import database
|
||||
from .. import prompt_library
|
||||
|
||||
from ..prompt_library import get_prompt
|
||||
import logging
|
||||
from sqlalchemy.orm import Session
|
||||
from .. import database
|
||||
from .. import prompt_library
|
||||
from ..lib.gemini_client import call_gemini_flash
|
||||
from .llm_service import call_gemini_api
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -57,18 +53,7 @@ def _format_transcript(chunks: list[database.TranscriptChunk]) -> str:
|
||||
|
||||
return "\n".join(full_transcript)
|
||||
|
||||
def get_prompt_by_type(insight_type: str) -> str:
    """
    Returns the corresponding prompt from the prompt_library based on the type.

    Raises:
        ValueError: If *insight_type* is not one of the supported types.
    """
    # Map each supported insight type to the name of its prompt constant.
    # The attribute is resolved only after validation, so an unknown type
    # never touches prompt_library (same as the original elif chain).
    prompt_attr_by_type = {
        "meeting_minutes": "MEETING_MINUTES_PROMPT",
        "action_items": "ACTION_ITEMS_PROMPT",
        "sales_summary": "SALES_SUMMARY_PROMPT",
    }
    attr_name = prompt_attr_by_type.get(insight_type)
    if attr_name is None:
        raise ValueError(f"Unknown insight type: {insight_type}")
    return getattr(prompt_library, attr_name)
|
||||
|
||||
|
||||
def generate_insight(db: Session, meeting_id: int, insight_type: str) -> database.AnalysisResult:
|
||||
"""
|
||||
@@ -102,7 +87,7 @@ def generate_insight(db: Session, meeting_id: int, insight_type: str) -> databas
|
||||
# This can happen if all chunks are empty or malformed
|
||||
raise ValueError(f"Formatted transcript for meeting {meeting_id} is empty or could not be processed.")
|
||||
|
||||
prompt_template = get_prompt_by_type(insight_type)
|
||||
prompt_template = get_prompt(insight_type)
|
||||
final_prompt = prompt_template.format(transcript_text=transcript_text)
|
||||
|
||||
# 4. Call the AI model
|
||||
|
||||
91
transcription-tool/backend/services/llm_service.py
Normal file
91
transcription-tool/backend/services/llm_service.py
Normal file
@@ -0,0 +1,91 @@
|
||||
import os
|
||||
import requests
|
||||
import logging
|
||||
import time
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
def call_gemini_api(prompt: str, retries: int = 3, timeout: int = 600) -> str:
    """
    Calls the Gemini Pro API with a given prompt.

    Args:
        prompt: The text prompt to send to the API.
        retries: The number of times to retry on failure (5xx / connection errors).
        timeout: The request timeout in seconds.

    Returns:
        The text response from the API, a "[Blocked ...]" marker string if the
        prompt or response was blocked by safety filters, or an empty string if
        the response is malformed.

    Raises:
        ValueError: If the GEMINI_API_KEY environment variable is not set.
        requests.exceptions.RequestException: If the API call still fails after
            all retries.
    """
    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        logging.error("GEMINI_API_KEY environment variable not set.")
        raise ValueError("API key not found.")

    url = f"https://generativelanguage.googleapis.com/v1/models/gemini-1.5-flash:generateContent?key={api_key}"
    headers = {'Content-Type': 'application/json'}
    payload = {
        "contents": [{
            "parts": [{"text": prompt}]
        }],
        "generationConfig": {
            "temperature": 0.7,
            "topK": 40,
            "topP": 0.95,
            "maxOutputTokens": 8192,
        }
    }

    for attempt in range(retries):
        try:
            response = requests.post(url, headers=headers, json=payload, timeout=timeout)
            response.raise_for_status()

            result = response.json()

            if 'candidates' in result and result['candidates']:
                candidate = result['candidates'][0]
                # Only candidates with a non-empty "parts" list carry text; an
                # empty list would otherwise raise IndexError at parts[0] below,
                # so such responses fall through to the unexpected-structure path.
                if 'content' in candidate and candidate['content'].get('parts'):
                    # Check for safety ratings before returning any text.
                    if 'safetyRatings' in candidate:
                        blocked = any(r.get('blocked') for r in candidate['safetyRatings'])
                        if blocked:
                            logging.error(f"API call blocked due to safety ratings: {candidate['safetyRatings']}")
                            # Provide a more specific error or return a specific string
                            return "[Blocked by Safety Filter]"
                    return candidate['content']['parts'][0]['text']

            # Handle cases where the response is valid but doesn't contain expected content
            if 'promptFeedback' in result and result['promptFeedback'].get('blockReason'):
                reason = result['promptFeedback']['blockReason']
                logging.error(f"Prompt was blocked by the API. Reason: {reason}")
                return f"[Prompt Blocked: {reason}]"

            logging.warning(f"Unexpected API response structure on attempt {attempt+1}: {result}")
            return ""

        except requests.exceptions.HTTPError as e:
            # Retry only transient server-side errors; client errors (4xx) are
            # permanent and re-raised immediately.
            if e.response.status_code in [500, 502, 503, 504] and attempt < retries - 1:
                wait_time = (2 ** attempt) * 2  # Exponential backoff
                logging.warning(f"Server Error {e.response.status_code}. Retrying in {wait_time}s...")
                time.sleep(wait_time)
                continue
            logging.error(f"HTTP Error calling Gemini API: {e.response.status_code} {e.response.text}")
            raise
        except requests.exceptions.RequestException as e:
            if attempt < retries - 1:
                wait_time = (2 ** attempt) * 2
                logging.warning(f"Connection Error: {e}. Retrying in {wait_time}s...")
                time.sleep(wait_time)
                continue
            logging.error(f"Final Connection Error calling Gemini API: {e}")
            raise
        except Exception as e:
            # Unexpected failures (e.g. JSON decoding) are logged with a
            # traceback and re-raised; they are not retried.
            logging.error(f"An unexpected error occurred: {e}", exc_info=True)
            raise

    return ""  # Should not be reached if retries are exhausted
|
||||
64
transcription-tool/backend/services/translation_service.py
Normal file
64
transcription-tool/backend/services/translation_service.py
Normal file
@@ -0,0 +1,64 @@
|
||||
from sqlalchemy.orm import Session
|
||||
from ..database import Meeting, AnalysisResult
|
||||
from .llm_service import call_gemini_api
|
||||
from ..prompt_library import get_prompt
|
||||
from typing import Dict, Any, List
|
||||
|
||||
def _format_transcript_for_translation(chunks: List[Any]) -> str:
|
||||
"""Formats the transcript into a single string for the translation prompt."""
|
||||
full_transcript = []
|
||||
# Ensure chunks are treated correctly, whether they are dicts or objects
|
||||
for chunk in chunks:
|
||||
json_content = getattr(chunk, 'json_content', None)
|
||||
if not json_content:
|
||||
continue
|
||||
for line in json_content:
|
||||
speaker = line.get("speaker", "Unknown")
|
||||
text = line.get("text", "")
|
||||
full_transcript.append(f"{speaker}: {text}")
|
||||
return "\n".join(full_transcript)
|
||||
|
||||
def translate_transcript(db: Session, meeting_id: int, target_language: str) -> AnalysisResult:
    """
    Translates the transcript of a meeting and stores it.
    """
    meeting = db.query(Meeting).filter(Meeting.id == meeting_id).first()
    if meeting is None:
        raise ValueError("Meeting not found")

    prompt_key = f"translation_{target_language.lower()}"

    # Reuse a previously stored translation for this meeting/language pair
    # instead of calling the model again.
    cached = (
        db.query(AnalysisResult)
        .filter(
            AnalysisResult.meeting_id == meeting_id,
            AnalysisResult.prompt_key == prompt_key,
        )
        .first()
    )
    if cached is not None:
        return cached

    # Flatten the meeting's chunks into a single transcript string.
    transcript_text = _format_transcript_for_translation(meeting.chunks)
    if not transcript_text:
        raise ValueError("Transcript is empty, cannot translate.")

    # Build the translation prompt from the library and call the model.
    prompt = get_prompt('translate_transcript', {
        'transcript': transcript_text,
        'target_language': target_language,
    })
    translated_text = call_gemini_api(prompt)

    # Persist the translation as an AnalysisResult row.
    record = AnalysisResult(
        meeting_id=meeting_id,
        prompt_key=prompt_key,
        result_text=translated_text,
    )
    db.add(record)
    db.commit()
    db.refresh(record)

    return record
|
||||
Reference in New Issue
Block a user