feat(transcription): v0.4.0 with structured json, inline editing and deletion

- Backend: Switched prompt to JSON output for structured data
- Backend: Added PUT /chunks/{id} endpoint for persistence
- Backend: Fixed app.py imports and initialization logic
- Frontend: Complete rewrite for Unified View (flattened chunks)
- Frontend: Added Inline Editing (Text/Speaker) and Row Deletion
- Docs: Updated TRANSCRIPTION_TOOL.md with v0.4 features
This commit is contained in:
2026-01-24 20:43:33 +00:00
parent 0858df6f25
commit da00d461e1
5 changed files with 389 additions and 99 deletions

View File

@@ -1,4 +1,5 @@
import logging
import json
from sqlalchemy.orm import Session
from .ffmpeg_service import FFmpegService
from .transcription_service import TranscriptionService
@@ -7,6 +8,17 @@ from ..config import settings
logger = logging.getLogger(__name__)
def parse_time_to_seconds(time_str):
    """Convert a "MM:SS" or "HH:MM:SS" timestamp string to total seconds.

    Returns 0 for any unrecognized format, non-numeric component, or
    non-string input instead of raising, so callers can safely fall
    back to a zero offset for malformed model output.
    """
    try:
        parts = time_str.split(':')
        if len(parts) == 2:  # MM:SS
            return int(parts[0]) * 60 + int(parts[1])
        if len(parts) == 3:  # HH:MM:SS
            return int(parts[0]) * 3600 + int(parts[1]) * 60 + int(parts[2])
    except (ValueError, AttributeError):
        # ValueError: non-numeric component; AttributeError: input is not
        # a string. Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.
        return 0
    # Wrong number of ':'-separated parts (e.g. a bare "5"): treat as t=0.
    return 0
def process_meeting_task(meeting_id: int, db_session_factory):
db = db_session_factory()
meeting = db.query(Meeting).filter(Meeting.id == meeting_id).first()
@@ -35,11 +47,30 @@ def process_meeting_task(meeting_id: int, db_session_factory):
result = transcriber.transcribe_chunk(chunk_path, offset)
# Parse JSON and Adjust Timestamps
json_data = []
try:
raw_json = json.loads(result["raw_text"])
if isinstance(raw_json, list):
for entry in raw_json:
seconds = parse_time_to_seconds(entry.get("time", "00:00"))
absolute_seconds = seconds + offset
entry["absolute_seconds"] = absolute_seconds
h = int(absolute_seconds // 3600)
m = int((absolute_seconds % 3600) // 60)
s = int(absolute_seconds % 60)
entry["display_time"] = f"{h:02}:{m:02}:{s:02}"
json_data.append(entry)
except Exception as e:
logger.error(f"JSON Parsing failed for chunk {i}: {e}")
# Save chunk result
db_chunk = TranscriptChunk(
meeting_id=meeting.id,
chunk_index=i,
raw_text=result["raw_text"]
raw_text=result["raw_text"],
json_content=json_data
)
db.add(db_chunk)
all_text.append(result["raw_text"])

View File

@@ -19,8 +19,8 @@ class TranscriptionService:
"""
logger.info(f"Uploading chunk {file_path} to Gemini...")
# 1. Upload file
media_file = self.client.files.upload(path=file_path)
# 1. Upload file (SDK expects the `file` keyword argument, not `path`)
media_file = self.client.files.upload(file=file_path)
# 2. Wait for processing (usually fast for audio)
while media_file.state == "PROCESSING":
@@ -32,12 +32,18 @@ class TranscriptionService:
# 3. Transcribe with Diarization and Timestamps
prompt = """
Transkribiere dieses Audio wortgetreu.
Identifiziere die Sprecher (Sprecher A, Sprecher B, etc.).
Gib das Ergebnis als strukturierte Liste mit Timestamps aus.
Wichtig: Das Audio ist ein Teil eines größeren Gesprächs.
Antworte NUR mit dem Transkript im Format:
[MM:SS] Sprecher X: Text
Transkribiere dieses Audio wortgetreu.
Identifiziere die Sprecher (Speaker A, Speaker B, etc.).
Gib das Ergebnis als JSON-Liste zurück.
Format:
[
{
"time": "MM:SS",
"speaker": "Speaker A",
"text": "..."
}
]
"""
logger.info(f"Generating transcription for {file_path}...")
@@ -45,7 +51,8 @@ class TranscriptionService:
model="gemini-2.0-flash",
contents=[media_file, prompt],
config=types.GenerateContentConfig(
temperature=0.1, # Low temp for accuracy
temperature=0.1,
response_mime_type="application/json"
)
)
@@ -53,6 +60,6 @@ class TranscriptionService:
self.client.files.delete(name=media_file.name)
return {
"raw_text": response.text,
"raw_text": response.text, # This is now a JSON string
"offset": offset_seconds
}