Compare commits: c337e1bde1...main

4 commits

| Author | SHA1 | Date |
|---|---|---|
| | 9f943aea21 | |
| | 87c710a3e9 | |
| | bfbce1aad0 | |
| | 18c9ce8754 | |
@@ -1 +1 @@
- {"task_id": "31f88f42-8544-80ad-9573-e9f7f398e8b1", "token": "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", "readme_path": "connector-superoffice/README.md", "session_start_time": "2026-03-10T14:01:26.654688"}
+ {"task_id": "30388f42-8544-8088-bc48-e59e9b973e91", "token": "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", "readme_path": "readme.md", "session_start_time": "2026-03-10T19:23:35.891681"}
.gitignore (vendored)
@@ -71,3 +71,4 @@ Log_from_docker/
.gemini/.env
gemini_api_key.txt
*.tar.gz
@@ -337,3 +337,42 @@ The new Gitea (on port 3000) becomes the central "Source of Truth".

This process is safe, fast, and repeatable, since it consists solely of standard Git and Docker commands.

---

### **Live Migration - Lessons Learned (March 2026)**

The real migration from the Synology to the Wackler VM (`docker1`) showed that the theory was sound, but that practice held specific hurdles. These lessons are critical for all future deployments:

#### **1. The Git History Trap (Packfile / Timeout Error)**

* **Problem:** Attempts to move the repository via `git clone` or Gitea's migration feature failed reproducibly with `RPC failed; HTTP 504` or `fatal: expected 'packfile'`.
* **Cause:** The local `.git` directory had grown to **1.4 gigabytes** because large MP3 files had accidentally been committed in the past. Even after deletion they remain in the history and block the transfer through proxies.
* **Solution:** Radical cleanup of the history via `git filter-branch` (or `git filter-repo`) for the `uploads_audio/` folder, followed by an aggressive garbage collection (`git gc --prune=now --aggressive`); see the sketch after this list.
* **Result:** The repo shrank from 1.4 GB to **130 MB**.
* **Lesson:** On Git transfer errors, check the size of the `.git` folder first.
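
A minimal sketch of that cleanup, assuming `git filter-repo` is installed and run on a fresh clone (it refuses to rewrite a non-fresh clone without `--force`):

```bash
du -sh .git                                           # confirm the bloat first
git filter-repo --path uploads_audio/ --invert-paths  # purge the folder from all history
git gc --prune=now --aggressive                       # compact what remains
du -sh .git                                           # verify the shrink before pushing
```
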
#### **2. The filter-branch Side Effect (.env Protection)**

* **Problem:** After the Git cleanup, untracked files (such as `.env` and `volume_backups/`) suddenly appeared to be gone.
* **Cause:** `git filter-branch` forces a hard reset to the cleaned-up state. Files listed in `.gitignore` are often removed from the working directory in the process.
* **Solution:** Restore from a previously created `.tar.gz` backup of the entire project directory.
* **Lesson:** Never perform deep Git surgery without a physical file backup of the untracked files (a one-line safety net is shown below).
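
The safety net is a single command run before any history rewrite; the backup filename is illustrative:

```bash
# Archive the whole project directory, untracked and ignored files included.
tar -czf ../project-backup-$(date +%F).tar.gz .
```
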
#### **3. The "Transit Repo" Pattern for Secrets**

* **Problem:** Transferring the `.env` files and databases securely, without checking them into the main repo and without access to external transfer services (blocked by the firewall).
* **Solution:** Create a temporary, private Gitea repository named `transit` on the target system. Upload the encrypted backup through the Gitea web UI, then clone and unpack it on the VM. Delete the `transit` repo immediately afterwards (sketched after this list).
* **Lesson:** Gitea is the best transit medium for large files behind restrictive firewalls.
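
A sketch of the pattern; the host, user, and filenames are placeholders, and the upload and delete steps happen in the Gitea web UI:

```bash
# On the source machine: encrypt before anything leaves the disk.
gpg --symmetric --cipher-algo AES256 secrets-backup.tar.gz   # -> secrets-backup.tar.gz.gpg

# Upload secrets-backup.tar.gz.gpg to the private 'transit' repo via the web UI.

# On the target VM: fetch and decrypt.
git clone http://docker1:3000/admin/transit.git
gpg --decrypt transit/secrets-backup.tar.gz.gpg > secrets-backup.tar.gz

# Finally: delete the 'transit' repository in the Gitea UI.
```
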
#### **4. Docker Build Context & Permissions**

* **Problem:** `docker compose up --build` aborted with: `failed to solve: error from sender: open /volume_backups: permission denied`.
* **Cause:** During a build, Docker reads the entire directory as its build context. Folders owned by the `root` user (e.g. created by Gemini actions) block the process.
* **Solution:**
  1. Fix the permissions: `sudo chown -R $USER:$USER volume_backups/`
  2. Exclude the folder in `.dockerignore` so it is never read in the first place (see below).
* **Lesson:** Anything that does not belong in a container image **must** go into `.dockerignore`.
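
A possible `.dockerignore` addition; the entries beyond `volume_backups/` are assumptions based on the files this migration produced:

```bash
# Keep backups, secrets, and archives out of the build context entirely.
cat >> .dockerignore <<'EOF'
volume_backups/
*.tar.gz
.env
EOF
```
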
#### **5. Vite & TypeScript Build Pitfalls**

* **Problem:** The frontend build failed because `tsconfig.json` was not found inside the container.
* **Cause:** `COPY . .` sometimes behaves unpredictably in complex directory structures; an overly broad `.dockerignore` entry, for example, can silently drop config files from the build context.
* **Solution:** Copy all configuration files explicitly in the Dockerfile, directly before the build step: `COPY tsconfig.json tsconfig.node.json tsconfig.app.json vite.config.ts ./`
* **Lesson:** Do not rely on wildcards during the build; copy essential config files explicitly (a Dockerfile excerpt follows).
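
How that fix might sit in the frontend Dockerfile; the base image, paths, and build command are assumptions, and only the `COPY` line of config files is taken from the migration itself:

```dockerfile
FROM node:20-alpine AS build
WORKDIR /app
COPY package*.json ./
RUN npm ci
# Copy the build configs explicitly instead of trusting COPY . . to bring them along.
COPY tsconfig.json tsconfig.node.json tsconfig.app.json vite.config.ts ./
COPY src/ ./src/
RUN npm run build
```
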
check_notion_task.py (new file)
@@ -0,0 +1,29 @@
import os
import requests
import json
from dotenv import load_dotenv

load_dotenv()
token = os.environ.get('NOTION_API_KEY')
db_name = "Tasks [UT]"

def find_database_id(token, title):
    url = "https://api.notion.com/v1/search"
    headers = {"Authorization": f"Bearer {token}", "Notion-Version": "2022-06-28"}
    payload = {"query": title, "filter": {"value": "database", "property": "object"}}
    res = requests.post(url, headers=headers, json=payload)
    for db in res.json().get("results", []):
        if db["title"][0]["plain_text"] == title:
            return db["id"]
    return None

db_id = find_database_id(token, db_name)
if db_id:
    url = f"https://api.notion.com/v1/databases/{db_id}/query"
    headers = {"Authorization": f"Bearer {token}", "Notion-Version": "2022-06-28"}
    res = requests.post(url, headers=headers)
    tasks = res.json().get("results", [])
    for task in tasks:
        tid = task["id"]
        title = task["properties"]["Name"]["title"][0]["plain_text"]
        print(f"Task: {title} | ID: {tid}")
@@ -66,47 +66,47 @@ class JobQueue:
                conn.commit()
                logger.debug(f"Job added: {event_type} for entity {entity_id}")
            except Exception as e:
                logger.error(f"❌ Failed to add job: {e}", exc_info=True)
                conn.rollback()
                raise

    def is_duplicate_pending(self, event_type: str, payload: dict) -> bool:
        """
        Checks if a job with the same event_type and entity_id is already PENDING.
        This is to prevent adding duplicate jobs from webhooks that are sent multiple times.
        """
        # We only want to de-duplicate creation events, as change events can be validly stacked.
        if "created" not in event_type.lower():
            return False

        entity_id = payload.get("PrimaryKey")
        if not entity_id:
            return False  # Cannot de-duplicate without a key

        five_minutes_ago = datetime.utcnow() - timedelta(minutes=5)
        with sqlite3.connect(DB_PATH, timeout=30) as conn:
            try:
                cursor = conn.cursor()
                cursor.execute("""
                    SELECT id FROM jobs
                    WHERE event_type = ?
                      AND entity_id = ?
                      AND status = 'PENDING'
                      AND created_at >= ?
                """, (event_type, entity_id, five_minutes_ago))

                if existing_job := cursor.fetchone():
                    logger.warning(f"Found PENDING duplicate of job {existing_job[0]} for event '{event_type}' and entity '{entity_id}'. Ignoring new webhook.")
                    return True
                return False
            except Exception as e:
                logger.error(f"❌ Failed to check for PENDING duplicate jobs for entity '{entity_id}': {e}", exc_info=True)
                return False  # Fail safe: better to have a duplicate than to miss an event.

    def update_entity_name(self, job_id, name, associate_name=None):
        with sqlite3.connect(DB_PATH, timeout=30) as conn:
            try:
                if associate_name:
                    conn.execute(
                        "UPDATE jobs SET entity_name = ?, associate_name = ?, updated_at = datetime('now') WHERE id = ?",
                        (str(name), str(associate_name), job_id)