Compare commits

..

3 Commits

Author SHA1 Message Date
Michele
5870b5eede fix: strip markdown code fences from LLM JSON responses
Claude wraps JSON in ```json ... ``` fences even when instructed to
return raw JSON. This caused all TopicResult validations to fail with
"Invalid JSON at line 1 column 1". Strip fences before parsing.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-09 15:10:39 +01:00
Michele
5c06b1a342 fix: add trailing slashes to settings and prompts API calls
Without trailing slash, FastAPI's SPAStaticFiles catch-all intercepts
/api/settings and /api/prompts before the API router, returning HTML
instead of JSON (405 error in UI).

Affected endpoints:
- GET /api/settings → /api/settings/
- PUT /api/settings → /api/settings/
- GET /api/prompts → /api/prompts/

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-09 12:54:46 +01:00
Michele
36a7e0281d chore: mark project as deployed on VPS
URL: https://lab.mlhub.it/postgenerator/
Container: lab-postgenerator-app (1024M RAM, 1.0 CPU)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-09 12:07:50 +01:00
3 changed files with 31 additions and 7 deletions

View File

@@ -8,13 +8,21 @@
"clone_url": "https://git.mlhub.it/Michele/postgenerator.git"
},
"vps": {
"deployed": false,
"url": null,
"deployed": true,
"url": "https://lab.mlhub.it/postgenerator/",
"last_deploy": "2026-03-09T11:06:00Z",
"container": "lab-postgenerator-app",
"path": "/opt/lab-postgenerator/"
},
"supabase": {
"enabled": false,
"project_ref": null
},
"resources": {
"limits": {
"ram_mb": 1024,
"cpu_percent": 100
},
"deployed_at": "2026-03-09T11:06:00Z"
}
}

View File

@@ -12,6 +12,7 @@ from __future__ import annotations
import json
import logging
import random
import re
import time
from typing import Type, TypeVar
@@ -111,9 +112,10 @@ class LLMService:
elapsed,
)
# Valida con Pydantic
# Rimuovi eventuali code fences markdown e valida con Pydantic
clean_text = self._strip_code_fences(raw_text)
try:
result = response_schema.model_validate_json(raw_text)
result = response_schema.model_validate_json(clean_text)
# Pausa inter-request dopo chiamata riuscita
time.sleep(self._inter_request_delay)
return result
@@ -259,6 +261,20 @@ class LLMService:
# Metodi privati
# ---------------------------------------------------------------------------
@staticmethod
def _strip_code_fences(text: str) -> str:
"""Rimuove i code fences markdown dalla risposta LLM.
Claude a volte wrappa il JSON in ```json ... ``` anche quando
gli si chiede di rispondere solo con JSON.
"""
stripped = text.strip()
# Rimuove ```json ... ``` o ``` ... ```
match = re.match(r"^```(?:json)?\s*\n?(.*?)\n?\s*```$", stripped, re.DOTALL)
if match:
return match.group(1).strip()
return stripped
@staticmethod
def _parse_retry_after(error: anthropic.RateLimitError) -> float:
"""Estrae il valore retry-after dall'eccezione RateLimitError.

View File

@@ -37,7 +37,7 @@ import type {
// Fetch the current application settings.
// NOTE: the trailing slash on the URL is required — without it FastAPI's
// SPAStaticFiles catch-all intercepts /api/settings before the API router
// and returns HTML instead of JSON (surfacing as a 405 in the UI).
export function useSettings() {
  return useQuery<Settings>({
    queryKey: ['settings'],
    queryFn: () => apiGet<Settings>('/settings/'),
    staleTime: 60_000,
  })
}
@@ -56,7 +56,7 @@ export function useSettingsStatus() {
// Persist a partial settings update and refresh the cached settings.
// NOTE: the trailing slash on the URL is required — without it FastAPI's
// SPAStaticFiles catch-all intercepts the request before the API router
// and returns HTML instead of JSON.
export function useUpdateSettings() {
  const queryClient = useQueryClient()
  return useMutation<Settings, Error, Partial<Settings>>({
    mutationFn: (settings) => apiPut<Settings>('/settings/', settings),
    onSuccess: () => {
      // Invalidate so useSettings refetches the authoritative server state.
      queryClient.invalidateQueries({ queryKey: ['settings'] })
    },
@@ -199,7 +199,7 @@ export function useDownloadEditedCsv() {
// Fetch the list of available prompts.
// NOTE: the trailing slash on the URL is required — without it FastAPI's
// SPAStaticFiles catch-all intercepts /api/prompts before the API router
// and returns HTML instead of JSON.
export function usePromptList() {
  return useQuery<PromptListResponse>({
    queryKey: ['prompts'],
    queryFn: () => apiGet<PromptListResponse>('/prompts/'),
    staleTime: 30_000,
  })
}