leopost-full/backend/app/services/llm.py
Michele 2c16407f96 feat: Settings panel with 4 flexible sections + custom provider
- Settings redesign: Text, Images, Video, Voiceover as separate sections
- Each section has a provider dropdown + API key + optional model field
- "Custom" option with a free-form Base URL field for any service
- LLM: added OpenRouter + custom OpenAI-compatible provider
- Backend: OpenAICompatibleProvider unifies OpenAI/OpenRouter/custom
- Content router: passes llm_base_url to get_llm_provider (see the sketch below)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-31 18:34:24 +02:00
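
A minimal sketch of the router-side wiring mentioned in the last bullet, assuming hypothetical settings fields (llm_provider, llm_api_key, llm_model, llm_base_url) and the import path implied by the file location; the actual content router may differ:

from app.services.llm import get_llm_provider  # path assumed from backend/app/services/llm.py

def generate_post_text(settings, prompt: str) -> str:
    # "settings" stands in for whatever object holds the user's LLM
    # configuration; the field names here are illustrative.
    provider = get_llm_provider(
        provider_name=settings.llm_provider,      # e.g. "openrouter" or "custom"
        api_key=settings.llm_api_key,
        model=settings.llm_model or None,         # optional model override
        base_url=settings.llm_base_url or None,   # only needed for openrouter/custom
    )
    return provider.generate(prompt, system="You write short social media copy.")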


"""
Multi-LLM abstraction layer.
Supports Claude (Anthropic), OpenAI, Gemini, OpenRouter, and any
OpenAI-compatible custom endpoint via direct HTTP calls using httpx.
"""
from abc import ABC, abstractmethod
import httpx
# Default models per provider
DEFAULT_MODELS = {
"claude": "claude-sonnet-4-20250514",
"openai": "gpt-4o-mini",
"gemini": "gemini-2.0-flash",
"openrouter": "openai/gpt-4o-mini",
"custom": "",
}
TIMEOUT = 60.0
class LLMProvider(ABC):
    """Abstract base class for LLM providers."""

    def __init__(self, api_key: str, model: str | None = None):
        self.api_key = api_key
        self.model = model

    @abstractmethod
    def generate(self, prompt: str, system: str = "") -> str:
        """Generate text from a prompt."""
        ...


class ClaudeProvider(LLMProvider):
    """Anthropic Claude provider via Messages API."""

    API_URL = "https://api.anthropic.com/v1/messages"

    def __init__(self, api_key: str, model: str | None = None):
        super().__init__(api_key, model or DEFAULT_MODELS["claude"])

    def generate(self, prompt: str, system: str = "") -> str:
        headers = {
            "x-api-key": self.api_key,
            "anthropic-version": "2023-06-01",
            "content-type": "application/json",
        }
        payload: dict = {
            "model": self.model,
            "max_tokens": 2048,
            "messages": [{"role": "user", "content": prompt}],
        }
        if system:
            payload["system"] = system
        try:
            with httpx.Client(timeout=TIMEOUT) as client:
                response = client.post(self.API_URL, headers=headers, json=payload)
                response.raise_for_status()
                data = response.json()
                # The Messages API returns a list of content blocks;
                # concatenate only the text blocks.
                content_blocks = data.get("content", [])
                return "".join(
                    block.get("text", "")
                    for block in content_blocks
                    if block.get("type") == "text"
                )
        except httpx.HTTPStatusError as e:
            raise RuntimeError(
                f"Claude API error {e.response.status_code}: {e.response.text}"
            ) from e
        except httpx.RequestError as e:
            raise RuntimeError(f"Claude API request failed: {e}") from e


class OpenAICompatibleProvider(LLMProvider):
    """OpenAI Chat Completions-compatible provider.

    Used for OpenAI, OpenRouter, and any custom OpenAI-compatible endpoint.
    Set base_url to point to any compatible API.
    """

    DEFAULT_BASE_URL = "https://api.openai.com/v1"

    def __init__(
        self,
        api_key: str,
        model: str | None = None,
        base_url: str | None = None,
        default_model: str = "gpt-4o-mini",
    ):
        super().__init__(api_key, model or default_model)
        self.base_url = (base_url or self.DEFAULT_BASE_URL).rstrip("/")

    def generate(self, prompt: str, system: str = "") -> str:
        url = f"{self.base_url}/chat/completions"
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        messages: list[dict] = []
        if system:
            messages.append({"role": "system", "content": system})
        messages.append({"role": "user", "content": prompt})
        payload = {
            "model": self.model,
            "messages": messages,
            "max_tokens": 2048,
        }
        try:
            with httpx.Client(timeout=TIMEOUT) as client:
                response = client.post(url, headers=headers, json=payload)
                response.raise_for_status()
                data = response.json()
                return data["choices"][0]["message"]["content"]
        except httpx.HTTPStatusError as e:
            raise RuntimeError(
                f"API error {e.response.status_code}: {e.response.text}"
            ) from e
        except httpx.RequestError as e:
            raise RuntimeError(f"API request failed: {e}") from e
class GeminiProvider(LLMProvider):
    """Google Gemini provider via Generative Language API."""

    API_BASE = "https://generativelanguage.googleapis.com/v1beta/models"

    def __init__(self, api_key: str, model: str | None = None):
        super().__init__(api_key, model or DEFAULT_MODELS["gemini"])

    def generate(self, prompt: str, system: str = "") -> str:
        url = f"{self.API_BASE}/{self.model}:generateContent"
        params = {"key": self.api_key}
        headers = {"Content-Type": "application/json"}
        # This payload carries no separate system field, so any system prompt
        # is prepended to the user prompt.
        parts: list[dict] = []
        if system:
            parts.append({"text": f"{system}\n\n{prompt}"})
        else:
            parts.append({"text": prompt})
        payload = {
            "contents": [{"parts": parts}],
            "generationConfig": {"maxOutputTokens": 2048},
        }
        try:
            with httpx.Client(timeout=TIMEOUT) as client:
                response = client.post(url, params=params, headers=headers, json=payload)
                response.raise_for_status()
                data = response.json()
                candidates = data.get("candidates", [])
                if not candidates:
                    return ""
                content = candidates[0].get("content", {})
                parts_out = content.get("parts", [])
                return "".join(part.get("text", "") for part in parts_out)
        except httpx.HTTPStatusError as e:
            raise RuntimeError(
                f"Gemini API error {e.response.status_code}: {e.response.text}"
            ) from e
        except httpx.RequestError as e:
            raise RuntimeError(f"Gemini API request failed: {e}") from e


def get_llm_provider(
    provider_name: str,
    api_key: str,
    model: str | None = None,
    base_url: str | None = None,
) -> LLMProvider:
    """Factory: returns an LLMProvider instance.

    Args:
        provider_name: 'claude', 'openai', 'gemini', 'openrouter', or 'custom'.
        api_key: API key for the provider.
        model: Optional model override.
        base_url: Optional base URL override (used for 'openrouter' and 'custom').

    Returns:
        An LLMProvider instance ready to generate text.
    """
    name = provider_name.lower()
    if name == "claude":
        return ClaudeProvider(api_key=api_key, model=model)
    if name == "openai":
        return OpenAICompatibleProvider(
            api_key=api_key,
            model=model,
            base_url="https://api.openai.com/v1",
            default_model=DEFAULT_MODELS["openai"],
        )
    if name == "gemini":
        return GeminiProvider(api_key=api_key, model=model)
    if name == "openrouter":
        return OpenAICompatibleProvider(
            api_key=api_key,
            model=model,
            base_url=base_url or "https://openrouter.ai/api/v1",
            default_model=DEFAULT_MODELS["openrouter"],
        )
    if name == "custom":
        if not base_url:
            raise ValueError("Provider 'custom' richiede un base_url configurato nelle impostazioni.")
        return OpenAICompatibleProvider(
            api_key=api_key,
            model=model,
            base_url=base_url,
            default_model=model or "",
        )
    raise ValueError(
        f"Provider LLM '{provider_name}' non supportato. "
        f"Usa: claude, openai, gemini, openrouter, custom."
    )
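

# Minimal usage sketch of the factory (illustrative only; the key and model
# below are placeholders and would normally come from the Settings panel):
if __name__ == "__main__":
    provider = get_llm_provider(
        provider_name="openrouter",
        api_key="sk-or-...",     # placeholder key
        model="openai/gpt-4o-mini",
        base_url=None,           # falls back to https://openrouter.ai/api/v1
    )
    print(provider.generate(
        "Write a one-sentence caption for a coffee shop post.",
        system="You write short social media copy.",
    ))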