feat: Settings panel with 4 flexible sections + custom provider

- Settings redesign: Text, Images, Video, Voiceover as separate sections
- Each section gets a provider dropdown, an API key field, and an optional model field
- "Custom" option with a free-form Base URL field for any service
- LLM: added OpenRouter plus custom OpenAI-compatible providers
- Backend: OpenAICompatibleProvider unifies OpenAI/OpenRouter/custom
- Content router: passes llm_base_url to get_llm_provider (flow sketched after the first hunk below)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
@@ -63,7 +63,8 @@ def generate_content(request: GenerateContentRequest, db: Session = Depends(get_
     }
 
     # Create LLM provider and generate text
-    llm = get_llm_provider(provider_name, api_key, model)
+    base_url = _get_setting(db, "llm_base_url")
+    llm = get_llm_provider(provider_name, api_key, model, base_url=base_url)
     text = generate_post_text(
         character=char_dict,
         llm_provider=llm,
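For context, this router-side change wires a stored setting into provider construction. A minimal sketch of the resulting flow, assuming _get_setting returns None when the key is unset (the wrapper function below is hypothetical, for illustration only):

    # Hypothetical helper mirroring the router change above
    def build_llm(db, provider_name: str, api_key: str, model: str | None):
        base_url = _get_setting(db, "llm_base_url")  # None unless configured in Settings
        return get_llm_provider(provider_name, api_key, model, base_url=base_url)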
@@ -1,8 +1,8 @@
 """
 Multi-LLM abstraction layer.
 
-Supports Claude (Anthropic), OpenAI, and Gemini via direct HTTP calls using httpx.
-Each provider implements the same interface for text generation.
+Supports Claude (Anthropic), OpenAI, Gemini, OpenRouter, and any
+OpenAI-compatible custom endpoint via direct HTTP calls using httpx.
 """
 
 from abc import ABC, abstractmethod
@@ -14,6 +14,8 @@ DEFAULT_MODELS = {
     "claude": "claude-sonnet-4-20250514",
     "openai": "gpt-4o-mini",
     "gemini": "gemini-2.0-flash",
+    "openrouter": "openai/gpt-4o-mini",
+    "custom": "",
 }
 
 TIMEOUT = 60.0
@@ -28,15 +30,7 @@ class LLMProvider(ABC):
 
     @abstractmethod
     def generate(self, prompt: str, system: str = "") -> str:
-        """Generate text from a prompt.
-
-        Args:
-            prompt: The user prompt / message.
-            system: Optional system prompt for context and behavior.
-
-        Returns:
-            Generated text string.
-        """
+        """Generate text from a prompt."""
         ...
 
 
@@ -67,7 +61,6 @@ class ClaudeProvider(LLMProvider):
                 response = client.post(self.API_URL, headers=headers, json=payload)
                 response.raise_for_status()
                 data = response.json()
-                # Claude returns content as a list of content blocks
                 content_blocks = data.get("content", [])
                 return "".join(
                     block.get("text", "") for block in content_blocks if block.get("type") == "text"
@@ -80,15 +73,27 @@ class ClaudeProvider(LLMProvider):
             raise RuntimeError(f"Claude API request failed: {e}") from e
 
 
-class OpenAIProvider(LLMProvider):
-    """OpenAI provider via Chat Completions API."""
-
-    API_URL = "https://api.openai.com/v1/chat/completions"
-
-    def __init__(self, api_key: str, model: str | None = None):
-        super().__init__(api_key, model or DEFAULT_MODELS["openai"])
+class OpenAICompatibleProvider(LLMProvider):
+    """OpenAI Chat Completions-compatible provider.
+
+    Used for OpenAI, OpenRouter, and any custom OpenAI-compatible endpoint.
+    Set base_url to point to any compatible API.
+    """
+
+    DEFAULT_BASE_URL = "https://api.openai.com/v1"
+
+    def __init__(
+        self,
+        api_key: str,
+        model: str | None = None,
+        base_url: str | None = None,
+        default_model: str = "gpt-4o-mini",
+    ):
+        super().__init__(api_key, model or default_model)
+        self.base_url = (base_url or self.DEFAULT_BASE_URL).rstrip("/")
 
     def generate(self, prompt: str, system: str = "") -> str:
+        url = f"{self.base_url}/chat/completions"
         headers = {
             "Authorization": f"Bearer {self.api_key}",
             "Content-Type": "application/json",
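Because OpenAICompatibleProvider varies only in base_url and default model, pointing it at any other Chat Completions-compatible server is a plain constructor call. A sketch with a hypothetical local endpoint (the key, model name, and URL are placeholders, not part of this commit):

    llm = OpenAICompatibleProvider(
        api_key="sk-local",                   # placeholder key
        model="my-local-model",               # placeholder model name
        base_url="http://localhost:8080/v1",  # any Chat Completions-compatible API
    )
    print(llm.generate("Hello!"))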
@@ -106,16 +111,16 @@ class OpenAIProvider(LLMProvider):
 
         try:
             with httpx.Client(timeout=TIMEOUT) as client:
-                response = client.post(self.API_URL, headers=headers, json=payload)
+                response = client.post(url, headers=headers, json=payload)
                 response.raise_for_status()
                 data = response.json()
                 return data["choices"][0]["message"]["content"]
         except httpx.HTTPStatusError as e:
             raise RuntimeError(
-                f"OpenAI API error {e.response.status_code}: {e.response.text}"
+                f"API error {e.response.status_code}: {e.response.text}"
             ) from e
         except httpx.RequestError as e:
-            raise RuntimeError(f"OpenAI API request failed: {e}") from e
+            raise RuntimeError(f"API request failed: {e}") from e
 
 
 class GeminiProvider(LLMProvider):
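Note that both failure paths surface as RuntimeError, so callers see one failure mode regardless of backend. A hedged usage sketch (key and prompt are placeholders):

    llm = get_llm_provider("openai", api_key="sk-...")
    try:
        text = llm.generate("Write a short caption.", system="Be concise.")
    except RuntimeError as e:
        # Covers both HTTP status errors and transport errors
        print(f"LLM call failed: {e}")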
@@ -131,7 +136,6 @@ class GeminiProvider(LLMProvider):
         params = {"key": self.api_key}
         headers = {"Content-Type": "application/json"}
 
-        # Build contents; Gemini uses a parts-based structure
         parts: list[dict] = []
         if system:
             parts.append({"text": f"{system}\n\n{prompt}"})
@@ -140,9 +144,7 @@ class GeminiProvider(LLMProvider):
 
         payload = {
             "contents": [{"parts": parts}],
-            "generationConfig": {
-                "maxOutputTokens": 2048,
-            },
+            "generationConfig": {"maxOutputTokens": 2048},
         }
 
         try:
@@ -165,30 +167,57 @@ class GeminiProvider(LLMProvider):
 
 
 def get_llm_provider(
-    provider_name: str, api_key: str, model: str | None = None
+    provider_name: str,
+    api_key: str,
+    model: str | None = None,
+    base_url: str | None = None,
 ) -> LLMProvider:
-    """Factory function to get an LLM provider instance.
+    """Factory: returns an LLMProvider instance.
 
     Args:
-        provider_name: One of 'claude', 'openai', 'gemini'.
+        provider_name: 'claude', 'openai', 'gemini', 'openrouter', or 'custom'.
         api_key: API key for the provider.
-        model: Optional model override. Uses default if not specified.
+        model: Optional model override.
+        base_url: Optional base URL override (used for 'openrouter' and 'custom').
 
     Returns:
-        An LLMProvider instance.
-
-    Raises:
-        ValueError: If provider_name is not supported.
+        An LLMProvider instance ready to generate text.
     """
-    providers = {
-        "claude": ClaudeProvider,
-        "openai": OpenAIProvider,
-        "gemini": GeminiProvider,
-    }
-    provider_cls = providers.get(provider_name.lower())
-    if provider_cls is None:
-        supported = ", ".join(providers.keys())
-        raise ValueError(
-            f"Unknown LLM provider '{provider_name}'. Supported: {supported}"
-        )
-    return provider_cls(api_key=api_key, model=model)
+    name = provider_name.lower()
+
+    if name == "claude":
+        return ClaudeProvider(api_key=api_key, model=model)
+
+    if name == "openai":
+        return OpenAICompatibleProvider(
+            api_key=api_key,
+            model=model,
+            base_url="https://api.openai.com/v1",
+            default_model=DEFAULT_MODELS["openai"],
+        )
+
+    if name == "gemini":
+        return GeminiProvider(api_key=api_key, model=model)
+
+    if name == "openrouter":
+        return OpenAICompatibleProvider(
+            api_key=api_key,
+            model=model,
+            base_url=base_url or "https://openrouter.ai/api/v1",
+            default_model=DEFAULT_MODELS["openrouter"],
+        )
+
+    if name == "custom":
+        if not base_url:
+            raise ValueError("Provider 'custom' requires a base_url configured in Settings.")
+        return OpenAICompatibleProvider(
+            api_key=api_key,
+            model=model,
+            base_url=base_url,
+            default_model=model or "",
+        )
+
+    raise ValueError(
+        f"LLM provider '{provider_name}' is not supported. "
+        f"Use: claude, openai, gemini, openrouter, custom."
+    )
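Taken together, the factory gives callers a single entry point across all five provider names. A hedged sketch of the dispatch behavior (keys are placeholders):

    # Hosted providers fall back to DEFAULT_MODELS when model is omitted
    llm = get_llm_provider("openrouter", api_key="sk-or-...")

    # 'custom' requires base_url and raises ValueError otherwise
    try:
        get_llm_provider("custom", api_key="sk-...")
    except ValueError as e:
        print(e)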