diff --git a/backend/app/routers/content.py b/backend/app/routers/content.py index fcba744..3a26ae4 100644 --- a/backend/app/routers/content.py +++ b/backend/app/routers/content.py @@ -63,7 +63,8 @@ def generate_content(request: GenerateContentRequest, db: Session = Depends(get_ } # Create LLM provider and generate text - llm = get_llm_provider(provider_name, api_key, model) + base_url = _get_setting(db, "llm_base_url") + llm = get_llm_provider(provider_name, api_key, model, base_url=base_url) text = generate_post_text( character=char_dict, llm_provider=llm, diff --git a/backend/app/services/llm.py b/backend/app/services/llm.py index 5c050b9..a7b817f 100644 --- a/backend/app/services/llm.py +++ b/backend/app/services/llm.py @@ -1,8 +1,8 @@ """ Multi-LLM abstraction layer. -Supports Claude (Anthropic), OpenAI, and Gemini via direct HTTP calls using httpx. -Each provider implements the same interface for text generation. +Supports Claude (Anthropic), OpenAI, Gemini, OpenRouter, and any +OpenAI-compatible custom endpoint via direct HTTP calls using httpx. """ from abc import ABC, abstractmethod @@ -14,6 +14,8 @@ DEFAULT_MODELS = { "claude": "claude-sonnet-4-20250514", "openai": "gpt-4o-mini", "gemini": "gemini-2.0-flash", + "openrouter": "openai/gpt-4o-mini", + "custom": "", } TIMEOUT = 60.0 @@ -28,15 +30,7 @@ class LLMProvider(ABC): @abstractmethod def generate(self, prompt: str, system: str = "") -> str: - """Generate text from a prompt. - - Args: - prompt: The user prompt / message. - system: Optional system prompt for context and behavior. - - Returns: - Generated text string. - """ + """Generate text from a prompt.""" ... 
@@ -67,7 +61,6 @@ class ClaudeProvider(LLMProvider): response = client.post(self.API_URL, headers=headers, json=payload) response.raise_for_status() data = response.json() - # Claude returns content as a list of content blocks content_blocks = data.get("content", []) return "".join( block.get("text", "") for block in content_blocks if block.get("type") == "text" @@ -80,15 +73,27 @@ class ClaudeProvider(LLMProvider): raise RuntimeError(f"Claude API request failed: {e}") from e -class OpenAIProvider(LLMProvider): - """OpenAI provider via Chat Completions API.""" +class OpenAICompatibleProvider(LLMProvider): + """OpenAI Chat Completions-compatible provider. - API_URL = "https://api.openai.com/v1/chat/completions" + Used for OpenAI, OpenRouter, and any custom OpenAI-compatible endpoint. + Set base_url to point to any compatible API. + """ - def __init__(self, api_key: str, model: str | None = None): - super().__init__(api_key, model or DEFAULT_MODELS["openai"]) + DEFAULT_BASE_URL = "https://api.openai.com/v1" + + def __init__( + self, + api_key: str, + model: str | None = None, + base_url: str | None = None, + default_model: str = "gpt-4o-mini", + ): + super().__init__(api_key, model or default_model) + self.base_url = (base_url or self.DEFAULT_BASE_URL).rstrip("/") def generate(self, prompt: str, system: str = "") -> str: + url = f"{self.base_url}/chat/completions" headers = { "Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json", @@ -106,16 +111,16 @@ class OpenAIProvider(LLMProvider): try: with httpx.Client(timeout=TIMEOUT) as client: - response = client.post(self.API_URL, headers=headers, json=payload) + response = client.post(url, headers=headers, json=payload) response.raise_for_status() data = response.json() return data["choices"][0]["message"]["content"] except httpx.HTTPStatusError as e: raise RuntimeError( - f"OpenAI API error {e.response.status_code}: {e.response.text}" + f"API error {e.response.status_code}: {e.response.text}" ) 
from e except httpx.RequestError as e: - raise RuntimeError(f"OpenAI API request failed: {e}") from e + raise RuntimeError(f"API request failed: {e}") from e class GeminiProvider(LLMProvider): @@ -131,7 +136,6 @@ class GeminiProvider(LLMProvider): params = {"key": self.api_key} headers = {"Content-Type": "application/json"} - # Build contents; Gemini uses a parts-based structure parts: list[dict] = [] if system: parts.append({"text": f"{system}\n\n{prompt}"}) @@ -140,9 +144,7 @@ class GeminiProvider(LLMProvider): payload = { "contents": [{"parts": parts}], - "generationConfig": { - "maxOutputTokens": 2048, - }, + "generationConfig": {"maxOutputTokens": 2048}, } try: @@ -165,30 +167,57 @@ class GeminiProvider(LLMProvider): def get_llm_provider( - provider_name: str, api_key: str, model: str | None = None + provider_name: str, + api_key: str, + model: str | None = None, + base_url: str | None = None, ) -> LLMProvider: - """Factory function to get an LLM provider instance. + """Factory: returns an LLMProvider instance. Args: - provider_name: One of 'claude', 'openai', 'gemini'. + provider_name: 'claude', 'openai', 'gemini', 'openrouter', or 'custom'. api_key: API key for the provider. - model: Optional model override. Uses default if not specified. + model: Optional model override. + base_url: Optional base URL override (used for 'openrouter' and 'custom'). Returns: - An LLMProvider instance. - - Raises: - ValueError: If provider_name is not supported. + An LLMProvider instance ready to generate text. """ - providers = { - "claude": ClaudeProvider, - "openai": OpenAIProvider, - "gemini": GeminiProvider, - } - provider_cls = providers.get(provider_name.lower()) - if provider_cls is None: - supported = ", ".join(providers.keys()) - raise ValueError( - f"Unknown LLM provider '{provider_name}'. 
Supported: {supported}" + name = provider_name.lower() + + if name == "claude": + return ClaudeProvider(api_key=api_key, model=model) + + if name == "openai": + return OpenAICompatibleProvider( + api_key=api_key, + model=model, + base_url="https://api.openai.com/v1", + default_model=DEFAULT_MODELS["openai"], ) - return provider_cls(api_key=api_key, model=model) + + if name == "gemini": + return GeminiProvider(api_key=api_key, model=model) + + if name == "openrouter": + return OpenAICompatibleProvider( + api_key=api_key, + model=model, + base_url=base_url or "https://openrouter.ai/api/v1", + default_model=DEFAULT_MODELS["openrouter"], + ) + + if name == "custom": + if not base_url: + raise ValueError("Provider 'custom' requires a base_url configured in settings.") + return OpenAICompatibleProvider( + api_key=api_key, + model=model, + base_url=base_url, + default_model=model or "", + ) + + raise ValueError( + f"Unknown LLM provider '{provider_name}'. " + f"Supported: claude, openai, gemini, openrouter, custom."
+ ) diff --git a/frontend/src/components/SettingsPage.jsx b/frontend/src/components/SettingsPage.jsx index 3c89798..7830eea 100644 --- a/frontend/src/components/SettingsPage.jsx +++ b/frontend/src/components/SettingsPage.jsx @@ -1,31 +1,46 @@ import { useState, useEffect } from 'react' import { api } from '../api' -const LLM_PROVIDERS = [ - { value: 'claude', label: 'Claude (Anthropic)' }, - { value: 'openai', label: 'OpenAI' }, - { value: 'gemini', label: 'Gemini (Google)' }, +// ─── Provider catalogs ──────────────────────────────────────────────────────── + +const TEXT_PROVIDERS = [ + { value: 'claude', label: 'Claude (Anthropic)', defaultModel: 'claude-sonnet-4-20250514', needsBaseUrl: false }, + { value: 'openai', label: 'OpenAI', defaultModel: 'gpt-4o-mini', needsBaseUrl: false }, + { value: 'gemini', label: 'Gemini (Google)', defaultModel: 'gemini-2.0-flash', needsBaseUrl: false }, + { value: 'openrouter', label: 'OpenRouter', defaultModel: 'openai/gpt-4o-mini', needsBaseUrl: false }, + { value: 'custom', label: 'Personalizzato (custom)', defaultModel: '', needsBaseUrl: true }, ] const IMAGE_PROVIDERS = [ - { value: 'dalle', label: 'DALL-E (OpenAI)' }, - { value: 'replicate', label: 'Replicate' }, + { value: 'dalle', label: 'DALL-E (OpenAI)', needsBaseUrl: false }, + { value: 'replicate', label: 'Replicate', needsBaseUrl: false }, + { value: 'wavespeed', label: 'WaveSpeed', needsBaseUrl: false }, + { value: 'custom', label: 'Personalizzato (custom)', needsBaseUrl: true }, ] -const LLM_DEFAULTS = { - claude: 'claude-sonnet-4-20250514', - openai: 'gpt-4o', - gemini: 'gemini-2.0-flash', -} +const VIDEO_PROVIDERS = [ + { value: 'wavespeed', label: 'WaveSpeed', needsBaseUrl: false }, + { value: 'replicate', label: 'Replicate', needsBaseUrl: false }, + { value: 'custom', label: 'Personalizzato (custom)', needsBaseUrl: true }, +] -const cardStyle = { +const VOICE_PROVIDERS = [ + { value: 'elevenlabs', label: 'ElevenLabs', needsBaseUrl: false }, + { value: 
'openai_tts', label: 'OpenAI TTS', needsBaseUrl: false }, + { value: 'wavespeed', label: 'WaveSpeed', needsBaseUrl: false }, + { value: 'custom', label: 'Personalizzato (custom)', needsBaseUrl: true }, +] + +// ─── Styles ─────────────────────────────────────────────────────────────────── + +const card = { backgroundColor: 'var(--surface)', border: '1px solid var(--border)', borderRadius: '0.75rem', padding: '1.5rem', } -const inputStyle = { +const input = { width: '100%', padding: '0.625rem 1rem', border: '1px solid var(--border)', @@ -34,98 +49,206 @@ const inputStyle = { color: 'var(--ink)', backgroundColor: 'var(--cream)', outline: 'none', + boxSizing: 'border-box', } -export default function SettingsPage() { - const [settings, setSettings] = useState({}) - const [providerStatus, setProviderStatus] = useState({}) - const [loading, setLoading] = useState(true) - const [sectionSaving, setSectionSaving] = useState({}) - const [sectionSuccess, setSectionSuccess] = useState({}) - const [sectionError, setSectionError] = useState({}) +// ─── Section component ──────────────────────────────────────────────────────── - const [llmForm, setLlmForm] = useState({ +function ProviderSection({ title, icon, description, providers, settingKeys, values, onChange, onSave, saving, success, error }) { + const { providerKey, apiKeyKey, modelKey, baseUrlKey, extraKey, extraLabel, extraPlaceholder } = settingKeys + const currentProvider = providers.find(p => p.value === values[providerKey]) || providers[0] + const showModel = modelKey != null + const showBaseUrl = currentProvider.needsBaseUrl && baseUrlKey + + return ( +
+
+ {icon} +
+

+ {title} +

+

{description}

+
+
+ + {error && ( +
+ {error} +
+ )} + {success && ( +
+ Salvato con successo +
+ )} + + {/* Provider dropdown */} +
+ + +
+ + {/* Custom base URL (shown only for 'custom' provider) */} + {showBaseUrl && ( +
+ + onChange(baseUrlKey, e.target.value)} + placeholder="https://..." + style={{ ...input, fontFamily: 'monospace' }} + /> +
+ )} + + {/* API Key */} +
+ + onChange(apiKeyKey, e.target.value)} + placeholder="Inserisci la tua API key..." + style={{ ...input, fontFamily: 'monospace' }} + /> +
+ + {/* Model (optional, only for text/video providers) */} + {showModel && ( +
+ + onChange(modelKey, e.target.value)} + placeholder={currentProvider.defaultModel || 'nome-modello'} + style={{ ...input, fontFamily: 'monospace' }} + /> +
+ )} + + {/* Extra field (es. Voice ID per ElevenLabs) */} + {extraKey && ( +
+ + onChange(extraKey, e.target.value)} + placeholder={extraPlaceholder} + style={input} + /> +
+ )} + + +
+ ) +} + +// ─── Main component ─────────────────────────────────────────────────────────── + +export default function SettingsPage() { + const [values, setValues] = useState({ + // Text llm_provider: 'claude', llm_api_key: '', llm_model: '', - }) - const [imageForm, setImageForm] = useState({ + llm_base_url: '', + // Image image_provider: 'dalle', image_api_key: '', - }) - const [voiceForm, setVoiceForm] = useState({ - elevenlabs_api_key: '', + image_base_url: '', + // Video + video_provider: 'wavespeed', + video_api_key: '', + video_model: '', + video_base_url: '', + // Voice + voice_provider: 'elevenlabs', + voice_api_key: '', + voice_base_url: '', elevenlabs_voice_id: '', }) - useEffect(() => { - loadSettings() - }, []) + const [loading, setLoading] = useState(true) + const [saving, setSaving] = useState({}) + const [success, setSuccess] = useState({}) + const [errors, setErrors] = useState({}) + + useEffect(() => { loadSettings() }, []) const loadSettings = async () => { setLoading(true) try { - const [settingsData, statusData] = await Promise.all([ - api.get('/settings/').catch(() => ({})), - api.get('/settings/providers/status').catch(() => ({})), - ]) - - let normalizedSettings = {} - if (Array.isArray(settingsData)) { - settingsData.forEach((s) => { normalizedSettings[s.key] = s.value }) - } else { - normalizedSettings = settingsData || {} + const data = await api.get('/settings/').catch(() => []) + const normalized = {} + if (Array.isArray(data)) { + data.forEach(s => { normalized[s.key] = s.value }) } - - setSettings(normalizedSettings) - setProviderStatus(statusData || {}) - - setLlmForm({ - llm_provider: normalizedSettings.llm_provider || 'claude', - llm_api_key: normalizedSettings.llm_api_key || '', - llm_model: normalizedSettings.llm_model || '', - }) - setImageForm({ - image_provider: normalizedSettings.image_provider || 'dalle', - image_api_key: normalizedSettings.image_api_key || '', - }) - setVoiceForm({ - elevenlabs_api_key: 
normalizedSettings.elevenlabs_api_key || '', - elevenlabs_voice_id: normalizedSettings.elevenlabs_voice_id || '', - }) - } catch { - // silent + setValues(prev => ({ ...prev, ...normalized })) } finally { setLoading(false) } } - const saveSection = async (section, data) => { - setSectionSaving((prev) => ({ ...prev, [section]: true })) - setSectionSuccess((prev) => ({ ...prev, [section]: false })) - setSectionError((prev) => ({ ...prev, [section]: '' })) + const handleChange = (key, value) => { + setValues(prev => ({ ...prev, [key]: value })) + } + const saveSection = async (section, keys) => { + setSaving(prev => ({ ...prev, [section]: true })) + setSuccess(prev => ({ ...prev, [section]: false })) + setErrors(prev => ({ ...prev, [section]: '' })) try { - for (const [key, value] of Object.entries(data)) { - await api.put(`/settings/${key}`, { value }) + for (const key of keys) { + if (values[key] !== undefined) { + await api.put(`/settings/${key}`, { value: values[key] }) + } } - setSectionSuccess((prev) => ({ ...prev, [section]: true })) - setTimeout(() => { - setSectionSuccess((prev) => ({ ...prev, [section]: false })) - }, 3000) - const statusData = await api.get('/settings/providers/status').catch(() => ({})) - setProviderStatus(statusData || {}) + setSuccess(prev => ({ ...prev, [section]: true })) + setTimeout(() => setSuccess(prev => ({ ...prev, [section]: false })), 3000) } catch (err) { - setSectionError((prev) => ({ ...prev, [section]: err.message || 'Errore nel salvataggio' })) + setErrors(prev => ({ ...prev, [section]: err.message || 'Errore nel salvataggio' })) } finally { - setSectionSaving((prev) => ({ ...prev, [section]: false })) + setSaving(prev => ({ ...prev, [section]: false })) } } if (loading) { return (
-

Impostazioni

+

+ Impostazioni +

@@ -135,152 +258,98 @@ export default function SettingsPage() { return (
-
-

Impostazioni

+
+

+ Impostazioni +

- Configurazione dei provider AI e dei servizi esterni + Scegli il provider per ogni tipo di output. Usa "Personalizzato" per collegare qualsiasi servizio compatibile.

- {/* LLM Provider */} -
-

- Provider LLM -

- {sectionError.llm &&
{sectionError.llm}
} - {sectionSuccess.llm &&
Salvato con successo
} -
- - -
+ {/* TEXT */} + saveSection('text', ['llm_provider', 'llm_api_key', 'llm_model', 'llm_base_url'])} + saving={saving.text} + success={success.text} + error={errors.text} + /> -
- - setLlmForm((prev) => ({ ...prev, llm_api_key: e.target.value }))} - placeholder="sk-..." - style={{ ...inputStyle, fontFamily: 'monospace' }} - /> -
+ {/* IMAGE */} + saveSection('image', ['image_provider', 'image_api_key', 'image_base_url'])} + saving={saving.image} + success={success.image} + error={errors.image} + /> -
- - setLlmForm((prev) => ({ ...prev, llm_model: e.target.value }))} - placeholder={LLM_DEFAULTS[llmForm.llm_provider]} - style={{ ...inputStyle, fontFamily: 'monospace' }} - /> -
+ {/* VIDEO */} + saveSection('video', ['video_provider', 'video_api_key', 'video_model', 'video_base_url'])} + saving={saving.video} + success={success.video} + error={errors.video} + /> - -
+ {/* VOICE */} + saveSection('voice', ['voice_provider', 'voice_api_key', 'voice_base_url', 'elevenlabs_voice_id'])} + saving={saving.voice} + success={success.voice} + error={errors.voice} + /> - {/* Image Provider */} -
-

- Generazione Immagini -

- {sectionError.image &&
{sectionError.image}
} - {sectionSuccess.image &&
Salvato con successo
} - -
- - -
- -
- - setImageForm((prev) => ({ ...prev, image_api_key: e.target.value }))} - placeholder="API key del provider immagini" - style={{ ...inputStyle, fontFamily: 'monospace' }} - /> -
- - -
- - {/* Voiceover */} -
-

- Voiceover (ElevenLabs) -

- {sectionError.voice &&
{sectionError.voice}
} - {sectionSuccess.voice &&
Salvato con successo
} - -
- - setVoiceForm((prev) => ({ ...prev, elevenlabs_api_key: e.target.value }))} - placeholder="ElevenLabs API key" - style={{ ...inputStyle, fontFamily: 'monospace' }} - /> -
- -
- - setVoiceForm((prev) => ({ ...prev, elevenlabs_voice_id: e.target.value }))} - placeholder="ID della voce ElevenLabs" - style={inputStyle} - /> -
- - -
)