Initial commit: Leopost Full - merge of Leopost, Post Generator, and Autopilot OS

- FastAPI backend with multi-LLM support (Claude/OpenAI/Gemini)
- Publishing to Facebook, Instagram, YouTube, TikTok
- Editorial calendar with awareness levels (PAS, AIDA, BAB...)
- Editorial Fresh design system (Fraunces + DM Sans)
- Automatic scheduler, AI comment management, affiliate links

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
Michele committed on 2026-03-31 17:23:16 +02:00
commit 519a580679
58 changed files with 8348 additions and 0 deletions

backend/app/services/llm.py

@@ -0,0 +1,194 @@
"""
Multi-LLM abstraction layer.
Supports Claude (Anthropic), OpenAI, and Gemini via direct HTTP calls using httpx.
Each provider implements the same interface for text generation.
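
Example (illustrative only; the key is a placeholder):
    provider = get_llm_provider("claude", api_key="...")
    text = provider.generate("Draft a post", system="You are a copywriter.")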
"""
from abc import ABC, abstractmethod

import httpx

# Default models per provider
DEFAULT_MODELS = {
    "claude": "claude-sonnet-4-20250514",
    "openai": "gpt-4o-mini",
    "gemini": "gemini-2.0-flash",
}

TIMEOUT = 60.0


class LLMProvider(ABC):
    """Abstract base class for LLM providers."""

    def __init__(self, api_key: str, model: str | None = None):
        self.api_key = api_key
        self.model = model

    @abstractmethod
    def generate(self, prompt: str, system: str = "") -> str:
        """Generate text from a prompt.

        Args:
            prompt: The user prompt / message.
            system: Optional system prompt for context and behavior.

        Returns:
            Generated text string.
        """
        ...


class ClaudeProvider(LLMProvider):
    """Anthropic Claude provider via Messages API."""

    API_URL = "https://api.anthropic.com/v1/messages"

    def __init__(self, api_key: str, model: str | None = None):
        super().__init__(api_key, model or DEFAULT_MODELS["claude"])

    def generate(self, prompt: str, system: str = "") -> str:
        headers = {
            "x-api-key": self.api_key,
            "anthropic-version": "2023-06-01",
            "content-type": "application/json",
        }
        payload: dict = {
            "model": self.model,
            "max_tokens": 2048,
            "messages": [{"role": "user", "content": prompt}],
        }
        if system:
            payload["system"] = system
        try:
            with httpx.Client(timeout=TIMEOUT) as client:
                response = client.post(self.API_URL, headers=headers, json=payload)
                response.raise_for_status()
                data = response.json()
                # Claude returns content as a list of content blocks
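                # e.g. {"content": [{"type": "text", "text": "..."}], ...}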
                content_blocks = data.get("content", [])
                return "".join(
                    block.get("text", "")
                    for block in content_blocks
                    if block.get("type") == "text"
                )
        except httpx.HTTPStatusError as e:
            raise RuntimeError(
                f"Claude API error {e.response.status_code}: {e.response.text}"
            ) from e
        except httpx.RequestError as e:
            raise RuntimeError(f"Claude API request failed: {e}") from e


class OpenAIProvider(LLMProvider):
    """OpenAI provider via Chat Completions API."""

    API_URL = "https://api.openai.com/v1/chat/completions"

    def __init__(self, api_key: str, model: str | None = None):
        super().__init__(api_key, model or DEFAULT_MODELS["openai"])

    def generate(self, prompt: str, system: str = "") -> str:
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        messages: list[dict] = []
        if system:
            messages.append({"role": "system", "content": system})
        messages.append({"role": "user", "content": prompt})
        payload = {
            "model": self.model,
            "messages": messages,
            "max_tokens": 2048,
        }
        try:
            with httpx.Client(timeout=TIMEOUT) as client:
                response = client.post(self.API_URL, headers=headers, json=payload)
                response.raise_for_status()
                data = response.json()
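                # Chat Completions shape: {"choices": [{"message": {"content": "..."}}]}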
                return data["choices"][0]["message"]["content"]
        except httpx.HTTPStatusError as e:
            raise RuntimeError(
                f"OpenAI API error {e.response.status_code}: {e.response.text}"
            ) from e
        except httpx.RequestError as e:
            raise RuntimeError(f"OpenAI API request failed: {e}") from e


class GeminiProvider(LLMProvider):
    """Google Gemini provider via Generative Language API."""

    API_BASE = "https://generativelanguage.googleapis.com/v1beta/models"

    def __init__(self, api_key: str, model: str | None = None):
        super().__init__(api_key, model or DEFAULT_MODELS["gemini"])

    def generate(self, prompt: str, system: str = "") -> str:
        url = f"{self.API_BASE}/{self.model}:generateContent"
        params = {"key": self.api_key}
        headers = {"Content-Type": "application/json"}
        # Build contents; Gemini uses a parts-based structure. The v1beta API
        # also accepts a top-level systemInstruction field, but here the system
        # text is simply prepended to the prompt.
        parts: list[dict] = []
        if system:
            parts.append({"text": f"{system}\n\n{prompt}"})
        else:
            parts.append({"text": prompt})
        payload = {
            "contents": [{"parts": parts}],
            "generationConfig": {
                "maxOutputTokens": 2048,
            },
        }
        try:
            with httpx.Client(timeout=TIMEOUT) as client:
                response = client.post(url, params=params, headers=headers, json=payload)
                response.raise_for_status()
                data = response.json()
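                # Response shape: {"candidates": [{"content": {"parts": [{"text": "..."}]}}]}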
                candidates = data.get("candidates", [])
                if not candidates:
                    return ""
                content = candidates[0].get("content", {})
                parts_out = content.get("parts", [])
                return "".join(part.get("text", "") for part in parts_out)
        except httpx.HTTPStatusError as e:
            raise RuntimeError(
                f"Gemini API error {e.response.status_code}: {e.response.text}"
            ) from e
        except httpx.RequestError as e:
            raise RuntimeError(f"Gemini API request failed: {e}") from e


def get_llm_provider(
    provider_name: str, api_key: str, model: str | None = None
) -> LLMProvider:
    """Factory function to get an LLM provider instance.

    Args:
        provider_name: One of 'claude', 'openai', 'gemini'.
        api_key: API key for the provider.
        model: Optional model override. Uses default if not specified.

    Returns:
        An LLMProvider instance.

    Raises:
        ValueError: If provider_name is not supported.
    """
    providers = {
        "claude": ClaudeProvider,
        "openai": OpenAIProvider,
        "gemini": GeminiProvider,
    }
    provider_cls = providers.get(provider_name.lower())
    if provider_cls is None:
        supported = ", ".join(providers.keys())
        raise ValueError(
            f"Unknown LLM provider '{provider_name}'. Supported: {supported}"
        )
    return provider_cls(api_key=api_key, model=model)
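
A quick usage sketch of the factory above (illustrative only: the key and prompts are placeholders, and the import path assumes the backend package root; callers would normally read the key from configuration):

    from app.services.llm import get_llm_provider

    provider = get_llm_provider("gemini", api_key="YOUR_API_KEY")
    post = provider.generate(
        "Write a short launch post for Leopost.",
        system="You are a social media copywriter.",
    )
    print(post)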