Initial commit: Leopost Full — merge di Leopost, Post Generator e Autopilot OS
- Backend FastAPI con multi-LLM (Claude/OpenAI/Gemini) - Publishing su Facebook, Instagram, YouTube, TikTok - Calendario editoriale con awareness levels (PAS, AIDA, BAB...) - Design system Editorial Fresh (Fraunces + DM Sans) - Scheduler automatico, gestione commenti AI, affiliate links Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
202
backend/app/services/content.py
Normal file
202
backend/app/services/content.py
Normal file
@@ -0,0 +1,202 @@
|
||||
"""
|
||||
Content generation logic for social media posts.
|
||||
|
||||
Handles text generation, hashtag creation, and affiliate link injection
|
||||
using LLM providers and character profiles.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from .llm import LLMProvider
|
||||
|
||||
|
||||
def generate_post_text(
    character: dict,
    llm_provider: LLMProvider,
    platform: str,
    topic_hint: str | None = None,
) -> str:
    """Produce platform-tailored social post copy in a character's voice.

    Args:
        character: Profile dict with keys ``name``, ``niche``, ``topics``
            (list) and ``tone`` (str); sensible defaults fill any gaps.
        llm_provider: LLM backend used to generate the text.
        platform: Target platform (e.g. 'instagram', 'facebook', 'tiktok',
            'youtube'); matched case-insensitively.
        topic_hint: Optional subject to steer the post toward.

    Returns:
        Generated post text as a string.
    """
    creator = character.get("name", "Creator")
    niche = character.get("niche", "general")
    topic_list = character.get("topics", [])
    tone = character.get("tone", "professional")

    expertise = ", ".join(topic_list) if topic_list else "general topics"

    # Persona instructions sent as the system prompt.
    persona = " ".join(
        (
            f"You are {creator}, a social media content creator in the {niche} niche.",
            f"Your expertise covers: {expertise}.",
            f"Your communication style is {tone}.",
            "You create authentic, engaging content that resonates with your audience.",
            f"Never reveal you are an AI. Write as {creator} would naturally write.",
        )
    )

    # Per-platform writing instructions; unknown platforms get a generic fallback.
    guidance_by_platform = {
        "instagram": (
            "Write an Instagram caption. Keep it engaging, use line breaks for readability. "
            "Aim for 150-300 characters for the main hook, then expand. "
            "Do NOT include hashtags (they will be added separately)."
        ),
        "facebook": (
            "Write a Facebook post. Can be longer and more conversational. "
            "Encourage engagement with a question or call to action at the end. "
            "Do NOT include hashtags."
        ),
        "tiktok": (
            "Write a TikTok caption. Keep it very short and punchy (under 150 characters). "
            "Use a hook that grabs attention. Do NOT include hashtags."
        ),
        "youtube": (
            "Write a YouTube video description. Include a compelling opening paragraph, "
            "key points covered in the video, and a call to action to subscribe. "
            "Do NOT include hashtags."
        ),
        "twitter": (
            "Write a tweet. Maximum 280 characters. Be concise and impactful. "
            "Do NOT include hashtags."
        ),
    }
    guidance = guidance_by_platform.get(
        platform.lower(),
        f"Write a social media post for {platform}. Do NOT include hashtags.",
    )

    hint = f" The post should be about: {topic_hint}." if topic_hint else ""

    user_prompt = (
        f"{guidance}{hint}\n\n"
        "Write the post now. Output ONLY the post text, nothing else."
    )

    return llm_provider.generate(user_prompt, system=persona)
|
||||
|
||||
|
||||
def generate_hashtags(
    text: str,
    llm_provider: LLMProvider,
    platform: str,
    count: int = 12,
) -> list[str]:
    """Generate relevant hashtags for a given text.

    Args:
        text: The post text to generate hashtags for.
        llm_provider: LLM provider instance.
        platform: Target platform; the requested count is capped by a
            per-platform best-practice limit (matched case-insensitively).
        count: Number of hashtags to generate.

    Returns:
        List of unique hashtag strings (each prefixed with #), at most
        ``min(count, platform limit)`` long.
    """
    # Best-practice hashtag ceilings per platform; unknown platforms keep `count`.
    platform_limits = {
        "instagram": 30,
        "tiktok": 5,
        "twitter": 3,
        "facebook": 5,
        "youtube": 15,
    }
    max_tags = min(count, platform_limits.get(platform.lower(), count))

    system_prompt = (
        "You are a social media hashtag strategist. You generate relevant, "
        "effective hashtags that maximize reach and engagement."
    )

    prompt = (
        f"Generate exactly {max_tags} hashtags for the following {platform} post.\n\n"
        f"Post text:\n{text}\n\n"
        f"Rules:\n"
        f"- Mix popular (high reach) and niche (targeted) hashtags\n"
        f"- Each hashtag must start with #\n"
        f"- No spaces within hashtags, use CamelCase for multi-word\n"
        f"- Output ONLY the hashtags, one per line, nothing else"
    )

    result = llm_provider.generate(prompt, system=system_prompt)

    # Parse hashtags from the response: one per line, normalized to start
    # with '#', deduplicated, degenerate output dropped.
    hashtags: list[str] = []
    seen: set[str] = set()
    for line in result.strip().splitlines():
        tag = line.strip()
        if not tag:
            continue
        # Ensure it starts with #
        if not tag.startswith("#"):
            tag = f"#{tag}"
        # Take only the first whitespace-delimited token if the model
        # appended extra text after the hashtag.
        tag = tag.split()[0]
        # Bug fix: a line like "# tag" collapses to a bare "#" — skip it.
        if len(tag) < 2:
            continue
        # Bug fix: drop duplicates case-insensitively, preserving order.
        key = tag.lower()
        if key in seen:
            continue
        seen.add(key)
        hashtags.append(tag)

    return hashtags[:max_tags]
|
||||
|
||||
|
||||
def inject_affiliate_links(
    text: str,
    affiliate_links: list[dict],
    topics: list[str],
) -> tuple[str, list[dict]]:
    """Append the most relevant affiliate links to a post's text.

    Relevance is the size of the case-insensitive overlap between a link's
    ``keywords`` and the post's ``topics``; the two highest-scoring links
    (ties broken by input order) are appended at the end of the text.

    Args:
        text: Original post text.
        affiliate_links: List of dicts, each with keys:
            - url (str): The affiliate URL
            - label (str): Display text for the link
            - keywords (list[str]): Topic keywords this link is relevant for
        topics: Current post topics to match against.

    Returns:
        Tuple of (modified_text, links_used) where links_used is the list
        of affiliate link dicts that were injected.
    """
    if not affiliate_links or not topics:
        return text, []

    # Case-insensitive topic set for matching.
    wanted = {topic.lower() for topic in topics}

    # Pair each link with its keyword-overlap score, keeping only matches.
    ranked: list[tuple[int, dict]] = []
    for link in affiliate_links:
        score = len(wanted & {kw.lower() for kw in link.get("keywords", [])})
        if score:
            ranked.append((score, link))

    if not ranked:
        return text, []

    # Stable descending sort keeps input order among equal scores; take top 2.
    best = sorted(ranked, key=lambda pair: pair[0], reverse=True)[:2]
    chosen = [link for _, link in best]

    footer = "\n".join(
        f"{link.get('label', 'Check this out')}: {link.get('url', '')}"
        for link in chosen
    )
    return f"{text}\n\n{footer}", chosen
|
||||
Reference in New Issue
Block a user