feat(db): migrate to PostgreSQL 16 standalone

- docker-compose.prod.yml: add postgres:16-alpine service with health check,
  dedicated prod_leopost_net, backup volume mount, connection pool
- requirements.txt: add psycopg2-binary==2.9.9
- database.py: remove SQLite-specific run_migrations(), add PG pool_size/
  max_overflow/pool_pre_ping, keep sqlite compat for dev
- main.py: remove run_migrations call, rely on create_all for PG
- scripts/migrate_sqlite_to_pg.py: one-shot data migration script

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
Author: Michele · 2026-04-01 17:11:35 +02:00
parent b38419f3ee · commit cc1cb2d02a
5 changed files with 175 additions and 74 deletions

View File

@@ -1,13 +1,21 @@
from sqlalchemy import create_engine, text
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, declarative_base
from .config import settings
# Engine selection by URL scheme:
#   - SQLite (dev/test): needs check_same_thread=False because the web
#     framework may use a session from a thread other than the creator's.
#   - PostgreSQL (prod): explicit connection pool; pre-ping validates a
#     connection before checkout so stale/dropped connections are replaced.
# NOTE: the rendered diff also showed the old unconditional
# `engine = create_engine(settings.database_url, connect_args=connect_args)`
# after the if/else, which would have discarded the pooled engine — removed.
connect_args = {}
if settings.database_url.startswith("sqlite"):
    connect_args["check_same_thread"] = False
    engine = create_engine(settings.database_url, connect_args=connect_args)
else:
    engine = create_engine(
        settings.database_url,
        pool_size=10,        # steady-state pooled connections
        max_overflow=20,     # extra connections allowed under burst load
        pool_pre_ping=True,  # test connections before handing them out
    )
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
@@ -18,69 +26,3 @@ def get_db():
yield db
finally:
db.close()
def run_migrations(engine):
    """SQLite-safe migration: add new columns if they don't exist.

    Idempotent: each run inspects the live schema via PRAGMA and issues
    ALTER TABLE only for columns that are missing.  Also rebuilds
    system_settings once to drop the UNIQUE constraint on ``key`` so the
    same key may exist for different users.

    Args:
        engine: SQLAlchemy engine bound to the SQLite database file.
    """
    # Columns introduced after the initial schema, keyed by table name.
    migrations = {
        "users": [
            ("email", "VARCHAR"),
            ("display_name", "VARCHAR"),
            ("avatar_url", "VARCHAR"),
            ("auth_provider", "VARCHAR DEFAULT 'local'"),
            ("google_id", "VARCHAR"),
            ("subscription_plan", "VARCHAR DEFAULT 'freemium'"),
            ("subscription_expires_at", "DATETIME"),
            ("is_admin", "BOOLEAN DEFAULT 0"),
            ("posts_generated_this_month", "INTEGER DEFAULT 0"),
            ("posts_reset_date", "DATE"),
        ],
        "characters": [("user_id", "INTEGER")],
        "posts": [("user_id", "INTEGER")],
        "affiliate_links": [("user_id", "INTEGER")],
        "editorial_plans": [("user_id", "INTEGER")],
        "social_accounts": [("user_id", "INTEGER")],
        "system_settings": [("user_id", "INTEGER")],
    }
    with engine.connect() as conn:
        for table, cols in migrations.items():
            try:
                # PRAGMA table_info rows are (cid, name, type, ...): index 1 is the column name.
                existing = {row[1] for row in conn.execute(text(f"PRAGMA table_info({table})"))}
                for col_name, col_def in cols:
                    if col_name not in existing:
                        conn.execute(text(f"ALTER TABLE {table} ADD COLUMN {col_name} {col_def}"))
                conn.commit()
            except Exception as e:
                # Best-effort: a failure on one table must not block the others.
                print(f"Migration warning for {table}: {e}")
        # Fix system_settings: remove UNIQUE constraint on 'key' by recreating the table.
        # This allows per-user settings (same key, different user_id).
        try:
            # Check the table's creation SQL for a UNIQUE clause.  (The old
            # PRAGMA index_list probe computed a flag that was never used —
            # dead code, removed.)
            create_sql_row = conn.execute(text(
                "SELECT sql FROM sqlite_master WHERE type='table' AND name='system_settings'"
            )).fetchone()
            if create_sql_row and "UNIQUE" in (create_sql_row[0] or "").upper():
                # SQLite cannot drop a constraint in place:
                # rename -> recreate without UNIQUE -> copy rows -> drop old.
                conn.execute(text("ALTER TABLE system_settings RENAME TO system_settings_old"))
                conn.execute(text("""
                    CREATE TABLE system_settings (
                        id INTEGER PRIMARY KEY,
                        key VARCHAR(100) NOT NULL,
                        value JSON,
                        updated_at DATETIME,
                        user_id INTEGER REFERENCES users(id)
                    )
                """))
                conn.execute(text("INSERT INTO system_settings SELECT id, key, value, updated_at, user_id FROM system_settings_old"))
                conn.execute(text("DROP TABLE system_settings_old"))
                conn.commit()
                print("Migration: system_settings UNIQUE constraint on key removed.")
        except Exception as e:
            print(f"Migration warning for system_settings UNIQUE fix: {e}")

View File

@@ -17,7 +17,7 @@ from fastapi.staticfiles import StaticFiles
from .auth import hash_password
from .config import settings
from .database import Base, SessionLocal, engine, run_migrations
from .database import Base, SessionLocal, engine
from .models import User
from .routers.admin import router as admin_router
from .routers.auth import router as auth_router
@@ -79,10 +79,7 @@ async def lifespan(app: FastAPI):
data_dir = Path("./data")
data_dir.mkdir(parents=True, exist_ok=True)
# Run migrations FIRST (add new columns to existing tables)
run_migrations(engine)
# Create tables (for new tables like subscription_codes)
# Create all tables (PostgreSQL: idempotent, safe to run on every startup)
Base.metadata.create_all(bind=engine)
# Create or update admin user

View File

@@ -1,6 +1,7 @@
fastapi==0.115.0
uvicorn[standard]==0.30.6
sqlalchemy==2.0.35
psycopg2-binary==2.9.9
pydantic==2.9.2
pydantic-settings==2.5.2
python-jose[cryptography]==3.3.0

View File

@@ -1,4 +1,29 @@
services:
  # PostgreSQL 16 (alpine) — primary datastore; the app container depends
  # on this service reaching a healthy state before starting.
  postgres:
    image: postgres:16-alpine
    container_name: prod-leopost-postgres
    restart: unless-stopped
    environment:
      POSTGRES_DB: leopost
      POSTGRES_USER: leopost
      # NOTE(review): the fallback password is committed to the repo — make
      # sure POSTGRES_PASSWORD is always set in the prod environment.
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-leopost_pg_2026}
    volumes:
      - ./pgdata:/var/lib/postgresql/data  # persistent database files
      - ./backups:/backups                 # host mount for dumps/backups
    networks:
      - prod_leopost_net
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U leopost -d leopost"]
      interval: 10s
      timeout: 5s
      retries: 5
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
app:
build:
context: .
@@ -9,15 +34,19 @@ services:
container_name: prod-leopost-full-app
restart: unless-stopped
command: ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
depends_on:
postgres:
condition: service_healthy
volumes:
- ./data:/app/data
environment:
- DATABASE_URL=sqlite:///./data/leopost.db
- DATABASE_URL=postgresql://leopost:${POSTGRES_PASSWORD:-leopost_pg_2026}@postgres:5432/leopost
- APP_URL=https://leopost.it
- GOOGLE_CLIENT_ID=${GOOGLE_CLIENT_ID}
- GOOGLE_CLIENT_SECRET=${GOOGLE_CLIENT_SECRET}
- SECRET_KEY=${SECRET_KEY:-leopost-prod-secret-2026}
networks:
- prod_leopost_net
- proxy_net
deploy:
resources:
@@ -26,5 +55,7 @@ services:
cpus: '1.0'
networks:
prod_leopost_net:
name: prod_leopost_net
proxy_net:
external: true

View File

@@ -0,0 +1,130 @@
#!/usr/bin/env python3
"""
One-shot data migration from SQLite to PostgreSQL for Leopost.

Run EXACTLY ONCE on the VPS, after the PostgreSQL container is up
but before starting the app.

Usage (on the VPS):
    cd /opt/lab-leopost-full-prod
    docker compose -f docker-compose.prod.yml up -d postgres
    # wait until postgres is healthy (10-15 sec)
    python3 scripts/migrate_sqlite_to_pg.py \
        --sqlite data/leopost.db \
        --pg "postgresql://leopost:leopost_pg_2026@localhost:5433/leopost"
"""
import argparse
import json
import sqlite3
import sys

# psycopg2 is only required by this script (the dev environment runs on
# SQLite), so fail with an actionable message instead of a raw ImportError.
try:
    import psycopg2
    import psycopg2.extras
except ImportError:
    print("ERROR: psycopg2 non trovato. Installa con: pip install psycopg2-binary")
    sys.exit(1)
# Ordine di inserimento rispetta le FK
TABLES_ORDER = [
"users",
"subscription_codes",
"characters",
"posts",
"affiliate_links",
"editorial_plans",
"scheduled_posts",
"social_accounts",
"comments",
"system_settings",
]
def get_sqlite_data(sqlite_path: str) -> dict[str, list[dict]]:
conn = sqlite3.connect(sqlite_path)
conn.row_factory = sqlite3.Row
data = {}
cursor = conn.cursor()
for table in TABLES_ORDER:
try:
cursor.execute(f"SELECT * FROM {table}")
rows = [dict(r) for r in cursor.fetchall()]
data[table] = rows
print(f" SQLite {table}: {len(rows)} righe lette")
except sqlite3.OperationalError as e:
print(f" SQLite {table}: saltata ({e})")
data[table] = []
conn.close()
return data
def migrate(sqlite_path: str, pg_dsn: str):
    """Copy all rows from the SQLite file into the PostgreSQL database.

    Inserts table-by-table in TABLES_ORDER (FK-safe order), converting
    JSON-looking strings to native JSON values, then realigns the id
    sequences.  Everything is committed in one transaction at the end.

    Args:
        sqlite_path: path to the source SQLite file.
        pg_dsn: PostgreSQL DSN of the destination database.
    """
    print(f"\n[1/3] Lettura SQLite: {sqlite_path}")
    data = get_sqlite_data(sqlite_path)
    print(f"\n[2/3] Connessione PostgreSQL...")
    pg = psycopg2.connect(pg_dsn)
    pg.autocommit = False
    cur = pg.cursor()
    try:
        print(f"\n[3/3] Inserimento dati in PostgreSQL...")
        # Temporarily disable FK triggers so ordering edge cases
        # (e.g. self-references) cannot fail the copy.
        cur.execute("SET session_replication_role = replica;")
        for table in TABLES_ORDER:
            rows = data.get(table, [])
            if not rows:
                print(f"  {table}: nessuna riga, saltata")
                continue
            # Column list taken from the first row; SQLite rows of one
            # table all share the same keys.
            cols = list(rows[0].keys())
            placeholders = ", ".join(["%s"] * len(cols))
            col_names = ", ".join(cols)
            sql = f"INSERT INTO {table} ({col_names}) VALUES ({placeholders}) ON CONFLICT DO NOTHING"
            inserted = 0
            for row in rows:
                values = []
                for col in cols:
                    v = row[col]
                    # SQLite stores JSON as text — psycopg2 needs wrapped
                    # Python objects for JSON columns.
                    if isinstance(v, str) and v and v[0] in ("{", "["):
                        try:
                            v = json.loads(v)
                            v = psycopg2.extras.Json(v)
                        except json.JSONDecodeError:
                            pass
                    values.append(v)
                # Fix: a failed INSERT aborts the whole PostgreSQL
                # transaction, which made every later statement fail with
                # "current transaction is aborted".  Wrap each row in a
                # savepoint so a bad row is skipped, not fatal.
                cur.execute("SAVEPOINT row_copy")
                try:
                    cur.execute(sql, values)
                    cur.execute("RELEASE SAVEPOINT row_copy")
                    inserted += 1
                except Exception as e:
                    cur.execute("ROLLBACK TO SAVEPOINT row_copy")
                    print(f"  WARN riga in {table}: {e} — saltata")
            print(f"  {table}: {inserted}/{len(rows)} righe inserite")
        # Re-enable FK triggers, then realign each serial sequence with
        # the migrated data so new inserts don't collide with copied ids.
        cur.execute("SET session_replication_role = DEFAULT;")
        print("\n  Resetting sequences PostgreSQL...")
        for table in TABLES_ORDER:
            cur.execute(f"""
                SELECT setval(
                    pg_get_serial_sequence('{table}', 'id'),
                    COALESCE((SELECT MAX(id) FROM {table}), 1)
                )
            """)
        pg.commit()
    finally:
        # Fix: close the cursor/connection even when the migration fails.
        cur.close()
        pg.close()
    print("\n✓ Migrazione completata con successo.")
if __name__ == "__main__":
    # CLI entry point: both source and destination must be given explicitly.
    cli = argparse.ArgumentParser()
    cli.add_argument("--sqlite", required=True, help="Path al file SQLite (es. data/leopost.db)")
    cli.add_argument("--pg", required=True, help="DSN PostgreSQL (es. postgresql://leopost:pass@localhost:5433/leopost)")
    opts = cli.parse_args()
    migrate(opts.sqlite, opts.pg)