Changelog:
- Add BASE_URL setting to config.py; replace hardcoded localhost:8096 in notifications.
- Fix LLM_FALLBACK_MODEL default from fyn-llm-agent-chat to qwen2.5:7b.
- Fix docker-compose LLM_FALLBACK_MODEL to use an env var with the correct default.
- Add BASE_URL env var to the API and worker services in docker-compose.yml.
- Add soft_time_limit/time_limit to all pipeline stage tasks (prevents stuck workers).
- Cache the Redis connection in _is_debug_mode() instead of creating one per call.
- Remove duplicate test files in backend/tests/notifications/.

115 lines · 3.7 KiB · Python
"""Application configuration loaded from environment variables."""
|
|
|
|
from functools import lru_cache
|
|
|
|
from pydantic_settings import BaseSettings
|
|
|
|
|
|
class Settings(BaseSettings):
    """Chrysopedia API settings.

    Values are loaded from environment variables (or .env file via
    pydantic-settings' dotenv support). Lookup is case-insensitive
    (see ``model_config`` below), so e.g. ``database_url`` is populated
    from the ``DATABASE_URL`` environment variable.
    """

    # Database — async SQLAlchemy DSN using the asyncpg driver.
    # NOTE(review): port 5433 presumably matches a docker-compose port mapping — confirm.
    database_url: str = "postgresql+asyncpg://chrysopedia:changeme@localhost:5433/chrysopedia"

    # Redis connection URL (logical DB 0).
    redis_url: str = "redis://localhost:6379/0"

    # Application
    app_env: str = "development"
    app_log_level: str = "info"
    # Development placeholder — must be overridden with a real secret in production.
    app_secret_key: str = "changeme-generate-a-real-secret"

    # CORS — wildcard default is development-friendly; tighten for production.
    cors_origins: list[str] = ["*"]

    # LLM endpoint (OpenAI-compatible), plus a fallback endpoint/model pair.
    llm_api_url: str = "http://localhost:11434/v1"
    llm_api_key: str = "sk-placeholder"
    llm_model: str = "fyn-llm-agent-chat"
    llm_fallback_url: str = "http://localhost:11434/v1"
    llm_fallback_model: str = "qwen2.5:7b"

    # Per-stage model overrides (optional — falls back to llm_model / "chat")
    llm_stage2_model: str | None = "fyn-llm-agent-chat"  # segmentation — mechanical, fast chat
    llm_stage2_modality: str = "chat"
    llm_stage3_model: str | None = "fyn-llm-agent-think"  # extraction — reasoning
    llm_stage3_modality: str = "thinking"
    llm_stage4_model: str | None = "fyn-llm-agent-chat"  # classification — mechanical, fast chat
    llm_stage4_modality: str = "chat"
    llm_stage5_model: str | None = "fyn-llm-agent-think"  # synthesis — reasoning
    llm_stage5_modality: str = "thinking"

    # Token limits — static across all stages
    llm_max_tokens_hard_limit: int = 96000  # Hard ceiling for dynamic estimator
    llm_max_tokens: int = 96000  # Fallback when no estimate is provided (must not exceed hard_limit)
    llm_temperature: float = 0.0  # Deterministic output for structured JSON extraction

    # Stage 5 synthesis chunking — max moments per LLM call before splitting
    synthesis_chunk_size: int = 30

    # Embedding endpoint (OpenAI-compatible).
    # NOTE(review): 768 presumably matches nomic-embed-text's output size — confirm.
    embedding_api_url: str = "http://localhost:11434/v1"
    embedding_model: str = "nomic-embed-text"
    embedding_dimensions: int = 768

    # Qdrant vector store
    qdrant_url: str = "http://localhost:6333"
    qdrant_collection: str = "chrysopedia"

    # LightRAG service
    lightrag_url: str = "http://chrysopedia-lightrag:9621"
    lightrag_search_timeout: float = 2.0  # seconds — keeps search latency bounded
    lightrag_min_query_length: int = 3  # queries shorter than this skip LightRAG (presumably; verify in caller)

    # Prompt templates — directory of prompt files, relative to the working directory.
    prompts_path: str = "./prompts"

    # Debug mode — when True, pipeline captures full LLM prompts and responses
    debug_mode: bool = False

    # MinIO (file storage for post attachments)
    minio_url: str = "chrysopedia-minio:9000"
    minio_access_key: str = "chrysopedia"
    minio_secret_key: str = "changeme-minio"  # placeholder — override in production
    minio_bucket: str = "chrysopedia"
    minio_secure: bool = False  # False → connect without TLS (development default)

    # File storage — container-internal mount paths
    transcript_storage_path: str = "/data/transcripts"
    video_metadata_path: str = "/data/video_meta"
    video_source_path: str = "/videos"

    # SMTP (email digests)
    # NOTE(review): empty smtp_host presumably disables sending — confirm in the mailer.
    smtp_host: str = ""
    smtp_port: int = 587
    smtp_user: str = ""
    smtp_password: str = ""
    smtp_from_address: str = ""
    smtp_tls: bool = True

    # Public base URL for links in emails and external references
    base_url: str = "http://localhost:8096"

    # Rate limiting (per hour)
    rate_limit_user_per_hour: int = 30
    rate_limit_ip_per_hour: int = 10
    rate_limit_creator_per_hour: int = 60

    # Git commit SHA (set at Docker build time or via env var)
    git_commit_sha: str = "unknown"

    # pydantic-settings configuration: read .env (UTF-8), match env vars
    # case-insensitively.
    model_config = {
        "env_file": ".env",
        "env_file_encoding": "utf-8",
        "case_sensitive": False,
    }
|
|
|
|
|
|
@lru_cache(maxsize=None)
def get_settings() -> Settings:
    """Return the process-wide Settings singleton.

    The first call constructs ``Settings()`` (reading environment
    variables and the .env file); ``lru_cache`` then hands the very same
    instance back to every subsequent caller.
    """
    settings = Settings()
    return settings
|