chrysopedia/backend/routers/chat.py
jlightner d13d6c3aa1 test: Added multi-turn conversation memory with Redis-backed history (1…
- "backend/chat_service.py"
- "backend/routers/chat.py"
- "backend/tests/test_chat.py"

GSD-Task: S04/T01
2026-04-04 07:50:30 +00:00

65 lines
1.9 KiB
Python

"""Chat endpoint — POST /api/v1/chat with SSE streaming response.
Accepts a query and optional creator filter, returns a Server-Sent Events
stream with sources, token, done, and error events.
"""
from __future__ import annotations
import logging
from fastapi import APIRouter, Depends
from fastapi.responses import StreamingResponse
from pydantic import BaseModel, Field
from sqlalchemy.ext.asyncio import AsyncSession
from chat_service import ChatService
from config import Settings, get_settings
from database import get_session
from redis_client import get_redis
logger = logging.getLogger("chrysopedia.chat.router")
router = APIRouter(prefix="/chat", tags=["chat"])
class ChatRequest(BaseModel):
    """Validated payload for POST /chat.

    Fields:
    - ``query``: the user's question; pydantic enforces 1-1000 characters.
    - ``creator``: optional creator name passed through as a filter.
    - ``conversation_id``: optional id forwarded to the chat service
      (presumably for multi-turn history — confirm in ChatService).
    """
    query: str = Field(..., min_length=1, max_length=1000)
    creator: str | None = Field(default=None)
    conversation_id: str | None = Field(default=None)
@router.post("")
async def chat(
body: ChatRequest,
db: AsyncSession = Depends(get_session),
settings: Settings = Depends(get_settings),
) -> StreamingResponse:
"""Stream a chat response as Server-Sent Events.
SSE protocol:
- ``event: sources`` — citation metadata array (sent first)
- ``event: token`` — streamed text chunk (repeated)
- ``event: done`` — completion metadata with cascade_tier, conversation_id
- ``event: error`` — error message (on failure)
"""
logger.info("chat_request query=%r creator=%r cid=%r", body.query, body.creator, body.conversation_id)
redis = await get_redis()
service = ChatService(settings, redis=redis)
return StreamingResponse(
service.stream_response(
query=body.query,
db=db,
creator=body.creator,
conversation_id=body.conversation_id,
),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"X-Accel-Buffering": "no",
},
)