feat: Add context labels to multi-call pipeline stages

Stage 3 (extraction) LLM calls now show the topic group label (e.g.,
'Sound Design Basics') and Stage 5 (synthesis) calls show the category
name. Displayed as a cyan italic label in the event row between the
event type badge and model name. Helps admins understand why there are
multiple LLM calls per stage.
This commit is contained in:
jlightner 2026-03-31 17:27:40 +00:00
parent c2db9aa011
commit c1583820ea
3 changed files with 19 additions and 2 deletions

View file

@@ -135,6 +135,7 @@ def _make_llm_callback(
system_prompt: str | None = None,
user_prompt: str | None = None,
run_id: str | None = None,
context_label: str | None = None,
):
"""Create an on_complete callback for LLMClient that emits llm_call events.
@@ -162,6 +163,7 @@ def _make_llm_callback(
"content_length": len(content) if content else 0,
"finish_reason": finish_reason,
"is_fallback": is_fallback,
**({"context": context_label} if context_label else {}),
},
system_prompt_text=system_prompt if debug else None,
user_prompt_text=user_prompt if debug else None,
@@ -427,7 +429,7 @@ def stage3_extraction(self, video_id: str, run_id: str | None = None) -> str:
)
max_tokens = estimate_max_tokens(system_prompt, user_prompt, stage="stage3_extraction", hard_limit=hard_limit)
raw = llm.complete(system_prompt, user_prompt, response_model=ExtractionResult, on_complete=_make_llm_callback(video_id, "stage3_extraction", system_prompt=system_prompt, user_prompt=user_prompt, run_id=run_id),
raw = llm.complete(system_prompt, user_prompt, response_model=ExtractionResult, on_complete=_make_llm_callback(video_id, "stage3_extraction", system_prompt=system_prompt, user_prompt=user_prompt, run_id=run_id, context_label=topic_label),
modality=modality, model_override=model_override, max_tokens=max_tokens)
result = _safe_parse_llm_response(raw, ExtractionResult, llm, system_prompt, user_prompt,
modality=modality, model_override=model_override)
@@ -781,7 +783,7 @@ def stage5_synthesis(self, video_id: str, run_id: str | None = None) -> str:
user_prompt = f"<creator>{creator_name}</creator>\n<moments>\n{moments_text}\n</moments>"
max_tokens = estimate_max_tokens(system_prompt, user_prompt, stage="stage5_synthesis", hard_limit=hard_limit)
raw = llm.complete(system_prompt, user_prompt, response_model=SynthesisResult, on_complete=_make_llm_callback(video_id, "stage5_synthesis", system_prompt=system_prompt, user_prompt=user_prompt, run_id=run_id),
raw = llm.complete(system_prompt, user_prompt, response_model=SynthesisResult, on_complete=_make_llm_callback(video_id, "stage5_synthesis", system_prompt=system_prompt, user_prompt=user_prompt, run_id=run_id, context_label=category),
modality=modality, model_override=model_override, max_tokens=max_tokens)
result = _safe_parse_llm_response(raw, SynthesisResult, llm, system_prompt, user_prompt,
modality=modality, model_override=model_override)

View file

@@ -4133,6 +4133,16 @@ a.app-footer__repo:hover {
font-weight: 500;
}
/* Context label (topic-group / category name) shown in a pipeline event row
   between the event-type badge and the model name. Styled with the theme
   accent color (cyan per the commit description) in small italics; long
   labels are capped at 150px and truncated with an ellipsis on one line. */
.pipeline-event__context {
color: var(--color-accent);
font-size: 0.75rem;
font-style: italic;
max-width: 150px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.pipeline-event__model {
color: var(--color-text-muted);
font-size: 0.75rem;

View file

@@ -294,6 +294,11 @@ function EventLog({ videoId, status, runId }: { videoId: string; status: string;
<span className={`pipeline-badge pipeline-badge--event-${evt.event_type}`}>
{evt.event_type}
</span>
{typeof evt.payload?.context === "string" && (
<span className="pipeline-event__context" title="Processing context">
{evt.payload.context}
</span>
)}
{evt.model && <span className="pipeline-event__model">{evt.model}</span>}
{evt.total_tokens != null && evt.total_tokens > 0 && (
<span className="pipeline-event__tokens" title={`prompt: ${evt.prompt_tokens ?? 0} / completion: ${evt.completion_tokens ?? 0}`}>