Fix ruff formatting suggestions and update the affected tests.
All checks were successful
CI / Lint (push) Successful in 1m39s
CI / Test (push) Successful in 1m37s
CI / Release (push) Successful in 6s
CI / Notify (push) Successful in 1s

This commit is contained in:
2026-02-18 07:37:13 -05:00
parent 24a4098c9a
commit a1cf87909d
3 changed files with 40 additions and 17 deletions

View File

@@ -134,9 +134,7 @@ class ChatHandler(Handler):
enable_tts = data.get("enable_tts", self.chat_settings.enable_tts)
system_prompt = data.get("system_prompt")
# companions-frontend may set a custom response subject
response_subject = data.get(
"response_subject", f"ai.chat.response.{request_id}"
)
response_subject = data.get("response_subject", f"ai.chat.response.{request_id}")
logger.info(f"Processing request {request_id}: {query[:50]}...")
@@ -159,7 +157,9 @@ class ChatHandler(Handler):
# 2. Search Milvus for context
documents = await self._search_context(
embedding, collection, top_k=top_k,
embedding,
collection,
top_k=top_k,
)
# 3. Optionally rerank documents
@@ -172,21 +172,24 @@ class ChatHandler(Handler):
if reranked:
context = self._build_context(reranked)
rag_sources = [
d.get("source", d.get("document", "")[:80])
for d in reranked[:3]
d.get("source", d.get("document", "")[:80]) for d in reranked[:3]
]
used_rag = True
# 5. Generate LLM response (with or without RAG context)
response_text = await self._generate_response(
query, context or None, system_prompt,
query,
context or None,
system_prompt,
)
# 6. Stream response chunks if requested
if enable_streaming:
stream_subject = f"ai.chat.response.stream.{request_id}"
await self._publish_streaming_chunks(
stream_subject, request_id, response_text,
stream_subject,
request_id,
response_text,
)
# 7. Optionally synthesize speech