Apply ruff formatting suggestions and update the affected tests.
All checks were successful
CI / Lint (push) Successful in 1m39s
CI / Test (push) Successful in 1m37s
CI / Release (push) Successful in 6s
CI / Notify (push) Successful in 1s

This commit is contained in:
2026-02-18 07:37:13 -05:00
parent 24a4098c9a
commit a1cf87909d
3 changed files with 40 additions and 17 deletions

View File

@@ -134,9 +134,7 @@ class ChatHandler(Handler):
enable_tts = data.get("enable_tts", self.chat_settings.enable_tts) enable_tts = data.get("enable_tts", self.chat_settings.enable_tts)
system_prompt = data.get("system_prompt") system_prompt = data.get("system_prompt")
# companions-frontend may set a custom response subject # companions-frontend may set a custom response subject
response_subject = data.get( response_subject = data.get("response_subject", f"ai.chat.response.{request_id}")
"response_subject", f"ai.chat.response.{request_id}"
)
logger.info(f"Processing request {request_id}: {query[:50]}...") logger.info(f"Processing request {request_id}: {query[:50]}...")
@@ -159,7 +157,9 @@ class ChatHandler(Handler):
# 2. Search Milvus for context # 2. Search Milvus for context
documents = await self._search_context( documents = await self._search_context(
embedding, collection, top_k=top_k, embedding,
collection,
top_k=top_k,
) )
# 3. Optionally rerank documents # 3. Optionally rerank documents
@@ -172,21 +172,24 @@ class ChatHandler(Handler):
if reranked: if reranked:
context = self._build_context(reranked) context = self._build_context(reranked)
rag_sources = [ rag_sources = [
d.get("source", d.get("document", "")[:80]) d.get("source", d.get("document", "")[:80]) for d in reranked[:3]
for d in reranked[:3]
] ]
used_rag = True used_rag = True
# 5. Generate LLM response (with or without RAG context) # 5. Generate LLM response (with or without RAG context)
response_text = await self._generate_response( response_text = await self._generate_response(
query, context or None, system_prompt, query,
context or None,
system_prompt,
) )
# 6. Stream response chunks if requested # 6. Stream response chunks if requested
if enable_streaming: if enable_streaming:
stream_subject = f"ai.chat.response.stream.{request_id}" stream_subject = f"ai.chat.response.stream.{request_id}"
await self._publish_streaming_chunks( await self._publish_streaming_chunks(
stream_subject, request_id, response_text, stream_subject,
request_id,
response_text,
) )
# 7. Optionally synthesize speech # 7. Optionally synthesize speech

View File

@@ -53,7 +53,7 @@ def sample_reranked():
def mock_nats_message(): def mock_nats_message():
"""Create a mock NATS message.""" """Create a mock NATS message."""
msg = MagicMock() msg = MagicMock()
msg.subject = "ai.chat.request" msg.subject = "ai.chat.user.test-user-1.message"
msg.reply = "ai.chat.response.test-123" msg.reply = "ai.chat.response.test-123"
return msg return msg
@@ -63,7 +63,13 @@ def mock_chat_request():
"""Sample chat request payload.""" """Sample chat request payload."""
return { return {
"request_id": "test-request-123", "request_id": "test-request-123",
"query": "What is machine learning?", "user_id": "test-user-1",
"username": "testuser",
"message": "What is machine learning?",
"premium": True,
"enable_rag": True,
"enable_reranker": True,
"enable_streaming": False,
"collection": "test_collection", "collection": "test_collection",
"enable_tts": False, "enable_tts": False,
"system_prompt": None, "system_prompt": None,
@@ -75,7 +81,13 @@ def mock_chat_request_with_tts():
"""Sample chat request with TTS enabled.""" """Sample chat request with TTS enabled."""
return { return {
"request_id": "test-request-456", "request_id": "test-request-456",
"query": "Tell me about AI", "user_id": "test-user-2",
"username": "testuser2",
"message": "Tell me about AI",
"premium": True,
"enable_rag": True,
"enable_reranker": True,
"enable_streaming": False,
"collection": "documents", "collection": "documents",
"enable_tts": True, "enable_tts": True,
"system_prompt": "You are a helpful assistant.", "system_prompt": "You are a helpful assistant.",

View File

@@ -86,7 +86,7 @@ class TestChatHandler:
def test_init(self, handler): def test_init(self, handler):
"""Test handler initialization.""" """Test handler initialization."""
assert handler.subject == "ai.chat.request" assert handler.subject == "ai.chat.user.*.message"
assert handler.queue_group == "chat-handlers" assert handler.queue_group == "chat-handlers"
assert handler.chat_settings.service_name == "chat-handler" assert handler.chat_settings.service_name == "chat-handler"
@@ -111,12 +111,15 @@ class TestChatHandler:
result = await handler.handle_message(mock_nats_message, mock_chat_request) result = await handler.handle_message(mock_nats_message, mock_chat_request)
# Verify # Verify
assert result["request_id"] == "test-request-123" assert result["user_id"] == "test-user-1"
assert result["success"] is True
assert "response" in result assert "response" in result
assert result["response"] == "Machine learning is a subset of AI that..." assert result["response"] == "Machine learning is a subset of AI that..."
assert "sources" in result # include_sources is True by default assert result["response_text"] == result["response"]
assert result["used_rag"] is True
assert isinstance(result["rag_sources"], list)
# Verify pipeline was called # Verify RAG pipeline was called (enable_rag=True in fixture)
handler.embeddings.embed_single.assert_called_once() handler.embeddings.embed_single.assert_called_once()
handler.milvus.search_with_texts.assert_called_once() handler.milvus.search_with_texts.assert_called_once()
handler.reranker.rerank.assert_called_once() handler.reranker.rerank.assert_called_once()
@@ -142,7 +145,9 @@ class TestChatHandler:
result = await handler.handle_message(mock_nats_message, mock_chat_request) result = await handler.handle_message(mock_nats_message, mock_chat_request)
assert "sources" not in result # New response format doesn't have a separate "sources" key;
# rag_sources is always present (may be empty)
assert "rag_sources" in result
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_handle_message_with_tts( async def test_handle_message_with_tts(
@@ -180,7 +185,10 @@ class TestChatHandler:
"""Test LLM is called with custom system prompt.""" """Test LLM is called with custom system prompt."""
request = { request = {
"request_id": "test-123", "request_id": "test-123",
"query": "Hello", "user_id": "user-42",
"message": "Hello",
"premium": True,
"enable_rag": True,
"system_prompt": "You are a pirate. Respond like one.", "system_prompt": "You are a pirate. Respond like one.",
} }