style: fix ruff lint and formatting issues

- tts.py: rename ambiguous variable 'l' to 'line' (E741)
- tts.py, llm.py: apply ruff formatter
This commit is contained in:
18
llm.py
18
llm.py
@@ -168,10 +168,12 @@ async def chat_stream(
|
||||
messages.append({"role": "system", "content": system_prompt})
|
||||
|
||||
for entry in history:
|
||||
messages.append({
|
||||
"role": entry["role"],
|
||||
"content": _extract_content(entry["content"]),
|
||||
})
|
||||
messages.append(
|
||||
{
|
||||
"role": entry["role"],
|
||||
"content": _extract_content(entry["content"]),
|
||||
}
|
||||
)
|
||||
|
||||
messages.append({"role": "user", "content": message})
|
||||
|
||||
@@ -214,7 +216,9 @@ async def chat_stream(
|
||||
continue
|
||||
|
||||
latency = time.time() - start_time
|
||||
logger.info("LLM streamed response: %d chars in %.1fs", len(full_text), latency)
|
||||
logger.info(
|
||||
"LLM streamed response: %d chars in %.1fs", len(full_text), latency
|
||||
)
|
||||
|
||||
# Best-effort metrics from the final SSE payload
|
||||
_log_llm_metrics(
|
||||
@@ -229,9 +233,7 @@ async def chat_stream(
|
||||
# Non-streaming fallback (endpoint doesn't support stream)
|
||||
body = await response.aread()
|
||||
result = json.loads(body)
|
||||
text = _extract_content(
|
||||
result["choices"][0]["message"]["content"]
|
||||
)
|
||||
text = _extract_content(result["choices"][0]["message"]["content"])
|
||||
latency = time.time() - start_time
|
||||
usage = result.get("usage", {})
|
||||
|
||||
|
||||
Reference in New Issue
Block a user