ray-serve/ray_serve/serve_tts.py
Billy D. 7ec2107e0c
feat: add MLflow inference logging to all Ray Serve apps
- Add mlflow_logger.py: lightweight REST-based MLflow logger (no mlflow dep)
- Instrument serve_llm.py with latency, token counts, tokens/sec metrics
- Instrument serve_embeddings.py with latency, batch_size, total_tokens
- Instrument serve_whisper.py with latency, audio_duration, realtime_factor
- Instrument serve_tts.py with latency, audio_duration, text_chars
- Instrument serve_reranker.py with latency, num_pairs, top_k
2026-02-12 06:14:30 -05:00
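Note: the `mlflow_logger.py` module referenced above is not shown on this page. As a rough sketch of what a REST-only logger with the call signature used below could look like (the default `tracking_uri`, the buffering scheme, and the flush behavior are assumptions, not the repo's actual implementation), built on MLflow's documented `/api/2.0/mlflow` REST endpoints:

import time

import requests


class InferenceLogger:
    """Minimal MLflow tracking client that speaks the REST API directly (no mlflow dep)."""

    def __init__(self, experiment_name, run_name=None, tags=None, flush_every=10,
                 tracking_uri="http://mlflow:5000"):  # hypothetical default URI
        self.base = f"{tracking_uri}/api/2.0/mlflow"
        self.experiment_name = experiment_name
        self.run_name = run_name
        self.tags = tags or {}
        self.flush_every = flush_every
        self._buffer = []
        self._step = 0
        self.run_id = None

    def initialize(self, params=None):
        # Look up (or create) the experiment, then open a run.
        r = requests.get(f"{self.base}/experiments/get-by-name",
                         params={"experiment_name": self.experiment_name})
        if r.status_code == 200:
            exp_id = r.json()["experiment"]["experiment_id"]
        else:
            exp_id = requests.post(f"{self.base}/experiments/create",
                                   json={"name": self.experiment_name}).json()["experiment_id"]
        run = requests.post(f"{self.base}/runs/create", json={
            "experiment_id": exp_id,
            "run_name": self.run_name,  # accepted by CreateRun in MLflow >= 2.0
            "start_time": int(time.time() * 1000),
            "tags": [{"key": k, "value": v} for k, v in self.tags.items()],
        }).json()["run"]["info"]
        self.run_id = run["run_id"]
        if params:
            requests.post(f"{self.base}/runs/log-batch", json={
                "run_id": self.run_id,
                "params": [{"key": k, "value": str(v)} for k, v in params.items()],
            })

    def log_request(self, **metrics):
        # Buffer per-request metrics; flush in batches to keep per-request overhead low.
        now = int(time.time() * 1000)
        self._step += 1
        self._buffer.extend(
            {"key": k, "value": float(v), "timestamp": now, "step": self._step}
            for k, v in metrics.items()
        )
        if self._step % self.flush_every == 0:
            requests.post(f"{self.base}/runs/log-batch",
                          json={"run_id": self.run_id, "metrics": self._buffer})
            self._buffer = []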

"""
Ray Serve deployment for Coqui TTS.
Runs on: elminster (RTX 2070 8GB, CUDA)
"""
import base64
import io
import os
import time
from typing import Any

from ray import serve

from ray_serve.mlflow_logger import InferenceLogger


@serve.deployment(name="TTSDeployment", num_replicas=1)
class TTSDeployment:
    def __init__(self):
        import torch
        from TTS.api import TTS

        self.model_name = os.environ.get("MODEL_NAME", "tts_models/en/ljspeech/tacotron2-DDC")

        # Detect device
        self.use_gpu = torch.cuda.is_available()
        print(f"Loading TTS model: {self.model_name}")
        print(f"Using GPU: {self.use_gpu}")

        self.tts = TTS(model_name=self.model_name, progress_bar=False)
        if self.use_gpu:
            self.tts = self.tts.to("cuda")
        print("TTS model loaded successfully")

        # MLflow metrics
        self._mlflow = InferenceLogger(
            experiment_name="ray-serve-tts",
            run_name=f"tts-{self.model_name.split('/')[-1]}",
            tags={"model.name": self.model_name, "model.framework": "coqui-tts", "gpu": str(self.use_gpu)},
            flush_every=5,
        )
        self._mlflow.initialize(params={"model_name": self.model_name, "use_gpu": str(self.use_gpu)})

    async def __call__(self, request: dict[str, Any]) -> dict[str, Any]:
        """
        Handle text-to-speech requests.

        Expected request format:
        {
            "text": "Text to synthesize",
            "speaker": "speaker_name",
            "language": "en",
            "speed": 1.0,
            "output_format": "wav",
            "return_base64": true
        }
        """
        import numpy as np
        from scipy.io import wavfile

        _start = time.time()
        text = request.get("text", "")
        speaker = request.get("speaker")
        language = request.get("language")
        speed = request.get("speed", 1.0)
        output_format = request.get("output_format", "wav")
        return_base64 = request.get("return_base64", True)

        if not text:
            return {"error": "No text provided"}

        # Generate speech
        try:
            # TTS.tts returns a numpy array of audio samples
            wav = self.tts.tts(
                text=text,
                speaker=speaker,
                language=language,
                speed=speed,
            )

            # Convert to numpy array if needed
            if not isinstance(wav, np.ndarray):
                wav = np.array(wav)

            # Normalize to int16
            wav_int16 = (wav * 32767).astype(np.int16)

            # Get sample rate from model config
            sample_rate = (
                self.tts.synthesizer.output_sample_rate
                if hasattr(self.tts, "synthesizer")
                else 22050
            )

            # Write to buffer
            buffer = io.BytesIO()
            wavfile.write(buffer, sample_rate, wav_int16)
            audio_bytes = buffer.getvalue()
            duration = len(wav) / sample_rate

            # Log to MLflow; measure latency once so latency_s and
            # realtime_factor are derived from the same timestamp
            latency = time.time() - _start
            self._mlflow.log_request(
                latency_s=latency,
                audio_duration_s=duration,
                text_chars=len(text),
                realtime_factor=latency / duration if duration > 0 else 0,
            )

            response = {
                "model": self.model_name,
                "sample_rate": sample_rate,
                "duration": duration,
                "format": output_format,
            }
            if return_base64:
                response["audio"] = base64.b64encode(audio_bytes).decode("utf-8")
            else:
                response["audio_bytes"] = audio_bytes
            return response
        except Exception as e:
            return {
                "error": str(e),
                "model": self.model_name,
            }

    def list_speakers(self) -> dict[str, Any]:
        """List available speakers for multi-speaker models."""
        speakers = []
        if hasattr(self.tts, "speakers") and self.tts.speakers:
            speakers = self.tts.speakers
        return {
            "model": self.model_name,
            "speakers": speakers,
            "is_multi_speaker": len(speakers) > 0,
        }


app = TTSDeployment.bind()
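
A minimal client-side smoke test, assuming the app is deployed with `serve.run` and queried through its deployment handle (on recent Ray releases, roughly 2.7+, `serve.run` returns a `DeploymentHandle`; the file name and test text here are hypothetical):

# smoke_test_tts.py -- hypothetical local check, not part of the repo
import base64

from ray import serve

from ray_serve.serve_tts import app

handle = serve.run(app)  # returns a DeploymentHandle on recent Ray releases
result = handle.remote({"text": "Hello from Ray Serve.", "return_base64": True}).result()

if "error" in result:
    raise SystemExit(f"TTS failed: {result['error']}")

# Decode the base64 WAV payload and write it to disk.
with open("out.wav", "wb") as f:
    f.write(base64.b64decode(result["audio"]))
print(f"Wrote out.wav: {result['duration']:.2f}s at {result['sample_rate']} Hz")

Calling through the handle matters here: `__call__` expects a plain dict, so a raw HTTP POST to the Serve proxy (which delivers a Starlette `Request` object) would need JSON parsing added first.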