feat: initial ray-serve-apps PyPI package
Implements ADR-0024: Ray Repository Structure

- Ray Serve deployments for GPU-shared AI inference
- Published as PyPI package for dynamic code loading
- Deployments: LLM, embeddings, reranker, whisper, TTS
- CI/CD workflow publishes to Gitea PyPI on push to main

Extracted from kuberay-images repo per ADR-0024
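A minimal consumption sketch, not part of this commit: it assumes the wheel is published as ray-serve-apps and pulled from a Gitea PyPI index (the index URL below is a placeholder, not from this repo).

# Hypothetical install from the private index, then deploy the bound app:
#   pip install ray-serve-apps --extra-index-url https://gitea.example.com/api/packages/<owner>/pypi/simple/
from ray import serve
from ray_serve.serve_tts import app  # app = TTSDeployment.bind()

serve.run(app, name="tts", route_prefix="/tts")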
ray_serve/serve_tts.py (new file, 124 lines)
@@ -0,0 +1,124 @@
"""
Ray Serve deployment for Coqui TTS.

Runs on: elminster (RTX 2070 8GB, CUDA)
"""

import base64
import io
import os
from typing import Any

from ray import serve


@serve.deployment(name="TTSDeployment", num_replicas=1)
class TTSDeployment:
    def __init__(self):
        # Heavy imports are deferred so the module can be imported
        # without torch/TTS installed locally
        import torch
        from TTS.api import TTS

        self.model_name = os.environ.get(
            "MODEL_NAME", "tts_models/en/ljspeech/tacotron2-DDC"
        )

        # Detect device
        self.use_gpu = torch.cuda.is_available()

        print(f"Loading TTS model: {self.model_name}")
        print(f"Using GPU: {self.use_gpu}")

        self.tts = TTS(model_name=self.model_name, progress_bar=False)
        if self.use_gpu:
            self.tts = self.tts.to("cuda")

        print("TTS model loaded successfully")

    async def __call__(self, request: dict[str, Any]) -> dict[str, Any]:
        """
        Handle text-to-speech requests.

        Expected request format:
        {
            "text": "Text to synthesize",
            "speaker": "speaker_name",
            "language": "en",
            "speed": 1.0,
            "output_format": "wav",
            "return_base64": true
        }
        """
        import numpy as np
        from scipy.io import wavfile

        text = request.get("text", "")
        speaker = request.get("speaker")
        language = request.get("language")
        speed = request.get("speed", 1.0)
        output_format = request.get("output_format", "wav")
        return_base64 = request.get("return_base64", True)

        if not text:
            return {"error": "No text provided"}

        # Generate speech
        try:
            # TTS.tts returns float audio samples in [-1, 1]
            wav = self.tts.tts(
                text=text,
                speaker=speaker,
                language=language,
                speed=speed,
            )

            # Convert to numpy array if needed
            if not isinstance(wav, np.ndarray):
                wav = np.array(wav)

            # Clip to [-1, 1] before scaling so out-of-range samples
            # cannot wrap around, then convert to int16 PCM
            wav_int16 = (np.clip(wav, -1.0, 1.0) * 32767).astype(np.int16)

            # Get sample rate from model config
            sample_rate = (
                self.tts.synthesizer.output_sample_rate
                if hasattr(self.tts, "synthesizer")
                else 22050
            )

            # Write WAV to an in-memory buffer (only WAV output is
            # implemented; output_format is echoed back as-is)
            buffer = io.BytesIO()
            wavfile.write(buffer, sample_rate, wav_int16)
            audio_bytes = buffer.getvalue()

            response = {
                "model": self.model_name,
                "sample_rate": sample_rate,
                "duration": len(wav) / sample_rate,
                "format": output_format,
            }

            if return_base64:
                response["audio"] = base64.b64encode(audio_bytes).decode("utf-8")
            else:
                # Raw bytes work for handle calls but are not
                # JSON-serializable over plain HTTP
                response["audio_bytes"] = audio_bytes

            return response

        except Exception as e:
            return {
                "error": str(e),
                "model": self.model_name,
            }

    def list_speakers(self) -> dict[str, Any]:
        """List available speakers for multi-speaker models."""
        speakers = []
        if hasattr(self.tts, "speakers") and self.tts.speakers:
            speakers = self.tts.speakers

        return {
            "model": self.model_name,
            "speakers": speakers,
            "is_multi_speaker": len(speakers) > 0,
        }


app = TTSDeployment.bind()
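A hedged smoke test under the same assumptions as the sketch above (recent Ray handle API; app deployed under the name "tts"). Because __call__ takes a plain dict rather than an HTTP request object, a DeploymentHandle is the most direct client:

import base64

from ray import serve

# Assumes the app was deployed under the name "tts" as in the sketch above
handle = serve.get_app_handle("tts")
result = handle.remote(
    {"text": "Hello from Ray Serve.", "return_base64": True}
).result()

with open("out.wav", "wb") as f:
    f.write(base64.b64decode(result["audio"]))
print(f"{result['duration']:.2f}s of audio at {result['sample_rate']} Hz")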