fix: make mlflow_logger import optional with no-op fallback
All checks were successful
Build and Publish ray-serve-apps / build-and-publish (push) Successful in 11s

The strixhalo LLM worker uses py_executable pointing at the Docker
image's venv, which doesn't have the updated ray-serve-apps package.
Wrap all InferenceLogger imports in try/except and guard usage with
None checks so the apps degrade gracefully without MLflow logging.
2026-02-12 07:01:17 -05:00
parent 7ec2107e0c
commit 15e4b8afa3
5 changed files with 124 additions and 88 deletions
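The same pattern is applied in each of the five app modules below. A minimal sketch of it, with the deployment name, experiment name, and logged values used here as illustrative placeholders rather than values from the repo:

from ray import serve

# Optional import: fall back to None when the active venv does not ship
# the updated ray-serve-apps package (e.g. a py_executable Docker venv).
try:
    from ray_serve.mlflow_logger import InferenceLogger
except ImportError:
    InferenceLogger = None


@serve.deployment(name="ExampleDeployment", num_replicas=1)
class ExampleDeployment:
    def __init__(self) -> None:
        # Only construct the logger when the import succeeded.
        if InferenceLogger is not None:
            self._mlflow = InferenceLogger(
                experiment_name="ray-serve-example",
                run_name="example-run",
                flush_every=5,
            )
            self._mlflow.initialize(params={"model_id": "example"})
        else:
            self._mlflow = None

    async def __call__(self, request: dict) -> dict:
        # ... do the actual inference work here ...
        # Every logging call is guarded, so requests are still served
        # when MLflow logging is unavailable.
        if self._mlflow:
            self._mlflow.log_request(latency_s=0.0)  # real calls pass measured metrics
        return {}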

View File

@@ -9,7 +9,10 @@ from typing import Any
 from ray import serve
-from ray_serve.mlflow_logger import InferenceLogger
+try:
+    from ray_serve.mlflow_logger import InferenceLogger
+except ImportError:
+    InferenceLogger = None
 @serve.deployment(name="EmbeddingsDeployment", num_replicas=1)
@@ -37,6 +40,7 @@ class EmbeddingsDeployment:
print(f"Model loaded. Embedding dimension: {self.embedding_dim}")
# MLflow metrics
if InferenceLogger is not None:
self._mlflow = InferenceLogger(
experiment_name="ray-serve-embeddings",
run_name=f"embeddings-{self.model_id.split('/')[-1]}",
@@ -46,6 +50,8 @@ class EmbeddingsDeployment:
             self._mlflow.initialize(
                 params={"model_id": self.model_id, "embedding_dim": str(self.embedding_dim), "device": self.device}
             )
+        else:
+            self._mlflow = None
     async def __call__(self, request: dict[str, Any]) -> dict[str, Any]:
         """
@@ -86,6 +92,7 @@ class EmbeddingsDeployment:
             total_tokens += len(text.split())
         # Log to MLflow
+        if self._mlflow:
             self._mlflow.log_request(
                 latency_s=time.time() - _start,
                 batch_size=len(texts),

View File

@@ -10,7 +10,10 @@ from typing import Any
 from ray import serve
-from ray_serve.mlflow_logger import InferenceLogger
+try:
+    from ray_serve.mlflow_logger import InferenceLogger
+except ImportError:
+    InferenceLogger = None
 @serve.deployment(name="LLMDeployment", num_replicas=1)
@@ -40,6 +43,7 @@ class LLMDeployment:
print(f"Model {self.model_id} async engine created")
# MLflow metrics
if InferenceLogger is not None:
self._mlflow = InferenceLogger(
experiment_name="ray-serve-llm",
run_name=f"llm-{self.model_id.split('/')[-1]}",
@@ -53,6 +57,8 @@ class LLMDeployment:
"gpu_memory_utilization": str(self.gpu_memory_utilization),
}
)
else:
self._mlflow = None
async def __call__(self, request: dict[str, Any]) -> dict[str, Any]:
"""
@@ -96,6 +102,7 @@ class LLMDeployment:
         completion_tokens = len(generated_text.split())
         # Log to MLflow
+        if self._mlflow:
             self._mlflow.log_request(
                 latency_s=latency,
                 prompt_tokens=prompt_tokens,

View File

@@ -9,7 +9,10 @@ from typing import Any
 from ray import serve
-from ray_serve.mlflow_logger import InferenceLogger
+try:
+    from ray_serve.mlflow_logger import InferenceLogger
+except ImportError:
+    InferenceLogger = None
 @serve.deployment(name="RerankerDeployment", num_replicas=1)
@@ -62,6 +65,7 @@ class RerankerDeployment:
print("Reranker model loaded successfully")
# MLflow metrics
if InferenceLogger is not None:
self._mlflow = InferenceLogger(
experiment_name="ray-serve-reranker",
run_name=f"reranker-{self.model_id.split('/')[-1]}",
@@ -71,6 +75,8 @@ class RerankerDeployment:
             self._mlflow.initialize(
                 params={"model_id": self.model_id, "device": self.device, "use_ipex": str(self.use_ipex)}
             )
+        else:
+            self._mlflow = None
     async def __call__(self, request: dict[str, Any]) -> dict[str, Any]:
         """
@@ -105,6 +111,7 @@ class RerankerDeployment:
                 }
             )
+            if self._mlflow:
                 self._mlflow.log_request(
                     latency_s=time.time() - _start,
                     num_pairs=len(pairs),
@@ -153,6 +160,7 @@ class RerankerDeployment:
         results = results[:top_k]
         # Log to MLflow
+        if self._mlflow:
             self._mlflow.log_request(
                 latency_s=time.time() - _start,
                 num_pairs=len(pairs),

View File

@@ -11,7 +11,10 @@ from typing import Any
 from ray import serve
-from ray_serve.mlflow_logger import InferenceLogger
+try:
+    from ray_serve.mlflow_logger import InferenceLogger
+except ImportError:
+    InferenceLogger = None
 @serve.deployment(name="TTSDeployment", num_replicas=1)
@@ -36,6 +39,7 @@ class TTSDeployment:
print("TTS model loaded successfully")
# MLflow metrics
if InferenceLogger is not None:
self._mlflow = InferenceLogger(
experiment_name="ray-serve-tts",
run_name=f"tts-{self.model_name.split('/')[-1]}",
@@ -43,6 +47,8 @@ class TTSDeployment:
                 flush_every=5,
             )
             self._mlflow.initialize(params={"model_name": self.model_name, "use_gpu": str(self.use_gpu)})
+        else:
+            self._mlflow = None
     async def __call__(self, request: dict[str, Any]) -> dict[str, Any]:
         """
@@ -104,6 +110,7 @@ class TTSDeployment:
         duration = len(wav) / sample_rate
         # Log to MLflow
+        if self._mlflow:
             self._mlflow.log_request(
                 latency_s=time.time() - _start,
                 audio_duration_s=duration,

View File

@@ -11,7 +11,10 @@ from typing import Any
 from ray import serve
-from ray_serve.mlflow_logger import InferenceLogger
+try:
+    from ray_serve.mlflow_logger import InferenceLogger
+except ImportError:
+    InferenceLogger = None
 @serve.deployment(name="WhisperDeployment", num_replicas=1)
@@ -42,6 +45,7 @@ class WhisperDeployment:
print("Whisper model loaded successfully")
# MLflow metrics
if InferenceLogger is not None:
self._mlflow = InferenceLogger(
experiment_name="ray-serve-whisper",
run_name=f"whisper-{self.model_size}",
@@ -51,6 +55,8 @@ class WhisperDeployment:
             self._mlflow.initialize(
                 params={"model_size": self.model_size, "device": self.device, "compute_type": self.compute_type}
             )
+        else:
+            self._mlflow = None
     async def __call__(self, request: dict[str, Any]) -> dict[str, Any]:
         """
@@ -146,6 +152,7 @@ class WhisperDeployment:
         }
         # Log to MLflow
+        if self._mlflow:
             self._mlflow.log_request(
                 latency_s=time.time() - _start,
                 audio_duration_s=info.duration,