Some checks failed
Build and Push Images / determine-version (push) Successful in 4s
Build and Push Images / Release (push) Has been cancelled
Build and Push Images / Notify (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-rdna2, rdna2) (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-strixhalo, strixhalo) (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-intel, intel) (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-nvidia, nvidia) (push) Has been cancelled
71 lines
2.5 KiB
Docker
71 lines
2.5 KiB
Docker
# syntax=docker/dockerfile:1.7
# NVIDIA GPU Ray Worker for elminster (RTX 2070)
# Used for: Whisper STT, XTTS Text-to-Speech
#
# Build:
#   docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-nvidia:latest \
#     -f dockerfiles/Dockerfile.ray-worker-nvidia .

FROM docker.io/rayproject/ray:2.53.0-py312-cu121

# OCI Image Spec labels (single LABEL instruction = single metadata layer)
LABEL org.opencontainers.image.title="Ray Worker - NVIDIA GPU" \
      org.opencontainers.image.description="Ray Serve worker for NVIDIA GPUs (Whisper STT, XTTS TTS)" \
      org.opencontainers.image.vendor="DaviesTechLabs" \
      org.opencontainers.image.source="https://git.daviestechlabs.io/daviestechlabs/kuberay-images" \
      org.opencontainers.image.licenses="MIT" \
      gpu.target="nvidia-cuda-12.1" \
      ray.version="2.53.0"

WORKDIR /app

# Install system dependencies (audio decode/encode for Whisper STT / XTTS TTS).
# The apt cache mounts keep downloaded packages and index lists on the build
# host between builds. For the mounts to be effective we must (a) disable the
# base image's docker-clean hook so .debs are retained, and (b) NOT run
# `rm -rf /var/lib/apt/lists/*` — the lists live inside the mount, not in the
# image layer, so deleting them only defeats the cache.
USER root
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' \
       > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update && apt-get install -y --no-install-recommends \
        ffmpeg \
        libsndfile1

# Install uv for fast Python package management (ADR-0014).
# Pinned tag instead of :latest so builds are reproducible (hadolint DL3007).
COPY --from=ghcr.io/astral-sh/uv:0.5.11 /uv /usr/local/bin/uv

# Switch back to the non-root ray user (uid 1000, gid 100 in rayproject images)
USER ray

# Install Python dependencies with uv cache mount (10-100x faster than pip).
# Mount ownership matches the ray user (uid 1000, gid 100 — same as the
# --chown on the entrypoint COPY below). Version ranges are upper-bounded so
# a breaking major release cannot slip into a rebuild.
RUN --mount=type=cache,target=/home/ray/.cache/uv,uid=1000,gid=100 \
    uv pip install --system \
        'faster-whisper>=1.0.0,<2.0' \
        'coqui-tts>=0.27.0,<1.0' \
        'soundfile>=0.12.0,<1.0' \
        'pydub>=0.25.0,<1.0' \
        'librosa>=0.10.0,<1.0' \
        'torch>=2.0.0,<3.0' \
        'torchaudio>=2.0.0,<3.0' \
        'transformers>=4.46.0,<5.0' \
        'fastapi>=0.100.0,<1.0' \
        'uvicorn>=0.23.0,<1.0' \
        'httpx>=0.27.0,<1.0' \
        'pydantic>=2.0.0,<3.0'

# Copy entrypoint script (ray-serve-apps is installed from PyPI at runtime)
COPY --chown=1000:100 --chmod=755 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh

# Environment configuration
#   CUDA_VISIBLE_DEVICES=0  - single-GPU host (RTX 2070), expose only device 0
#   RAY_HEAD_SVC            - Kubernetes service name of the Ray head node
#   GPU_RESOURCE/NUM_GPUS   - custom Ray resource label consumed by the entrypoint
ENV PYTHONPATH=/app \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    CUDA_VISIBLE_DEVICES=0 \
    RAY_HEAD_SVC="ai-inference-raycluster-head-svc" \
    GPU_RESOURCE="gpu_nvidia" \
    NUM_GPUS="1"

# Health check - verify the local Ray GCS answers (worker still connected).
# start-period is generous because model-serving workers are slow to boot.
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
    CMD ray status --address=localhost:6379 || exit 1

ENTRYPOINT ["/app/ray-entrypoint.sh"]