feat: Add GPU-specific Ray worker images with CI/CD
- Add Dockerfiles for nvidia, rdna2, strixhalo, and intel GPU targets
- Add ray-serve modules (embeddings, whisper, tts, llm, reranker)
- Add Gitea Actions workflow for automated builds
- Add Makefile for local development
- Update README with comprehensive documentation
This commit is contained in:
77
dockerfiles/Dockerfile.ray-worker-intel
Normal file
77
dockerfiles/Dockerfile.ray-worker-intel
Normal file
@@ -0,0 +1,77 @@
|
||||
# syntax=docker/dockerfile:1
# Intel GPU Ray Worker for danilo (Intel i915 iGPU)
# Used for: Reranker
#
# Build from llm-workflows root:
#   docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-intel:latest -f dockerfiles/Dockerfile.ray-worker-intel .
#
# Built on the Ray base image so the Python 3.11 interpreter matches the
# Ray head node exactly.
FROM rayproject/ray:2.53.0-py311 AS base

LABEL maintainer="billy-davies-2"
LABEL description="Ray worker for Intel GPUs (Reranker)"
LABEL gpu.target="intel-xpu"

WORKDIR /app

# Fail RUN pipelines when any stage fails (e.g. the `wget | gpg` keyring
# setup below); the default `/bin/sh -c` only reports the last command's
# status, so a failed download would silently produce a broken keyring.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Install system dependencies for Intel GPU support
USER root
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    git \
    gnupg2 \
    wget \
    && rm -rf /var/lib/apt/lists/*

# Add Intel oneAPI repository for runtime libraries
RUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor -o /usr/share/keyrings/intel-oneapi-archive-keyring.gpg && \
    echo "deb [signed-by=/usr/share/keyrings/intel-oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" > /etc/apt/sources.list.d/intel-oneapi.list

# Add Intel compute-runtime repository for Level Zero, then install the
# OpenCL / Level Zero runtime stack in the same layer as the apt update
# (keeps the package index from going stale between layers).
RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --dearmor -o /usr/share/keyrings/intel-graphics-archive-keyring.gpg && \
    echo "deb [signed-by=/usr/share/keyrings/intel-graphics-archive-keyring.gpg arch=amd64] https://repositories.intel.com/gpu/ubuntu jammy client" > /etc/apt/sources.list.d/intel-gpu.list && \
    apt-get update && apt-get install -y --no-install-recommends \
    intel-oneapi-runtime-opencl \
    intel-oneapi-runtime-compilers \
    intel-level-zero-gpu \
    level-zero \
    && rm -rf /var/lib/apt/lists/*

# Drop back to the unprivileged ray user for pip installs and runtime
USER ray

# Ensure Ray CLI is in PATH
ENV PATH="/home/ray/.local/bin:${PATH}"

# Install Intel Extension for PyTorch (IPEX) for Python 3.11
# This provides XPU support for Intel GPUs
RUN pip install --no-cache-dir \
    torch==2.5.1 \
    intel-extension-for-pytorch==2.5.10+xpu \
    --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/

# Install Ray Serve and AI inference dependencies.
# NOTE(review): these are unpinned; consider pinning versions so rebuilds
# are reproducible.
RUN pip install --no-cache-dir \
    sentence-transformers \
    FlagEmbedding \
    fastapi \
    uvicorn \
    httpx \
    pydantic \
    transformers \
    huggingface_hub

# Copy Ray Serve Python code
COPY ray-serve/ /app/ray_serve/
ENV PYTHONPATH=/app

# Copy Ray Serve entrypoint (execute bit set at copy time)
COPY --chmod=755 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh

# Default environment variables (consumed by ray-entrypoint.sh)
ENV RAY_HEAD_SVC="ai-inference-raycluster-head-svc"
ENV GPU_RESOURCE="gpu_intel"
ENV NUM_GPUS="1"
# Intel XPU settings
ENV ZE_AFFINITY_MASK=0
# NOTE(review): SYCL_DEVICE_FILTER is deprecated in newer oneAPI releases in
# favour of ONEAPI_DEVICE_SELECTOR — confirm which one this stack honours.
ENV SYCL_DEVICE_FILTER=level_zero:gpu

ENTRYPOINT ["/app/ray-entrypoint.sh"]
|
||||
53
dockerfiles/Dockerfile.ray-worker-nvidia
Normal file
53
dockerfiles/Dockerfile.ray-worker-nvidia
Normal file
@@ -0,0 +1,53 @@
|
||||
# syntax=docker/dockerfile:1
# NVIDIA GPU Ray Worker for elminster (RTX 2070)
# Used for: Whisper STT, TTS
#
# Build from llm-workflows root:
#   docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-nvidia:latest -f dockerfiles/Dockerfile.ray-worker-nvidia .
#
# Built on the CUDA-enabled Ray base image so the Python 3.11 interpreter
# matches the Ray head node.
FROM rayproject/ray:2.53.0-py311-cu121

LABEL maintainer="billy-davies-2"
LABEL description="Ray worker for NVIDIA GPUs (Whisper, TTS)"
LABEL gpu.target="nvidia-cuda"

WORKDIR /app

# Install system dependencies for audio processing
USER root
RUN apt-get update && apt-get install -y --no-install-recommends \
    ffmpeg \
    git \
    libsndfile1 \
    && rm -rf /var/lib/apt/lists/*
USER ray

# Install Python dependencies for inference.
# NOTE(review): these are unpinned; consider pinning versions so rebuilds
# are reproducible.
RUN pip install --no-cache-dir \
    faster-whisper \
    openai-whisper \
    TTS \
    soundfile \
    pydub \
    librosa \
    torch \
    torchaudio \
    fastapi \
    uvicorn \
    httpx \
    pydantic

# Copy Ray Serve Python code
COPY --chown=ray:ray ray-serve/ /app/ray_serve/
ENV PYTHONPATH=/app

# Copy Ray Serve entrypoint.
# --chmod sets the execute bit in the same layer instead of a follow-up
# `RUN chmod`, which would duplicate the file into a second layer; this
# also matches the other ray-worker Dockerfiles.
COPY --chown=ray:ray --chmod=755 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh

# Default environment variables (consumed by ray-entrypoint.sh)
ENV CUDA_VISIBLE_DEVICES=0
ENV RAY_HEAD_SVC="ai-inference-raycluster-head-svc"
ENV GPU_RESOURCE="gpu_nvidia"
ENV NUM_GPUS="1"

ENTRYPOINT ["/app/ray-entrypoint.sh"]
|
||||
65
dockerfiles/Dockerfile.ray-worker-rdna2
Normal file
65
dockerfiles/Dockerfile.ray-worker-rdna2
Normal file
@@ -0,0 +1,65 @@
|
||||
# syntax=docker/dockerfile:1
# Ray Worker for AMD RDNA 2 (gfx1035 - Radeon 680M)
# Pre-bakes all dependencies for fast startup
#
# Build from llm-workflows root:
#   docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-rdna2:latest -f dockerfiles/Dockerfile.ray-worker-rdna2 .
#
# Multi-stage build to ensure Python 3.11.11 matches Ray head node

# Stage 1: Extract ROCm libraries from vendor image
# NOTE(review): the vendor stage ships ROCm 6.4.4 while the PyTorch wheels
# below target rocm6.2 — confirm the copied runtime is ABI-compatible.
FROM docker.io/rocm/pytorch:rocm6.4.4_ubuntu22.04_py3.10_pytorch_release_2.7.1 AS rocm-libs

# Stage 2: Build on Ray base with Python 3.11
FROM rayproject/ray:2.53.0-py311 AS base

# Copy ROCm stack from vendor image
COPY --from=rocm-libs /opt/rocm /opt/rocm

# Set up ROCm environment
ENV ROCM_HOME=/opt/rocm
ENV PATH="${ROCM_HOME}/bin:${ROCM_HOME}/llvm/bin:${PATH}"
ENV LD_LIBRARY_PATH="${ROCM_HOME}/lib:${ROCM_HOME}/lib64:${LD_LIBRARY_PATH}"
ENV HSA_PATH="${ROCM_HOME}/hsa"
ENV HIP_PATH="${ROCM_HOME}/hip"

# ROCm environment for RDNA 2 (gfx1035).
# HSA_OVERRIDE_GFX_VERSION: gfx1035 (Radeon 680M) is not an officially
# supported ROCm target; reporting it as gfx1030 is the standard workaround
# so prebuilt RDNA2 kernels load (the strixhalo worker applies the
# equivalent override for its GPU). TODO confirm on the target host.
ENV HIP_VISIBLE_DEVICES=0 \
    HSA_ENABLE_SDMA=0 \
    HSA_OVERRIDE_GFX_VERSION=10.3.0 \
    PYTORCH_HIP_ALLOC_CONF=expandable_segments:True \
    PYTHONPATH=/app

WORKDIR /app

# Install ROCm system dependencies
USER root
RUN apt-get update && apt-get install -y --no-install-recommends \
    kmod \
    libdrm-amdgpu1 \
    libdrm2 \
    libelf1 \
    libnuma1 \
    && rm -rf /var/lib/apt/lists/*
USER ray

# Install PyTorch ROCm wheels compatible with Python 3.11 and ROCm 6.2
RUN pip install --no-cache-dir \
    torch==2.5.1 torchvision torchaudio \
    --index-url https://download.pytorch.org/whl/rocm6.2

# Install Ray Serve and AI inference dependencies
RUN pip install --no-cache-dir \
    transformers \
    accelerate \
    sentence-transformers \
    httpx \
    numpy \
    scipy

# Pre-download embedding model for faster cold starts.
# `|| true` keeps a transient Hugging Face outage from failing the build
# (matching the strixhalo worker); the model is fetched at runtime instead.
RUN python3 -c "from sentence_transformers import SentenceTransformer; SentenceTransformer('BAAI/bge-large-en-v1.5')" || true

# Copy application code
COPY ray-serve/ /app/ray_serve/
COPY --chmod=755 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh

ENTRYPOINT ["/app/ray-entrypoint.sh"]
|
||||
72
dockerfiles/Dockerfile.ray-worker-strixhalo
Normal file
72
dockerfiles/Dockerfile.ray-worker-strixhalo
Normal file
@@ -0,0 +1,72 @@
|
||||
# Ray Worker for AMD Strix Halo (gfx1151 / RDNA 3.5)
# Pre-bakes all dependencies for fast startup
#
# Build from llm-workflows root:
#   docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-strixhalo:latest -f dockerfiles/Dockerfile.ray-worker-strixhalo .
#
# Two-stage build: the vendor image supplies the ROCm 7.1 userspace stack,
# while the Ray base image pins Python 3.11 to match the Ray head node.

# Stage 1: vendor image providing /opt/rocm
FROM docker.io/rocm/pytorch:rocm7.1_ubuntu24.04_py3.12_pytorch_release_2.9.1 AS rocm-libs

# Stage 2: Ray base with Python 3.11
FROM rayproject/ray:2.53.0-py311 AS base

# Bring the ROCm stack across from the vendor stage
COPY --from=rocm-libs /opt/rocm /opt/rocm

# ROCm toolchain locations
ENV ROCM_HOME=/opt/rocm
ENV PATH="${ROCM_HOME}/bin:${ROCM_HOME}/llvm/bin:${PATH}"
ENV LD_LIBRARY_PATH="${ROCM_HOME}/lib:${ROCM_HOME}/lib64:${LD_LIBRARY_PATH}"
ENV HSA_PATH="${ROCM_HOME}/hsa"
ENV HIP_PATH="${ROCM_HOME}/hip"

# Runtime tuning for AMD Strix Halo (gfx1151 / RDNA 3.5), grouped into a
# single ENV instruction.
ENV HIP_VISIBLE_DEVICES=0 \
    HSA_ENABLE_SDMA=0 \
    PYTORCH_HIP_ALLOC_CONF=expandable_segments:True,max_split_size_mb:512 \
    HSA_OVERRIDE_GFX_VERSION=11.0.0 \
    ROCM_TARGET_LST=gfx1151,gfx1100 \
    PYTHONPATH=/app

WORKDIR /app

# System libraries the ROCm userspace stack needs
USER root
RUN apt-get update && apt-get install -y --no-install-recommends \
    kmod \
    libdrm-amdgpu1 \
    libdrm2 \
    libelf1 \
    libnuma1 \
    && rm -rf /var/lib/apt/lists/*
USER ray

# WORKAROUND: ROCm/ROCm#5853 - Standard PyTorch ROCm wheels cause segfault
# in libhsa-runtime64.so during VRAM allocation on gfx1151 (Strix Halo).
# TheRock gfx110X-all packages provide Python 3.11 compatible wheels.
RUN pip install --no-cache-dir \
    --index-url https://rocm.nightlies.amd.com/v2/gfx110X-all/ \
    torch torchaudio torchvision

# Serving stack and model libraries for Ray Serve deployments
RUN pip install --no-cache-dir \
    vllm \
    transformers \
    accelerate \
    sentence-transformers \
    httpx \
    numpy \
    scipy

# Warm the Hugging Face cache so first requests start faster; tolerate
# download failures so an offline or rate-limited build still succeeds.
RUN python3 -c "from sentence_transformers import SentenceTransformer; SentenceTransformer('BAAI/bge-large-en-v1.5')" || true

# Application code
COPY ray-serve/ /app/ray_serve/

# Worker entrypoint (execute bit set at copy time)
COPY --chmod=755 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh

ENTRYPOINT ["/app/ray-entrypoint.sh"]
|
||||
27
dockerfiles/ray-entrypoint.sh
Normal file
27
dockerfiles/ray-entrypoint.sh
Normal file
@@ -0,0 +1,27 @@
|
||||
#!/bin/bash
# Ray Worker Entrypoint
# Connects to the Ray head node and registers this worker's GPU as a
# custom Ray resource so Serve deployments can target specific GPU types.
#
# Environment (all have defaults, set by the worker Dockerfiles):
#   RAY_HEAD_SVC  - DNS name of the Ray head service (default: ray-head-svc)
#   GPU_RESOURCE  - custom resource name to register (default: gpu_amd)
#   NUM_GPUS      - number of GPUs to expose to Ray (default: 1)

# -e: abort on error; -u: catch typoed/unset variables (safe here, every
# expansion below has a default); pipefail: surface failures in pipelines.
set -euo pipefail

# Ensure Ray is in PATH (works across all base images)
export PATH="/home/ray/.local/bin:/home/ray/anaconda3/bin:${PATH}"

# Get Ray head address from environment or default (6379 is Ray's GCS port)
RAY_HEAD_ADDRESS="${RAY_HEAD_SVC:-ray-head-svc}:6379"

# Get custom resources from environment
GPU_RESOURCE="${GPU_RESOURCE:-gpu_amd}"
NUM_GPUS="${NUM_GPUS:-1}"

echo "Starting Ray worker..."
echo " Head address: $RAY_HEAD_ADDRESS"
echo " GPU resource: $GPU_RESOURCE"
echo " Num GPUs: $NUM_GPUS"

# Start Ray worker with custom resources.
# exec replaces the shell so `ray` becomes PID 1 and receives SIGTERM
# directly from the container runtime on shutdown.
exec ray start \
  --address="$RAY_HEAD_ADDRESS" \
  --num-gpus="$NUM_GPUS" \
  --resources="{\"$GPU_RESOURCE\": 1}" \
  --block
|
||||
Reference in New Issue
Block a user