feat: Add GPU-specific Ray worker images with CI/CD
Some checks failed
Build and Push Images / build-nvidia (push) Failing after 1s
Build and Push Images / build-rdna2 (push) Failing after 1s
Build and Push Images / build-strixhalo (push) Failing after 1s
Build and Push Images / build-intel (push) Failing after 1s

- Add Dockerfiles for nvidia, rdna2, strixhalo, and intel GPU targets
- Add ray-serve modules (embeddings, whisper, tts, llm, reranker)
- Add Gitea Actions workflow for automated builds
- Add Makefile for local development
- Update README with comprehensive documentation
This commit is contained in:
2026-02-01 15:04:31 -05:00
parent e68d5c1f0e
commit a16ffff73f
16 changed files with 1311 additions and 2 deletions

View File

@@ -0,0 +1,65 @@
# syntax=docker/dockerfile:1
# Ray Worker for AMD RDNA 2 (gfx1035 - Radeon 680M)
# Pre-bakes all dependencies for fast startup
#
# Build from llm-workflows root:
#   docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-rdna2:latest -f dockerfiles/Dockerfile.ray-worker-rdna2 .
#
# Multi-stage build: the Ray base pins Python 3.11 to match the Ray head node,
# while the vendor ROCm image only ships Python 3.10 — so ROCm comes from the
# vendor image and Python/Ray from the Ray base.

# Stage 1: Extract ROCm libraries from vendor image.
# NOTE(review): this vendor image ships ROCm 6.4.4, but the PyTorch wheels
# installed below target ROCm 6.2 (and bundle their own ROCm runtime libs).
# Confirm the container resolves a consistent ROCm stack at runtime, or align
# the two versions.
FROM docker.io/rocm/pytorch:rocm6.4.4_ubuntu22.04_py3.10_pytorch_release_2.7.1 AS rocm-libs

# Stage 2: Build on Ray base with Python 3.11 (matches Ray head node)
FROM rayproject/ray:2.53.0-py311 AS base

# Copy ROCm stack from vendor image
COPY --from=rocm-libs /opt/rocm /opt/rocm

# Set up ROCm environment. ROCM_HOME must be its own ENV instruction: keys in
# a single ENV cannot reference values assigned in the same instruction.
ENV ROCM_HOME=/opt/rocm
ENV PATH="${ROCM_HOME}/bin:${ROCM_HOME}/llvm/bin:${PATH}" \
    LD_LIBRARY_PATH="${ROCM_HOME}/lib:${ROCM_HOME}/lib64:${LD_LIBRARY_PATH}" \
    HSA_PATH="${ROCM_HOME}/hsa" \
    HIP_PATH="${ROCM_HOME}/hip"

# ROCm environment for RDNA 2 (gfx1035):
#  - HIP_VISIBLE_DEVICES=0: expose only the first (integrated) GPU
#  - HSA_ENABLE_SDMA=0: disables SDMA engine transfers — presumably a gfx1035
#    APU workaround; TODO confirm still needed on current ROCm
#  - expandable_segments: reduces HIP caching-allocator fragmentation
ENV HIP_VISIBLE_DEVICES=0 \
    HSA_ENABLE_SDMA=0 \
    PYTORCH_HIP_ALLOC_CONF=expandable_segments:True \
    PYTHONPATH=/app

WORKDIR /app

# Install ROCm runtime system dependencies. Root is needed only for apt; drop
# back to the unprivileged ray user immediately after. Packages sorted
# alphabetically; apt lists removed in the same layer (hadolint DL3009).
USER root
RUN apt-get update && apt-get install -y --no-install-recommends \
        kmod \
        libdrm-amdgpu1 \
        libdrm2 \
        libelf1 \
        libnuma1 \
    && rm -rf /var/lib/apt/lists/*
USER ray

# Install PyTorch ROCm 6.2 wheels for Python 3.11. All three packages pinned
# to the matching release set for reproducible builds (torch 2.5.1 pairs with
# torchvision 0.20.1 / torchaudio 2.5.1).
RUN pip install --no-cache-dir \
        torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 \
        --index-url https://download.pytorch.org/whl/rocm6.2

# Install Ray Serve and AI inference dependencies (sorted alphabetically).
# NOTE(review): unpinned (hadolint DL3013) — consider pinning for
# reproducibility once known-good versions are validated.
RUN pip install --no-cache-dir \
        accelerate \
        httpx \
        numpy \
        scipy \
        sentence-transformers \
        transformers

# Pre-download embedding model into the image so cold starts skip the network
RUN python3 -c "from sentence_transformers import SentenceTransformer; SentenceTransformer('BAAI/bge-large-en-v1.5')"

# Copy application code; entrypoint made executable at copy time (no extra
# chown/chmod layer)
COPY ray-serve/ /app/ray_serve/
COPY --chmod=755 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh

ENTRYPOINT ["/app/ray-entrypoint.sh"]