# syntax=docker/dockerfile:1.7
# AMD Strix Halo Ray Worker for khelben (gfx1151 / RDNA 3.5)
# Used for: vLLM (Llama 3.1 70B)
#
# Build:
#   docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-strixhalo:latest \
#     -f dockerfiles/Dockerfile.ray-worker-strixhalo .
#
# Uses ROCm vendor image as base (Ubuntu 24.04 / glibc 2.38) so that all
# ROCm 7.1 shared libraries (libMIOpen, libhipblas, etc.) find a compatible
# glibc. Ray 2.53.0 is installed into the vendor venv via pip.
# Note: Python 3.12 required — vLLM ROCm wheel (wheels.vllm.ai/rocm) is cp312 only
FROM docker.io/rocm/pytorch:rocm7.1_ubuntu24.04_py3.12_pytorch_release_2.9.1

# OCI Image Spec labels — one LABEL instruction, since they form a single
# metadata unit
LABEL org.opencontainers.image.title="Ray Worker - AMD Strix Halo" \
      org.opencontainers.image.description="Ray Serve worker for AMD Strix Halo (vLLM LLM inference)" \
      org.opencontainers.image.vendor="DaviesTechLabs" \
      org.opencontainers.image.source="https://git.daviestechlabs.io/daviestechlabs/kuberay-images" \
      org.opencontainers.image.licenses="MIT" \
      gpu.target="amd-rocm-7.1-gfx1151" \
      ray.version="2.53.0"

WORKDIR /app

# The vendor image ships a venv at /opt/venv with Python 3.12 + torch 2.9.1.
# We keep using that venv for all pip installs.
ENV ROCM_HOME=/opt/rocm \
    VIRTUAL_ENV=/opt/venv
ENV PATH="/opt/venv/bin:/opt/rocm/bin:/opt/rocm/llvm/bin:/home/ray/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
    LD_LIBRARY_PATH="/opt/rocm/lib:/opt/rocm/lib64:/opt/venv/lib" \
    HSA_PATH="/opt/rocm/hsa" \
    HIP_PATH="/opt/rocm/hip" \
    # Strix Halo (gfx1151) specific settings
    HIP_VISIBLE_DEVICES=0 \
    HSA_ENABLE_SDMA=0 \
    PYTORCH_HIP_ALLOC_CONF="expandable_segments:True,max_split_size_mb:512" \
    HSA_OVERRIDE_GFX_VERSION="11.0.0" \
    ROCM_TARGET_LST="gfx1151,gfx1100"

# System dependencies + create ray user (uid 1000 / gid 100) for KubeRay.
#
# - apt lists and archives live in BuildKit cache mounts, so we deliberately
#   do NOT `rm -rf /var/lib/apt/lists/*`: that would wipe the shared cache the
#   mounts exist to preserve, and mount contents never land in an image layer
#   anyway.
# - The groupadd fallback is grouped in a subshell: a bare `… || true` at this
#   position would (by shell left-associativity) also mask a failed
#   `apt-get install`, letting a broken package set slip through the build.
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update && apt-get install -y --no-install-recommends \
        kmod \
        libdrm-amdgpu1 \
        libdrm2 \
        libelf1 \
        libnuma1 \
        libopenmpi3 \
    && (groupadd -g 100 -o users 2>/dev/null || true) \
    && useradd -m -u 1000 -g 100 -s /bin/bash ray \
    && mkdir -p /home/ray/.aiter && chown 1000:100 /home/ray/.aiter

# Install uv for fast Python package management (ADR-0014)
# NOTE(review): `:latest` makes the build non-reproducible — consider pinning
# a specific uv release (or a digest) here.
COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv

# Remove vendor torch — the vendor ships torch 2.9.1+rocm7.1 but vLLM
# ROCm wheel (0.15.1+rocm700) was compiled against the PyTorch ROCm 7.0
# ABI. Installing from the ROCm 7.0 index avoids undefined-symbol errors
# in vllm._C / vllm._rocm_C (e.g. silu_and_mul).
RUN uv pip uninstall --python /opt/venv/bin/python3 \
    torch torchaudio torchvision 2>/dev/null || true
# Install Ray, vLLM ROCm, torch ROCm 7.0, and inference dependencies.
# --index-strategy unsafe-best-match: let uv pull each package from whichever
# index has the best-matching version (vllm from rocm/, torch from rocm7.0,
# setuptools/others from PyPI).
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --python /opt/venv/bin/python3 \
    --index-strategy unsafe-best-match \
    --prerelease=allow \
    --extra-index-url https://wheels.vllm.ai/rocm/ \
    --extra-index-url https://download.pytorch.org/whl/rocm7.0 \
    'ray[default]==2.53.0' \
    'vllm==0.15.1+rocm700' \
    torch torchaudio torchvision \
    'transformers>=4.35.0,<5.0' \
    'accelerate>=0.25.0,<1.0' \
    'sentence-transformers>=2.3.0,<3.0' \
    'httpx>=0.27.0,<1.0' \
    'scipy>=1.11.0,<2.0' \
    'pandas>=2.0.0,<3.0' \
    'numpy>=2.1.0,<2.3'

# Install amdsmi sysfs shim (required for vLLM ROCm platform detection).
# Even though the native amdsmi works on Ubuntu 24.04, the sysfs shim is
# still needed because the native library reports GTT instead of VRAM on
# unified-memory APUs. Must be installed after vLLM/torch so PyPI amdsmi
# doesn't overwrite it.
#
# Bind-mount the shim sources instead of COPY + rm: a COPY creates a layer
# that permanently contains the files even after a later `rm -rf`, whereas a
# build-time bind mount never enters any image layer.
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=amdsmi-shim,target=/tmp/amdsmi-shim \
    uv pip install --python /opt/venv/bin/python3 /tmp/amdsmi-shim
# Patch torch.cuda.mem_get_info for unified memory APUs.
# On Strix Halo, PyTorch reports GTT (128 GiB) instead of real VRAM (96 GiB).
# The .pth file auto-patches mem_get_info on Python startup.
COPY amdsmi-shim/strixhalo_vram_fix.py \
     /opt/venv/lib/python3.12/site-packages/strixhalo_vram_fix.py
RUN echo "import strixhalo_vram_fix" > \
    /opt/venv/lib/python3.12/site-packages/strixhalo_vram_fix.pth

# Copy entrypoint script (ray-serve-apps is installed from PyPI at runtime).
# Ownership is set at COPY time: a follow-up `RUN chown -R` re-stores every
# touched file in a fresh layer, doubling its footprint in the image.
COPY --chmod=755 --chown=1000:100 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh

# /app itself was created root-owned by WORKDIR; hand the directory (not its
# contents, which COPY --chown already covered) to the ray user
RUN chown 1000:100 /app

# Switch to ray user for runtime (KubeRay expects uid 1000)
USER 1000

# Environment configuration
ENV PYTHONPATH=/app \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    RAY_HEAD_SVC="ai-inference-raycluster-head-svc" \
    GPU_RESOURCE="gpu_amd_strixhalo" \
    NUM_GPUS="1"

# Health check — `ray status` fails when the local node is not connected to
# the GCS on localhost:6379
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \
    CMD ray status --address=localhost:6379 || exit 1

ENTRYPOINT ["/app/ray-entrypoint.sh"]