Files
kuberay-images/dockerfiles/Dockerfile.ray-worker-strixhalo
Billy D. b0c58b98a0
Some checks failed
Build and Push Images / determine-version (push) Successful in 4s
Build and Push Images / build (Dockerfile.ray-worker-rdna2, rdna2) (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-strixhalo, strixhalo) (push) Has been cancelled
Build and Push Images / Release (push) Has been cancelled
Build and Push Images / Notify (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-intel, intel) (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-nvidia, nvidia) (push) Has been cancelled
fix
2026-02-09 11:31:18 -05:00

138 lines
6.1 KiB
Docker

# syntax=docker/dockerfile:1.7
# AMD Strix Halo Ray Worker for khelben (gfx1151 / RDNA 3.5)
# Used for: vLLM (Llama 3.1 70B)
#
# Build:
# docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-strixhalo:latest \
# -f dockerfiles/Dockerfile.ray-worker-strixhalo .
#
# Multi-stage build: Extract ROCm 7.1 from vendor image, use Ray base for Python 3.12
# NOTE(review): the TheRock note below looks stale — the install step further
# down takes torch from the official ROCm 7.0 index and explicitly calls
# TheRock nightlies ABI-incompatible. Confirm which wheels are actually used.
# Note: Uses TheRock gfx110X wheels due to ROCm/ROCm#5853 segfault issue
# Note: Python 3.12 required — vLLM ROCm wheel (wheels.vllm.ai/rocm) is cp312 only

# Stage 1: ROCm 7.1 libraries from AMD vendor image.
# Only /opt/rocm is taken from this stage (see COPY --link below); the vendor
# image's own Python/PyTorch never reach the final image.
# NOTE(review): consider pinning both base images by digest for reproducibility.
FROM docker.io/rocm/pytorch:rocm7.1_ubuntu24.04_py3.12_pytorch_release_2.9.1 AS rocm-source

# Stage 2: Production image — Ray 2.53.0 on Python 3.12
# (Ubuntu 22.04 base, per the glibc note at the amdsmi shim step below).
FROM docker.io/rayproject/ray:2.53.0-py312 AS production
# OCI Image Spec labels — one LABEL instruction; resulting image metadata is
# identical to setting each key individually.
LABEL org.opencontainers.image.title="Ray Worker - AMD Strix Halo" \
      org.opencontainers.image.description="Ray Serve worker for AMD Strix Halo (vLLM LLM inference)" \
      org.opencontainers.image.vendor="DaviesTechLabs" \
      org.opencontainers.image.source="https://git.daviestechlabs.io/daviestechlabs/kuberay-images" \
      org.opencontainers.image.licenses="MIT" \
      gpu.target="amd-rocm-7.1-gfx1151" \
      ray.version="2.53.0"

# All application files (entrypoint script, below) live under /app.
WORKDIR /app
# Copy the ROCm 7.1 user-space stack out of the vendor image.
# --link makes this layer independent of earlier layers for better caching.
COPY --link --from=rocm-source /opt/rocm /opt/rocm
# ROCm environment variables - split to ensure ROCM_HOME is set first
# (NOTE(review): ${ROCM_HOME} is not actually referenced below — all paths are
# spelled out, so the split is only for layering/readability).
ENV ROCM_HOME=/opt/rocm
# PATH and LD_LIBRARY_PATH are written out in full (ROCm + conda + the stock
# system dirs) rather than appending to ${PATH}, so the result is explicit.
ENV PATH="/opt/rocm/bin:/opt/rocm/llvm/bin:/home/ray/anaconda3/bin:/home/ray/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
LD_LIBRARY_PATH="/opt/rocm/lib:/opt/rocm/lib64:/home/ray/anaconda3/lib" \
# NOTE(review): recent ROCm releases typically use /opt/rocm itself as the
# HSA/HIP root — confirm /opt/rocm/hsa and /opt/rocm/hip exist in the 7.1 tree.
HSA_PATH="/opt/rocm/hsa" \
HIP_PATH="/opt/rocm/hip" \
# Strix Halo (gfx1151) specific settings
HIP_VISIBLE_DEVICES=0 \
HSA_ENABLE_SDMA=0 \
PYTORCH_HIP_ALLOC_CONF="expandable_segments:True,max_split_size_mb:512" \
# Present gfx1151 as gfx1100 (rationale: see the ABI comment on the
# vLLM/torch install step below).
HSA_OVERRIDE_GFX_VERSION="11.0.0" \
ROCM_TARGET_LST="gfx1151,gfx1100"
# Install system runtime dependencies needed by the ROCm user-space libraries.
# Both apt caches live in BuildKit cache mounts, so nothing apt downloads ends
# up in an image layer. FIX: dropped `rm -rf /var/lib/apt/lists/*` — with
# /var/lib/apt mounted as a cache, that rm only wiped the shared cache mount
# (forcing a re-download of the package lists on every build) and saved zero
# bytes in the image. Package list sorted alphabetically for diffability.
USER root
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y --no-install-recommends \
kmod \
libdrm-amdgpu1 \
libdrm2 \
libelf1 \
libnuma1
# Install uv for fast Python package management (ADR-0014).
# FIX: pinned (was :latest) so builds are reproducible; bump deliberately.
COPY --from=ghcr.io/astral-sh/uv:0.8 /uv /usr/local/bin/uv
# Drop back to the unprivileged ray user (uid 1000) for all Python installs.
USER ray
# Install vLLM ROCm build, torch ROCm 7.0, and inference dependencies.
# IMPORTANT: vLLM ROCm wheel (0.15.1+rocm700) has C-extensions compiled against
# the official PyTorch ROCm 7.0 ABI. We MUST use torch from the same ROCm 7.0
# index — TheRock nightlies have an incompatible c10::hip ABI that causes
# undefined symbol errors in vllm._C and vllm._rocm_C.
# HSA_OVERRIDE_GFX_VERSION=11.0.0 makes gfx1151 appear as gfx1100 to this torch.
# --index-strategy unsafe-best-match: let uv pull each package from whichever
# index has the best-matching version (vllm from rocm/, torch from rocm7.0,
# setuptools/others from PyPI).
# NOTE(review): torch/torchaudio/torchvision are left unpinned — presumably so
# the resolver takes whatever the rocm7.0 index pairs with vllm 0.15.1+rocm700.
# Pinning exact versions would make rebuilds more reproducible; confirm intent.
# The uv cache mount (owned by uid 1000 to match USER ray) keeps downloaded
# wheels out of the image while speeding up rebuilds.
RUN --mount=type=cache,target=/home/ray/.cache/uv,uid=1000,gid=1000 \
uv pip install --system \
--index-strategy unsafe-best-match \
--prerelease=allow \
--extra-index-url https://wheels.vllm.ai/rocm/ \
--extra-index-url https://download.pytorch.org/whl/rocm7.0 \
'vllm==0.15.1+rocm700' \
torch torchaudio torchvision \
'transformers>=4.35.0,<5.0' \
'accelerate>=0.25.0,<1.0' \
'sentence-transformers>=2.3.0,<3.0' \
'httpx>=0.27.0,<1.0' \
'scipy>=1.11.0,<2.0'
# FIX: Ray base image has pandas 1.5.3 which is incompatible with numpy 2.x
# PyTorch ROCm 7.0 requires numpy 2.x, so upgrade pandas to match.
# Pin numpy <2.3 because numba (required by vLLM for speculative decoding)
# does not yet support numpy 2.3+.
# Kept as a separate RUN so changing these pins does not invalidate the
# (much larger) vLLM/torch layer above.
RUN --mount=type=cache,target=/home/ray/.cache/uv,uid=1000,gid=1000 \
uv pip install --system 'pandas>=2.0.0,<3.0' 'numpy>=2.1.0,<2.3'
# Install amdsmi sysfs shim LAST (required for vLLM ROCm platform detection).
# The native amdsmi from ROCm 7.1 requires glibc 2.38 (Ubuntu 24.04),
# but the Ray base image is Ubuntu 22.04 (glibc 2.35). This pure-Python
# shim reads GPU info from /sys/class/drm/* instead of libamd_smi.so.
# Must be installed after vLLM/torch to prevent PyPI amdsmi from overwriting it.
# NOTE(review): the COPY layer keeps the shim bytes in the image even though
# the RUN deletes /tmp/amdsmi-shim (rm in a layer never shrinks earlier
# layers). A `RUN --mount=type=bind,source=amdsmi-shim,target=/tmp/amdsmi-shim`
# would avoid both the extra layer content and the rm — verify the shim's
# build backend tolerates a read-only source dir before switching.
COPY --chown=1000:100 amdsmi-shim /tmp/amdsmi-shim
RUN --mount=type=cache,target=/home/ray/.cache/uv,uid=1000,gid=1000 \
uv pip install --system /tmp/amdsmi-shim && rm -rf /tmp/amdsmi-shim
# FIX: Patch torch.cuda.mem_get_info for unified memory APUs.
# On Strix Halo, PyTorch reports GTT (128 GiB) instead of real VRAM (96 GiB)
# from sysfs. vLLM uses mem_get_info to pre-allocate, so wrong numbers cause
# OOM or "insufficient GPU memory" at startup. The .pth file auto-patches
# mem_get_info on Python startup to return sysfs VRAM values.
# Mechanism: Python's `site` module executes .pth lines that start with
# "import" at interpreter startup, which loads strixhalo_vram_fix and lets it
# apply the monkey-patch before any user code runs.
# NOTE(review): both destinations hardcode the base image's anaconda
# Python 3.12 site-packages path — revisit if the Ray base image changes.
COPY --chown=1000:100 amdsmi-shim/strixhalo_vram_fix.py \
/home/ray/anaconda3/lib/python3.12/site-packages/strixhalo_vram_fix.py
RUN echo "import strixhalo_vram_fix" > \
/home/ray/anaconda3/lib/python3.12/site-packages/strixhalo_vram_fix.pth
# Optional: pre-download common models for faster cold starts (grows the image)
# RUN python3 -c "from sentence_transformers import SentenceTransformer; SentenceTransformer('BAAI/bge-large-en-v1.5')"

# Pre-create the aiter JIT build cache directory.
# The vLLM ROCm aiter package compiles kernels on first import and needs this
# directory writable by the ray user (uid 1000). `install -d` creates the
# directory and sets ownership in a single step (same result as
# mkdir -p + chown).
USER root
RUN install -d -o 1000 -g 100 /home/ray/.aiter
USER ray

# Copy entrypoint script (ray-serve-apps is installed from PyPI at runtime)
COPY --chown=1000:100 --chmod=755 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh
# Runtime environment configuration.
ENV PYTHONPATH=/app \
PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
# NOTE(review): defaults below are presumably consumed by ray-entrypoint.sh
# and must match the RayCluster/KubeRay manifest — confirm against the chart.
RAY_HEAD_SVC="ai-inference-raycluster-head-svc" \
GPU_RESOURCE="gpu_amd_strixhalo" \
NUM_GPUS="1"
# Health check: shell form is intentional here (`||` needs a shell).
# start-period=120s allows for slow worker startup (e.g. first-import JIT
# compilation noted above) before failures count against retries.
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \
CMD ray status --address=localhost:6379 || exit 1
# Exec form: the entrypoint script runs as PID 1 and receives SIGTERM directly.
ENTRYPOINT ["/app/ray-entrypoint.sh"]