# AMD Strix Halo Ray Worker for khelben (gfx1151 / RDNA 3.5)
# Used for: vLLM (Llama 3.1 70B)
#
# Build:
# docker build -t registry.lab.daviestechlabs.io/daviestechlabs/ray-worker-strixhalo:v1.0.21 \
# -f dockerfiles/Dockerfile.ray-worker-strixhalo .
#
# STRATEGY: Full source build of vLLM on AMD's vendor PyTorch image.
#
# The vendor image (rocm/pytorch ROCm 7.0.2 / Ubuntu 24.04 / Python 3.12)
# ships torch 2.9.1 compiled by AMD CI against the exact ROCm libraries in
# the image. Pre-built vLLM torch wheels (wheels.vllm.ai) carry a custom
# torch 2.9.1+git8907517 that segfaults in libhsa-runtime64.so on gfx1151
# during HSA queue creation. By keeping the vendor torch and compiling vLLM
# from source we guarantee ABI compatibility across the entire stack.
#
# gfx1151 is mapped to gfx1100 at runtime via HSA_OVERRIDE_GFX_VERSION=11.0.0,
# so all HIP kernels are compiled for the gfx1100 target.
#
# Note: AITER is gfx9-only. On gfx11, vLLM defaults to the TRITON_ATTN backend.
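#
# Quick sanity check on the built image (illustrative invocation):
#   docker run --rm --device=/dev/kfd --device=/dev/dri --group-add video \
#     <image> python3 -c "import torch; print(torch.cuda.get_device_properties(0).gcnArchName)"
# With HSA_OVERRIDE_GFX_VERSION=11.0.0 the device should report gfx1100.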
FROM docker.io/rocm/pytorch:rocm7.0.2_ubuntu24.04_py3.12_pytorch_release_2.9.1
# ── Build arguments ─────────────────────────────────────────────────────
ARG VLLM_VERSION=v0.15.1
ARG PYTORCH_ROCM_ARCH="gfx1100"
ARG MAX_JOBS=16
# ── OCI labels ──────────────────────────────────────────────────────────
LABEL org.opencontainers.image.title="Ray Worker - AMD Strix Halo"
LABEL org.opencontainers.image.description="Ray Serve worker for AMD Strix Halo (vLLM source-built)"
LABEL org.opencontainers.image.vendor="DaviesTechLabs"
LABEL org.opencontainers.image.source="https://git.daviestechlabs.io/daviestechlabs/kuberay-images"
LABEL org.opencontainers.image.licenses="MIT"
LABEL gpu.target="amd-rocm-7.0.2-gfx1151"
LABEL ray.version="2.53.0"
LABEL vllm.build="source"
WORKDIR /app
# ── Persistent environment ──────────────────────────────────────────────
# The vendor image ships a venv at /opt/venv with Python 3.12 + torch 2.9.1.
# All pip installs go into this venv.
ENV ROCM_HOME=/opt/rocm \
VIRTUAL_ENV=/opt/venv \
HIP_CLANG_PATH=/opt/rocm/llvm/bin
ENV PATH="/opt/venv/bin:/opt/rocm/bin:/opt/rocm/llvm/bin:/home/ray/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
LD_LIBRARY_PATH="/opt/rocm/lib:/opt/rocm/lib64:/opt/venv/lib" \
HSA_PATH="/opt/rocm/hsa" \
HIP_PATH="/opt/rocm" \
# Strix Halo (gfx1151 / RDNA 3.5) runtime settings
HIP_VISIBLE_DEVICES=0 \
HSA_ENABLE_SDMA=0 \
PYTORCH_ALLOC_CONF="max_split_size_mb:512" \
HSA_OVERRIDE_GFX_VERSION="11.0.0" \
ROCM_TARGET_LST="gfx1151,gfx1100"
# ── System setup ─────────────────────────────────────────────────────────
# The vendor image already ships ALL needed packages:
# cmake 4.0, hipcc 7.0.2, clang++ 20.0 (AMD ROCm LLVM), git,
# libelf, libnuma, libdrm, libopenmpi3, and HIP dev headers/cmake configs.
#
# CRITICAL: Do NOT run apt-get upgrade or install ANY packages from apt.
# Even installing ccache triggers a dependency cascade that pulls in
# Ubuntu's hipcc 5.7.1 (which overwrites the vendor hipcc 7.0.2) and
# a broken /usr/bin/hipconfig.pl that makes cmake find_package(hip)
# report version 0.0.0 → "Can't find CUDA or HIP installation."
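#
# Quick check that the vendor toolchain survived (run inside the image):
#   hipconfig --version   # expect 7.0.x, not Ubuntu's 5.7.x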
#
# Create ray user (uid 1000 / gid 100) for KubeRay.
# Vendor image may already have UID 1000 — rename or create.
RUN (groupadd -g 100 -o users 2>/dev/null || true) \
&& existing=$(getent passwd 1000 | cut -d: -f1) \
&& if [ -n "$existing" ] && [ "$existing" != "ray" ]; then \
usermod -l ray -d /home/ray -m -s /bin/bash "$existing"; \
elif [ -z "$existing" ]; then \
useradd -m -u 1000 -g 100 -s /bin/bash ray; \
fi \
&& mkdir -p /home/ray/.aiter && chown 1000:100 /home/ray/.aiter
# Install uv for fast Python package management (ADR-0014)
COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv
# ── Python build dependencies ──────────────────────────────────────────
# CRITICAL: vLLM requires cmake<4. The vendor image ships cmake 4.0.0,
# which changes find_package(MODULE) behaviour and breaks FindHIP.cmake
# (reports HIP version 0.0.0). Downgrade to 3.x per vLLM's rocm-build.txt.
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --python /opt/venv/bin/python3 \
'cmake>=3.26.1,<4' \
ninja \
'packaging>=24.2' \
'setuptools>=77.0.3,<80.0.0' \
'setuptools-scm>=8' \
wheel \
'jinja2>=3.1.6' \
regex
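# The PyPI cmake package drops a `cmake` shim into /opt/venv/bin, which PATH
# resolves ahead of the vendor cmake 4.x (see ENV above), so the vLLM build
# sees 3.x without touching the system install. Verify (illustrative):
#   cmake --version   # expect 3.x, not 4.0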
# ── Build vLLM from source ─────────────────────────────────────────────
# Clone at the specified version, then strip torch from build-requires so
# the build system uses the vendor torch already in /opt/venv.
WORKDIR /tmp/vllm-build
RUN git clone --depth 1 --branch ${VLLM_VERSION} \
https://github.com/vllm-project/vllm.git .
# Remove torch from build-requires. use_existing_torch.py is provided by
# newer vLLM branches; fall back to sed for older releases.
# --prefix strips only torch=/torchvision=/torchaudio= lines (not
# unrelated packages whose name happens to contain "torch").
RUN if [ -f use_existing_torch.py ]; then \
python3 use_existing_torch.py --prefix; \
else \
sed -i '/"torch[= ]/d; /"torchvision[= ]/d; /"torchaudio[= ]/d' pyproject.toml 2>/dev/null || true; \
fi
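# Example effect (illustrative): a requirements pin such as
#   torch==2.9.1+git8907517
# is removed, leaving the vendor torch in /opt/venv as the build's torch.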
# Compile C++/HIP extensions and install the vLLM Python package.
# vLLM's setup.py passes -DROCM_PATH=$ROCM_HOME to cmake automatically.
# HIP_ROOT_DIR tells FindHIP.cmake where to look for hipconfig.
ENV PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} \
VLLM_TARGET_DEVICE=rocm \
MAX_JOBS=${MAX_JOBS} \
CMAKE_BUILD_TYPE=Release \
HIP_ROOT_DIR=/opt/rocm \
CMAKE_PREFIX_PATH="/opt/rocm;/opt/rocm/lib/cmake" \
CCACHE_DIR=/root/.cache/ccache
# Build using setup.py bdist_wheel (same as vLLM CI in Dockerfile.rocm),
# then install the wheel. This avoids a develop-mode egg-link to the
# build directory so we can safely clean up /tmp/vllm-build afterwards.
#
# --no-deps: vllm's dep tree pulls torch/xgrammar with exact +gitXXX pins
# that conflict with the vendor torch. Runtime deps are installed in the
# next layer instead.
RUN --mount=type=cache,target=/root/.cache/ccache \
python3 setup.py bdist_wheel --dist-dir=dist \
&& uv pip install --python /opt/venv/bin/python3 --no-deps dist/*.whl
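# Optional check (illustrative): confirm the wheel installed without pulling
# in a replacement torch:
#   uv pip list --python /opt/venv/bin/python3 | grep -E '^(vllm|torch) '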
# ── ROCm-specific Python wheels ────────────────────────────────────────
# triton / triton-kernels (ROCm HIP backend), flash-attn (Triton AMD kernels
# for gfx11), and xgrammar from vLLM's ROCm wheels index. --no-deps prevents
# torch replacement via their dependency trees.
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --python /opt/venv/bin/python3 \
--no-deps \
--prerelease=allow \
--extra-index-url https://wheels.vllm.ai/rocm/ \
triton triton-kernels flash_attn \
xgrammar
# ── Runtime Python dependencies ─────────────────────────────────────────
# Because vllm was installed --no-deps (torch pin conflicts), we install
# its runtime deps here. Packages already in the vendor image (torch,
# numpy, pillow, pyyaml, etc.) are satisfied and skipped by uv.
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --python /opt/venv/bin/python3 \
'ray[default]==2.53.0' \
'transformers>=4.35.0,<5.0' \
'accelerate>=0.25.0,<1.0' \
'sentence-transformers>=2.3.0,<3.0' \
'httpx>=0.27.0,<1.0' \
'scipy>=1.11.0,<2.0' \
'pandas>=2.0.0,<3.0' \
'numpy>=2.1.0,<2.3' \
'numba>=0.60.0,<0.62' \
'uvloop>=0.21.0' \
'msgpack>=1.0.0' \
# ── vllm runtime deps (not pulled by --no-deps) ──
'msgspec>=0.18.0' \
'fastapi>=0.110.0' \
'uvicorn[standard]>=0.28.0' \
'openai>=1.0' \
'peft>=0.7.0' \
'datasets>=2.16.0' \
'pydantic>=2.0' \
'prometheus-fastapi-instrumentator>=6.0' \
'lark>=1.1.0' \
'outlines_core>=0.1.0' \
'lm-format-enforcer>=0.10.0' \
'partial-json-parser>=0.2.0' \
'mistral-common>=1.5.0' \
'compressed-tensors>=0.8.0' \
'gguf>=0.6.0' \
'tokenizers>=0.20.0' \
'safetensors>=0.4.0' \
'filelock>=3.13.0' \
'psutil>=5.9.0' \
'py-cpuinfo>=9.0.0' \
'prometheus-client>=0.20.0' \
'pillow>=10.0' \
'aiohttp>=3.9.0' \
'requests>=2.31.0' \
'pyyaml>=6.0' \
'cloudpickle>=3.0' \
'blake3>=0.3.0' \
'cbor2>=5.0' \
'diskcache>=5.0' \
'pyzmq>=25.0' \
'python-json-logger>=2.0' \
'sentencepiece>=0.2.0' \
'tiktoken>=0.5.0' \
'tqdm>=4.66.0' \
'packaging>=23.0' \
'regex>=2023.0' \
'six>=1.16.0' \
'typing_extensions>=4.8.0' \
'einops>=0.7.0' \
'depyf>=0.18.0' \
'grpcio>=1.60.0' \
'protobuf>=4.25.0'
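# Optional import smoke test (illustrative) to catch still-missing runtime
# deps early:
#   python3 -c "import msgspec, fastapi, openai, xgrammar"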
# ── Verify vendor torch survived ───────────────────────────────────────
# Fail early if any install step accidentally replaced the vendor torch.
RUN python3 -c "\
import torch; \
v = torch.__version__; \
assert '+git' not in v, f'vLLM torch detected ({v}) — vendor torch was overwritten!'; \
print(f'torch {v} (vendor) OK')"
# ── amdsmi sysfs shim ──────────────────────────────────────────────────
# Required for vLLM ROCm platform detection. The native amdsmi reports
# GTT instead of VRAM on unified-memory APUs.
COPY amdsmi-shim /tmp/amdsmi-shim
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --python /opt/venv/bin/python3 /tmp/amdsmi-shim \
&& rm -rf /tmp/amdsmi-shim
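# The shim reads amdgpu sysfs counters directly; plausibly the standard
#   /sys/class/drm/card0/device/mem_info_vram_total
#   /sys/class/drm/card0/device/mem_info_vram_used
# nodes (illustrative paths; the amdsmi-shim source is authoritative).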
# ── VRAM fix for unified memory APU ────────────────────────────────────
# Monkey-patches torch.cuda.mem_get_info to report actual VRAM (96 GiB)
# rather than GTT (128 GiB).
COPY amdsmi-shim/strixhalo_vram_fix.py \
/opt/venv/lib/python3.12/site-packages/strixhalo_vram_fix.py
RUN echo "import strixhalo_vram_fix" > \
/opt/venv/lib/python3.12/site-packages/strixhalo_vram_fix.pth
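# How the .pth hook fires: site.py executes any line beginning with "import"
# in a *.pth file at interpreter startup, so the fix loads before Ray or
# vLLM ever query free memory. Sketch of the patch (illustrative; the real
# logic lives in amdsmi-shim/strixhalo_vram_fix.py):
#   import torch
#   _orig = torch.cuda.mem_get_info
#   def _capped(device=None):
#       free, total = _orig(device)
#       vram = 96 * 1024**3          # physical VRAM, not the 128 GiB GTT
#       return min(free, vram), min(total, vram)
#   torch.cuda.mem_get_info = _capped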
# ── Clean up build artifacts ────────────────────────────────────────────
WORKDIR /app
RUN rm -rf /tmp/vllm-build
# Copy entrypoint script (ray-serve-apps is installed from PyPI at runtime)
COPY --chmod=755 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh
# Make /app owned by ray user
RUN chown -R 1000:100 /app
# Switch to ray user for runtime (KubeRay expects uid 1000)
USER 1000
# Environment configuration
ENV PYTHONPATH=/app \
PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
RAY_HEAD_SVC="ai-inference-raycluster-head-svc" \
GPU_RESOURCE="gpu_strixhalo" \
NUM_GPUS="1"
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \
CMD ray status --address=localhost:6379 || exit 1
ENTRYPOINT ["/app/ray-entrypoint.sh"]