kuberay-images/dockerfiles/Dockerfile.ray-worker-strixhalo
Billy D. 2e3fbb8c60
feat(strixhalo): full source build of vLLM for gfx1151 (v1.0.20)
- Build vLLM v0.15.1 from source against vendor torch 2.9.1
- Preserve AMD's vendor PyTorch from rocm/pytorch:rocm7.0.2 base
- Use use_existing_torch.py --prefix to strip torch from build-requires
- Compile C++/HIP extensions for gfx1100 (mapped from gfx1151)
- Install triton/flash-attn from wheels.vllm.ai/rocm with --no-deps
- Add torch vendor verification step to catch accidental overwrites
- Fix GPU_RESOURCE default to match cluster (gpu_strixhalo)
- Remove unsupported expandable_segments from PYTORCH_ALLOC_CONF
- AITER is gfx9-only; gfx11 uses TRITON_ATTN backend by default
2026-02-09 15:46:25 -05:00

# syntax=docker/dockerfile:1.7
# AMD Strix Halo Ray Worker for khelben (gfx1151 / RDNA 3.5)
# Used for: vLLM (Llama 3.1 70B)
#
# Build:
#   docker build -t registry.lab.daviestechlabs.io/daviestechlabs/ray-worker-strixhalo:v1.0.20 \
#     -f dockerfiles/Dockerfile.ray-worker-strixhalo .
#
# STRATEGY: Full source build of vLLM on AMD's vendor PyTorch image.
#
# The vendor image (rocm/pytorch ROCm 7.0.2 / Ubuntu 24.04 / Python 3.12)
# ships torch 2.9.1 compiled by AMD CI against the exact ROCm libraries in
# the image. Pre-built vLLM torch wheels (wheels.vllm.ai) carry a custom
# torch 2.9.1+git8907517 that segfaults in libhsa-runtime64.so on gfx1151
# during HSA queue creation. By keeping the vendor torch and compiling vLLM
# from source we guarantee ABI compatibility across the entire stack.
#
# gfx1151 is mapped to gfx1100 at runtime via HSA_OVERRIDE_GFX_VERSION=11.0.0,
# so all HIP kernels are compiled for the gfx1100 target.
#
# Note: AITER is gfx9-only. On gfx11, vLLM defaults to TRITON_ATTN backend.
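#
# A quick runtime sanity check for the override (a sketch; run inside the
# built container with the GPU mounted):
#   python3 -c "import torch; print(torch.version.hip, torch.cuda.get_device_properties(0).gcnArchName)"
# This should report a gfx1100 architecture name rather than gfx1151.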
FROM docker.io/rocm/pytorch:rocm7.0.2_ubuntu24.04_py3.12_pytorch_release_2.9.1
# ── Build arguments ─────────────────────────────────────────────────────
ARG VLLM_VERSION=v0.15.1
ARG PYTORCH_ROCM_ARCH="gfx1100"
ARG MAX_JOBS=16
# ── OCI labels ──────────────────────────────────────────────────────────
LABEL org.opencontainers.image.title="Ray Worker - AMD Strix Halo"
LABEL org.opencontainers.image.description="Ray Serve worker for AMD Strix Halo (vLLM source-built)"
LABEL org.opencontainers.image.vendor="DaviesTechLabs"
LABEL org.opencontainers.image.source="https://git.daviestechlabs.io/daviestechlabs/kuberay-images"
LABEL org.opencontainers.image.licenses="MIT"
LABEL gpu.target="amd-rocm-7.0.2-gfx1151"
LABEL ray.version="2.53.0"
LABEL vllm.build="source"
WORKDIR /app
# ── Persistent environment ──────────────────────────────────────────────
# The vendor image ships a venv at /opt/venv with Python 3.12 + torch 2.9.1.
# All pip installs go into this venv.
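# A quick way to confirm later installs land in the venv rather than system
# Python (illustrative):
#   /opt/venv/bin/python3 -m pip show torch | head -n 2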
ENV ROCM_HOME=/opt/rocm \
    VIRTUAL_ENV=/opt/venv
ENV PATH="/opt/venv/bin:/opt/rocm/bin:/opt/rocm/llvm/bin:/home/ray/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
    LD_LIBRARY_PATH="/opt/rocm/lib:/opt/rocm/lib64:/opt/venv/lib" \
    HSA_PATH="/opt/rocm/hsa" \
    HIP_PATH="/opt/rocm/hip" \
    # Strix Halo (gfx1151 / RDNA 3.5) runtime settings
    HIP_VISIBLE_DEVICES=0 \
    HSA_ENABLE_SDMA=0 \
    PYTORCH_ALLOC_CONF="max_split_size_mb:512" \
    HSA_OVERRIDE_GFX_VERSION="11.0.0" \
    ROCM_TARGET_LST="gfx1151,gfx1100"
# ── System dependencies + build tools ───────────────────────────────────
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update && apt-get install -y --no-install-recommends \
    # Runtime
    libelf1 libnuma1 libdrm2 libdrm-amdgpu1 kmod libopenmpi3 \
    # Build (for vLLM C++/HIP compilation)
    git cmake ninja-build ccache \
    && rm -rf /var/lib/apt/lists/* \
    # Create ray user (uid 1000 / gid 100) for KubeRay.
    # Vendor image may already have UID 1000 — rename or create.
    && (groupadd -g 100 -o users 2>/dev/null || true) \
    && existing=$(getent passwd 1000 | cut -d: -f1) \
    && if [ -n "$existing" ] && [ "$existing" != "ray" ]; then \
        usermod -l ray -d /home/ray -m -s /bin/bash "$existing"; \
    elif [ -z "$existing" ]; then \
        useradd -m -u 1000 -g 100 -s /bin/bash ray; \
    fi \
    && mkdir -p /home/ray/.aiter && chown 1000:100 /home/ray/.aiter
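# Sanity check for the user wiring above (illustrative): `getent passwd 1000`
# should now report a "ray" user with gid 100 and home /home/ray.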
# Install uv for fast Python package management (ADR-0014)
COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv
# ── Python build dependencies ──────────────────────────────────────────
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --python /opt/venv/bin/python3 \
    'cmake>=3.26.1' \
    ninja \
    'packaging>=24.2' \
    'setuptools>=77.0.3,<80.0.0' \
    'setuptools-scm>=8' \
    wheel \
    'jinja2>=3.1.6' \
    regex
# ── Build vLLM from source ─────────────────────────────────────────────
# Clone at the specified version, then strip torch from build-requires so
# the build system uses the vendor torch already in /opt/venv.
WORKDIR /tmp/vllm-build
RUN git clone --depth 1 --branch ${VLLM_VERSION} \
    https://github.com/vllm-project/vllm.git .
# Remove torch from build-requires. use_existing_torch.py is provided by
# newer vLLM branches; fall back to sed for older releases.
# --prefix strips only torch=/torchvision=/torchaudio= lines (not
# unrelated packages whose name happens to contain "torch").
RUN if [ -f use_existing_torch.py ]; then \
        python3 use_existing_torch.py --prefix; \
    else \
        sed -i '/"torch[= ]/d; /"torchvision[= ]/d; /"torchaudio[= ]/d' pyproject.toml 2>/dev/null || true; \
    fi
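# Optional spot check (not exhaustive; torch pins may also live under
# requirements/):
#   grep -n '"torch' pyproject.toml || echo "no torch pin left in pyproject.toml"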
# Compile C++/HIP extensions and install the vLLM Python package.
# --no-build-isolation : use vendor torch + our build deps directly
# --no-deps : we install runtime deps ourselves (below)
ENV PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} \
    VLLM_TARGET_DEVICE=rocm \
    MAX_JOBS=${MAX_JOBS} \
    CMAKE_BUILD_TYPE=Release \
    CCACHE_DIR=/root/.cache/ccache
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=cache,target=/root/.cache/ccache \
    uv pip install --python /opt/venv/bin/python3 \
    --no-build-isolation --no-deps .
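# Smoke test for the finished build (a sketch; importing vLLM triggers ROCm
# platform detection, so run it in a container with the GPU mounted):
#   python3 -c "import vllm; print(vllm.__version__)"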
# ── ROCm-specific Python wheels ────────────────────────────────────────
# triton (ROCm HIP backend) and flash-attn (Triton AMD kernels for gfx11)
# from vLLM's ROCm wheels index. --no-deps prevents torch replacement.
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --python /opt/venv/bin/python3 \
    --no-deps \
    --prerelease=allow \
    --extra-index-url https://wheels.vllm.ai/rocm/ \
    triton triton-kernels flash_attn
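# To confirm which versions the index resolved (output varies per build):
#   python3 -m pip show triton flash_attn | grep -E '^(Name|Version)'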
# ── Runtime Python dependencies ─────────────────────────────────────────
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --python /opt/venv/bin/python3 \
    'ray[default]==2.53.0' \
    'transformers>=4.35.0,<5.0' \
    'accelerate>=0.25.0,<1.0' \
    'sentence-transformers>=2.3.0,<3.0' \
    'httpx>=0.27.0,<1.0' \
    'scipy>=1.11.0,<2.0' \
    'pandas>=2.0.0,<3.0' \
    'numpy>=2.1.0,<2.3' \
    'numba>=0.60.0,<0.62' \
    'tokenizers>=0.20.0' \
    'safetensors>=0.4.0' \
    'uvloop>=0.21.0' \
    'pyyaml>=6.0' \
    'requests>=2.31.0' \
    'aiohttp>=3.9.0' \
    'pillow>=10.0' \
    'prometheus-client>=0.20.0' \
    'py-cpuinfo>=9.0.0' \
    'filelock>=3.13.0' \
    'psutil>=5.9.0' \
    'msgpack>=1.0.0' \
    'gguf>=0.6.0' \
    'compressed-tensors>=0.8.0' \
    'outlines>=0.1.0' \
    'lm-format-enforcer>=0.10.0' \
    'partial-json-parser>=0.2.0' \
    'mistral-common>=1.5.0'
# ── Verify vendor torch survived ───────────────────────────────────────
# Fail early if any install step accidentally replaced the vendor torch.
RUN python3 -c "\
import torch; \
v = torch.__version__; \
assert '+git' not in v, f'vLLM torch detected ({v}) — vendor torch was overwritten!'; \
print(f'torch {v} (vendor) OK')"
# ── amdsmi sysfs shim ──────────────────────────────────────────────────
# Required for vLLM ROCm platform detection. The native amdsmi reports
# GTT instead of VRAM on unified-memory APUs.
COPY amdsmi-shim /tmp/amdsmi-shim
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --python /opt/venv/bin/python3 /tmp/amdsmi-shim \
    && rm -rf /tmp/amdsmi-shim
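# Spot check (assumes the shim installs under the native "amdsmi" module name):
#   python3 -c "import amdsmi; print(amdsmi.__file__)"
# should resolve to the shim inside /opt/venv, not a ROCm system path.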
# ── VRAM fix for unified memory APU ────────────────────────────────────
# Monkey-patches torch.cuda.mem_get_info to report actual VRAM (96 GiB)
# rather than GTT (128 GiB).
COPY amdsmi-shim/strixhalo_vram_fix.py \
    /opt/venv/lib/python3.12/site-packages/strixhalo_vram_fix.py
RUN echo "import strixhalo_vram_fix" > \
    /opt/venv/lib/python3.12/site-packages/strixhalo_vram_fix.pth
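# With the .pth hook active, mem_get_info should report VRAM, not GTT
# (a sketch; needs the GPU device mounted):
#   python3 -c "import torch; free, total = torch.cuda.mem_get_info(); print(f'{total / 2**30:.0f} GiB')"
# Expected: ~96 GiB rather than 128 GiB.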
# ── Clean up build artifacts ────────────────────────────────────────────
WORKDIR /app
RUN rm -rf /tmp/vllm-build
# Copy entrypoint script (ray-serve-apps is installed from PyPI at runtime)
COPY --chmod=755 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh
# Make /app owned by ray user
RUN chown -R 1000:100 /app
# Switch to ray user for runtime (KubeRay expects uid 1000)
USER 1000
# Environment configuration
ENV PYTHONPATH=/app \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    RAY_HEAD_SVC="ai-inference-raycluster-head-svc" \
    GPU_RESOURCE="gpu_strixhalo" \
    NUM_GPUS="1"
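# GPU_RESOURCE names the custom Ray resource this node advertises. The actual
# wiring lives in ray-entrypoint.sh / the RayCluster spec; illustratively:
#   ray start --address=$RAY_HEAD_SVC:6379 --resources='{"gpu_strixhalo": 1}'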
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \
    CMD ray status --address=localhost:6379 || exit 1
ENTRYPOINT ["/app/ray-entrypoint.sh"]