big refactor.
Some checks failed
Build and Push Images / build (Dockerfile.ray-worker-intel, intel) (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-nvidia, nvidia) (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-rdna2, rdna2) (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-strixhalo, strixhalo) (push) Has been cancelled
Build and Push Images / Release (push) Has been cancelled
Build and Push Images / Notify (push) Has been cancelled
Build and Push Images / determine-version (push) Has been cancelled

This commit is contained in:
2026-02-09 12:17:12 -05:00
parent a20a5d2ccd
commit 65de596212


@@ -6,15 +6,12 @@
# docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-strixhalo:latest \
# -f dockerfiles/Dockerfile.ray-worker-strixhalo .
#
# Multi-stage build: Extract ROCm 7.1 from vendor image, use Ray base for Python 3.12
# Note: Uses TheRock gfx110X wheels due to ROCm/ROCm#5853 segfault issue
# Uses ROCm vendor image as base (Ubuntu 24.04 / glibc 2.38) so that all
# ROCm 7.1 shared libraries (libMIOpen, libhipblas, etc.) find a compatible
# glibc. Ray 2.53.0 is installed into the vendor venv via pip.
# Note: Python 3.12 required — vLLM ROCm wheel (wheels.vllm.ai/rocm) is cp312 only
# Stage 1: ROCm 7.1 libraries from AMD vendor image
FROM docker.io/rocm/pytorch:rocm7.1_ubuntu24.04_py3.12_pytorch_release_2.9.1 AS rocm-source
# Stage 2: Production image
FROM docker.io/rayproject/ray:2.53.0-py312 AS production
FROM docker.io/rocm/pytorch:rocm7.1_ubuntu24.04_py3.12_pytorch_release_2.9.1
# OCI Image Spec labels
LABEL org.opencontainers.image.title="Ray Worker - AMD Strix Halo"
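# Illustrative spot-check of the constraints above (not a build step; run inside the
# finished image): glibc should report 2.38 and the vendor venv should carry Python 3.12
# with Ray 2.53.0, e.g.
#   ldd --version | head -1
#   /opt/venv/bin/python3 -c "import sys, ray; print(sys.version_info[:2], ray.__version__)"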
@@ -27,13 +24,12 @@ LABEL ray.version="2.53.0"
WORKDIR /app
# Copy ROCm stack from vendor image (--link makes this layer independent for better caching)
COPY --link --from=rocm-source /opt/rocm /opt/rocm
# ROCm environment variables - split to ensure ROCM_HOME is set first
ENV ROCM_HOME=/opt/rocm
ENV PATH="/opt/rocm/bin:/opt/rocm/llvm/bin:/home/ray/anaconda3/bin:/home/ray/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
LD_LIBRARY_PATH="/opt/rocm/lib:/opt/rocm/lib64:/home/ray/anaconda3/lib" \
# The vendor image ships a venv at /opt/venv with Python 3.12 + torch 2.9.1.
# We keep using that venv for all pip installs.
ENV ROCM_HOME=/opt/rocm \
VIRTUAL_ENV=/opt/venv
ENV PATH="/opt/venv/bin:/opt/rocm/bin:/opt/rocm/llvm/bin:/home/ray/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
LD_LIBRARY_PATH="/opt/rocm/lib:/opt/rocm/lib64:/opt/venv/lib" \
HSA_PATH="/opt/rocm/hsa" \
HIP_PATH="/opt/rocm/hip" \
# Strix Halo (gfx1151) specific settings
@@ -43,8 +39,7 @@ ENV PATH="/opt/rocm/bin:/opt/rocm/llvm/bin:/home/ray/anaconda3/bin:/home/ray/.lo
HSA_OVERRIDE_GFX_VERSION="11.0.0" \
ROCM_TARGET_LST="gfx1151,gfx1100"
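# Illustrative runtime check of the override (an assumption, not part of the build; needs
# a GPU-enabled docker run with /dev/kfd and /dev/dri mapped): a torch build targeting
# gfx1100 should enumerate the APU under the spoofed target, e.g.
#   python3 -c "import torch; p = torch.cuda.get_device_properties(0); print(p.name, getattr(p, 'gcnArchName', 'n/a'))"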
# Install system dependencies
USER root
# System dependencies + create ray user (uid 1000 / gid 100) for KubeRay
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y --no-install-recommends \
@@ -54,74 +49,68 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
libdrm-amdgpu1 \
kmod \
libopenmpi3 \
&& rm -rf /var/lib/apt/lists/*
&& rm -rf /var/lib/apt/lists/* \
&& groupadd -g 100 -o users 2>/dev/null || true \
&& useradd -m -u 1000 -g 100 -s /bin/bash ray \
&& mkdir -p /home/ray/.aiter && chown 1000:100 /home/ray/.aiter
# Install uv for fast Python package management (ADR-0014)
COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv
USER ray
# Remove vendor torch — the vendor ships torch 2.9.1+rocm7.1 but vLLM
# ROCm wheel (0.15.1+rocm700) was compiled against the PyTorch ROCm 7.0
# ABI. Installing from the ROCm 7.0 index avoids undefined-symbol errors
# in vllm._C / vllm._rocm_C (e.g. silu_and_mul).
RUN uv pip uninstall --python /opt/venv/bin/python3 \
torch torchaudio torchvision 2>/dev/null || true
# Install vLLM ROCm build, torch ROCm 7.0, and inference dependencies.
# IMPORTANT: vLLM ROCm wheel (0.15.1+rocm700) has C-extensions compiled against
# the official PyTorch ROCm 7.0 ABI. We MUST use torch from the same ROCm 7.0
# index — TheRock nightlies have an incompatible c10::hip ABI that causes
# undefined symbol errors in vllm._C and vllm._rocm_C.
# HSA_OVERRIDE_GFX_VERSION=11.0.0 makes gfx1151 appear as gfx1100 to this torch.
# Install Ray, vLLM ROCm, torch ROCm 7.0, and inference dependencies.
# --index-strategy unsafe-best-match: let uv pull each package from whichever
# index has the best-matching version (vllm from rocm/, torch from rocm7.0,
# setuptools/others from PyPI).
RUN --mount=type=cache,target=/home/ray/.cache/uv,uid=1000,gid=1000 \
uv pip install --system \
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --python /opt/venv/bin/python3 \
--index-strategy unsafe-best-match \
--prerelease=allow \
--extra-index-url https://wheels.vllm.ai/rocm/ \
--extra-index-url https://download.pytorch.org/whl/rocm7.0 \
'ray[default]==2.53.0' \
'vllm==0.15.1+rocm700' \
torch torchaudio torchvision \
'transformers>=4.35.0,<5.0' \
'accelerate>=0.25.0,<1.0' \
'sentence-transformers>=2.3.0,<3.0' \
'httpx>=0.27.0,<1.0' \
'scipy>=1.11.0,<2.0'
'scipy>=1.11.0,<2.0' \
'pandas>=2.0.0,<3.0' \
'numpy>=2.1.0,<2.3'
# FIX: Ray base image has pandas 1.5.3 which is incompatible with numpy 2.x
# PyTorch ROCm 7.0 requires numpy 2.x, so upgrade pandas to match.
# Pin numpy <2.3 because numba (required by vLLM for speculative decoding)
# does not yet support numpy 2.3+.
RUN --mount=type=cache,target=/home/ray/.cache/uv,uid=1000,gid=1000 \
uv pip install --system 'pandas>=2.0.0,<3.0' 'numpy>=2.1.0,<2.3'
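# Illustrative check of the ABI constraint described above (not a build step; run inside
# the image): the compiled extensions fail fast with an "undefined symbol" ImportError
# when torch comes from a mismatched ROCm index, e.g.
#   python3 -c "import vllm._C, vllm._rocm_C; print('vLLM C-extensions loaded')"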
# Install amdsmi sysfs shim (required for vLLM ROCm platform detection).
# Even though the native amdsmi works on Ubuntu 24.04, the sysfs shim is
# still needed because the native library reports GTT instead of VRAM on
# unified-memory APUs. Must be installed after vLLM/torch so PyPI amdsmi
# doesn't overwrite it.
COPY amdsmi-shim /tmp/amdsmi-shim
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --python /opt/venv/bin/python3 /tmp/amdsmi-shim \
&& rm -rf /tmp/amdsmi-shim
# Install amdsmi sysfs shim LAST (required for vLLM ROCm platform detection).
# The native amdsmi from ROCm 7.1 requires glibc 2.38 (Ubuntu 24.04),
# but the Ray base image is Ubuntu 22.04 (glibc 2.35). This pure-Python
# shim reads GPU info from /sys/class/drm/* instead of libamd_smi.so.
# Must be installed after vLLM/torch to prevent PyPI amdsmi from overwriting it.
COPY --chown=1000:100 amdsmi-shim /tmp/amdsmi-shim
RUN --mount=type=cache,target=/home/ray/.cache/uv,uid=1000,gid=1000 \
uv pip install --system /tmp/amdsmi-shim && rm -rf /tmp/amdsmi-shim
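# For reference, the sysfs counters such a shim can read are the amdgpu driver's standard
# memory files (a mechanism sketch, not the shim's actual code):
#   /sys/class/drm/card*/device/mem_info_vram_total   # dedicated VRAM carve-out, bytes
#   /sys/class/drm/card*/device/mem_info_vram_used
#   /sys/class/drm/card*/device/mem_info_gtt_total    # GTT, which native amdsmi reports on APUs
# e.g. python3 -c "print(int(open('/sys/class/drm/card0/device/mem_info_vram_total').read()))"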
# FIX: Patch torch.cuda.mem_get_info for unified memory APUs.
# On Strix Halo, PyTorch reports GTT (128 GiB) instead of real VRAM (96 GiB)
# from sysfs. vLLM uses mem_get_info to pre-allocate, so wrong numbers cause
# OOM or "insufficient GPU memory" at startup. The .pth file auto-patches
# mem_get_info on Python startup to return sysfs VRAM values.
COPY --chown=1000:100 amdsmi-shim/strixhalo_vram_fix.py \
/home/ray/anaconda3/lib/python3.12/site-packages/strixhalo_vram_fix.py
# Patch torch.cuda.mem_get_info for unified memory APUs.
# On Strix Halo, PyTorch reports GTT (128 GiB) instead of real VRAM (96 GiB).
# The .pth file auto-patches mem_get_info on Python startup.
COPY amdsmi-shim/strixhalo_vram_fix.py \
/opt/venv/lib/python3.12/site-packages/strixhalo_vram_fix.py
RUN echo "import strixhalo_vram_fix" > \
/home/ray/anaconda3/lib/python3.12/site-packages/strixhalo_vram_fix.pth
# Pre-download common models for faster cold starts (optional, increases image size)
# RUN python3 -c "from sentence_transformers import SentenceTransformer; SentenceTransformer('BAAI/bge-large-en-v1.5')"
# Pre-create aiter JIT build cache directory.
# The vLLM ROCm aiter package compiles kernels on first import and needs
# this directory writable by the ray user (uid 1000).
USER root
RUN mkdir -p /home/ray/.aiter && chown 1000:100 /home/ray/.aiter
USER ray
/opt/venv/lib/python3.12/site-packages/strixhalo_vram_fix.pth
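# Mechanism sketch (assumptions, not the actual strixhalo_vram_fix.py): Python's site
# module executes any "import ..." line found in a site-packages *.pth file at interpreter
# startup, so the one-line .pth above pulls in the fix before Ray or vLLM start. The fix
# itself only has to wrap the original call while keeping its (free, total) byte tuple,
# roughly:
#   _orig = torch.cuda.mem_get_info
#   def _sysfs_mem_get_info(device=None):
#       total = int(open("/sys/class/drm/card0/device/mem_info_vram_total").read())
#       used = int(open("/sys/class/drm/card0/device/mem_info_vram_used").read())
#       return (total - used, total)
#   torch.cuda.mem_get_info = _sysfs_mem_get_info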
# Copy entrypoint script (ray-serve-apps is installed from PyPI at runtime)
COPY --chown=1000:100 --chmod=755 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh
COPY --chmod=755 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh
# Make /app owned by ray user
RUN chown -R 1000:100 /app
# Switch to ray user for runtime (KubeRay expects uid 1000)
USER 1000
# Environment configuration
ENV PYTHONPATH=/app \