Some checks failed
Build and Push Images / build (Dockerfile.ray-worker-intel, intel) (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-nvidia, nvidia) (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-rdna2, rdna2) (push) Has been cancelled
Build and Push Images / build (Dockerfile.ray-worker-strixhalo, strixhalo) (push) Has been cancelled
Build and Push Images / Release (push) Has been cancelled
Build and Push Images / Notify (push) Has been cancelled
Build and Push Images / determine-version (push) Has been cancelled
ROCm 7.1 system libraries (libhsa-runtime64.so.1.18.70100) are ABI-incompatible with the torch/vLLM ROCm 7.0 wheels from wheels.vllm.ai. This caused SIGSEGV at 0x34 in libhsa-runtime64 on every GPU operation. Switch to rocm/pytorch:rocm7.0.2_ubuntu24.04_py3.12_pytorch_release_2.9.1, which provides matching ROCm 7.0.2 system libraries while keeping Ubuntu 24.04 (glibc 2.38) and Python 3.12.
134 lines
5.7 KiB
Docker
134 lines
5.7 KiB
Docker
# syntax=docker/dockerfile:1.7
#
# AMD Strix Halo Ray Worker for khelben (gfx1151 / RDNA 3.5)
# Used for: vLLM (Llama 3.1 70B)
#
# Build:
#   docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-strixhalo:latest \
#     -f dockerfiles/Dockerfile.ray-worker-strixhalo .
#
# Based on the ROCm 7.0.2 vendor image (Ubuntu 24.04 / glibc 2.38) so the
# system ROCm libraries (libhsa-runtime64, libhipblas, libMIOpen, etc.) are
# ABI-compatible with the torch + vLLM wheels from wheels.vllm.ai/rocm/
# (compiled against ROCm 7.0). A ROCm 7.1 base segfaults inside
# libhsa-runtime64.so because of that ABI mismatch.
#
# Note: Python 3.12 is required — the vLLM ROCm wheel (wheels.vllm.ai/rocm)
# is published for cp312 only.

FROM docker.io/rocm/pytorch:rocm7.0.2_ubuntu24.04_py3.12_pytorch_release_2.9.1
|
|
|
|
# OCI Image Spec metadata, plus custom GPU-target / Ray-version labels
LABEL org.opencontainers.image.title="Ray Worker - AMD Strix Halo" \
      org.opencontainers.image.description="Ray Serve worker for AMD Strix Halo (vLLM LLM inference)" \
      org.opencontainers.image.vendor="DaviesTechLabs" \
      org.opencontainers.image.source="https://git.daviestechlabs.io/daviestechlabs/kuberay-images" \
      org.opencontainers.image.licenses="MIT" \
      gpu.target="amd-rocm-7.0-gfx1151" \
      ray.version="2.53.0"
|
|
|
|
WORKDIR /app

# The vendor image ships a venv at /opt/venv (Python 3.12 + torch 2.9.1);
# every pip install below targets that venv.
ENV ROCM_HOME=/opt/rocm \
    VIRTUAL_ENV=/opt/venv
ENV PATH="/opt/venv/bin:/opt/rocm/bin:/opt/rocm/llvm/bin:/home/ray/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
    LD_LIBRARY_PATH="/opt/rocm/lib:/opt/rocm/lib64:/opt/venv/lib" \
    HSA_PATH="/opt/rocm/hsa" \
    HIP_PATH="/opt/rocm/hip" \
    # Settings specific to Strix Halo (gfx1151)
    HIP_VISIBLE_DEVICES=0 \
    HSA_ENABLE_SDMA=0 \
    PYTORCH_HIP_ALLOC_CONF="expandable_segments:True,max_split_size_mb:512" \
    HSA_OVERRIDE_GFX_VERSION="11.0.0" \
    ROCM_TARGET_LST="gfx1151,gfx1100"
|
|
|
|
# System dependencies + create the ray user (uid 1000 / gid 100) for KubeRay.
# /var/cache/apt and /var/lib/apt are BuildKit cache mounts: their contents
# never land in an image layer, so there is deliberately NO
# `rm -rf /var/lib/apt/lists/*` here — deleting the lists would only defeat
# the cache mount without shrinking the image.
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update && apt-get install -y --no-install-recommends \
        kmod \
        libdrm-amdgpu1 \
        libdrm2 \
        libelf1 \
        libnuma1 \
        libopenmpi3 \
    && (groupadd -g 100 -o users 2>/dev/null || true) \
    # Vendor image may already have UID 1000 — rename it to ray, or create ray
    && existing=$(getent passwd 1000 | cut -d: -f1) \
    && if [ -n "$existing" ] && [ "$existing" != "ray" ]; then \
         usermod -l ray -d /home/ray -m -s /bin/bash "$existing"; \
       elif [ -z "$existing" ]; then \
         useradd -m -u 1000 -g 100 -s /bin/bash ray; \
       fi \
    && mkdir -p /home/ray/.aiter && chown 1000:100 /home/ray/.aiter
|
|
|
|
# uv: fast Python package manager (ADR-0014), vendored from the official image.
# NOTE(review): the :latest tag is unpinned — consider pinning to a version/digest.
COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv
|
|
|
|
# Drop the vendor-built torch stack so the next step can install the exact
# torch build (2.9.1+git8907517) from wheels.vllm.ai/rocm/ that
# vLLM 0.15.1+rocm700 was compiled against. The vendor torch is close but may
# differ in C++ ABI details.
RUN uv pip uninstall --python /opt/venv/bin/python3 \
        torch torchaudio torchvision 2>/dev/null || true
|
|
|
|
# Install Ray, vLLM ROCm, torch ROCm 7.0, and inference dependencies.
# --index-strategy unsafe-best-match: let uv pull each package from whichever
# index has the best-matching version (vllm from rocm/, torch from rocm7.0,
# setuptools/others from PyPI).
# torch is pinned to 2.9.1 so unsafe-best-match cannot resolve a newer,
# non-ROCm torch from PyPI; per PEP 440, `==2.9.1` still accepts the
# 2.9.1+git8907517 local-version wheel served by wheels.vllm.ai/rocm/.
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --python /opt/venv/bin/python3 \
        --index-strategy unsafe-best-match \
        --prerelease=allow \
        --extra-index-url https://wheels.vllm.ai/rocm/ \
        --extra-index-url https://download.pytorch.org/whl/rocm7.0 \
        'ray[default]==2.53.0' \
        'vllm==0.15.1+rocm700' \
        'torch==2.9.1' \
        torchaudio torchvision \
        'transformers>=4.35.0,<5.0' \
        'accelerate>=0.25.0,<1.0' \
        'sentence-transformers>=2.3.0,<3.0' \
        'httpx>=0.27.0,<1.0' \
        'scipy>=1.11.0,<2.0' \
        'pandas>=2.0.0,<3.0' \
        'numpy>=2.1.0,<2.3'
|
|
|
|
# amdsmi sysfs shim — required for vLLM ROCm platform detection. Even though
# the native amdsmi works on Ubuntu 24.04, the shim is still needed because
# the native library reports GTT instead of VRAM on unified-memory APUs.
# Installed after vLLM/torch so the PyPI amdsmi cannot overwrite it.
COPY amdsmi-shim /tmp/amdsmi-shim
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --python /opt/venv/bin/python3 /tmp/amdsmi-shim \
    && rm -rf /tmp/amdsmi-shim
|
|
|
|
# Patch torch.cuda.mem_get_info for unified-memory APUs: on Strix Halo,
# PyTorch reports GTT (128 GiB) instead of real VRAM (96 GiB). The .pth file
# causes Python to import the fix module automatically at interpreter startup.
COPY amdsmi-shim/strixhalo_vram_fix.py \
     /opt/venv/lib/python3.12/site-packages/strixhalo_vram_fix.py
RUN echo "import strixhalo_vram_fix" > \
    /opt/venv/lib/python3.12/site-packages/strixhalo_vram_fix.pth
|
|
|
|
# Entrypoint script (ray-serve-apps itself is installed from PyPI at runtime).
# --chown sets ray ownership (uid 1000 / gid 100) at copy time instead of
# relying on a follow-up `RUN chown`, which would duplicate the file into an
# extra layer and leave it root-owned if that step were ever dropped.
COPY --chown=1000:100 --chmod=755 dockerfiles/ray-entrypoint.sh /app/ray-entrypoint.sh
|
|
|
|
# /app itself was created root-owned by WORKDIR; hand it to the ray user,
# then drop privileges for runtime (KubeRay expects uid 1000).
RUN chown -R 1000:100 /app

USER 1000
|
|
|
|
# Runtime environment configuration
ENV PYTHONPATH=/app \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    RAY_HEAD_SVC="ai-inference-raycluster-head-svc" \
    GPU_RESOURCE="gpu_amd_strixhalo" \
    NUM_GPUS="1"

# Cheap liveness probe against the local Ray GCS port
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \
    CMD ray status --address=localhost:6379 || exit 1

ENTRYPOINT ["/app/ray-entrypoint.sh"]
|