fix: update to python 3.12.
Some checks failed
Build and Push Images / determine-version (push) Successful in 4s
Build and Push Images / build (Dockerfile.ray-worker-nvidia, nvidia) (push) Failing after 21s
Build and Push Images / build (Dockerfile.ray-worker-intel, intel) (push) Failing after 23s
Build and Push Images / build (Dockerfile.ray-worker-strixhalo, strixhalo) (push) Failing after 19s
Build and Push Images / build (Dockerfile.ray-worker-rdna2, rdna2) (push) Failing after 23s
Build and Push Images / Release (push) Has been skipped
Build and Push Images / Notify (push) Successful in 1s

This commit is contained in:
2026-02-09 08:52:32 -05:00
parent 64585dac7e
commit 6aad7ad38a
4 changed files with 21 additions and 14 deletions

View File

@@ -6,7 +6,7 @@
 # docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-intel:latest \
 # -f dockerfiles/Dockerfile.ray-worker-intel .
-FROM docker.io/rayproject/ray:2.53.0-py311
+FROM docker.io/rayproject/ray:2.53.0-py312
 # OCI Image Spec labels
 LABEL org.opencontainers.image.title="Ray Worker - Intel GPU"

View File

@@ -6,7 +6,7 @@
 # docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-nvidia:latest \
 # -f dockerfiles/Dockerfile.ray-worker-nvidia .
-FROM docker.io/rayproject/ray:2.53.0-py311-cu121
+FROM docker.io/rayproject/ray:2.53.0-py312-cu121
 # OCI Image Spec labels
 LABEL org.opencontainers.image.title="Ray Worker - NVIDIA GPU"

View File

@@ -6,13 +6,13 @@
 # docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-rdna2:latest \
 # -f dockerfiles/Dockerfile.ray-worker-rdna2 .
 #
-# Multi-stage build: Extract ROCm from vendor image, use Ray base for Python 3.11
+# Multi-stage build: Extract ROCm from vendor image, use Ray base for Python 3.12
 # Stage 1: ROCm libraries from AMD vendor image
 FROM docker.io/rocm/pytorch:rocm6.4.4_ubuntu22.04_py3.10_pytorch_release_2.7.1 AS rocm-source
 # Stage 2: Production image
-FROM docker.io/rayproject/ray:2.53.0-py311 AS production
+FROM docker.io/rayproject/ray:2.53.0-py312 AS production
 # OCI Image Spec labels
 LABEL org.opencontainers.image.title="Ray Worker - AMD RDNA 2"
@@ -56,7 +56,7 @@ COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv
 USER ray
-# Install PyTorch with ROCm 6.2 wheels for Python 3.11 (uv is 10-100x faster)
+# Install PyTorch with ROCm 6.2 wheels for Python 3.12 (uv is 10-100x faster)
 RUN --mount=type=cache,target=/home/ray/.cache/uv,uid=1000,gid=1000 \
     uv pip install --system \
     torch==2.5.1 torchvision torchaudio \

View File

@@ -6,14 +6,15 @@
 # docker build -t git.daviestechlabs.io/daviestechlabs/ray-worker-strixhalo:latest \
 # -f dockerfiles/Dockerfile.ray-worker-strixhalo .
 #
-# Multi-stage build: Extract ROCm 7.1 from vendor image, use Ray base for Python 3.11
+# Multi-stage build: Extract ROCm 7.1 from vendor image, use Ray base for Python 3.12
 # Note: Uses TheRock gfx110X wheels due to ROCm/ROCm#5853 segfault issue
+# Note: Python 3.12 required — vLLM ROCm wheel (wheels.vllm.ai/rocm) is cp312 only
 # Stage 1: ROCm 7.1 libraries from AMD vendor image
 FROM docker.io/rocm/pytorch:rocm7.1_ubuntu24.04_py3.12_pytorch_release_2.9.1 AS rocm-source
 # Stage 2: Production image
-FROM docker.io/rayproject/ray:2.53.0-py311 AS production
+FROM docker.io/rayproject/ray:2.53.0-py312 AS production
 # OCI Image Spec labels
 LABEL org.opencontainers.image.title="Ray Worker - AMD Strix Halo"
@@ -59,11 +60,16 @@ COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv
 USER ray
-# Install vLLM and inference dependencies first (without torch)
-# vLLM will try to install CUDA torch as dependency, we exclude it here
+# Install vLLM ROCm build and inference dependencies.
+# The vLLM ROCm wheel from wheels.vllm.ai includes HIP-compiled C-extensions
+# (vllm._C, vllm._rocm_C) that are ABI-compatible with ROCm PyTorch.
+# PyPI vLLM is CUDA-only and crashes with: libcudart.so.12 not found.
+# uv gives --extra-index-url higher priority than PyPI, so the ROCm wheel
+# is selected over the CUDA wheel.
 RUN --mount=type=cache,target=/home/ray/.cache/uv,uid=1000,gid=1000 \
     uv pip install --system \
-    'vllm>=0.5.0' \
+    --extra-index-url https://wheels.vllm.ai/rocm/ \
+    vllm \
     'transformers>=4.35.0,<5.0' \
     'accelerate>=0.25.0,<1.0' \
     'sentence-transformers>=2.3.0,<3.0' \
@@ -72,8 +78,9 @@ RUN --mount=type=cache,target=/home/ray/.cache/uv,uid=1000,gid=1000 \
 # WORKAROUND: ROCm/ROCm#5853 - Standard PyTorch ROCm wheels cause segfault
 # in libhsa-runtime64.so during VRAM allocation on gfx1151 (Strix Halo).
-# TheRock gfx110X-all packages provide compatible Python 3.11 wheels.
-# Install AFTER vLLM to override the CUDA torch it pulled in.
+# TheRock gfx110X-all packages provide compatible Python 3.12 wheels.
+# Reinstall AFTER vLLM to override the standard ROCm torch it pulled in.
+# vLLM's ROCm C-extensions remain compatible (same HIP ABI, torch 2.10.x).
 RUN --mount=type=cache,target=/home/ray/.cache/uv,uid=1000,gid=1000 \
     uv pip install --system --reinstall \
     --index-url https://rocm.nightlies.amd.com/v2/gfx110X-all/ \
@@ -101,9 +108,9 @@ RUN --mount=type=cache,target=/home/ray/.cache/uv,uid=1000,gid=1000 \
 # OOM or "insufficient GPU memory" at startup. The .pth file auto-patches
 # mem_get_info on Python startup to return sysfs VRAM values.
 COPY --chown=1000:100 amdsmi-shim/strixhalo_vram_fix.py \
-    /home/ray/anaconda3/lib/python3.11/site-packages/strixhalo_vram_fix.py
+    /home/ray/anaconda3/lib/python3.12/site-packages/strixhalo_vram_fix.py
 RUN echo "import strixhalo_vram_fix" > \
-    /home/ray/anaconda3/lib/python3.11/site-packages/strixhalo_vram_fix.pth
+    /home/ray/anaconda3/lib/python3.12/site-packages/strixhalo_vram_fix.pth
 # Pre-download common models for faster cold starts (optional, increases image size)
 # RUN python3 -c "from sentence_transformers import SentenceTransformer; SentenceTransformer('BAAI/bge-large-en-v1.5')"