diff --git a/dockerfiles/Dockerfile.ray-worker-strixhalo b/dockerfiles/Dockerfile.ray-worker-strixhalo
index 383e1e2..550ff74 100644
--- a/dockerfiles/Dockerfile.ray-worker-strixhalo
+++ b/dockerfiles/Dockerfile.ray-worker-strixhalo
@@ -215,7 +215,13 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     'grpcio-tools>=1.60.0' \
     'anthropic>=0.20.0' \
     'mcp>=1.0' \
-    'tensorizer>=2.9.0'
+    'tensorizer>=2.9.0' \
+    'openai-harmony>=0.0.6' \
+    'llguidance>=1.0' \
+    'conch-triton-kernels>=1.0' \
+    'model-hosting-container-standards>=0.1.0' \
+    'runai-model-streamer>=0.15.0' \
+    'timm>=1.0'
 
 # ── Ray Serve application package ──────────────────────────────────────
 # Baked into the image so the LLM serve app can use the source-built vllm