From 78d8ca49ec18e1da1237f67edf66c07e22174803 Mon Sep 17 00:00:00 2001
From: Debanjum <debanjum@gmail.com>
Date: Sat, 23 Nov 2024 22:24:45 -0800
Subject: [PATCH] Skip Nvidia GPU python packages during Server install in
 Dockerfiles

---
 Dockerfile      | 6 ++++--
 prod.Dockerfile | 6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 458038f7..202d1e92 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -26,8 +26,10 @@ WORKDIR /app
 COPY pyproject.toml .
 COPY README.md .
 ARG VERSION=0.0.0
-# use the pre-built llama-cpp-python cpu wheel
-ENV PIP_EXTRA_INDEX_URL=https://abetlen.github.io/llama-cpp-python/whl/cpu
+# use the pre-built llama-cpp-python, torch cpu wheel
+ENV PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu https://abetlen.github.io/llama-cpp-python/whl/cpu"
+# avoid downloading unused cuda specific python packages
+ENV CUDA_VISIBLE_DEVICES=""
 RUN sed -i "s/dynamic = \\[\"version\"\\]/version = \"$VERSION\"/" pyproject.toml && \
     pip install --no-cache-dir .
 
diff --git a/prod.Dockerfile b/prod.Dockerfile
index f0911a05..07a5c0d4 100644
--- a/prod.Dockerfile
+++ b/prod.Dockerfile
@@ -26,8 +26,10 @@ WORKDIR /app
 COPY pyproject.toml .
 COPY README.md .
 ARG VERSION=0.0.0
-# use the pre-built llama-cpp-python cpu wheel
-ENV PIP_EXTRA_INDEX_URL=https://abetlen.github.io/llama-cpp-python/whl/cpu
+# use the pre-built llama-cpp-python, torch cpu wheel
+ENV PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu https://abetlen.github.io/llama-cpp-python/whl/cpu"
+# avoid downloading unused cuda specific python packages
+ENV CUDA_VISIBLE_DEVICES=""
 RUN sed -i "s/dynamic = \\[\"version\"\\]/version = \"$VERSION\"/" pyproject.toml && \
     pip install --no-cache-dir .[prod]
 