From 7216886a00692cd875ab9f63696aa05e91a3d751 Mon Sep 17 00:00:00 2001
From: Richard Edgar
Date: Fri, 17 May 2024 06:22:26 -0400
Subject: [PATCH] [Build] Adjust llama-cpp-python versions (#832)

Exclude another version of `llama-cpp-python` which appears to be
causing us issues.
---
 .github/workflows/action_gpu_unit_tests.yml | 2 +-
 .github/workflows/ci_tests.yml              | 2 +-
 .github/workflows/notebook_tests.yml        | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/action_gpu_unit_tests.yml b/.github/workflows/action_gpu_unit_tests.yml
index f03634fad..fd221b851 100644
--- a/.github/workflows/action_gpu_unit_tests.yml
+++ b/.github/workflows/action_gpu_unit_tests.yml
@@ -56,7 +56,7 @@ jobs:
       - name: GPU pip installs
         run: |
           pip install accelerate
-          CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install "llama-cpp-python!=0.2.58"
+          CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install "llama-cpp-python!=0.2.58,!=0.2.75"
       - name: Check GPU available
         run: |
           python -c "import torch; assert torch.cuda.is_available()"
diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml
index b7872c5ac..f5631ab39 100644
--- a/.github/workflows/ci_tests.yml
+++ b/.github/workflows/ci_tests.yml
@@ -53,7 +53,7 @@ jobs:
       - name: GPU pip installs
         run: |
           pip install accelerate
-          CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install "llama-cpp-python<0.2.58"
+          CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install "llama-cpp-python!=0.2.58,!=0.2.75"
       - name: Check GPU available
         run: |
           python -c "import torch; assert torch.cuda.is_available()"
diff --git a/.github/workflows/notebook_tests.yml b/.github/workflows/notebook_tests.yml
index 9c36c0152..087c03691 100644
--- a/.github/workflows/notebook_tests.yml
+++ b/.github/workflows/notebook_tests.yml
@@ -56,7 +56,7 @@ jobs:
       - name: GPU pip installs
         run: |
           pip install accelerate
-          CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install "llama-cpp-python<0.2.58"
+          CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install "llama-cpp-python!=0.2.58,!=0.2.75"
       - name: Check GPU available
         run: |
           python -c "import torch; assert torch.cuda.is_available()"
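
For reference, pip parses the quoted requirement as a PEP 440 specifier set:
`!=0.2.58,!=0.2.75` excludes exactly those two releases while leaving every
other version installable. A minimal sketch of how such a specifier set
evaluates, assuming the standalone `packaging` library (the same one pip
vendors internally) is available:

    # Sketch of PEP 440 exclusion-specifier semantics; requires
    # `pip install packaging` if the library is not already present.
    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet("!=0.2.58,!=0.2.75")

    print("0.2.58" in spec)  # False -- previously excluded release
    print("0.2.75" in spec)  # False -- the release excluded by this patch
    print("0.2.76" in spec)  # True  -- any other version still satisfies the set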