From a07b612989461af6056180355e96b207b1186dfd Mon Sep 17 00:00:00 2001
From: drbh
Date: Wed, 19 Jun 2024 17:31:13 +0000
Subject: [PATCH] fix: revert skips and prefer updated ci token for tests

---
 .github/workflows/build.yaml                       | 2 +-
 .github/workflows/tests.yaml                       | 2 +-
 integration-tests/models/test_flash_llama.py       | 3 ---
 integration-tests/models/test_flash_llama_gptq.py  | 3 ---
 server/tests/models/test_model.py                  | 3 ---
 5 files changed, 2 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 8c407e81..991cd76d 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -141,7 +141,7 @@ jobs:
         run: |
           export DOCKER_VOLUME=/mnt/cache
           export DOCKER_IMAGE=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:sha-${{ env.GITHUB_SHA_SHORT }}
-          export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+          export HUGGING_FACE_HUB_TOKEN=${{ secrets.HF_TOKEN }}
           pytest -s -vv integration-tests
       - name: Tailscale Wait
         if: ${{ failure() || runner.debug == '1' }}
diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index 83fff196..d5ad9da3 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -72,7 +72,7 @@ jobs:
       - name: Run server tests
         run: |
           pip install pytest
-          export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+          export HUGGING_FACE_HUB_TOKEN=${{ secrets.HF_TOKEN }}
           pytest -s -vv server/tests
       - name: Pre-commit checks
         run: |
diff --git a/integration-tests/models/test_flash_llama.py b/integration-tests/models/test_flash_llama.py
index e9e5ab09..c69314ff 100644
--- a/integration-tests/models/test_flash_llama.py
+++ b/integration-tests/models/test_flash_llama.py
@@ -1,8 +1,5 @@
 import pytest
 
-# TODO: avoid skipping module when CI permissions are fixed
-pytest.skip(allow_module_level=True)
-
 
 @pytest.fixture(scope="module")
 def flash_llama_handle(launcher):
diff --git a/integration-tests/models/test_flash_llama_gptq.py b/integration-tests/models/test_flash_llama_gptq.py
index 7e4e22dc..b87f054b 100644
--- a/integration-tests/models/test_flash_llama_gptq.py
+++ b/integration-tests/models/test_flash_llama_gptq.py
@@ -1,8 +1,5 @@
 import pytest
 
-# TODO: avoid skipping module when CI permissions are fixed
-pytest.skip(allow_module_level=True)
-
 
 @pytest.fixture(scope="module")
 def flash_llama_gptq_handle(launcher):
diff --git a/server/tests/models/test_model.py b/server/tests/models/test_model.py
index ab7e62a4..8441e8c6 100644
--- a/server/tests/models/test_model.py
+++ b/server/tests/models/test_model.py
@@ -5,9 +5,6 @@ from transformers import AutoTokenizer
 
 from text_generation_server.models import Model
 
-# TODO: avoid skipping module when CI permissions are fixed
-pytest.skip(allow_module_level=True)
-
 
 def get_test_model():
     class TestModel(Model):