fix: revert skips and prefer updated ci token for tests

commit a07b612989
parent c9e4526b9d
@@ -141,7 +141,7 @@ jobs:
         run: |
           export DOCKER_VOLUME=/mnt/cache
           export DOCKER_IMAGE=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:sha-${{ env.GITHUB_SHA_SHORT }}
-          export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+          export HUGGING_FACE_HUB_TOKEN=${{ secrets.HF_TOKEN }}
           pytest -s -vv integration-tests
       - name: Tailscale Wait
         if: ${{ failure() || runner.debug == '1' }}
@@ -72,7 +72,7 @@ jobs:
       - name: Run server tests
         run: |
           pip install pytest
-          export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+          export HUGGING_FACE_HUB_TOKEN=${{ secrets.HF_TOKEN }}
           pytest -s -vv server/tests
       - name: Pre-commit checks
         run: |
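Both workflow hunks above make the same one-line change: the exported HUGGING_FACE_HUB_TOKEN environment variable is now sourced from the HF_TOKEN repository secret instead of the old HUGGING_FACE_HUB_TOKEN secret. Only the secret feeding the variable changes, so nothing on the test side has to move. A minimal sketch of how test code might consume it; the helper below is hypothetical, not from this repository:

import os


def get_hub_token() -> str:
    # Hypothetical helper: the CI step exports HUGGING_FACE_HUB_TOKEN before
    # running pytest, so test code reads the env var and never needs to know
    # which GitHub secret (HF_TOKEN vs. the old name) supplied its value.
    token = os.environ.get("HUGGING_FACE_HUB_TOKEN")
    if not token:
        raise RuntimeError("HUGGING_FACE_HUB_TOKEN is not set; cannot pull gated models")
    return token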
@@ -1,8 +1,5 @@
 import pytest
 
-# TODO: avoid skipping module when CI permissions are fixed
-pytest.skip(allow_module_level=True)
-
 
 @pytest.fixture(scope="module")
 def flash_llama_handle(launcher):
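The two deleted lines are pytest's module-level skip: calling pytest.skip(..., allow_module_level=True) at import time makes the collector mark every test in the file as skipped. A minimal sketch of the mechanism this commit reverts:

import pytest

# pytest.skip at module scope is normally an error; allow_module_level=True
# permits it and skips the entire module at collection time. This is the
# guard the commit removes now that CI has a working token.
pytest.skip("CI permissions not yet fixed", allow_module_level=True)


def test_never_collected():
    # Collection stops at the skip above, so this test is reported as skipped.
    assert False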
@@ -1,8 +1,5 @@
 import pytest
 
-# TODO: avoid skipping module when CI permissions are fixed
-pytest.skip(allow_module_level=True)
-
 
 @pytest.fixture(scope="module")
 def flash_llama_gptq_handle(launcher):
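With the skip gone, the module-scoped fixture runs again; scope="module" means whatever it launches is started once and shared by every test in the file rather than restarted per test. The fixture body is elided from the hunk; a hypothetical sketch of the usual shape, assuming launcher is a context-manager factory supplied by the suite's conftest:

import pytest


@pytest.fixture(scope="module")
def flash_llama_gptq_handle(launcher):
    # Hypothetical body (not shown in the hunk): start the server once for
    # the whole module and tear it down after the last test finishes.
    with launcher("some-org/llama-gptq") as handle:  # model id is illustrative
        yield handle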
@@ -5,9 +5,6 @@ from transformers import AutoTokenizer
 
 from text_generation_server.models import Model
 
-# TODO: avoid skipping module when CI permissions are fixed
-pytest.skip(allow_module_level=True)
-
 
 def get_test_model():
     class TestModel(Model):
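The last hunk re-enables the server unit tests, which build a throwaway Model subclass inside a factory function. A hypothetical sketch of that pattern, with a stand-in base class since the real text_generation_server.models.Model interface is not shown in this hunk:

from abc import ABC, abstractmethod


class Model(ABC):  # stand-in for text_generation_server.models.Model
    @abstractmethod
    def generate_token(self, batch):  # placeholder method name
        ...


def get_test_model():
    # The pattern in the hunk: a factory returning a minimal concrete
    # subclass so tests can exercise shared Model machinery without
    # loading real weights.
    class TestModel(Model):
        def generate_token(self, batch):
            return []  # canned behavior for tests

    return TestModel()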