This commit is contained in:
fxmarty 2024-06-11 11:25:14 +00:00
parent 73b067d193
commit dadfff621e
6 changed files with 114 additions and 10 deletions

View File

@ -31,15 +31,19 @@ jobs:
# with sigstore/fulcio when running outside of PRs.
id-token: write
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Initialize Docker Buildx
uses: docker/setup-buildx-action@v3
with:
install: true
- name: Inject slug/short variables
uses: rlespinasse/github-slug-action@v4.4.1
- name: Construct harware variables
shell: bash
run: |
@ -54,11 +58,9 @@ jobs:
export dockerfile="Dockerfile_amd"
export label_extension="-rocm"
export docker_devices="/dev/kfd,/dev/dri"
# TODO Re-enable when they pass.
# export runs_on="amd-gpu-tgi"
export runs_on="ubuntu-latest"
export runs_on="amd-gpu-tgi"
;;
intel)
xpu)
export dockerfile="Dockerfile_intel"
export label_extension="-intel"
export docker_devices=""
@ -74,10 +76,12 @@ jobs:
echo "LABEL=${label_extension}" >> $GITHUB_ENV
echo "DOCKER_DEVICES=${docker_devices}" >> $GITHUB_ENV
echo "RUNS_ON=${runs_on}" >> $GITHUB_ENV
- name: Tailscale
uses: huggingface/tailscale-action@main
with:
authkey: ${{ secrets.TAILSCALE_AUTHKEY }}
- name: Login to GitHub Container Registry
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
@ -85,12 +89,14 @@ jobs:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to internal Container Registry
uses: docker/login-action@v3
with:
username: ${{ secrets.TAILSCALE_DOCKER_USERNAME }}
password: ${{ secrets.TAILSCALE_DOCKER_PASSWORD }}
registry: registry.internal.huggingface.tech
- name: Login to Azure Container Registry
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
@ -98,6 +104,7 @@ jobs:
username: ${{ secrets.AZURE_DOCKER_USERNAME }}
password: ${{ secrets.AZURE_DOCKER_PASSWORD }}
registry: db4c2190dd824d1f950f5d1555fbadf0.azurecr.io
# If pull request
- name: Extract metadata (tags, labels) for Docker
if: ${{ github.event_name == 'pull_request' }}
@ -108,6 +115,7 @@ jobs:
registry.internal.huggingface.tech/api-inference/community/text-generation-inference
tags: |
type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}${{ env.LABEL }}
# If main, release or tag
- name: Extract metadata (tags, labels) for Docker
if: ${{ github.event_name != 'pull_request' }}
@ -125,6 +133,7 @@ jobs:
type=semver,pattern={{major}}.{{minor}}${{ env.LABEL }}
type=raw,value=latest${{ env.LABEL }},enable=${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }}
type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}${{ env.LABEL }}
- name: Build and push Docker image
id: build-and-push
uses: docker/build-push-action@v4
@ -140,6 +149,7 @@ jobs:
labels: ${{ steps.meta.outputs.labels || steps.meta-pr.outputs.labels }}
cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ env.LABEL }},mode=min
cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ env.LABEL }},mode=min
- name: Final
id: final
run: |
@ -147,6 +157,7 @@ jobs:
echo "docker_devices=${{ env.DOCKER_DEVICES }}" >> "$GITHUB_OUTPUT"
echo "runs_on=${{ env.RUNS_ON }}" >> "$GITHUB_OUTPUT"
echo "label=${{ env.LABEL }}" >> "$GITHUB_OUTPUT"
integration_tests:
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ needs.build-and-push.outputs.label }}-${{ github.head_ref || github.run_id }}
@ -157,25 +168,37 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Inject slug/short variables
uses: rlespinasse/github-slug-action@v4.4.1
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: Install
run: |
make install-integration-tests
- name: Tailscale
uses: huggingface/tailscale-action@main
if: needs.build-and-push.outputs.runs_on != 'amd-gpu-tgi'
with:
authkey: ${{ secrets.TAILSCALE_AUTHKEY }}
- name: Run tests
run: |
export DOCKER_VOLUME=/mnt/cache
export DOCKER_IMAGE=${{ needs.build-and-push.outputs.docker_image }}
export DOCKER_DEVICES=${{ needs.build-and-push.outputs.docker_devices }}
export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }}
export DOCKER_IMAGE=${{ needs.build-and-push.outputs.docker_image }}
echo "DOCKER_IMAGE:"
echo $DOCKER_IMAGE
pytest -s -vv integration-tests
export SYSTEM=${{ inputs.hardware }}
echo "SYSTEM:"
echo $SYSTEM
pytest -s -vvvvv integration-tests

View File

@ -29,7 +29,7 @@ jobs:
# fail-fast is true by default
fail-fast: false
matrix:
hardware: ["cuda", "rocm", "intel"]
hardware: ["cuda", "rocm", "xpu"]
uses: ./.github/workflows/build.yaml # calls the one above ^
with:
hardware: ${{ matrix.hardware }}

View File

@ -1,6 +1,6 @@
import pytest
from testing_utils import SYSTEM, is_flaky_async
from testing_utils import SYSTEM, is_flaky_async, require_backend_async
@pytest.fixture(scope="module")

View File

@ -20,6 +20,8 @@ async def test_flash_gemma_gptq(flash_gemma_gptq, ignore_logprob_response_snapsh
"Test request", max_new_tokens=10, decoder_input_details=True
)
print(f"response.generated_text `{response.generated_text}`")
assert response.details.generated_tokens == 10
assert response == ignore_logprob_response_snapshot

View File

@ -1,5 +1,7 @@
import pytest
from testing_utils import is_flaky_async, SYSTEM, require_backend_async
@pytest.fixture(scope="module")
def flash_llama_gptq_handle(launcher):
@ -15,18 +17,26 @@ async def flash_llama_gptq(flash_llama_gptq_handle):
@pytest.mark.asyncio
@pytest.mark.private
@is_flaky_async(max_attempts=5)
async def test_flash_llama_gptq(flash_llama_gptq, response_snapshot):
response = await flash_llama_gptq.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
assert response.generated_text == "\nTest request\nTest request\nTest request\n"
if SYSTEM != "rocm":
# Logits were taken on an Nvidia GPU, and are too far off to be meaningfully compared.
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
@require_backend_async("cuda")
async def test_flash_llama_gptq_all_params(flash_llama_gptq, response_snapshot):
# TODO: investigate why exllamav2 gptq kernel is this much more non-deterministic on ROCm vs on CUDA.
response = await flash_llama_gptq.generate(
"Test request",
max_new_tokens=10,
@ -41,16 +51,18 @@ async def test_flash_llama_gptq_all_params(flash_llama_gptq, response_snapshot):
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
@require_backend_async("cuda")
async def test_flash_llama_gptq_load(
flash_llama_gptq, generate_load, response_snapshot
):
# TODO: investigate why exllamav2 gptq kernel is this much more non-deterministic on ROCm vs on CUDA.
responses = await generate_load(
flash_llama_gptq, "Test request", max_new_tokens=10, n=4
)

View File

@ -0,0 +1,67 @@
import functools
import os
from typing import Optional
import sys
import pytest
SYSTEM = os.environ.get("SYSTEM")
def is_flaky_async(
max_attempts: int = 5,
wait_before_retry: Optional[float] = None,
description: Optional[str] = None,
):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
async def wrapper(*args, **kwargs):
retry_count = 1
while retry_count <= max_attempts:
try:
return await test_func_ref(*args, **kwargs)
except Exception as err:
if retry_count == max_attempts:
raise err
print(
f"Test failed at try {retry_count}/{max_attempts}.",
file=sys.stderr,
)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return wrapper
return decorator
def require_backend_async(*args):
def decorator(func):
@functools.wraps(func)
async def wrapper(*wrapper_args, **wrapper_kwargs):
if SYSTEM not in args:
pytest.skip(
f"Skipping as this test requires the backend {args} to be run, but current system is SYSTEM={SYSTEM}."
)
return await func(*wrapper_args, **wrapper_kwargs)
return wrapper
return decorator