name: Build and push docker image to internal registry

on:
  workflow_call:
    inputs:
      hardware:
        type: string
        description: Hardware
        # options:
        # - cuda
        # - rocm
        # - xpu
        required: true
      release-tests:
        description: "Run release integration tests"
        required: true
        default: false
        type: boolean

jobs:
  build-and-push:
    outputs:
      docker_image: ${{ steps.final.outputs.docker_image }}
      base_docker_image: ${{ steps.final.outputs.base_docker_image }}
      docker_devices: ${{ steps.final.outputs.docker_devices }}
      docker_volume: ${{ steps.final.outputs.docker_volume }}
      runs_on: ${{ steps.final.outputs.runs_on }}
      label: ${{ steps.final.outputs.label }}
    concurrency:
      group: ${{ github.workflow }}-build-and-push-image-${{ inputs.hardware }}-${{ github.head_ref || github.run_id }}
      cancel-in-progress: true
    # TODO: see with @Glegendre to get a CPU runner here instead
    runs-on: [self-hosted, intel-cpu, 32-cpu, 256-ram, ci]
    permissions:
      contents: write
      packages: write
      # This is used to complete the identity challenge
      # with sigstore/fulcio when running outside of PRs.
      id-token: write
      security-events: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v4.4.1

      - name: Construct hardware variables
        shell: bash
        run: |
          case ${{ inputs.hardware }} in
            cuda)
              export dockerfile="Dockerfile"
              export label_extension=""
              export docker_devices=""
              export runs_on="nvidia-gpu"
              ;;
            rocm)
              export dockerfile="Dockerfile_amd"
              export label_extension="-rocm"
              export docker_devices="/dev/kfd,/dev/dri"
              export runs_on="amd-gpu-tgi"
              ;;
            xpu)
              export dockerfile="Dockerfile_intel"
              export label_extension="-intel"
              export docker_devices=""
              export runs_on="ubuntu-latest"
              ;;
          esac
          echo $dockerfile
          echo $label_extension
          echo $docker_devices
          echo $runs_on
          echo "DOCKERFILE=${dockerfile}" >> $GITHUB_ENV
          echo "LABEL=${label_extension}" >> $GITHUB_ENV
          echo "DOCKER_DEVICES=${docker_devices}" >> $GITHUB_ENV
          echo "RUNS_ON=${runs_on}" >> $GITHUB_ENV

      - name: Initialize Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          install: true
          config-inline: |
            [registry."docker.io"]
              mirrors = ["registry.github-runners.huggingface.tech"]

      - name: Login to GitHub Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Azure Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.AZURE_DOCKER_USERNAME }}
          password: ${{ secrets.AZURE_DOCKER_PASSWORD }}
          registry: db4c2190dd824d1f950f5d1555fbadf0.azurecr.io

      # If pull request
      - name: Extract metadata (tags, labels) for Docker
        if: ${{ github.event_name == 'pull_request' }}
        id: meta-pr
        uses: docker/metadata-action@v5
        with:
          images: |
            registry-push.github-runners.huggingface.tech/api-inference/community/text-generation-inference
          tags: |
            type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}${{ env.LABEL }}

      # If main, release or tag
      - name: Extract metadata (tags, labels) for Docker
        if: ${{ github.event_name != 'pull_request' }}
        id: meta
        uses: docker/metadata-action@v4.3.0
        with:
          flavor: |
            latest=auto
          images: |
            registry-push.github-runners.huggingface.tech/api-inference/community/text-generation-inference
            ghcr.io/huggingface/text-generation-inference
            db4c2190dd824d1f950f5d1555fbadf0.azurecr.io/text-generation-inference
          tags: |
            type=semver,pattern={{version}}${{ env.LABEL }}
            type=semver,pattern={{major}}.{{minor}}${{ env.LABEL }}
            type=raw,value=latest${{ env.LABEL }},enable=${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }}
            type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}${{ env.LABEL }}
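      # For illustration only (not part of the build): with the semver/raw rules
      # above, a hypothetical release tag `v2.0.4` built with LABEL="-rocm" would
      # resolve to roughly the following tags for each of the three `images:`
      # entries (plus `latest` variants, depending on the branch and flavor rules):
      #
      #   text-generation-inference:2.0.4-rocm
      #   text-generation-inference:2.0-rocm
      #   text-generation-inference:sha-<short-sha>-rocm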
      - name: Build and push Docker image
        id: build-and-push
        uses: docker/build-push-action@v4
        with:
          context: .
          file: ${{ env.DOCKERFILE }}
          push: true
          platforms: 'linux/amd64'
          build-args: |
            GIT_SHA=${{ env.GITHUB_SHA }}
            DOCKER_LABEL=sha-${{ env.GITHUB_SHA_SHORT }}${{ env.LABEL }}
          tags: ${{ steps.meta.outputs.tags || steps.meta-pr.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels || steps.meta-pr.outputs.labels }}
          cache-from: type=s3,region=us-east-1,bucket=ci-docker-buildx-cache,name=text-generation-inference-cache${{ env.LABEL }},mode=min,access_key_id=${{ secrets.S3_CI_DOCKER_BUILDX_CACHE_ACCESS_KEY_ID }},secret_access_key=${{ secrets.S3_CI_DOCKER_BUILDX_CACHE_SECRET_ACCESS_KEY }}
          cache-to: type=s3,region=us-east-1,bucket=ci-docker-buildx-cache,name=text-generation-inference-cache${{ env.LABEL }},mode=min,access_key_id=${{ secrets.S3_CI_DOCKER_BUILDX_CACHE_ACCESS_KEY_ID }},secret_access_key=${{ secrets.S3_CI_DOCKER_BUILDX_CACHE_SECRET_ACCESS_KEY }}

      - name: Final
        id: final
        run: |
          echo "docker_image=registry-push.github-runners.huggingface.tech/api-inference/community/text-generation-inference:sha-${{ env.GITHUB_SHA_SHORT }}${{ env.LABEL }}" >> "$GITHUB_OUTPUT"
          echo "docker_devices=${{ env.DOCKER_DEVICES }}" >> "$GITHUB_OUTPUT"
          echo "runs_on=${{ env.RUNS_ON }}" >> "$GITHUB_OUTPUT"
          echo "label=${{ env.LABEL }}" >> "$GITHUB_OUTPUT"

          if [[ ${{ inputs.hardware }} == "rocm" ]]
          then
            echo "base_docker_image=rocm/dev-ubuntu-22.04:6.1.1_hip_update" >> "$GITHUB_OUTPUT"
          elif [[ ${{ inputs.hardware }} == "cuda" ]]
          then
            echo "base_docker_image=nvidia/cuda:12.1.0-base-ubuntu22.04" >> "$GITHUB_OUTPUT"
          elif [[ ${{ inputs.hardware }} == "xpu" ]]
          then
            echo "base_docker_image=intel/intel-extension-for-pytorch:2.1.30-xpu" >> "$GITHUB_OUTPUT"
          else
            exit 1
          fi

          if [[ ${{ inputs.hardware }} == "rocm" ]]
          then
            echo "docker_volume=/data/cache/.cache/huggingface/hub" >> "$GITHUB_OUTPUT"
          else
            echo "docker_volume=/mnt/cache" >> "$GITHUB_OUTPUT"
          fi

  prepare_integration_tests:
    runs-on: ["self-hosted", "${{ needs.build-and-push.outputs.runs_on }}", "multi-gpu"]
    needs: [build-and-push]
    concurrency:
      group: ${{ github.workflow }}-${{ github.job }}-${{ needs.build-and-push.outputs.label }}-${{ github.head_ref || github.run_id }}
      cancel-in-progress: true
    if: needs.build-and-push.outputs.runs_on != 'ubuntu-latest'
    # Ideally, we would use the image from registry.internal.huggingface.tech, but we cannot log in to the private registry outside of tailscale,
    # and even adding a previous job with tailscale login still results in `Docker login for 'registry.internal.huggingface.tech' failed with exit code 1`.
    container:
      image: ${{ needs.build-and-push.outputs.base_docker_image }}
      options: --shm-size "16gb" --ipc host -v ${{ needs.build-and-push.outputs.docker_volume }}:/data
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Clean Hugging Face cache
        shell: bash
        run: |
          if [[ ${{ inputs.hardware }} == "rocm" ]]
          then
            echo "pwd:"
            pwd
            echo "ls:"
            ls
            pip3 install -U huggingface_hub
            python3 integration-tests/clean_cache_and_download.py --token ${{ secrets.HF_TOKEN }} --cache-dir /data

            # Avoid permissions issues in the next step, which is not run within docker (File was unable to be removed Error: EACCES).
            if [[ $PWD == *"text-generation-inference"* ]]; then
              rm -rf -- ..?* .[!.]* *
            fi
          fi
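  # A minimal sketch of how a caller could invoke this reusable workflow
  # (illustrative only; the job name and the `uses:` path are hypothetical
  # and must match where this workflow file actually lives):
  #
  #   jobs:
  #     build:
  #       uses: ./.github/workflows/build.yaml
  #       with:
  #         hardware: "cuda"
  #         release-tests: false
  #       secrets: inherit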
  integration_tests:
    concurrency:
      group: ${{ github.workflow }}-${{ github.job }}-${{ needs.build-and-push.outputs.label }}-${{ github.head_ref || github.run_id }}
      cancel-in-progress: true
    needs: [build-and-push, prepare_integration_tests]
    runs-on: ["self-hosted", "${{ needs.build-and-push.outputs.runs_on }}", "multi-gpu"]
    if: needs.build-and-push.outputs.runs_on != 'ubuntu-latest'
    env:
      PYTEST_FLAGS: ${{ (startsWith(github.ref, 'refs/tags/') || github.ref == 'refs/heads/main' || inputs.release-tests == true) && '--release' || '' }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v4.4.1

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"

      - name: Install
        run: |
          make install-integration-tests

      - name: Run tests
        run: |
          export DOCKER_DEVICES=${{ needs.build-and-push.outputs.docker_devices }}
          export HF_TOKEN=${{ secrets.HF_TOKEN }}
          export DOCKER_IMAGE=${{ needs.build-and-push.outputs.docker_image }}
          echo "DOCKER_IMAGE:"
          echo $DOCKER_IMAGE

          export SYSTEM=${{ inputs.hardware }}
          echo "SYSTEM:"
          echo $SYSTEM

          export DOCKER_VOLUME=${{ needs.build-and-push.outputs.docker_volume }}
          echo "DOCKER_VOLUME:"
          echo $DOCKER_VOLUME

          # TunableOp warmup is rather slow, so do it only for a few seqlens.
          if [[ ${{ inputs.hardware }} == "rocm" ]]
          then
            export PYTORCH_TUNABLEOP_SEQLENS=2,4
          fi

          pytest -s -vvvvv integration-tests ${PYTEST_FLAGS}
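# For reference only: a rough sketch of reproducing the "Run tests" step
# locally against an already-pushed image. All values below are illustrative;
# substitute a real image tag, volume path, and token:
#
#   export SYSTEM=cuda
#   export DOCKER_IMAGE=ghcr.io/huggingface/text-generation-inference:sha-<short-sha>
#   export DOCKER_VOLUME=/mnt/cache
#   export HF_TOKEN=<token>
#   make install-integration-tests
#   pytest -s -vvvvv integration-tests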