name: Slow tests on main
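# Runs the slow test suites on every push to main.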

on:
  push:
    branches:
      - main

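# Shared CI environment: HF_HOME points at the mounted cache volume, RUN_SLOW
# enables the slow test suite, and PYTEST_TIMEOUT caps each test in seconds.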
env:
  DIFFUSERS_IS_CI: yes
  HF_HOME: /mnt/cache
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  PYTEST_TIMEOUT: 600
  RUN_SLOW: yes

jobs:
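  # Matrix job: one slow-test run per framework (PyTorch CUDA, Flax TPU,
  # ONNXRuntime CUDA), each with its own runner label, Docker image, and report name.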
  run_slow_tests:
    strategy:
      fail-fast: false
      matrix:
        config:
          - name: Slow PyTorch CUDA tests on Ubuntu
            framework: pytorch
            runner: docker-gpu
            image: diffusers/diffusers-pytorch-cuda
            report: torch_cuda
          - name: Slow Flax TPU tests on Ubuntu
            framework: flax
            runner: docker-tpu
            image: diffusers/diffusers-flax-tpu
            report: flax_tpu
          - name: Slow ONNXRuntime CUDA tests on Ubuntu
            framework: onnxruntime
            runner: docker-gpu
            image: diffusers/diffusers-onnxruntime-cuda
            report: onnx_cuda

    name: ${{ matrix.config.name }}

    runs-on: ${{ matrix.config.runner }}

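    # The host Hugging Face cache is mounted into the container; the TPU runner
    # gets --privileged, the GPU runners get --gpus 0.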
    container:
      image: ${{ matrix.config.image }}
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ ${{ matrix.config.runner == 'docker-tpu' && '--privileged' || '--gpus 0' }}

    defaults:
      run:
        shell: bash

    steps:
    - name: Checkout diffusers
      uses: actions/checkout@v3
      with:
        fetch-depth: 2

    - name: NVIDIA-SMI
      if: ${{ matrix.config.runner == 'docker-gpu' }}
      run: |
        nvidia-smi

    - name: Install dependencies
      run: |
        python -m pip install -e .[quality,test]
        python -m pip install -U git+https://github.com/huggingface/transformers
        python -m pip install git+https://github.com/huggingface/accelerate

    - name: Environment
      run: |
        python utils/print_env.py

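    # Each framework's test step runs only for the matching matrix entry; the -k
    # expression selects that framework's tests and --make-reports writes a
    # per-config report consumed by the reporting steps below.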
    - name: Run slow PyTorch CUDA tests
      if: ${{ matrix.config.framework == 'pytorch' }}
      env:
        HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
      run: |
        python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
          -s -v -k "not Flax and not Onnx" \
          --make-reports=tests_${{ matrix.config.report }} \
          tests/

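    # Note: the Flax TPU run below uses -n 0, i.e. pytest runs without xdist workers.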
    - name: Run slow Flax TPU tests
      if: ${{ matrix.config.framework == 'flax' }}
      env:
        HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
      run: |
        python -m pytest -n 0 \
          -s -v -k "Flax" \
          --make-reports=tests_${{ matrix.config.report }} \
          tests/

    - name: Run slow ONNXRuntime CUDA tests
      if: ${{ matrix.config.framework == 'onnxruntime' }}
      env:
        HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
      run: |
        python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
          -s -v -k "Onnx" \
          --make-reports=tests_${{ matrix.config.report }} \
          tests/

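    # Reporting: print the short failure summary if any step failed, and always
    # upload the full reports/ directory as a per-config artifact.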
    - name: Failure short reports
      if: ${{ failure() }}
      run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt

    - name: Test suite reports artifacts
      if: ${{ always() }}
      uses: actions/upload-artifact@v2
      with:
        name: ${{ matrix.config.report }}_test_reports
        path: reports

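  # Second job: run the PyTorch example scripts on the GPU runner, with the same
  # CUDA container image and reporting pattern as the slow-test job.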
  run_examples_tests:
    name: Examples PyTorch CUDA tests on Ubuntu

    runs-on: docker-gpu

    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

    steps:
    - name: Checkout diffusers
      uses: actions/checkout@v3
      with:
        fetch-depth: 2

    - name: NVIDIA-SMI
      run: |
        nvidia-smi

    - name: Install dependencies
      run: |
        python -m pip install -e .[quality,test,training]
        python -m pip install git+https://github.com/huggingface/accelerate
        python -m pip install -U git+https://github.com/huggingface/transformers

    - name: Environment
      run: |
        python utils/print_env.py

    - name: Run example tests on GPU
      env:
        HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
      run: |
        python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/

    - name: Failure short reports
      if: ${{ failure() }}
      run: cat reports/examples_torch_cuda_failures_short.txt

    - name: Test suite reports artifacts
      if: ${{ always() }}
      uses: actions/upload-artifact@v2
      with:
        name: examples_test_reports
        path: reports