do not skip workflow on cuda, fix no space left on device

Felix Marty 2024-06-24 11:54:09 +00:00 committed by Nicolas Patry
parent f16f0ad92b
commit 09a41f2c43
2 changed files with 12 additions and 6 deletions


@@ -174,7 +174,7 @@ jobs:
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ needs.build-and-push.outputs.label }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
- if: needs.build-and-push.outputs.runs_on == 'amd-gpu-tgi'
+ if: needs.build-and-push.outputs.runs_on != 'ubuntu-latest'
container:
image: ${{ needs.build-and-push.outputs.docker_image }}
options: --shm-size "16gb" --ipc host -v ${{ needs.build-and-push.outputs.docker_volume }}:/data
@@ -191,7 +191,7 @@ jobs:
pwd
echo "ls:"
ls
- python integration-tests/clean_cache_and_download.py --token ${{ secrets.HF_TOKEN }}
+ python integration-tests/clean_cache_and_download.py --token ${{ secrets.HF_TOKEN }} --cache-dir /data
fi
integration_tests:
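Passing --cache-dir /data points the cleanup script at the Docker volume mounted via the container options above, rather than the container's default Hub cache (typically ~/.cache/huggingface/hub), whose filesystem is presumably what filled up and produced the "no space left on device" error. A minimal sketch of scanning a non-default cache directory with huggingface_hub, assuming /data is the mounted volume:

# Sketch only: list what is cached on the mounted volume.
import huggingface_hub

cache_info = huggingface_hub.scan_cache_dir("/data")  # assumed mount point
print(f"Total cache size: {cache_info.size_on_disk / 1e9:.2f} GB")
for repo in cache_info.repos:
    print(f"{repo.repo_id}: {repo.size_on_disk / 1e9:.2f} GB")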
@@ -243,4 +243,8 @@ jobs:
echo "SYSTEM:"
echo $SYSTEM
+ export DOCKER_VOLUME=${{ needs.build-and-push.outputs.docker_volume }}
+ echo "DOCKER_VOLUME:"
+ echo $DOCKER_VOLUME
pytest -s -vvvvv integration-tests
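Exporting DOCKER_VOLUME makes the volume path visible to the pytest run; presumably the integration-test harness reads it when launching model containers. A hedged sketch of that kind of lookup (the variable name comes from the diff; the helper name, fallback, and mapping format are assumptions):

# Hypothetical helper: how a test harness might consume DOCKER_VOLUME.
import os

def cache_volume_mapping():
    # Fall back to a local path when the variable is not exported.
    volume = os.environ.get("DOCKER_VOLUME", "/data")
    return {volume: {"bind": "/data", "mode": "rw"}}  # docker-py style mapping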

integration-tests/clean_cache_and_download.py

@@ -35,7 +35,7 @@ REQUIRED_MODELS = {
}
- def cleanup_cache(token: str):
+ def cleanup_cache(token: str, cache_dir: str):
# Retrieve the size per model for all models used in the CI.
size_per_model = {}
extension_per_model = {}
@@ -74,7 +74,7 @@ def cleanup_cache(token: str):
total_required_size = sum(size_per_model.values())
print(f"Total required disk: {total_required_size:.2f} GB")
- cached_dir = huggingface_hub.scan_cache_dir()
+ cached_dir = huggingface_hub.scan_cache_dir(cache_dir)
cache_size_per_model = {}
cached_required_size_per_model = {}
@@ -121,7 +121,7 @@ def cleanup_cache(token: str):
print("Removing", largest_model_id)
for sha in cached_shas_per_model[largest_model_id]:
- huggingface_hub.scan_cache_dir().delete_revisions(sha).execute()
+ huggingface_hub.scan_cache_dir(cache_dir).delete_revisions(sha).execute()
del cache_size_per_model[largest_model_id]
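delete_revisions(sha) only builds a deletion plan; .execute() performs the removal. Threading cache_dir through here keeps the scan and the deletion pointed at the same mounted volume. A minimal sketch of the same pattern, assuming huggingface_hub and the /data mount, that frees the largest cached repo in one go:

# Sketch only: delete every cached revision of the largest repo on the volume.
import huggingface_hub

cache_info = huggingface_hub.scan_cache_dir("/data")  # assumed mount point
largest = max(cache_info.repos, key=lambda r: r.size_on_disk)
shas = [rev.commit_hash for rev in largest.revisions]

strategy = cache_info.delete_revisions(*shas)  # plan, nothing deleted yet
print(f"Will free {strategy.expected_freed_size / 1e9:.2f} GB")
strategy.execute()  # actually removes the revisions from disk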
@@ -135,10 +135,11 @@ if __name__ == "__main__":
parser.add_argument(
"--token", help="Hugging Face Hub token.", required=True, type=str
)
parser.add_argument("--cache-dir", help="Hub cache path.", required=True, type=str)
args = parser.parse_args()
start = time.time()
- extension_per_model = cleanup_cache(args.token)
+ extension_per_model = cleanup_cache(args.token, args.cache_dir)
end = time.time()
print(f"Cache cleanup done in {end - start:.2f} s")
@@ -153,6 +154,7 @@ if __name__ == "__main__":
revision=revision,
token=args.token,
allow_patterns=f"*{extension_per_model[model_id]}",
+ cache_dir=args.cache_dir,
)
end = time.time()
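The matching cache_dir on snapshot_download makes the re-downloaded weights land on the same volume the cleanup just made room in, while allow_patterns limits each download to a single weight format. A minimal sketch of the call with illustrative values (the repo id, token, and file extension are placeholders, not taken from the diff):

# Sketch only: re-download one model into the mounted cache.
import huggingface_hub

huggingface_hub.snapshot_download(
    "org/model-name",                # placeholder repo id
    revision="main",                 # placeholder revision
    token="hf_...",                  # a real Hub token goes here
    allow_patterns="*.safetensors",  # one weight format, as in the script
    cache_dir="/data",               # assumed mount point
)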