From af546505ad8b1a23d8abf82860a8672a908eda20 Mon Sep 17 00:00:00 2001
From: Drew Paettie
Date: Sat, 12 Oct 2024 22:55:49 -0700
Subject: [PATCH] add gfx1100 support to AMD pytorch build

see https://github.com/huggingface/text-generation-inference/issues/2641
---
 Dockerfile_amd | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Dockerfile_amd b/Dockerfile_amd
index 4bb6407a..1873d92e 100644
--- a/Dockerfile_amd
+++ b/Dockerfile_amd
@@ -78,7 +78,7 @@ ARG PYTHON_VERSION='3.11.10'
 ARG TARGETPLATFORM
 ENV PATH=/opt/conda/bin:$PATH
 
-ARG PYTORCH_ROCM_ARCH="gfx90a;gfx942"
+ARG PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx1100"
 
 # TGI seem to require libssl.so.1.1 instead of libssl.so.3 so we can't use ubuntu 22.04. Ubuntu 20.04 has python==3.8, and TGI requires python>=3.9, hence the need for miniconda.
 # Install mamba
@@ -170,7 +170,7 @@ RUN --mount=type=bind,from=export_hipblaslt,src=/,target=/install \
     fi
 
 ARG BUILD_ENVIRONMENT=pytorch-linux-jammy-rocm6.2-py3.11
-ARG PYTORCH_ROCM_ARCH="gfx90a;gfx942"
+ARG PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx1100"
 
 # A commit to fix the output scaling factor issue in _scaled_mm
 # Not yet in 2.5.0-rc1