# syntax=docker/dockerfile:1
# Build-time configuration shared across stages (redeclare inside a stage to use it there).
# CUDA SM architectures to compile kernels for (";"-separated, "-real" = no PTX).
ARG cuda_arch_list="75-real;80-real;86-real;89-real;90-real"
# OpenMPI release built from source in the mpi-builder stage.
ARG ompi_version="4.1.7rc1"
# Cargo profile for the launcher binary: "release" (default) or "dev" for CI.
ARG build_type=release
# Set to "true" by the GitHub Actions pipeline to enable sccache-over-S3.
ARG is_gha_build=false
# CUDA dependent dependencies resolver stage
FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu24.04 AS cuda-builder

# Toolchain + headers needed to build OpenMPI, TensorRT-LLM and the Rust backend.
# apt cache mounts keep downloads out of the image layers, so no `rm -rf` needed.
# Fixed: use `apt-get` (stable CLI, hadolint DL3027) instead of `apt`, and skip
# recommended packages to keep the builder lean (DL3015).
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    cmake \
    curl \
    gcc-14 \
    g++-14 \
    git \
    git-lfs \
    lld \
    libssl-dev \
    libucx-dev \
    libasan8 \
    libubsan1 \
    ninja-build \
    pkg-config \
    pipx \
    python3 \
    python3-dev \
    python3-setuptools \
    tar \
    wget && \
    pipx ensurepath

# Install prefixes consumed by the build (tgi-builder) and copied into runtime images.
ENV TGI_INSTALL_PREFIX=/usr/local/tgi
ENV TENSORRT_INSTALL_PREFIX=/usr/local/tensorrt
# Install OpenMPI (CUDA-aware build, required by TensorRT-LLM's executor)
FROM cuda-builder AS mpi-builder
ARG ompi_version

ENV OMPI_TARBALL_FILENAME="openmpi-$ompi_version.tar.bz2"
# NOTE(review): the tarball is not checksum-verified; consider pinning a
# sha256 and checking it after download.
# Fixed: bare `make -j` spawns unbounded jobs and can OOM the build host —
# bound parallelism to the available cores instead.
RUN wget "https://download.open-mpi.org/release/open-mpi/v4.1/$OMPI_TARBALL_FILENAME" -P /opt/src && \
    mkdir -p /usr/src/mpi && \
    tar -xf "/opt/src/$OMPI_TARBALL_FILENAME" -C /usr/src/mpi --strip-components=1 && \
    cd /usr/src/mpi && \
    ./configure --prefix=/usr/local/mpi --with-cuda=/usr/local/cuda --with-slurm && \
    make -j"$(nproc)" all && \
    make install && \
    rm -rf "/opt/src/$OMPI_TARBALL_FILENAME"
# Install TensorRT
FROM cuda-builder AS trt-builder
# Fixed: set the execute bit at COPY time (BuildKit `--chmod`) instead of a
# follow-up `RUN chmod`, which duplicated the file into an extra layer.
COPY --chmod=0755 backends/trtllm/scripts/install_tensorrt.sh /opt/install_tensorrt.sh
RUN /opt/install_tensorrt.sh
# Build Backend
FROM cuda-builder AS tgi-builder
WORKDIR /usr/src/text-generation-inference

# Install the Rust toolchain via rustup, then sccache for compiler caching.
# The trees are made world-writable so later non-root build steps can use them.
# NOTE(review): rustup is fetched unpinned over `curl | bash`; consider pinning
# a toolchain version for reproducibility.
ENV PATH="/root/.cargo/bin:$PATH"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | bash -s -- -y && \
    chmod -R a+w /root/.rustup /root/.cargo && \
    cargo install sccache --locked
# Build actual TGI
ARG build_type
ARG cuda_arch_list
ARG is_gha_build="false"

# SCCACHE specific args - before finding a better, more generic, way...
# NOTE(review): values passed via --build-arg remain visible in
# `docker history`; these credentials should move to `RUN --mount=type=secret`.
ARG aws_access_key_id
ARG aws_secret_key_id
ARG aws_session_token
ARG sccache_bucket
ARG sccache_s3_key_prefix
# NOTE(review): "sscache" is a typo for "sccache"; kept as-is because callers
# pass `--build-arg sscache_region=...` under this exact name.
ARG sscache_region

ENV CMAKE_PREFIX_PATH="/usr/local/mpi:/usr/local/tensorrt:$CMAKE_PREFIX_PATH"
ENV CUDA_ARCH_LIST=${cuda_arch_list}
# Fixed: legacy space-separated `ENV key value` form is deprecated — use `=`.
ENV IS_GHA_BUILD=${is_gha_build}
ENV LD_LIBRARY_PATH="/usr/local/mpi/lib:$LD_LIBRARY_PATH"
ENV PKG_CONFIG_PATH="/usr/local/mpi/lib/pkgconfig:$PKG_CONFIG_PATH"
ENV USE_LLD_LINKER=ON

# Bring in the full build context plus the artifacts from the sibling stages.
COPY . .
COPY --from=trt-builder /usr/local/tensorrt /usr/local/tensorrt
COPY --from=mpi-builder /usr/local/mpi /usr/local/mpi
# Create the install layout, configure sccache (scripts/setup_sccache.py reads
# the AWS/S3 build args above), then build the launcher binary.
# Fixed: `${sccache_s3_key_prefix }` had a stray space before `}` — a bad
# substitution that aborts the shell at build time. Also collapsed the three
# mkdir calls into one `mkdir -p`, which is idempotent and creates parents.
RUN mkdir -p "$TGI_INSTALL_PREFIX/include" "$TGI_INSTALL_PREFIX/lib" && \
    python3 scripts/setup_sccache.py --is-gha-build ${is_gha_build} -k ${aws_access_key_id} -s ${aws_secret_key_id} -t ${aws_session_token} -b ${sccache_bucket} -r ${sscache_region} -p ${sccache_s3_key_prefix} && \
    RUSTC_WRAPPER=sccache CMAKE_INSTALL_PREFIX=$TGI_INSTALL_PREFIX cargo build --profile ${build_type} --package text-generation-backends-trtllm --bin text-generation-backends-trtllm
FROM nvidia/cuda:12.6.3-cudnn-runtime-ubuntu24.04 AS runtime
# Fixed: the original `rm -rf /var/lib/{apt,dpkg,cache,log}/` relied on bash
# brace expansion, but RUN uses /bin/sh (dash) where the braces are literal —
# nothing was actually deleted. Clean the apt lists explicitly instead
# (removing /var/lib/dpkg would corrupt the package database). Also switched
# to `apt-get` (hadolint DL3027) with `--no-install-recommends`.
# NOTE(review): transformers/tokenizers are unpinned; consider pinning versions.
RUN apt-get update && apt-get install -y --no-install-recommends \
        libucx0 \
        pipx \
        python3-dev \
        python3-minimal \
        python3-pip \
        python3-venv && \
    rm -rf /var/lib/apt/lists/* && \
    pipx ensurepath && \
    pipx install --include-deps transformers tokenizers

WORKDIR /usr/local/tgi/bin

ARG build_type

# Expose the pipx-managed transformers venv on PATH so its python is found first.
ENV PATH=/root/.local/share/pipx/venvs/transformers/bin/:$PATH
ENV LD_LIBRARY_PATH="/usr/local/tgi/lib:/usr/local/mpi/lib:/usr/local/tensorrt/lib:/usr/local/cuda/lib64/stubs:$LD_LIBRARY_PATH"
ENV TOKENIZERS_PARALLELISM=false
# Empty rsh agent so OpenMPI does not try to launch remote processes.
ENV OMPI_MCA_plm_rsh_agent=""

# Pull runtime artifacts from the builder stages.
COPY --from=mpi-builder /usr/local/mpi /usr/local/mpi
COPY --from=trt-builder /usr/local/tensorrt /usr/local/tensorrt
COPY --from=tgi-builder /usr/local/tgi /usr/local/tgi
COPY --from=tgi-builder /usr/src/text-generation-inference/target/release/text-generation-backends-trtllm /usr/local/tgi/bin/text-generation-launcher
FROM runtime

LABEL co.huggingface.vendor="Hugging Face Inc."
LABEL org.opencontainers.image.authors="hardware@hf.co"
# Fixed: the standard OCI annotation key is `org.opencontainers.image.title`,
# not `org.opencontainers.title`.
LABEL org.opencontainers.image.title="Text-Generation-Inference TensorRT-LLM Backend"

# Relative path resolves against WORKDIR /usr/local/tgi/bin set in `runtime`.
ENTRYPOINT ["./text-generation-launcher"]
CMD ["--executor-worker", "/usr/local/tgi/bin/executorWorker"]
# This is used only for the CI/CD
FROM nvidia/cuda:12.6.3-cudnn-runtime-ubuntu24.04 AS ci-runtime
# Same fixes as the `runtime` stage: brace expansion does not work under
# /bin/sh (dash), so the old `rm -rf /var/lib/{apt,dpkg,cache,log}/` deleted
# nothing — clean apt lists explicitly; use `apt-get` (DL3027).
# libasan8/libubsan1 are kept here so sanitizer-instrumented debug builds run.
RUN apt-get update && apt-get install -y --no-install-recommends \
        libasan8 \
        libubsan1 \
        libucx0 \
        pipx \
        python3-dev \
        python3-minimal \
        python3-pip \
        python3-venv && \
    rm -rf /var/lib/apt/lists/* && \
    pipx ensurepath && \
    pipx install --include-deps transformers tokenizers

WORKDIR /usr/local/tgi/bin

# Expose the pipx-managed transformers venv on PATH so its python is found first.
ENV PATH=/root/.local/share/pipx/venvs/transformers/bin/:$PATH
ENV LD_LIBRARY_PATH="/usr/local/tgi/lib:/usr/local/mpi/lib:/usr/local/tensorrt/lib:/usr/local/cuda/lib64/stubs:$LD_LIBRARY_PATH"
ENV TOKENIZERS_PARALLELISM=false
# Empty rsh agent so OpenMPI does not try to launch remote processes.
ENV OMPI_MCA_plm_rsh_agent=""

COPY --from=mpi-builder /usr/local/mpi /usr/local/mpi
COPY --from=trt-builder /usr/local/tensorrt /usr/local/tensorrt
COPY --from=tgi-builder /usr/local/tgi /usr/local/tgi

# Basically we copy from target/debug instead of target/release
COPY --from=tgi-builder /usr/src/text-generation-inference/target/debug/text-generation-backends-trtllm /usr/local/tgi/bin/text-generation-launcher
FROM ci-runtime

LABEL co.huggingface.vendor="Hugging Face Inc."
LABEL org.opencontainers.image.authors="hardware@hf.co"
# Fixed: the standard OCI annotation key is `org.opencontainers.image.title`,
# not `org.opencontainers.title`.
# NOTE(review): no ENTRYPOINT is set in this CI image (nor in ci-runtime);
# confirm the CI harness invokes the launcher explicitly.
LABEL org.opencontainers.image.title="Text-Generation-Inference TensorRT-LLM Backend CI/CD"