# Rust builder
FROM lukemathwalker/cargo-chef:latest-rust-1.79 AS chef
WORKDIR /usr/src
ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
FROM chef AS planner
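# cargo-chef computes a dependency "recipe" from the manifests copied below,
# so the builder stage can cache dependency compilation separately from the
# frequently changing application sources.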
COPY Cargo.lock Cargo.lock
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY backends backends
COPY launcher launcher
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \
unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \
unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \
rm -f $PROTOC_ZIP
COPY --from=planner /usr/src/recipe.json recipe.json
RUN cargo chef cook --profile release-opt --recipe-path recipe.json
ARG GIT_SHA
ARG DOCKER_LABEL
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY backends backends
COPY launcher launcher
RUN cargo build --profile release-opt
# Python builder
# Adapted from: https://github.com/pytorch/pytorch/blob/master/Dockerfile
FROM nvidia/cuda:12.1.0-devel-ubuntu22.04 AS pytorch-install
# NOTE: When updating the PyTorch version, be sure to remove the `pip install nvidia-nccl-cu12==2.22.3` below in this Dockerfile. Context: https://github.com/huggingface/text-generation-inference/pull/2099
ARG PYTORCH_VERSION=2.4.0
ARG PYTHON_VERSION=3.10
# Keep in sync with `server/pyproject.toml`
ARG CUDA_VERSION=12.1
ARG MAMBA_VERSION=24.3.0-0
ARG CUDA_CHANNEL=nvidia
ARG INSTALL_CHANNEL=pytorch
# Automatically set by buildx
ARG TARGETPLATFORM
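# For illustration only: TARGETPLATFORM is filled in automatically by buildx,
# e.g. for a cross-platform build (the image tag below is just a placeholder):
#
#   docker buildx build --platform linux/amd64 -t text-generation-inference:dev .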
ENV PATH /opt/conda/bin:$PATH
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
ccache \
curl \
git && \
rm -rf /var/lib/apt/lists/*
# Install conda
# translating Docker's TARGETPLATFORM into mamba arches
RUN case ${TARGETPLATFORM} in \
"linux/arm64") MAMBA_ARCH=aarch64 ;; \
*) MAMBA_ARCH=x86_64 ;; \
esac && \
curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh"
RUN chmod +x ~/mambaforge.sh && \
bash ~/mambaforge.sh -b -p /opt/conda && \
rm ~/mambaforge.sh
# Install pytorch
# On arm64 we exit with an error code
RUN case ${TARGETPLATFORM} in \
"linux/arm64") exit 1 ;; \
*) /opt/conda/bin/conda update -y conda && \
/opt/conda/bin/conda install -c "${INSTALL_CHANNEL}" -c "${CUDA_CHANNEL}" -y "python=${PYTHON_VERSION}" "pytorch=$PYTORCH_VERSION" "pytorch-cuda=$(echo $CUDA_VERSION | cut -d'.' -f 1-2)" ;; \
esac && \
/opt/conda/bin/conda clean -ya
# CUDA kernels builder image
FROM pytorch-install AS kernel-builder
ARG MAX_JOBS=8
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ninja-build cmake \
&& rm -rf /var/lib/apt/lists/*
# Build Flash Attention CUDA kernels
FROM kernel-builder AS flash-att-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att Makefile
# Build specific version of flash attention
RUN make build-flash-attention
# Build Flash Attention v2 CUDA kernels
FROM kernel-builder AS flash-att-v2-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att-v2 Makefile
# Build specific version of flash attention v2
RUN make build-flash-attention-v2-cuda
# Build Transformers exllama kernels
FROM kernel-builder AS exllama-kernels-builder
WORKDIR /usr/src
COPY server/exllama_kernels/ .
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" python setup.py build
# Build Transformers exllamav2 kernels
FROM kernel-builder AS exllamav2-kernels-builder
WORKDIR /usr/src
COPY server/Makefile-exllamav2/ Makefile
# Build specific version of exllamav2
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" make build-exllamav2
# Build Transformers awq kernels
FROM kernel-builder AS awq-kernels-builder
WORKDIR /usr/src
COPY server/Makefile-awq Makefile
# Build specific version of awq
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" make build-awq
# Build eetq kernels
FROM kernel-builder AS eetq-kernels-builder
WORKDIR /usr/src
COPY server/Makefile-eetq Makefile
# Build specific version of eetq
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" make build-eetq
# Build Lorax Punica kernels
FROM kernel-builder AS lorax-punica-builder
WORKDIR /usr/src
COPY server/Makefile-lorax-punica Makefile
# Build specific version of lorax punica kernels
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" make build-lorax-punica
# Build Transformers CUDA kernels
FROM kernel-builder AS custom-kernels-builder
WORKDIR /usr/src
COPY server/custom_kernels/ .
# Build specific version of transformers
RUN python setup.py build
# Build FBGEMM CUDA kernels
FROM kernel-builder AS fbgemm-builder
WORKDIR /usr/src
COPY server/Makefile-fbgemm Makefile
RUN make build-fbgemm
# Build vllm CUDA kernels
FROM kernel-builder AS vllm-builder
WORKDIR /usr/src
ENV TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 8.9 9.0+PTX"
COPY server/Makefile-vllm Makefile
# Build specific version of vllm
RUN make build-vllm-cuda
# Build mamba kernels
FROM kernel-builder AS mamba-builder
WORKDIR /usr/src
COPY server/Makefile-selective-scan Makefile
RUN make build-all
# Text Generation Inference base image
FROM nvidia/cuda:12.1.0-base-ubuntu22.04 AS base
# Conda env
ENV PATH=/opt/conda/bin:$PATH \
CONDA_PREFIX=/opt/conda
# Text Generation Inference base env
ENV HF_HOME=/data \
HF_HUB_ENABLE_HF_TRANSFER=1 \
PORT=80
WORKDIR /usr/src
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libssl-dev \
ca-certificates \
make \
curl \
git \
&& rm -rf /var/lib/apt/lists/*
# Copy conda with PyTorch installed
COPY --from=pytorch-install /opt/conda /opt/conda
# Copy build artifacts from flash attention builder
COPY --from=flash-att-builder /usr/src/flash-attention/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
COPY --from=flash-att-builder /usr/src/flash-attention/csrc/layer_norm/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
COPY --from=flash-att-builder /usr/src/flash-attention/csrc/rotary/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from flash attention v2 builder
COPY --from=flash-att-v2-builder /opt/conda/lib/python3.10/site-packages/flash_attn_2_cuda.cpython-310-x86_64-linux-gnu.so /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from custom kernels builder
COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from exllama kernels builder
COPY --from=exllama-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from exllamav2 kernels builder
COPY --from=exllamav2-kernels-builder /usr/src/exllamav2/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from awq kernels builder
COPY --from=awq-kernels-builder /usr/src/llm-awq/awq/kernels/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from eetq kernels builder
COPY --from=eetq-kernels-builder /usr/src/eetq/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from lorax punica kernels builder
COPY --from=lorax-punica-builder /usr/src/lorax-punica/server/punica_kernels/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from fbgemm builder
COPY --from=fbgemm-builder /usr/src/fbgemm/fbgemm_gpu/_skbuild/linux-x86_64-3.10/cmake-install /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from vllm builder
COPY --from=vllm-builder /usr/src/vllm/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from mamba builder
COPY --from=mamba-builder /usr/src/mamba/build/lib.linux-x86_64-cpython-310/ /opt/conda/lib/python3.10/site-packages
COPY --from=mamba-builder /usr/src/causal-conv1d/build/lib.linux-x86_64-cpython-310/ /opt/conda/lib/python3.10/site-packages
# Install flash-attention dependencies
RUN pip install einops --no-cache-dir
# Install server
COPY proto proto
COPY server server
COPY server/Makefile server/Makefile
RUN cd server && \
make gen-server && \
pip install -r requirements_cuda.txt && \
pip install ".[bnb, accelerate, marlin, quantize, peft, outlines]" --no-cache-dir && \
pip install nvidia-nccl-cu12==2.22.3
ENV LD_PRELOAD=/opt/conda/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2
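# Preloading the pip-provided libnccl above is meant to take precedence over
# the NCCL bundled with the conda PyTorch install (see the NOTE next to
# PYTORCH_VERSION earlier in this file).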
ENV EXLLAMA_NO_FLASH_ATTN=1
# Deps before the binaries
# The binaries change on every build given we burn the SHA into them
# The deps change less often.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
build-essential \
g++ \
&& rm -rf /var/lib/apt/lists/*
# Install benchmarker
COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark
# Install router
COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/local/bin/text-generation-router
# Install launcher
COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher
# AWS Sagemaker compatible image
FROM base AS sagemaker
COPY sagemaker-entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh
ENTRYPOINT [ "./entrypoint.sh" ]
# Final image
FROM base
COPY ./tgi-entrypoint.sh /tgi-entrypoint.sh
RUN chmod +x /tgi-entrypoint.sh
ENTRYPOINT [ "/tgi-entrypoint.sh" ]
# CMD ["--json-output"]