chore: prepare 2.4.1 release (#2773)

* chore: prepare 2.4.1 release

* fix tests

* fmt
OlivierDehaene committed 2024-11-22 18:26:15 +01:00 (committed by GitHub)
parent e87893d38e, commit 780531ec77
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
22 changed files with 700 additions and 410 deletions

Cargo.lock (generated): 837 lines changed. File diff suppressed because it is too large.


@@ -20,7 +20,7 @@ default-members = [
 resolver = "2"
 [workspace.package]
-version = "2.4.1-dev0"
+version = "2.4.2-dev0"
 edition = "2021"
 authors = ["Olivier Dehaene"]
 homepage = "https://github.com/huggingface/text-generation-inference"


@@ -84,7 +84,7 @@ model=HuggingFaceH4/zephyr-7b-beta
 volume=$PWD/data
 docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
-ghcr.io/huggingface/text-generation-inference:2.4.0 --model-id $model
+ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model
 ```
 And then you can make requests like
@@ -121,7 +121,7 @@ curl localhost:8080/v1/chat/completions \
 **Note:** To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 12.2 or higher. For running the Docker container on a machine with no GPUs or CUDA support, it is enough to remove the `--gpus all` flag and add `--disable-custom-kernels`, please note CPU is not the intended platform for this project, so performance might be subpar.
-**Note:** TGI supports AMD Instinct MI210 and MI250 GPUs. Details can be found in the [Supported Hardware documentation](https://huggingface.co/docs/text-generation-inference/supported_models#supported-hardware). To use AMD GPUs, please use `docker run --device /dev/kfd --device /dev/dri --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.0-rocm --model-id $model` instead of the command above.
+**Note:** TGI supports AMD Instinct MI210 and MI250 GPUs. Details can be found in the [Supported Hardware documentation](https://huggingface.co/docs/text-generation-inference/supported_models#supported-hardware). To use AMD GPUs, please use `docker run --device /dev/kfd --device /dev/dri --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1-rocm --model-id $model` instead of the command above.
 To see all options to serve your models (in the [code](https://github.com/huggingface/text-generation-inference/blob/main/launcher/src/main.rs) or in the cli):
 ```
@@ -151,7 +151,7 @@ model=meta-llama/Meta-Llama-3.1-8B-Instruct
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 token=<your cli READ token>
-docker run --gpus all --shm-size 1g -e HF_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.0 --model-id $model
+docker run --gpus all --shm-size 1g -e HF_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model
 ```
 ### A note on Shared Memory (shm)


@@ -147,7 +147,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     tracing::info!("Downloading tokenizer");
     // Parse Huggingface hub token
-    let auth_token = std::env::var("HF_TOKEN")
+    let token = std::env::var("HF_TOKEN")
         .or_else(|_| std::env::var("HUGGING_FACE_HUB_TOKEN"))
         .ok();
@@ -155,7 +155,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     // We need to download it outside of the Tokio runtime
     let params = FromPretrainedParameters {
         revision,
-        auth_token,
+        token,
         ..Default::default()
     };
     Tokenizer::from_pretrained(tokenizer_name.clone(), Some(params)).unwrap()
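
The two hunks above rename the hub credential from `auth_token` to `token`, matching the `FromPretrainedParameters` field name in the `tokenizers` crate version this release appears to pin. For readers skimming the diff, here is a minimal, self-contained sketch of the resulting pattern; the helper name `load_tokenizer` is hypothetical and not part of this commit.

```rust
use tokenizers::{FromPretrainedParameters, Tokenizer};

// Hypothetical helper mirroring the hunks above: read the Hugging Face Hub
// credential from the environment and hand it to the tokenizers crate.
// `Tokenizer::from_pretrained` requires the crate's `http` feature.
fn load_tokenizer(tokenizer_name: &str, revision: String) -> Tokenizer {
    // Prefer HF_TOKEN, fall back to the legacy HUGGING_FACE_HUB_TOKEN variable.
    let token = std::env::var("HF_TOKEN")
        .or_else(|_| std::env::var("HUGGING_FACE_HUB_TOKEN"))
        .ok();
    let params = FromPretrainedParameters {
        revision,
        token,
        ..Default::default()
    };
    Tokenizer::from_pretrained(tokenizer_name, Some(params))
        .expect("failed to download tokenizer")
}
```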


@@ -10,7 +10,7 @@
 "name": "Apache 2.0",
 "url": "https://www.apache.org/licenses/LICENSE-2.0"
 },
-"version": "2.4.1-dev0"
+"version": "2.4.2-dev0"
 },
 "paths": {
 "/": {


@@ -19,6 +19,6 @@ docker run --gpus all \
 --shm-size 1g \
 -e HF_TOKEN=$token \
 -p 8080:80 \
--v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.0 \
+-v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 \
 --model-id $model
 ```


@@ -19,7 +19,7 @@ bitsandbytes is a library used to apply 8-bit and 4-bit quantization to models.
 In TGI, you can use 8-bit quantization by adding `--quantize bitsandbytes` like below 👇
 ```bash
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.0 --model-id $model --quantize bitsandbytes
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model --quantize bitsandbytes
 ```
 4-bit quantization is also possible with bitsandbytes. You can choose one of the following 4-bit data types: 4-bit float (`fp4`), or 4-bit `NormalFloat` (`nf4`). These data types were introduced in the context of parameter-efficient fine-tuning, but you can apply them for inference by automatically converting the model weights on load.
@@ -27,7 +27,7 @@ docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingf
 In TGI, you can use 4-bit quantization by adding `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` like below 👇
 ```bash
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.0 --model-id $model --quantize bitsandbytes-nf4
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model --quantize bitsandbytes-nf4
 ```
 You can get more information about 8-bit quantization by reading this [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), and 4-bit quantization by reading [this blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes).
@@ -48,7 +48,7 @@ $$({\hat{W}_{l}}^{*} = argmin_{\hat{W_{l}}} ||W_{l}X-\hat{W}_{l}X||^{2}_{2})$$
 TGI allows you to both run an already GPTQ quantized model (see available models [here](https://huggingface.co/models?search=gptq)) or quantize a model of your choice using quantization script. You can run a quantized model by simply passing --quantize like below 👇
 ```bash
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.0 --model-id $model --quantize gptq
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model --quantize gptq
 ```
 Note that TGI's GPTQ implementation doesn't use [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) under the hood. However, models quantized using AutoGPTQ or Optimum can still be served by TGI.


@@ -11,7 +11,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
 docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
 --device=/dev/kfd --device=/dev/dri --group-add video \
 --ipc=host --shm-size 256g --net host -v $volume:/data \
-ghcr.io/huggingface/text-generation-inference:2.4.0-rocm \
+ghcr.io/huggingface/text-generation-inference:2.4.1-rocm \
 --model-id $model
 ```


@@ -12,7 +12,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
 docker run --rm --privileged --cap-add=sys_nice \
 --device=/dev/dri \
 --ipc=host --shm-size 1g --net host -v $volume:/data \
-ghcr.io/huggingface/text-generation-inference:2.4.0-intel-xpu \
+ghcr.io/huggingface/text-generation-inference:2.4.1-intel-xpu \
 --model-id $model --cuda-graphs 0
 ```
@@ -29,7 +29,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
 docker run --rm --privileged --cap-add=sys_nice \
 --device=/dev/dri \
 --ipc=host --shm-size 1g --net host -v $volume:/data \
-ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu \
+ghcr.io/huggingface/text-generation-inference:2.4.1-intel-cpu \
 --model-id $model --cuda-graphs 0
 ```


@@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 docker run --gpus all --shm-size 64g -p 8080:80 -v $volume:/data \
-ghcr.io/huggingface/text-generation-inference:2.4.0 \
+ghcr.io/huggingface/text-generation-inference:2.4.1 \
 --model-id $model
 ```


@@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
-ghcr.io/huggingface/text-generation-inference:2.4.0 \
+ghcr.io/huggingface/text-generation-inference:2.4.1 \
 --model-id $model
 ```
@@ -96,7 +96,7 @@ curl 127.0.0.1:8080/generate \
 To see all possible deploy flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more.
 ```bash
-docker run ghcr.io/huggingface/text-generation-inference:2.4.0 --help
+docker run ghcr.io/huggingface/text-generation-inference:2.4.1 --help
 ```
 </Tip>


@@ -163,7 +163,7 @@ hub = {
 # create Hugging Face Model Class
 huggingface_model = HuggingFaceModel(
-    image_uri=get_huggingface_llm_image_uri("huggingface",version="2.4.0"),
+    image_uri=get_huggingface_llm_image_uri("huggingface",version="2.4.1"),
     env=hub,
     role=role,
 )


@@ -17,7 +17,7 @@
 "id": "",
 "model": "Qwen/Qwen2-VL-7B-Instruct",
 "object": "chat.completion",
-"system_fingerprint": "2.4.1-dev0-native",
+"system_fingerprint": "2.4.2-dev0-native",
 "usage": {
 "completion_tokens": 58,
 "prompt_tokens": 349,


@@ -15,6 +15,6 @@
 "id": "",
 "model": "Qwen/Qwen2-VL-7B-Instruct",
 "object": "chat.completion.chunk",
-"system_fingerprint": "2.4.1-dev0-native",
+"system_fingerprint": "2.4.2-dev0-native",
 "usage": null
 }


@@ -18,7 +18,7 @@
 "id": "",
 "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
 "object": "chat.completion",
-"system_fingerprint": "2.4.1-dev0-native",
+"system_fingerprint": "2.4.2-dev0-native",
 "usage": {
 "completion_tokens": 10,
 "prompt_tokens": 50,
@@ -44,7 +44,7 @@
 "id": "",
 "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
 "object": "chat.completion",
-"system_fingerprint": "2.4.1-dev0-native",
+"system_fingerprint": "2.4.2-dev0-native",
 "usage": {
 "completion_tokens": 10,
 "prompt_tokens": 50,
@@ -70,7 +70,7 @@
 "id": "",
 "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
 "object": "chat.completion",
-"system_fingerprint": "2.4.1-dev0-native",
+"system_fingerprint": "2.4.2-dev0-native",
 "usage": {
 "completion_tokens": 10,
 "prompt_tokens": 50,
@@ -96,7 +96,7 @@
 "id": "",
 "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
 "object": "chat.completion",
-"system_fingerprint": "2.4.1-dev0-native",
+"system_fingerprint": "2.4.2-dev0-native",
 "usage": {
 "completion_tokens": 10,
 "prompt_tokens": 50,


@@ -17,7 +17,7 @@
 "id": "",
 "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
 "object": "chat.completion",
-"system_fingerprint": "2.4.1-dev0-native",
+"system_fingerprint": "2.4.2-dev0-native",
 "usage": {
 "completion_tokens": 10,
 "prompt_tokens": 50,


@@ -17,7 +17,7 @@
 "id": "",
 "model": "meta-llama/Llama-3.1-8B-Instruct",
 "object": "chat.completion",
-"system_fingerprint": "2.4.1-dev0-native",
+"system_fingerprint": "2.4.2-dev0-native",
 "usage": {
 "completion_tokens": 23,
 "prompt_tokens": 604,


@@ -15,6 +15,6 @@
 "id": "",
 "model": "meta-llama/Llama-3.1-8B-Instruct",
 "object": "chat.completion.chunk",
-"system_fingerprint": "2.4.1-dev0-native",
+"system_fingerprint": "2.4.2-dev0-native",
 "usage": null
 }


@@ -15,6 +15,6 @@
 "id": "",
 "model": "meta-llama/Llama-3.1-8B-Instruct",
 "object": "chat.completion.chunk",
-"system_fingerprint": "2.4.1-dev0-native",
+"system_fingerprint": "2.4.2-dev0-native",
 "usage": null
 }


@@ -24,10 +24,12 @@ class InferenceEngineRunner:
 class TGIDockerRunner(InferenceEngineRunner):
-    def __init__(self,
-                 model: str,
-                 image: str = "ghcr.io/huggingface/text-generation-inference:latest",
-                 volumes=None):
+    def __init__(
+        self,
+        model: str,
+        image: str = "ghcr.io/huggingface/text-generation-inference:latest",
+        volumes=None,
+    ):
         super().__init__(model)
         if volumes is None:
             volumes = []
@@ -43,13 +45,15 @@ class TGIDockerRunner(InferenceEngineRunner):
         volumes = {}
         for v in self.volumes:
             volumes[v[0]] = {"bind": v[1], "mode": "rw"}
-        self.container = run_docker(self.image, params,
-                                    "Connected",
-                                    "ERROR",
-                                    volumes=volumes,
-                                    gpus=gpus,
-                                    ports={"8080/tcp": 8080}
-                                    )
+        self.container = run_docker(
+            self.image,
+            params,
+            "Connected",
+            "ERROR",
+            volumes=volumes,
+            gpus=gpus,
+            ports={"8080/tcp": 8080},
+        )

     def stop(self):
         if self.container:
@@ -57,9 +61,11 @@ class TGIDockerRunner(InferenceEngineRunner):
 class BenchmarkRunner:
-    def __init__(self,
-                 image: str = "ghcr.io/huggingface/text-generation-inference-benchmark:latest",
-                 volumes: List[Tuple[str, str]] = None):
+    def __init__(
+        self,
+        image: str = "ghcr.io/huggingface/text-generation-inference-benchmark:latest",
+        volumes: List[Tuple[str, str]] = None,
+    ):
         if volumes is None:
             volumes = []
         self.container = None
@@ -70,26 +76,41 @@ class BenchmarkRunner:
         params = "text-generation-inference-benchmark"
         for p in parameters:
             params += f" --{p[0]} {str(p[1])}" if p[1] is not None else f" --{p[0]}"
-        logger.info(f"Running text-generation-inference-benchmarks with parameters: {params}")
+        logger.info(
+            f"Running text-generation-inference-benchmarks with parameters: {params}"
+        )
         volumes = {}
         for v in self.volumes:
             volumes[v[0]] = {"bind": v[1], "mode": "rw"}
-        self.container = run_docker(self.image, params,
-                                    "Benchmark finished",
-                                    "Fatal:",
-                                    volumes=volumes,
-                                    extra_env={"RUST_LOG": "text_generation_inference_benchmark=info",
-                                               "RUST_BACKTRACE": "full"},
-                                    network_mode=network_mode)
+        self.container = run_docker(
+            self.image,
+            params,
+            "Benchmark finished",
+            "Fatal:",
+            volumes=volumes,
+            extra_env={
+                "RUST_LOG": "text_generation_inference_benchmark=info",
+                "RUST_BACKTRACE": "full",
+            },
+            network_mode=network_mode,
+        )

     def stop(self):
         if self.container:
             self.container.stop()


-def run_docker(image: str, args: str, success_sentinel: str,
-               error_sentinel: str, ports: Dict[str, int] = None, volumes=None, network_mode: str = "bridge",
-               gpus: int = 0, extra_env: Dict[str, str] = None) -> Container:
+def run_docker(
+    image: str,
+    args: str,
+    success_sentinel: str,
+    error_sentinel: str,
+    ports: Dict[str, int] = None,
+    volumes=None,
+    network_mode: str = "bridge",
+    gpus: int = 0,
+    extra_env: Dict[str, str] = None,
+) -> Container:
     if ports is None:
         ports = {}
     if volumes is None:
@@ -98,21 +119,24 @@ def run_docker(image: str, args: str, success_sentinel: str,
         extra_env = {}
     client = docker.from_env(timeout=300)
     # retrieve the GPU devices from CUDA_VISIBLE_DEVICES
-    devices = [f"{i}" for i in
-               range(get_num_gpus())][:gpus]
+    devices = [f"{i}" for i in range(get_num_gpus())][:gpus]
     environment = {"HF_TOKEN": os.environ.get("HF_TOKEN")}
     environment.update(extra_env)
-    container = client.containers.run(image, args,
-                                      detach=True,
-                                      device_requests=[
-                                          docker.types.DeviceRequest(device_ids=devices,
-                                                                     capabilities=[['gpu']])
-                                      ] if gpus > 0 else None,
-                                      volumes=volumes,
-                                      shm_size="1g",
-                                      ports=ports,
-                                      network_mode=network_mode,
-                                      environment=environment, )
+    container = client.containers.run(
+        image,
+        args,
+        detach=True,
+        device_requests=(
+            [docker.types.DeviceRequest(device_ids=devices, capabilities=[["gpu"]])]
+            if gpus > 0
+            else None
+        ),
+        volumes=volumes,
+        shm_size="1g",
+        ports=ports,
+        network_mode=network_mode,
+        environment=environment,
+    )
     for line in container.logs(stream=True):
         print(line.decode("utf-8"), end="")
         if success_sentinel.encode("utf-8") in line:
@@ -126,14 +150,14 @@ def run_docker(image: str, args: str, success_sentinel: str,
 def get_gpu_names() -> str:
     gpus = GPUtil.getGPUs()
     if len(gpus) == 0:
-        return ''
+        return ""
     return f'{len(gpus)}x{gpus[0].name if gpus else "No GPU available"}'


 def get_gpu_name() -> str:
     gpus = GPUtil.getGPUs()
     if len(gpus) == 0:
-        return ''
+        return ""
     return gpus[0].name
@@ -147,29 +171,29 @@ def build_df(model: str, data_files: dict[str, str]) -> pd.DataFrame:
     created_at = now.isoformat() # '2024-10-02T11:53:17.026215+00:00'
     # Load the results
     for key, filename in data_files.items():
-        with open(filename, 'r') as f:
+        with open(filename, "r") as f:
             data = json.load(f)
-            for result in data['results']:
+            for result in data["results"]:
                 entry = result
-                [config] = pd.json_normalize(result['config']).to_dict(orient='records')
+                [config] = pd.json_normalize(result["config"]).to_dict(orient="records")
                 entry.update(config)
-                entry['engine'] = data['config']['meta']['engine']
-                entry['tp'] = data['config']['meta']['tp']
-                entry['version'] = data['config']['meta']['version']
-                entry['model'] = model
-                entry['created_at'] = created_at
-                del entry['config']
+                entry["engine"] = data["config"]["meta"]["engine"]
+                entry["tp"] = data["config"]["meta"]["tp"]
+                entry["version"] = data["config"]["meta"]["version"]
+                entry["model"] = model
+                entry["created_at"] = created_at
+                del entry["config"]
                 df = pd.concat([df, pd.DataFrame(entry, index=[0])])
     return df


 def main(sha, results_file):
-    results_dir = 'results'
+    results_dir = "results"
     # get absolute path
     results_dir = os.path.join(os.path.dirname(__file__), results_dir)
-    logger.info('Starting benchmark')
+    logger.info("Starting benchmark")
     models = [
-        ('meta-llama/Llama-3.1-8B-Instruct', 1),
+        ("meta-llama/Llama-3.1-8B-Instruct", 1),
         # ('meta-llama/Llama-3.1-70B-Instruct', 4),
         # ('mistralai/Mixtral-8x7B-Instruct-v0.1', 2),
     ]
@@ -177,31 +201,42 @@ def main(sha, results_file):
     for model in models:
         tgi_runner = TGIDockerRunner(model[0])
         # create results directory
-        model_dir = os.path.join(results_dir, f'{model[0].replace("/", "_").replace(".", "_")}')
+        model_dir = os.path.join(
+            results_dir, f'{model[0].replace("/", "_").replace(".", "_")}'
+        )
         os.makedirs(model_dir, exist_ok=True)
         runner = BenchmarkRunner(
-            volumes=[(model_dir, '/opt/text-generation-inference-benchmark/results')]
+            volumes=[(model_dir, "/opt/text-generation-inference-benchmark/results")]
         )
         try:
-            tgi_runner.run([('max-concurrent-requests', 512)], gpus=model[1])
-            logger.info(f'TGI started for model {model[0]}')
+            tgi_runner.run([("max-concurrent-requests", 512)], gpus=model[1])
+            logger.info(f"TGI started for model {model[0]}")
             parameters = [
-                ('tokenizer-name', model[0]),
-                ('max-vus', 800),
-                ('url', 'http://localhost:8080'),
-                ('duration', '120s'),
-                ('warmup', '30s'),
-                ('benchmark-kind', 'rate'),
-                ('prompt-options', 'num_tokens=200,max_tokens=220,min_tokens=180,variance=10'),
-                ('decode-options', 'num_tokens=200,max_tokens=220,min_tokens=180,variance=10'),
-                ('extra-meta', f'"engine=TGI,tp={model[1]},version={sha},gpu={get_gpu_name()}"'),
-                ('no-console', None)
+                ("tokenizer-name", model[0]),
+                ("max-vus", 800),
+                ("url", "http://localhost:8080"),
+                ("duration", "120s"),
+                ("warmup", "30s"),
+                ("benchmark-kind", "rate"),
+                (
+                    "prompt-options",
+                    "num_tokens=200,max_tokens=220,min_tokens=180,variance=10",
+                ),
+                (
+                    "decode-options",
+                    "num_tokens=200,max_tokens=220,min_tokens=180,variance=10",
+                ),
+                (
+                    "extra-meta",
+                    f'"engine=TGI,tp={model[1]},version={sha},gpu={get_gpu_name()}"',
+                ),
+                ("no-console", None),
             ]
-            rates = [('rates', f'{r / 10.}') for r in list(range(8, 248, 8))]
+            rates = [("rates", f"{r / 10.}") for r in list(range(8, 248, 8))]
             parameters.extend(rates)
-            runner.run(parameters, f'container:{tgi_runner.container.id}')
+            runner.run(parameters, f"container:{tgi_runner.container.id}")
         except Exception as e:
-            logger.error(f'Error running benchmark for model {model[0]}: {e}')
+            logger.error(f"Error running benchmark for model {model[0]}: {e}")
             # print the stack trace
             print(traceback.format_exc())
             success = False
@@ -209,33 +244,45 @@ def main(sha, results_file):
             tgi_runner.stop()
             runner.stop()

     if not success:
-        logger.error('Some benchmarks failed')
+        logger.error("Some benchmarks failed")
        exit(1)
     df = pd.DataFrame()
     # list recursively directories
-    directories = [f'{results_dir}/{d}' for d in os.listdir(results_dir) if os.path.isdir(f'{results_dir}/{d}')]
-    logger.info(f'Found result directories: {directories}')
+    directories = [
+        f"{results_dir}/{d}"
+        for d in os.listdir(results_dir)
+        if os.path.isdir(f"{results_dir}/{d}")
+    ]
+    logger.info(f"Found result directories: {directories}")
     for directory in directories:
         data_files = {}
         for filename in os.listdir(directory):
-            if filename.endswith('.json'):
-                data_files[filename.split('.')[-2]] = f'{directory}/{filename}'
-        logger.info(f'Processing directory {directory}')
-        df = pd.concat([df, build_df(directory.split('/')[-1], data_files)])
-    df['device'] = get_gpu_name()
-    df['error_rate'] = df['failed_requests'] / (df['failed_requests'] + df['successful_requests']) * 100.0
+            if filename.endswith(".json"):
+                data_files[filename.split(".")[-2]] = f"{directory}/{filename}"
+        logger.info(f"Processing directory {directory}")
+        df = pd.concat([df, build_df(directory.split("/")[-1], data_files)])
+    df["device"] = get_gpu_name()
+    df["error_rate"] = (
+        df["failed_requests"]
+        / (df["failed_requests"] + df["successful_requests"])
+        * 100.0
+    )
     df.to_parquet(results_file)


 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument("--sha", help="SHA of the commit to add to the results", required=True)
-    parser.add_argument("--results-file",
-                        help="The file where to store the results, can be a local file or a s3 path")
+    parser.add_argument(
+        "--sha", help="SHA of the commit to add to the results", required=True
+    )
+    parser.add_argument(
+        "--results-file",
+        help="The file where to store the results, can be a local file or a s3 path",
+    )
     args = parser.parse_args()
     if args.results_file is None:
-        results_file = f'{args.sha}.parquet'
+        results_file = f"{args.sha}.parquet"
     else:
         results_file = args.results_file


@@ -803,7 +803,7 @@ mod tests {
         let tools: Vec<Tool> = serde_json::from_str(&tools_string).unwrap();
         let tool_prompt = "This default prompt will be used".to_string();
         let tools_and_prompt = Some((tools, tool_prompt));
-        let result = ct.apply(None, msgs, tools_and_prompt);
+        let result = ct.apply(msgs, tools_and_prompt);
         let expected = "<s>[INST] I'd like to show off how chat templating works! [/INST]Great! How can I help you today?</s> [INST] Just testing\n---\n[{\"type\":\"function\",\"function\":{\"description\":\"Get the current weather\",\"name\":\"get_current_weather\",\"arguments\":{\"properties\":{\"format\":{\"description\":\"The temperature unit to use. Infer this from the users location.\",\"enum\":[\"celsius\",\"fahrenheit\"],\"type\":\"string\"},\"location\":{\"description\":\"The city and state, e.g. San Francisco, CA\",\"type\":\"string\"}},\"required\":[\"location\",\"format\"],\"type\":\"object\"}}}]\nThis default prompt will be used [/INST]".to_string();
         assert_eq!(result.unwrap(), expected);
     }
@@ -837,7 +837,7 @@ mod tests {
         let tools: Vec<Tool> = serde_json::from_str(&tools_string).unwrap();
         let tool_prompt = "This default prompt will be used".to_string();
         let tools_and_prompt = Some((tools, tool_prompt));
-        let result = ct.apply(None, msgs, tools_and_prompt);
+        let result = ct.apply(msgs, tools_and_prompt);
         let expected = "<s><|start_header_id|>system<|end_header_id|>\n\nEnvironment: ipython\nCutting Knowledge Date: December 2023\nToday Date: 26 Jul 2024\n\nYoure a helpful assistant! Answer the users question best you can.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nGiven the following functions, please respond with a JSON for a function call with its proper arguments that best answers the given prompt.\n\nRespond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.Do not use variables.\n\n{\n \"function\": {\n \"arguments\": {\n \"properties\": {\n \"format\": {\n \"description\": \"The temperature unit to use. Infer this from the users location.\",\n \"enum\": [\n \"celsius\",\n \"fahrenheit\"\n ],\n \"type\": \"string\"\n },\n \"location\": {\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n \"type\": \"string\"\n }\n },\n \"required\": [\n \"location\",\n \"format\"\n ],\n \"type\": \"object\"\n },\n \"description\": \"Get the current weather\",\n \"name\": \"get_current_weather\"\n },\n \"type\": \"function\"\n}\n\nWhat is the weather like in Brooklyn, New York?\n---\nThis default prompt will be used<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n".to_string();
         assert_eq!(result.unwrap(), expected);
     }