diff --git a/Cargo.lock b/Cargo.lock
index 8984ea6a..61c22a40 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2893,7 +2893,7 @@ dependencies = [
[[package]]
name = "text-generation-benchmark"
-version = "0.9.4"
+version = "1.0.0"
dependencies = [
"average",
"clap",
@@ -2913,7 +2913,7 @@ dependencies = [
[[package]]
name = "text-generation-client"
-version = "0.9.4"
+version = "1.0.0"
dependencies = [
"futures",
"grpc-metadata",
@@ -2929,7 +2929,7 @@ dependencies = [
[[package]]
name = "text-generation-launcher"
-version = "0.9.4"
+version = "1.0.0"
dependencies = [
"clap",
"ctrlc",
@@ -2945,7 +2945,7 @@ dependencies = [
[[package]]
name = "text-generation-router"
-version = "0.9.4"
+version = "1.0.0"
dependencies = [
"async-stream",
"axum",
diff --git a/Cargo.toml b/Cargo.toml
index 3bfe9831..255d35d4 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -8,7 +8,7 @@ members = [
]
[workspace.package]
-version = "0.9.4"
+version = "1.0.0"
edition = "2021"
authors = ["Olivier Dehaene"]
homepage = "https://github.com/huggingface/text-generation-inference"
diff --git a/README.md b/README.md
index 2bbb6583..869cc668 100644
--- a/README.md
+++ b/README.md
@@ -7,16 +7,14 @@
-
-
-
-
A Rust, Python and gRPC server for text generation inference. Used in production at [HuggingFace](https://huggingface.co)
-to power LLMs api-inference widgets.
+to power Hugging Chat, the Inference API and Inference Endpoint.
+
+
## Table of contents
@@ -85,7 +83,7 @@ The easiest way of getting started is using the official Docker container:
model=tiiuae/falcon-7b-instruct
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9.4 --model-id $model
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.0.0 --model-id $model
```
**Note:** To use GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 11.8 or higher.
@@ -152,7 +150,7 @@ model=meta-llama/Llama-2-7b-chat-hf
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
token=
-docker run --gpus all --shm-size 1g -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9.3 --model-id $model
+docker run --gpus all --shm-size 1g -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.0.0 --model-id $model
```
### A note on Shared Memory (shm)
diff --git a/docs/openapi.json b/docs/openapi.json
index 9a672382..0465a5ad 100644
--- a/docs/openapi.json
+++ b/docs/openapi.json
@@ -10,7 +10,7 @@
"name": "Apache 2.0",
"url": "https://www.apache.org/licenses/LICENSE-2.0"
},
- "version": "0.9.4"
+ "version": "1.0.0"
},
"paths": {
"/": {
diff --git a/server/Makefile-vllm b/server/Makefile-vllm
index 9100fff4..af750733 100644
--- a/server/Makefile-vllm
+++ b/server/Makefile-vllm
@@ -1,4 +1,4 @@
-vllm_commit := 084ca75d4271f8f67be731bc58e0d41d8e0afd3a
+vllm_commit := d284b831c17f42a8ea63369a06138325f73c4cf9
vllm:
# Clone vllm
diff --git a/server/pyproject.toml b/server/pyproject.toml
index 3ee3351c..fdfbdd70 100644
--- a/server/pyproject.toml
+++ b/server/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "text-generation-server"
-version = "0.9.4"
+version = "1.0.0"
description = "Text Generation Inference Python gRPC Server"
authors = ["Olivier Dehaene "]
diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py
index 7a45808e..183cf2c1 100644
--- a/server/text_generation_server/utils/layers.py
+++ b/server/text_generation_server/utils/layers.py
@@ -219,31 +219,36 @@ class TensorParallelHead(SuperLayer):
)
def forward(self, input: torch.Tensor) -> torch.Tensor:
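+        # Nothing is sharded across ranks in this case, so a plain forward
+        # pass suffices and no gather is needed.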
+ if not self.should_gather:
+ return super().forward(input)
+
world_size = self.process_group.size()
- # Fast branch for single requests
- if (
- self.should_gather
- and len(input.shape) == 2
- and isinstance(self.linear, FastLinear)
- and input.shape[0] == 1
- ):
+ if len(input.shape) == 2 and isinstance(self.linear, FastLinear):
out_dim = self.linear.weight.shape[0]
- world_out = input.new_empty(1, out_dim * world_size)
- local_out = input.new_empty(1, out_dim)
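+            # all_gather_into_tensor fills the destination with each rank's
+            # buffer back to back (i.e. stacked along dim 0), so the buffer we
+            # hand it must keep the sharded output dimension contiguous.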
+ if input.shape[0] == 1:
+ world_out = input.new_empty(1, out_dim * world_size)
+ local_out = input.new_empty(1, out_dim)
+ gather_input = local_out
+ else:
+ world_out = input.new_empty(out_dim * world_size, input.shape[0])
+ gather_input = input.new_empty(out_dim, input.shape[0])
+ local_out = gather_input.T
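+                # For batches > 1, gather an (out_dim, batch) layout so ranks
+                # stack along the sharded output dim: the matmul writes through
+                # the transposed view and the result is transposed back below.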
torch.mm(input, self.linear.weight.T, out=local_out)
torch.distributed.all_gather_into_tensor(
- world_out, local_out, group=self.process_group
+ world_out, gather_input, group=self.process_group
)
- return world_out
+
+ if input.shape[0] == 1:
+ return world_out
+ return world_out.T
output = super().forward(input)
- if not self.should_gather:
- return output
-
- world_output = [torch.empty_like(output) for _ in range(world_size)]
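+        # Generic path (3D inputs or non-FastLinear layers): gather every
+        # rank's full output and concatenate along the last (hidden) dim.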
+ world_output = [
+ torch.empty_like(output) for _ in range(self.process_group.size())
+ ]
torch.distributed.all_gather(world_output, output, group=self.process_group)
world_output = torch.cat(world_output, dim=-1)
return world_output