OlivierDehaene 2023-07-28 17:43:46 +02:00 committed by GitHub
parent bde25e62b3
commit 3ef5ffbc64
7 changed files with 33 additions and 30 deletions

Cargo.lock (generated)

@@ -2893,7 +2893,7 @@ dependencies = [

 [[package]]
 name = "text-generation-benchmark"
-version = "0.9.4"
+version = "1.0.0"
 dependencies = [
  "average",
  "clap",
@@ -2913,7 +2913,7 @@ dependencies = [

 [[package]]
 name = "text-generation-client"
-version = "0.9.4"
+version = "1.0.0"
 dependencies = [
  "futures",
  "grpc-metadata",
@@ -2929,7 +2929,7 @@ dependencies = [

 [[package]]
 name = "text-generation-launcher"
-version = "0.9.4"
+version = "1.0.0"
 dependencies = [
  "clap",
  "ctrlc",
@@ -2945,7 +2945,7 @@ dependencies = [

 [[package]]
 name = "text-generation-router"
-version = "0.9.4"
+version = "1.0.0"
 dependencies = [
  "async-stream",
  "axum",

Cargo.toml

@@ -8,7 +8,7 @@ members = [
 ]

 [workspace.package]
-version = "0.9.4"
+version = "1.0.0"
 edition = "2021"
 authors = ["Olivier Dehaene"]
 homepage = "https://github.com/huggingface/text-generation-inference"

README.md

@@ -7,16 +7,14 @@
   <a href="https://github.com/huggingface/text-generation-inference">
     <img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/huggingface/text-generation-inference?style=social">
   </a>
-  <a href="https://github.com/huggingface/text-generation-inference/blob/main/LICENSE">
-    <img alt="License" src="https://img.shields.io/github/license/huggingface/text-generation-inference">
-  </a>
   <a href="https://huggingface.github.io/text-generation-inference">
     <img alt="Swagger API documentation" src="https://img.shields.io/badge/API-Swagger-informational">
   </a>
-</div>

 A Rust, Python and gRPC server for text generation inference. Used in production at [HuggingFace](https://huggingface.co)
-to power LLMs api-inference widgets.
+to power Hugging Chat, the Inference API and Inference Endpoint.
+
+</div>

 ## Table of contents
@@ -85,7 +83,7 @@ The easiest way of getting started is using the official Docker container:
 model=tiiuae/falcon-7b-instruct
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run

-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9.4 --model-id $model
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.0.0 --model-id $model
 ```

 **Note:** To use GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 11.8 or higher.
@@ -152,7 +150,7 @@ model=meta-llama/Llama-2-7b-chat-hf
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 token=<your cli READ token>

-docker run --gpus all --shm-size 1g -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9.3 --model-id $model
+docker run --gpus all --shm-size 1g -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.0.0 --model-id $model
 ```

 ### A note on Shared Memory (shm)

docs/openapi.json

@@ -10,7 +10,7 @@
       "name": "Apache 2.0",
       "url": "https://www.apache.org/licenses/LICENSE-2.0"
     },
-    "version": "0.9.4"
+    "version": "1.0.0"
   },
   "paths": {
     "/": {

server/Makefile-vllm

@@ -1,4 +1,4 @@
-vllm_commit := 084ca75d4271f8f67be731bc58e0d41d8e0afd3a
+vllm_commit := d284b831c17f42a8ea63369a06138325f73c4cf9

 vllm:
 	# Clone vllm

server/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "text-generation-server"
-version = "0.9.4"
+version = "1.0.0"
 description = "Text Generation Inference Python gRPC Server"
 authors = ["Olivier Dehaene <olivier@huggingface.co>"]

server/text_generation_server/utils/layers.py

@@ -219,31 +219,36 @@ class TensorParallelHead(SuperLayer):
         )

     def forward(self, input: torch.Tensor) -> torch.Tensor:
+        if not self.should_gather:
+            return super().forward(input)
+
         world_size = self.process_group.size()
-        # Fast branch for single requests
-        if (
-            self.should_gather
-            and len(input.shape) == 2
-            and isinstance(self.linear, FastLinear)
-            and input.shape[0] == 1
-        ):
+        if len(input.shape) == 2 and isinstance(self.linear, FastLinear):
             out_dim = self.linear.weight.shape[0]

-            world_out = input.new_empty(1, out_dim * world_size)
-            local_out = input.new_empty(1, out_dim)
+            if input.shape[0] == 1:
+                world_out = input.new_empty(1, out_dim * world_size)
+                local_out = input.new_empty(1, out_dim)
+                gather_input = local_out
+            else:
+                world_out = input.new_empty(out_dim * world_size, input.shape[0])
+                gather_input = input.new_empty(out_dim, input.shape[0])
+                local_out = gather_input.T

             torch.mm(input, self.linear.weight.T, out=local_out)

             torch.distributed.all_gather_into_tensor(
-                world_out, local_out, group=self.process_group
+                world_out, gather_input, group=self.process_group
             )
-            return world_out
+
+            if input.shape[0] == 1:
+                return world_out
+            return world_out.T

         output = super().forward(input)
-        if not self.should_gather:
-            return output
-
-        world_output = [torch.empty_like(output) for _ in range(world_size)]
+        world_output = [
+            torch.empty_like(output) for _ in range(self.process_group.size())
+        ]
         torch.distributed.all_gather(world_output, output, group=self.process_group)
         world_output = torch.cat(world_output, dim=-1)
         return world_output
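
The `layers.py` hunk is the only behavioral change in this release commit: the `FastLinear` fast path in `TensorParallelHead.forward`, previously limited to batch size 1, now handles arbitrary batch sizes. The key fact is that `torch.distributed.all_gather_into_tensor` concatenates the per-rank tensors along dim 0. A `(1, out_dim)` row therefore gathers directly into `(1, out_dim * world_size)`, but a `(batch, out_dim)` result would be stacked along the batch dimension instead; so for larger batches each rank writes its result through a transposed view into an `(out_dim, batch)` buffer, the gather stacks shards along the hidden dimension, and a final `.T` restores `(batch, out_dim * world_size)`. Below is a minimal single-process sketch of that layout argument; the sizes are illustrative, and `torch.cat` along dim 0 stands in for the collective, which concatenates the same way.

```python
import torch

# Illustrative sizes only; not taken from the commit.
batch, hidden, out_dim, world_size = 4, 16, 8, 2
input = torch.randn(batch, hidden)
# One (out_dim, hidden) weight shard per simulated rank.
shards = [torch.randn(out_dim, hidden) for _ in range(world_size)]

# Reference result: per-rank outputs concatenated along the hidden dim,
# which is what the list-based all_gather + torch.cat(dim=-1) slow path produces.
reference = torch.cat([input @ w.T for w in shards], dim=-1)

# Batched fast path from the diff: write each (batch, out_dim) result through
# a transposed view into an (out_dim, batch) buffer, so a dim-0 concatenation
# (the stand-in for all_gather_into_tensor) stacks shards along the hidden dim.
gathered = []
for w in shards:
    gather_input = input.new_empty(out_dim, batch)
    local_out = gather_input.T  # (batch, out_dim) view over the same storage
    torch.mm(input, w.T, out=local_out)
    gathered.append(gather_input)
world_out = torch.cat(gathered, dim=0)  # (out_dim * world_size, batch)

assert torch.allclose(world_out.T, reference)
```

The `else` branch relies on `torch.mm` accepting a transposed view as `out=`, so the matmul result lands directly in the contiguous `(out_dim, batch)` storage the collective consumes, with no explicit copy or post-gather reshuffle.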