From 9f18f4c00627e1a0ad696b6774e5ad7ca8f4261c Mon Sep 17 00:00:00 2001
From: OlivierDehaene
Date: Thu, 27 Jul 2023 19:25:15 +0200
Subject: [PATCH] v0.9.4 (#713)

---
 Cargo.lock            | 8 ++++----
 Cargo.toml            | 2 +-
 README.md             | 2 +-
 docs/openapi.json     | 2 +-
 server/pyproject.toml | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 539cf124..8984ea6a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2893,7 +2893,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-benchmark"
-version = "0.9.3"
+version = "0.9.4"
 dependencies = [
  "average",
  "clap",
@@ -2913,7 +2913,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-client"
-version = "0.9.3"
+version = "0.9.4"
 dependencies = [
  "futures",
  "grpc-metadata",
@@ -2929,7 +2929,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-launcher"
-version = "0.9.3"
+version = "0.9.4"
 dependencies = [
  "clap",
  "ctrlc",
@@ -2945,7 +2945,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-router"
-version = "0.9.3"
+version = "0.9.4"
 dependencies = [
  "async-stream",
  "axum",
diff --git a/Cargo.toml b/Cargo.toml
index 49b7717a..3bfe9831 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -8,7 +8,7 @@ members = [
 ]
 
 [workspace.package]
-version = "0.9.3"
+version = "0.9.4"
 edition = "2021"
 authors = ["Olivier Dehaene"]
 homepage = "https://github.com/huggingface/text-generation-inference"
diff --git a/README.md b/README.md
index 9e256c9d..effab42e 100644
--- a/README.md
+++ b/README.md
@@ -84,7 +84,7 @@ The easiest way of getting started is using the official Docker container:
 model=tiiuae/falcon-7b-instruct
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9.3 --model-id $model
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9.4 --model-id $model
 ```
 
 **Note:** To use GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 11.8 or higher.
diff --git a/docs/openapi.json b/docs/openapi.json
index 80240460..9a672382 100644
--- a/docs/openapi.json
+++ b/docs/openapi.json
@@ -10,7 +10,7 @@
       "name": "Apache 2.0",
       "url": "https://www.apache.org/licenses/LICENSE-2.0"
     },
-    "version": "0.9.3"
+    "version": "0.9.4"
   },
   "paths": {
     "/": {
diff --git a/server/pyproject.toml b/server/pyproject.toml
index 40cd31ee..3ee3351c 100644
--- a/server/pyproject.toml
+++ b/server/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "text-generation-server"
-version = "0.9.3"
+version = "0.9.4"
 description = "Text Generation Inference Python gRPC Server"
 authors = ["Olivier Dehaene "]