[tool.poetry]
name = "text-generation-server"
version = "2.0.0"
description = "Text Generation Inference Python gRPC Server"
authors = ["Olivier Dehaene <olivier@huggingface.co>"]

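# Console entry point: installing this package exposes a `text-generation-server`
# command that runs the `app` object from `text_generation_server.cli`.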
[tool.poetry.scripts]
text-generation-server = 'text_generation_server.cli:app'

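# Dependencies flagged `optional = true` are not installed by default; they are
# pulled in through the matching extras defined further down.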
[tool.poetry.dependencies]
python = ">=3.9,<3.13"
protobuf = "^4.21.7"
grpcio = "^1.51.1"
grpcio-status = "^1.51.1"
grpcio-reflection = "^1.51.1"
grpc-interceptor = "^0.15.0"
typer = "^0.6.1"
accelerate = { version = "^0.29.1", optional = true }
bitsandbytes = { version = "^0.43.0", optional = true }
safetensors = "^0.4"
loguru = "^0.6.0"
opentelemetry-api = "^1.15.0"
opentelemetry-exporter-otlp = "^1.15.0"
opentelemetry-instrumentation-grpc = "^0.36b0"
hf-transfer = "^0.1.2"
sentencepiece = "^0.1.97"
tokenizers = "^0.15.0"
huggingface-hub = "^0.19.3"
transformers = "^4.39"
einops = "^0.6.1"
texttable = { version = "^1.6.7", optional = true }
datasets = { version = "^2.14.0", optional = true }
peft = { version = "^0.10", optional = true }
torch = { version = "^2.1.1", optional = true }
scipy = "^1.11.1"
pillow = "^10.0.0"
outlines = { version = "^0.0.36", optional = true }

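# Extras group the optional dependencies above into installable features,
# e.g. `poetry install --extras "bnb quantize"` (illustrative invocation; pick
# the extras relevant to your setup).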
[tool.poetry.extras]
torch = ["torch"]
accelerate = ["accelerate"]
bnb = ["bitsandbytes"]
peft = ["peft"]
quantize = ["texttable", "datasets", "accelerate"]
outlines = ["outlines"]

[tool.poetry.group.dev.dependencies]
grpcio-tools = "^1.51.1"
pytest = "^7.3.0"

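# Extra package index for CUDA 12.1 wheels. With `priority = "explicit"` it is
# only consulted for dependencies that reference it by name, e.g. added via
# `poetry add --source pytorch-gpu-src torch` (illustrative command, not part
# of this file).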
[[tool.poetry.source]]
name = "pytorch-gpu-src"
url = "https://download.pytorch.org/whl/cu121"
priority = "explicit"

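# Tests marked `private` require an admin Hugging Face token; skip them with
# `pytest -m "not private"`.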
[tool.pytest.ini_options]
markers = ["private: marks tests as requiring an admin hf token (deselect with '-m \"not private\"')"]

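# PEP 517 build configuration: builds are driven by poetry-core.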
[build-system]
requires = [
    "poetry-core>=1.0.0",
]
build-backend = "poetry.core.masonry.api"