# text-generation-inference/server/Makefile
# Generate the Python gRPC stubs from ../proto/generate.proto.
# Not a file target, so mark it phony to avoid being shadowed by a
# file/dir named "gen-server".
.PHONY: gen-server
gen-server:
	# Compile protos (pin grpcio-tools so generated code matches the runtime)
	pip install grpcio-tools==1.49.1 --no-cache-dir
	mkdir -p text_generation/pb
	python -m grpc_tools.protoc -I../proto --python_out=text_generation/pb --grpc_python_out=text_generation/pb ../proto/generate.proto
	# protoc emits absolute "import generate_pb2" lines; rewrite them to
	# relative imports so the pb package is importable from text_generation
	find text_generation/pb/ -type f -name "*.py" -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \;
	touch text_generation/pb/__init__.py
# Install a pinned fork of transformers that ships custom CUDA kernels.
.PHONY: install-transformers
install-transformers:
	# Install specific version of transformers with custom cuda kernels.
	# Use rm -rf (not plain rm): both paths are directories left over from
	# a previous run, and plain rm fails on directories.
	rm -rf transformers
	rm -rf transformers-b55f16c5b71aeef47a66a4270e19c154f050a7a7
	curl -L -O https://github.com/OlivierDehaene/transformers/archive/b55f16c5b71aeef47a66a4270e19c154f050a7a7.zip
	unzip b55f16c5b71aeef47a66a4270e19c154f050a7a7.zip
	rm b55f16c5b71aeef47a66a4270e19c154f050a7a7.zip
	mv transformers-b55f16c5b71aeef47a66a4270e19c154f050a7a7 transformers
	# cd and install must share one shell line: each recipe line runs in
	# its own shell, so a bare "cd" would not affect the next line
	cd transformers && python setup.py install
# Install the CUDA 11.6 build of torch from the PyTorch wheel index.
.PHONY: install-torch
install-torch:
	# Install specific version of torch
	pip install torch --extra-index-url https://download.pytorch.org/whl/cu116 --no-cache-dir
# Full server install: generated stubs, torch, patched transformers,
# then this package itself in editable mode.
.PHONY: install
install: gen-server install-torch install-transformers
	# Upgrade pip first so the editable install uses a current resolver
	pip install --upgrade pip
	pip install -e . --no-cache-dir
# Launch a 2-shard dev server on bloom-560m via torch.distributed.
.PHONY: run-dev
run-dev:
	# SAFETENSORS_FAST_GPU=1 enables the fast safetensors GPU loading path
	SAFETENSORS_FAST_GPU=1 python -m torch.distributed.run --nproc_per_node=2 text_generation/cli.py serve bigscience/bloom-560m --sharded