# Makefile for the text-generation-inference Python server
# Pinned upstream commits for the source installs below
# (see install-transformers / install-flash-attention).
transformers_commit := 2b57aa18da658e7d2f42ef6bd5b56751af582fef
flash_att_commit := 4d87e4d875077ad9efd25030efa4ab0ba92c19e1
.PHONY: gen-server
gen-server:
	# Compile the gRPC protos into text_generation_server/pb and make it a package
	pip install grpcio-tools==1.51.1 --no-cache-dir
	mkdir -p text_generation_server/pb
	python -m grpc_tools.protoc -I../proto --python_out=text_generation_server/pb --grpc_python_out=text_generation_server/pb ../proto/generate.proto
	# Rewrite the generated absolute "import ..._pb2" lines to package-relative imports
	find text_generation_server/pb/ -type f -name "*.py" -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \;
	touch text_generation_server/pb/__init__.py
.PHONY: install-transformers
install-transformers:
	# Install specific version of transformers with custom cuda kernels
	pip uninstall transformers -y || true
	rm -rf transformers
	git clone https://github.com/OlivierDehaene/transformers.git
	# checkout and install in one shell: each recipe line runs in its own shell,
	# so the cd must be chained with &&
	cd transformers && git checkout $(transformers_commit) && python setup.py install
.PHONY: install-flash-attention
install-flash-attention:
	# Install specific version of flash attention, plus its fused
	# layer-norm and rotary-embedding CUDA extensions
	pip install packaging
	pip uninstall flash_attn rotary_emb dropout_layer_norm -y || true
	rm -rf flash-attention
	git clone https://github.com/HazyResearch/flash-attention.git
	cd flash-attention && git checkout $(flash_att_commit)
	cd flash-attention && python setup.py install && cd csrc/layer_norm && python setup.py install && cd ../rotary && python setup.py install
.PHONY: install-torch
install-torch:
	# Install specific version of torch (CUDA 11.8 wheel index)
	pip install torch --extra-index-url https://download.pytorch.org/whl/cu118 --no-cache-dir
.PHONY: install
install: gen-server install-torch install-transformers
	# Install this package itself (editable) on top of the generated protos
	# and the pinned torch/transformers builds
	pip install pip --upgrade
	pip install -e . --no-cache-dir
.PHONY: run-dev
run-dev:
	# Launch a 2-shard dev server on a small model for local testing
	SAFETENSORS_FAST_GPU=1 python -m torch.distributed.run --nproc_per_node=2 text_generation_server/cli.py serve bigscience/bloom-560m --sharded