hf_text-generation-inference/server/Makefile-vllm


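# Build and install vLLM from source for text-generation-inference,
# pinned to known-good commits of the Narsil (CUDA) and fxmarty (ROCm) forks.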
build-vllm-cuda:
	if [ ! -d 'vllm' ]; then \
		pip install -U ninja packaging --no-cache-dir && \
		git clone https://github.com/Narsil/vllm.git vllm && \
		cd vllm && \
		git fetch && git checkout b5dfc61db88a81069e45b44f7cc99bd9e62a60fa && \
		python setup.py build; \
	fi
install-vllm-cuda: build-vllm-cuda
	# build-vllm-cuda leaves a ready checkout in ./vllm, so install it directly;
	# guarding on [ ! -d 'vllm' ] here would always skip the install.
	cd vllm && pip install -e .
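# Typical invocation, as a sketch (assuming GNU make is run from the
# server/ directory where this file lives):
#   make -f Makefile-vllm install-vllm-cuda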
build-vllm-rocm:
	if [ ! -d 'vllm' ]; then \
		pip install -U ninja packaging --no-cache-dir && \
		git clone https://github.com/fxmarty/rocm-vllm.git vllm && \
		cd vllm && \
		git fetch && git checkout ca6913b3c2ffacdcb7d15e914dc34adbc6c89479 && \
		PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py build; \
	fi
install-vllm-rocm: build-vllm-rocm
	# build-vllm-rocm leaves a ready checkout in ./vllm, so install it directly;
	# as above, a [ ! -d 'vllm' ] guard would always skip the install.
	cd vllm && \
	PYTORCH_ROCM_ARCH="gfx90a;gfx942" pip install -e .
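# PYTORCH_ROCM_ARCH limits compilation to CDNA targets: gfx90a (MI200-series)
# and gfx942 (MI300-series). A sketch of the ROCm invocation, under the same
# assumption that GNU make runs from the server/ directory:
#   make -f Makefile-vllm install-vllm-rocm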