fix base vllm dockerfile

This commit is contained in:
Cyberes 2023-10-18 09:24:39 -06:00
parent 6f89ac83f3
commit ca40295e50
1 changed file with 1 addition and 1 deletion

View File

@ -33,7 +33,7 @@ RUN /venv/bin/pip3 install torch==2.0.1 --index-url https://download.pytorch.org
# Don't build VLLM because we don't do that on the inference server. Just install from pip.
# RUN /venv/bin/pip install git+https://github.com/vllm-project/vllm
- RUN pip install vllm
+ RUN /venv/bin/pip install vllm
FROM nvidia/cuda:11.8.0-base-ubuntu22.04 as base