fix base vllm dockerfile
parent 6f89ac83f3
commit ca40295e50
@@ -33,7 +33,7 @@ RUN /venv/bin/pip3 install torch==2.0.1 --index-url https://download.pytorch.org
 # Don't build VLLM because we don't do that on the inference server. Just install from pip.
 # RUN /venv/bin/pip install git+https://github.com/vllm-project/vllm
 
-RUN pip install vllm
+RUN /venv/bin/pip install vllm
 
 FROM nvidia/cuda:11.8.0-base-ubuntu22.04 as base
 
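The fix points the install at the virtualenv's pip: the old line used the system `pip`, which puts vllm outside the /venv that the torch wheel (installed earlier in the same stage, per the hunk header) lives in. A minimal sketch of the builder stage this hunk sits in, assuming a venv created at /venv and the stage names shown here; everything not in the diff (the devel base tag, the apt/venv setup, the COPY into the runtime stage) is an assumption, not the repository's actual Dockerfile:

# Sketch of the builder stage (assumed layout; only the last three RUN/comment lines come from the diff).
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 as builder

# Assumed: system Python plus a dedicated virtualenv at /venv.
RUN apt-get update && apt-get install -y --no-install-recommends python3 python3-venv \
    && rm -rf /var/lib/apt/lists/*
RUN python3 -m venv /venv

# Torch goes into the venv (line referenced in the hunk header; the index URL is
# reproduced as displayed there and may be truncated in the rendering).
RUN /venv/bin/pip3 install torch==2.0.1 --index-url https://download.pytorch.org

# Don't build VLLM because we don't do that on the inference server. Just install from pip.
# RUN /venv/bin/pip install git+https://github.com/vllm-project/vllm
RUN /venv/bin/pip install vllm

FROM nvidia/cuda:11.8.0-base-ubuntu22.04 as base
# Assumed: the slim runtime stage copies the populated venv out of the builder, e.g.
# COPY --from=builder /venv /venv

Installing with /venv/bin/pip keeps every Python dependency in the one directory the runtime stage needs to copy, which is why the commit swaps out the bare `pip` call.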