local-llm-server/other/vllm/vllm.service


[Unit]
Description=vLLM Backend
Wants=basic.target
After=basic.target network.target

[Service]
User=USERNAME
Group=USERNAME
# Can add --disable-log-requests when I know the backend won't crash
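# --max-num-batched-tokens caps the number of tokens the vLLM scheduler
# processes per iteration; larger values trade GPU memory for throughput.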
ExecStart=/storage/vllm/venv/bin/python /storage/vllm/api_server.py --model /storage/oobabooga/one-click-installers/text-generation-webui/models/TheBloke_MythoMax-L2-13B-GPTQ/ --host 0.0.0.0 --port 7000 --max-num-batched-tokens 24576
Restart=always
RestartSec=2

[Install]
WantedBy=multi-user.target
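
# Usage sketch (assumes this file is installed as
# /etc/systemd/system/vllm.service and that the USERNAME placeholder
# above has been replaced with a real user and group):
#   sudo cp vllm.service /etc/systemd/system/vllm.service
#   sudo systemctl daemon-reload
#   sudo systemctl enable --now vllm.service
#   journalctl -u vllm.service -f   # follow the backend's logs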