local-llm-server/other/vllm/vllm.service

[Unit]
Description=vLLM Backend
Wants=basic.target
After=basic.target network.target

[Service]
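# USERNAME is a placeholder; replace it with the account that should run the
# server (presumably the owner of /storage/vllm, judging by the paths below).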
User=USERNAME
Group=USERNAME
# Can add --disable-log-requests when I know the backend won't crash
ExecStart=/storage/vllm/venv/bin/python /storage/vllm/api_server.py --model /storage/oobabooga/one-click-installers/text-generation-webui/models/TheBloke_MythoMax-L2-13B-GPTQ/ --host 0.0.0.0 --port 7000 --max-num-batched-tokens 24576
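# Flag note, per vLLM's documented engine arguments: --max-num-batched-tokens
# caps how many tokens the scheduler batches per iteration. 24576 assumes the
# GPU has KV-cache headroom for batches that large; lower it on OOM errors.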
Restart=always
RestartSec=2

[Install]
WantedBy=multi-user.target
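
# Usage sketch (standard systemd workflow, nothing repo-specific assumed):
#   sudo cp vllm.service /etc/systemd/system/
#   sudo systemctl daemon-reload
#   sudo systemctl enable --now vllm.service
#   journalctl -u vllm.service -f   # follow the server logs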