This repository has been archived on 2024-10-27. You can view files and clone it, but cannot push or open issues or pull requests.
local-llm-server/other/local-llm-server.service

26 lines
878 B
SYSTEMD
Raw Normal View History

# local-llm-server.service — systemd unit for the Local LLM proxy server.
[Unit]
Description=Local LLM Proxy Server
Wants=basic.target
# Start only after the network is up and the backing daemon is running;
# Requires= means this proxy is stopped if local-llm-daemon.service stops.
After=basic.target network.target local-llm-daemon.service
Requires=local-llm-daemon.service

[Service]
User=server
WorkingDirectory=/srv/server/local-llm-server
# Sometimes the old processes aren't terminated when the service is restarted.
# The leading '-' tells systemd to ignore pkill's exit status: pkill exits 1
# when no matching process exists, and a failing ExecStartPre would otherwise
# abort startup on every clean boot.
ExecStartPre=-/usr/bin/pkill -9 -f "/srv/server/local-llm-server/venv/bin/python3 /srv/server/local-llm-server/venv/bin/gunicorn"
# gunicorn's access and error logs go to stdout via --access-logfile/--error-logfile '-'.
# TODO: confirm application-level logging also goes to stdout.
# Need a lot of workers since we have long-running requests. This takes about 3.5G memory.
ExecStart=/srv/server/local-llm-server/venv/bin/gunicorn --workers 20 --bind 0.0.0.0:5000 server:app --timeout 60 --worker-class gevent --access-logfile '-' --error-logfile '-'
# Always restart, with a short back-off so a crash loop doesn't spin hot.
Restart=always
RestartSec=2
# Tag journal entries so `journalctl -t local-llm-server` finds them.
SyslogIdentifier=local-llm-server

[Install]
# Enable for normal multi-user boots (`systemctl enable local-llm-server`).
WantedBy=multi-user.target