fix gunicorn logging

This commit is contained in:
Cyberes 2023-12-21 14:24:50 -07:00
parent 27d36a7f0e
commit 0e7f04ab2d
8 changed files with 49 additions and 53 deletions

View File

@ -6,6 +6,8 @@ The purpose of this server is to abstract your LLM backend from your frontend AP
### Install
also need to create /var/log/localllm
1. `sudo apt install redis`
2. `python3 -m venv venv`
3. `source venv/bin/activate`

View File

@ -1,4 +1,6 @@
import logging
from pathlib import Path
import sys
import coloredlogs
@ -28,9 +30,10 @@ class LoggingInfo:
logging_info = LoggingInfo()
LOG_DIRECTORY = None

def init_logging():
def init_logging(filepath:Path=None):
    """
    Set up the parent logger.
    :return:
@ -38,6 +41,18 @@ def init_logging():
    logger = logging.getLogger('llm_server')
    logger.setLevel(logging_info.level)

    if filepath:
        p = Path(filepath)
        if not p.parent.is_dir():
            logger.fatal(f'Log directory does not exist: {p.parent}')
            sys.exit(1)
        LOG_DIRECTORY = p.parent
        handler = logging.FileHandler(filepath)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
def create_logger(name):
    logger = logging.getLogger('llm_server').getChild(name)
@ -49,4 +64,7 @@ def create_logger(name):
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    coloredlogs.install(logger=logger, level=logging_info.level)

    if LOG_DIRECTORY:
        handler = logging.FileHandler(LOG_DIRECTORY / f'{name}.log')
        logger.addHandler(handler)
    return logger
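For reference, a minimal sketch of how the reworked helpers fit together, based only on the calls visible in this diff; the `worker` logger name and the log path are illustrative placeholders, not part of the commit:

```python
from pathlib import Path

from llm_server.logging import init_logging, create_logger

# Attach a file handler to the parent 'llm_server' logger; init_logging()
# exits with a fatal error if the log directory (e.g. /var/log/localllm,
# per the README note above) does not exist yet.
init_logging(Path('/var/log/localllm') / 'server.log')

# Child loggers get coloredlogs console output and, when LOG_DIRECTORY is set,
# their own file under that directory (here /var/log/localllm/worker.log).
logger = create_logger('worker')
logger.info('logging configured')
```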

View File

@ -1,12 +0,0 @@
import time
import sys
from llm_server.custom_redis import redis
from llm_server.logging import create_logger
def server_startup(s):
    logger = create_logger('gunicorn')
    while not redis.get('daemon_started', dtype=bool):
        logger.warning('Could not find the key daemon_started in Redis. Did you forget to start the daemon process?')
        time.sleep(10)

View File

@ -1,2 +0,0 @@
accesslog="-"
errorlog="-"

View File

@ -1,2 +0,0 @@
accesslog="-"
errorlog="-"

View File

@ -1,20 +0,0 @@
"""
This file is used to run certain tasks when the HTTP server starts.
It's located here so it doesn't get imported with daemon.py
"""
from llm_server.logging import create_logger
try:
    import gevent.monkey
    gevent.monkey.patch_all()
except ImportError:
    pass

from llm_server.pre_fork import server_startup

def on_starting(s):
    server_startup(s)
    logger = create_logger('gunicorn')
    logger.info('Startup complete!')

View File

@ -10,7 +10,7 @@ Group=server
WorkingDirectory=/srv/server/local-llm-server
# Need a lot of workers since we have long-running requests
# Takes about 3.5G memory
ExecStart=/srv/server/local-llm-server/venv/bin/gunicorn --workers 20 --bind 0.0.0.0:5000 server:app --timeout 60 --worker-class gevent -c /srv/server/local-llm-server/other/gconfig.py
ExecStart=/srv/server/local-llm-server/venv/bin/gunicorn --workers 20 --bind 0.0.0.0:5000 server:app --timeout 60 --worker-class gevent
Restart=always
RestartSec=2
SyslogIdentifier=local-llm-server

View File

@ -1,3 +1,5 @@
import time
try:
    import gevent.monkey
@ -5,6 +7,7 @@ try:
except ImportError:
    pass
import logging
import os
import sys
from pathlib import Path
@ -23,7 +26,7 @@ from llm_server.database.conn import database
from llm_server.database.create import create_db
from llm_server.helpers import auto_set_base_client_api
from llm_server.llm.vllm.info import vllm_info
from llm_server.pre_fork import server_startup
from llm_server.logging import init_logging
from llm_server.routes.openai import openai_bp, openai_model_bp
from llm_server.routes.server_error import handle_server_error
from llm_server.routes.v1 import bp
@ -66,19 +69,6 @@ except ModuleNotFoundError as e:
    print('Please see README.md for install instructions.')
    sys.exit(1)
app = Flask(__name__)
# Fixes ConcurrentObjectUseError
# https://github.com/miguelgrinberg/simple-websocket/issues/24
app.config['SOCK_SERVER_OPTIONS'] = {'ping_interval': 25}
app.register_blueprint(bp, url_prefix='/api/')
app.register_blueprint(openai_bp, url_prefix='/api/openai/v1/')
app.register_blueprint(openai_model_bp, url_prefix='/api/openai/')
init_wssocket(app)
flask_cache.init_app(app)
flask_cache.clear()
script_path = os.path.dirname(os.path.realpath(__file__))
config_path_environ = os.getenv("CONFIG_PATH")
if config_path_environ:
@ -91,9 +81,31 @@ if not success:
    print('Failed to load config:', msg)
    sys.exit(1)
init_logging(Path(config['webserver_log_directory']) / 'server.log')
logger = logging.getLogger('llm_server')

while not redis.get('daemon_started', dtype=bool):
    logger.warning('Could not find the key daemon_started in Redis. Did you forget to start the daemon process?')
    time.sleep(10)

logger.info('Started HTTP worker!')
database.init_db(config['mysql']['host'], config['mysql']['username'], config['mysql']['password'], config['mysql']['database'])
create_db()
app = Flask(__name__)
# Fixes ConcurrentObjectUseError
# https://github.com/miguelgrinberg/simple-websocket/issues/24
app.config['SOCK_SERVER_OPTIONS'] = {'ping_interval': 25}
app.register_blueprint(bp, url_prefix='/api/')
app.register_blueprint(openai_bp, url_prefix='/api/openai/v1/')
app.register_blueprint(openai_model_bp, url_prefix='/api/openai/')
init_wssocket(app)
flask_cache.init_app(app)
flask_cache.clear()
@app.route('/')
@app.route('/api')
@ -203,6 +215,6 @@ def before_app_request():
if __name__ == "__main__":
    server_startup(None)
    # server_startup(None)
    print('FLASK MODE - Startup complete!')
    app.run(host='0.0.0.0', threaded=False, processes=15)
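For context, the startup loop added to server.py above blocks each HTTP worker until the daemon process announces itself in Redis. A minimal sketch of the daemon-side hand-off, assuming the project's custom Redis wrapper exposes a `set()` counterpart to the `get('daemon_started', dtype=bool)` call polled in that loop; the helper name is hypothetical:

```python
from llm_server.custom_redis import redis  # same wrapper the wait loop polls

def signal_daemon_started():
    # Hypothetical helper: once the daemon has finished its own startup,
    # publish the flag so the HTTP workers stop warning and proceed.
    # Assumes the wrapper mirrors redis-py's set(); adjust to the real API.
    redis.set('daemon_started', 1)
```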