fix gunicorn logging

parent 27d36a7f0e
commit 0e7f04ab2d
@@ -6,6 +6,8 @@ The purpose of this server is to abstract your LLM backend from your frontend AP

### Install

You also need to create the log directory `/var/log/localllm`.

1. `sudo apt install redis`
2. `python3 -m venv venv`
3. `source venv/bin/activate`
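
A quick pre-flight check for the directory mentioned above (a sketch; only the path comes from the note, the rest is illustrative):

# Sketch: confirm the gunicorn log directory exists before starting the server;
# with this commit, startup exits if it is missing.
from pathlib import Path

log_dir = Path('/var/log/localllm')
if not log_dir.is_dir():
    raise SystemExit(f'Create the log directory first: {log_dir}')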
@@ -1,4 +1,6 @@
import logging
from pathlib import Path
import sys

import coloredlogs
@@ -28,9 +30,10 @@ class LoggingInfo:

logging_info = LoggingInfo()
LOG_DIRECTORY = None


def init_logging():
def init_logging(filepath:Path=None):
    """
    Set up the parent logger.
    :return:
@@ -38,6 +41,18 @@ def init_logging():
    logger = logging.getLogger('llm_server')
    logger.setLevel(logging_info.level)

    if filepath:
        p = Path(filepath)
        if not p.parent.is_dir():
            logger.fatal(f'Log directory does not exist: {p.parent}')
            sys.exit(1)
        LOG_DIRECTORY = p.parent
        handler = logging.FileHandler(filepath)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)


def create_logger(name):
    logger = logging.getLogger('llm_server').getChild(name)
@@ -49,4 +64,7 @@ def create_logger(name):
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    coloredlogs.install(logger=logger, level=logging_info.level)
    if LOG_DIRECTORY:
        handler = logging.FileHandler(LOG_DIRECTORY / f'{name}.log')
        logger.addHandler(handler)
    return logger
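
For context, a minimal sketch of how the updated helpers fit together (the log path is illustrative, not taken from any config):

# Illustrative wiring of init_logging() and create_logger() from the diff above.
from pathlib import Path

from llm_server.logging import create_logger, init_logging

# Attaches a FileHandler to the parent 'llm_server' logger; init_logging()
# exits if the parent directory of the given path does not exist.
init_logging(Path('/var/log/localllm/server.log'))

# Returns a child of the 'llm_server' logger with a coloredlogs console handler.
logger = create_logger('example')
logger.info('logging configured')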
@@ -1,12 +0,0 @@
import time
import sys

from llm_server.custom_redis import redis
from llm_server.logging import create_logger


def server_startup(s):
    logger = create_logger('gunicorn')
    while not redis.get('daemon_started', dtype=bool):
        logger.warning('Could not find the key daemon_started in Redis. Did you forget to start the daemon process?')
        time.sleep(10)
@@ -1,2 +0,0 @@
accesslog="-"
errorlog="-"
@@ -1,2 +0,0 @@
accesslog="-"
errorlog="-"
@@ -1,20 +0,0 @@
"""
This file is used to run certain tasks when the HTTP server starts.
It's located here so it doesn't get imported with daemon.py
"""
from llm_server.logging import create_logger

try:
    import gevent.monkey

    gevent.monkey.patch_all()
except ImportError:
    pass

from llm_server.pre_fork import server_startup


def on_starting(s):
    server_startup(s)
    logger = create_logger('gunicorn')
    logger.info('Startup complete!')
@@ -10,7 +10,7 @@ Group=server
WorkingDirectory=/srv/server/local-llm-server
# Need a lot of workers since we have long-running requests
# Takes about 3.5G memory
ExecStart=/srv/server/local-llm-server/venv/bin/gunicorn --workers 20 --bind 0.0.0.0:5000 server:app --timeout 60 --worker-class gevent -c /srv/server/local-llm-server/other/gconfig.py
ExecStart=/srv/server/local-llm-server/venv/bin/gunicorn --workers 20 --bind 0.0.0.0:5000 server:app --timeout 60 --worker-class gevent
Restart=always
RestartSec=2
SyslogIdentifier=local-llm-server
server.py
@@ -1,3 +1,5 @@
import time

try:
    import gevent.monkey
@@ -5,6 +7,7 @@ try:
except ImportError:
    pass

import logging
import os
import sys
from pathlib import Path
@@ -23,7 +26,7 @@ from llm_server.database.conn import database
from llm_server.database.create import create_db
from llm_server.helpers import auto_set_base_client_api
from llm_server.llm.vllm.info import vllm_info
from llm_server.pre_fork import server_startup
from llm_server.logging import init_logging
from llm_server.routes.openai import openai_bp, openai_model_bp
from llm_server.routes.server_error import handle_server_error
from llm_server.routes.v1 import bp
@@ -66,19 +69,6 @@ except ModuleNotFoundError as e:
    print('Please see README.md for install instructions.')
    sys.exit(1)

app = Flask(__name__)

# Fixes ConcurrentObjectUseError
# https://github.com/miguelgrinberg/simple-websocket/issues/24
app.config['SOCK_SERVER_OPTIONS'] = {'ping_interval': 25}

app.register_blueprint(bp, url_prefix='/api/')
app.register_blueprint(openai_bp, url_prefix='/api/openai/v1/')
app.register_blueprint(openai_model_bp, url_prefix='/api/openai/')
init_wssocket(app)
flask_cache.init_app(app)
flask_cache.clear()

script_path = os.path.dirname(os.path.realpath(__file__))
config_path_environ = os.getenv("CONFIG_PATH")
if config_path_environ:
@@ -91,9 +81,31 @@ if not success:
    print('Failed to load config:', msg)
    sys.exit(1)

init_logging(Path(config['webserver_log_directory']) / 'server.log')
logger = logging.getLogger('llm_server')

while not redis.get('daemon_started', dtype=bool):
    logger.warning('Could not find the key daemon_started in Redis. Did you forget to start the daemon process?')
    time.sleep(10)

logger.info('Started HTTP worker!')

database.init_db(config['mysql']['host'], config['mysql']['username'], config['mysql']['password'], config['mysql']['database'])
create_db()

app = Flask(__name__)

# Fixes ConcurrentObjectUseError
# https://github.com/miguelgrinberg/simple-websocket/issues/24
app.config['SOCK_SERVER_OPTIONS'] = {'ping_interval': 25}

app.register_blueprint(bp, url_prefix='/api/')
app.register_blueprint(openai_bp, url_prefix='/api/openai/v1/')
app.register_blueprint(openai_model_bp, url_prefix='/api/openai/')
init_wssocket(app)
flask_cache.init_app(app)
flask_cache.clear()


@app.route('/')
@app.route('/api')
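
server.py now reads `webserver_log_directory` from the loaded config, so that key must be set; a sketch of the path it builds (the directory value mirrors the README note and is otherwise an assumption):

# Sketch of the log-file path server.py now builds; the config must define
# 'webserver_log_directory', and init_logging() exits if that directory is missing.
# '/var/log/localllm' mirrors the README note and may differ per deployment.
from pathlib import Path

config = {'webserver_log_directory': '/var/log/localllm'}  # stand-in for the loaded config
log_file = Path(config['webserver_log_directory']) / 'server.log'
print('HTTP worker logs will be written to', log_file)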
@@ -203,6 +215,6 @@ def before_app_request():


if __name__ == "__main__":
    server_startup(None)
    # server_startup(None)
    print('FLASK MODE - Startup complete!')
    app.run(host='0.0.0.0', threaded=False, processes=15)