import os
import sys
from pathlib import Path
from threading import Thread

import simplejson as json
from flask import Flask, jsonify, render_template, request

import llm_server
from llm_server.database.conn import db_pool
from llm_server.database.create import create_db
from llm_server.database.database import get_number_of_rows
from llm_server.llm.oobabooga.ooba_backend import OobaboogaBackend
from llm_server.llm.vllm.vllm_backend import VLLMBackend
from llm_server.routes.openai import openai_bp
from llm_server.routes.server_error import handle_server_error

try:
    # Imported only to verify that vllm is installed before going any further.
    import vllm
except ModuleNotFoundError as e:
    print('Could not import vllm-gptq:', e)
    print('Please see README.md for install instructions.')
    sys.exit(1)

import config
from llm_server import opts
from llm_server.config import ConfigLoader, config_default_vars, config_required_vars, mode_ui_names
from llm_server.helpers import resolve_path
from llm_server.llm.vllm.info import vllm_info
from llm_server.routes.cache import cache, redis
from llm_server.routes.queue import start_workers
from llm_server.routes.stats import SemaphoreCheckerThread, process_avg_gen_time
from llm_server.routes.v1 import bp
from llm_server.routes.v1.generate_stats import generate_stats
from llm_server.stream import init_socketio
from llm_server.threads import MainBackgroundThread, cache_stats

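# Figure out where the config file lives: the CONFIG_PATH environment
# variable wins, otherwise fall back to config/config.yml next to this script.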
script_path = os.path.dirname(os.path.realpath(__file__))

config_path_environ = os.getenv("CONFIG_PATH")
if config_path_environ:
    config_path = config_path_environ
else:
    config_path = Path(script_path, 'config', 'config.yml')

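# Load and validate the config. As a rough illustration (key names taken from
# the lookups below; the values here are made up), a minimal config.yml might
# contain:
#   mode: vllm
#   backend_url: http://localhost:8000/
#   concurrent_gens: 3
#   token_limit: 4096
#   mysql:
#     host: localhost
#     username: llm
#     password: secret
#     database: llm_server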
config_loader = ConfigLoader(config_path, config_default_vars, config_required_vars)
success, config, msg = config_loader.load_config()
if not success:
    print('Failed to load config:', msg)
    sys.exit(1)

# Resolve a relative database path against the directory of this script.
if config['database_path'].startswith('./'):
    config['database_path'] = resolve_path(script_path, config['database_path'][2:])

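# Set up persistence: open the MySQL connection pool, then let create_db()
# (assumed idempotent here) build any missing tables.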
db_pool.init_db(config['mysql']['host'], config['mysql']['username'], config['mysql']['password'], config['mysql']['database'])
create_db()

if config['mode'] not in ['oobabooga', 'vllm']:
    print('Unknown mode:', config['mode'])
    sys.exit(1)
opts.mode = config['mode']
opts.auth_required = config['auth_required']
opts.log_prompts = config['log_prompts']
opts.concurrent_gens = config['concurrent_gens']
opts.frontend_api_client = config['frontend_api_client']
opts.context_size = config['token_limit']
opts.show_num_prompts = config['show_num_prompts']
opts.show_uptime = config['show_uptime']
opts.backend_url = config['backend_url'].rstrip('/')
opts.show_total_output_tokens = config['show_total_output_tokens']
opts.netdata_root = config['netdata_root']
opts.simultaneous_requests_per_ip = config['simultaneous_requests_per_ip']
opts.show_backend_info = config['show_backend_info']
opts.max_new_tokens = config['max_new_tokens']
opts.manual_model_name = config['manual_model_name']
opts.llm_middleware_name = config['llm_middleware_name']
opts.enable_openi_compatible_backend = config['enable_openi_compatible_backend']
opts.openai_system_prompt = config['openai_system_prompt']
opts.expose_openai_system_prompt = config['expose_openai_system_prompt']
opts.enable_streaming = config['enable_streaming']
opts.openai_api_key = config['openai_api_key']

2023-08-23 16:11:32 -06:00
|
|
|
opts.verify_ssl = config['verify_ssl']
|
2023-08-23 16:14:13 -06:00
|
|
|
if not opts.verify_ssl:
|
|
|
|
import urllib3
|
|
|
|
|
|
|
|
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
2023-08-21 21:28:52 -06:00
|
|
|
|
flushed_keys = redis.flush()
print('Flushed', len(flushed_keys), 'keys from Redis.')

if config['load_num_prompts']:
    redis.set('proompts', get_number_of_rows('prompts'))

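# 'database' and 'minute' are the two supported strategies for computing the
# average generation time; anything else is rejected.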
if config['average_generation_time_mode'] not in ['database', 'minute']:
    print('Invalid value for config item "average_generation_time_mode":', config['average_generation_time_mode'])
    sys.exit(1)
opts.average_generation_time_mode = config['average_generation_time_mode']

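# Pick the tokenizer for the active backend. Only vLLM is wired up; the
# oobabooga branch is a stub.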
if opts.mode == 'oobabooga':
    raise NotImplementedError
    # llm_server.llm.tokenizer = OobaboogaBackend()
elif opts.mode == 'vllm':
    llm_server.llm.tokenizer = llm_server.llm.vllm.tokenize
else:
    raise Exception(f'Unknown mode: {opts.mode}')

# Start background processes
start_workers(opts.concurrent_gens)
process_avg_gen_time_background_thread = Thread(target=process_avg_gen_time)
process_avg_gen_time_background_thread.daemon = True
process_avg_gen_time_background_thread.start()
MainBackgroundThread().start()
SemaphoreCheckerThread().start()

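# Build the Flask app and mount the APIs: the native API under /api/v1/ and
# the OpenAI-compatible API under /api/openai/v1/.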
app = Flask(__name__)
cache.init_app(app)
cache.clear()  # clear redis cache
init_socketio(app)
app.register_blueprint(bp, url_prefix='/api/v1/')
app.register_blueprint(openai_bp, url_prefix='/api/openai/v1/')

# This needs to be started after Flask is initialized
stats_updater_thread = Thread(target=cache_stats)
stats_updater_thread.daemon = True
stats_updater_thread.start()

# print(app.url_map)


@app.route('/')
@app.route('/api')
@app.route('/api/openai')
@cache.cached(timeout=10)
def home():
    stats = generate_stats()

    if not bool(redis.get('backend_online')) or not stats['online']:
        running_model = estimated_wait_sec = 'offline'
    else:
        running_model = opts.running_model

        if stats['queue']['queued'] == 0 and stats['queue']['processing'] > 0:
            # There will be a wait if the queue is empty but prompts are processing,
            # but we don't know how long.
            estimated_wait_sec = f"less than {stats['stats']['average_generation_elapsed_sec']} seconds"
        else:
            estimated_wait_sec = f"{stats['queue']['estimated_wait_sec']} seconds"

    if config['analytics_tracking_code']:
        analytics_tracking_code = f"<script>\n{config['analytics_tracking_code']}\n</script>"
    else:
        analytics_tracking_code = ''

    if config['info_html']:
        info_html = config['info_html']
    else:
        info_html = ''

    mode_info = ''
    if opts.mode == 'vllm':
        mode_info = vllm_info

    base_client_api = redis.get('base_client_api')
    base_client_api = base_client_api.decode() if base_client_api else None

    return render_template('home.html',
                           llm_middleware_name=opts.llm_middleware_name,
                           analytics_tracking_code=analytics_tracking_code,
                           info_html=info_html,
                           current_model=opts.manual_model_name if opts.manual_model_name else running_model,
                           client_api=stats['endpoints']['blocking'],
                           ws_client_api=stats['endpoints']['streaming'],
                           estimated_wait=estimated_wait_sec,
                           mode_name=mode_ui_names[opts.mode][0],
                           api_input_textbox=mode_ui_names[opts.mode][1],
                           streaming_input_textbox=mode_ui_names[opts.mode][2],
                           context_size=opts.context_size,
                           stats_json=json.dumps(stats, indent=4, ensure_ascii=False),
                           extra_info=mode_info,
                           openai_client_api=f'https://{base_client_api}/openai/v1' if opts.enable_openi_compatible_backend else 'disabled',
                           expose_openai_system_prompt=opts.expose_openai_system_prompt,
                           enable_streaming=opts.enable_streaming,
                           )


@app.route('/<first>')
@app.route('/<first>/<path:rest>')
def fallback(first=None, rest=None):
    return jsonify({
        'code': 404,
        'msg': 'not found'
    }), 404


@app.errorhandler(500)
def server_error(e):
    return handle_server_error(e)


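# Capture the public hostname from the first request we see; it is used to
# build the client-facing API URLs shown on the home page.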
@app.before_request
def before_app_request():
    if not opts.http_host:
        opts.http_host = request.headers.get("Host")
    if not redis.get('base_client_api'):
        redis.set('base_client_api', f'{request.headers.get("Host")}/{opts.frontend_api_client.strip("/")}')


if __name__ == "__main__":
    app.run(host='0.0.0.0', threaded=False, processes=15)
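
# Note: app.run() above uses Flask's built-in development server (here allowing
# up to 15 concurrent request-handling processes). For production, a dedicated
# WSGI server is the usual choice; a hypothetical gunicorn invocation (assuming
# this file is named server.py and gunicorn is installed) might look like:
#   gunicorn -b 0.0.0.0:5000 server:app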