import re
import sys

import openai

import llm_server
from llm_server import opts
from llm_server.config.config import ConfigLoader, config_default_vars, config_required_vars
from llm_server.custom_redis import redis
from llm_server.database.conn import database
from llm_server.database.database import get_number_of_rows
from llm_server.routes.queue import PriorityQueue

def load_config(config_path):
    """Load the server config file and apply it to global state.

    Parses/validates the file at ``config_path`` via ``ConfigLoader``, copies
    the resulting values onto the global ``opts`` module, initializes the
    OpenAI client key, the cluster priority queue, the database connection,
    and a couple of Redis keys.

    Returns:
        tuple: ``(success, config, msg)`` exactly as produced by
        ``ConfigLoader.load_config()``; returns early (without applying
        anything) when ``success`` is falsy.
    """
    config_loader = ConfigLoader(config_path, config_default_vars, config_required_vars)
    success, config, msg = config_loader.load_config()
    if not success:
        return success, config, msg

    # Config keys whose name matches the `opts` attribute one-for-one.
    # (Replaces a long wall of `opts.x = config['x']` lines — the old
    # "TODO: this is atrocious" block.)
    _mirrored_keys = (
        'auth_required',
        'log_prompts',
        'frontend_api_client',
        'show_num_prompts',
        'show_uptime',
        'cluster',
        'show_total_output_tokens',
        'netdata_root',
        'simultaneous_requests_per_ip',
        'show_backend_info',
        'max_new_tokens',
        'manual_model_name',
        'llm_middleware_name',
        'enable_openi_compatible_backend',
        'openai_system_prompt',
        'expose_openai_system_prompt',
        'enable_streaming',
        'openai_api_key',
        'admin_token',
        'openai_force_no_hashes',
        'include_system_tokens_in_stats',
        'openai_moderation_scan_last_n',
        'openai_org_name',
        'openai_silent_trim',
        'openai_moderation_enabled',
        'show_backends',
        'background_homepage_cacher',
        'openai_moderation_timeout',
        'frontend_api_mode',
        'prioritize_by_size',
    )
    for key in _mirrored_keys:
        setattr(opts, key, config[key])

    # NOTE: the config key really is spelled 'openai_epose_our_model' (sic);
    # it is defined that way in the config schema elsewhere in the project,
    # so only the opts attribute uses the corrected spelling.
    opts.openai_expose_our_model = config['openai_epose_our_model']

    openai.api_key = opts.openai_api_key

    # Scale the number of workers: total workers is the sum of each
    # backend's allowed concurrent generations.
    for item in config['cluster']:
        opts.cluster_workers += item['concurrent_gens']

    llm_server.routes.queue.priority_queue = PriorityQueue([x['backend_url'] for x in config['cluster']])

    if opts.openai_expose_our_model and not opts.openai_api_key:
        # NOTE(review): the message says "false" but this branch fires when the
        # option is truthy — one of the two looks inverted; confirm the intended
        # semantics before changing either. Behavior preserved as-is.
        print('If you set openai_epose_our_model to false, you must set your OpenAI key in openai_api_key.')
        sys.exit(1)

    opts.verify_ssl = config['verify_ssl']
    if not opts.verify_ssl:
        # Operator opted out of TLS verification; silence the per-request warnings.
        import urllib3
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    if config['http_host']:
        # Store only the bare host (scheme stripped) for later URL building.
        http_host = re.sub(r'http(?:s)?://', '', config["http_host"])
        redis.set('http_host', http_host)
        redis.set('base_client_api', f'{http_host}/{opts.frontend_api_client.strip("/")}')

    database.init_db(config['mysql']['host'], config['mysql']['username'], config['mysql']['password'], config['mysql']['database'])

    if config['load_num_prompts']:
        # Seed the prompt counter from the DB so stats survive restarts.
        redis.set('proompts', get_number_of_rows('prompts'))

    return success, config, msg


def parse_backends(config):
    """Index the cluster backends by their normalized URL.

    Each entry in ``config['cluster']`` has its ``backend_url`` stripped of
    trailing slashes (the entry dict is updated in place), then the entries
    are returned as a dict keyed by that normalized URL.

    Returns:
        dict: ``{backend_url: entry}`` on success, or ``False`` when the
        config has no (or an empty) ``cluster`` section.
    """
    cluster = config.get('cluster')
    if not cluster:
        return False

    backends = {}
    for entry in cluster:
        normalized_url = entry['backend_url'].strip('/')
        # Persist the normalized form back onto the entry itself so every
        # consumer sees a canonical URL.
        entry['backend_url'] = normalized_url
        backends[normalized_url] = entry
    return backends
|