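"""Load the server's config file and fan its values out to the global `opts`
module, the MySQL connection, and redis."""
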
import re
import sys

import openai

from llm_server import opts
from llm_server.config.config import ConfigLoader, config_default_vars, config_required_vars
from llm_server.database.conn import database
from llm_server.database.database import get_number_of_rows
from llm_server.helpers import resolve_path
from llm_server.routes.cache import redis


def load_config(config_path, script_path):
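    """Load and validate the config file at `config_path`, copy its values onto
    the global `opts` module, and initialize the database connection and redis
    state.

    Returns the `(success, config, msg)` tuple produced by `ConfigLoader.load_config()`.
    """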
    config_loader = ConfigLoader(config_path, config_default_vars, config_required_vars)
    success, config, msg = config_loader.load_config()
    if not success:
        return success, config, msg

    # Resolve a relative database path against the directory of the launching script.
    if config['database_path'].startswith('./'):
        # [2:] drops the './' prefix; str.strip('./') would strip those characters from both ends.
        config['database_path'] = resolve_path(script_path, config['database_path'][2:])

    if config['mode'] not in ['oobabooga', 'vllm']:
        print('Unknown mode:', config['mode'])
        sys.exit(1)

    # TODO: this is atrocious
    opts.mode = config['mode']
    opts.auth_required = config['auth_required']
    opts.log_prompts = config['log_prompts']
    opts.concurrent_gens = config['concurrent_gens']
    opts.frontend_api_client = config['frontend_api_client']
    opts.context_size = config['token_limit']
    opts.show_num_prompts = config['show_num_prompts']
    opts.show_uptime = config['show_uptime']
    opts.backend_url = config['backend_url'].rstrip('/')
    opts.show_total_output_tokens = config['show_total_output_tokens']
    opts.netdata_root = config['netdata_root']
    opts.simultaneous_requests_per_ip = config['simultaneous_requests_per_ip']
    opts.show_backend_info = config['show_backend_info']
    opts.max_new_tokens = config['max_new_tokens']
    opts.manual_model_name = config['manual_model_name']
    opts.llm_middleware_name = config['llm_middleware_name']
    opts.enable_openi_compatible_backend = config['enable_openi_compatible_backend']
    opts.openai_system_prompt = config['openai_system_prompt']
    opts.expose_openai_system_prompt = config['expose_openai_system_prompt']
    opts.enable_streaming = config['enable_streaming']
    opts.openai_api_key = config['openai_api_key']
    openai.api_key = opts.openai_api_key
    opts.admin_token = config['admin_token']
    opts.openai_expose_our_model = config['openai_epose_our_model']
    opts.openai_force_no_hashes = config['openai_force_no_hashes']
    opts.include_system_tokens_in_stats = config['include_system_tokens_in_stats']
    opts.openai_moderation_scan_last_n = config['openai_moderation_scan_last_n']
    opts.openai_moderation_workers = config['openai_moderation_workers']
    opts.openai_org_name = config['openai_org_name']
    opts.openai_silent_trim = config['openai_silent_trim']
    opts.openai_moderation_enabled = config['openai_moderation_enabled']

    if not opts.openai_expose_our_model and not opts.openai_api_key:
        print('If you set openai_epose_our_model to false, you must set your OpenAI key in openai_api_key.')
        sys.exit(1)

    opts.verify_ssl = config['verify_ssl']
    if not opts.verify_ssl:
        import urllib3

        # Suppress the warning urllib3 prints on every unverified HTTPS request.
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    if config['http_host']:
        # Store the bare host (scheme stripped) for building client-facing URLs.
        http_host = re.sub(r'https?://', '', config['http_host'])
        redis.set('http_host', http_host)
        redis.set('base_client_api', f'{http_host}/{opts.frontend_api_client.strip("/")}')

    database.init_db(config['mysql']['host'], config['mysql']['username'], config['mysql']['password'], config['mysql']['database'])

    if config['load_num_prompts']:
        redis.set('proompts', get_number_of_rows('prompts'))

    redis.set_dict('recent_prompters', {})
    redis.set_dict('processing_ips', {})
    redis.set_dict('queued_ip_count', {})
    redis.set('backend_mode', opts.mode)

    return success, config, msg
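

# A minimal usage sketch (the config path here is hypothetical; the real call
# site is the server's launcher script, which supplies its own directory as
# `script_path`):
#
#   import os
#   success, config, msg = load_config('./config/config.yml', os.path.dirname(os.path.realpath(__file__)))
#   if not success:
#       print('Failed to load config:', msg)
#       sys.exit(1)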