This repository has been archived on 2024-10-27. You can view files and clone it, but cannot push or open issues or pull requests.
local-llm-server/llm_server/opts.py

46 lines
2.1 KiB
Python
Raw Normal View History

2023-08-23 23:11:12 -06:00
# Read-only global variables
2023-08-21 22:49:44 -06:00
# Uppercase variables are read-only globals.
# Lowercase variables are ones that are set on startup and are never changed.
2023-08-24 20:43:11 -06:00
# TODO: rewrite the config system so I don't have to add every single config default here
# Which frontend API dialect this server speaks; 'ooba' presumably means
# text-generation-webui (oobabooga) compatible — confirm against the route handlers.
frontend_api_mode = 'ooba'
2023-08-30 18:53:26 -06:00
# Default cap on the number of tokens generated per request.
max_new_tokens = 500
2023-08-21 21:28:52 -06:00
# Require an auth token on incoming requests (off by default).
auth_required = False
# Whether incoming prompts are logged; off by default, presumably for privacy.
log_prompts = False
2023-08-22 16:50:49 -06:00
# Client-facing API base/identifier string; empty until set from config.
# NOTE(review): exact semantics depend on where this is read — verify at call site.
frontend_api_client = ''
2023-08-23 16:11:32 -06:00
# Verify TLS certificates on outbound requests to the backend(s).
verify_ssl = True
2023-08-23 22:08:10 -06:00
# Show the running prompt count on the public stats/home page.
show_num_prompts = True
# Show server uptime on the public stats/home page.
show_uptime = True
# How the average generation time is computed; 'database' presumably means
# derived from stored request history — confirm against the stats code.
average_generation_time_mode = 'database'
2023-08-24 20:43:11 -06:00
# Show the cumulative output-token count on the public stats/home page.
show_total_output_tokens = True
2023-08-25 15:02:40 -06:00
# Base URL of a Netdata instance for host metrics; None disables the integration.
netdata_root = None
2023-09-11 20:47:19 -06:00
# Max concurrent in-flight requests allowed per client IP.
simultaneous_requests_per_ip = 3
# Override for the model name reported to clients; None means report the
# backend's real model name.
manual_model_name = None
2023-09-12 16:40:09 -06:00
# Display name for this middleware instance; empty until set from config.
llm_middleware_name = ''
# Enable the OpenAI-compatible API routes.
# NOTE(review): "openi" looks like a typo for "openai"; renaming would break
# existing config files and callers, so the name is left as-is.
enable_openi_compatible_backend = True
2023-09-13 20:25:56 -06:00
# ---- Backend request limits ----

# Seconds allowed for ordinary backend requests (e.g. info/health calls).
backend_request_timeout = 30
# Seconds allowed for a generation request — longer, since inference is slow.
backend_generate_request_timeout = 95
# Admin auth token; None means no admin token is configured.
admin_token = None

# ---- OpenAI-compatible endpoint ----

# Stream tokens to clients as they are produced, rather than all at once.
enable_streaming = True
# Key for the upstream OpenAI API; None when unset.
openai_api_key = None
# System prompt used for OpenAI-style requests. NOTE(review): presumably
# prepended to conversations on the OpenAI-compatible route — confirm in handler.
openai_system_prompt = """You are an assistant chatbot. Your main function is to provide accurate and helpful responses to the user's queries. You should always be polite, respectful, and patient. You should not provide any personal opinions or advice unless specifically asked by the user. You should not make any assumptions about the user's knowledge or abilities. You should always strive to provide clear and concise answers. If you do not understand a user's query, ask for clarification. If you cannot provide an answer, apologize and suggest the user seek help elsewhere.\nLines that start with "### ASSISTANT" were messages you sent previously.\nLines that start with "### USER" were messages sent by the user you are chatting with.\nYou will respond to the "### RESPONSE:" prompt as the assistant and follow the instructions given by the user.\n\n"""
# Whether clients may retrieve the system prompt above through the API.
expose_openai_system_prompt = True
# Whether our own model names are reported on the OpenAI-compatible routes.
openai_expose_our_model = False
2023-09-25 22:01:57 -06:00
# NOTE(review): presumably strips/forbids hash-style IDs in OpenAI-compatible
# responses — confirm exact meaning where this flag is consumed.
openai_force_no_hashes = True
2023-09-25 23:39:50 -06:00
# Count system-prompt tokens toward the reported token statistics.
include_system_tokens_in_stats = True
2023-09-26 22:09:11 -06:00
# ---- OpenAI moderation ----

# Run the moderation endpoint against incoming OpenAI-compatible requests.
openai_moderation_enabled = True
# Only the most recent N messages of a conversation are scanned.
openai_moderation_scan_last_n = 5
# Seconds to wait for a moderation verdict before giving up.
openai_moderation_timeout = 5
# Organization name reported on OpenAI-compatible routes.
openai_org_name = 'OpenAI'
# NOTE(review): presumably trims over-long prompts silently instead of
# erroring on the OpenAI route — confirm at the consumer.
openai_silent_trim = False

# ---- Cluster / backend pool ----

# Backend cluster definition, populated from config at startup (empty by default).
cluster = {}
# List configured backends on the public status page.
show_backends = True
# Refresh the homepage cache in a background task.
background_homepage_cacher = True
# NOTE(review): presumably routes requests preferring larger/freer backends
# when enabled — confirm against the scheduler.
prioritize_by_size = False
# Worker count for cluster tasks; 0 until set from config.
cluster_workers = 0
# Redis stream read timeout in milliseconds.
redis_stream_timeout = 25000

# Uppercase = read-only constant (see module header): logging line format.
LOGGING_FORMAT = "%(asctime)s: %(levelname)s:%(name)s - %(message)s"