# Read-only global variables
# TODO: rewrite the config system so I don't have to add every single config default here.
# One possible shape for that is sketched at the bottom of this file.
frontend_api_mode = 'ooba'
max_new_tokens = 500
auth_required = False
log_prompts = False
frontend_api_client = ''
verify_ssl = True
show_num_prompts = True
show_uptime = True
average_generation_time_mode = 'database'
show_total_output_tokens = True
netdata_root = None
simultaneous_requests_per_ip = 3
show_backend_info = True
manual_model_name = None
llm_middleware_name = ''
enable_openi_compatible_backend = True
openai_system_prompt = """You are an assistant chatbot. Your main function is to provide accurate and helpful responses to the user's queries. You should always be polite, respectful, and patient. You should not provide any personal opinions or advice unless specifically asked by the user. You should not make any assumptions about the user's knowledge or abilities. You should always strive to provide clear and concise answers. If you do not understand a user's query, ask for clarification. If you cannot provide an answer, apologize and suggest the user seek help elsewhere.\nLines that start with "### ASSISTANT" were messages you sent previously.\nLines that start with "### USER" were messages sent by the user you are chatting with.\nYou will respond to the "### RESPONSE:" prompt as the assistant and follow the instructions given by the user.\n\n"""
expose_openai_system_prompt = True
enable_streaming = True
openai_api_key = None
backend_request_timeout = 30
backend_generate_request_timeout = 95
admin_token = None
openai_expose_our_model = False
openai_force_no_hashes = True
include_system_tokens_in_stats = True
openai_moderation_scan_last_n = 5
openai_org_name = 'OpenAI'
openai_silent_trim = False
openai_moderation_enabled = True
cluster = {}
show_backends = True
background_homepage_cacher = True
openai_moderation_timeout = 5
prioritize_by_size = False
cluster_workers = 0
redis_stream_timeout = 25000
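

# -----------------------------------------------------------------------------
# Hedged sketch for the TODO near the top of this file (an illustration, not the
# project's actual loader): keep every default in one dict and merge the user's
# parsed config over it, so a new option only needs a single dict entry instead
# of a module-level global. The names DEFAULTS and apply_defaults are hypothetical.
# -----------------------------------------------------------------------------
DEFAULTS = {
    'frontend_api_mode': 'ooba',
    'max_new_tokens': 500,
    'auth_required': False,
    'log_prompts': False,
    # ...the remaining keys would mirror the globals declared above...
}


def apply_defaults(user_config: dict) -> dict:
    """Return the user's parsed config with any missing keys filled in from DEFAULTS."""
    # Later keys win in a dict merge, so explicit user values override the defaults.
    return {**DEFAULTS, **user_config}

# Example: apply_defaults({'max_new_tokens': 1024}) yields the full default
# mapping with max_new_tokens overridden to 1024.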