local-llm-server/config/config.yml.sample

## Important stuff
# The base URL of your backend API
backend_url: http://x.x.x.x:5000
# Mode to run the proxy in
mode: oobabooga
# How many generation requests will be processed concurrently.
# Oobabooga only supports one concurrent generation. If you're using Oobabooga, you MUST
# set this to 1 or else your estimated wait time will be incorrect.
concurrent_gens: 1
# The configured token limit of your backend.
# This number is shown to clients and on the home page. (may be important later)
token_limit: 7777
# How many requests a single IP is allowed to have in the queue at once.
# If an IP tries to queue more than this, its requests will be rejected
# until the earlier ones have completed.
ip_in_queue_max: 1
llm_middleware_name: Local LLM Proxy
## Optional
# Log request and response content.
log_prompts: false
# Python's requests library has issues with self-signed certs.
verify_ssl: false
# Reject all requests that aren't authenticated with a token.
auth_required: false
# JS tracking code to add to the home page.
#analytics_tracking_code: |
# alert("hello");
# HTML to add under the "Estimated Wait Time" line.
#info_html: |
# <a href="https://chub-archive.evulid.cc/#/proxy-stats.html?proxy=proxy_chub_archive_evulid">Historical Stats</a>
## STATS ##
# Display the total_proompts item on the stats screen.
show_num_prompts: true
# Display the uptime item on the stats screen.
show_uptime: true
# Load the number of prompts from the database to display on the stats page.
load_num_prompts: true
# The path shown to users for connecting to this proxy's API.
frontend_api_client: /api
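# For example (illustrative hostname only): if this proxy is reachable at
# https://proxy.example.com, clients would use https://proxy.example.com/api
# as their API base URL.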
# Relative paths are resolved relative to the server's directory.
database_path: ./proxy-server.db
# How the average generation time (used for the estimated wait time) is calculated.
average_generation_time_mode: database