## Important stuff

# The base URL of your backend API
backend_url: http://x.x.x.x:5000
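# For example, with the backend running on the same machine (hypothetical address):
# backend_url: http://127.0.0.1:5000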

# Mode to run the proxy in
mode: oobabooga

# How many generation requests to process at the same time.
# Oobabooga only supports one. If you're using Oobabooga, you MUST set this to 1
# or else your estimated wait time will be incorrect.
concurrent_gens: 1
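# If your backend can batch multiple generations at once, a higher value may be
# appropriate (hypothetical example; check what your backend actually supports):
# concurrent_gens: 3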

# The configured token limit of your backend.
# This number is shown to clients and on the home page. (may be important later)
token_limit: 7777
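# For example, a backend loaded with a 4096-context model would advertise
# (hypothetical value):
# token_limit: 4096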

llm_middleware_name: Local LLM Proxy

## Optional

# Log request and response content.
log_prompts: false

# Python's requests library has issues with self-signed certs.
verify_ssl: false

# Reject all requests if they aren't authenticated with a token.
auth_required: false

# JS tracking code to add to the home page.
#analytics_tracking_code: |
#  alert("hello");
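# Note: the `|` after the key starts a YAML literal block, so when this is
# uncommented the script must stay indented under the key, e.g. (hypothetical):
# analytics_tracking_code: |
#   alert("line one");
#   alert("line two");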

## STATS ##

# Display the total_proompts item on the stats screen.
show_num_prompts: true

# Display the uptime item on the stats screen.
show_uptime: true

# Load the number of prompts from the database to display on the stats page.
load_num_prompts: true

# The API path shown to users for connecting to the proxy.
frontend_api_client: /api
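# Clients would then connect to this path on the proxy's address, e.g.
# http://<proxy-host>/api (hypothetical host placeholder).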

# Relative paths are resolved against the server's directory.
database_path: ./proxy-server.db
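# An absolute path works too, e.g. (hypothetical location):
# database_path: /var/lib/llm-proxy/proxy-server.db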