# oai-reverse-proxy/.env.example

# To customize your server, make a copy of this file to `.env` and edit any
# values you want to change. Be sure to remove the `#` at the beginning of each
# line you want to modify.
# All values have reasonable defaults, so you only need to change the ones you
# want to override.
# Use production mode unless you are developing locally.
NODE_ENV=production
# ------------------------------------------------------------------------------
# General settings:
# The title displayed on the info page.
# SERVER_TITLE=Coom Tunnel
# The route name used to proxy requests to APIs, relative to the Web site root.
# PROXY_ENDPOINT_ROUTE=/proxy
# Text model requests allowed per minute per user.
# TEXT_MODEL_RATE_LIMIT=4
# Image model requests allowed per minute per user.
# IMAGE_MODEL_RATE_LIMIT=2
# Max number of context tokens a user can request at once.
# Increase this if your proxy allows GPT-4 32k or 128k context models.
# MAX_CONTEXT_TOKENS_OPENAI=16384
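# For example, if your keys serve gpt4-turbo with a 128k context window (your
# allowed models may differ), you might raise the limit accordingly:
# MAX_CONTEXT_TOKENS_OPENAI=131072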
# Max number of output tokens a user can request at once.
# MAX_OUTPUT_TOKENS_OPENAI=400
# MAX_OUTPUT_TOKENS_ANTHROPIC=400
# Whether to show the estimated cost of consumed tokens on the info page.
# SHOW_TOKEN_COSTS=false
# Whether to automatically check API keys for validity.
# Note: CHECK_KEYS is disabled by default in local development mode, but enabled
# by default in production mode.
# CHECK_KEYS=true
# Which model types users are allowed to access.
# The following model families are recognized:
# turbo | gpt4 | gpt4-32k | gpt4-turbo | dall-e | claude | claude-opus | gemini-pro | mistral-tiny | mistral-small | mistral-medium | mistral-large | aws-claude | azure-turbo | azure-gpt4 | azure-gpt4-32k | azure-gpt4-turbo | azure-dall-e
# By default, all models are allowed except for 'dall-e' / 'azure-dall-e'.
# To allow DALL-E image generation, uncomment the line below and add 'dall-e' or
# 'azure-dall-e' to the list of allowed model families.
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,claude,claude-opus,gemini-pro,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo
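# For example, a deployment that only serves OpenAI keys and also permits
# image generation (adjust to whatever keys you actually have) might use:
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-turbo,dall-e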
# URLs from which requests will be blocked.
# BLOCKED_ORIGINS=reddit.com,9gag.com
# Message to show when requests are blocked.
# BLOCK_MESSAGE="You must be over the age of majority in your country to use this service."
# Destination to redirect blocked requests to.
# BLOCK_REDIRECT="https://roblox.com/"
# Comma-separated list of phrases that will be rejected. Only whole words are matched.
# Surround phrases with quotes if they contain commas.
# Avoid short or common phrases, as the check is applied against the entire prompt.
# REJECT_PHRASES="phrase one,phrase two,"phrase three, which has a comma",phrase four"
# Message to show when requests are rejected.
# REJECT_MESSAGE="This content violates /aicg/'s acceptable use policy."
# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false
# The port and network interface to listen on.
# PORT=7860
# BIND_ADDRESS=0.0.0.0
# Whether cookies should be set without the Secure flag, for hosts that don't support SSL.
# USE_INSECURE_COOKIES=false
# Detail level of logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info
# ------------------------------------------------------------------------------
# Optional settings for user management, access control, and quota enforcement:
# See `docs/user-management.md` for more information and setup instructions.
# See `docs/user-quotas.md` to learn how to set up quotas.
# Which access control method to use. (none | proxy_key | user_token)
# GATEKEEPER=none
# Which persistence method to use. (memory | firebase_rtdb)
# GATEKEEPER_STORE=memory
# Maximum number of unique IPs a user can connect from. (0 for unlimited)
# MAX_IPS_PER_USER=0
# Whether user_tokens should be automatically disabled when reaching the IP limit.
# MAX_IPS_AUTO_BAN=true
# With user_token gatekeeper, whether to allow users to change their nickname.
# ALLOW_NICKNAME_CHANGES=true
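# For example, a user_token setup with in-memory persistence and a two-IP limit
# per user (values here are illustrative, not recommendations) might look like:
# GATEKEEPER=user_token
# GATEKEEPER_STORE=memory
# MAX_IPS_PER_USER=2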
# Default token quotas for each model family. (0 for unlimited)
# DALL-E "tokens" are counted at a rate of 100000 tokens per US$1.00 generated,
# which is similar to the cost of GPT-4 Turbo.
# DALL-E 3 costs around US$0.10 per image (10000 tokens).
# See `docs/dall-e-configuration.md` for more information.
# TOKEN_QUOTA_TURBO=0
# TOKEN_QUOTA_GPT4=0
# TOKEN_QUOTA_GPT4_32K=0
# TOKEN_QUOTA_GPT4_TURBO=0
# TOKEN_QUOTA_DALL_E=0
# TOKEN_QUOTA_CLAUDE=0
# TOKEN_QUOTA_GEMINI_PRO=0
# TOKEN_QUOTA_AWS_CLAUDE=0
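# Worked example: at the rate above of roughly 10000 tokens per DALL-E 3 image,
# a quota of about ten images per refresh period would be:
# TOKEN_QUOTA_DALL_E=100000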
# How often to refresh token quotas. (hourly | daily)
# Leave unset to never automatically refresh quotas.
# QUOTA_REFRESH_PERIOD=daily
# Specifies the number of proxies or load balancers in front of the server.
# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
# For any other deployments, please see config.ts, as the correct configuration
# depends on your setup. Misconfiguring this value can cause problems with
# accurately tracking IP addresses and enforcing rate limits.
# TRUSTED_PROXIES=1
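# For example, if requests pass through Cloudflare and then your own reverse
# proxy before reaching this server (a hypothetical two-hop setup), you would
# count both:
# TRUSTED_PROXIES=2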
# ------------------------------------------------------------------------------
# Secrets and keys:
# For Hugging Face, set these via the Secrets section in your Space's config UI. Do not set them in .env.
# For Render, create a "secret file" called .env using the Environment tab.
# You can add multiple API keys by separating them with a comma.
# For AWS credentials, separate the access key ID, secret key, and region with a colon.
OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# See `docs/aws-configuration.md` for more information; there may be additional steps required to set up AWS.
AWS_CREDENTIALS=myaccesskeyid:mysecretkey:us-east-1,anotheraccesskeyid:anothersecretkey:us-west-2
# See `docs/azure-configuration.md` for more information; there may be additional steps required to set up Azure.
AZURE_CREDENTIALS=azure-resource-name:deployment-id:api-key,another-azure-resource-name:another-deployment-id:another-api-key
# With proxy_key gatekeeper, the password users must provide to access the API.
# PROXY_KEY=your-secret-key
# With user_token gatekeeper, the admin password used to manage users.
# ADMIN_KEY=your-very-secret-key
# With firebase_rtdb gatekeeper storage, the Firebase project credentials.
# FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# FIREBASE_RTDB_URL=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.firebaseio.com
# With prompt logging, the Google Sheets credentials.
# GOOGLE_SHEETS_SPREADSHEET_ID=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# GOOGLE_SHEETS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx