# oai-reverse-proxy/.env.example

# To customize your server, copy this file to `.env` and edit any values you
# want to change. Be sure to remove the `#` at the beginning of each line you
# want to modify.
# All values have reasonable defaults, so you only need to change the ones you
# want to override.
# Use production mode unless you are developing locally.
NODE_ENV=production
# Detail level of diagnostic logging. (trace | debug | info | warn | error)
# LOG_LEVEL=info
# ------------------------------------------------------------------------------
# General settings:
# The title displayed on the info page.
# SERVER_TITLE=Coom Tunnel
# The route name used to proxy requests to APIs, relative to the Web site root.
# PROXY_ENDPOINT_ROUTE=/proxy
# Text model requests allowed per minute per user.
# TEXT_MODEL_RATE_LIMIT=4
# Image model requests allowed per minute per user.
# IMAGE_MODEL_RATE_LIMIT=2
# Max number of context tokens a user can request at once.
# Increase this if your proxy allows GPT-4 32k or 128k context models.
# MAX_CONTEXT_TOKENS_OPENAI=32768
# MAX_CONTEXT_TOKENS_ANTHROPIC=32768
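# For example (illustrative value, not a default), a proxy serving 128k-context
# models might use:
# MAX_CONTEXT_TOKENS_OPENAI=131072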
# Max number of output tokens a user can request at once.
# MAX_OUTPUT_TOKENS_OPENAI=1024
# MAX_OUTPUT_TOKENS_ANTHROPIC=1024
# Whether to show the estimated cost of consumed tokens on the info page.
# SHOW_TOKEN_COSTS=false
# Whether to automatically check API keys for validity.
# Disabled by default in local development mode, but enabled in production.
# CHECK_KEYS=true
# Which model types users are allowed to access.
# The following model families are recognized:
# turbo | gpt4 | gpt4-32k | gpt4-turbo | gpt4o | o1 | dall-e | claude
# | claude-opus | gemini-flash | gemini-pro | gemini-ultra | mistral-tiny
# | mistral-small | mistral-medium | mistral-large | aws-claude
# | aws-claude-opus | gcp-claude | gcp-claude-opus | azure-turbo | azure-gpt4
# | azure-gpt4-32k | azure-gpt4-turbo | azure-gpt4o | azure-o1 | azure-dall-e
# By default, all models are allowed except for dall-e and o1.
# To allow DALL-E image generation, uncomment the line below and add 'dall-e' or
# 'azure-dall-e' to the list of allowed model families.
# ALLOWED_MODEL_FAMILIES=turbo,gpt4,gpt4-32k,gpt4-turbo,gpt4o,claude,claude-opus,gemini-flash,gemini-pro,gemini-ultra,mistral-tiny,mistral-small,mistral-medium,mistral-large,aws-claude,aws-claude-opus,gcp-claude,gcp-claude-opus,azure-turbo,azure-gpt4,azure-gpt4-32k,azure-gpt4-turbo,azure-gpt4o
# Which services can be used to process prompts containing images via multimodal
# models. The following services are recognized:
# openai | anthropic | aws | gcp | azure | google-ai | mistral-ai
# Do not enable this feature unless all users are trusted, as you will be liable
# for any user-submitted images containing illegal content.
# By default, no image services are allowed and image prompts are rejected.
# ALLOWED_VISION_SERVICES=
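# For example (illustrative only), to accept image prompts sent to OpenAI and
# Anthropic models:
# ALLOWED_VISION_SERVICES=openai,anthropic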
# Whether prompts should be logged to Google Sheets.
# Requires additional setup. See `docs/google-sheets.md` for more information.
# PROMPT_LOGGING=false
# Specifies the number of proxies or load balancers in front of the server.
# For Cloudflare or Hugging Face deployments, the default of 1 is correct.
# For any other deployments, please see config.ts as the correct configuration
# depends on your setup. Misconfiguring this value can result in problems
# accurately tracking IP addresses and enforcing rate limits.
# TRUSTED_PROXIES=1
# Whether cookies should be set without the Secure flag, for hosts that don't
# support SSL. True by default in development, false in production.
# USE_INSECURE_COOKIES=false
# Reorganizes requests in the queue according to their token count, placing
# larger prompts further back. The penalty is determined by (promptTokens *
# TOKENS_PUNISHMENT_FACTOR). A value of 1.0 adds one second per 1000 tokens.
# When there is no queue or it is very short, the effect is negligible (this
# setting only reorders the queue, it does not artificially delay requests).
# TOKENS_PUNISHMENT_FACTOR=0.0
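# For example (illustrative), with a factor of 1.0 a 4000-token prompt would be
# placed roughly four seconds further back in the queue than an empty one:
# TOKENS_PUNISHMENT_FACTOR=1.0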
# Captcha verification settings. Refer to docs/pow-captcha.md for guidance.
# CAPTCHA_MODE=none
# POW_TOKEN_HOURS=24
# POW_TOKEN_MAX_IPS=2
# POW_DIFFICULTY_LEVEL=low
# POW_CHALLENGE_TIMEOUT=30
# -------------------------------------------------------------------------------
# Blocking settings:
# Allows blocking requests based on their content, referer, or IP address.
# This is a convenience feature; if you need more robust functionality it is
# highly recommended to put this application behind nginx or Cloudflare, as they
# will have better performance.
# IP addresses or CIDR blocks from which requests will be blocked.
# IP_BLACKLIST=10.0.0.0/24
# Origin/referer domains from which requests will be blocked.
# BLOCKED_ORIGINS=reddit.com,9gag.com
# Message to show when requests are blocked.
# BLOCK_MESSAGE="You must be over the age of majority in your country to use this service."
# Destination to redirect blocked requests to.
# BLOCK_REDIRECT="https://roblox.com/"
# Comma-separated list of phrases that will be rejected. Surround phrases with
# quotes if they contain commas. You can use regular expression tokens.
# Avoid overly broad phrases, as they will trigger on any match anywhere in the prompt.
# REJECT_PHRASES="phrase one,phrase two,"phrase three, which has a comma",phrase four"
# Message to show when requests are rejected.
# REJECT_MESSAGE="You can't say that here."
# ------------------------------------------------------------------------------
# Optional settings for user management, access control, and quota enforcement:
# See `docs/user-management.md` for more information and setup instructions.
# See `docs/user-quotas.md` to learn how to set up quotas.
# Which access control method to use. (none | proxy_key | user_token)
# GATEKEEPER=none
# Which persistence method to use. (memory | firebase_rtdb)
# GATEKEEPER_STORE=memory
# Maximum number of unique IPs a user can connect from. (0 for unlimited)
# MAX_IPS_PER_USER=0
# Whether user_tokens should be automatically disabled when reaching the IP limit.
# MAX_IPS_AUTO_BAN=true
# With user_token gatekeeper, whether to allow users to change their nickname.
# ALLOW_NICKNAME_CHANGES=true
# Default token quotas for each model family. (0 for unlimited)
# Specify as TOKEN_QUOTA_MODEL_FAMILY=value (replacing dashes with underscores).
# e.g. TOKEN_QUOTA_TURBO=0, TOKEN_QUOTA_GPT4=1000000, TOKEN_QUOTA_GPT4_32K=100000
# "Tokens" for image-generation models are counted at a rate of 100000 tokens
# per US$1.00 generated, which is similar to the cost of GPT-4 Turbo.
# DALL-E 3 costs around US$0.10 per image (10000 tokens).
# See `docs/dall-e-configuration.md` for more information.
# TOKEN_QUOTA_DALL_E=0
# How often to refresh token quotas. (hourly | daily)
# Leave unset to never automatically refresh quotas.
# QUOTA_REFRESH_PERIOD=daily
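# For example (illustrative values), to grant each user 1,000,000 turbo tokens
# and 500,000 gpt4 tokens, refreshed once per day:
# TOKEN_QUOTA_TURBO=1000000
# TOKEN_QUOTA_GPT4=500000
# QUOTA_REFRESH_PERIOD=daily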
# -------------------------------------------------------------------------------
# HTTP agent settings:
# If you need to change how the proxy makes requests to other servers, such
# as when checking keys or forwarding users' requests to external services,
# you can configure an alternative HTTP agent. Otherwise the default OS settings
# will be used.
# The name of the network interface to use. The first external IPv4 address
# belonging to this interface will be used for outgoing requests.
# HTTP_AGENT_INTERFACE=enp0s3
# The URL of a proxy server to use. Supports SOCKS4, SOCKS5, HTTP, and HTTPS.
# Note that if your proxy server issues a self-signed certificate, you may need
# to set NODE_EXTRA_CA_CERTS to the path of your certificate. That variable must
# be set in your environment, not in this file.
# HTTP_AGENT_PROXY_URL=http://test:test@127.0.0.1:8000
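# For example (illustrative), to route outgoing requests through a local SOCKS5
# proxy with username/password authentication:
# HTTP_AGENT_PROXY_URL=socks5://user:password@127.0.0.1:1080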
# ------------------------------------------------------------------------------
# Secrets and keys:
# For Hugging Face, set them via the Secrets section in your Space's config UI. Do not set them in .env.
# For Render, create a "secret file" called .env using the Environment tab.
# You can add multiple API keys by separating them with a comma.
# For AWS credentials, separate the access key ID, secret key, and region with a colon.
# For GCP credentials, separate the project ID, client email, region, and private key with a colon.
OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ANTHROPIC_KEY=sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
GOOGLE_AI_KEY=AIzaxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# See `docs/aws-configuration.md` for more information; additional steps may be required to set up AWS.
AWS_CREDENTIALS=myaccesskeyid:mysecretkey:us-east-1,anotheraccesskeyid:anothersecretkey:us-west-2
# See `docs/azure-configuration.md` for more information; additional steps may be required to set up Azure.
AZURE_CREDENTIALS=azure-resource-name:deployment-id:api-key,another-azure-resource-name:another-deployment-id:another-api-key
GCP_CREDENTIALS=project-id:client-email:region:private-key
# With proxy_key gatekeeper, the password users must provide to access the API.
# PROXY_KEY=your-secret-key
# With user_token gatekeeper, the admin password used to manage users.
# ADMIN_KEY=your-very-secret-key
# Restrict access to the admin interface to specific IP addresses, specified
# as a comma-separated list of CIDR ranges.
# ADMIN_WHITELIST=0.0.0.0/0
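# For example (illustrative, using a documentation address range), to allow
# admin access only from a single subnet:
# ADMIN_WHITELIST=203.0.113.0/24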
# With firebase_rtdb gatekeeper storage, the Firebase project credentials.
# FIREBASE_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# FIREBASE_RTDB_URL=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.firebaseio.com
# With prompt logging, the Google Sheets credentials.
# GOOGLE_SHEETS_SPREADSHEET_ID=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# GOOGLE_SHEETS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx