improves config handling
This commit is contained in:
parent
11308b2baa
commit
c51a0ef94d
11
.env.example
11
.env.example
|
@ -1,9 +1,5 @@
|
|||
# Copy this file to .env and fill in the values.
|
||||
|
||||
# Uncomment the following line and replace the value with your own secret key
|
||||
# to control access to the proxy server
|
||||
# PROXY_KEY=your-secret-key
|
||||
|
||||
# Set your OpenAI API key below.
|
||||
OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
|
||||
|
@ -18,3 +14,10 @@ OPENAI_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
|||
# Encoded in base-64, this would look like:
|
||||
# OPENAI_KEYS=WwogeyAia2V5IjogInlvdXItb3BlbmFpLWtleS0xIiwgImlzVHJpYWwiOiB0cnVlLCAiaXNHcHQ0IjogZmFsc2UgfSwKIHsgImtleSI6ICJ5b3VyLW9wZW5haS1rZXktMiIsICJpc1RyaWFsIjogZmFsc2UsICJpc0dwdDQiOiBmYWxzZSB9LAogeyAia2V5IjogInlvdXItb3BlbmFpLWtleS0zIiwgImlzVHJpYWwiOiBmYWxzZSwgImlzR3B0NCI6IHRydWUgfQpd
|
||||
|
||||
# Optional settings (please see config.ts for more details)
|
||||
# PORT=7860
|
||||
# PROXY_KEY=your-secret-key
|
||||
# MODEL_RATE_LIMIT=2
|
||||
# MAX_OUTPUT_TOKENS=256
|
||||
# LOG_LEVEL=info
|
||||
# LOG_PROMPTS=false
|
||||
|
|
|
@ -0,0 +1,59 @@
|
|||
import dotenv from "dotenv";
|
||||
dotenv.config();
|
||||
|
||||
/** Shape of the proxy's runtime configuration, resolved from environment variables. */
type Config = {
  /** The port the proxy server will listen on. */
  port: number;
  /** OpenAI API key, either a single key or a base64-encoded JSON array of key configs. */
  openaiKey?: string;
  /** Proxy key. If set, requests must provide this key in the Authorization header to use the proxy. */
  proxyKey?: string;
  /** Per-IP limit for requests per minute to OpenAI's completions endpoint. */
  modelRateLimit: number; // TODO
  /** Max number of tokens to generate. Requests which specify a higher value will be rewritten to use this value. */
  maxOutputTokens: number; // TODO
  /** Logging threshold. */
  logLevel?: "debug" | "info" | "warn" | "error";
  /** Whether prompts and responses should be logged. */
  logPrompts?: boolean; // TODO
};
|
||||
|
||||
/**
 * Resolved proxy configuration. Each field is read from the environment via
 * getEnvWithDefault, falling back to the default shown here when unset.
 * NOTE(review): empty-string defaults for openaiKey/proxyKey are falsy on
 * purpose — downstream checks (e.g. `if (!PROXY_KEY)`) treat them as "not set".
 */
export const config: Config = {
  port: getEnvWithDefault("PORT", 7860),
  openaiKey: getEnvWithDefault("OPENAI_KEY", ""),
  proxyKey: getEnvWithDefault("PROXY_KEY", ""),
  modelRateLimit: getEnvWithDefault("MODEL_RATE_LIMIT", 2),
  maxOutputTokens: getEnvWithDefault("MAX_OUTPUT_TOKENS", 256),
  logLevel: getEnvWithDefault("LOG_LEVEL", "info"),
  logPrompts: getEnvWithDefault("LOG_PROMPTS", false),
} as const;
|
||||
|
||||
/** Config keys whose values are masked by listConfig before being displayed. */
export const SENSITIVE_KEYS: (keyof Config)[] = ["proxyKey", "openaiKey"];
// Typed wrapper over Object.keys; the cast is safe here because `config` is a
// plain object literal with no keys beyond those declared on Config.
const getKeys = Object.keys as <T extends object>(obj: T) => Array<keyof T>;
|
||||
export function listConfig(): Record<string, string> {
|
||||
const result: Record<string, string> = {};
|
||||
for (const key of getKeys(config)) {
|
||||
const value = config[key]?.toString() || "";
|
||||
if (value && SENSITIVE_KEYS.includes(key)) {
|
||||
result[key] = "********";
|
||||
} else {
|
||||
result[key] = value;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
function getEnvWithDefault<T>(name: string, defaultValue: T): T {
|
||||
const value = process.env[name];
|
||||
if (value === undefined) {
|
||||
return defaultValue;
|
||||
}
|
||||
try {
|
||||
if (name === "OPENAI_KEY") {
|
||||
return value as unknown as T;
|
||||
}
|
||||
return JSON.parse(value) as T;
|
||||
} catch (err) {
|
||||
return value as unknown as T;
|
||||
}
|
||||
}
|
|
@ -1,5 +1,6 @@
|
|||
import { Request, Response } from "express";
|
||||
import showdown from "showdown";
|
||||
import { listConfig } from "./config";
|
||||
import { keys } from "./keys";
|
||||
|
||||
export const handleInfoPage = (req: Request, res: Response) => {
|
||||
|
@ -13,23 +14,23 @@ export const handleInfoPage = (req: Request, res: Response) => {
|
|||
function getInfoPageHtml(host: string) {
|
||||
const keylist = keys.list();
|
||||
const info = {
|
||||
message: "OpenAI Reverse Proxy",
|
||||
uptime: process.uptime(),
|
||||
timestamp: Date.now(),
|
||||
baseUrl: host,
|
||||
kobold: host + "/proxy/kobold" + " (not yet implemented)",
|
||||
openai: host + "/proxy/openai",
|
||||
proompts: keylist.reduce((acc, k) => acc + k.promptCount, 0),
|
||||
keys: {
|
||||
all: keylist.length,
|
||||
active: keylist.filter((k) => !k.isDisabled).length,
|
||||
trial: keylist.filter((k) => k.isTrial).length,
|
||||
gpt4: keylist.filter((k) => k.isGpt4).length,
|
||||
proompts: keylist.reduce((acc, k) => acc + k.promptCount, 0),
|
||||
},
|
||||
config: listConfig(),
|
||||
};
|
||||
|
||||
const readme = require("fs").readFileSync("README.md", "utf8");
|
||||
const readmeBody = readme.split("---")[2];
|
||||
const readmeBody = readme.split("---")[2] || readme;
|
||||
const converter = new showdown.Converter();
|
||||
const html = converter.makeHtml(readmeBody);
|
||||
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
/* Manages OpenAI API keys. Tracks usage, disables expired keys, and provides
|
||||
round-robin access to keys. Keys are stored in the OPENAI_KEY environment
|
||||
variable, either as a single key, or a base64-encoded JSON array of keys.*/
|
||||
import { logger } from "./logger";
|
||||
import crypto from "crypto";
|
||||
import { config } from "./config";
|
||||
import { logger } from "./logger";
|
||||
|
||||
/** Represents a key stored in the OPENAI_KEY environment variable. */
|
||||
type KeySchema = {
|
||||
|
@ -37,7 +38,7 @@ export type Key = KeySchema & {
|
|||
const keyPool: Key[] = [];
|
||||
|
||||
function init() {
|
||||
const keyString = process.env.OPENAI_KEY;
|
||||
const keyString = config.openaiKey;
|
||||
if (!keyString?.trim()) {
|
||||
throw new Error("OPENAI_KEY environment variable is not set");
|
||||
}
|
||||
|
|
|
@ -1,3 +1,6 @@
|
|||
import pino from "pino";
|
||||
import { config } from "./config";
|
||||
|
||||
export const logger = pino();
|
||||
export const logger = pino({
|
||||
level: config.logLevel,
|
||||
});
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
import type { Request, Response, NextFunction } from "express";
|
||||
import { config } from "../config";
|
||||
|
||||
const PROXY_KEY = process.env.PROXY_KEY;
|
||||
const PROXY_KEY = config.proxyKey;
|
||||
|
||||
export const auth = (req: Request, res: Response, next: NextFunction) => {
|
||||
if (!PROXY_KEY) {
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
import dotenv from "dotenv";
|
||||
dotenv.config();
|
||||
import { config } from "./config";
|
||||
import express from "express";
|
||||
import cors from "cors";
|
||||
import pinoHttp from "pino-http";
|
||||
|
@ -8,7 +7,7 @@ import { keys } from "./keys";
|
|||
import { proxyRouter } from "./proxy/routes";
|
||||
import { handleInfoPage } from "./info-page";
|
||||
|
||||
const PORT = process.env.PORT || 7860;
|
||||
const PORT = config.port;
|
||||
|
||||
const app = express();
|
||||
// middleware
|
||||
|
|
Loading…
Reference in New Issue