2023-09-25 09:32:23 -06:00
|
|
|
import traceback
|
|
|
|
|
|
|
|
import requests
|
|
|
|
from flask import jsonify
|
2023-09-12 16:40:09 -06:00
|
|
|
|
2023-09-28 18:40:24 -06:00
|
|
|
from llm_server.custom_redis import ONE_MONTH_SECONDS, flask_cache, redis
|
2023-10-01 14:15:01 -06:00
|
|
|
from . import openai_bp
|
2023-09-12 16:40:09 -06:00
|
|
|
from ..stats import server_start_time
|
|
|
|
from ... import opts
|
2023-10-04 16:29:19 -06:00
|
|
|
from ...cluster.cluster_config import cluster_config, get_a_cluster_backend
|
2023-09-25 09:32:23 -06:00
|
|
|
from ...helpers import jsonify_pretty
|
2023-10-01 14:15:01 -06:00
|
|
|
from ...llm.openai.transform import generate_oai_string
|
2023-09-12 16:40:09 -06:00
|
|
|
|
|
|
|
|
|
|
|
@openai_bp.route('/models', methods=['GET'])
@flask_cache.cached(timeout=60, query_string=True)
def openai_list_models():
    """Serve the model list in OpenAI `/v1/models` format.

    Fetches the upstream OpenAI model list and, when
    `opts.openai_expose_our_model` is set, prepends an entry describing the
    model this middleware is currently running. If no backend model can be
    determined, responds with a JSON error and HTTP 500.
    """
    model_name = cluster_config.get_backend(get_a_cluster_backend()).get('model')
    if not model_name:
        return jsonify({
            'code': 502,
            'msg': 'failed to reach backend',
        }), 500  # return 500 so Cloudflare doesn't intercept us

    running_model = redis.get('running_model', 'ERROR', dtype=str)
    result = {
        "object": "list",
        "data": fetch_openai_models(),
    }
    # TODO: verify this works
    if opts.openai_expose_our_model:
        created_ts = int(server_start_time.timestamp())
        result["data"].insert(0, {
            "id": running_model,
            "object": "model",
            "created": created_ts,
            "owned_by": opts.llm_middleware_name,
            "permission": [
                {
                    "id": running_model,
                    "object": "model_permission",
                    "created": created_ts,
                    "allow_create_engine": False,
                    "allow_sampling": False,
                    "allow_logprobs": False,
                    "allow_search_indices": False,
                    "allow_view": True,
                    "allow_fine_tuning": False,
                    "organization": "*",
                    "group": None,
                    "is_blocking": False
                }
            ],
            "root": None,
            "parent": None
        })
    return jsonify_pretty(result), 200
|
2023-09-24 21:45:30 -06:00
|
|
|
|
|
|
|
|
2023-09-26 22:09:11 -06:00
|
|
|
@flask_cache.memoize(timeout=ONE_MONTH_SECONDS)
def fetch_openai_models():
    """Fetch the model list from the upstream OpenAI API.

    Returns the ``data`` list from ``https://api.openai.com/v1/models`` with
    each permission ID randomized, or an empty list when no API key is
    configured or the request fails for any reason. The result is cached for
    one month via ``flask_cache.memoize``.
    """
    if not opts.openai_api_key:
        return []
    try:
        response = requests.get(
            'https://api.openai.com/v1/models',
            headers={'Authorization': f"Bearer {opts.openai_api_key}"},
            timeout=10,
        )
        # Surface HTTP errors explicitly instead of failing obscurely on .json().
        response.raise_for_status()
        models = response.json()['data']
        # The "modelperm" string appears to be user-specific, so we'll
        # randomize it just to be safe.
        for model in models:
            for permission in model['permission']:
                permission['id'] = f'modelperm-{generate_oai_string(24)}'
        return models
    except Exception:
        # Deliberate best-effort: log and degrade to an empty list. Catching
        # Exception (not a bare `except:`) lets SystemExit/KeyboardInterrupt
        # propagate, which the original bare clause wrongly swallowed.
        traceback.print_exc()
        return []
|