import requests

from llm_server import opts


def get_running_model():
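    """Return the name of the model currently loaded on the backend.

    Returns a (model_name, None) tuple on success, or (False, exception) if the
    backend request fails. Raises an exception for an unrecognized opts.mode.
    """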
    # TODO: cache the results for 1 min so we don't have to keep calling the backend
    # TODO: only use one try/catch
    if opts.mode == 'oobabooga':
        try:
            backend_response = requests.get(f'{opts.backend_url}/api/v1/model', timeout=opts.backend_request_timeout, verify=opts.verify_ssl)
            r_json = backend_response.json()
            return r_json['result'], None
        except Exception as e:
            return False, e
    elif opts.mode == 'vllm':
        try:
            backend_response = requests.get(f'{opts.backend_url}/model', timeout=opts.backend_request_timeout, verify=opts.verify_ssl)
            r_json = backend_response.json()
            return r_json['model'], None
        except Exception as e:
            return False, e
    else:
        raise Exception(f'Unknown backend mode: {opts.mode}')