38 lines
1.1 KiB
Python
38 lines
1.1 KiB
Python
import requests
|
|
|
|
from llm_server import opts
|
|
|
|
|
|
def get_running_model(backend_url: str, mode: str):
    """Ask a backend server which model it is currently running.

    :param backend_url: Base URL of the backend (no trailing slash expected).
    :param mode: Backend type; one of ``'ooba'`` or ``'vllm'``.
    :return: ``(model_name, None)`` on success, or ``(False, exception)``
             when the request or response parsing fails.
    :raises ValueError: If ``mode`` is not a recognized backend type.
    """
    # Each backend exposes the current model at a different endpoint and
    # under a different JSON key; everything else is identical, so dispatch
    # through a table instead of duplicating the request logic per mode.
    endpoints = {
        'ooba': ('/api/v1/model', 'result'),
        'vllm': ('/model', 'model'),
    }
    if mode not in endpoints:
        raise ValueError(f'unknown backend mode: {mode}')
    path, key = endpoints[mode]
    try:
        backend_response = requests.get(f'{backend_url}{path}', timeout=opts.backend_request_timeout, verify=opts.verify_ssl)
        r_json = backend_response.json()
        return r_json[key], None
    except Exception as e:
        # Connection errors, non-JSON bodies, and missing keys are all
        # reported back to the caller rather than raised.
        return False, e
|
|
|
|
|
|
def get_info(backend_url: str, mode: str):
    """Fetch backend metadata from its info endpoint.

    :param backend_url: Base URL of the backend (no trailing slash expected).
    :param mode: Backend type; one of ``'ooba'`` or ``'vllm'``.
    :return: Parsed JSON dict from the backend, or ``{}`` when the backend
             has no info endpoint ('ooba') or the request fails.
    :raises ValueError: If ``mode`` is not a recognized backend type.
    """
    if mode == 'ooba':
        # The ooba backend does not expose an info endpoint.
        return {}
    elif mode == 'vllm':
        try:
            r = requests.get(f'{backend_url}/info', verify=opts.verify_ssl, timeout=opts.backend_request_timeout)
            return r.json()
        except Exception:
            # Best-effort: any connection or parse failure means "no info".
            return {}
    else:
        raise ValueError(f'unknown backend mode: {mode}')
|