import requests

from llm_server.config.global_config import GlobalConfig


def get_running_model(backend_url: str, mode: str):
    """Ask the backend which model it is currently serving.

    Returns (model_name, None) on success or (False, exception) on failure.
    """
    if mode == 'ooba':
        try:
            # text-generation-webui ("ooba") reports the loaded model at /api/v1/model.
            backend_response = requests.get(f'{backend_url}/api/v1/model', timeout=GlobalConfig.get().backend_request_timeout, verify=GlobalConfig.get().verify_ssl)
            r_json = backend_response.json()
            return r_json['result'], None
        except Exception as e:
            return False, e
    elif mode == 'vllm':
        try:
            # The vLLM backend reports the loaded model at /model.
            backend_response = requests.get(f'{backend_url}/model', timeout=GlobalConfig.get().backend_request_timeout, verify=GlobalConfig.get().verify_ssl)
            r_json = backend_response.json()
            return r_json['model'], None
        except Exception as e:
            return False, e
    else:
        raise Exception(f'Unknown backend mode: {mode}')
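# Example usage (sketch; the backend URL below is an assumption for illustration,
# not something this module defines):
#   model, err = get_running_model('http://127.0.0.1:8000', 'vllm')
#   if model is False:
#       print(f'Failed to fetch running model: {err}')
#   else:
#       print(f'Backend is serving: {model}')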
def get_info(backend_url: str, mode: str):
    """Fetch the backend's info/metadata as a dict. Returns {} if unavailable."""
    if mode == 'ooba':
        # Not implemented for text-generation-webui backends.
        return {}
    elif mode == 'vllm':
        try:
            # The vLLM backend exposes its metadata at /info.
            r = requests.get(f'{backend_url}/info', verify=GlobalConfig.get().verify_ssl, timeout=GlobalConfig.get().backend_request_timeout)
            j = r.json()
        except Exception:
            # Treat any network or JSON parse error as "no info available".
            return {}
        return j
    else:
        raise Exception(f'Unknown backend mode: {mode}')
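# Example usage (sketch; the URL, and the keys present in the returned dict,
# are assumptions for illustration):
#   info = get_info('http://127.0.0.1:8000', 'vllm')
#   print(info)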