import requests
from llm_server import opts
def _fetch_json(url):
    """GET *url* and return the decoded JSON body, or False on any failure.

    Failures (connection error, timeout, non-JSON body) are deliberately
    swallowed and reported as False — callers treat an unreachable backend
    as "no model running" rather than an error.
    """
    try:
        # timeout added so a hung backend cannot block the caller forever;
        # 10s is a guess — TODO confirm an appropriate value for this deployment.
        response = requests.get(url, timeout=10)
    except Exception:
        return False
    try:
        return response.json()
    except Exception:
        return False


def get_running_model():
    """Return the name of the model the configured backend is serving.

    Dispatches on ``opts.mode``:
      - ``'oobabooga'``: queries ``{backend_url}/api/v1/model`` and returns
        the ``result`` field of the JSON response.
      - ``'hf-textgen'``: queries ``{backend_url}/info`` and returns the
        ``model_id`` field with ``/`` replaced by ``_`` (filesystem/URL-safe).

    Returns:
        str: the model name on success.
        False: if the backend is unreachable, times out, or returns a
        response missing the expected field.

    Raises:
        Exception: if ``opts.mode`` is not a recognized backend mode.
    """
    if opts.mode == 'oobabooga':
        r_json = _fetch_json(f'{opts.backend_url}/api/v1/model')
        if r_json is False:
            return False
        try:
            return r_json['result']
        except Exception:
            # Response was JSON but not the shape we expected.
            return False
    elif opts.mode == 'hf-textgen':
        r_json = _fetch_json(f'{opts.backend_url}/info')
        if r_json is False:
            return False
        try:
            return r_json['model_id'].replace('/', '_')
        except Exception:
            # Response was JSON but not the shape we expected.
            return False
    else:
        # Same exception type as before (bare Exception) so existing
        # callers' handlers still match; message added for diagnosability.
        raise Exception(f'unknown backend mode: {opts.mode!r}')