update current model when we generate_stats()
parent ec3fe2c2ac
commit 01b8442b95
@@ -0,0 +1 @@
+# TODO: add an about page

@@ -0,0 +1 @@
+# TODO: explain the API, how to use it, the endpoints
@@ -13,11 +13,12 @@ from llm_server.routes.stats import SemaphoreCheckerThread, calculate_avg_gen_ti
 # TODO: have routes/__init__.py point to the latest API version generate_stats()
 
 def generate_stats():
-    model_list, error = get_running_model() # will return False when the fetch fails
-    if isinstance(model_list, bool):
+    model_name, error = get_running_model() # will return False when the fetch fails
+    if isinstance(model_name, bool):
         online = False
     else:
         online = True
+        opts.running_model = model_name
 
     # t = elapsed_times.copy() # copy since we do multiple operations and don't want it to change
     # if len(t) == 0:
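In short: after this commit, generate_stats() writes the fetched model name back into the shared opts module whenever the backend fetch succeeds, so other parts of the server see the current model without re-querying. Below is a minimal, self-contained sketch of that control flow; the SimpleNamespace stand-in for opts and the stubbed get_running_model() are illustrative only, not the project's real implementations.

from types import SimpleNamespace

# Illustrative stand-ins; the real project has its own opts module and a
# get_running_model() helper that queries the backend.
opts = SimpleNamespace(running_model='none')

def get_running_model():
    # The real helper returns (model_name, None) on success or (False, error) on failure.
    return 'example-13b', None

def generate_stats():
    model_name, error = get_running_model()  # will return False when the fetch fails
    if isinstance(model_name, bool):
        online = False
    else:
        online = True
        opts.running_model = model_name  # cache the current model for other routes
    return {'online': online, 'model': opts.running_model}

print(generate_stats())  # {'online': True, 'model': 'example-13b'}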
@@ -59,7 +60,7 @@ def generate_stats():
             'gatekeeper': 'none' if opts.auth_required is False else 'token',
             'context_size': opts.context_size,
             'queue_size': opts.concurrent_gens,
-            'model': model_list,
+            'model': model_name,
             'mode': opts.mode,
         },
         'keys': {
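The second hunk only renames the variable feeding the stats payload; the shape of the block it builds is unchanged. For orientation, a sketch of that block with placeholder values (the values shown are illustrative, not taken from this commit):

config = {
    'gatekeeper': 'none',       # 'token' when opts.auth_required is True
    'context_size': 2048,       # opts.context_size
    'queue_size': 1,            # opts.concurrent_gens
    'model': 'example-13b',     # now model_name rather than the old model_list
    'mode': 'api',              # opts.mode
}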