update current model when we generate_stats()

This commit is contained in:
Cyberes 2023-08-24 21:10:00 -06:00
parent ec3fe2c2ac
commit 01b8442b95
4 changed files with 6 additions and 3 deletions

View File

View File

@ -0,0 +1 @@
# TODO: add an about page

1
llm_server/pages/api.py Normal file
View File

@ -0,0 +1 @@
# TODO: explain the API, how to use it, the endpoints

View File

@ -13,11 +13,12 @@ from llm_server.routes.stats import SemaphoreCheckerThread, calculate_avg_gen_ti
  # TODO: have routes/__init__.py point to the latest API version
  def generate_stats():
-     model_list, error = get_running_model()  # will return False when the fetch fails
-     if isinstance(model_list, bool):
+     model_name, error = get_running_model()  # will return False when the fetch fails
+     if isinstance(model_name, bool):
          online = False
      else:
          online = True
+     opts.running_model = model_name
      # t = elapsed_times.copy()  # copy since we do multiple operations and don't want it to change
      # if len(t) == 0:
@ -59,7 +60,7 @@ def generate_stats():
          'gatekeeper': 'none' if opts.auth_required is False else 'token',
          'context_size': opts.context_size,
          'queue_size': opts.concurrent_gens,
-         'model': model_list,
+         'model': model_name,
          'mode': opts.mode,
      },
      'keys': {