From 1a7f22ec55459ec449fa0cb38929f8c98e8878ed Mon Sep 17 00:00:00 2001
From: Cyberes
Date: Tue, 3 Oct 2023 20:47:37 -0600
Subject: [PATCH] adjust again

---
 llm_server/routes/v1/generate_stats.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/llm_server/routes/v1/generate_stats.py b/llm_server/routes/v1/generate_stats.py
index c31f0f6..fccd3d1 100644
--- a/llm_server/routes/v1/generate_stats.py
+++ b/llm_server/routes/v1/generate_stats.py
@@ -53,7 +53,7 @@ def generate_stats(regen: bool = False):
             'openaiKeys': '∞',
             'anthropicKeys': '∞',
         },
-        'backend_info': redis.get_dict('backend_info') if opts.show_backend_info else None,
+        'backends': {},
     }
 
     # TODO: have get_model_choices() return all the info so we don't have to loop over the backends ourself
@@ -64,7 +64,7 @@ def generate_stats(regen: bool = False):
         if not backend_info['online']:
             continue
         backend_uptime = int((datetime.now() - datetime.fromtimestamp(backend_info['startup_time'])).total_seconds()) if opts.show_uptime else None
-        output['backend_info'][backend_info['hash']] = {
+        output['backends'][backend_info['hash']] = {
             'uptime': backend_uptime,
             'max_tokens': backend_info['model_config']['max_position_embeddings'],
             'model': backend_info['model'],
@@ -72,8 +72,6 @@ def generate_stats(regen: bool = False):
             'nvidia': backend_info['nvidia'],
             'priority': backend_info['priority'],
         }
-    else:
-        output['backend_info'] = {}
 
     result = deep_sort(output)
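
For context, a minimal sketch (not part of the patch) of the per-backend entry this change now builds under output['backends'], keyed by the backend's hash. The field names are taken from the diff hunks; the standalone backend_info record, show_uptime flag, and all sample values are illustrative assumptions, and the patch context elides at least one field between 'model' and 'nvidia', which is left out here:

    from datetime import datetime

    # Illustrative stand-in for the backend record the real code reads
    # from its cluster state; every value below is made up for the sketch.
    backend_info = {
        'hash': 'abc123',
        'online': True,
        'startup_time': 1696400000,  # epoch seconds
        'model': 'some-model',
        'model_config': {'max_position_embeddings': 4096},
        'nvidia': {},
        'priority': 1,
    }
    show_uptime = True  # stands in for opts.show_uptime

    output = {'backends': {}}
    if backend_info['online']:
        # Same uptime computation as the patched loop: seconds since startup,
        # or None when uptime display is disabled.
        backend_uptime = int((datetime.now() - datetime.fromtimestamp(backend_info['startup_time'])).total_seconds()) if show_uptime else None
        output['backends'][backend_info['hash']] = {
            'uptime': backend_uptime,
            'max_tokens': backend_info['model_config']['max_position_embeddings'],
            'model': backend_info['model'],
            # (the patch context elides at least one field here)
            'nvidia': backend_info['nvidia'],
            'priority': backend_info['priority'],
        }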