From 1a4cb5f78655a59f5ca12e6e0ade1f2ead80c097 Mon Sep 17 00:00:00 2001
From: Cyberes
Date: Sun, 27 Aug 2023 22:24:44 -0600
Subject: [PATCH] reorganize stats page again

---
 llm_server/routes/v1/generate_stats.py | 12 ++++++------
 requirements.txt                       |  3 ++-
 server.py                              |  4 ++--
 3 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/llm_server/routes/v1/generate_stats.py b/llm_server/routes/v1/generate_stats.py
index 782752e..eb3e9d1 100644
--- a/llm_server/routes/v1/generate_stats.py
+++ b/llm_server/routes/v1/generate_stats.py
@@ -72,11 +72,7 @@ def generate_stats():
                 '1_min': SemaphoreCheckerThread.proompters_1_min,
                 '24_hrs': get_distinct_ips_24h(),
             },
-            'proompts': {
-                'processing': active_gen_workers,
-                'queued': proompters_in_queue,
-                'total': get_total_proompts() if opts.show_num_prompts else None,
-            },
+            'proompts_total': get_total_proompts() if opts.show_num_prompts else None,
             'uptime': int((datetime.now() - server_start_time).total_seconds()) if opts.show_uptime else None,
             'average_generation_elapsed_sec': int(gen_time_calc),
             'average_tps': average_tps,
@@ -87,7 +83,11 @@ def generate_stats():
         'endpoints': {
             'blocking': opts.full_client_api,
         },
-        'estimated_wait_sec': int(estimated_wait_sec),
+        'queue': {
+            'processing': active_gen_workers,
+            'queued': proompters_in_queue,
+            'estimated_wait_sec': int(estimated_wait_sec),
+        },
         'timestamp': int(time.time()),
         'config': {
             'gatekeeper': 'none' if opts.auth_required is False else 'token',
diff --git a/requirements.txt b/requirements.txt
index ca4dfdb..9c24864 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,4 +6,5 @@ requests
 tiktoken
 gunicorn
 redis
-gevent
\ No newline at end of file
+gevent
+async-timeout
\ No newline at end of file
diff --git a/server.py b/server.py
index f83f96d..3477ce8 100644
--- a/server.py
+++ b/server.py
@@ -106,12 +106,12 @@ def home():
     else:
         running_model = opts.running_model
 
-    if stats['estimated_wait_sec'] == 0 and stats['stats']['proompts']['processing'] > 0:
+    if stats['queue']['queued'] == 0 and stats['queue']['processing'] > 0:
         # There will be a wait if the queue is empty but prompts are processing, but we don't
         # know how long.
         estimated_wait_sec = f"less than {stats['stats']['average_generation_elapsed_sec']} seconds"
     else:
-        estimated_wait_sec = f"{stats['queue']['estimated_wait_sec']} seconds"
+        estimated_wait_sec = f"{stats['queue']['estimated_wait_sec']} seconds"
 
     if len(config['analytics_tracking_code']):
         analytics_tracking_code = f"
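
For reviewers, a minimal sketch (not part of the patch) of how the reorganized payload is read after this change. It assumes the keys touched in the first hunk sit under a 'stats' sub-dict of the value returned by generate_stats(), as server.py's existing stats['stats'][...] accesses suggest; the variable names are illustrative only.

    # Sketch only: reading the stats dict built in
    # llm_server/routes/v1/generate_stats.py after this reorganization.
    stats = generate_stats()

    # Queue state now lives together under one top-level 'queue' key instead of
    # being split between 'estimated_wait_sec' and stats['stats']['proompts'].
    processing = stats['queue']['processing']
    queued = stats['queue']['queued']
    wait_sec = stats['queue']['estimated_wait_sec']

    # The lifetime prompt count is now a flat 'proompts_total' field; it is
    # None when opts.show_num_prompts is disabled.
    total_proompts = stats['stats']['proompts_total']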