local-llm-server/llm_server/routes/v1/generate_stats.py

import time
from datetime import datetime

from llm_server import opts
from llm_server.llm.info import get_running_model
from llm_server.routes.cache import redis
from llm_server.routes.queue import priority_queue
from llm_server.routes.stats import SemaphoreCheckerThread, calculate_avg_gen_time, get_active_gen_workers, get_total_proompts, server_start_time


def generate_stats():
    model_list, error = get_running_model()  # will return False when the fetch fails
    online = not isinstance(model_list, bool)

    # Old in-memory wait estimator, kept here for reference:
    # t = elapsed_times.copy()  # copy since we do multiple operations and don't want it to change
    # if len(t) == 0:
    #     estimated_wait = 0
    # else:
    #     waits = [elapsed for end, elapsed in t]
    #     estimated_wait = int(sum(waits) / len(waits))

    # Requests waiting in the priority queue plus those currently being generated.
    proompters_in_queue = len(priority_queue) + get_active_gen_workers()
    average_tps = float(redis.get('average_tps'))

    if opts.average_generation_time_mode == 'database':
        # Estimate from long-term averages stored in Redis: seconds per request is
        # (average output tokens / average tokens per second), multiplied by the
        # queue depth and divided across the concurrent generation workers.
        average_generation_time = int(float(redis.get('average_generation_elapsed_sec')))
        average_output_tokens = int(float(redis.get('average_output_tokens')))
        estimated_wait_sec = int(((average_output_tokens / average_tps) * proompters_in_queue) / opts.concurrent_gens)
    elif opts.average_generation_time_mode == 'minute':
        # Estimate from the rolling average generation time over the last minute.
        average_generation_time = int(calculate_avg_gen_time())
        estimated_wait_sec = int((average_generation_time * proompters_in_queue) / opts.concurrent_gens)
    else:
        raise Exception(f'Invalid average_generation_time_mode: {opts.average_generation_time_mode}')
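
    # Illustrative numbers (not from the source) to sanity-check the 'database'
    # formula above: with average_output_tokens=200, average_tps=10.0,
    # proompters_in_queue=6, and opts.concurrent_gens=3, each request takes
    # roughly 200 / 10 = 20 s, so the estimate is (20 * 6) / 3 = 40 seconds.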

    # TODO: https://stackoverflow.com/questions/22721579/sorting-a-nested-ordereddict-by-key-recursively
    return {
        'stats': {
            'proompts_in_queue': proompters_in_queue,
            'proompters_1_min': SemaphoreCheckerThread.proompters_1_min,
            'total_proompts': get_total_proompts() if opts.show_num_prompts else None,
            'uptime': int((datetime.now() - server_start_time).total_seconds()) if opts.show_uptime else None,
            'average_generation_elapsed_sec': average_generation_time,
            'average_tps': average_tps,
        },
        'online': online,
        'mode': opts.mode,
        'model': model_list,
        'endpoints': {
            'blocking': opts.full_client_api,
        },
        'estimated_wait_sec': estimated_wait_sec,
        'timestamp': int(time.time()),
        'openaiKeys': '',
        'anthropicKeys': '',
        'config': {
            'gatekeeper': 'none' if opts.auth_required is False else 'token',
            'context_size': opts.context_size,
            'queue_size': opts.concurrent_gens,
        },
    }
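
# A minimal usage sketch (an assumption, not part of this file): in this repo
# the stats dict is presumably served by a Flask route under routes/v1 that
# serializes it to JSON. The blueprint name and route path below are
# hypothetical, shown only to illustrate how generate_stats() is consumed.
#
#     from flask import Blueprint, jsonify
#
#     bp = Blueprint('v1', __name__)
#
#     @bp.route('/api/v1/stats', methods=['GET'])
#     def stats():
#         return jsonify(generate_stats()), 200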