fix recent proompters to work with gunicorn

This commit is contained in:
Cyberes 2023-09-17 19:06:53 -06:00
parent 3c1254d3bf
commit eb3179cfff
3 changed files with 14 additions and 10 deletions

View File

@@ -13,7 +13,6 @@ from llm_server.routes.cache import redis
from llm_server.routes.helpers.client import format_sillytavern_err
from llm_server.routes.helpers.http import require_api_key, validate_json
from llm_server.routes.queue import priority_queue
from llm_server.routes.stats import SemaphoreCheckerThread
DEFAULT_PRIORITY = 9999
@@ -29,7 +28,9 @@ class RequestHandler:
self.backend = get_backend()
self.parameters = None
self.used = False
SemaphoreCheckerThread.recent_prompters[self.client_ip] = time.time()
recent_prompters = redis.get_dict('recent_prompters')
recent_prompters[self.client_ip] = time.time()
redis.set_dict('recent_prompters', recent_prompters)
def get_client_ip(self):
if self.request.headers.get('cf-connecting-ip'):

View File

@@ -83,8 +83,7 @@ def get_active_gen_workers():
class SemaphoreCheckerThread(Thread):
proompters_1_min = 0
recent_prompters = {}
redis.set_dict('recent_prompters', {})
def __init__(self):
Thread.__init__(self)
@@ -93,6 +92,8 @@ class SemaphoreCheckerThread(Thread):
def run(self):
while True:
current_time = time.time()
SemaphoreCheckerThread.recent_prompters = {ip: timestamp for ip, timestamp in SemaphoreCheckerThread.recent_prompters.items() if current_time - timestamp <= 60}
SemaphoreCheckerThread.proompters_1_min = len(SemaphoreCheckerThread.recent_prompters)
recent_prompters = redis.get_dict('recent_prompters')
new_recent_prompters = {ip: timestamp for ip, timestamp in recent_prompters.items() if current_time - timestamp <= 60}
redis.set_dict('recent_prompters', new_recent_prompters)
redis.set('proompters_1_min', len(new_recent_prompters))
time.sleep(1)

View File

@@ -1,8 +1,6 @@
import time
from datetime import datetime
from flask import request
from llm_server import opts
from llm_server.database import get_distinct_ips_24h, sum_column
from llm_server.helpers import deep_sort, round_up_base
@@ -10,7 +8,7 @@ from llm_server.llm.info import get_running_model
from llm_server.netdata import get_power_states
from llm_server.routes.cache import cache, redis
from llm_server.routes.queue import priority_queue
from llm_server.routes.stats import SemaphoreCheckerThread, calculate_avg_gen_time, get_active_gen_workers, get_total_proompts, server_start_time
from llm_server.routes.stats import calculate_avg_gen_time, get_active_gen_workers, get_total_proompts, server_start_time
def calculate_wait_time(gen_time_calc, proompters_in_queue, concurrent_gens, active_gen_workers):
@@ -90,10 +88,14 @@ def generate_stats():
base_client_api = x.decode() if x else None
del x
x = redis.get('proompters_1_min')
proompters_1_min = int(x) if x else None
del x
output = {
'stats': {
'proompters': {
'1_min': SemaphoreCheckerThread.proompters_1_min,
'1_min': proompters_1_min,
'24_hrs': get_distinct_ips_24h(),
},
'proompts_total': get_total_proompts() if opts.show_num_prompts else None,