import logging
import time

from llm_server.cluster.cluster_config import cluster_config
from llm_server.custom_redis import redis
from llm_server.routes.queue import priority_queue


# Dedicated logger for the background queue-status printer.
logger = logging.getLogger('console_printer')

# Configure the handler only once, even if this module is imported repeatedly
# (getLogger returns the same logger object, which would otherwise accumulate
# duplicate handlers and emit each line multiple times).
if not logger.handlers:
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)s: %(levelname)s:%(name)s - %(message)s")
    handler.setFormatter(formatter)
    logger.addHandler(handler)


def console_printer():
    """Periodically log request-queue and backend status to the console.

    Runs forever (one status line roughly every 2 seconds); intended to be
    started in a daemon/background thread. Never returns.
    """
    # Let the rest of the server finish starting up before the first read.
    time.sleep(3)
    while True:
        # Guard each iteration: a transient Redis/config hiccup must not
        # kill the printer thread for the remaining lifetime of the server.
        try:
            # Backends always start with "http", so this glob matches only the
            # per-backend active-worker counters.
            processing = redis.keys('active_gen_workers:http*')
            # Total requests currently being processed across all backends.
            processing_count = sum(redis.get(k, default=0, dtype=int) for k in processing)
            backends = [k for k, v in cluster_config.all().items() if v['online']]
            activity = priority_queue.activity()

            # Active Workers and Processing should read the same. If not, that's an issue.
            logger.info(f'REQUEST QUEUE -> Active Workers: {len([i for i in activity if i[1]])} | Processing: {processing_count} | Queued: {len(priority_queue)} | Backends Online: {len(backends)}')
        except Exception:
            logger.exception('console_printer iteration failed')
        time.sleep(2)
|