This repository has been archived on 2024-10-27. You can view files and clone it, but cannot push or open issues or pull requests.
local-llm-server/llm_server/workers/printer.py

31 lines
1.1 KiB
Python
Raw Normal View History

import logging
import time
2023-09-30 19:41:50 -06:00
from llm_server.cluster.cluster_config import cluster_config
from llm_server.custom_redis import redis
2023-09-28 03:44:30 -06:00
from llm_server.routes.queue import priority_queue
# Dedicated console logger for the queue printer. Configuration is guarded so
# that re-importing this module never attaches a duplicate handler (which
# would double-print every line).
logger = logging.getLogger('console_printer')
if not logger.handlers:
    logger.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(logging.Formatter("%(asctime)s: %(levelname)s:%(name)s - %(message)s"))
    logger.addHandler(stream_handler)
def console_printer():
    """Periodically log request-queue statistics to the console.

    Runs forever as a background worker: every 3 seconds it logs how many
    generations are in flight across all backends, how many requests are
    queued, and how many backends are online, then dumps the queue contents.
    Never returns.
    """
    # Short grace period so the rest of the server can finish starting up
    # before the first status line is printed.
    time.sleep(3)
    while True:
        # Backend URLs always start with "http", so this glob matches only
        # the per-backend active-generation counters.
        processing = redis.keys('active_gen_workers:http*')
        # Sum in-flight generations across every backend; a missing counter
        # reads as 0, and sum() over an empty key list is simply 0, so no
        # emptiness guard is needed.
        processing_count = sum(redis.get(k, default=0, dtype=int) for k in processing)
        backends = [k for k, v in cluster_config.all().items() if v['online']]
        # Lazy %-style args: formatting cost is only paid if the record is
        # actually emitted. Message text is unchanged.
        logger.info('REQUEST QUEUE -> Processing: %s | Queued: %s | Backends Online: %s',
                    processing_count, len(priority_queue), len(backends))
        priority_queue.print_all_items()
        print('============================')
        time.sleep(3)