import time
import traceback

from llm_server.cluster.backend import get_running_models
from llm_server.cluster.cluster_config import cluster_config
from llm_server.custom_redis import redis
from llm_server.logging import create_logger
from llm_server.routes.queue import priority_queue


def console_printer():
    """Log a one-line cluster status summary every 10 seconds, forever.

    Each line reports: active workers (per the priority queue's activity
    list), requests currently being processed (per Redis counters), queued
    requests (summed across running models), and the number of online
    backends. Intended to run as a long-lived background worker; it never
    returns.
    """
    logger = create_logger('console_printer')
    # Give the rest of the server a moment to start up before the first report.
    time.sleep(3)
    while True:
        try:
            # Backend keys always start with "http", so this pattern matches
            # only the per-backend active-generation-worker counters.
            processing = redis.keys('active_gen_workers:http*')
            processing_count = sum(redis.get(k, default=0, dtype=int) for k in processing)

            backends = [k for k, v in cluster_config.all().items() if v['online']]
            activity = priority_queue.activity()

            # Calculate the queue size the same way it's done on the stats.
            queue_size = sum(priority_queue.len(model) for model in get_running_models())

            # Active Workers and Processing should read the same. If not, that's an issue.
            logger.info(f'Active Workers: {len([i for i in activity if (i[1] and i[1] != "waiting...")])} | Processing: {processing_count} | Queued: {queue_size} | Backends Online: {len(backends)}')
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt and could block clean shutdown. Transient
            # errors (e.g. a Redis hiccup) are logged and the loop continues.
            logger.error(traceback.format_exc())
        time.sleep(10)
|