local-llm-server/llm_server/cluster/worker.py


import time
from threading import Thread

from llm_server.cluster.backend import test_backend
from llm_server.cluster.cluster_config import cluster_config
from llm_server.cluster.stores import redis_running_models


def cluster_worker():
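    # Health-check every backend in the cluster on a fixed 15-second cycle.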
    counter = 0
    while True:
        test_prompt = False
        if counter % 4 == 0:
            # Only send a full test prompt every fourth pass, i.e. roughly
            # every 60 seconds given the 15-second sleep below.
            test_prompt = True
        threads = []
        for n, v in cluster_config.all().items():
            thread = Thread(target=check_backend, args=(n, v, test_prompt))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        time.sleep(15)
        counter += 1


def check_backend(n, v, test_prompt):
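    # Probe a single backend and sync its status into the cluster config.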
    online, backend_info = test_backend(v['backend_url'], test_prompt=test_prompt)
    # purge_backend_from_running_models(n)
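    # If the backend answered, record everything it reported and register it
    # under the model it is serving; otherwise drop it from every model set.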
    if online:
        running_model = backend_info['model']
        for key, value in backend_info.items():
            cluster_config.set_backend_value(n, key, value)
        redis_running_models.sadd(running_model, n)
    else:
        for model in redis_running_models.keys():
            redis_running_models.srem(model, n)
        # redis_running_models.srem(backend_info['model'], n)
        # backend_cycler_store.lrem(backend_info['model'], 1, n)
    cluster_config.set_backend_value(n, 'online', online)
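
# Usage note (hypothetical sketch -- the actual launch site is elsewhere in
# the project): cluster_worker() loops forever, so callers are expected to
# start it in a background thread, e.g.:
#
#     from threading import Thread
#     Thread(target=cluster_worker, daemon=True).start()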