local-llm-server/llm_server/routes/queue.py

import heapq
import threading
import time
from llm_server.llm.generator import generator
from llm_server.routes.stats import generation_elapsed, generation_elapsed_lock
class PriorityQueue:
    """Thread-safe priority queue; items with a higher `priority` value are dequeued first."""

    def __init__(self):
        self._queue = []
        self._index = 0  # Monotonic tie-breaker so items of equal priority stay in FIFO order.
        self._cv = threading.Condition()

    def put(self, item, priority):
        event = DataEvent()
        with self._cv:
            # heapq is a min-heap, so the priority is negated to pop the highest priority first.
            heapq.heappush(self._queue, (-priority, self._index, item, event))
            self._index += 1
            self._cv.notify()
        return event

    def get(self):
        with self._cv:
            # Block until an item is available.
            while len(self._queue) == 0:
                self._cv.wait()
            return heapq.heappop(self._queue)

    def __len__(self):
        return len(self._queue)
priority_queue = PriorityQueue()


class DataEvent(threading.Event):
    """threading.Event that also carries the generation result back to the waiting request."""

    def __init__(self):
        super().__init__()
        self.data = None
def worker():
    """Pull requests off the queue, run generation, and hand the result back via the DataEvent."""
    while True:
        priority, index, (request_json_body, client_ip, token, parameters), event = priority_queue.get()
        start_time = time.time()

        success, response, error_msg = generator(request_json_body)

        end_time = time.time()
        elapsed_time = end_time - start_time
        with generation_elapsed_lock:
            # Record (timestamp, duration) so the stats route can report generation times.
            generation_elapsed.append((end_time, elapsed_time))

        event.data = (success, response, error_msg)
        event.set()
def start_workers(num_workers: int):
    for _ in range(num_workers):
        threading.Thread(target=worker).start()
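

# Usage sketch (illustrative only, not part of the original module): a request handler
# would enqueue its payload and block on the returned DataEvent until a worker thread
# publishes the result. The variable names and the priority value below are hypothetical.
#
#     event = priority_queue.put((request_json_body, client_ip, token, parameters), priority=1)
#     event.wait()
#     success, response, error_msg = event.data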