local-llm-server/llm_server/routes/v1/generate_stream.py

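"""Websocket streaming endpoint for the v1 generate API (vLLM backend)."""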
import json
import time

from flask import request

from ..helpers.http import validate_json
from ..ooba_request_handler import OobaRequestHandler
from ... import opts
from ...database.database import log_prompt
from ...llm.generator import generator
from ...llm.vllm import tokenize
from ...stream import sock


# TODO: have workers process streaming requests

@sock.route('/api/v1/stream')  # TODO: use blueprint route???
def stream(ws):
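    """Stream generated text to a websocket client.

    Each received message is expected to be a JSON body containing at least a
    'prompt' key. The prompt is forwarded to the backend and the generated text
    is relayed back as 'text_stream' events; a successful generation ends with
    a 'stream_end' event.
    """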
    if not opts.enable_streaming:
        # TODO: return a formatted ST error message
        return 'disabled', 401

    message_num = 0
    while ws.connected:
        message = ws.receive()
        request_valid_json, request_json_body = validate_json(message)
        if not request_valid_json or not request_json_body.get('prompt'):
            ws.send(json.dumps({
                'event': 'text_stream',
                'message_num': message_num,
                'text': 'Invalid JSON'
            }))
            message_num += 1
        else:
            if opts.mode != 'vllm':
                # TODO: implement other backends
                raise NotImplementedError

            handler = OobaRequestHandler(request, request_json_body)
            generated_text = ''
            input_prompt = None
            response_status_code = 0
            start_time = time.time()

            request_valid, invalid_response = handler.validate_request()
            if not request_valid:
                ws.send(json.dumps({
                    'event': 'text_stream',
                    'message_num': message_num,
                    'text': invalid_response
                }))
            else:
                input_prompt = request_json_body['prompt']
                msg_to_backend = {
                    **handler.parameters,
                    'prompt': input_prompt,
                    'stream': True,
                }
                response = generator(msg_to_backend)

                # Be extra careful when getting attributes from the response object
                try:
                    response_status_code = response.status_code
                except Exception:
                    response_status_code = 0

                # The streaming backend separates each JSON message with a null byte,
                # so accumulate bytes until one arrives.
                partial_response = b''
                for chunk in response.iter_content(chunk_size=1):
                    partial_response += chunk
                    if partial_response.endswith(b'\x00'):
                        json_str = partial_response[:-1].decode()  # Remove the null character and decode the byte string to a string
                        json_obj = json.loads(json_str)
                        if len(json_obj['text'][0].split(input_prompt + generated_text)) > 2:
                            # The prompt plus previously sent text should appear exactly once as a
                            # prefix of the returned text; if the split is ambiguous, print the raw
                            # message and skip it.
                            print(json_obj)
                            continue

                        # Strip the prompt and the text already sent to get only the new portion.
                        new = json_obj['text'][0].split(input_prompt + generated_text)[1]
                        ws.send(json.dumps({
                            'event': 'text_stream',
                            'message_num': message_num,
                            'text': new
                        }))
                        message_num += 1
                        generated_text = generated_text + new
                        partial_response = b''  # Reset the partial response

                    # If there is no more data, break the loop
                    if not chunk:
                        break

                response.close()

                end_time = time.time()
                elapsed_time = end_time - start_time
                generated_tokens = tokenize(generated_text)
                log_prompt(handler.client_ip, handler.token, input_prompt, generated_text, elapsed_time, handler.parameters, dict(request.headers), response_status_code, request.url, response_tokens=generated_tokens)

                ws.send(json.dumps({
                    'event': 'stream_end',
                    'message_num': message_num
                }))