import json
import time
import traceback

from flask import request

from . import bp
from ..helpers.http import require_api_key, validate_json
from ..ooba_request_handler import OobaRequestHandler
from ..queue import priority_queue
from ... import opts
from ...custom_redis import redis
from ...database.log_to_db import log_to_db
from ...llm.generator import generator
from ...sock import sock
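
# Streaming protocol implemented below: the client connects to one of the websocket routes and
# sends a single JSON message containing at least a 'prompt' plus any sampling parameters. The
# server replies with a sequence of
#     {'event': 'text_stream', 'message_num': <n>, 'text': '<newly generated text>'}
# messages, followed by a terminating
#     {'event': 'stream_end', 'message_num': <n>}
# message.
#
# A minimal client sketch (illustration only: it assumes the third-party `websocket-client`
# package, the host/port/URL prefix are hypothetical and depend on how the blueprint is mounted,
# and authentication via require_api_key() is omitted):
#
#     import json
#     from websocket import create_connection
#
#     ws = create_connection('ws://localhost:5000/api/v1/stream')
#     ws.send(json.dumps({'prompt': 'Once upon a time', 'max_new_tokens': 100}))
#     while True:
#         msg = json.loads(ws.recv())
#         if msg['event'] == 'stream_end':
#             break
#         print(msg['text'], end='', flush=True)
#     ws.close()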

# Stacking @sock.route() on top of @bp.route() raises a TypeError on the /v1/stream endpoint,
# so the HTTP and websocket routes are split into separate functions below.
@bp.route('/v1/stream')
@bp.route('/<model_name>/v1/stream')
def stream(model_name=None):
    return 'This is a websocket endpoint.', 400


@sock.route('/v1/stream', bp=bp)
def stream_without_model(ws):
    do_stream(ws, model_name=None)


@sock.route('/<model_name>/v1/stream', bp=bp)
def stream_with_model(ws, model_name=None):
    do_stream(ws, model_name)


def do_stream(ws, model_name):
    try:
        def send_err_and_quit(quitting_err_msg):
            # Relies on closure variables (handler, input_prompt, r_headers, r_url,
            # response_status_code) that are assigned later in do_stream, so it must only be
            # called once those exist.
            ws.send(json.dumps({
                'event': 'text_stream',
                'message_num': 0,
                'text': quitting_err_msg
            }))
            ws.send(json.dumps({
                'event': 'stream_end',
                'message_num': 1
            }))
            log_to_db(ip=handler.client_ip,
                      token=handler.token,
                      prompt=input_prompt,
                      response=quitting_err_msg,
                      gen_time=None,
                      parameters=handler.parameters,
                      headers=r_headers,
                      backend_response_code=response_status_code,
                      request_url=r_url,
                      backend_url=handler.cluster_backend_info,
                      response_tokens=None,
                      is_error=True
                      )

        if not opts.enable_streaming:
            return 'Streaming is disabled', 500

        r_headers = dict(request.headers)
        r_url = request.url
        message_num = 0

        while ws.connected:
            message = ws.receive()
            request_valid_json, request_json_body = validate_json(message)

            if not request_valid_json or not request_json_body.get('prompt'):
                return 'Invalid JSON', 400
            else:
                auth_failure = require_api_key(request_json_body)
                if auth_failure:
                    return auth_failure

            handler = OobaRequestHandler(incoming_request=request, selected_model=model_name, incoming_json=request_json_body)
            if handler.offline:
                msg = f'{handler.selected_model} is not a valid model choice.'
                print(msg)
                ws.send(json.dumps({
                    'event': 'text_stream',
                    'message_num': 0,
                    'text': msg
                }))
                return

            assert not handler.offline

            if handler.cluster_backend_info['mode'] != 'vllm':
                # TODO: implement other backends
                raise NotImplementedError

            event_id = None
            generated_text = ''
            input_prompt = request_json_body['prompt']
            response_status_code = 0
            start_time = time.time()

            err_msg = None
            if handler.is_client_ratelimited():
                r, _ = handler.handle_ratelimited(do_log=False)
                err_msg = r.json['results'][0]['text']
            else:
                request_valid, invalid_response = handler.validate_request(prompt=input_prompt)
                if not request_valid:
                    err_msg = invalid_response[0].json['results'][0]['text']
            if err_msg:
                send_err_and_quit(err_msg)
                return

            llm_request = {
                **handler.parameters,
                'prompt': input_prompt,
                'stream': True,
            }

            event = None
            if not handler.is_client_ratelimited():
                # Add a dummy event to the queue and wait for it to reach a worker
                event = priority_queue.put(handler.backend_url, (None, handler.client_ip, handler.token, None), handler.token_priority, handler.selected_model)
            if not event:
                log_to_db(
                    handler.client_ip,
                    handler.token,
                    handler.request_json_body.get('prompt'),
                    None,
                    None,
                    handler.parameters,
                    request.headers,
                    response_status_code,
                    request.url,
                    handler.backend_url,
                )
                return handler.handle_ratelimited()

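            # Queue handshake: the placeholder event above is keyed by event.event_id. The code
            # below waits on that Redis channel for a 'begin' message (presumably published by the
            # worker that picks the event up), and the finally block after generation publishes
            # 'finished' on the same channel to signal that the stream is done.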
            # Wait for permission to begin.
            event_id = event.event_id
            pubsub = redis.pubsub()
            pubsub.subscribe(event_id)
            for item in pubsub.listen():
                if item['type'] == 'message' and item['data'].decode('utf-8') == 'begin':
                    break
                time.sleep(0.1)

            try:
                response = generator(llm_request, handler.backend_url)

                if not response:
                    error_msg = 'Failed to reach backend while streaming.'
                    print('Streaming failed:', error_msg)
                    msg = handler.handle_error(error_msg)[0].json['results'][0]['text']
                    ws.send(json.dumps({
                        'event': 'text_stream',
                        'message_num': message_num,
                        'text': msg
                    }))
                else:
                    # Be extra careful when getting attributes from the response object.
                    try:
                        response_status_code = response.status_code
                    except:
                        response_status_code = 0

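                    # The vLLM streaming backend returns a stream of JSON objects separated by
                    # null bytes (b'\x00'). Accumulate raw bytes until a delimiter is seen, decode
                    # each complete JSON frame, and forward only the newly generated text to the
                    # websocket client.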
                    partial_response = b''

                    for chunk in response.iter_content(chunk_size=1):
                        partial_response += chunk
                        if partial_response.endswith(b'\x00'):
                            json_strs = partial_response.split(b'\x00')
                            for json_str in json_strs:
                                if json_str:
                                    try:
                                        json_obj = json.loads(json_str.decode())
                                        new = json_obj['text'][0].split(input_prompt + generated_text)[1]
                                        generated_text = generated_text + new
                                    except IndexError:
                                        # The returned text did not contain the prompt plus the
                                        # text generated so far, so there was nothing new to
                                        # extract from this frame.
                                        continue

                                    try:
                                        ws.send(json.dumps({
                                            'event': 'text_stream',
                                            'message_num': message_num,
                                            'text': new
                                        }))
                                    except:
                                        # The client has closed the stream.
                                        if response:
                                            # Cancel the backend?
                                            response.close()
                                        # used to log here
                                        return

                                    message_num += 1
                            partial_response = b''  # Reset the partial response

                        # If there is no more data, break the loop.
                        if not chunk:
                            break

                if response:
                    response.close()
                # used to log here
            except:
                traceback.print_exc()
                generated_text = generated_text + '\n\n' + handler.handle_error('Encountered error while streaming.', 'exception')[0].json['results'][0]['text']
                ws.send(json.dumps({
                    'event': 'text_stream',
                    'message_num': message_num,
                    'text': generated_text
                }))
                # used to log here
            finally:
                if event_id:
                    redis.publish(event_id, 'finished')
                else:
                    print('event_id was None!')

            try:
                ws.send(json.dumps({
                    'event': 'stream_end',
                    'message_num': message_num
                }))
            except:
                # The client closed the stream.
                pass

            end_time = time.time()
            elapsed_time = end_time - start_time
            log_to_db(ip=handler.client_ip,
                      token=handler.token,
                      prompt=input_prompt,
                      response=generated_text,
                      gen_time=elapsed_time,
                      parameters=handler.parameters,
                      headers=r_headers,
                      backend_response_code=response_status_code,
                      request_url=r_url,
                      backend_url=handler.backend_url
                      )
    finally:
        try:
            # Must close the connection or greenlets will complain.
            ws.close()
        except:
            pass