import json
import time
import traceback

import ujson
from flask import Response, jsonify, request
from redis import Redis

from llm_server.custom_redis import redis
from . import openai_bp, openai_model_bp
from ..helpers.http import validate_json
from ..openai_request_handler import OpenAIRequestHandler
from ..queue import priority_queue
from ...config.global_config import GlobalConfig
from ...database.log_to_db import log_to_db
from ...llm.openai.oai_to_vllm import oai_to_vllm, validate_oai, return_oai_internal_server_error
from ...llm.openai.transform import generate_oai_string, transform_messages_to_prompt, trim_messages_to_fit
from ...logging import create_logger

_logger = create_logger('OpenAIChatCompletions')


# TODO: add rate-limit headers?


@openai_bp.route('/chat/completions', methods=['POST'])
@openai_model_bp.route('/<model_name>/v1/chat/completions', methods=['POST'])
def openai_chat_completions(model_name=None):
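    """Serve OpenAI-compatible chat completion requests, optionally pinned to a model via the URL's model_name."""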
    request_valid_json, request_json_body = validate_json(request)
    if not request_valid_json or not request_json_body.get('messages') or not request_json_body.get('model'):
        return jsonify({'code': 400, 'msg': 'invalid JSON'}), 400
    else:
        handler = OpenAIRequestHandler(incoming_request=request, incoming_json=request_json_body, selected_model=model_name)
        if handler.offline:
            return return_oai_internal_server_error(f'backend {handler.backend_url} is offline')

        if not request_json_body.get('stream'):
            try:
                return handler.handle_request()
            except Exception:
                traceback.print_exc()
                return 'Internal server error', 500
        else:
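            # Streaming path: validate and translate the OpenAI-style request, queue it for an
            # inference worker, then relay the generated tokens back to the client as
            # OpenAI-compatible server-sent events read from a Redis stream.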
            if not GlobalConfig.get().enable_streaming:
                return 'Streaming disabled', 403

            invalid_oai_err_msg = validate_oai(handler.request_json_body)
            if invalid_oai_err_msg:
                return invalid_oai_err_msg

            handler.request_json_body = oai_to_vllm(handler.request_json_body, stop_hashes=True, mode=handler.cluster_backend_info['mode'])

            handler.parameters, e = handler.get_parameters()
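            # Rebuild the request body so that only the messages, the model, and the sanitized
            # sampling parameters are forwarded to the inference backend.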
            handler.request_json_body = {
                'messages': handler.request_json_body['messages'],
                'model': handler.request_json_body['model'],
                **handler.parameters
            }

            if GlobalConfig.get().openai_silent_trim:
                handler.prompt = transform_messages_to_prompt(trim_messages_to_fit(handler.request.json['messages'], handler.cluster_backend_info['model_config']['max_position_embeddings'], handler.backend_url))
            else:
                handler.prompt = transform_messages_to_prompt(handler.request.json['messages'])
            if not handler.prompt:
                # Prevent issues on the backend.
                return 'Invalid prompt', 400

            # Need to set the prompt in the JSON body since that's what the inference worker expects.
            handler.request_json_body['prompt'] = handler.prompt

            start_time = time.time()

            request_valid, invalid_response = handler.validate_request()
            if not request_valid:
                return invalid_response
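
            # Only queue the request if the client is not rate limited. If no queue event was
            # created, log the rejected request and return the rate limit response.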
            event = None
            if not handler.is_client_ratelimited():
                event = priority_queue.put(handler.backend_url, (handler.request_json_body, handler.client_ip, handler.token, handler.parameters), handler.token_priority, handler.selected_model, do_stream=True)
            if not event:
                log_to_db(
                    handler.client_ip,
                    handler.token,
                    handler.prompt,
                    None,
                    None,
                    handler.parameters,
                    request.headers,
                    429,
                    request.url,
                    handler.backend_url,
                )
                return handler.handle_ratelimited()

            try:
                r_headers = dict(request.headers)
                r_url = request.url
                model = redis.get('running_model', 'ERROR', dtype=str) if GlobalConfig.get().openai_expose_our_model else request_json_body.get('model')
                oai_string = generate_oai_string(30)

                # Need to do this before we enter generate() since we want to be able to
                # return a 408 if necessary.
                _, stream_name, error_msg = event.wait()
                if error_msg:
                    _logger.error(f'OAI failed to start streaming: {error_msg}')
                    stream_name = None  # Set to None so that the finally block ignores it.
                    return 'Request Timeout', 408
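
                # generate() reads incremental output for this request from the worker's Redis
                # stream and re-emits it to the client as OpenAI-style "chat.completion.chunk"
                # server-sent events, logging the completed response to the database when done.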
                def generate():
                    stream_redis = Redis(db=8)
                    generated_text = ''
                    try:
                        last_id = '0-0'
                        while True:
                            stream_data = stream_redis.xread({stream_name: last_id}, block=GlobalConfig.get().redis_stream_timeout)
                            if not stream_data:
                                _logger.debug(f"No message received in {GlobalConfig.get().redis_stream_timeout / 1000} seconds, closing stream.")
                                yield 'data: [DONE]\n\n'
                                return
                            else:
                                for stream_index, item in stream_data[0][1]:
                                    last_id = stream_index
                                    timestamp = int(stream_index.decode('utf-8').split('-')[0])
                                    data = ujson.loads(item[b'data'])
                                    if data['error']:
                                        # Not printing error since we can just check the daemon log.
                                        _logger.warn(f'OAI streaming encountered error: {data["error"]}')
                                        yield 'data: [DONE]\n\n'
                                        return
                                    elif data['new']:
                                        response = {
                                            "id": f"chatcmpl-{oai_string}",
                                            "object": "chat.completion.chunk",
                                            "created": timestamp,
                                            "model": model,
                                            "choices": [
                                                {
                                                    "index": 0,
                                                    "delta": {
                                                        "content": data['new']
                                                    },
                                                    "finish_reason": None
                                                }
                                            ]
                                        }
                                        generated_text += data['new']
                                        yield f'data: {json.dumps(response)}\n\n'
                                    elif data['completed']:
                                        yield 'data: [DONE]\n\n'
                                        end_time = time.time()
                                        elapsed_time = end_time - start_time
                                        log_to_db(
                                            handler.client_ip,
                                            handler.token,
                                            handler.prompt,
                                            generated_text,
                                            elapsed_time,
                                            handler.parameters,
                                            r_headers,
                                            200,
                                            r_url,
                                            handler.backend_url,
                                        )
                                        return
                    except GeneratorExit:
                        return
                    except Exception:
                        traceback.print_exc()
                        yield 'data: [DONE]\n\n'
                    finally:
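                        # On exit (completed, errored, or the client disconnected), signal
                        # cancellation for this request's event and delete its Redis stream.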
                        if event:
                            redis.publish(f'notifications:{event.event_id}', 'canceled')
                        if stream_name:
                            stream_redis.delete(stream_name)

                return Response(generate(), mimetype='text/event-stream')
            except Exception:
                traceback.print_exc()
                return 'Internal server error', 500