from typing import Tuple
import flask
from flask import jsonify, request

import llm_server.globals
from llm_server.config.global_config import GlobalConfig
from llm_server.database.log_to_db import log_to_db
from llm_server.logging import create_logger
from llm_server.routes.helpers.client import format_sillytavern_err
from llm_server.routes.request_handler import RequestHandler

_logger = create_logger('OobaRequestHandler')


class OobaRequestHandler(RequestHandler):
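    """
    Request handler for Oobabooga/text-generation-webui style generation
    requests: a plain 'prompt' plus sampler parameters in, a 'results' list
    of generated text out.
    """
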
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def handle_request(self, return_ok: bool = True):
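        """
        Process a single generation request.

        Returns an error response if the backend is offline or the request
        fails validation; otherwise merges the validated parameters with the
        prompt and forwards them to the backend. With return_ok=True the
        backend's body is always returned with HTTP 200 so SillyTavern
        displays error text instead of failing; with return_ok=False the
        backend's status code is passed through so callers such as the
        OpenAI route can detect 429s.
        """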
        assert not self.used
        if self.offline:
            return self.handle_error(llm_server.globals.BACKEND_OFFLINE)

        request_valid, invalid_response = self.validate_request()
        if not request_valid:
            return invalid_response

        # Reconstruct the request JSON with the validated parameters and prompt.
        prompt = self.request_json_body.get('prompt', '')
        llm_request = {**self.parameters, 'prompt': prompt}

        _, backend_response = self.generate_response(llm_request)
        if return_ok:
            # Always return 200 so ST displays our error messages.
            return backend_response[0], 200
        else:
            # The OpenAI route needs to detect 429 errors.
            return backend_response

    def handle_ratelimited(self, do_log: bool = True):
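        """
        Build the "too many simultaneous requests" error response, optionally
        log the rejected request to the database, and return it with HTTP 429.
        """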
        msg = f'Ratelimited: you are only allowed to have {GlobalConfig.get().simultaneous_requests_per_ip} simultaneous requests at a time. Please complete your other requests before sending another.'
        backend_response = self.handle_error(msg)
        if do_log:
            log_to_db(self.client_ip, self.token, self.request_json_body.get('prompt', ''), backend_response[0].data.decode('utf-8'), None, self.parameters, dict(self.request.headers), 429, self.request.url, self.backend_url, is_error=True)
        return backend_response[0], 429

    def handle_error(self, error_msg: str, error_type: str = 'error') -> Tuple[flask.Response, int]:
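        """
        Wrap an error message in the Ooba-style response body. Unless the
        client sends the 'LLM-ST-Errors: true' header, the message is
        formatted for display inside SillyTavern. Always returns HTTP 200 so
        the client does not treat the reply as a failed request.
        """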
        disable_st_error_formatting = request.headers.get('LLM-ST-Errors', False) == 'true'
        if disable_st_error_formatting:
            # TODO: how to format this
            response_msg = error_msg
        else:
            response_msg = format_sillytavern_err(error_msg, error_type=error_type, backend_url=self.backend_url)

        return jsonify({
            'results': [{'text': response_msg}]
        }), 200  # return 200 so we don't trigger an error message in the client's ST
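

# A hypothetical usage sketch (illustrative only, not part of this module):
# how a Flask route might drive this handler. The constructor arguments here
# are assumptions -- they are forwarded unchanged to the base RequestHandler,
# whose signature is defined in llm_server.routes.request_handler.
#
#   @app.route('/api/v1/generate', methods=['POST'])
#   def generate():
#       handler = OobaRequestHandler(incoming_request=flask.request)
#       return handler.handle_request()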