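"""Request handler for the oobabooga-style (text-generation-webui) text generation route."""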
from typing import Tuple

import flask
from flask import jsonify, request

import llm_server.globals
from llm_server.config.global_config import GlobalConfig
from llm_server.database.log_to_db import log_to_db
from llm_server.logging import create_logger
from llm_server.routes.helpers.client import format_sillytavern_err
from llm_server.routes.request_handler import RequestHandler

_logger = create_logger('OobaRequestHandler')


class OobaRequestHandler(RequestHandler):
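    """Handles a single oobabooga-style generation request.

    Validates the request, forwards it to the backend LLM, and returns errors
    in the oobabooga response shape {'results': [{'text': ...}]} so clients
    such as SillyTavern can display them.
    """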
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def handle_request(self, return_ok: bool = True):
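        """Validate the request, send it to the backend, and return the response.

        When return_ok is True, always respond with HTTP 200 so SillyTavern
        displays our error messages; the OpenAI-compatible route passes False
        so it can detect 429 errors from the real status code.
        """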
        assert not self.used
        if self.offline:
            return self.handle_error(llm_server.globals.BACKEND_OFFLINE)

        request_valid, invalid_response = self.validate_request()
        if not request_valid:
            return invalid_response

        # Reconstruct the request JSON with the validated parameters and prompt.
        prompt = self.request_json_body.get('prompt', '')
        llm_request = {**self.parameters, 'prompt': prompt}

        # generate_response() is assumed (from its usage here) to return a success
        # flag plus a (flask.Response, status_code) tuple; the flag is unused.
        _, backend_response = self.generate_response(llm_request)
        if return_ok:
            # Always return 200 so ST displays our error messages
            return backend_response[0], 200
        else:
            # The OpenAI route needs to detect 429 errors.
            return backend_response

    def handle_ratelimited(self, do_log: bool = True):
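        """Reject the request with HTTP 429 because the client exceeded the
        per-IP simultaneous request limit, optionally logging the event to
        the database.
        """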
        msg = (f'Ratelimited: you are only allowed to have {GlobalConfig.get().simultaneous_requests_per_ip} '
               'simultaneous requests at a time. Please complete your other requests before sending another.')
        backend_response = self.handle_error(msg)
        if do_log:
            log_to_db(self.client_ip, self.token, self.request_json_body.get('prompt', ''),
                      backend_response[0].data.decode('utf-8'), None, self.parameters,
                      dict(self.request.headers), 429, self.request.url, self.backend_url,
                      is_error=True)
        return backend_response[0], 429

    def handle_error(self, error_msg: str, error_type: str = 'error') -> Tuple[flask.Response, int]:
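        """Wrap error_msg in the oobabooga response shape.

        Unless the client opts out via the LLM-ST-Errors header, the message
        is formatted for display inside SillyTavern. Always returns HTTP 200
        so the client's frontend doesn't show a generic failure.
        """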
        disable_st_error_formatting = request.headers.get('LLM-ST-Errors', False) == 'true'
        if disable_st_error_formatting:
            # TODO: how to format this
            response_msg = error_msg
        else:
            response_msg = format_sillytavern_err(error_msg, error_type=error_type, backend_url=self.backend_url)

        return jsonify({
            'results': [{'text': response_msg}]
        }), 200  # return 200 so we don't trigger an error message in the client's ST