41 lines
1.6 KiB
Python
from typing import Tuple
|
|
|
|
import flask
|
|
from flask import jsonify
|
|
|
|
from llm_server import opts
|
|
from llm_server.database.database import log_prompt
|
|
from llm_server.routes.helpers.client import format_sillytavern_err
|
|
from llm_server.routes.request_handler import RequestHandler
|
|
|
|
|
|
class OobaRequestHandler(RequestHandler):
    """Request handler for the text-generation-webui (Ooba) API format.

    All responses are wrapped in the Ooba JSON shape:
    ``{'results': [{'text': ...}]}``.
    """
    # NOTE: no __init__ override — the previous one was a pure
    # super().__init__(*args, **kwargs) pass-through and added nothing.

    def handle_request(self):
        """Validate the incoming request and run it against the backend.

        Returns the validator's error response when validation fails,
        otherwise the backend's response. A handler instance is single-use.
        """
        # Explicit raise instead of a bare `assert`, which is stripped
        # when Python runs with -O; same AssertionError either way.
        if self.used:
            raise AssertionError('handler instances are single-use')

        request_valid, invalid_response = self.validate_request()
        if not request_valid:
            return invalid_response

        # Reconstruct the request JSON with the validated parameters and prompt.
        prompt = self.request_json_body.get('prompt', '')
        llm_request = {**self.parameters, 'prompt': prompt}

        _, backend_response = self.generate_response(llm_request)
        return backend_response

    def handle_ratelimited(self) -> Tuple[flask.Response, int]:
        """Log the rejected prompt and return a 429 formatted for SillyTavern."""
        backend_response = format_sillytavern_err(f'Ratelimited: you are only allowed to have {opts.simultaneous_requests_per_ip} simultaneous requests at a time. Please complete your other requests before sending another.', 'error')
        # Record the rejection (status 429, is_error=True) before responding.
        log_prompt(self.client_ip, self.token, self.request_json_body.get('prompt', ''), backend_response, None, self.parameters, dict(self.request.headers), 429, self.request.url, is_error=True)
        return jsonify({
            'results': [{'text': backend_response}]
        }), 429

    def handle_error(self, msg: str) -> Tuple[flask.Response, int]:
        """Return *msg* as an Ooba-shaped error body with HTTP 400."""
        return jsonify({
            'results': [{'text': msg}]
        }), 400
|