local-llm-server/llm_server/routes/ooba_request_handler.py

from typing import Tuple
import flask
from flask import jsonify
from llm_server import opts
from llm_server.database.database import log_prompt
from llm_server.routes.helpers.client import format_sillytavern_err
from llm_server.routes.request_handler import RequestHandler
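

# Handles text-generation requests in the oobabooga (text-generation-webui)
# API format: responses are returned as {'results': [{'text': ...}]} JSON.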
class OobaRequestHandler(RequestHandler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def handle_request(self):
        assert not self.used
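
        # Bail out early if validation fails; validate_request() supplies a
        # ready-made error response for the client.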
        request_valid, invalid_response = self.validate_request()
        if not request_valid:
            return invalid_response

        # Reconstruct the request JSON with the validated parameters and prompt.
        prompt = self.request_json_body.get('prompt', '')
        llm_request = {**self.parameters, 'prompt': prompt}
        _, backend_response = self.generate_response(llm_request)
        return backend_response
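
    # Invoked when the client has exceeded the per-IP simultaneous request
    # limit (opts.simultaneous_requests_per_ip).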
    def handle_ratelimited(self):
        msg = f'Ratelimited: you are only allowed to have {opts.simultaneous_requests_per_ip} simultaneous requests at a time. Please complete your other requests before sending another.'
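        # Clients (e.g. SillyTavern) can opt out of ST-style error formatting
        # by sending the "LLM-ST-Errors: true" header; in that case the error
        # is returned as plain text instead of the JSON error body.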
        disable_st_error_formatting = self.request.headers.get('LLM-ST-Errors', False) == 'true'
        if disable_st_error_formatting:
            return msg, 429
        else:
            backend_response = format_sillytavern_err(msg, 'error')
            log_prompt(self.client_ip, self.token, self.request_json_body.get('prompt', ''), backend_response, None, self.parameters, dict(self.request.headers), 429, self.request.url, is_error=True)
            return jsonify({
                'results': [{'text': backend_response}]
            }), 429
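
    # Generic error path: wrap the message in the standard
    # {'results': [{'text': ...}]} JSON shape with HTTP 400.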
    def handle_error(self, msg: str) -> Tuple[flask.Response, int]:
        return jsonify({
            'results': [{'text': msg}]
        }), 400
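

# A minimal usage sketch (not part of the original file), assuming the base
# RequestHandler is constructed from the incoming Flask request; the blueprint
# name and route below are illustrative assumptions, not a confirmed API:
#
# from flask import Blueprint, request
#
# bp = Blueprint('ooba', __name__)
#
# @bp.route('/api/v1/generate', methods=['POST'])
# def generate():
#     return OobaRequestHandler(request).handle_request()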