openai error message cleanup

Cyberes 2024-05-07 17:07:34 -06:00
parent fd09c783d3
commit 5bd1044fad
4 changed files with 17 additions and 24 deletions

View File

@@ -90,14 +90,7 @@ def return_invalid_model_err(requested_model: str):
msg = f"The model `{requested_model}` does not exist"
else:
msg = "The requested model does not exist"
return jsonify({
"error": {
"message": msg,
"type": "invalid_request_error",
"param": None,
"code": "model_not_found"
}
}), 404
return return_oai_invalid_request_error(msg)
def return_oai_internal_server_error():
@@ -109,3 +102,14 @@ def return_oai_internal_server_error():
"code": "internal_error"
}
}), 500
def return_oai_invalid_request_error(msg: str = None):
return jsonify({
"error": {
"message": msg,
"type": "invalid_request_error",
"param": None,
"code": "model_not_found"
}
}), 404
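The new helper centralizes the OpenAI-style invalid-request payload that return_invalid_model_err used to build inline. A minimal sanity-check sketch of its behavior, assuming the module path shown in the import change further down (llm_server.llm.openai.oai_to_vllm); the throwaway Flask app and the `foo` model name are illustrative only:

from flask import Flask
from llm_server.llm.openai.oai_to_vllm import return_oai_invalid_request_error

app = Flask(__name__)  # throwaway app, only needed so jsonify() has an application context

with app.app_context():
    body, status = return_oai_invalid_request_error("The model `foo` does not exist")
    assert status == 404
    assert body.get_json()["error"]["type"] == "invalid_request_error"
    assert body.get_json()["error"]["code"] == "model_not_found"
    # Called with no argument, msg defaults to None and the message field serializes to null.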

View File

@@ -28,7 +28,7 @@ def handle_error(e):
"""
_logger.error(f'OAI returning error: {e}')
return_oai_internal_server_error()
return return_oai_internal_server_error()
from .models import openai_list_models
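The added `return` is the substance of this hunk: Flask only sends what the error handler returns, so building the (response, status) tuple without returning it leaves the handler yielding None. A minimal illustration under a throwaway app (the Exception handler below is illustrative, not the repository's blueprint wiring):

from flask import Flask, jsonify

app = Flask(__name__)  # illustrative app, not the project's actual blueprint

@app.errorhandler(Exception)
def handle_error(e):
    # The tuple must be returned; without the `return`, Flask receives None
    # and the client never sees the JSON error body.
    return jsonify({
        "error": {
            "message": "Internal server error",
            "type": "error",
            "param": None,
            "code": "internal_error"
        }
    }), 500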

View File

@@ -32,8 +32,7 @@ def openai_chat_completions(model_name=None):
else:
handler = OpenAIRequestHandler(incoming_request=request, incoming_json=request_json_body, selected_model=model_name)
if handler.offline:
# return return_invalid_model_err(model_name)
return_oai_internal_server_error()
return return_oai_internal_server_error()
if not request_json_body.get('stream'):
try:

View File

@@ -14,7 +14,7 @@ from llm_server.custom_redis import redis
from llm_server.database.database import is_api_key_moderated
from llm_server.database.log_to_db import log_to_db
from llm_server.llm import get_token_count
from llm_server.llm.openai.oai_to_vllm import oai_to_vllm, validate_oai, return_invalid_model_err, return_oai_internal_server_error
from llm_server.llm.openai.oai_to_vllm import oai_to_vllm, validate_oai, return_oai_internal_server_error, return_oai_invalid_request_error
from llm_server.llm.openai.transform import ANTI_CONTINUATION_RE, ANTI_RESPONSE_RE, generate_oai_string, transform_messages_to_prompt, trim_messages_to_fit
from llm_server.logging import create_logger
from llm_server.routes.request_handler import RequestHandler
@@ -31,10 +31,7 @@ class OpenAIRequestHandler(RequestHandler):
def handle_request(self) -> Tuple[flask.Response, int]:
assert not self.used
if self.offline:
# msg = return_invalid_model_err(self.selected_model)
# _logger.error(f'OAI is offline: {msg}')
# return self.handle_error(msg)
return_oai_internal_server_error()
return return_oai_internal_server_error()
if GlobalConfig.get().openai_silent_trim:
oai_messages = trim_messages_to_fit(self.request.json['messages'], self.cluster_backend_info['model_config']['max_position_embeddings'], self.backend_url)
@@ -111,14 +108,7 @@
def handle_error(self, error_msg: str, error_type: str = 'error') -> Tuple[flask.Response, int]:
_logger.error(f'OAI Error: {error_msg}')
return jsonify({
"error": {
"message": "Invalid request, check your parameters and try again.",
"type": "invalid_request_error",
"param": None,
"code": None
}
}), 400
return return_oai_invalid_request_error()
def build_openai_response(self, prompt, response, model=None):
# Separate the user's prompt from the context
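Taken together, the handler's two error paths now both funnel through the shared helpers: an offline backend yields an OpenAI-style internal_error with HTTP 500, and a bad request yields an invalid_request_error with HTTP 404. A condensed sketch of that flow; the MiniHandler class below is a simplified stand-in, not the actual OpenAIRequestHandler:

from llm_server.llm.openai.oai_to_vllm import (
    return_oai_internal_server_error,
    return_oai_invalid_request_error,
)

class MiniHandler:
    """Stripped-down stand-in for OpenAIRequestHandler's error paths."""

    def __init__(self, offline: bool):
        self.offline = offline

    def handle_request(self):
        if self.offline:
            # Backend unavailable: OpenAI-style internal_error, HTTP 500.
            return return_oai_internal_server_error()
        # ... normal request handling elided ...
        return self.handle_error('bad parameters')

    def handle_error(self, error_msg: str):
        # As in the diff, the helper is called without a message, so the client
        # sees "message": null with code "model_not_found" and HTTP 404.
        return return_oai_invalid_request_error()

if __name__ == '__main__':
    # The helpers call jsonify(), so exercising them needs an application context.
    from flask import Flask
    with Flask(__name__).app_context():
        body, status = MiniHandler(offline=True).handle_request()
        assert status == 500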