From 467e1893ea2abfbc8995913297af9077b904e38b Mon Sep 17 00:00:00 2001
From: Cyberes
Date: Sun, 8 Oct 2023 19:36:12 -0600
Subject: [PATCH] fix issue with null data on openai

---
 llm_server/llm/openai/transform.py           | 3 ++-
 llm_server/routes/openai/chat_completions.py | 4 ++++
 llm_server/routes/openai_request_handler.py  | 4 ++++
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/llm_server/llm/openai/transform.py b/llm_server/llm/openai/transform.py
index 0c2946b..daec3dc 100644
--- a/llm_server/llm/openai/transform.py
+++ b/llm_server/llm/openai/transform.py
@@ -87,8 +87,9 @@ def transform_messages_to_prompt(oai_messages):
     try:
         prompt = f'### INSTRUCTION: {opts.openai_system_prompt}'
         for msg in oai_messages:
-            if not msg.get('content') or not msg.get('role'):
+            if 'content' not in msg.keys() or 'role' not in msg.keys():
                 return False
+            msg['content'] = str(msg['content'])  # Prevent any weird issues.
             if msg['role'] == 'system':
                 prompt += f'### INSTRUCTION: {msg["content"]}\n\n'
             elif msg['role'] == 'user':
diff --git a/llm_server/routes/openai/chat_completions.py b/llm_server/routes/openai/chat_completions.py
index bcbd24c..b088a18 100644
--- a/llm_server/routes/openai/chat_completions.py
+++ b/llm_server/routes/openai/chat_completions.py
@@ -57,6 +57,10 @@ def openai_chat_completions():
         else:
             handler.prompt = transform_messages_to_prompt(handler.request.json['messages'])
 
+        if not handler.prompt:
+            # Prevent issues on the backend.
+            return 'Invalid prompt', 400
+
         event_id = None
         response_status_code = 0
         start_time = time.time()
diff --git a/llm_server/routes/openai_request_handler.py b/llm_server/routes/openai_request_handler.py
index 037de27..9716eb9 100644
--- a/llm_server/routes/openai_request_handler.py
+++ b/llm_server/routes/openai_request_handler.py
@@ -61,6 +61,10 @@ class OpenAIRequestHandler(RequestHandler):
         # TODO: support Ooba
         self.parameters = oai_to_vllm(self.parameters, stop_hashes=('instruct' not in self.request_json_body['model'].lower()), mode=self.cluster_backend_info['mode'])
 
+        if not self.prompt:
+            # TODO: format this as an openai error message
+            return 'Invalid prompt', 400
+
         llm_request = {**self.parameters, 'prompt': self.prompt}
         (success, _, _, _), (backend_response, backend_response_status_code) = self.generate_response(llm_request)
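
As a side note (not part of the patch itself): a minimal, self-contained sketch of how the message validation in transform.py changes for a message whose content is null. The helper names old_check and new_check are illustrative only and do not exist in the repo; the "new" path mirrors the key-presence test and str() cast added above.

# Illustrative sketch only; reproduces the validation change outside the repo.
def old_check(msg: dict) -> bool:
    # Old behavior: falsy content (None, '') was rejected the same as a missing key.
    return bool(msg.get('content')) and bool(msg.get('role'))

def new_check(msg: dict) -> bool:
    # New behavior: only a genuinely missing key rejects the message;
    # null content is coerced to a string instead.
    if 'content' not in msg.keys() or 'role' not in msg.keys():
        return False
    msg['content'] = str(msg['content'])
    return True

msg = {'role': 'user', 'content': None}
print(old_check(dict(msg)))  # False -> request was rejected before the patch
print(new_check(dict(msg)))  # True  -> content becomes the string 'None'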