diff --git a/llm_server/routes/openai_request_handler.py b/llm_server/routes/openai_request_handler.py
index 468941f..975bc59 100644
--- a/llm_server/routes/openai_request_handler.py
+++ b/llm_server/routes/openai_request_handler.py
@@ -57,7 +57,7 @@ class OpenAIRequestHandler(RequestHandler):
             self.prompt = transform_messages_to_prompt(self.request.json['messages'])
         except Exception as e:
             print(f'OpenAI moderation endpoint failed:', f'{e.__class__.__name__}: {e}')
-            print(traceback.format_exc())
+            traceback.print_exc()
 
         # TODO: support Ooba
         print('converting to vllm')
@@ -73,7 +73,7 @@ class OpenAIRequestHandler(RequestHandler):
             print('sent success response')
             return self.build_openai_response(self.prompt, backend_response.json['results'][0]['text'], model=model), backend_response_status_code
         else:
-            print(backend_response)
+            print(backend_response_status_code, backend_response.data)
             return backend_response, backend_response_status_code
 
     def handle_ratelimited(self, do_log: bool = True):