From 4634e36eeb1c9f06e98454636d8a9d4e61d359c1 Mon Sep 17 00:00:00 2001
From: Cyberes
Date: Wed, 4 Oct 2023 10:26:39 -0600
Subject: [PATCH] text

---
 llm_server/routes/openai_request_handler.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llm_server/routes/openai_request_handler.py b/llm_server/routes/openai_request_handler.py
index 468941f..975bc59 100644
--- a/llm_server/routes/openai_request_handler.py
+++ b/llm_server/routes/openai_request_handler.py
@@ -57,7 +57,7 @@ class OpenAIRequestHandler(RequestHandler):
             self.prompt = transform_messages_to_prompt(self.request.json['messages'])
         except Exception as e:
             print(f'OpenAI moderation endpoint failed:', f'{e.__class__.__name__}: {e}')
-            print(traceback.format_exc())
+            traceback.print_exc()
 
         # TODO: support Ooba
         print('converting to vllm')
@@ -73,7 +73,7 @@ class OpenAIRequestHandler(RequestHandler):
             print('sent success response')
             return self.build_openai_response(self.prompt, backend_response.json['results'][0]['text'], model=model), backend_response_status_code
         else:
-            print(backend_response)
+            print(backend_response_status_code, backend_response.data)
         return backend_response, backend_response_status_code
 
     def handle_ratelimited(self, do_log: bool = True):