diff --git a/llm_server/routes/openai/chat_completions.py b/llm_server/routes/openai/chat_completions.py
index afa6fd1..99b7488 100644
--- a/llm_server/routes/openai/chat_completions.py
+++ b/llm_server/routes/openai/chat_completions.py
@@ -65,7 +65,6 @@ def openai_chat_completions(model_name=None):
         return 'Invalid prompt', 400
 
     event_id = None
-    response_status_code = 0
     start_time = time.time()
 
     request_valid, invalid_response = handler.validate_request()
@@ -91,7 +90,7 @@ def openai_chat_completions(model_name=None):
                 None,
                 handler.parameters,
                 request.headers,
-                response_status_code,
+                429,
                 request.url,
                 handler.backend_url,
             )
@@ -159,7 +158,7 @@ def openai_chat_completions(model_name=None):
                 elapsed_time,
                 handler.parameters,
                 r_headers,
-                response_status_code,
+                200,
                 r_url,
                 handler.backend_url,
             )
diff --git a/llm_server/routes/openai/completions.py b/llm_server/routes/openai/completions.py
index 3dcde2e..8851fcc 100644
--- a/llm_server/routes/openai/completions.py
+++ b/llm_server/routes/openai/completions.py
@@ -90,7 +90,6 @@ def openai_completions(model_name=None):
         return 'DISABLED', 401
 
     event_id = None
-    response_status_code = 0
     start_time = time.time()
 
     request_valid, invalid_response = handler.validate_request()
@@ -117,7 +116,7 @@ def openai_completions(model_name=None):
                 None,
                 handler.parameters,
                 request.headers,
-                response_status_code,
+                429,
                 request.url,
                 handler.backend_url,
             )
@@ -185,7 +184,7 @@ def openai_completions(model_name=None):
                 elapsed_time,
                 handler.parameters,
                 r_headers,
-                response_status_code,
+                200,
                 r_url,
                 handler.backend_url,
             )