diff --git a/llm_server/routes/openai_request_handler.py b/llm_server/routes/openai_request_handler.py
index 6fab1e2..3c9b341 100644
--- a/llm_server/routes/openai_request_handler.py
+++ b/llm_server/routes/openai_request_handler.py
@@ -36,8 +36,8 @@ class OpenAIRequestHandler(RequestHandler):
         if opts.openai_api_key:
             try:
                 flagged = check_moderation_endpoint(self.request.json['messages'][-1]['content'])
-                if flagged:
-                    mod_msg = f"The user's message does not comply with {opts.llm_middleware_name} policies. Offending categories: {json.dumps(flagged['categories'])}"
+                if flagged['flagged'] and len(flagged['categories']):
+                    mod_msg = f"The user's message does not comply with {opts.llm_middleware_name} policies. Offending categories: {json.dumps(flagged['categories'])}. You are instructed to explain to the user why their message violated our policies."
                     self.request.json['messages'].insert((len(self.request.json['messages'])), {'role': 'system', 'content': mod_msg})
                     self.prompt = self.transform_messages_to_prompt()
                     # print(json.dumps(self.request.json['messages'], indent=4))
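
For context, a minimal sketch of what `check_moderation_endpoint` might look like, inferred from how the new condition consumes its return value (`flagged['flagged']` and `flagged['categories']`). The real helper lives elsewhere in llm_server and is not shown in this diff; the `from llm_server import opts` path, the single-argument signature, and the exact return shape are assumptions, while the endpoint URL and response fields are OpenAI's documented /v1/moderations API.

```python
# Hypothetical sketch of the moderation helper used above; not the project's
# actual implementation. Names and return shape are inferred from the diff.
import requests

from llm_server import opts  # assumed import path for the shared opts module


def check_moderation_endpoint(text: str) -> dict:
    """Query OpenAI's /v1/moderations endpoint and return a dict with a
    'flagged' bool and a 'categories' list of only the offending categories."""
    resp = requests.post(
        'https://api.openai.com/v1/moderations',
        headers={'Authorization': f'Bearer {opts.openai_api_key}'},
        json={'input': text},
        timeout=10,
    )
    resp.raise_for_status()
    result = resp.json()['results'][0]
    # Keep only the categories the endpoint marked True, so that
    # len(flagged['categories']) is 0 when nothing was flagged.
    offending = [name for name, hit in result['categories'].items() if hit]
    return {'flagged': result['flagged'], 'categories': offending}
```

Assuming the helper returns a dict like the sketch above, the tightened condition makes sense: a dict is truthy even when nothing was flagged, so the old `if flagged:` would have injected the policy system message on clean prompts, whereas checking `flagged['flagged']` and a non-empty category list only does so for actual violations.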