From e08518f866200a3c41bd77713e9f6446144478b0 Mon Sep 17 00:00:00 2001
From: Cyberes
Date: Sat, 18 Mar 2023 15:18:22 -0600
Subject: [PATCH] force system prompt

---
 config.sample.yaml               |  3 +++
 main.py                          | 14 ++++++++++++--
 matrix_gpt/bot/callbacks.py      |  5 +++--
 matrix_gpt/bot/chat_functions.py | 11 +++++++++--
 4 files changed, 27 insertions(+), 6 deletions(-)

diff --git a/config.sample.yaml b/config.sample.yaml
index 0bb8137..afeb7d0 100644
--- a/config.sample.yaml
+++ b/config.sample.yaml
@@ -39,6 +39,9 @@ reply_in_thread: true
 # For example, you can instruct the assistant with "You are a helpful assistant."
 #system_prompt:
 
+# Insert the system prompt before the most recent user input. Useful for threaded chats.
+force_system_prompt: false
+
 # Log the full response (prompt + response) at debug level.
 log_full_response: false
 
diff --git a/main.py b/main.py
index de07359..c17f217 100755
--- a/main.py
+++ b/main.py
@@ -54,7 +54,6 @@
 check_config_value_exists(config_data, 'openai_api_key')
 check_config_value_exists(config_data, 'openai_model')
 check_config_value_exists(config_data, 'data_storage')
-
 # check_config_value_exists(config_data, 'autojoin_rooms')
 
 def retry(msg=None):
@@ -89,7 +88,18 @@ async def main():
     storage = Storage(Path(config_data['data_storage'], 'matrixgpt.db'))
 
     # Set up event callbacks
-    callbacks = Callbacks(client, storage, config_data['command_prefix'], openai_config, config_data.get('reply_in_thread', False), config_data['allowed_to_invite'], config_data['allowed_to_chat'], config_data.get('system_prompt'), log_full_response=config_data.get('log_full_response', False))
+    callbacks = Callbacks(
+        client,
+        storage,
+        config_data['command_prefix'],
+        openai_config,
+        config_data.get('reply_in_thread', False),
+        config_data['allowed_to_invite'],
+        config_data['allowed_to_chat'],
+        config_data.get('system_prompt'),
+        log_full_response=config_data.get('log_full_response', False),
+        force_system_prompt=config_data.get('force_system_prompt', False)
+    )
     client.add_event_callback(callbacks.message, RoomMessageText)
     client.add_event_callback(callbacks.invite_event_filtered_callback, InviteMemberEvent)
     client.add_event_callback(callbacks.decryption_failure, MegolmEvent)
diff --git a/matrix_gpt/bot/callbacks.py b/matrix_gpt/bot/callbacks.py
index b528dcc..230aa41 100644
--- a/matrix_gpt/bot/callbacks.py
+++ b/matrix_gpt/bot/callbacks.py
@@ -13,7 +13,7 @@ logger = logging.getLogger('MatrixGPT')
 
 
 class Callbacks:
-    def __init__(self, client: AsyncClient, store: Storage, command_prefix: str, openai, reply_in_thread, allowed_to_invite, allowed_to_chat='all', system_prompt: str = None, log_full_response: bool = False):
+    def __init__(self, client: AsyncClient, store: Storage, command_prefix: str, openai, reply_in_thread, allowed_to_invite, allowed_to_chat='all', system_prompt: str = None, log_full_response: bool = False, force_system_prompt: bool = False):
         """
         Args:
             client: nio client used to interact with matrix.
@@ -33,6 +33,7 @@ class Callbacks:
         self.allowed_to_chat = allowed_to_chat
         self.system_prompt = system_prompt
         self.log_full_response = log_full_response
+        self.force_system_prompt = force_system_prompt
 
     async def message(self, room: MatrixRoom, event: RoomMessageText) -> None:
         """Callback for when a message event is received
@@ -91,7 +92,7 @@ class Callbacks:
             # message = Message(self.client, self.store, msg, room, event, self.reply_in_thread)
             # await message.process()
             api_data.append({'role': 'user', 'content': event.body})
-            await process_chat(self.client, room, event, api_data, self.store, self.openai, thread_root_id=thread_content[0].event_id, system_prompt=self.system_prompt, log_full_response=self.log_full_response)
+            await process_chat(self.client, room, event, api_data, self.store, self.openai, thread_root_id=thread_content[0].event_id, system_prompt=self.system_prompt, log_full_response=self.log_full_response, force_system_prompt=self.force_system_prompt)
             return
         elif msg.startswith(f'{self.command_prefix} ') or room.member_count == 2:
             # Otherwise if this is in a 1-1 with the bot or features a command prefix, treat it as a command.
diff --git a/matrix_gpt/bot/chat_functions.py b/matrix_gpt/bot/chat_functions.py
index 16cfb7c..5933e92 100644
--- a/matrix_gpt/bot/chat_functions.py
+++ b/matrix_gpt/bot/chat_functions.py
@@ -184,7 +184,7 @@ async def get_thread_content(client: AsyncClient, room: MatrixRoom, base_event:
     return messages
 
 
-async def process_chat(client, room, event, command, store, openai, thread_root_id: str = None, system_prompt: str = None, log_full_response: bool = False):
+async def process_chat(client, room, event, command, store, openai, thread_root_id: str = None, system_prompt: str = None, log_full_response: bool = False, force_system_prompt: bool = False):
     if not store.check_seen_event(event.event_id):
         await client.room_typing(room.room_id, typing_state=True, timeout=3000)
         # if self.reply_in_thread:
@@ -197,7 +197,14 @@ async def process_chat(client, room, event, command, store, openai, thread_root_
             {'role': 'user', 'content': command},
         ]
         if system_prompt:
-            messages.insert(0, {"role": "system", "content": system_prompt}, )
+            messages.insert(0, {"role": "system", "content": system_prompt})
+            if force_system_prompt:
+                if messages[-1]['role'] == 'system':
+                    messages[-1] = {"role": "system", "content": system_prompt}
+                else:
+                    messages.insert(-1, {"role": "system", "content": system_prompt})
+
+        logger.info(messages)
 
         response = openai['openai'].ChatCompletion.create(
             model=openai['model'],
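
Note (illustration, not part of the patch): the sketch below emulates the
force_system_prompt branch added to process_chat(), applied to an invented
thread history of the kind callbacks.py accumulates in api_data. Only the
insert/replace logic mirrors the hunk above; the sample messages are
hypothetical.

    # Sketch only: mirrors the force_system_prompt logic from chat_functions.py.
    system_prompt = 'You are a helpful assistant.'

    # Hypothetical thread history (what callbacks.py builds as api_data).
    messages = [
        {'role': 'user', 'content': 'What is Matrix?'},
        {'role': 'assistant', 'content': 'An open network for decentralized communication.'},
        {'role': 'user', 'content': 'How do threads work?'},
    ]

    if system_prompt:
        # Normal behavior: the system prompt goes at the very top.
        messages.insert(0, {'role': 'system', 'content': system_prompt})
        # force_system_prompt: repeat it next to the most recent user input
        # so it is not buried at the start of a long thread.
        if messages[-1]['role'] == 'system':
            messages[-1] = {'role': 'system', 'content': system_prompt}
        else:
            messages.insert(-1, {'role': 'system', 'content': system_prompt})

    print([m['role'] for m in messages])
    # -> ['system', 'user', 'assistant', 'system', 'user']

For a fresh one-message chat the same branch yields ['system', 'system', 'user'],
i.e. the prompt is sent twice; in long threads the duplication is the point of
the option: the instruction sits both at the start of the conversation and
directly before the newest user message.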