From fe4cb78e8a12832cedd124f6bb9099701f0f4a6a Mon Sep 17 00:00:00 2001
From: Cyberes
Date: Sun, 19 Mar 2023 15:22:05 -0600
Subject: [PATCH] better logging

---
 config.sample.yaml               |  7 ++--
 main.py                          | 49 ++++++++++++----------
 matrix_gpt/bot/chat_functions.py | 55 ++++----------------
 3 files changed, 33 insertions(+), 78 deletions(-)

diff --git a/config.sample.yaml b/config.sample.yaml
index b571655..b6ccd47 100644
--- a/config.sample.yaml
+++ b/config.sample.yaml
@@ -32,8 +32,11 @@ command_prefix: '!c'
 
 reply_in_thread: true
 
-# Log the full response (prompt + response) at debug level.
-log_full_response: false
+logging:
+  log_level: 'info'
+
+  # Log the full response (prompt + response) at debug level.
+  log_full_response: false
 
 logout_other_devices: false

diff --git a/main.py b/main.py
index ec24b53..ad5ca5f 100755
--- a/main.py
+++ b/main.py
@@ -23,7 +23,6 @@ script_directory = os.path.abspath(os.path.dirname(__file__))
 
 logging.basicConfig()
 logger = logging.getLogger('MatrixGPT')
-logger.setLevel(logging.INFO)
 
 parser = argparse.ArgumentParser(description='MatrixGPT Bot')
 parser.add_argument('--config', default=Path(script_directory, 'config.yaml'), help='Path to config.yaml if it is not located next to this executable.')
@@ -52,6 +51,9 @@ check_config_value_exists(config_data, 'allowed_to_invite', allow_empty=True)
 check_config_value_exists(config_data, 'command_prefix')
 check_config_value_exists(config_data, 'data_storage')
 
+check_config_value_exists(config_data, 'logging')
+check_config_value_exists(config_data['logging'], 'log_level')
+
 check_config_value_exists(config_data, 'openai')
 check_config_value_exists(config_data['openai'], 'api_key')
 check_config_value_exists(config_data['openai'], 'model')
@@ -68,17 +70,23 @@ def retry(msg=None):
 
 
 async def main():
+    if config_data['logging']['log_level'] == 'info':
+        log_level = logging.INFO
+    elif config_data['logging']['log_level'] == 'debug':
+        log_level = logging.DEBUG
+    elif config_data['logging']['log_level'] == 'warning':
+        log_level = logging.WARNING
+    elif config_data['logging']['log_level'] == 'critical':
+        log_level = logging.CRITICAL
+    else:
+        log_level = logging.INFO
+    logger.setLevel(log_level)
+
     # Logging in with a new device each time seems to fix encryption errors
     device_id = config_data['bot_auth'].get('device_id', str(uuid4()))
-    matrix_helper = MatrixNioGPTHelper(
-        auth_file=Path(config_data['bot_auth']['store_path'], 'bot_auth.json'),
-        user_id=config_data['bot_auth']['username'],
-        passwd=config_data['bot_auth']['password'],
-        homeserver=config_data['bot_auth']['homeserver'],
-        store_path=config_data['bot_auth']['store_path'],
-        device_id=device_id,
-    )
+    matrix_helper = MatrixNioGPTHelper(auth_file=Path(config_data['bot_auth']['store_path'], 'bot_auth.json'), user_id=config_data['bot_auth']['username'], passwd=config_data['bot_auth']['password'], homeserver=config_data['bot_auth']['homeserver'], store_path=config_data['bot_auth']['store_path'],
+                                       device_id=device_id, )
     client = matrix_helper.client
 
     openai.api_key = config_data['openai']['api_key']
@@ -86,20 +94,9 @@ async def main():
     storage = Storage(Path(config_data['data_storage'], 'matrixgpt.db'))
 
     # Set up event callbacks
-    callbacks = Callbacks(
-        client,
-        storage,
-        openai_obj=openai,
-        command_prefix=config_data['command_prefix'],
-        openai_model=config_data['openai']['model'],
-        reply_in_thread=config_data.get('reply_in_thread', False),
-        allowed_to_invite=config_data['allowed_to_invite'],
-        allowed_to_chat=config_data['allowed_to_chat'],
-        log_full_response=config_data.get('log_full_response', False),
-        system_prompt=config_data['openai'].get('system_prompt'),
-        injected_system_prompt=config_data['openai'].get('injected_system_prompt', False),
-        hyper_temperature=config_data['openai'].get('temperature', 0)
-    )
+    callbacks = Callbacks(client, storage, openai_obj=openai, command_prefix=config_data['command_prefix'], openai_model=config_data['openai']['model'], reply_in_thread=config_data.get('reply_in_thread', False), allowed_to_invite=config_data['allowed_to_invite'],
+                          allowed_to_chat=config_data['allowed_to_chat'], log_full_response=config_data.get('log_full_response', False), system_prompt=config_data['openai'].get('system_prompt'), injected_system_prompt=config_data['openai'].get('injected_system_prompt', False),
+                          hyper_temperature=config_data['openai'].get('temperature', 0))
     client.add_event_callback(callbacks.message, RoomMessageText)
     client.add_event_callback(callbacks.invite_event_filtered_callback, InviteMemberEvent)
     client.add_event_callback(callbacks.decryption_failure, MegolmEvent)
@@ -140,11 +137,7 @@ async def main():
             device_list = [x.id for x in devices]
             if device_id in device_list:
                 device_list.remove(device_id)
-            x = await client.delete_devices(device_list, {
-                "type": "m.login.password",
-                "user": config_data['bot_auth']['username'],
-                "password": config_data['bot_auth']['password']
-            })
+            x = await client.delete_devices(device_list, {"type": "m.login.password", "user": config_data['bot_auth']['username'], "password": config_data['bot_auth']['password']})
             logger.info(f'Logged out: {device_list}')
 
     await client.sync_forever(timeout=10000, full_state=True)

diff --git a/matrix_gpt/bot/chat_functions.py b/matrix_gpt/bot/chat_functions.py
index e2f1de6..323be34 100644
--- a/matrix_gpt/bot/chat_functions.py
+++ b/matrix_gpt/bot/chat_functions.py
@@ -8,16 +8,7 @@ from nio import (AsyncClient, ErrorResponse, Event, MatrixRoom, MegolmEvent, Res
 logger = logging.getLogger('MatrixGPT')
 
 
-async def send_text_to_room(
-        client: AsyncClient,
-        room_id: str,
-        message: str,
-        notice: bool = False,
-        markdown_convert: bool = True,
-        reply_to_event_id: Optional[str] = None,
-        thread: bool = False,
-        thread_root_id: str = None
-) -> Union[RoomSendResponse, ErrorResponse]:
+async def send_text_to_room(client: AsyncClient, room_id: str, message: str, notice: bool = False, markdown_convert: bool = True, reply_to_event_id: Optional[str] = None, thread: bool = False, thread_root_id: str = None) -> Union[RoomSendResponse, ErrorResponse]:
     """Send text to a matrix room.
 
     Args:
@@ -40,31 +31,16 @@ async def send_text_to_room(
     # Determine whether to ping room members or not
     msgtype = "m.notice" if notice else "m.text"
 
-    content = {
-        "msgtype": msgtype,
-        "format": "org.matrix.custom.html",
-        "body": message,
-    }
+    content = {"msgtype": msgtype, "format": "org.matrix.custom.html", "body": message, }
 
     if markdown_convert:
         content["formatted_body"] = markdown(message)
 
     if reply_to_event_id:
         if thread:
-            content["m.relates_to"] = {
-                'event_id': thread_root_id,
-                'is_falling_back': True,
-                "m.in_reply_to": {
-                    "event_id": reply_to_event_id
-                },
-                'rel_type': "m.thread"
-            }
+            content["m.relates_to"] = {'event_id': thread_root_id, 'is_falling_back': True, "m.in_reply_to": {"event_id": reply_to_event_id}, 'rel_type': "m.thread"}
         else:
-            content["m.relates_to"] = {
-                "m.in_reply_to": {
-                    "event_id": reply_to_event_id
-                }
-            }
+            content["m.relates_to"] = {"m.in_reply_to": {"event_id": reply_to_event_id}}
 
     try:
         return await client.room_send(room_id, "m.room.message", content, ignore_unverified_devices=True)
@@ -108,13 +84,7 @@ async def react_to_event(client: AsyncClient, room_id: str, event_id: str, react
     Raises:
         SendRetryError: If the reaction was unable to be sent.
     """
-    content = {
-        "m.relates_to": {
-            "rel_type": "m.annotation",
-            "event_id": event_id,
-            "key": reaction_text
-        }
-    }
+    content = {"m.relates_to": {"rel_type": "m.annotation", "event_id": event_id, "key": reaction_text}}
 
     return await client.room_send(room_id, "m.reaction", content, ignore_unverified_devices=True, )
@@ -160,19 +130,7 @@ async def get_thread_content(client: AsyncClient, room: MatrixRoom, base_event:
     return messages
 
 
-async def process_chat(
-        client,
-        room,
-        event,
-        command,
-        store,
-        openai_obj: ModuleType,
-        openai_model: str,
-        thread_root_id: str = None,
-        system_prompt: str = None,
-        log_full_response: bool = False,
-        injected_system_prompt: str = False
-):
+async def process_chat(client, room, event, command, store, openai_obj: ModuleType, openai_model: str, thread_root_id: str = None, system_prompt: str = None, log_full_response: bool = False, injected_system_prompt: str = False):
     if not store.check_seen_event(event.event_id):
         await client.room_typing(room.room_id, typing_state=True, timeout=3000)
         # if self.reply_in_thread:
@@ -196,6 +154,7 @@ async def process_chat(
         if index != -9999:
             messages.insert(index, {"role": "system", "content": injected_system_prompt})
 
+        logger.debug(f'Generating reply to event {event.event_id}')
         response = openai_obj.ChatCompletion.create(model=openai_model, messages=messages, temperature=0, timeout=10)
         text_response = response["choices"][0]["message"]["content"].strip().strip('\n')
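
The log-level handling this patch adds to main.py can be exercised on its own. Below is a minimal sketch, not part of the patch: the config_data literal is a hypothetical stand-in for the parsed config.yaml, shaped like the new logging: section in config.sample.yaml, and the dict lookup is an equivalent of the patch's if/elif chain with the same fallback to INFO for unrecognized strings.

    import logging

    # Hypothetical stand-in for the parsed config.yaml, shaped like the new
    # logging: section in config.sample.yaml.
    config_data = {'logging': {'log_level': 'debug', 'log_full_response': False}}

    # Equivalent of the patch's if/elif chain: any string other than the four
    # recognized levels falls back to INFO, just as the else branch does.
    LOG_LEVELS = {'info': logging.INFO, 'debug': logging.DEBUG, 'warning': logging.WARNING, 'critical': logging.CRITICAL}

    logging.basicConfig()
    logger = logging.getLogger('MatrixGPT')
    logger.setLevel(LOG_LEVELS.get(config_data['logging']['log_level'], logging.INFO))

    # Emitted only when log_level is 'debug', mirroring the new
    # logger.debug(...) call in process_chat().
    logger.debug('debug logging is enabled')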