change config format, fix bugs

Cyberes 2023-03-19 14:46:42 -06:00
parent 7513bef0ba
commit 4a2621cb1f
4 changed files with 145 additions and 65 deletions

main.py (file 1 of 4, 31 lines changed)

@@ -41,7 +41,7 @@ else:
print(f'Failed to load config file: {e}')
sys.exit(1)
# Test config
# Lazy way to validate config
check_config_value_exists(config_data, 'bot_auth', dict)
check_config_value_exists(config_data['bot_auth'], 'username')
check_config_value_exists(config_data['bot_auth'], 'password')
@@ -50,10 +50,12 @@ check_config_value_exists(config_data['bot_auth'], 'store_path')
check_config_value_exists(config_data, 'allowed_to_chat')
check_config_value_exists(config_data, 'allowed_to_invite', allow_empty=True)
check_config_value_exists(config_data, 'command_prefix')
check_config_value_exists(config_data, 'openai_api_key')
check_config_value_exists(config_data, 'openai_model')
check_config_value_exists(config_data, 'data_storage')
check_config_value_exists(config_data, 'openai')
check_config_value_exists(config_data['openai'], 'api_key')
check_config_value_exists(config_data['openai'], 'model')
# check_config_value_exists(config_data, 'autojoin_rooms')
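The headline change: the OpenAI settings move from flat top-level keys (openai_api_key, openai_model) into a nested openai section. A minimal sketch of the new layout, written as the parsed config_data dict; values are placeholders and only the keys checked above are required:

config_data = {
    'bot_auth': {
        'username': 'chatgpt',      # placeholder credentials
        'password': 'hunter2',
        'store_path': 'bot-store/',
    },
    'allowed_to_chat': 'all',
    'allowed_to_invite': ['@admin:example.com'],
    'command_prefix': '!c',
    'data_storage': 'data/',        # matrixgpt.db is created under this path
    'openai': {                     # previously flat: openai_api_key / openai_model
        'api_key': 'sk-...',
        'model': 'gpt-3.5-turbo',   # placeholder model name
        # optional: system_prompt, injected_system_prompt, temperature
    },
    # optional top-level keys: reply_in_thread, log_full_response
}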
@@ -79,12 +81,7 @@ async def main():
)
client = matrix_helper.client
openai.api_key = config_data['openai_api_key']
openai_config = {
'model': config_data['openai_model'],
'openai': openai
}
openai.api_key = config_data['openai']['api_key']
storage = Storage(Path(config_data['data_storage'], 'matrixgpt.db'))
@@ -92,14 +89,16 @@ async def main():
callbacks = Callbacks(
client,
storage,
config_data['command_prefix'],
openai_config,
config_data.get('reply_in_thread', False),
config_data['allowed_to_invite'],
config_data['allowed_to_chat'],
config_data.get('system_prompt'),
openai_obj=openai,
command_prefix=config_data['command_prefix'],
openai_model=config_data['openai']['model'],
reply_in_thread=config_data.get('reply_in_thread', False),
allowed_to_invite=config_data['allowed_to_invite'],
allowed_to_chat=config_data['allowed_to_chat'],
log_full_response=config_data.get('log_full_response', False),
injected_system_prompt=config_data.get('injected_system_prompt', False)
system_prompt=config_data['openai'].get('system_prompt'),
injected_system_prompt=config_data['openai'].get('injected_system_prompt', False),
hyper_temperature=config_data['openai'].get('temperature', 0)
)
client.add_event_callback(callbacks.message, RoomMessageText)
client.add_event_callback(callbacks.invite_event_filtered_callback, InviteMemberEvent)
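Note the move from positional arguments (and the old openai_config wrapper dict) to explicit keywords when building Callbacks. A tiny illustration of the failure mode this avoids, using a hypothetical function:

def configure(prefix, model, reply_in_thread=False):
    pass

# Positional: inserting a new second parameter later silently rebinds
# 'gpt-3.5-turbo' to the wrong name with no error.
configure('!c', 'gpt-3.5-turbo')

# Keyword: the call keeps its meaning even as the signature grows.
configure(prefix='!c', model='gpt-3.5-turbo')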

(file 2 of 4)

@@ -1,4 +1,5 @@
import logging
from types import ModuleType
from nio import AsyncClient, MatrixRoom, RoomMessageText
@@ -18,7 +19,8 @@ class Command:
command: str,
room: MatrixRoom,
event: RoomMessageText,
openai,
openai_obj: ModuleType,
openai_model: str,
reply_in_thread,
system_prompt: str = None,
injected_system_prompt: str = None,
@@ -46,11 +48,12 @@ class Command:
self.room = room
self.event = event
self.args = self.command.split()[1:]
self.openai = openai
self.openai_model = openai_model
self.reply_in_thread = reply_in_thread
self.system_prompt = system_prompt
self.injected_system_prompt = injected_system_prompt
self.log_full_response = log_full_response
self.openai_obj = openai_obj
async def process(self):
"""Process the command"""
@@ -60,13 +63,24 @@ class Command:
# await self._echo()
# elif self.command.startswith("react"):
# await self._react()
if self.command.startswith("help"):
await self._show_help()
else:
# if self.command.startswith("help"):
# await self._show_help()
# else:
await self._process_chat()
async def _process_chat(self):
await process_chat(self.client, self.room, self.event, self.command, self.store, self.openai, system_prompt=self.system_prompt, injected_system_prompt=self.injected_system_prompt, log_full_response=self.log_full_response)
await process_chat(
self.client,
self.room,
self.event,
self.command,
self.store,
openai_obj=self.openai_obj,
openai_model=self.openai_model,
system_prompt=self.system_prompt,
injected_system_prompt=self.injected_system_prompt,
log_full_response=self.log_full_response
)
async def _show_help(self):
"""Show the help text"""

(file 3 of 4)

@@ -1,6 +1,7 @@
# https://github.com/anoadragon453/nio-template
import logging
import time
from types import ModuleType
from nio import (AsyncClient, InviteMemberEvent, JoinError, MatrixRoom, MegolmEvent, RoomMessageText, UnknownEvent, )
@@ -13,7 +14,20 @@ logger = logging.getLogger('MatrixGPT')
class Callbacks:
def __init__(self, client: AsyncClient, store: Storage, command_prefix: str, openai, reply_in_thread, allowed_to_invite, allowed_to_chat='all', system_prompt: str = None, log_full_response: bool = False, injected_system_prompt: bool = False):
def __init__(self,
client: AsyncClient,
store: Storage,
command_prefix: str,
openai_obj: ModuleType,
openai_model: str,
reply_in_thread: bool,
allowed_to_invite: list,
allowed_to_chat: str = 'all',
system_prompt: str = None,
log_full_response: bool = False,
injected_system_prompt: str = None,
hyper_temperature: float = 0
):
"""
Args:
client: nio client used to interact with matrix.
@@ -26,7 +40,7 @@ class Callbacks:
self.store = store
# self.config = config
self.command_prefix = command_prefix
self.openai = openai
self.openai_model = openai_model
self.startup_ts = time.time_ns() // 1_000_000
self.reply_in_thread = reply_in_thread
self.allowed_to_invite = allowed_to_invite if allowed_to_invite else []
@@ -34,6 +48,7 @@ class Callbacks:
self.system_prompt = system_prompt
self.log_full_response = log_full_response
self.injected_system_prompt = injected_system_prompt
self.openai_obj = openai_obj
async def message(self, room: MatrixRoom, event: RoomMessageText) -> None:
"""Callback for when a message event is received
@@ -46,8 +61,7 @@ class Callbacks:
# Extract the message text
msg = event.body.strip().strip('\n')
logger.debug(f"Bot message received for room {room.display_name} | "
f"{room.user_name(event.sender)}: {msg}")
logger.debug(f"Bot message received for room {room.display_name} | {room.user_name(event.sender)}: {msg}")
await self.client.room_read_markers(room.room_id, event.event_id, event.event_id)
@@ -85,18 +99,45 @@ class Callbacks:
return
else:
thread_msg = event.body.strip().strip('\n')
api_data.append({'role': 'assistant' if event.sender == self.client.user_id else 'user', 'content': thread_msg if not thread_msg.startswith(self.command_prefix) else thread_msg[len(self.command_prefix):].strip()})  # if len(thread_content) >= 2 and thread_content[0].body.startswith(self.command_prefix): # if thread_content[len(thread_content) - 2].sender == self.client.user
api_data.append(
{
'role': 'assistant' if event.sender == self.client.user_id else 'user',
'content': thread_msg if not thread_msg.startswith(self.command_prefix) else thread_msg[len(self.command_prefix):].strip()
}) # if len(thread_content) >= 2 and thread_content[0].body.startswith(self.command_prefix): # if thread_content[len(thread_content) - 2].sender == self.client.user
# message = Message(self.client, self.store, msg, room, event, self.reply_in_thread)
# await message.process()
# api_data.append({'role': 'user', 'content': msg})
await process_chat(self.client, room, event, api_data, self.store, self.openai, thread_root_id=thread_content[0].event_id, system_prompt=self.system_prompt, log_full_response=self.log_full_response, injected_system_prompt=self.injected_system_prompt)
await process_chat(
self.client,
room,
event,
api_data,
self.store,
openai_obj=self.openai_obj,
openai_model=self.openai_model,
thread_root_id=thread_content[0].event_id,
system_prompt=self.system_prompt,
log_full_response=self.log_full_response,
injected_system_prompt=self.injected_system_prompt
)
return
elif msg.startswith(f'{self.command_prefix} ') or room.member_count == 2:
# Otherwise if this is in a 1-1 with the bot or features a command prefix, treat it as a command.
msg = msg if not msg.startswith(self.command_prefix) else msg[len(self.command_prefix):].strip() # Remove the command prefix
command = Command(self.client, self.store, msg, room, event, self.openai, self.reply_in_thread, system_prompt=self.system_prompt, injected_system_prompt=self.injected_system_prompt, log_full_response=self.log_full_response)
command = Command(
self.client,
self.store,
msg,
room,
event,
openai_obj=self.openai_obj,
openai_model=self.openai_model,
reply_in_thread=self.reply_in_thread,
system_prompt=self.system_prompt,
injected_system_prompt=self.injected_system_prompt,
log_full_response=self.log_full_response
)
await command.process()
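In the thread branch above, the history is flattened into the OpenAI chat format: events the bot itself sent become 'assistant' turns, everything else becomes 'user' turns, and the command prefix is stripped. A sketch of the resulting api_data for a short thread, assuming command_prefix is '!c' and placeholder content:

api_data = [
    {'role': 'user', 'content': 'What is Matrix?'},              # was '!c What is Matrix?'
    {'role': 'assistant', 'content': 'An open chat protocol.'},  # event.sender == the bot's user_id
    {'role': 'user', 'content': 'And who writes the spec?'},
]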
async def invite(self, room: MatrixRoom, event: InviteMemberEvent) -> None:
@@ -104,17 +145,12 @@ class Callbacks:
Args:
room: The room that we are invited to.
event: The invite event.
"""
if not check_authorized(event.sender, self.allowed_to_invite):
logger.info(f"Got invite to {room.room_id} from {event.sender} but rejected.")
return
# if event.sender not in self.allowed_to_invite:
# logger.info(f"Got invite to {room.room_id} from {event.sender} but rejected.")
# return
logger.debug(f"Got invite to {room.room_id} from {event.sender}.")
# Attempt to join 3 times before giving up
@@ -123,13 +159,11 @@ class Callbacks:
if type(result) == JoinError:
logger.error(f"Error joining room {room.room_id} (attempt %d): %s", attempt, result.message, )
else:
break
logger.info(f"Joined via invite: {room.room_id}")
return
else:
logger.error("Unable to join room: %s", room.room_id)
# Successfully joined room
logger.info(f"Joined via invite: {room.room_id}")
async def invite_event_filtered_callback(self, room: MatrixRoom, event: InviteMemberEvent) -> None:
"""
Since the InviteMemberEvent is fired for every m.room.member state received
@@ -138,7 +172,6 @@ class Callbacks:
This makes sure we only call `callbacks.invite` with our own invite events.
"""
if event.state_key == self.client.user_id:
# This is our own membership (invite) event
await self.invite(room, event)
# async def _reaction(
@@ -188,7 +221,6 @@ class Callbacks:
Args:
room: The room that the event that we were unable to decrypt is in.
event: The encrypted event that we were unable to decrypt.
"""
# logger.error(f"Failed to decrypt event '{event.event_id}' in room '{room.room_id}'!"

(file 4 of 4)

@@ -1,4 +1,5 @@
import logging
from types import ModuleType
from typing import List, Optional, Union
from markdown import markdown
@@ -7,44 +8,66 @@ from nio import (AsyncClient, ErrorResponse, Event, MatrixRoom, MegolmEvent, Res
logger = logging.getLogger('MatrixGPT')
async def send_text_to_room(client: AsyncClient, room_id: str, message: str, notice: bool = False, markdown_convert: bool = True, reply_to_event_id: Optional[str] = None, thread: bool = False, thread_root_id: str = None) -> Union[RoomSendResponse, ErrorResponse]:
async def send_text_to_room(
client: AsyncClient,
room_id: str,
message: str,
notice: bool = False,
markdown_convert: bool = True,
reply_to_event_id: Optional[str] = None,
thread: bool = False,
thread_root_id: str = None
) -> Union[RoomSendResponse, ErrorResponse]:
"""Send text to a matrix room.
Args:
client: The client to communicate to matrix with.
room_id: The ID of the room to send the message to.
message: The message content.
notice: Whether the message should be sent with an "m.notice" message type
(will not ping users).
markdown_convert: Whether to convert the message content to markdown.
Defaults to true.
reply_to_event_id: The event ID of the message this one is a reply to, if any.
thread: Whether to send this message as a reply in a thread.
thread_root_id: The event ID of the thread's root, required when thread is True.
Returns:
A RoomSendResponse if the request was successful, else an ErrorResponse.
"""
# Determine whether to ping room members or not
msgtype = "m.notice" if notice else "m.text"
content = {"msgtype": msgtype, "format": "org.matrix.custom.html", "body": message, }
content = {
"msgtype": msgtype,
"format": "org.matrix.custom.html",
"body": message,
}
if markdown_convert:
content["formatted_body"] = markdown(message)
if reply_to_event_id:
if thread:
content["m.relates_to"] = {'event_id': thread_root_id, 'is_falling_back': True, "m.in_reply_to": {"event_id": reply_to_event_id}, 'rel_type': "m.thread"}
content["m.relates_to"] = {
'event_id': thread_root_id,
'is_falling_back': True,
"m.in_reply_to": {
"event_id": reply_to_event_id
},
'rel_type': "m.thread"
}
else:
content["m.relates_to"] = {"m.in_reply_to": {"event_id": reply_to_event_id}}
content["m.relates_to"] = {
"m.in_reply_to": {
"event_id": reply_to_event_id
}
}
try:
return await client.room_send(room_id, "m.room.message", content, ignore_unverified_devices=True, )
return await client.room_send(room_id, "m.room.message", content, ignore_unverified_devices=True)
except SendRetryError:
logger.exception(f"Unable to send message response to {room_id}")
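For reference, a threaded, markdown-converted reply built by the branches above ends up with content shaped like this (event IDs and text are placeholders):

content = {
    'msgtype': 'm.notice',                 # notice=True
    'format': 'org.matrix.custom.html',
    'body': 'Hello **world**',
    'formatted_body': '<p>Hello <strong>world</strong></p>',  # markdown() output
    'm.relates_to': {
        'rel_type': 'm.thread',
        'event_id': '$thread-root-id',     # thread_root_id
        'is_falling_back': True,           # thread-unaware clients render a plain reply
        'm.in_reply_to': {'event_id': '$replied-to-id'},  # reply_to_event_id
    },
}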
@@ -63,9 +86,7 @@ def make_pill(user_id: str, displayname: str = None) -> str:
The formatted user pill.
"""
if not displayname:
# Use the user ID as the displayname if not provided
displayname = user_id
return f'<a href="https://matrix.to/#/{user_id}">{displayname}</a>'
@@ -87,8 +108,13 @@ async def react_to_event(client: AsyncClient, room_id: str, event_id: str, react
Raises:
SendRetryError: If the reaction was unable to be sent.
"""
content = {"m.relates_to": {"rel_type": "m.annotation", "event_id": event_id, "key": reaction_text, }}
content = {
"m.relates_to": {
"rel_type": "m.annotation",
"event_id": event_id,
"key": reaction_text
}
}
return await client.room_send(room_id, "m.reaction", content, ignore_unverified_devices=True, )
@@ -104,9 +130,7 @@ async def decryption_failure(self, room: MatrixRoom, event: MegolmEvent) -> None
# f"commands a second time)."
# )
user_msg = ("Unable to decrypt this message. "
"Check whether you've chosen to only encrypt to trusted devices.")
user_msg = "Unable to decrypt this message. Check whether you've chosen to only encrypt to trusted devices."
await send_text_to_room(self.client, room.room_id, user_msg, reply_to_event_id=event.event_id, )
@@ -119,7 +143,6 @@ async def is_this_our_thread(client: AsyncClient, room: MatrixRoom, event: RoomM
if base_event_id:
return (await client.room_get_event(room.room_id, base_event_id)).event.body.startswith(f'{command_flag} ')
else:
# Better safe than sorry
return False
@@ -137,7 +160,19 @@ async def get_thread_content(client: AsyncClient, room: MatrixRoom, base_event:
return messages
async def process_chat(client, room, event, command, store, openai, thread_root_id: str = None, system_prompt: str = None, log_full_response: bool = False, injected_system_prompt: bool = False):
async def process_chat(
client,
room,
event,
command,
store,
openai_obj: ModuleType,
openai_model: str,
thread_root_id: str = None,
system_prompt: str = None,
log_full_response: bool = False,
injected_system_prompt: str = None
):
if not store.check_seen_event(event.event_id):
await client.room_typing(room.room_id, typing_state=True, timeout=3000)
# if self.reply_in_thread:
@@ -146,7 +181,7 @@ async def process_chat(client, room, event, command, store, openai, thread_root_
if isinstance(command, list):
messages = command
else:
messages = [{'role': 'user', 'content': command}, ]
messages = [{'role': 'user', 'content': command}]
if system_prompt:
messages.insert(0, {"role": "system", "content": system_prompt})
@@ -154,17 +189,17 @@ async def process_chat(client, room, event, command, store, openai, thread_root_
if messages[-1]['role'] == 'system':
del messages[-1]
index = -9999
if len(messages) >= 2:
if len(messages) >= 3: # only inject the system prompt if this isn't the first reply
index = -1
elif not system_prompt:
index = 0
if index != -9999:
messages.insert(index, {"role": "system", "content": injected_system_prompt})
response = openai['openai'].ChatCompletion.create(model=openai['model'], messages=messages, temperature=0, )
response = openai_obj.ChatCompletion.create(model=openai_model, messages=messages, temperature=0)
text_response = response["choices"][0]["message"]["content"].strip().strip('\n')
# Logging stuff
if log_full_response:
logger.debug({'event_id': event.event_id, 'room': room.room_id, 'messages': messages, 'response': text_response})
z = text_response.replace("\n", "\\n")
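To make the injection rule above concrete: a trailing system message is dropped, then the injected prompt is inserted just before the newest user message once the conversation has at least three entries (i.e. not on the first reply), or at index 0 when no opening system_prompt exists. A sketch with placeholder values:

messages = [
    {'role': 'system', 'content': 'You are a helpful bot.'},  # system_prompt
    {'role': 'user', 'content': 'first question'},
    {'role': 'assistant', 'content': 'first answer'},
    {'role': 'user', 'content': 'follow-up'},
]
# len(messages) >= 3, so index == -1: inject just before the newest user message.
messages.insert(-1, {'role': 'system', 'content': 'Keep answers short.'})  # injected_system_prompt
# messages[-2] is now the injected system message; the follow-up stays last.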