fix mergedeep breaking bots, add help text, minor cleanup

Cyberes 2024-04-07 23:42:09 -06:00
parent 34dc652213
commit 58cee52545
5 changed files with 33 additions and 28 deletions

main.py

@@ -38,7 +38,6 @@ try:
 except SchemeValidationError as e:
     logger.critical(f'Config validation error: {e}')
     sys.exit(1)

-config_data = global_config.config

 def retry(msg=None):
@@ -50,13 +49,13 @@ def retry(msg=None):

 async def main():
-    if config_data['logging']['log_level'] == 'info':
+    if global_config['logging']['log_level'] == 'info':
         log_level = logging.INFO
-    elif config_data['logging']['log_level'] == 'debug':
+    elif global_config['logging']['log_level'] == 'debug':
         log_level = logging.DEBUG
-    elif config_data['logging']['log_level'] == 'warning':
+    elif global_config['logging']['log_level'] == 'warning':
         log_level = logging.WARNING
-    elif config_data['logging']['log_level'] == 'critical':
+    elif global_config['logging']['log_level'] == 'critical':
         log_level = logging.CRITICAL
     else:
         log_level = logging.INFO
@@ -77,22 +76,19 @@ async def main():
     logger.info(f'Log level is {l}')
     del l

-    if len(config_data['command']) == 1 and config_data['command'][0].get('mode') == 'local':
-        logger.info('Running in local mode, OpenAI API key not required.')
-
     logger.debug(f'Command Prefixes: {[k for k, v in global_config.command_prefixes.items()]}')

     client_helper = MatrixClientHelper(
-        user_id=config_data['auth']['username'],
-        passwd=config_data['auth']['password'],
-        homeserver=config_data['auth']['homeserver'],
-        store_path=config_data['store_path'],
-        device_id=config_data['auth']['device_id']
+        user_id=global_config['auth']['username'],
+        passwd=global_config['auth']['password'],
+        homeserver=global_config['auth']['homeserver'],
+        store_path=global_config['store_path'],
+        device_id=global_config['auth']['device_id']
     )
     client = client_helper.client

-    if config_data['openai'].get('api_base'):
-        logger.info(f'Set OpenAI API base URL to: {config_data["openai"].get("api_base")}')
+    if global_config['openai'].get('api_base'):
+        logger.info(f'Set OpenAI API base URL to: {global_config["openai"].get("api_base")}')

     # Set up event callbacks
     callbacks = MatrixBotCallbacks(client=client_helper)
@@ -126,8 +122,8 @@ async def main():
     # Login succeeded!
     logger.info(f'Logged in as {client.user_id}')

-    if config_data.get('autojoin_rooms'):
-        for room in config_data.get('autojoin_rooms'):
+    if global_config.get('autojoin_rooms'):
+        for room in global_config.get('autojoin_rooms'):
             r = await client.join(room)
             if not isinstance(r, JoinResponse):
                 logger.critical(f'Failed to join room {room}: {vars(r)}')
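
Dropping the module-level config_data = global_config.config alias works because ConfigManager is subscriptable: the diff indexes global_config[...] and calls global_config.get(...) directly. A minimal sketch of the dict-style access this relies on, assuming it delegates to the parsed config dict (the actual implementation is not shown in this commit):

# Sketch only: dict-style access on ConfigManager, assuming it delegates
# to the parsed config dict. The real implementation is not in this diff.
class ConfigManager:
    def __init__(self):
        self._parsed_config = {}

    def __getitem__(self, key):
        # Lets callers write global_config['auth']['username'].
        return self._parsed_config[key]

    def get(self, key, default=None):
        # Mirrors dict.get() for optional keys like 'autojoin_rooms'.
        return self._parsed_config.get(key, default)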


@@ -4,7 +4,6 @@ from types import NoneType

 import bison
 from bison.errors import SchemeValidationError
-from mergedeep import merge, Strategy

 config_scheme = bison.Scheme(
     bison.Option('store_path', default='bot-store/', field_type=str),
@@ -32,6 +31,7 @@ config_scheme = bison.Scheme(
         bison.Option('system_prompt', field_type=str, default=None),
         bison.Option('injected_system_prompt', field_type=str, default=None),
         bison.Option('api_base', field_type=[str, NoneType], default=None),
+        bison.Option('help', field_type=[str, NoneType], default=None),
     )),
     bison.DictOption('openai', scheme=bison.Scheme(
         bison.Option('api_key', field_type=[str, NoneType], default=None, required=False),
@@ -56,6 +56,7 @@ DEFAULT_LISTS = {
         'system_prompt': None,
         'injected_system_prompt': None,
         'api_base': None,
+        'help': None,
     }
 }
@@ -86,12 +87,16 @@ class ConfigManager:
     def _merge_in_list_defaults(self):
         new_config = copy.copy(self._config.config)

-        for k, v in self._config.config.items():
-            for d_k, d_v in DEFAULT_LISTS.items():
+        for d_k, d_v in DEFAULT_LISTS.items():
+            for k, v in self._config.config.items():
                 if k == d_k:
                     assert isinstance(v, list)
-                    for i in range(len(v)):
-                        new_config[k][i] = merge(d_v, v[i], strategy=Strategy.ADDITIVE)
+                    new_list = []
+                    for e in v:
+                        merged_dict = copy.copy(d_v)  # create a copy of the default dict
+                        merged_dict.update(e)  # update it with the new values
+                        new_list.append(merged_dict)
+                    new_config[k] = new_list
         return new_config

     @property
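
The mergedeep removal above is presumably the "breaking bots" fix from the commit message: mergedeep's merge() mutates its first argument in place, so passing the shared DEFAULT_LISTS entry d_v as the destination would write each command's settings into the defaults and leak them into every command merged afterwards. A minimal standalone sketch of that failure mode (made-up keys, not this repo's config):

# Sketch of the suspected bug: merge() writes into its destination,
# so reusing a shared defaults dict cross-contaminates entries.
from mergedeep import merge, Strategy

defaults = {'model': None, 'temperature': 0.5}
bot_a = merge(defaults, {'model': 'gpt-4'}, strategy=Strategy.ADDITIVE)
bot_b = merge(defaults, {'model': 'claude-3-opus'}, strategy=Strategy.ADDITIVE)
print(bot_a is defaults)  # True: merge() returned the mutated defaults dict
print(bot_a['model'])     # 'claude-3-opus' -- bot_a's setting was clobbered

# The copy-then-update pattern adopted in this commit leaves the
# shared defaults untouched:
import copy
defaults = {'model': None, 'temperature': 0.5}
merged_a = copy.copy(defaults)
merged_a.update({'model': 'gpt-4'})
merged_b = copy.copy(defaults)
merged_b.update({'model': 'claude-3-opus'})
print(merged_a['model'], merged_b['model'])  # gpt-4 claude-3-opus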
@@ -101,9 +106,9 @@ class ConfigManager:
     def _generate_command_prefixes(self):
         assert not self._validated
         command_prefixes = {}

-        for item in self._config.config['command']:
+        for item in self._parsed_config['command']:
             command_prefixes[item['trigger']] = item
-            if item.get('max_tokens', 0) < 1:
+            if item['api_type'] == 'anth' and item.get('max_tokens', 0) < 1:
                 raise SchemeValidationError(f'Anthropic requires `max_tokens`. See <https://support.anthropic.com/en/articles/7996856-what-is-the-maximum-prompt-length>')
         return command_prefixes
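
Two behavior changes here: prefixes are now built from the merged _parsed_config (so list defaults like help are present), and the max_tokens requirement is now gated on the API type, since OpenAI treats max_tokens as optional while Anthropic's messages API requires it. Illustrative command entries (hypothetical values) showing what the relaxed check accepts and rejects:

# Hypothetical command entries; only the second one now trips the
# api_type-gated check and raises SchemeValidationError.
commands = [
    {'trigger': '!gpt', 'api_type': 'openai', 'model': 'gpt-4', 'max_tokens': 0},  # accepted: OpenAI doesn't need max_tokens
    {'trigger': '!c', 'api_type': 'anth', 'model': 'claude-3-opus-20240229', 'max_tokens': 0},  # rejected
]
for item in commands:
    if item['api_type'] == 'anth' and item.get('max_tokens', 0) < 1:
        print(f"{item['trigger']}: invalid, Anthropic requires `max_tokens`")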


@@ -2,7 +2,7 @@ from matrix_gpt.config import global_config


 class CommandInfo:
-    def __init__(self, trigger: str, api_type: str, model: str, max_tokens: int, temperature: float, allowed_to_chat: list, allowed_to_thread: list, allowed_to_invite: list, system_prompt: str, injected_system_prompt: str, api_base: str = None):
+    def __init__(self, trigger: str, api_type: str, model: str, max_tokens: int, temperature: float, allowed_to_chat: list, allowed_to_thread: list, allowed_to_invite: list, system_prompt: str, injected_system_prompt: str, api_base: str = None, help: str = None):
         self.trigger = trigger
         assert api_type in ['openai', 'anth']
         self.api_type = api_type
@@ -12,6 +12,7 @@ class CommandInfo:
         self.system_prompt = system_prompt
         self.injected_system_prompt = injected_system_prompt
         self.api_base = api_base
+        self.help = help
         self.allowed_to_chat = allowed_to_chat

         if not len(self.allowed_to_chat):


@@ -111,9 +111,13 @@ async def sound_off(room: MatrixRoom, event: RoomMessageText, client_helper: MatrixClientHelper):

 ### Commands
-`!matrixgpt` - show this help message\n\n"""
+`!matrixgpt` - show this help message.\n\n"""
     for command in global_config['command']:
-        text_response = text_response + f"`{command['trigger']}` - Model: {command['model']}. Temperature: {command['temperature']}. Max tokens: {command['max_tokens']}.\n\n"
+        max_tokens = command['max_tokens'] if command['max_tokens'] > 0 else 'max'
+        system_prompt_text = f" System prompt: yes." if command['system_prompt'] else ''
+        injected_system_prompt_text = f" Injected system prompt: yes." if command['injected_system_prompt'] else ''
+        help_text = f" ***{command['help'].strip('.')}.***" if command['help'] else ''
+        text_response = text_response + f"`{command['trigger']}` - Model: {command['model']}. Temperature: {command['temperature']}. Max tokens: {max_tokens}.{system_prompt_text}{injected_system_prompt_text}{help_text}\n\n"

     return await client_helper.send_text_to_room(
         room.room_id,
         text_response,
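
The extra fields change how each command renders in the help message: a zero or missing max_tokens shows as "max", prompts are flagged, and per-command help text is appended in bold italics. Walking a hypothetical command entry through the logic above:

# Hypothetical command entry and the help line the loop above produces:
command = {'trigger': '!c', 'model': 'gpt-4', 'temperature': 0.5,
           'max_tokens': 0, 'system_prompt': 'Be terse.',
           'injected_system_prompt': None, 'help': 'General-purpose chat.'}
# -> `!c` - Model: gpt-4. Temperature: 0.5. Max tokens: max. System prompt: yes. ***General-purpose chat.***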


@@ -4,5 +4,4 @@ markdown==3.6
 python-olm==3.2.16
 openai==1.16.2
 anthropic==0.23.1
-mergedeep==1.3.4
 git+https://git.evulid.cc/cyberes/bison.git