fix mergedeep breaking bots, add help text, minor cleanup
parent 34dc652213
commit 58cee52545

main.py | 30 changes
@@ -38,7 +38,6 @@ try:
 except SchemeValidationError as e:
     logger.critical(f'Config validation error: {e}')
     sys.exit(1)
-config_data = global_config.config


 def retry(msg=None):
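
The module-level config_data alias is gone; the call sites below index global_config directly. That relies on ConfigManager forwarding subscript access to its parsed config. A minimal sketch of the assumed accessor (the real implementation is not part of this diff):

    class ConfigManager:
        def __init__(self, parsed_config: dict):
            self._parsed_config = parsed_config

        def __getitem__(self, key):
            # Forward item access so callers can write global_config['auth']
            # instead of global_config.config['auth'].
            return self._parsed_config[key]

    global_config = ConfigManager({'auth': {'username': 'bot'}})
    assert global_config['auth']['username'] == 'bot'
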
@@ -50,13 +49,13 @@ def retry(msg=None):


 async def main():
-    if config_data['logging']['log_level'] == 'info':
+    if global_config['logging']['log_level'] == 'info':
         log_level = logging.INFO
-    elif config_data['logging']['log_level'] == 'debug':
+    elif global_config['logging']['log_level'] == 'debug':
         log_level = logging.DEBUG
-    elif config_data['logging']['log_level'] == 'warning':
+    elif global_config['logging']['log_level'] == 'warning':
         log_level = logging.WARNING
-    elif config_data['logging']['log_level'] == 'critical':
+    elif global_config['logging']['log_level'] == 'critical':
         log_level = logging.CRITICAL
     else:
         log_level = logging.INFO
@@ -77,22 +76,19 @@ async def main():
     logger.info(f'Log level is {l}')
     del l

-    if len(config_data['command']) == 1 and config_data['command'][0].get('mode') == 'local':
-        logger.info('Running in local mode, OpenAI API key not required.')
-
     logger.debug(f'Command Prefixes: {[k for k, v in global_config.command_prefixes.items()]}')

     client_helper = MatrixClientHelper(
-        user_id=config_data['auth']['username'],
-        passwd=config_data['auth']['password'],
-        homeserver=config_data['auth']['homeserver'],
-        store_path=config_data['store_path'],
-        device_id=config_data['auth']['device_id']
+        user_id=global_config['auth']['username'],
+        passwd=global_config['auth']['password'],
+        homeserver=global_config['auth']['homeserver'],
+        store_path=global_config['store_path'],
+        device_id=global_config['auth']['device_id']
     )
     client = client_helper.client

-    if config_data['openai'].get('api_base'):
-        logger.info(f'Set OpenAI API base URL to: {config_data["openai"].get("api_base")}')
+    if global_config['openai'].get('api_base'):
+        logger.info(f'Set OpenAI API base URL to: {global_config["openai"].get("api_base")}')

     # Set up event callbacks
     callbacks = MatrixBotCallbacks(client=client_helper)
@@ -126,8 +122,8 @@ async def main():

     # Login succeeded!
     logger.info(f'Logged in as {client.user_id}')
-    if config_data.get('autojoin_rooms'):
-        for room in config_data.get('autojoin_rooms'):
+    if global_config.get('autojoin_rooms'):
+        for room in global_config.get('autojoin_rooms'):
             r = await client.join(room)
             if not isinstance(r, JoinResponse):
                 logger.critical(f'Failed to join room {room}: {vars(r)}')

matrix_gpt/config.py

@@ -4,7 +4,6 @@ from types import NoneType

 import bison
 from bison.errors import SchemeValidationError
-from mergedeep import merge, Strategy

 config_scheme = bison.Scheme(
     bison.Option('store_path', default='bot-store/', field_type=str),
@@ -32,6 +31,7 @@ config_scheme = bison.Scheme(
         bison.Option('system_prompt', field_type=str, default=None),
         bison.Option('injected_system_prompt', field_type=str, default=None),
         bison.Option('api_base', field_type=[str, NoneType], default=None),
+        bison.Option('help', field_type=[str, NoneType], default=None),
     )),
     bison.DictOption('openai', scheme=bison.Scheme(
         bison.Option('api_key', field_type=[str, NoneType], default=None, required=False),
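
With the new help option in the scheme (defaulted to None in DEFAULT_LISTS below), a merged command entry should come out of validation shaped roughly like this - values invented for illustration:

    command_entry = {
        'trigger': '!c',
        'api_type': 'openai',
        'model': 'gpt-3.5-turbo',
        'max_tokens': 0,
        'temperature': 0.5,
        'system_prompt': None,
        'injected_system_prompt': None,
        'api_base': None,
        'help': 'Quick answers from GPT-3.5.',  # new field, surfaced by !matrixgpt
    }
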
@@ -56,6 +56,7 @@ DEFAULT_LISTS = {
         'system_prompt': None,
         'injected_system_prompt': None,
         'api_base': None,
+        'help': None,
     }
 }

@@ -86,12 +87,16 @@ class ConfigManager:

     def _merge_in_list_defaults(self):
         new_config = copy.copy(self._config.config)
-        for k, v in self._config.config.items():
-            for d_k, d_v in DEFAULT_LISTS.items():
+        for d_k, d_v in DEFAULT_LISTS.items():
+            for k, v in self._config.config.items():
                 if k == d_k:
                     assert isinstance(v, list)
-                    for i in range(len(v)):
-                        new_config[k][i] = merge(d_v, v[i], strategy=Strategy.ADDITIVE)
+                    new_list = []
+                    for e in v:
+                        merged_dict = copy.copy(d_v)  # create a copy of the default dict
+                        merged_dict.update(e)  # update it with the new values
+                        new_list.append(merged_dict)
+                    new_config[k] = new_list
         return new_config

     @property
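
This is the mergedeep breakage from the commit title: merge() mutates and returns its first argument, so every command entry was merged into the same shared defaults dict (and aliased to it), and Strategy.ADDITIVE concatenated list fields across entries. A minimal reproduction with made-up config values:

    from mergedeep import merge, Strategy

    defaults = {'allowed_to_chat': ['all'], 'help': None}
    commands = [{'trigger': '!c1'},
                {'trigger': '!c2', 'allowed_to_chat': ['@me:example.com']}]

    merged = [merge(defaults, c, strategy=Strategy.ADDITIVE) for c in commands]

    print(merged[0] is merged[1])        # True: both entries alias the one defaults dict
    print(merged[0]['trigger'])          # '!c2': the first command's trigger was clobbered
    print(merged[1]['allowed_to_chat'])  # ['all', '@me:example.com']: ADDITIVE appended the lists

The replacement above copies the defaults once per entry and lets explicit values overwrite them outright, so commands no longer bleed settings into each other.
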
@@ -101,9 +106,9 @@ class ConfigManager:
     def _generate_command_prefixes(self):
         assert not self._validated
         command_prefixes = {}
-        for item in self._config.config['command']:
+        for item in self._parsed_config['command']:
             command_prefixes[item['trigger']] = item
-            if item.get('max_tokens', 0) < 1:
+            if item['api_type'] == 'anth' and item.get('max_tokens', 0) < 1:
                 raise SchemeValidationError(f'Anthropic requires `max_tokens`. See <https://support.anthropic.com/en/articles/7996856-what-is-the-maximum-prompt-length>')

         return command_prefixes
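
The max_tokens requirement is now scoped to Anthropic commands only. A standalone mirror of the new check (illustrative, not the bot's code):

    def check_max_tokens(item: dict) -> None:
        # Only Anthropic ('anth') commands must set a positive max_tokens.
        if item['api_type'] == 'anth' and item.get('max_tokens', 0) < 1:
            raise ValueError('Anthropic requires `max_tokens`.')

    check_max_tokens({'api_type': 'openai', 'max_tokens': 0})  # accepted now
    # check_max_tokens({'api_type': 'anth', 'max_tokens': 0})  # would raise
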

@@ -2,7 +2,7 @@ from matrix_gpt.config import global_config


 class CommandInfo:
-    def __init__(self, trigger: str, api_type: str, model: str, max_tokens: int, temperature: float, allowed_to_chat: list, allowed_to_thread: list, allowed_to_invite: list, system_prompt: str, injected_system_prompt: str, api_base: str = None):
+    def __init__(self, trigger: str, api_type: str, model: str, max_tokens: int, temperature: float, allowed_to_chat: list, allowed_to_thread: list, allowed_to_invite: list, system_prompt: str, injected_system_prompt: str, api_base: str = None, help: str = None):
         self.trigger = trigger
         assert api_type in ['openai', 'anth']
         self.api_type = api_type
@@ -12,6 +12,7 @@ class CommandInfo:
         self.system_prompt = system_prompt
         self.injected_system_prompt = injected_system_prompt
         self.api_base = api_base
+        self.help = help

         self.allowed_to_chat = allowed_to_chat
         if not len(self.allowed_to_chat):
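
A hypothetical instantiation with the new keyword threading through (every value here is invented):

    cmd = CommandInfo(
        trigger='!c',
        api_type='openai',
        model='gpt-3.5-turbo',
        max_tokens=0,
        temperature=0.5,
        allowed_to_chat=['all'],
        allowed_to_thread=['all'],
        allowed_to_invite=['all'],
        system_prompt=None,
        injected_system_prompt=None,
        api_base=None,
        help='Quick answers from GPT-3.5.',
    )
    assert cmd.help == 'Quick answers from GPT-3.5.'
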
@@ -111,9 +111,13 @@ async def sound_off(room: MatrixRoom, event: RoomMessageText, client_helper: MatrixClientHelper):
 ### Commands


-`!matrixgpt` - show this help message\n\n"""
+`!matrixgpt` - show this help message.\n\n"""
     for command in global_config['command']:
-        text_response = text_response + f"`{command['trigger']}` - Model: {command['model']}. Temperature: {command['temperature']}. Max tokens: {command['max_tokens']}.\n\n"
+        max_tokens = command['max_tokens'] if command['max_tokens'] > 0 else 'max'
+        system_prompt_text = f" System prompt: yes." if command['system_prompt'] else ''
+        injected_system_prompt_text = f" Injected system prompt: yes." if command['injected_system_prompt'] else ''
+        help_text = f" ***{command['help'].strip('.')}.***" if command['help'] else ''
+        text_response = text_response + f"`{command['trigger']}` - Model: {command['model']}. Temperature: {command['temperature']}. Max tokens: {max_tokens}.{system_prompt_text}{injected_system_prompt_text}{help_text}\n\n"
     return await client_helper.send_text_to_room(
         room.room_id,
         text_response,
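
Given the new loop, a command with help text set and max_tokens left at 0 would render roughly like this in the bot's reply - values match the hypothetical entry above:

    `!c` - Model: gpt-3.5-turbo. Temperature: 0.5. Max tokens: max. ***Quick answers from GPT-3.5.***
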

requirements.txt

@@ -4,5 +4,4 @@ markdown==3.6
 python-olm==3.2.16
 openai==1.16.2
 anthropic==0.23.1
-mergedeep==1.3.4
 git+https://git.evulid.cc/cyberes/bison.git