fix system prompts and logging
parent f7cf5e2da9
commit 84bf541108
@@ -10,14 +10,13 @@ from .config import global_config
 from .handle_actions import do_reply_msg, do_reply_threaded_msg, do_join_channel, sound_off
 from .matrix import MatrixClientHelper

 logger = logging.getLogger('MatrixGPT')


 class MatrixBotCallbacks:
     def __init__(self, client: MatrixClientHelper):
         self.client_helper = client
         self.client: AsyncClient = client.client
-        self.logger = logging.getLogger('ExportBot').getChild('MatrixBotCallbacks')
+        self.logger = logging.getLogger('MatrixGPT').getChild('MatrixBotCallbacks')
         self.startup_ts = time.time() * 1000

     async def handle_message(self, room: MatrixRoom, requestor_event: RoomMessageText) -> None:
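Side note on the logging half of this fix (not part of the diff itself): Logger.getChild simply builds a dotted name under its parent, so replacing the stale 'ExportBot' root with 'MatrixGPT' puts the callback logger in the same hierarchy as the module-level logger above, and its records propagate to whatever handlers are attached to 'MatrixGPT'. A minimal illustration:

import logging

# getChild('MatrixBotCallbacks') on the 'MatrixGPT' logger returns the same
# singleton as the fully dotted name, so records propagate up to 'MatrixGPT'.
a = logging.getLogger('MatrixGPT').getChild('MatrixBotCallbacks')
b = logging.getLogger('MatrixGPT.MatrixBotCallbacks')
assert a is b and a.parent.name == 'MatrixGPT'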
@@ -6,7 +6,7 @@ from nio import AsyncClient, Event, MatrixRoom, RoomGetEventResponse, RoomMessag
 from matrix_gpt.config import global_config
 from matrix_gpt.generate_clients.command_info import CommandInfo

-logger = logging.getLogger('ChatFunctions')
+logger = logging.getLogger('MatrixGPT').getChild('ChatFunctions')


 def is_thread(event: RoomMessageText):
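With every module logger in the commit now parented under 'MatrixGPT' ('MatrixBotCallbacks', 'ChatFunctions', 'Generate'), one handler and level set on that root covers all of them. A sketch of that idea; the handler and format below are illustrative, not taken from this repo:

import logging

# Configure the shared root once; child loggers inherit it via propagation.
root = logging.getLogger('MatrixGPT')
root.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
root.addHandler(handler)

logging.getLogger('MatrixGPT').getChild('ChatFunctions').info('shown')   # emitted by the root handler
logging.getLogger('MatrixGPT').getChild('Generate').debug('hidden')      # filtered: below INFO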
@@ -10,27 +10,12 @@ from matrix_gpt.api_client_manager import api_client_helper
 from matrix_gpt.config import global_config
 from matrix_gpt.generate_clients.command_info import CommandInfo

-logger = logging.getLogger('ProcessChat')
+logger = logging.getLogger('MatrixGPT').getChild('Generate')


 # TODO: process_chat() will set typing as false after generating.
 # TODO: If there is still another query in-progress that typing state will be overwritten by the one that just finished.

-def assemble_messages(messages: list, mode: str):
-    if mode == 'openai':
-        system_prompt = global_config['openai'].get('system_prompt', '')
-        injected_system_prompt = global_config['openai'].get('injected_system_prompt', '')
-    elif mode == 'anth':
-        human_role = 'user'
-        bot_role = 'assistant'
-        system_prompt = global_config['anthropic'].get('system_prompt', '')
-        injected_system_prompt = global_config['anthropic'].get('injected_system_prompt', '')
-    else:
-        raise Exception
-    return messages


 async def generate_ai_response(
         client_helper: MatrixClientHelper,
@@ -46,7 +31,7 @@ async def generate_ai_response(
     await client.room_typing(room.room_id, typing_state=True, timeout=global_config['response_timeout'] * 1000)

     api_client = api_client_helper.get_client(command_info.api_type)
-    messages = api_client.assemble_context(msg)
+    messages = api_client.assemble_context(msg, system_prompt=command_info.system_prompt, injected_system_prompt=command_info.injected_system_prompt)

     response = None
     try:
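The system-prompt half of the fix drops the local assemble_messages() helper and instead hands the per-command prompts (command_info.system_prompt, command_info.injected_system_prompt) to the API client's assemble_context(). The diff does not show that method's body, so the following is only a hedged sketch of what an OpenAI-style version could do, written as a standalone function with assumed behavior:

def assemble_context(messages: list[dict], system_prompt: str = '',
                     injected_system_prompt: str = '') -> list[dict]:
    # Assumed behavior: prepend the main system prompt and, when configured,
    # re-inject a reminder system message just before the newest user turn.
    context = list(messages)
    if system_prompt:
        context.insert(0, {'role': 'system', 'content': system_prompt})
    if injected_system_prompt and len(context) > 1:
        context.insert(-1, {'role': 'system', 'content': injected_system_prompt})
    return context


# Example call mirroring the new call site, with a hypothetical history:
history = [{'role': 'user', 'content': 'hi'},
           {'role': 'assistant', 'content': 'hello'},
           {'role': 'user', 'content': 'summarize this thread'}]
assemble_context(history, system_prompt='You are MatrixGPT.',
                 injected_system_prompt='Keep replies short.')

Whatever the real method does internally, the gain visible in the diff is that generate.py no longer needs the per-mode branches the removed assemble_messages() carried.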