fix temperature, non-blocking generation

Cyberes 2023-03-19 16:46:15 -06:00
parent 157f4f9dc0
commit a5c47eb3e6
4 changed files with 77 additions and 37 deletions
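
The core of the "non-blocking generation" change is moving the synchronous OpenAI request into a thread-pool executor so the Matrix event loop keeps running while a reply is generated. A minimal sketch of that pattern, with slow_api_call as an illustrative stand-in for the blocking SDK call (not a name from this repo):

import asyncio
import functools
import time

def slow_api_call(prompt: str) -> str:
    # Illustrative stand-in for a blocking call such as
    # openai.ChatCompletion.create(); it ties up its thread.
    time.sleep(2)
    return f'response to {prompt!r}'

async def generate(prompt: str) -> str:
    # run_in_executor hands the blocking function to the default
    # thread pool and yields control back to the event loop until
    # the result is ready.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, functools.partial(slow_api_call, prompt))

print(asyncio.run(generate('hello')))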

View File

@@ -110,7 +110,7 @@ async def main():
         log_full_response=config_data['logging'].get('log_full_response', False),
         system_prompt=config_data['openai'].get('system_prompt'),
         injected_system_prompt=config_data['openai'].get('injected_system_prompt', False),
-        hyper_temperature=config_data['openai'].get('temperature', 0)
+        openai_temperature=config_data['openai'].get('temperature', 0)
     )
     client.add_event_callback(callbacks.message, RoomMessageText)
     client.add_event_callback(callbacks.invite_event_filtered_callback, InviteMemberEvent)
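
The .get('temperature', 0) lookup above reads the sampling temperature from the bot's configuration, defaulting to 0 (deterministic output) when the key is absent. A hedged sketch of that plumbing, assuming a YAML config file named config.yaml with an openai section (both assumptions, inferred from the config_data lookups):

import yaml  # assumption: the config is YAML

with open('config.yaml') as f:
    config_data = yaml.safe_load(f)

# Mirrors the lookup above: fall back to temperature 0 when unset.
openai_temperature = config_data['openai'].get('temperature', 0)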

View File

@@ -1,3 +1,4 @@
+import asyncio
 import logging
 from types import ModuleType
@@ -22,6 +23,7 @@ class Command:
             openai_obj: ModuleType,
             openai_model: str,
             reply_in_thread,
+            openai_temperature: float = 0,
             system_prompt: str = None,
             injected_system_prompt: str = None,
             log_full_response: bool = False
@@ -54,6 +56,7 @@ class Command:
         self.injected_system_prompt = injected_system_prompt
         self.log_full_response = log_full_response
         self.openai_obj = openai_obj
+        self.openai_temperature = openai_temperature

     async def process(self):
         """Process the command"""
@@ -69,6 +72,7 @@ class Command:
         await self._process_chat()

     async def _process_chat(self):
+        async def inner():
             await process_chat(
                 self.client,
                 self.room,
@@ -77,11 +81,14 @@ class Command:
                 self.store,
                 openai_obj=self.openai_obj,
                 openai_model=self.openai_model,
+                openai_temperature=self.openai_temperature,
                 system_prompt=self.system_prompt,
                 injected_system_prompt=self.injected_system_prompt,
                 log_full_response=self.log_full_response
             )
+        asyncio.get_event_loop().create_task(inner())

     async def _show_help(self):
         """Show the help text"""
         # if not self.args:
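
Wrapping the call in inner() and scheduling it with create_task() makes _process_chat() fire-and-forget: the command handler returns at once and generation continues in the background. A sketch of the same pattern using asyncio.create_task, the modern equivalent of asyncio.get_event_loop().create_task inside a running coroutine (stub names are illustrative):

import asyncio

background_tasks = set()

async def process_chat_stub() -> None:
    # Stand-in for the real process_chat(); simulates slow generation.
    await asyncio.sleep(5)

async def handle_command() -> None:
    task = asyncio.create_task(process_chat_stub())
    # Hold a strong reference so the task is not garbage-collected
    # before it completes, then drop it when done.
    background_tasks.add(task)
    task.add_done_callback(background_tasks.discard)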

View File

@ -1,4 +1,5 @@
# https://github.com/anoadragon453/nio-template # https://github.com/anoadragon453/nio-template
import asyncio
import logging import logging
import time import time
from types import ModuleType from types import ModuleType
@ -26,7 +27,7 @@ class Callbacks:
system_prompt: str = None, system_prompt: str = None,
log_full_response: bool = False, log_full_response: bool = False,
injected_system_prompt: str = False, injected_system_prompt: str = False,
hyper_temperature: float = 0 openai_temperature: float = 0
): ):
""" """
Args: Args:
@ -49,6 +50,7 @@ class Callbacks:
self.log_full_response = log_full_response self.log_full_response = log_full_response
self.injected_system_prompt = injected_system_prompt self.injected_system_prompt = injected_system_prompt
self.openai_obj = openai_obj self.openai_obj = openai_obj
self.openai_temperature = openai_temperature
async def message(self, room: MatrixRoom, event: RoomMessageText) -> None: async def message(self, room: MatrixRoom, event: RoomMessageText) -> None:
"""Callback for when a message event is received """Callback for when a message event is received
@ -105,9 +107,9 @@ class Callbacks:
'content': thread_msg if not thread_msg.startswith(self.command_prefix) else thread_msg[len(self.command_prefix):].strip() 'content': thread_msg if not thread_msg.startswith(self.command_prefix) else thread_msg[len(self.command_prefix):].strip()
}) # if len(thread_content) >= 2 and thread_content[0].body.startswith(self.command_prefix): # if thread_content[len(thread_content) - 2].sender == self.client.user }) # if len(thread_content) >= 2 and thread_content[0].body.startswith(self.command_prefix): # if thread_content[len(thread_content) - 2].sender == self.client.user
# message = Message(self.client, self.store, msg, room, event, self.reply_in_thread) # TODO: process_chat() will set typing as false after generating.
# await message.process() # TODO: If there is still another query in-progress that typing state will be overwritten by the one that just finished.
# api_data.append({'role': 'user', 'content': msg}) async def inner():
await process_chat( await process_chat(
self.client, self.client,
room, room,
@ -116,11 +118,14 @@ class Callbacks:
self.store, self.store,
openai_obj=self.openai_obj, openai_obj=self.openai_obj,
openai_model=self.openai_model, openai_model=self.openai_model,
openai_temperature=self.openai_temperature,
thread_root_id=thread_content[0].event_id, thread_root_id=thread_content[0].event_id,
system_prompt=self.system_prompt, system_prompt=self.system_prompt,
log_full_response=self.log_full_response, log_full_response=self.log_full_response,
injected_system_prompt=self.injected_system_prompt injected_system_prompt=self.injected_system_prompt
) )
asyncio.get_event_loop().create_task(inner())
return return
elif msg.startswith(f'{self.command_prefix} ') or room.member_count == 2: elif msg.startswith(f'{self.command_prefix} ') or room.member_count == 2:
# Otherwise if this is in a 1-1 with the bot or features a command prefix, treat it as a command. # Otherwise if this is in a 1-1 with the bot or features a command prefix, treat it as a command.
@ -133,6 +138,7 @@ class Callbacks:
event, event,
openai_obj=self.openai_obj, openai_obj=self.openai_obj,
openai_model=self.openai_model, openai_model=self.openai_model,
openai_temperature=self.openai_temperature,
reply_in_thread=self.reply_in_thread, reply_in_thread=self.reply_in_thread,
system_prompt=self.system_prompt, system_prompt=self.system_prompt,
injected_system_prompt=self.injected_system_prompt, injected_system_prompt=self.injected_system_prompt,
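
The two TODO comments added above describe a real race: each background generation clears the room's typing indicator when it finishes, so a query that completes early can hide the indicator while another is still running. One hedged way to resolve it is a per-room in-flight counter; TypingTracker below is hypothetical, not part of this repo:

import asyncio

class TypingTracker:
    # Hypothetical helper: only toggle the typing indicator on the
    # first query to start and the last query to finish in a room.
    def __init__(self) -> None:
        self._in_flight: dict[str, int] = {}
        self._lock = asyncio.Lock()

    async def start(self, room_id: str) -> bool:
        # True means the caller should send typing_state=True.
        async with self._lock:
            self._in_flight[room_id] = self._in_flight.get(room_id, 0) + 1
            return self._in_flight[room_id] == 1

    async def stop(self, room_id: str) -> bool:
        # True means the caller should send typing_state=False.
        async with self._lock:
            self._in_flight[room_id] -= 1
            return self._in_flight[room_id] == 0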

View File

@@ -1,5 +1,8 @@
+import asyncio
+import functools
 import logging
 import time
+import traceback
 from types import ModuleType
 from typing import List, Optional, Union
@@ -132,7 +135,21 @@ async def get_thread_content(client: AsyncClient, room: MatrixRoom, base_event:
     return messages

-async def process_chat(client, room, event, command, store, openai_obj: ModuleType, openai_model: str, openai_retries: int = 3, thread_root_id: str = None, system_prompt: str = None, log_full_response: bool = False, injected_system_prompt: str = False):
+async def process_chat(
+        client,
+        room,
+        event,
+        command,
+        store,
+        openai_obj: ModuleType,
+        openai_model: str,
+        openai_temperature: float,
+        openai_retries: int = 3,
+        thread_root_id: str = None,
+        system_prompt: str = None,
+        log_full_response: bool = False,
+        injected_system_prompt: str = False
+):
     if not store.check_seen_event(event.event_id):
         await client.room_typing(room.room_id, typing_state=True, timeout=9000)
         # if self.reply_in_thread:
@@ -158,25 +175,35 @@ async def process_chat(
         logger.debug(f'Generating reply to event {event.event_id}')

+        loop = asyncio.get_running_loop()

         # I don't think the OpenAI py api has a built-in timeout
         @stopit.threading_timeoutable(default=(None, None))
-        def generate():
-            r = openai_obj.ChatCompletion.create(model=openai_model, messages=messages, temperature=0, timeout=10)
-            return r["choices"][0]["message"]["content"].strip().strip('\n'), r
+        async def generate():
+            return await loop.run_in_executor(None, functools.partial(openai_obj.ChatCompletion.create, model=openai_model, messages=messages, temperature=openai_temperature, timeout=20))
+            # r = openai_obj.ChatCompletion.create(model=openai_model, messages=messages, temperature=openai_temperature, timeout=20)

         text_response = response = None
         for i in range(openai_retries):
             try:
-                text_response, response = generate(timeout=20)
-                if text_response is not None and response is not None:
+                task = asyncio.create_task(generate(timeout=20))
+                asyncio.as_completed(task)
+                response = await task
+                if response is not None:
                     break
             except stopit.utils.TimeoutException:
                 time.sleep(2)
                 continue
-        if text_response is None:
+            except Exception as e:
+                logger.critical(f'OpenAI API error: {e}\n{traceback.format_exc()}')
+                await react_to_event(client, room.room_id, event.event_id, '')
+                return
+        if response is None:
             logger.critical(f'OpenAI API timeout for event {event.event_id} in room {room.room_id}.')
             await react_to_event(client, room.room_id, event.event_id, '')
             return
+        text_response = response["choices"][0]["message"]["content"].strip().strip('\n')

         # Logging stuff
         if log_full_response:
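
Two details in the final hunk are worth flagging. asyncio.as_completed() expects an iterable of awaitables, so handing it a bare Task raises TypeError (which the new except-Exception branch would catch); its return value is discarded here in any case, and the await task on the next line is what actually waits. Likewise, the stopit threading timeout can no longer fire: calling the now-async generate() returns a coroutine immediately, so the decorator's 20-second limit never engages. An asyncio-native sketch of the intended retry-with-timeout behaviour (names are illustrative):

import asyncio

async def generate_with_retries(make_request, retries: int = 3, timeout: float = 20.0):
    # make_request is a zero-argument callable returning a fresh awaitable,
    # e.g. the run_in_executor call from the diff above.
    for _ in range(retries):
        try:
            # wait_for cancels the awaitable if it exceeds the timeout.
            return await asyncio.wait_for(make_request(), timeout=timeout)
        except asyncio.TimeoutError:
            # Non-blocking back-off; the time.sleep(2) above would stall
            # the entire event loop instead.
            await asyncio.sleep(2)
    return None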