log number of retries left, add error message to msg
parent c4ffd82698
commit d53517e3fb
@@ -7,12 +7,16 @@ from typing import List, Optional, Union
 import stopit
 from markdown import markdown
-from nio import (AsyncClient, ErrorResponse, Event, MatrixRoom, MegolmEvent, Response, RoomGetEventResponse, RoomMessageText, RoomSendResponse, SendRetryError, )
+from nio import AsyncClient, ErrorResponse, Event, MatrixRoom, MegolmEvent, Response, RoomGetEventResponse, \
+    RoomMessageText, RoomSendResponse, SendRetryError

 logger = logging.getLogger('MatrixGPT')


-async def send_text_to_room(client: AsyncClient, room_id: str, message: str, notice: bool = False, markdown_convert: bool = True, reply_to_event_id: Optional[str] = None, thread: bool = False, thread_root_id: str = None) -> Union[RoomSendResponse, ErrorResponse]:
+async def send_text_to_room(client: AsyncClient, room_id: str, message: str, notice: bool = False,
+                            markdown_convert: bool = True, reply_to_event_id: Optional[str] = None,
+                            thread: bool = False, thread_root_id: str = None, extra_error: str = False,
+                            extra_msg: str = False) -> Union[RoomSendResponse, ErrorResponse]:
     """Send text to a matrix room.

     Args:
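Note: the new extra_error and extra_msg parameters are annotated as str but default to False; the function coerces them with str() before attaching them to the event (see the next hunk). A hedged sketch of a call site using the new parameters — the room ID and field values are illustrative only:

    # Hypothetical call site; room ID and values are illustrative.
    # Both new parameters default to False, so existing callers keep working unchanged.
    await send_text_to_room(client, '!room:example.org', 'Could not generate a response.',
                            extra_error='response was null',    # ends up in content['m.matrixgpt']['error']
                            extra_msg='gave up after retries')  # ends up in content['m.matrixgpt']['msg']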
@@ -42,10 +46,24 @@ async def send_text_to_room(client: AsyncClient, room_id: str, message: str, not
     if reply_to_event_id:
         if thread:
-            content["m.relates_to"] = {'event_id': thread_root_id, 'is_falling_back': True, "m.in_reply_to": {"event_id": reply_to_event_id}, 'rel_type': "m.thread"}
+            content["m.relates_to"] = {
+                'event_id': thread_root_id,
+                'is_falling_back': True,
+                "m.in_reply_to": {
+                    "event_id": reply_to_event_id
+                },
+                'rel_type': "m.thread"
+            }
         else:
-            content["m.relates_to"] = {"m.in_reply_to": {"event_id": reply_to_event_id}}
+            content["m.relates_to"] = {
+                "m.in_reply_to": {
+                    "event_id": reply_to_event_id
+                }
+            }
+    content["m.matrixgpt"] = {
+        "error": str(extra_error),
+        "msg": str(extra_msg),
+    }
     try:
         return await client.room_send(room_id, "m.room.message", content, ignore_unverified_devices=True)
     except SendRetryError:
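Because str() is applied unconditionally, the False defaults serialize as the literal string 'False', so messages sent through this helper now always carry the diagnostic block. A minimal sketch of what the new key contributes to the event content (the other content keys, built earlier in the function, are omitted):

    # With the defaults, the new block serializes as:
    content['m.matrixgpt'] = {'error': 'False', 'msg': 'False'}      # str(False) == 'False'
    # With an exception passed as extra_error:
    content['m.matrixgpt'] = {'error': 'timed out', 'msg': 'False'}  # str(Exception('timed out')) == 'timed out'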
@@ -70,7 +88,9 @@ def make_pill(user_id: str, displayname: str = None) -> str:
     return f'<a href="https://matrix.to/#/{user_id}">{displayname}</a>'


-async def react_to_event(client: AsyncClient, room_id: str, event_id: str, reaction_text: str, ) -> Union[Response, ErrorResponse]:
+async def react_to_event(client: AsyncClient, room_id: str, event_id: str, reaction_text: str, extra_error: str = False,
+                         extra_msg: str = False) -> Union[
+    Response, ErrorResponse]:
     """Reacts to a given event in a room with the given reaction text

     Args:
@@ -88,7 +108,17 @@ async def react_to_event(client: AsyncClient, room_id: str, event_id: str, react
     Raises:
         SendRetryError: If the reaction was unable to be sent.
     """
-    content = {"m.relates_to": {"rel_type": "m.annotation", "event_id": event_id, "key": reaction_text}}
+    content = {
+        "m.relates_to": {
+            "rel_type": "m.annotation",
+            "event_id": event_id,
+            "key": reaction_text
+        },
+        "m.matrixgpt": {
+            "error": str(extra_error),
+            "msg": str(extra_msg),
+        }
+    }
     return await client.room_send(room_id, "m.reaction", content, ignore_unverified_devices=True, )

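This mirrors the change to send_text_to_room() and pairs with the call later in this diff that reacts with ❌ and passes extra_error=openai_gen_error. A sketch of the m.reaction content a client would then receive — the event ID and error text are illustrative:

    # Illustrative content produced by react_to_event() above.
    {
        "m.relates_to": {
            "rel_type": "m.annotation",
            "event_id": "$someEventId",    # hypothetical event being reacted to
            "key": "❌"
        },
        "m.matrixgpt": {
            "error": "response was null",  # str(extra_error)
            "msg": "False"                 # extra_msg left at its default
        }
    }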
@@ -119,7 +149,8 @@ def check_command_prefix(string: str, prefixes: dict):
     return False, None, None


-async def is_this_our_thread(client: AsyncClient, room: MatrixRoom, event: RoomMessageText, command_prefixes: dict) -> tuple[bool, any, any]:
+async def is_this_our_thread(client: AsyncClient, room: MatrixRoom, event: RoomMessageText, command_prefixes: dict) -> \
+        tuple[bool, any, any]:
     base_event_id = event.source['content'].get('m.relates_to', {}).get('event_id')
     if base_event_id:
         e = await client.room_get_event(room.room_id, base_event_id)
@@ -140,8 +171,11 @@ async def get_thread_content(client: AsyncClient, room: MatrixRoom, base_event:
             messages.append(new_event)
         else:
             break
-        new_event = (await client.room_get_event(room.room_id, new_event.source['content']['m.relates_to']['m.in_reply_to']['event_id'])).event
-    messages.append((await client.room_get_event(room.room_id, base_event.source['content']['m.relates_to']['event_id'])).event)  # put the root event in the array
+        new_event = (await client.room_get_event(room.room_id,
+                                                 new_event.source['content']['m.relates_to']['m.in_reply_to'][
+                                                     'event_id'])).event
+    messages.append((await client.room_get_event(room.room_id, base_event.source['content']['m.relates_to'][
+        'event_id'])).event)  # put the root event in the array
     messages.reverse()
     return messages

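Context for readers: get_thread_content() walks the m.in_reply_to chain from the newest event back toward the thread root, then appends the root and reverses so messages come out oldest-first. A simplified, self-contained sketch of that traversal — the helper name is hypothetical, not part of this codebase:

    # Hypothetical, simplified version of the reply-chain walk above.
    async def walk_reply_chain(client, room_id, leaf_event):
        chain = [leaf_event]
        event = leaf_event
        while True:
            relates = event.source['content'].get('m.relates_to', {})
            parent_id = relates.get('m.in_reply_to', {}).get('event_id')
            if parent_id is None:
                break  # reached the thread root
            event = (await client.room_get_event(room_id, parent_id)).event
            chain.append(event)
        chain.reverse()  # oldest first, matching get_thread_content()
        return chain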
@@ -193,16 +227,24 @@ async def process_chat(
     @stopit.threading_timeoutable(default=(None, None))
     async def generate():
         if openai_model.startswith('gpt-3') or openai_model.startswith('gpt-4'):
-            r = await loop.run_in_executor(None, functools.partial(openai_obj.ChatCompletion.create, model=openai_model, messages=messages, temperature=openai_temperature, timeout=20))
+            r = await loop.run_in_executor(None, functools.partial(openai_obj.ChatCompletion.create,
+                                                                   model=openai_model, messages=messages,
+                                                                   temperature=openai_temperature, timeout=20))
             return r.choices[0].message.content
-        elif openai_model in ['text-davinci-003', 'davinci-instruct-beta', 'text-davinci-001', 'text-davinci-002', 'text-curie-001', 'text-babbage-001']:
-            r = await loop.run_in_executor(None, functools.partial(openai_obj.Completion.create, model=openai_model, temperature=openai_temperature, timeout=20, max_tokens=4096))
+        elif openai_model in ['text-davinci-003', 'davinci-instruct-beta', 'text-davinci-001',
+                              'text-davinci-002', 'text-curie-001', 'text-babbage-001']:
+            r = await loop.run_in_executor(None,
+                                           functools.partial(openai_obj.Completion.create, model=openai_model,
+                                                             temperature=openai_temperature, timeout=20,
+                                                             max_tokens=4096))
             return r.choices[0].text
         else:
             raise Exception(f'Model {openai_model} not found!')

     response = None
+    openai_gen_error = None
     for i in range(openai_retries):
+        sleep_time = i * 5
         try:
             task = asyncio.create_task(generate(timeout=20))
             asyncio.as_completed(task)
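The new sleep_time = i * 5 gives a linear backoff: a failure on the first attempt retries immediately, the next waits 5s, then 10s, and so on (the sleep itself happens in the exception handler in the next hunk). Spelled out with an illustrative retry count:

    # Backoff schedule produced by sleep_time = i * 5 (retry count illustrative).
    openai_retries = 4
    for i in range(openai_retries):
        sleep_time = i * 5
        print(f'attempt {i + 1}: wait {sleep_time}s before the next try on failure')
    # attempt 1: 0s, attempt 2: 5s, attempt 3: 10s, attempt 4: 15s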
@@ -210,24 +252,29 @@ async def process_chat(
             if response is not None:
                 break
             else:
-                logger.warning(f'Response to event {event.event_id} was null, retrying.')
-                time.sleep(2)
+                openai_gen_error = 'response was null'
+                logger.warning(
+                    f'Response to event {event.event_id} was null, retrying {i}/{openai_retries} after {sleep_time}s.')
+                # time.sleep(2)
         except Exception as e:  # (stopit.utils.TimeoutException, openai.error.APIConnectionError)
-            logger.warning(f'Got exception when generating response to event {event.event_id}, retrying: {e}')
+            openai_gen_error = e
+            logger.warning(
+                f'Got exception when generating response to event {event.event_id}, retrying {i}/{openai_retries} after {sleep_time}s. Error: {e}')
             await client.room_typing(room.room_id, typing_state=True, timeout=15000)
-            time.sleep(2)
+            time.sleep(sleep_time)
             continue

     if response is None:
         logger.critical(f'Response to event {event.event_id} in room {room.room_id} was null.')
         await client.room_typing(room.room_id, typing_state=False, timeout=15000)
-        await react_to_event(client, room.room_id, event.event_id, '❌')
+        await react_to_event(client, room.room_id, event.event_id, '❌', extra_error=openai_gen_error)
         return
     text_response = response.strip().strip('\n')

     # Logging stuff
     if log_full_response:
-        logger.debug({'event_id': event.event_id, 'room': room.room_id, 'messages': messages, 'response': response})
+        logger.debug(
+            {'event_id': event.event_id, 'room': room.room_id, 'messages': messages, 'response': response})
     z = text_response.replace("\n", "\\n")
     if isinstance(command, str):
         x = command.replace("\n", "\\n")
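Taken together, the loop now records its most recent failure in openai_gen_error so the final ❌ reaction can report it. A condensed, synchronous sketch of the pattern — generate_once is a hypothetical stand-in for the real generate() task:

    # Hypothetical condensed form of the retry-and-report pattern above.
    import time

    def retry_and_report(generate_once, retries: int = 5):
        last_error = None                     # plays the role of openai_gen_error
        for i in range(retries):
            sleep_time = i * 5                # linear backoff, as in the diff
            try:
                result = generate_once()
                if result is not None:
                    return result, None
                last_error = 'response was null'  # null response: retry without sleeping
            except Exception as e:
                last_error = e
                time.sleep(sleep_time)        # sleep only on exceptions, as above
        return None, last_error               # caller attaches this to the ❌ reaction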
@@ -237,7 +284,9 @@ async def process_chat(
         x = command
     logger.info(f'Reply to {event.event_id} --> "{x}" and bot ({openai_model}) responded with "{z}"')

-    resp = await send_text_to_room(client, room.room_id, text_response, reply_to_event_id=event.event_id, thread=True, thread_root_id=thread_root_id if thread_root_id else event.event_id)
+    resp = await send_text_to_room(client, room.room_id, text_response, reply_to_event_id=event.event_id,
+                                   thread=True,
+                                   thread_root_id=thread_root_id if thread_root_id else event.event_id)
     await client.room_typing(room.room_id, typing_state=False, timeout=3000)

     store.add_event_id(event.event_id)
@@ -250,6 +299,7 @@ async def process_chat(
         await react_to_event(client, room.room_id, event.event_id, '❌')
         raise


 def check_authorized(string, to_check):
     def check_str(s, c):
         if c != 'all':