handle timeouts

Cyberes 2023-03-19 15:37:55 -06:00
parent 1027cf189e
commit e3c0f15744
4 changed files with 25 additions and 6 deletions


@@ -45,6 +45,10 @@ openai:
   model: gpt-3.5-turbo
+  # If the API doesn't generate a response within 5 seconds, the request is re-sent.
+  # This controls how many times a retry is performed.
+  api_retries: 3
   # Leave at 0 for fully deterministic output.
   # Range is 0 to 2
   # https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
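
Pieced together, the touched part of the sample config would read roughly as below. Only `model` and `api_retries` appear verbatim in this hunk; the trailing `temperature` key is an assumption inferred from its comments and the `temperature=0` call later in this commit:

openai:
  model: gpt-3.5-turbo
  # If the API doesn't generate a response within 5 seconds, the request is re-sent.
  # This controls how many times a retry is performed.
  api_retries: 3
  # Leave at 0 for fully deterministic output. Range is 0 to 2.
  # https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
  temperature: 0  # assumed key name; the hunk ends before the real one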


@@ -61,7 +61,7 @@ class Callbacks:
         # Extract the message text
         msg = event.body.strip().strip('\n')
-        logger.debug(f"Bot message received for room {room.display_name} | {room.user_name(event.sender)}: {msg}")
+        logger.debug(f"Bot message received from {event.sender} in {room} --> {msg}")
         await self.client.room_read_markers(room.room_id, event.event_id, event.event_id)


@@ -2,6 +2,7 @@ import logging
 from types import ModuleType
 from typing import List, Optional, Union
+import stopit
 from markdown import markdown
 from nio import (AsyncClient, ErrorResponse, Event, MatrixRoom, MegolmEvent, Response, RoomMessageText, RoomSendResponse, SendRetryError, )
@@ -130,7 +131,7 @@ async def get_thread_content(client: AsyncClient, room: MatrixRoom, base_event:
     return messages

-async def process_chat(client, room, event, command, store, openai_obj: ModuleType, openai_model: str, thread_root_id: str = None, system_prompt: str = None, log_full_response: bool = False, injected_system_prompt: str = False):
+async def process_chat(client, room, event, command, store, openai_obj: ModuleType, openai_model: str, openai_retries: int = 3, thread_root_id: str = None, system_prompt: str = None, log_full_response: bool = False, injected_system_prompt: str = False):
     if not store.check_seen_event(event.event_id):
         await client.room_typing(room.room_id, typing_state=True, timeout=3000)
         # if self.reply_in_thread:
@@ -155,12 +156,25 @@ async def process_chat(client, room, event, command, store, openai_obj: ModuleType,
             messages.insert(index, {"role": "system", "content": injected_system_prompt})
         logger.debug(f'Generating reply to event {event.event_id}')
-        response = openai_obj.ChatCompletion.create(model=openai_model, messages=messages, temperature=0, timeout=10)
-        text_response = response["choices"][0]["message"]["content"].strip().strip('\n')
+        @stopit.threading_timeoutable(default=(None, None))
+        def generate():
+            response = openai_obj.ChatCompletion.create(model=openai_model, messages=messages, temperature=0, timeout=10)
+            return response["choices"][0]["message"]["content"].strip().strip('\n'), response
+        text_response = response = None
+        for i in range(openai_retries):
+            text_response, response = generate(timeout=5)
+            if text_response is not None:
+                break
+        if text_response is None:
+            logger.critical(f'OpenAI API timeout for event {event.event_id} in room {room.room_id}.')
+            await react_to_event(client, room.room_id, event.event_id, '')
+            return
         # Logging stuff
         if log_full_response:
-            logger.debug({'event_id': event.event_id, 'room': room.room_id, 'messages': messages, 'response': text_response})
+            logger.debug({'event_id': event.event_id, 'room': room.room_id, 'messages': messages, 'response': response})
         z = text_response.replace("\n", "\\n")
         if isinstance(command, str):
             x = command.replace("\n", "\\n")
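
The new retry path leans on stopit's threading-based timeout: `threading_timeoutable` injects a `timeout` keyword argument into the wrapped function and makes the call return the decorator's `default` value, here `(None, None)`, whenever the body doesn't finish in time. That is why `generate(timeout=5)` can be checked for `None` in the loop. A minimal self-contained sketch of the pattern follows; `slow_api_call` and its sleep loop are illustrative stand-ins, not code from this repo:

import time

import stopit

# The decorator adds a `timeout` keyword argument to the wrapped function.
# If the body doesn't finish within `timeout` seconds, stopit raises inside
# it and the call returns `default` instead -- here the (None, None) tuple
# that the caller checks for.
@stopit.threading_timeoutable(default=(None, None))
def slow_api_call(seconds):
    # Stand-in for the blocking OpenAI request. Short sleeps matter: stopit
    # delivers its timeout as an async exception between Python bytecodes,
    # so a single long C-level call would only be abandoned once it returned.
    for _ in range(seconds * 10):
        time.sleep(0.1)
    return "some text", {"object": "chat.completion"}

text, raw = slow_api_call(30, timeout=5)
print(text)  # None -- timed out, got the default tuple

# The same retry shape as the new loop in process_chat():
for attempt in range(3):  # api_retries
    text, raw = slow_api_call(1, timeout=5)
    if text is not None:
        break

Because the timeout can only fire between bytecodes, the retry loop is a useful second layer on top of the `timeout=10` already passed to the OpenAI client rather than a replacement for it.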


@@ -2,4 +2,5 @@ matrix-nio[e2e]
 pyyaml
 markdown
 python-olm
-openai
+openai
+stopit