handle timeouts
parent 1027cf189e
commit e3c0f15744
@@ -45,6 +45,10 @@ openai:
   model: gpt-3.5-turbo
 
+  # If the API doesn't generate a response within 5 seconds, the request is re-sent.
+  # This controls how many times a retry is performed.
+  api_retries: 3
+
   # Leave at 0 for fully deterministic output.
   # Range is 0 to 2
   # https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
 
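Review note: the two new settings interact, each attempt is cut off after 5 seconds and api_retries bounds the number of attempts, so an unresponsive API stalls a message for roughly api_retries × 5 s (15 s with the default). A minimal sketch of how the key would be consumed, assuming the config is loaded with pyyaml into a dict (the loading code is not part of this diff and the path is illustrative):

    import yaml

    with open('config.yaml') as f:  # illustrative path
        config = yaml.safe_load(f)

    # Fall back to 3, matching the openai_retries default added below.
    retries = config['openai'].get('api_retries', 3)
    # Worst case before the bot gives up: retries * 5 s = 15 s by default.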
@@ -61,7 +61,7 @@ class Callbacks:
         # Extract the message text
         msg = event.body.strip().strip('\n')
 
-        logger.debug(f"Bot message received for room {room.display_name} | {room.user_name(event.sender)}: {msg}")
+        logger.debug(f"Bot message received from {event.sender} in {room} --> {msg}")
 
         await self.client.room_read_markers(room.room_id, event.event_id, event.event_id)
 
@@ -2,6 +2,7 @@ import logging
 from types import ModuleType
 from typing import List, Optional, Union
 
+import stopit
 from markdown import markdown
 from nio import (AsyncClient, ErrorResponse, Event, MatrixRoom, MegolmEvent, Response, RoomMessageText, RoomSendResponse, SendRetryError, )
 
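Review note on the new dependency: stopit.threading_timeoutable wraps a function so that it gains a timeout keyword argument and returns the supplied default instead of raising when the limit is hit. A standalone sketch of the behaviour relied on below (not from this repo). One caveat: stopit delivers the timeout as an asynchronous exception between Python bytecodes, so a single long blocking C call is only interrupted after it returns.

    import time
    import stopit

    @stopit.threading_timeoutable(default=(None, None))
    def slow():
        # Interruptible Python-level work; a single blocking 10 s C call
        # would only be cut off once it returned.
        for _ in range(100):
            time.sleep(0.1)
        return 'done', {}

    print(slow(timeout=1))    # (None, None): the default, returned on timeout
    print(slow(timeout=15))   # ('done', {}): finished within the limit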
@@ -130,7 +131,7 @@ async def get_thread_content(client: AsyncClient, room: MatrixRoom, base_event:
     return messages
 
 
-async def process_chat(client, room, event, command, store, openai_obj: ModuleType, openai_model: str, thread_root_id: str = None, system_prompt: str = None, log_full_response: bool = False, injected_system_prompt: str = False):
+async def process_chat(client, room, event, command, store, openai_obj: ModuleType, openai_model: str, openai_retries: int = 3, thread_root_id: str = None, system_prompt: str = None, log_full_response: bool = False, injected_system_prompt: str = False):
     if not store.check_seen_event(event.event_id):
         await client.room_typing(room.room_id, typing_state=True, timeout=3000)
         # if self.reply_in_thread:
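The new openai_retries parameter defaults to 3, mirroring api_retries in the sample config. A hypothetical call site to show the wiring (a fragment, not runnable on its own; only the parameter names come from this diff, the values are illustrative):

    await process_chat(
        client, room, event, command, store,
        openai_obj=openai,
        openai_model=config['openai']['model'],
        openai_retries=config['openai'].get('api_retries', 3),
    )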
@@ -155,12 +156,25 @@ async def process_chat(client, room, event, command, store, openai_obj: ModuleTy
             messages.insert(index, {"role": "system", "content": injected_system_prompt})
 
         logger.debug(f'Generating reply to event {event.event_id}')
 
-        response = openai_obj.ChatCompletion.create(model=openai_model, messages=messages, temperature=0, timeout=10)
-        text_response = response["choices"][0]["message"]["content"].strip().strip('\n')
+        @stopit.threading_timeoutable(default=(None, None))
+        def generate():
+            response = openai_obj.ChatCompletion.create(model=openai_model, messages=messages, temperature=0, timeout=10)
+            return response["choices"][0]["message"]["content"].strip().strip('\n'), response
+
+        text_response = response = None
+        for i in range(openai_retries):
+            text_response, response = generate(timeout=5)
+            if text_response is not None:
+                break
+        if text_response is None:
+            logger.critical(f'OpenAI API timeout for event {event.event_id} in room {room.room_id}.')
+            await react_to_event(client, room.room_id, event.event_id, '❌')
+            return
+
         # Logging stuff
         if log_full_response:
-            logger.debug({'event_id': event.event_id, 'room': room.room_id, 'messages': messages, 'response': text_response})
+            logger.debug({'event_id': event.event_id, 'room': room.room_id, 'messages': messages, 'response': response})
         z = text_response.replace("\n", "\\n")
         if isinstance(command, str):
             x = command.replace("\n", "\\n")
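Review note: generate(timeout=5) runs synchronously inside an async handler, so each attempt blocks the event loop for up to 5 seconds, roughly 15 s total with the default retries (minor: the loop index i is unused; _ is conventional). If blocking matters, the same call could be pushed to a thread executor. A sketch of that alternative, reusing generate from the patch above; this is not what the commit does:

    import asyncio

    # Run the stopit-wrapped call off the event loop so other Matrix events
    # keep being handled while we wait on the API.
    loop = asyncio.get_running_loop()
    text_response, response = await loop.run_in_executor(
        None, lambda: generate(timeout=5))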
@@ -3,3 +3,4 @@ pyyaml
 markdown
 python-olm
 openai
+stopit