#!/usr/bin/env python3
|
|
import json
|
|
import readline
|
|
import signal
|
|
import socket
|
|
import sys
|
|
|
|
from openai import OpenAI
|
|
from termcolor import colored
|
|
|
|
from config import OPENAI_KEY
|
|
from lib.openai.bash import func_run_bash
|
|
from lib.openai.functs import function_description
|
|
from lib.openai.talk import func_talk
|
|
from lib.personality import load_personality
|
|
|
|
|
|
def signal_handler(sig, frame):
    """Handle SIGINT: move the cursor to a fresh line and exit cleanly."""
    sys.stdout.write('\n')
    sys.exit(0)
|
|
|
|
|
|
# Keep pycharm from removing this import.
# (Importing readline is what enables line editing/history on input();
# this no-op call marks the import as used.)
readline.get_completion_type()

# Exit quietly on Ctrl-C instead of printing a KeyboardInterrupt traceback.
signal.signal(signal.SIGINT, signal_handler)

client = OpenAI(api_key=OPENAI_KEY)

# Display name of the assistant persona shown in the banner.
temp_name = 'Sakura'
character_card = load_personality('Sakura', 'a shy girl', 'a desktop computer', 'Use Japanese emoticons.')

# Running conversation history, seeded with the persona's system card.
# Mutated in place by main() as the chat progresses.
context: list[dict[str, str]] = [character_card]
|
|
|
|
|
|
def main():
    """Run the interactive chat REPL.

    Reads user input in a loop, forwards it to the model, and executes the
    function calls the model requests ("run_bash", "talk", "end_response",
    "end_chat") until the model yields the turn back to the user.

    Exits via sys.exit(0) on EOF (Ctrl-D) and sys.exit(1) when the model
    calls "end_chat".
    """
    print(
        colored('System Management Intelligence Interface', 'green', attrs=['bold'])
        + ' '
        + colored(temp_name, 'green', attrs=['bold', 'underline'])
        + colored(' on ', 'green', attrs=['bold'])
        + colored(socket.gethostname(), 'green', attrs=['bold', 'underline'])
        + '\n'
    )

    while True:
        try:
            next_input = input('> ')  # input() already returns str
        except EOFError:
            # Ctrl-D: leave the REPL cleanly.
            print('Exit')
            sys.exit(0)
        print('')
        context.append({'role': 'user', 'content': next_input})

        i = 0
        while True:
            # Work on a copy so the per-step system reminder below is sent
            # with this request only. The original aliased `context` here,
            # so every reminder leaked into the persistent history and
            # accumulated across steps.
            temp_context = list(context)
            if i > 0:
                # Insert a progress prompt if this is not the first step.
                temp_context.append(
                    {
                        'role': 'system',
                        'content': f'Evaluate your progress on the current task. You have performed {i} steps for this task so far. Call "talk" to send a message to the user, "end_response" when you are ready for the user to respond, or run another command if necessary.'
                    }
                )

            response = client.chat.completions.create(
                model="gpt-4-1106-preview",  # TODO: config
                messages=temp_context,
                functions=function_description,
                temperature=0.7
            )
            function_call = response.choices[0].message.function_call
            if function_call:
                function_name = function_call.name
                function_arguments = function_call.arguments

                if function_name == 'talk':
                    # Model wants to speak to the user: show the message and
                    # end this agent turn.
                    response_text = func_talk(response)
                    context.append({'role': 'assistant', 'content': response_text})
                    print(colored(response_text, 'blue') + '\n')
                    break
                if function_name == 'end_response':
                    # Model is done acting; hand control back to the user.
                    context.append({'role': 'function', 'name': function_name, 'content': ''})
                    break
                elif function_name == 'end_chat':
                    # TODO: add a config arg to control whether or not the AI is allowed to do this.
                    print(colored('The AI has terminated the connection.', 'red', attrs=['bold']))
                    sys.exit(1)

                # Echo the function call so the user can see what the agent does.
                print(f'{function_name}("{function_arguments}")' + '\n')

                if function_name != 'run_bash':
                    # Push the error back to the model so it can self-correct.
                    context.append({'role': 'system', 'content': f'"{function_name}" is not a valid function.'})
                else:
                    # Execute the requested shell command and report
                    # stdout/stderr/return code back to the model as JSON.
                    command_output = func_run_bash(function_arguments)
                    result_to_ai = {
                        'function': function_name,
                        'input': function_arguments,
                        'stdout': command_output[0],
                        'stderr': command_output[1],
                        'return_code': command_output[2]
                    }
                    context.append({'role': 'function', 'name': function_name, 'content': json.dumps(result_to_ai)})

                # Restart the loop to let the agent decide what to do next.
            else:
                # Model replied with plain text; function calling is required.
                context.append({'role': 'system', 'content': 'Must call a function. Use "talk" to communicate with the user.'})
            i += 1
|
|
|
|
|
|
# Run the REPL only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|