adjust personality and agent flow
This commit is contained in:
parent
4e93ed9380
commit
e69747331c
17
README.md
17
README.md
|
@@ -1,4 +1,19 @@
|
|||
# server-personification
|
||||
|
||||
_It would be funny if servers could talk._
|
||||
|
||||
This is a project to personify computer systems and give them a voice. OpenAI is used to create an agent you can converse with and use for server management.
|
||||
This is a project to personify computer systems and give them a voice. OpenAI is used to create an agent you can
|
||||
converse with and use for server management.
|
||||
|
||||
## To Do
|
||||
|
||||
- [ ] Cache per-hostname conversation history in a database. Store message timestamps as well.
|
||||
- [ ] Have the agent pull its personality from the database, using its hostname as the key.
|
||||
- [ ] Log all commands and their outputs to the database.
|
||||
- [ ] Use yaml for config.
|
||||
- [ ] Add the user's name.
|
||||
- [ ] Add a Matrix bot.
|
||||
- [ ] Integrate Icinga2 host and service checks functions.
|
||||
- [ ] Figure out system permissions and how to run as a special user.
|
||||
- [ ] Give the agent instructions on how to run the system (pulled from the database).
|
||||
- [ ] Have the agent run every `n` minutes to check Icinga2 and take action if necessary.
|
||||
|
|
|
@@ -1,8 +0,0 @@
|
|||
import socket
|
||||
|
||||
|
||||
def load_personality(name: str, personality: str, system: str):
|
||||
return {
|
||||
'role': 'system',
|
||||
'content': f"""Your name is {name}, who has the personality of {personality}. Interact with the user via this personality. {name} is an AI running on {system}. The system's hostname is "{socket.gethostname()}", which can be thought of as {name}'s "body". You are able to interact with the system via a Bash interpreter. You communicate with the user via the "talk" function."""
|
||||
}
|
|
@@ -13,4 +13,7 @@ def func_talk(response: ChatCompletion):
|
|||
j = json.loads(function_arguments)
|
||||
return j.get('message')
|
||||
except json.decoder.JSONDecodeError:
|
||||
print(response)
|
||||
# Sometimes the AI doesn't do JSON.
|
||||
return function_arguments
|
||||
else:
|
||||
print('THE AI DID NOT CALL A FUNCTION IN TALK:', response)
|
||||
|
|
|
@@ -0,0 +1,13 @@
|
|||
import socket
|
||||
|
||||
|
||||
def load_personality(name: str, personality: str, system: str):
|
||||
return {
|
||||
'role': 'system',
|
||||
'content': f"""Your name is {name}, who has the personality of {personality}. Interact with the user via this personality.
|
||||
{name} is an AI running on {system}.
|
||||
The system's hostname is "{socket.gethostname()}", which can be thought of as {name}'s "body".
|
||||
The user is {name}'s owner and system administrator.
|
||||
You communicate with the user via the "talk" function. You MUST use this command to send messages to the user.
|
||||
You are able to interact with the system via a Bash interpreter. When executing Bash commands, do not make any assumptions. Perform multiple steps if necessary."""
|
||||
}
|
60
run.py
60
run.py
|
@@ -1,16 +1,18 @@
|
|||
#!/usr/bin/env python3
|
||||
import json
|
||||
import readline
|
||||
import signal
|
||||
import socket
|
||||
import sys
|
||||
|
||||
from openai import OpenAI
|
||||
from termcolor import colored
|
||||
|
||||
from config import OPENAI_KEY
|
||||
from lib.character import load_personality
|
||||
from lib.openai.bash import func_run_bash
|
||||
from lib.openai.functs import function_description
|
||||
from lib.openai.talk import func_talk
|
||||
from lib.personality import load_personality
|
||||
|
||||
|
||||
def signal_handler(sig, frame):
|
||||
|
@@ -18,25 +20,41 @@ def signal_handler(sig, frame):
|
|||
sys.exit(0)
|
||||
|
||||
|
||||
# Keep pycharm from removing this import.
|
||||
readline.get_completion_type()
|
||||
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
|
||||
client = OpenAI(api_key=OPENAI_KEY)
|
||||
|
||||
temp_name = 'Sakura'
|
||||
character_card = load_personality('Sakura', 'a shy girl', 'a desktop computer')
|
||||
|
||||
context: list[dict[str, str]] = [character_card]
|
||||
|
||||
|
||||
def main():
|
||||
print(colored(f'System Management Intelligence Interface', 'green', attrs=['bold']) + ' ' + colored(temp_name, 'green', attrs=['bold', 'underline']) + colored(' on ', 'green', attrs=['bold']) + colored(socket.gethostname(), 'green', attrs=['bold', 'underline']) + '\n')
|
||||
|
||||
while True:
|
||||
next_input = str(input('> '))
|
||||
print('')
|
||||
context.append({'role': 'user', 'content': next_input})
|
||||
|
||||
i = 0
|
||||
while True:
|
||||
response = client.chat.completions.create(model="gpt-4",
|
||||
messages=context,
|
||||
functions=function_description)
|
||||
temp_context = context
|
||||
if i > 0:
|
||||
temp_context.append({'role': 'system', 'content': 'Run another command or call "talk" to communicate with the user.'})
|
||||
|
||||
print(temp_context)
|
||||
|
||||
response = client.chat.completions.create(
|
||||
model="gpt-4",
|
||||
messages=temp_context,
|
||||
functions=function_description,
|
||||
temperature=0.6
|
||||
)
|
||||
function_call = response.choices[0].message.function_call
|
||||
if function_call:
|
||||
function_name = function_call.name
|
||||
|
@@ -48,33 +66,23 @@ def main():
|
|||
print(colored(response_text, 'blue') + '\n')
|
||||
break
|
||||
|
||||
print(f'Executing {function_name}("{function_arguments}")')
|
||||
print(f'Executing {function_name}("{function_arguments}")' + '\n')
|
||||
|
||||
if function_name != 'run_bash':
|
||||
context.append({'role': 'system', 'content': f'"{function_name}" is not a valid function.'})
|
||||
continue
|
||||
else:
|
||||
command_output = func_run_bash(function_arguments)
|
||||
result_to_ai = {
|
||||
'stdout': command_output[0],
|
||||
'stderr': command_output[1],
|
||||
'return_code': command_output[2]
|
||||
}
|
||||
context.append({'role': 'function', 'name': function_name, 'content': json.dumps(result_to_ai)})
|
||||
|
||||
command_output = func_run_bash(function_arguments)
|
||||
result_to_ai = {
|
||||
'stdout': command_output[0],
|
||||
'stderr': command_output[1],
|
||||
'return_code': command_output[2]
|
||||
}
|
||||
|
||||
context.append({'role': 'function', 'name': function_name, 'content': json.dumps(result_to_ai)})
|
||||
context.append({'role': 'system', 'content': 'Run another command or call "talk" to finish this command.'})
|
||||
|
||||
function_response = client.chat.completions.create(
|
||||
model="gpt-4",
|
||||
messages=context,
|
||||
functions=function_description
|
||||
)
|
||||
response_text = func_talk(function_response)
|
||||
context.append({'role': 'assistant', 'content': response_text})
|
||||
print(colored(response_text, 'blue'))
|
||||
break
|
||||
# Restart the loop to let the agent decide what to do next.
|
||||
else:
|
||||
continue
|
||||
context.append({'role': 'system', 'content': f'Must call a function. Use "talk" to communicate with the user.'})
|
||||
i += 1
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
Reference in New Issue