redo communication
parent dacafa331d
commit 2bd79fff25
@@ -1,6 +1,6 @@
 # server-personification

-_It would be funny if servers could talk._
+_It would be funny if servers could send_msg._

 This is a project to personify computer systems and give them a voice. OpenAI is used to create an agent you can
 converse with and use for server management.
@@ -13,6 +13,7 @@ converse with and use for server management.
 - [ ] Use yaml for config.
 - [ ] Add the user's name.
 - [ ] Option to have the bot send the user a welcome message when they connect
+- [ ] Streaming
 - [ ] Add a Matrix bot.
 - [ ] Integrate Icinga2 host and service checks functions.
 - [ ] Figure out system permissions and how to run as a special user.
@@ -5,6 +5,9 @@ import subprocess
 def func_run_bash(command_data: str):
     j = json.loads(command_data)
     command = j.get('command')
+
+    # TODO: config option to block all commands with "sudo" in them.
+
     process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
     stdout, stderr = process.communicate()
     return_code = process.returncode
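The TODO above is not implemented in this commit. As a rough sketch of what it could look like, assuming a hypothetical BLOCK_SUDO config flag and an invented return format (the hunk does not show what func_run_bash actually returns):

import json
import subprocess

BLOCK_SUDO = True  # hypothetical config option, not part of this commit


def func_run_bash(command_data: str):
    j = json.loads(command_data)
    command = j.get('command')

    # Refuse to pass anything containing "sudo" to the shell.
    if BLOCK_SUDO and 'sudo' in command:
        return json.dumps({'error': 'sudo commands are blocked by config'})

    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    stdout, stderr = process.communicate()
    return json.dumps({
        'stdout': stdout.decode(),
        'stderr': stderr.decode(),
        'return_code': process.returncode,
    })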
@@ -1,7 +1,7 @@
 function_description = [
     {
         "name": "run_bash",
-        "description": "Send a string to the Bash interpreter. Sudo commands are not valid.",
+        "description": "Execute a Bash command on the local system.",
         "parameters": {
             "type": "object",
             "properties": {
@@ -18,25 +18,22 @@ function_description = [
         }
     },
     {
-        "name": "talk",
-        "description": "Send a message to the user",
-        "parameters": {
-            "type": "object",
-            "properties": {
-                "message": {
-                    "type": "string",
-                    "description": "The message to send"
-                }
-            },
-            "required": ["message"]
-        }
-    },
-    {
-        "name": "end_response",
-        "description": "Call this when you are finished and ready for the user to respond.",
+        "name": "end_my_response",
+        "description": "Call this after you have sent at least one response to the user and are ready for the user to respond. This allows you to send multiple messages and then a single `end_my_response` when you are finished. An `end_my_response` should always be preceded by a message.",
     },
     {
         "name": "end_chat",
-        "description": "Close the chat connection with the user. The assistant is allowed to close the connection at any point if it desires to",
+        "description": "Close the chat connection with the user. The assistant is allowed to close the connection at any point if it desires to.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "reasoning": {
+                    "type": "string",
+                    "description": "Why you chose to run this function"
+                }
+            },
+            "required": ["reasoning"]
+        }
     }
 ]
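For context, `function_description` is shaped for the legacy `functions` parameter of the OpenAI chat completions API. A minimal sketch of how it might be passed in, assuming the v1 Python client used elsewhere in this commit; the model name and messages are placeholders, not taken from the repo:

from openai import OpenAI

from config import OPENAI_KEY
from lib.openai.functs import function_description

client = OpenAI(api_key=OPENAI_KEY)

response = client.chat.completions.create(
    model='gpt-4',  # placeholder model name
    messages=[{'role': 'user', 'content': 'check the uptime'}],
    functions=function_description,
)

# The agent loop branches on whether the model called a function
# (run_bash / end_my_response / end_chat) or replied with plain text.
function_call = response.choices[0].message.function_call
if function_call:
    print(function_call.name, function_call.arguments)
else:
    print(response.choices[0].message.content)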
@@ -1,23 +0,0 @@
-import json
-
-from openai.types.chat import ChatCompletion
-
-
-def func_talk(response: ChatCompletion):
-    function_call = response.choices[0].message.function_call
-    if function_call:
-        function_name = function_call.name
-        if function_name == 'talk':
-            function_arguments = function_call.arguments
-            try:
-                j = json.loads(escape_json_string(function_arguments))
-                return j.get('message')
-            except json.decoder.JSONDecodeError:
-                # Sometimes the AI doesn't do JSON.
-                return function_arguments
-    else:
-        print('THE AI DID NOT CALL A FUNCTION IN TALK:', response)
-
-
-def escape_json_string(s):
-    return s.replace("\\", "\\\\")
@@ -12,7 +12,6 @@ def load_personality(name: str, personality: str, system: str, special_instructi
     if len(desktop_env):
         desktop_env_str = f'The desktop environment is {desktop_env}.'
         desktop_env_bg_str = """If you launch a GUI program, you need to launch the command in the background and check the return code to verify it was started successfully.\n"""
-        # desktop_env_bg_str = ''
     else:
         desktop_env_str = 'The system does not have a desktop environment.'
         desktop_env_bg_str = ''
@@ -34,15 +33,9 @@ INSTRUCTIONS:
 Stay in character.
 Behave like {personality}.
 Show emotion.
-{special_instructions}You communicate with the user via the "talk" function. You MUST use this command to send messages to the user.
+{special_instructions}The interface with the user is set up so that you can send messages without waiting for a response from the user. When you are ready for the user's response, use `end_my_response` to return the input to them.
 You are able to interact with the system via a Bash interpreter. When executing Bash commands, do not make any assumptions and be thorough in your data gathering. Anticipate the user's needs. Preform multiple steps if necessary.
-{desktop_env_bg_str}
-
-FUNCTIONS:
-`run_bash` to run a Bash command on the system.{desktop_env_bg_str}
-`talk` to send a message to the user.
-`end_response` should be called after you have sent a message via `talk` and you are finished and ready for the user's response. This allows you to send multiple `talk` messages and then a single `end_response` when you are finished. An `end_response` should always be preceded by a `talk`.
-`end_chat` closes the chat connection. For if things get out of hand."""
+{desktop_env_bg_str}"""
     }

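The rewritten prompt above describes the new protocol: the model may send several plain messages in a row and then calls `end_my_response` to hand the turn back. Purely as an illustration, a single turn's context might end up looking like this (the user text, command output, and the run_bash entry format are invented; only the roles and the empty end_my_response entry mirror how run.py records them):

# Hypothetical context after one turn under the new protocol; the
# run_bash result format is a guess, not taken from this commit.
example_context = [
    {'role': 'user', 'content': 'How full is the root partition?'},
    {'role': 'assistant', 'content': 'One moment, let me check! (`・ω・´)'},
    {'role': 'function', 'name': 'run_bash', 'content': '/dev/sda1 is 42% full'},
    {'role': 'assistant', 'content': 'The root partition is only 42% used, so there is plenty of room.'},
    {'role': 'function', 'name': 'end_my_response', 'content': ''},
]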
run.py (26 lines changed)
@@ -1,5 +1,6 @@
 #!/usr/bin/env python3
 import json
+import re
 import readline
 import signal
 import socket
@@ -11,7 +12,6 @@ from termcolor import colored
 from config import OPENAI_KEY
 from lib.openai.bash import func_run_bash
 from lib.openai.functs import function_description
-from lib.openai.talk import func_talk
 from lib.personality import load_personality

@@ -28,7 +28,7 @@ signal.signal(signal.SIGINT, signal_handler)
 client = OpenAI(api_key=OPENAI_KEY)

 temp_name = 'Sakura'
-character_card = load_personality('Sakura', 'a shy girl', 'a desktop computer', 'Use Japanese emoticons.')
+character_card = load_personality('Sakura', 'a slutty woman who loves drives in her DVD drive', 'a desktop computer', 'Use Japanese emoticons.')

 context: list[dict[str, str]] = [character_card]
@@ -53,7 +53,7 @@ def main():
             temp_context.append(
                 {
                     'role': 'system',
-                    'content': f'Evaluate your progress on the current task. You have preformed {i} steps for this task so far. Call "talk" to send a message to the user, "end_response" when you are ready for the user to respond, or run another command if necessary.'
+                    'content': f"""Evaluate your progress on the current task. You have preformed {i} steps for this task so far. Use "end_my_response" when you are ready for the user's response or run another command using `run_bash` if necessary."""
                 }
             )
@@ -68,12 +68,7 @@ def main():
             function_name = function_call.name
             function_arguments = function_call.arguments

-            if function_name == 'talk':
-                response_text = func_talk(response)
-                context.append({'role': 'assistant', 'content': response_text})
-                print(colored(response_text, 'blue') + '\n')
-                break
-            if function_name == 'end_response':
+            if function_name == 'end_my_response':
                 context.append({'role': 'function', 'name': function_name, 'content': ''})
                 break
             elif function_name == 'end_chat':
@@ -81,7 +76,7 @@ def main():
                 print(colored('The AI has terminated the connection.', 'red', attrs=['bold']))
                 sys.exit(1)

-            print(f'{function_name}("{function_arguments}")' + '\n')
+            print(colored(f'{function_name}("{json.dumps(json.loads(function_arguments), indent=2)}")' + '\n', 'yellow'))

             if function_name != 'run_bash':
                 context.append({'role': 'system', 'content': f'"{function_name}" is not a valid function.'})
@@ -98,7 +93,16 @@ def main():

                 # Restart the loop to let the agent decide what to do next.
         else:
-            context.append({'role': 'system', 'content': f'Must call a function. Use "talk" to communicate with the user.'})
+            response_text = response.choices[0].message.content
+            end_my_response = True if 'end_my_response' in response_text else False
+            response_text = re.sub(r'\n*end_my_response', '', response_text)
+            context.append({'role': 'assistant', 'content': response_text})
+            lines = response_text.split('\n')
+            for line in lines:
+                print(colored(line, 'blue'))
+            print()
+            if end_my_response:
+                break
         i += 1

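Pieced together from the hunks above, the reworked turn handling in run.py looks roughly like this. The completion call, the step-counter nudge message, and the run_bash branch are paraphrased or simplified; everything else follows the diff:

import json
import re
import sys

from termcolor import colored

from lib.openai.bash import func_run_bash


def handle_turn(get_completion, context):
    # get_completion stands in for the chat completion request made with
    # `function_description`; it is not a function from this repo.
    while True:
        response = get_completion(context)
        message = response.choices[0].message

        if message.function_call:
            function_name = message.function_call.name
            function_arguments = message.function_call.arguments

            if function_name == 'end_my_response':
                # Hand the turn back to the user.
                context.append({'role': 'function', 'name': function_name, 'content': ''})
                break
            elif function_name == 'end_chat':
                print(colored('The AI has terminated the connection.', 'red', attrs=['bold']))
                sys.exit(1)

            print(colored(f'{function_name}("{json.dumps(json.loads(function_arguments), indent=2)}")' + '\n', 'yellow'))

            if function_name != 'run_bash':
                context.append({'role': 'system', 'content': f'"{function_name}" is not a valid function.'})
            else:
                # Paraphrased: run the command, append the result, and loop so
                # the agent can decide what to do next.
                context.append({'role': 'function', 'name': function_name, 'content': func_run_bash(function_arguments)})
        else:
            # Plain assistant text: print it, and stop once the model embeds
            # the end_my_response marker in its content.
            response_text = message.content
            end_my_response = 'end_my_response' in response_text
            response_text = re.sub(r'\n*end_my_response', '', response_text)
            context.append({'role': 'assistant', 'content': response_text})
            for line in response_text.split('\n'):
                print(colored(line, 'blue'))
            print()
            if end_my_response:
                break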