add code
This commit is contained in:
parent
c1633a8fe0
commit
2fb6579031
|
@ -1,3 +1,6 @@
|
|||
.idea
|
||||
config.py
|
||||
|
||||
# ---> Python
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
|
@ -15,7 +18,6 @@ dist/
|
|||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
# server-personification
|
||||
_It would be funny if servers could talk._
|
||||
|
||||
It would be funny if my servers could talk.
|
||||
This is a project to personify computer systems and give them a voice. The OpenAI API is used to create an agent you can converse with and use for server management.
|
|
@ -0,0 +1 @@
|
|||
import os

# SECURITY: never commit a live API key to version control. The key is read
# from the environment; the literal fallback only preserves backward
# compatibility — if it was ever a real key it must be rotated.
OPENAI_KEY = os.environ.get('OPENAI_KEY', 'sk-123123kl123lkj123lkj12lk3j')
|
|
@ -0,0 +1,8 @@
|
|||
import socket
|
||||
|
||||
|
||||
def load_personality(name: str, personality: str, system: str):
    """Build the system-role primer message that gives the model a persona.

    ``name`` is the character's name, ``personality`` a short persona
    description, and ``system`` the kind of machine the AI "inhabits".
    Returns a chat-message dict for the OpenAI ``messages`` list.
    """
    host = socket.gethostname()
    prompt = f"""Your name is {name}, who has the personality of {personality}. Interact with the user via this personality. {name} is an AI running on {system}. The system's hostname is "{host}", which can be thought of as {name}'s "body". You are able to interact with the system via a Bash interpreter. You communicate with the user via the "talk" function."""
    return dict(role='system', content=prompt)
|
|
@ -0,0 +1,6 @@
|
|||
def get_host_status(hostname: str):
    """Placeholder: report the status of *hostname* (not yet implemented)."""
    return None
|
||||
|
||||
|
||||
def get_service_status(hostname: str, service_name: str):
    """Placeholder: report *service_name*'s status on *hostname* (not yet implemented)."""
    return None
|
|
@ -0,0 +1,11 @@
|
|||
import json
|
||||
import subprocess
|
||||
|
||||
|
||||
def func_run_bash(command_data: str):
    """Execute a shell command requested by the model.

    ``command_data`` is a JSON document of the form
    ``{"command": "<shell string>"}``. Returns a tuple
    ``(stdout, stderr, return_code)`` with both streams decoded as UTF-8.
    Raises ``json.JSONDecodeError`` when ``command_data`` is not valid JSON.

    SECURITY NOTE: the command originates from the language model and is run
    with ``shell=True`` on purpose — the tool's advertised contract is "send
    a string to the Bash interpreter". Do not expose this to untrusted
    callers.
    """
    command = json.loads(command_data).get('command')
    # subprocess.run performs communicate()/wait() itself and closes the
    # pipes, avoiding the handle leak the previous Popen version left to GC.
    completed = subprocess.run(
        command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return (
        completed.stdout.decode('utf-8'),
        completed.stderr.decode('utf-8'),
        completed.returncode,
    )
|
|
@ -0,0 +1,30 @@
|
|||
# OpenAI function-calling schema advertised to the model: one tool that runs
# a shell command on the host, and one tool that sends a chat message back
# to the user.
function_description = [
    {
        'name': 'run_bash',
        'description': 'Send a string to the Bash interpreter. Sudo commands are not valid.',
        'parameters': {
            'type': 'object',
            'properties': {
                'command': {
                    'type': 'string',
                    'description': 'The string to execute in Bash',
                },
            },
            'required': ['command'],
        },
    },
    {
        'name': 'talk',
        'description': 'Send a message to the user',
        'parameters': {
            'type': 'object',
            'properties': {
                'message': {
                    'type': 'string',
                    'description': 'The message to send',
                },
            },
            'required': ['message'],
        },
    },
]
|
|
@ -0,0 +1,16 @@
|
|||
import json
|
||||
|
||||
from openai.types.chat import ChatCompletion
|
||||
|
||||
|
||||
def func_talk(response: 'ChatCompletion'):
    """Extract the user-facing message from a "talk" function call.

    ``response`` is a chat-completions response whose first choice may carry
    a ``function_call``. Returns the ``message`` argument when the model
    called ``talk``; otherwise ``None``. Also returns ``None`` when the
    model's arguments are malformed JSON, printing the raw response for
    debugging instead of crashing the chat loop.
    """
    function_call = response.choices[0].message.function_call
    if not function_call or function_call.name != 'talk':
        # Not a "talk" call — nothing to say to the user.
        return None
    try:
        arguments = json.loads(function_call.arguments)
    except json.decoder.JSONDecodeError:
        # Malformed JSON from the model; surface it for debugging.
        print(response)
        return None
    return arguments.get('message')
|
|
@ -0,0 +1,3 @@
|
|||
openai==1.8.0
|
||||
requests~=2.31.0
|
||||
termcolor~=2.4.0
|
|
@ -0,0 +1,81 @@
|
|||
#!/usr/bin/env python3
|
||||
import json
|
||||
import signal
|
||||
import sys
|
||||
|
||||
from openai import OpenAI
|
||||
from termcolor import colored
|
||||
|
||||
from config import OPENAI_KEY
|
||||
from lib.character import load_personality
|
||||
from lib.openai.bash import func_run_bash
|
||||
from lib.openai.functs import function_description
|
||||
from lib.openai.talk import func_talk
|
||||
|
||||
|
||||
def signal_handler(sig, frame):
    """SIGINT handler: emit a trailing newline, then exit with status 0."""
    print()
    raise SystemExit(0)
|
||||
|
||||
|
||||
# Install the Ctrl-C handler before any blocking input() call.
signal.signal(signal.SIGINT, signal_handler)

# NOTE(review): OPENAI_KEY comes from config.py — confirm it is not a
# committed literal key in production.
client = OpenAI(api_key=OPENAI_KEY)

# Persona primer message; 'Sakura' is the hard-coded character for now.
character_card = load_personality('Sakura', 'a shy girl', 'a desktop computer')

# Running chat history sent with every completion request, seeded with the
# system persona message. Mutated in place by main().
context: list[dict[str, str]] = [character_card]
|
||||
|
||||
|
||||
def main():
    """Interactive chat loop between the user and the persona.

    Reads a line from stdin, appends it to the shared ``context``, then
    loops on chat completions, executing any ``run_bash`` function calls
    the model makes until the model replies to the user via ``talk``.
    """
    while True:
        next_input = str(input('> '))
        print('')
        context.append({'role': 'user', 'content': next_input})

        # Service model function calls until a user-facing reply is produced.
        while True:
            response = client.chat.completions.create(model="gpt-4",
                                                      messages=context,
                                                      functions=function_description)
            function_call = response.choices[0].message.function_call
            if function_call:
                function_name = function_call.name
                function_arguments = function_call.arguments

                # The model chose to speak: record and display the message,
                # then return to the outer loop for more user input.
                if function_name == 'talk':
                    response_text = func_talk(response)
                    context.append({'role': 'assistant', 'content': response_text})
                    print(colored(response_text, 'blue') + '\n')
                    break

                print(f'Executing {function_name}("{function_arguments}")')

                # Reject anything other than the two advertised functions and
                # ask the model again.
                if function_name != 'run_bash':
                    context.append({'role': 'system', 'content': f'"{function_name}" is not a valid function.'})
                    continue

                # Run the model-supplied shell command and feed the result
                # back as a `function`-role message.
                command_output = func_run_bash(function_arguments)
                result_to_ai = {
                    'stdout': command_output[0],
                    'stderr': command_output[1],
                    'return_code': command_output[2]
                }

                context.append({'role': 'function', 'name': function_name, 'content': json.dumps(result_to_ai)})
                context.append({'role': 'system', 'content': 'Run another command or can call "talk" to finish this command.'})

                # NOTE(review): this follow-up completion is assumed to be a
                # `talk` call; if the model issues another function call here,
                # func_talk returns None and None is stored/printed, and the
                # unconditional break below abandons the exchange — confirm
                # whether this should loop instead.
                function_response = client.chat.completions.create(
                    model="gpt-4",
                    messages=context,
                    functions=function_description
                )
                response_text = func_talk(function_response)
                context.append({'role': 'assistant', 'content': response_text})
                print(colored(response_text, 'blue'))
                break
            else:
                # No function call in the response: any plain assistant text
                # is discarded and another completion is requested.
                # NOTE(review): this can loop (and bill) indefinitely if the
                # model keeps answering in plain text — confirm intended.
                continue
|
||||
|
||||
|
||||
# Script entry point.
if __name__ == "__main__":
    main()
|
Loading…
Reference in New Issue