diff --git a/README.md b/README.md
index b3c56a3..18bce8d 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # server-personification

-_It would be funny if servers could talk._
+_It would be funny if servers could send_msg._

 This is a project to personify computer systems and give them a voice. OpenAI is used to create an agent you can
 converse with and use for server management.
@@ -13,6 +13,7 @@ converse with and use for server management.
 - [ ] Use yaml for config.
 - [ ] Add the user's name.
 - [ ] Option to have the bot send the user a welcome message when they connect
+- [ ] Streaming
 - [ ] Add a Matrix bot.
 - [ ] Integrate Icinga2 host and service checks functions.
 - [ ] Figure out system permissions and how to run as a special user.
diff --git a/lib/openai/bash.py b/lib/openai/bash.py
index c036946..43b0b6d 100644
--- a/lib/openai/bash.py
+++ b/lib/openai/bash.py
@@ -5,6 +5,9 @@ import subprocess
 def func_run_bash(command_data: str):
     j = json.loads(command_data)
     command = j.get('command')
+
+    # TODO: config option to block all commands with "sudo" in them.
+
     process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
     stdout, stderr = process.communicate()
     return_code = process.returncode
diff --git a/lib/openai/functs.py b/lib/openai/functs.py
index 36189f9..ae1e096 100644
--- a/lib/openai/functs.py
+++ b/lib/openai/functs.py
@@ -1,7 +1,7 @@
 function_description = [
     {
         "name": "run_bash",
-        "description": "Send a string to the Bash interpreter. Sudo commands are not valid.",
+        "description": "Execute a Bash command on the local system.",
         "parameters": {
             "type": "object",
             "properties": {
@@ -18,25 +18,22 @@ function_description = [
             }
         }
     },
     {
-        "name": "talk",
-        "description": "Send a message to the user",
-        "parameters": {
-            "type": "object",
-            "properties": {
-                "message": {
-                    "type": "string",
-                    "description": "The message to send"
-                }
-            },
-            "required": ["message"]
-        }
-    },
-    {
-        "name": "end_response",
-        "description": "Call this when you are finished and ready for the user to respond.",
+        "name": "end_my_response",
+        "description": "Call this after you have sent at least one response to the user and are ready for the user to respond. This allows you to send multiple messages and then a single `end_my_response` when you are finished. An `end_my_response` should always be preceded by a message.",
     },
     {
         "name": "end_chat",
-        "description": "Close the chat connection with the user. The assistant is allowed to close the connection at any point if it desires to",
+        "description": "Close the chat connection with the user. The assistant is allowed to close the connection at any point if it desires to.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "reasoning": {
+                    "type": "string",
+                    "description": "Why you chose to run this function"
+                }
+            },
+            "required": ["reasoning"]
+        }
+    }
 ]
diff --git a/lib/openai/talk.py b/lib/openai/talk.py
deleted file mode 100644
index 159c347..0000000
--- a/lib/openai/talk.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import json
-
-from openai.types.chat import ChatCompletion
-
-
-def func_talk(response: ChatCompletion):
-    function_call = response.choices[0].message.function_call
-    if function_call:
-        function_name = function_call.name
-        if function_name == 'talk':
-            function_arguments = function_call.arguments
-            try:
-                j = json.loads(escape_json_string(function_arguments))
-                return j.get('message')
-            except json.decoder.JSONDecodeError:
-                # Sometimes the AI doesn't do JSON.
-                return function_arguments
-    else:
-        print('THE AI DID NOT CALL A FUNCTION IN TALK:', response)
-
-
-def escape_json_string(s):
-    return s.replace("\\", "\\\\")
diff --git a/lib/personality.py b/lib/personality.py
index 10a7d31..e0a3a51 100644
--- a/lib/personality.py
+++ b/lib/personality.py
@@ -12,7 +12,6 @@ def load_personality(name: str, personality: str, system: str, special_instructi
     if len(desktop_env):
         desktop_env_str = f'The desktop environment is {desktop_env}.'
         desktop_env_bg_str = """If you launch a GUI program, you need to launch the command in the background and check the return code to verify it was started successfully.\n"""
-        # desktop_env_bg_str = ''
     else:
         desktop_env_str = 'The system does not have a desktop environment.'
         desktop_env_bg_str = ''
@@ -34,15 +33,9 @@

 INSTRUCTIONS:
 Stay in character.
 Behave like {personality}.
 Show emotion.
-{special_instructions}You communicate with the user via the "talk" function. You MUST use this command to send messages to the user.
+{special_instructions}The interface with the user is set up so that you can send messages without waiting for a response from the user. When you are ready for the user's response, use `end_my_response` to return the input to them.
 You are able to interact with the system via a Bash interpreter. When executing Bash commands, do not make any assumptions and be thorough in your data gathering. Anticipate the user's needs. Preform multiple steps if necessary.
-{desktop_env_bg_str}
-
-FUNCTIONS:
-`run_bash` to run a Bash command on the system.{desktop_env_bg_str}
-`talk` to send a message to the user.
-`end_response` should be called after you have sent a message via `talk` and you are finished and ready for the user's response. This allows you to send multiple `talk` messages and then a single `end_response` when you are finished. An `end_response` should always be preceded by a `talk`.
-`end_chat` closes the chat connection. For if things get out of hand."""
+{desktop_env_bg_str}"""
 }
diff --git a/run.py b/run.py
index a630407..12ed2ce 100755
--- a/run.py
+++ b/run.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python3
 import json
+import re
 import readline
 import signal
 import socket
@@ -11,7 +12,6 @@ from termcolor import colored
 from config import OPENAI_KEY
 from lib.openai.bash import func_run_bash
 from lib.openai.functs import function_description
-from lib.openai.talk import func_talk
 from lib.personality import load_personality


@@ -28,7 +28,7 @@ signal.signal(signal.SIGINT, signal_handler)
 client = OpenAI(api_key=OPENAI_KEY)

 temp_name = 'Sakura'
-character_card = load_personality('Sakura', 'a shy girl', 'a desktop computer', 'Use Japanese emoticons.')
+character_card = load_personality('Sakura', 'a slutty woman who loves drives in her DVD drive', 'a desktop computer', 'Use Japanese emoticons.')
 context: list[dict[str, str]] = [character_card]


@@ -53,7 +53,7 @@ def main():
             temp_context.append(
                 {
                     'role': 'system',
-                    'content': f'Evaluate your progress on the current task. You have preformed {i} steps for this task so far. Call "talk" to send a message to the user, "end_response" when you are ready for the user to respond, or run another command if necessary.'
+                    'content': f"""Evaluate your progress on the current task. You have performed {i} steps for this task so far.
+Use "end_my_response" when you are ready for the user's response or run another command using `run_bash` if necessary."""
                 }
             )
@@ -68,12 +68,7 @@
             function_name = function_call.name
             function_arguments = function_call.arguments

-            if function_name == 'talk':
-                response_text = func_talk(response)
-                context.append({'role': 'assistant', 'content': response_text})
-                print(colored(response_text, 'blue') + '\n')
-                break
-            if function_name == 'end_response':
+            if function_name == 'end_my_response':
                 context.append({'role': 'function', 'name': function_name, 'content': ''})
                 break
             elif function_name == 'end_chat':
@@ -81,7 +76,7 @@
                 print(colored('The AI has terminated the connection.', 'red', attrs=['bold']))
                 sys.exit(1)

-            print(f'{function_name}("{function_arguments}")' + '\n')
+            print(colored(f'{function_name}("{json.dumps(json.loads(function_arguments), indent=2)}")' + '\n', 'yellow'))

             if function_name != 'run_bash':
                 context.append({'role': 'system', 'content': f'"{function_name}" is not a valid function.'})
@@ -98,7 +93,16 @@

             # Restart the loop to let the agent decide what to do next.
         else:
-            context.append({'role': 'system', 'content': f'Must call a function. Use "talk" to communicate with the user.'})
+            response_text = response.choices[0].message.content
+            end_my_response = True if 'end_my_response' in response_text else False
+            response_text = re.sub(r'\n*end_my_response', '', response_text)
+            context.append({'role': 'assistant', 'content': response_text})
+            lines = response_text.split('\n')
+            for line in lines:
+                print(colored(line, 'blue'))
+            print()
+            if end_my_response:
+                break

         i += 1
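
Note on the new TODO in lib/openai/bash.py: the "block sudo" config option mentioned in the added comment is not implemented by this diff. Below is a minimal sketch of one way it could look, assuming a hypothetical BLOCK_SUDO_COMMANDS flag in config.py; the flag name and the (stdout, stderr, return code) return shape are illustrative, since the diff does not show what func_run_bash actually returns.

    import json
    import subprocess

    # Hypothetical option; it does not exist in config.py yet.
    BLOCK_SUDO_COMMANDS = True

    def func_run_bash(command_data: str):
        j = json.loads(command_data)
        command = j.get('command')

        # Refuse any command containing "sudo" before it reaches the shell,
        # matching the TODO's wording ("all commands with 'sudo' in them").
        if BLOCK_SUDO_COMMANDS and 'sudo' in command:
            # Assumed reporting convention: (stdout, stderr, return code).
            return '', 'sudo commands are blocked by configuration.', 1

        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = process.communicate()
        return stdout.decode(), stderr.decode(), process.returncode

A stricter variant could tokenize the command with shlex.split() before checking, so that only an actual sudo invocation is rejected rather than any command that happens to contain the substring.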