Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Saneify #17

Open
wants to merge 26 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
5467e13
factor out subprocess.run
Gurkenglas Mar 29, 2023
305a79e
factor out os.name == posix
Gurkenglas Mar 29, 2023
31125a6
factor out "Execute ..."
Gurkenglas Mar 29, 2023
1e2633f
deduplicate
Gurkenglas Mar 29, 2023
8d2f0a6
we already error if this isn't main.
Gurkenglas Mar 29, 2023
2fc47f8
fix system_prompt = 'c'
Gurkenglas Mar 29, 2023
3e81dc6
inline all functions only only once
Gurkenglas Mar 29, 2023
fb5eba0
use f"" syntax
Gurkenglas Mar 29, 2023
87f8b06
refactor away explicit recursion
Gurkenglas Mar 29, 2023
d6ce976
if you needed these comments, use copilot.
Gurkenglas Mar 29, 2023
56f908a
golf imports
Gurkenglas Mar 29, 2023
ad76866
refactor away command_start_idx
Gurkenglas Mar 29, 2023
5c596eb
fix empty-query check failing with -a
Gurkenglas Mar 29, 2023
5c61304
refactor away ask_flag
Gurkenglas Mar 29, 2023
2bd0735
inline lines used up to once
Gurkenglas Mar 29, 2023
c2bfda0
always copy command to clipboard.
Gurkenglas Mar 29, 2023
2f2dcb4
use stderr for errors
Gurkenglas Mar 29, 2023
5a339fd
use with to open
Gurkenglas Mar 29, 2023
ff86d52
hardcode in the safety.
Gurkenglas Mar 29, 2023
d0a2a00
prune the frills
Gurkenglas Mar 29, 2023
0d83086
cut the yaml frill too
Gurkenglas Mar 29, 2023
4c428c6
fix extra \n when non-verbose
Gurkenglas Mar 29, 2023
20386ba
fix spurious ==> on argful run
Gurkenglas Mar 29, 2023
709c256
hardcode oneliner preprompt
Gurkenglas Mar 29, 2023
1c95cd4
figure out what model you can access
Gurkenglas Mar 29, 2023
a25806f
don't destroy that message object we need
Gurkenglas Mar 29, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 0 additions & 31 deletions prompt.txt

This file was deleted.

1 change: 0 additions & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,4 @@ termcolor==2.2.0
colorama==0.4.4
python-dotenv==1.0.0
distro==1.7.0
PyYAML==5.4.1
pyperclip==1.8.2
249 changes: 35 additions & 214 deletions yolo.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,218 +4,39 @@
# Copyright (c) 2023 wunderwuzzi23
# Greetings from Seattle!

import os
import platform
import openai
import sys
import subprocess
import dotenv
import distro
import yaml
import pyperclip

from termcolor import colored
from colorama import init

def read_config() -> dict:
    """Load and parse the yolo.yaml config that sits next to this script.

    Returns the parsed YAML mapping (keys used elsewhere in this file:
    model, temperature, max_tokens, safety, openai_api_key).
    """

    ## Find the executing directory (e.g. in case an alias is set)
    ## So we can find the config file
    yolo_path = os.path.abspath(__file__)
    prompt_path = os.path.dirname(yolo_path)

    config_file = os.path.join(prompt_path, "yolo.yaml")
    with open(config_file, 'r') as file:
        return yaml.safe_load(file)

# Construct the prompt
def get_full_prompt(user_prompt, shell):
    """Build the full model prompt from prompt.txt plus the user's question.

    Substitutes {shell} and {os} placeholders in the template, appends the
    user's prompt, and ensures the result ends in '?' or '.'.
    """

    ## Find the executing directory (e.g. in case an alias is set)
    ## So we can find the prompt.txt file
    yolo_path = os.path.abspath(__file__)
    prompt_path = os.path.dirname(yolo_path)

    ## Load the prompt and prep it
    prompt_file = os.path.join(prompt_path, "prompt.txt")
    # Use a context manager so the file handle is closed deterministically
    # (the old open(...).read() leaked the handle until GC).
    with open(prompt_file, "r") as file:
        pre_prompt = file.read()
    pre_prompt = pre_prompt.replace("{shell}", shell)
    pre_prompt = pre_prompt.replace("{os}", get_os_friendly_name())
    prompt = pre_prompt + user_prompt

    # be nice and make it a question
    if prompt[-1:] != "?" and prompt[-1:] != ".":
        prompt += "?"

    return prompt

def print_usage():
    """Show the version banner, usage line, and current yolo.yaml settings."""
    print("Yolo v0.2 - by @wunderwuzzi23")
    print()
    print("Usage: yolo [-a] list the current directory information")
    print("Argument: -a: Prompt the user before running the command (only useful when safety is off)")
    print()

    # Render the safety setting as on/off for display.
    safety_state = "off" if config["safety"] != True else "on"

    print("Current configuration per yolo.yaml:")
    print(f"* Model : {config['model']}")
    print(f"* Temperature : {config['temperature']}")
    print(f"* Max. Tokens : {config['max_tokens']}")
    print(f"* Safety : {safety_state}")


def get_os_friendly_name():
    """Return a human-friendly OS label, e.g. "Linux/<distro>" or "Darwin/macOS"."""
    system = platform.system()
    if system == "Linux":
        return "Linux/" + distro.name(pretty=True)
    if system == "Darwin":
        return "Darwin/macOS"
    # "Windows" and any unrecognized platform are reported as-is.
    return system


def set_api_key():
    """Resolve the OpenAI API key from one of three sources, in order:

    1. a .env file / the OPENAI_API_KEY environment variable,
    2. a ~/.openai.apikey file,
    3. the openai_api_key entry in yolo.yaml (global `config`).
    """
    # Two options for the user to specify their openai api key.
    #1. Place a ".env" file in same directory as this with the line:
    #   OPENAI_API_KEY="<yourkey>"
    #   or do `export OPENAI_API_KEY=<yourkey>` before use
    dotenv.load_dotenv()
    openai.api_key = os.getenv("OPENAI_API_KEY")

    #2. Place a ".openai.apikey" in the home directory that holds the line:
    #   <yourkey>
    #   Note: This option will likely be removed in the future
    if not openai.api_key:  #If statement to avoid "invalid filepath" error
        home_path = os.path.expanduser("~")
        openai.api_key_path = os.path.join(home_path,".openai.apikey")

    #3. Final option is the key might be in the yolo.yaml config file
    #   openai_apikey: <yourkey>
    if not openai.api_key:
        openai.api_key = config["openai_api_key"]

if __name__ == "__main__":

config = read_config()
set_api_key()

# Unix based SHELL (/bin/bash, /bin/zsh), otherwise assuming it's Windows
shell = os.environ.get("SHELL", "powershell.exe")

command_start_idx = 1 # Question starts at which argv index?
ask_flag = False # safety switch -a command line argument
yolo = "" # user's answer to safety switch (-a) question y/n

# Parse arguments and make sure we have at least a single word
if len(sys.argv) < 2:
print_usage()
sys.exit(-1)

# Safety switch via argument -a (local override of global setting)
# Force Y/n questions before running the command
if sys.argv[1] == "-a":
ask_flag = True
command_start_idx = 2

# To allow easy/natural use we don't require the input to be a
# single string. So, the user can just type yolo what is my name?
# without having to put the question between ''
arguments = sys.argv[command_start_idx:]
user_prompt = " ".join(arguments)

def call_open_ai(query):
    """Send the user's query to the ChatCompletion API and return the reply text.

    Exits with an error when the query is empty. Reads `shell` and `config`
    from the enclosing scope.
    """
    # do we have a prompt from the user?
    if query == "":
        print("No user prompt specified.")
        sys.exit(-1)

    # Load the correct prompt based on Shell and OS and append the user's prompt
    prompt = get_full_prompt(query, shell)

    # Make the first LINE the system prompt.
    # (The previous code used prompt[1], which is the second *character*
    # of the string, not the first line.)
    system_prompt = prompt.splitlines()[0]

    # Call the ChatGPT API
    response = openai.ChatCompletion.create(
        model=config["model"],
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt}
        ],
        temperature=config["temperature"],
        max_tokens=config["max_tokens"],
    )

    return response.choices[0].message.content.strip()


#Enable color output on Windows using colorama
init()

def check_for_issue(response):
    """Exit with an error if the model reply reads like a refusal instead of a command."""
    refusal_markers = ("sorry", "i'm sorry", "the question is not clear", "i'm", "i am")
    lowered = response.lower()
    if any(lowered.startswith(marker) for marker in refusal_markers):
        print(colored("There was an issue: "+response, 'red'))
        sys.exit(-1)

def check_for_markdown(response):
    """Exit if the reply contains a fenced markdown block rather than a bare command.

    The old check `response.count("```", 2)` only counted fences starting at
    index 2, so a fence at the very start of a short reply could be missed;
    a plain substring test covers every position.
    """
    # odd corner case, sometimes ChatCompletion returns markdown
    if "```" in response:
        print(colored("The proposed command contains markdown, so I did not execute the response directly: \n", 'red')+response)
        sys.exit(-1)

def missing_posix_display():
    """Return True when no X display is available (clipboard copy would fail).

    Reads the DISPLAY environment variable directly instead of shelling out
    to `echo $DISPLAY` with shell=True — same result (unset or empty means
    no display), without spawning a subprocess.
    """
    return not os.environ.get("DISPLAY")


def prompt_user_input(response):
    """Show the proposed command and, when safety requires it, ask for confirmation.

    Returns the user's raw answer, or "" (treated as "yes" by the caller)
    when safety is off and -a was not given.
    """
    print("Command: " + colored(response, 'blue'))
    # config["safety"] is a boolean (it is compared against True elsewhere in
    # this file). The old check `config["safety"] != "off"` compared that bool
    # to a string, so it was always true and safety could never be disabled.
    # TODO(review): confirm yolo.yaml stores a boolean here.
    if config["safety"] or ask_flag:
        prompt_text = "Execute command? [Y]es [n]o [m]odify [c]opy to clipboard ==> "
        # Without an X display the clipboard option cannot work.
        if os.name == "posix" and missing_posix_display():
            prompt_text = "Execute command? [Y]es [n]o [m]odify ==> "
        print(prompt_text, end = '')
        return input()
    # Safety off and no -a: auto-confirm.
    return ""

def evaluate_input(user_input, command):
if user_input.upper() == "Y" or user_input == "":
if shell == "powershell.exe":
subprocess.run([shell, "/c", command], shell=False)
else:
import os, platform, openai, sys, subprocess, distro, pyperclip, termcolor, colorama

colorama.init()
os_name = platform.system()
if os_name == "Linux": os_name += distro.name(pretty=True)
if os_name == "Darwin": os_name += "/macOS"
shell = os.environ.get("SHELL", "powershell.exe")
messages = [{"role": "system", "content": f"Translate to one line of input for the shell {shell} on {os_name}."}]
try:
model = "gpt-4" if any(engine.id == 'gpt-4' for engine in openai.Engine.list().data) else "gpt-3.5-turbo"
except openai.error.AuthenticationError:
print("Set the environment variable OPENAI_API_KEY=<API-KEY>. Don't know how to do that? I'll tell you... given an API key.", file=sys.stderr)
sys.exit(1)
blather = print if len(sys.argv) == 1 else lambda *args, **kwargs: print(*args[1:], **kwargs)
try:
blather("Describe a shell command, or Ctrl-C to exit. ==> ", end = '')
query = input() if len(sys.argv) == 1 else " ".join(sys.argv[1:])
while True:
messages.append({"role": "user", "content": query})
message = openai.ChatCompletion.create(messages=messages, model=model, temperature=0, max_tokens=500).choices[0].message
messages.append(message)
blather("Command: ", termcolor.colored(message.content, 'blue'))
try:
pyperclip.copy(message.content)
blather("Copied command to clipboard.\n", end='')
except:
pass
blather(f"Enter to execute, or continue conversing. ", "==> ", end = '')
query = input()
if not query:
# Unix: /bin/bash /bin/zsh: uses -c both Ubuntu and macOS should work, others might not
subprocess.run([shell, "-c", command], shell=False)

if user_input.upper() == "M":
print("Modify prompt: ", end = '')
modded_query = input()
modded_response = call_open_ai(modded_query)
check_for_issue(modded_response)
check_for_markdown(modded_response)
modded_user_input = prompt_user_input(modded_response)
print()
evaluate_input(modded_user_input, modded_response)

if user_input.upper() == "C":
if os.name == "posix" and missing_posix_display():
return
pyperclip.copy(command)
print("Copied command to clipboard.")

res_command = call_open_ai(user_prompt)
check_for_issue(res_command)
check_for_markdown(res_command)
user_iput = prompt_user_input(res_command)
print()
evaluate_input(user_iput, res_command)
subprocess.run([shell, "/c" if shell == "powershell.exe" else "-c", message.content], shell=False)
print("==> ", end = '')
query = input()
except KeyboardInterrupt:
sys.exit(0)
9 changes: 0 additions & 9 deletions yolo.yaml

This file was deleted.