diff --git a/README.md b/README.md
index 48b4b0f..9bf2986 100644
--- a/README.md
+++ b/README.md
@@ -378,6 +378,10 @@ default:
 * `git_push_force` – adds `--force-with-lease` to a `git push` (may conflict with `git_push_pull`);
 * `rm_root` – adds `--no-preserve-root` to `rm -rf /` command.
 
+The following rule queries the OpenAI ChatGPT API. To enable it, set the `THEFUCK_OPENAI_TOKEN` (or `OPENAI_API_KEY`) environment variable to your OpenAI API key:
+
+* `chatgpt` – asks ChatGPT to suggest a fix for the failed command. Pass `--chatgpt N` to request `N` suggestions (default: 1), or `--chatgpt 0` to disable the rule; queries use the `gpt-3.5-turbo` model with at most 400 tokens per request.
+
 ##### [Back to Contents](#contents)
 
 ## Creating your own rules
diff --git a/install.sh b/install.sh
index 366e4ae..79b1d54 100755
--- a/install.sh
+++ b/install.sh
@@ -1,4 +1,8 @@
-#!/bin/sh
+#!/bin/zsh
 echo "Installation script is deprecated!"
 echo "For installation instruction please visit https://github.com/nvbn/thefuck"
 
+echo ""
+echo "To install from source, run:"
+echo "python setup.py build && python setup.py install"
+python setup.py build && python setup.py install
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 33ae7a9..e85ac80 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,3 +9,4 @@ pypandoc
 pytest-benchmark
 pytest-docker-pexpect
 twine
+openai
diff --git a/setup.py b/setup.py
index 1105aef..9e981f2 100755
--- a/setup.py
+++ b/setup.py
@@ -31,7 +31,7 @@ elif (3, 0) < version < (3, 5):
           ' ({}.{} detected).'.format(*version))
     sys.exit(-1)
 
-VERSION = '3.32'
+VERSION = '3.33'
 
 install_requires = ['psutil', 'colorama', 'six']
 extras_require = {':python_version<"3.4"': ['pathlib2'],
diff --git a/tests/test_argument_parser.py b/tests/test_argument_parser.py
index e5f1f44..19fff10 100644
--- a/tests/test_argument_parser.py
+++ b/tests/test_argument_parser.py
@@ -8,7 +8,7 @@ def _args(**override):
             'help': False, 'version': False,
             'debug': False, 'force_command': None, 'repeat': False,
             'enable_experimental_instant_mode': False,
-            'shell_logger': None}
+            'shell_logger': None, 'chatgpt': 1}
     args.update(override)
     return args
 
diff --git a/tests/test_conf.py b/tests/test_conf.py
index e03473a..98509d6 100644
--- a/tests/test_conf.py
+++ b/tests/test_conf.py
@@ -75,7 +75,7 @@ class TestSettingsFromEnv(object):
 
 
 def test_settings_from_args(settings):
-    settings.init(Mock(yes=True, debug=True, repeat=True))
+    settings.init(Mock(yes=True, debug=True, repeat=True, chatgpt=0, chatgpt_token=100))
     assert not settings.require_confirmation
     assert settings.debug
     assert settings.repeat
diff --git a/thefuck/argument_parser.py b/thefuck/argument_parser.py
index 69c247f..c75c408 100644
--- a/thefuck/argument_parser.py
+++ b/thefuck/argument_parser.py
@@ -37,6 +37,25 @@ class Parser(object):
             '-h', '--help',
             action='store_true',
             help='show this help message and exit')
+        self._parser.add_argument(
+            '-c', '--chatgpt',
+            type=int,
+            default=1,
+            help='number of ChatGPT suggestions; set to 0 to disable ChatGPT'
+        )
+        # todo Secret parameters only revealed with `thefuck --help --chatgpt`
+        # self._parser.add_argument(
+        #     '-t', '--chatgpt-token',
+        #     type=int,
+        #     default=400,
+        #     help='maximum ChatGPT tokens per query'
+        # )
+        # self._parser.add_argument(
+        #     '-m', '--chatgpt-model',
+        #     type=str,
+        #     default="gpt-3.5-turbo",
+        #     help='ChatGPT model'
+        # )
         self._add_conflicting_arguments()
         self._parser.add_argument(
             '-d', '--debug',
diff --git a/thefuck/conf.py b/thefuck/conf.py
index 611ec84..fb71054 100644
--- a/thefuck/conf.py
+++ b/thefuck/conf.py
@@ -134,6 +134,10 @@ class Settings(dict):
             from_args['debug'] = args.debug
         if args.repeat:
             from_args['repeat'] = args.repeat
+
+        from_args['chatgpt'] = args.chatgpt if args.chatgpt >= 0 else 1
+        from_args['chatgpt_token'] = getattr(args, 'chatgpt_token', 400)
+        from_args['chatgpt_model'] = getattr(args, 'chatgpt_model', None) or "gpt-3.5-turbo"
         return from_args
 
 
diff --git a/thefuck/rules/chatgpt.py b/thefuck/rules/chatgpt.py
new file mode 100644
index 0000000..141e492
--- /dev/null
+++ b/thefuck/rules/chatgpt.py
@@ -0,0 +1,84 @@
+import platform
+import re
+import os
+try:
+    import openai
+except ImportError:  # openai is optional; the rule stays disabled without it
+    openai = None
+from thefuck import logs
+from thefuck.conf import settings
+
+
+def _check_chatgpt(api_key: str = None) -> bool:
+    if openai is None:
+        return False
+    openai.api_key = os.getenv("THEFUCK_OPENAI_TOKEN") or os.getenv("OPENAI_API_KEY")
+    if settings.get("chatgpt", 0) > 0 and (api_key or openai.api_key):
+        return True
+    return False
+
+
+enabled_by_default = _check_chatgpt()
+logs.debug(f"ChatGPT enabled: {enabled_by_default}")
+
+MAX_NUMBER = settings.get("chatgpt") or 1
+MAX_TOKENS = settings.get("chatgpt_token") or 400
+MODEL = settings.get("chatgpt_model") or "gpt-3.5-turbo"
+
+
+def match(command):
+    return _check_chatgpt()
+
+
+def get_new_command(command):
+    result = _query_chatgpt(
+        command=command.script,
+        error=command.output,
+        explanation=False,
+    )
+    logs.debug(f"chatgpt result: {result}")
+    return result
+
+
+def _query_chatgpt(
+    command: str,
+    error: str,
+    explanation: bool,  # can be used to include explanations but not used yet
+    number: int = MAX_NUMBER,
+    model: str = MODEL,
+    max_tokens: int = MAX_TOKENS,
+    api_key: str = None,
+):
+    if openai is None:
+        return []
+    if api_key:
+        openai.api_key = api_key
+    elif openai.api_key is None:
+        return []
+
+    os_env = platform.platform()
+    prompt = f"""
+OS: `{os_env}`
+Command: `{command}`
+Error: `{error}`
+Suggest {"one command" if number == 1 else f"{number} commands"} {"with" if explanation else "without"} explanation.
+Commands:"""
+
+    logs.debug("chatgpt: " + prompt)
+
+    try:
+        response = openai.ChatCompletion.create(
+            model=model,
+            messages=[
+                {"role": "user", "content": prompt},
+            ],
+            max_tokens=max_tokens,
+        )
+        content = response["choices"][0]["message"]["content"]
+        contents = [item.strip() for item in content.split("\n") if item.strip() != ""]
+        pattern = re.compile(r"^\d+\. *")
+        cleaned_contents = [re.sub(pattern, "", item).strip('`') for item in contents]
+        return cleaned_contents
+    except Exception as e:
+        logs.debug(f"chatgpt error: {e}")
+        return []
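
A minimal sketch of how the new rule can be exercised outside the normal `fuck` flow, which may help when reviewing this change. It assumes this branch is installed, the `openai` package is available, and `THEFUCK_OPENAI_TOKEN` (or `OPENAI_API_KEY`) holds a valid key; the failing command and its output below are made up for illustration:

    from thefuck.conf import settings
    from thefuck.types import Command

    # Normally thefuck's entry point fills these from the parsed CLI arguments;
    # for a standalone run we set the ChatGPT-related keys by hand.
    settings.update(chatgpt=3, chatgpt_token=400, chatgpt_model="gpt-3.5-turbo")

    # Imported after the settings are in place so the rule's module-level
    # defaults (MAX_NUMBER, MAX_TOKENS, MODEL) pick them up.
    from thefuck.rules import chatgpt

    # A made-up failing command and the error output it produced.
    cmd = Command(script="git pus", output="git: 'pus' is not a git command. See 'git --help'.")

    if chatgpt.match(cmd):                   # True only when an API key is configured
        print(chatgpt.get_new_command(cmd))  # e.g. ['git push'] - up to 3 suggestions

From the shell the same path should be reachable by exporting the token and running `fuck --chatgpt 3` (or just `fuck`, since the option defaults to 1) after a failing command.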