From 9d86e30f696dfb142a8339ffb2c45cad6723a076 Mon Sep 17 00:00:00 2001
From: Jeff Epler <jepler@gmail.com>
Date: Mon, 2 Oct 2023 03:57:58 -0500
Subject: [PATCH 1/4] Dynamically load requirements.txt in pyproject.toml

This allows use of `pip install -r requirements.txt` by users who prefer
that way of trying out the package, while not affecting my typical usage.

This does create a dependency on a beta feature of setuptools, so it
could be fragile.

Closes: #5
---
 pyproject.toml       | 16 +++-------------
 requirements-dev.txt |  2 +-
 requirements.txt     | 13 +++++++++++++
 3 files changed, 17 insertions(+), 14 deletions(-)
 create mode 100644 requirements.txt

diff --git a/pyproject.toml b/pyproject.toml
index 116c81a..f600f03 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@
 
 [build-system]
 requires = [
-    "setuptools>=61",
+    "setuptools>=68.2.2",
     "setuptools_scm[toml]>=6.0",
 ]
 build-backend = "setuptools.build_meta"
@@ -19,18 +19,7 @@ where = ["src"]
 name="chap"
 authors = [{name = "Jeff Epler", email = "jepler@gmail.com"}]
 description = "Interact with the OpenAI ChatGPT API (and other text generators)"
-dynamic = ["readme","version"]
-dependencies = [
-    "click",
-    "dataclasses_json",
-    "httpx",
-    "lorem-text",
-    "platformdirs",
-    "simple_parsing",
-    "textual>=0.18.0",
-    "tiktoken",
-    "websockets",
-]
+dynamic = ["readme","version","dependencies"]
 classifiers = [
     "Programming Language :: Python :: 3",
     "Programming Language :: Python :: 3.9",
@@ -50,3 +39,4 @@ chap = "chap.__main__:main"
 write_to = "src/chap/__version__.py"
 [tool.setuptools.dynamic]
 readme = {file = ["README.md"], content-type="text/markdown"}
+dependencies = {file = "requirements.txt"}
diff --git a/requirements-dev.txt b/requirements-dev.txt
index cba52e8..2f5d23c 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -3,6 +3,6 @@
 # SPDX-License-Identifier: MIT
 
 build
-setuptools>=45
+setuptools>=68.2.2
 twine
 wheel
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..9dd6779
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,13 @@
+# SPDX-FileCopyrightText: 2023 Jeff Epler
+#
+# SPDX-License-Identifier: Unlicense
+
+click
+dataclasses_json
+httpx
+lorem-text
+platformdirs
+simple_parsing
+textual>=0.18.0
+tiktoken
+websockets

From 6298f5cac72a58e8ad1888016f56909cfacc02ed Mon Sep 17 00:00:00 2001
From: Jeff Epler <jepler@gmail.com>
Date: Sat, 7 Oct 2023 08:19:25 +0100
Subject: [PATCH 2/4] this script allows python -mchap in top-level to work

---
 chap.py | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
 create mode 100755 chap.py

diff --git a/chap.py b/chap.py
new file mode 100755
index 0000000..c597cde
--- /dev/null
+++ b/chap.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python3
+# SPDX-FileCopyrightText: 2023 Jeff Epler
+#
+# SPDX-License-Identifier: MIT
+
+import pathlib
+import sys
+
+sys.path[0] = str(pathlib.Path(__file__).parent / "src")
+
+if __name__ == "__main__":
+    # pylint: disable=import-error,no-name-in-module
+    from chap.core import main
+
+    main()
+else:
+    raise ImportError(
+        "this script exists to facilitate running 'python -mchap' in the top directory; it should not be imported"
+    )

From 6af267cb4730cbcbfcafdde1c6609c38f9744909 Mon Sep 17 00:00:00 2001
From: Jeff Epler <jepler@gmail.com>
Date: Sat, 7 Oct 2023 09:27:49 +0100
Subject: [PATCH 3/4] update README.md

---
 README.md | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index c3d8671..4825080 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ SPDX-License-Identifier: MIT
 [![Release chap](https://github.com/jepler/chap/actions/workflows/release.yml/badge.svg?event=release)](https://github.com/jepler/chap/actions/workflows/release.yml)
 [![PyPI](https://img.shields.io/pypi/v/chap)](https://pypi.org/project/chap/)
 
-# chap - A Python interface to chatgpt, including a terminal user interface (tui)
+# chap - A Python interface to chatgpt and other LLMs, including a terminal user interface (tui)
 
 ![Chap screencast](https://github.com/jepler/chap/blob/main/chap.gif)
 
@@ -15,24 +15,34 @@ Chap is developed on Linux with Python 3.11. Due to use of the `list[int]`
 style of type hints, it is known not to work on 3.8 and older; the target
 minimum Python version is 3.9 (debian oldstable).
 
-## installation
+## Installation
 
-Install with e.g., `pipx install chap`
+Install with e.g., `pipx install chap`, or `pip install chap` in a virtual environment.
 
-## configuration
+## Installation for development
+
+Install in developer mode e.g., with `pip install -e .`.
+In this mode, you get the "chap" commandline program installed but can edit the source files in place.
+This is the [recommended practice per PyPA](https://setuptools.pypa.io/en/latest/userguide/development_mode.html).
+
+A shim script `chap.py` is included so that the older development style of `pip install -r requirements.txt` + `python chap.py` (or `./chap.py`) functions as well.
+
+## Configuration
 
 Put your OpenAI API key in the platform configuration directory for chap, e.g.,
 on linux/unix systems at `~/.config/chap/openai_api_key`
 
-## commandline usage
+## Commandline usage
 
  * `chap ask "What advice would you give a 20th century human visiting the 21st century for the first time?"`
 
- * `chap render --last`
+ * `chap render --last` / `chap cat --last`
 
  * `chap import chatgpt-style-chatlog.json` (for files from pionxzh/chatgpt-exporter)
+
+ * `chap grep needle`
 
-## interactive terminal usage
+## Interactive terminal usage
 
  * chap tui
 
@@ -48,14 +58,17 @@ ## Sessions & Commandline Parameters
 You can set the "system message" with the `-S` flag.
 You can select the text generating backend with the `-b` flag:
  * openai\_chatgpt: the default, paid API, best quality results
- * llama_cpp: Works with (llama.cpp's http server)[https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md] and can run locally with various models. Set the server URL with `-B url:...`.
- * textgen: Works with https://github.com/oobabooga/text-generation-webui and can run locally with various models. Needs the server URL in *$configuration_directory/textgen\_url*.
+ * llama\_cpp: Works with (llama.cpp's http server)[https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md] and can run locally with various models,
+   though it is (optimized for models that use the llama2-style prompting)[https://huggingface.co/blog/llama2#how-to-prompt-llama-2].
+   Set the server URL with `-B url:...`.
+ * textgen: Works with https://github.com/oobabooga/text-generation-webui and can run locally with various models.
+   Needs the server URL in *$configuration_directory/textgen\_url*.
  * lorem: local non-AI lorem generator for testing
 
 ## Environment variables
 
 The backend can be set with `CHAP_BACKEND`.
 Backend settings can be set with `CHAP_<backend_name>_<parameter_name>`, with `backend_name` and `parameter_name` all in caps.
-For instance, `CHAP_LLAMA_CPP_URL=http://server.local:8080/completion` changes the default server URL for the llama_cpp back-end. 
+For instance, `CHAP_LLAMA_CPP_URL=http://server.local:8080/completion` changes the default server URL for the llama\_cpp back-end.
 
 ## Importing from ChatGPT

From 6b201e6a49dde8e563f999e3fdda59f1a7723553 Mon Sep 17 00:00:00 2001
From: Jeff Epler <jepler@gmail.com>
Date: Sat, 7 Oct 2023 09:29:33 +0100
Subject: [PATCH 4/4] fix markdown

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 4825080..81f9381 100644
--- a/README.md
+++ b/README.md
@@ -59,8 +59,8 @@ You can set the "system message" with the `-S` flag.
 You can select the text generating backend with the `-b` flag:
  * openai\_chatgpt: the default, paid API, best quality results
- * llama\_cpp: Works with (llama.cpp's http server)[https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md] and can run locally with various models,
-   though it is (optimized for models that use the llama2-style prompting)[https://huggingface.co/blog/llama2#how-to-prompt-llama-2].
+ * llama\_cpp: Works with [llama.cpp's http server](https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md) and can run locally with various models,
+   though it is [optimized for models that use the llama2-style prompting](https://huggingface.co/blog/llama2#how-to-prompt-llama-2).
    Set the server URL with `-B url:...`.
  * textgen: Works with https://github.com/oobabooga/text-generation-webui and can run locally with various models.
    Needs the server URL in *$configuration_directory/textgen\_url*.
  * lorem: local non-AI lorem generator for testing
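
A quick way to confirm that the dynamic-metadata change from PATCH 1 took
effect is to ask the installed distribution for its declared dependencies,
which should now mirror requirements.txt. This is only a sketch for local
verification (the helper below is hypothetical, not part of this series),
assuming `pip install .` or `pip install -e .` has already been run in
this tree:

    # check_deps.py - hypothetical local helper, not part of this series
    from importlib.metadata import requires

    # requires() returns the Requires-Dist entries recorded in the
    # installed "chap" distribution's metadata, or None if none exist.
    for req in requires("chap") or []:
        print(req)  # expect click, dataclasses_json, ..., textual>=0.18.0

If the list comes back empty, the dependencies were not folded into the
built metadata, which is the fragility the PATCH 1 commit message warns
about for the beta setuptools feature.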