diff --git a/src/chap/backends/llama_cpp.py b/src/chap/backends/llama_cpp.py
index 9eb8f38..ba7b9c7 100644
--- a/src/chap/backends/llama_cpp.py
+++ b/src/chap/backends/llama_cpp.py
@@ -101,5 +101,10 @@ A dialog, where USER interacts with AI. AI is helpful, kind, obedient, honest, a
 
 
 def factory() -> Backend:
-    """Uses the llama.cpp completion web API"""
+    """Uses the llama.cpp completion web API
+
+    Note: Consider using the openai-chatgpt backend with a custom URL instead.
+    The llama.cpp server automatically applies common chat templates when used
+    via the openai-chatgpt backend, while with this backend chat templates must
+    be configured manually on the client side."""
     return LlamaCpp()
diff --git a/src/chap/backends/openai_chatgpt.py b/src/chap/backends/openai_chatgpt.py
index 8f78afc..fd0eb9b 100644
--- a/src/chap/backends/openai_chatgpt.py
+++ b/src/chap/backends/openai_chatgpt.py
@@ -73,7 +73,7 @@ class ChatGPT:
     """The approximate greatest number of tokens to send in a request. When
     the session is long, the system prompt and 1 or more of the most recent
     interaction steps are sent."""
     url: str = "https://api.openai.com/v1/chat/completions"
-    """The URL of a chatgpt-pcompatible server's completion endpoint."""
+    """The URL of a chatgpt-compatible server's completion endpoint. Notably, llama.cpp's server is compatible with this backend and can automatically apply common chat templates."""
     temperature: float | None = None
     """The model temperature for sampling"""
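
For illustration (not part of the patch), here is a minimal sketch of the setup the new docstrings recommend: sending an OpenAI-style chat-completions request to a local llama.cpp server instead of api.openai.com. The URL assumes llama.cpp's llama-server listening on its default port 8080; llama.cpp's /v1/chat/completions endpoint accepts the standard request shape and applies the model's chat template server-side, so no client-side template configuration is needed.

import requests

# Assumption: a local llama.cpp server (llama-server) is running on the
# default port 8080 and exposes its OpenAI-compatible endpoint.
url = "http://localhost:8080/v1/chat/completions"

resp = requests.post(
    url,
    json={
        # llama.cpp serves whichever model it loaded; the "model" field
        # is accepted for OpenAI compatibility.
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"},
        ],
        "temperature": 0.7,
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])

chap's openai-chatgpt backend performs the equivalent of this request when its url field is overridden to point at the local server, which is what "using the openai-chatgpt backend with a custom URL" in the llama_cpp docstring refers to.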