From 9572b4a465142e9f9c92e6086eb97a12683887a2 Mon Sep 17 00:00:00 2001
From: zipped <100anonyo@gmail.com>
Date: Mon, 22 Apr 2024 00:48:04 +0000
Subject: [PATCH] update readme

---
 .gitignore     |  2 ++
 README.md      | 27 ++++++++++++++++++++++++++-
 pyproject.toml |  4 ++--
 src/cria.py    | 25 +++++++++++++++----------
 4 files changed, 45 insertions(+), 13 deletions(-)

diff --git a/.gitignore b/.gitignore
index a75c9fb..db76ddc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
+.vscode
 .venv
 dist
+
 **__pycache__/
diff --git a/README.md b/README.md
index 937d476..5137e03 100644
--- a/README.md
+++ b/README.md
@@ -30,12 +30,13 @@ Cria is a library for programatically running Large Language Models through Pyth
   - [Closing](#closing)
   - [Message History](#message-history)
   - [Multiple Models and Parallel Conversations](#multiple-models-and-parallel-conversations)
+  - [Running Standalone](#running-standalone)
 - [Contributing](#contributing)
 - [License](#license)
 
 ## Quickstart
 
-Running Cria is easy, after installation, you need just five lines of code.
+Running Cria is easy. After installation, you need just five lines of code — no configurations, no manual downloads, and no servers to worry about.
 
 ```python
 import cria
@@ -142,6 +143,30 @@ response = ai.chat(prompt, stream=False)
 print(response) # I apologize, but I don't have any information about "him" because the conversation just started...
 ```
 
+You can also create a custom message history, and pass in your own context.
+
+```python
+context = "Our AI system employed a hybrid approach combining reinforcement learning and generative adversarial networks (GANs) to optimize the decision-making..."
+messages = [
+    {"role": "system", "content": "You are a technical documentation writer"},
+    {"role": "user", "content": context},
+]
+
+prompt = "Write some documentation using the text I gave you."
+for chunk in ai.chat(messages=messages, prompt=prompt):
+    print(chunk, end="") # AI System Optimization: Hybrid Approach Combining Reinforcement Learning and...
+```
+
+In the example, instructions are given to the LLM as the system, and context is given as the user, before the user prompts. You can use any mixture of roles to tailor the LLM to your liking.
+
+The available roles for messages are:
+
+- `user` - Pass prompts as the user.
+- `system` - Give instructions as the system.
+- `assistant` - Act as the AI assistant yourself, and give the LLM lines.
+
+The `prompt` parameter will always be appended to messages under the `user` role. To override this, you can choose to pass in nothing for `prompt`.
+
 ### Multiple Models and Parallel Conversations
 
 If you are running multiple models or parallel conversations, the `Model` class is also available. This is recommended for most usecases.
diff --git a/pyproject.toml b/pyproject.toml
index 1d38a40..14fbad8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "cria"
-version = "1.4.0"
+version = "1.4.1"
 authors = [{ name = "leftmove", email = "100anonyo@gmail.com" }]
 description = "Run AI locally with as little friction as possible"
 readme = "README.md"
@@ -17,7 +17,7 @@ Issues = "https://github.com/leftmove/cria/issues"
 
 [tool.poetry]
 name = "cria"
-version = "1.4.0"
+version = "1.4.1"
 description = "Run AI locally with as little friction as possible."
 authors = ["leftmove"]
 license = "MIT"
diff --git a/src/cria.py b/src/cria.py
index d56c80e..a78b250 100644
--- a/src/cria.py
+++ b/src/cria.py
@@ -6,11 +6,10 @@
 import subprocess
 import time
 
+from httpx import ConnectError, ReadError
 from ollama._client import Client as OllamaClient
 import ollama
 
-from httpx import ConnectError, ReadError
-
 DEFAULT_MODEL = "llama3:8b"
 DEFAULT_MESSAGE_HISTORY = [
     {"role": "system", "content": "You are a helpful AI assistant."}
@@ -76,19 +75,25 @@ def chat_stream(self, messages):
 
     def chat(
         self,
-        prompt: str,
+        prompt: Optional[str] = None,
         messages: Optional[list] = DEFAULT_MESSAGE_HISTORY,
         stream: Optional[bool] = True,
     ) -> str:
         model = self.model
         ai = ollama
 
-        messages = getattr(
-            self,
-            "messages",
-            messages,
-        )
-        messages.append({"role": "user", "content": prompt})
+        if not prompt and not messages:
+            raise ValueError("You must pass in a prompt.")
+
+        if messages == DEFAULT_MESSAGE_HISTORY:
+            messages = getattr(
+                self,
+                "messages",
+                messages,
+            )
+
+        if prompt:
+            messages.append({"role": "user", "content": prompt})
 
         if stream:
             return self.chat_stream(messages)
@@ -218,7 +223,7 @@ def output(self):
                 "You must pass in capture_ouput as True to capture output."
             )
 
-        return iter(lambda: ollama_subprocess.stdout.read(1), "")
+        return iter(c for c in iter(lambda: ollama_subprocess.stdout.read(1), b""))
 
     def close(self):
         llm = self.llm
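
Below is a minimal usage sketch of the optional-`prompt` behavior this patch documents and implements. It is not part of the patch itself: it assumes the cria package from this repository is installed and an Ollama model is available locally, `cria.Cria()` and `close()` follow the repository's existing quickstart, and the messages and streamed output are purely illustrative.

```python
# Sketch only: exercises chat() with a custom message history and no prompt,
# which the patched signature (prompt: Optional[str] = None) now allows.
import cria

ai = cria.Cria()

messages = [
    {"role": "system", "content": "You are a technical documentation writer"},
    {"role": "user", "content": "Summarize the hybrid RL + GAN approach in two sentences."},
]

# With a custom history, prompt can be omitted entirely; the model responds
# to the conversation as given instead of to an appended user prompt.
for chunk in ai.chat(messages=messages):
    print(chunk, end="")

# Calling chat() with neither prompt nor messages raises
# ValueError("You must pass in a prompt.")

ai.close()
```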