Skip to content

Commit

Permalink
add support for load and save functions
Browse files Browse the repository at this point in the history
  • Loading branch information
javierluraschi committed Jun 1, 2024
1 parent 7aa6168 commit 1484cdd
Show file tree
Hide file tree
Showing 16 changed files with 126 additions and 93 deletions.
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
# Changelog

## v2.1.8

- Add support for load, save functions

## v2.0.0

- Create and deploy projects
2 changes: 1 addition & 1 deletion python/README.md
Original file line number Diff line number Diff line change
@@ -1 +1 @@
See [../README.md](../README.md)
See [../README.md](../README.md) and [CONTRIBUTING.md](CONTRIBUTING.md)
3 changes: 2 additions & 1 deletion python/hal9/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
from hal9.create import create
from hal9.run import run
from hal9.deploy import deploy
from hal9.deploy import deploy
from hal9.iobind import load, save
44 changes: 22 additions & 22 deletions python/hal9/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,49 +6,49 @@

@click.group()
def cli():
"""
Create and Deploy Generative Applications
"""
Create and Deploy Generative Applications
Use this tool to create apps from templates that use Generative AI,
run them locally and deploy them to the cloud.
"""
pass
Use this tool to create apps from templates that use Generative AI,
run them locally and deploy them to the cloud.
"""
pass

@click.command()
@click.argument('path')
def create(path :str):
"""
Create Project
"""
Create Project
PATH: The path for the new project. Required argument.
"""
api_create(path, "openai")
PATH: The path for the new project. Required argument.
"""
api_create(path, "openai")

@click.command()
@click.argument('path')
def run(path :str):
"""
Run Project
"""
Run Project
PATH: The path to the project. Required argument.
"""
api_run(path)
PATH: The path to the project. Required argument.
"""
api_run(path)

@click.command()
@click.argument('path')
@click.option('--target', default="hal9", help='Deployment target')
@click.option('--url', default="https://api.hal9.com", help='Deployment url')
def deploy(path :str, target :str, url :str):
"""
Deploy Project
"""
Deploy Project
PATH: The path to the project. Required argument.
"""
api_deploy(path, target, url)
PATH: The path to the project. Required argument.
"""
api_deploy(path, target, url)

cli.add_command(create)
cli.add_command(run)
cli.add_command(deploy)

if __name__ == "__main__":
cli()
cli()
36 changes: 18 additions & 18 deletions python/hal9/create.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,26 +3,26 @@
from pathlib import Path

def create(path :str, template :str) -> str:
"""Create an application
"""Create an application
Parameters
----------
path : str
Path to the application.
template : str
The template to use.
"""
Parameters
----------
path : str
Path to the application.
template : str
The template to use.
"""

package_dir = Path(__file__).parent
template_path = package_dir / "templates" / template
package_dir = Path(__file__).parent
template_path = package_dir / "templates" / template

os.makedirs(path, exist_ok=True)
os.makedirs(path, exist_ok=True)

for item in template_path.iterdir():
dest = Path(path) / item.name
if item.is_dir():
shutil.copytree(item, dest)
else:
shutil.copy2(item, dest)
for item in template_path.iterdir():
dest = Path(path) / item.name
if item.is_dir():
shutil.copytree(item, dest)
else:
shutil.copy2(item, dest)

print(f'Project created!')
print(f'Project created!')
28 changes: 14 additions & 14 deletions python/hal9/deploy.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,22 +2,22 @@
from hal9.targets.hal9 import deploy as deploy_hal9

targets = {
'docker': deploy_docker,
'hal9': deploy_hal9,
'docker': deploy_docker,
'hal9': deploy_hal9,
}

def deploy(path :str, target :str, url :str) -> str:
"""Deploy an application
"""Deploy an application
Parameters
----------
path : str
Path to the application.
target : str
The deployment target, defaults to 'hal9.com'.
"""
Parameters
----------
path : str
Path to the application.
target : str
The deployment target, defaults to 'hal9.com'.
"""

if target in targets:
targets[target](path, url)
else:
raise Exception(f"Deployment target '{target}' is unsupported.")
if target in targets:
targets[target](path, url)
else:
raise Exception(f"Deployment target '{target}' is unsupported.")
13 changes: 13 additions & 0 deletions python/hal9/iobind.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
import json
from pathlib import Path

def load(file, default):
    """Load previously saved JSON contents, or a default.

    Parameters
    ----------
    file : str
        Base name of the file; ".json" is appended automatically.
    default
        Value to return when the file does not exist yet.

    Returns
    -------
    The parsed JSON contents of `file + ".json"` if it exists,
    otherwise `default`.
    """
    # NOTE(review): requires `from pathlib import Path` at module top —
    # the original file only imported json, which raises NameError here.
    file_path = Path(file + ".json")
    if file_path.exists():
        contents = json.loads(file_path.read_text())
    else:
        contents = default
    return contents

def save(file, contents):
    """Persist contents as pretty-printed JSON.

    Parameters
    ----------
    file : str
        Base name of the file; ".json" is appended automatically.
    contents
        Any JSON-serializable value; written with 2-space indentation
        so the file round-trips through `load`.
    """
    # NOTE(review): requires `from pathlib import Path` at module top —
    # the original file only imported json, which raises NameError here.
    file_path = Path(file + ".json")
    file_path.write_text(json.dumps(contents, indent=2))
32 changes: 16 additions & 16 deletions python/hal9/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,23 +2,23 @@
from pathlib import Path

def run(path :str) -> str:
"""Run an application
"""Run an application
Parameters
----------
path : str
Path to the application.
"""
Parameters
----------
path : str
Path to the application.
"""

app_path = Path(path) / 'app.py'
app_path = Path(path) / 'app.py'

if not app_path.is_file():
print(f"Failed to run {app_path}")
return
if not app_path.is_file():
print(f"Failed to run {app_path}")
return

try:
command = ['python', str(app_path)]
with subprocess.Popen(command) as proc:
proc.wait()
except Exception as e:
print(f"An error occurred while running app.py: {e}")
try:
command = ['python', str(app_path)]
with subprocess.Popen(command) as proc:
proc.wait()
except Exception as e:
print(f"An error occurred while running app.py: {e}")
2 changes: 1 addition & 1 deletion python/pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "hal9"
version = "2.1.7"
version = "2.1.8"
description = ""
authors = ["Javier Luraschi <[email protected]>"]
readme = "README.md"
Expand Down
2 changes: 2 additions & 0 deletions website/learn/genai/dnn.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@ import Imagenet from './dnn-imagenet.jpg';

# Deep Neural Networks

Learn the origins of AI from the perceptron to deep neural networks, covering key advancements like backpropagation, gradient descent, and the importance of GPUs and large datasets in training complex models.

## The Perceptron

Back in 1958, [Frank Rosenblatt](https://en.wikipedia.org/wiki/Frank_Rosenblatt) believed computers should be able to recognize people and call out their names and instantly translate speech in one language or writing in another language.
Expand Down
2 changes: 2 additions & 0 deletions website/learn/genai/llm.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ import GPT1 from './llm-gpt-1.png';

# Large Language Models

Learn foundational concepts like autoencoders, embeddings, transformers, and attention that lead to the development of GPTs and LLMs that exhibit advanced emergent abilities that generate content effectively.

## Embeddings

An Autoencoder is a type of [DNN](dnn.md) that does not require classification labels but rather, performs unsupervised learning by asking the DNN to classify the inputs of the network as the outputs. For example, when classifying the image of a cat, the pixels of that cat would be the input and the classification label would also be all the pixels of the cat.
Expand Down
2 changes: 2 additions & 0 deletions website/learn/genai/prompts.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@

# Prompt Engineering

Learn the principles of prompt engineering, including techniques for crafting effective text prompts to guide AI behavior, optimizing input to achieve desired outputs, and enhancing the performance of large language models (LLMs) like GPTs in tasks such as question answering, summarization, and content generation.

Under construction, in the meantime, check [A Systematic Survey of Prompt Engineering in Large Language Models: Techniques and Applications](https://arxiv.org/pdf/2402.07927)
6 changes: 6 additions & 0 deletions website/learn/genapps/chatbots.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,12 @@ sidebar_position: 1

# Chatbots

Learn how to create a "Hello World" chatbot using Python and a [LLM](../genai/llm.md) powered chatbot.

:::tip
Deploy your chatbot code to the cloud in one click using the button available on each code block.
:::

## Echo

ChatGPT popularized the chat interface as the application interface to interoperate with LLMs; tools like MidJourney have also popularized it through their use of Discord.
Expand Down
40 changes: 22 additions & 18 deletions website/learn/genapps/conversations.md
Original file line number Diff line number Diff line change
@@ -1,18 +1,19 @@

# Conversations

It is expected from a chatbot to not only reply once, but rather keep an open ended conversation with the user.
It is expected from a chatbot to not only reply once to a message, but rather keep an open ended conversation with the user.

We can accomplish this with something like:
We can think of two main strategies to accomplish this. The first one is to run an infinite amount of question-answer cycles through the use of an infinite loop as follows:

```python
while(True):
echo = input("What's your name? ")
print(f"Hello, {echo}!")
```

However, for various LLMs we will need to pass the conversation history. As a first approach.
The other approach is to run the Python program continuously; however, in both cases we usually need to remember all the previous messages (the **conversation**) that took place to provide more accurate answers for our chatbot.

There are two main strategies we can use to remember the conversation: we can store this in **memory** or store it in our computer **storage**.
## Memory

The easiest way to manage a conversation is to store it in memory. Using OpenAI, we can create a conversational chatbot as follows:
Expand All @@ -21,7 +22,7 @@ The easiest way to manage a conversation is to store it in memory. Using OpenAI,
from openai import OpenAI

messages = [
{"role": "system", "content": "Reply in Spanish"}
{"role": "system", "content": "Spanish replies"}
];

while(True):
Expand All @@ -30,24 +31,27 @@ while(True):
print(completion.choices[0].message.content)
```

## Disk
This method is easy to implement since it only requires adding the infinite loop, but it is not suited for intermittent use since the code might restart and the conversation would be lost.

However, you might want to caome back later to chat in which case, exiting Python will loose the history. We can fix this by storing and loading history as a file:
We often refer to the memory a program needs to remember as the program's **state**, and a computer program that needs to remember state is referred to as **stateful**.

## Storage

To use computing resources efficiently and reliably, we can store the conversation in your computer's storage. Therefore, even if Python restarts or you come back later to interact with your chatbot after a computer restart, your chatbot will behave correctly, remembering the conversation.

To make your chatbot behave correctly even after Python restarts, we can store the conversation messages to files. You can use any Python library to store and load files, but we recommend the `hal9` package convenience functions to `save` and `load` files with ease:

```python
import json
from openai import OpenAI
import hal9 as h9

messages_path = Path("messages.json")
if messages_path.exists():
messages = json.loads(file_path.read_text())
else:
messages = [{ "role": "system", "content": "Reply in Spanish" }];
messages = h9.load("messages", [{ "role": "system", "content": "Spanish replies" }])

while(True):
messages.append({"role": "user", "content": input()})
completion = OpenAI().chat.completions.create(model = "gpt-4", messages = messages)
print(completion.choices[0].message.content)
messages.append({"role": "user", "content": input()})
completion = OpenAI().chat.completions.create(model = "gpt-4", messages = messages)
print(completion.choices[0].message.content)

h9.save("messages", messages)
```

messages_path.write_text(json.dumps(messages, indent=4))
```
In contrast to stateful, a computer program that does not need to remember its state on its own is referred to as **stateless**. The system as a whole, chatbot and file, is indeed stateful; however, giving someone else the job of remembering state (in this case the file) makes programs more reliable and efficient, and is a concept we will use throughout this guide.
1 change: 0 additions & 1 deletion website/src/css/custom.scss
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,6 @@ nav {

p {
text-align: justify;
margin-bottom: 0;
}

.floating {
Expand Down
2 changes: 1 addition & 1 deletion website/src/pages/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -32,5 +32,5 @@ hal9 deploy chatbot

<div class="FloatingWrapper">
<Floating title="Generative AI" href="learn/genai/dnn">Learn <b>Generative AI</b> concepts: neural networks, embeddings, transformers, LLMs and GPTs.</Floating>
<Floating title="Genearative App" href="learn/genapps/chatbots">Learn how to build <b>Geneartive Applications</b> (chatbots and APIs) that make use of Geneartive AI.</Floating>
<Floating title="Generative App" href="learn/genapps/chatbots">Learn how to build <b>Generative Applications</b> (chatbots and APIs) that make use of Generative AI.</Floating>
</div>

0 comments on commit 1484cdd

Please sign in to comment.