diff --git a/.github/workflows/PULL_REQUEST_TEMPLATE.md b/.github/workflows/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..f5b28099
--- /dev/null
+++ b/.github/workflows/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,19 @@
+
+**Features**
+
+
+
+- Describe the first feature added or issue fixed
+- Describe any further features or fixes
+
+**Feature Docs**
+
+
+**Influence**
+
+
+**Result**
+
+
+**Other**
+
\ No newline at end of file
diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml
new file mode 100644
index 00000000..e885e5b1
--- /dev/null
+++ b/.github/workflows/deploy.yml
@@ -0,0 +1,65 @@
+# Sample workflow for building and deploying a VitePress site to GitHub Pages
+#
+name: Deploy VitePress site to Pages
+
+on:
+  # Runs on pushes targeting the `master` branch. Change this to `main` if you're
+  # using the `main` branch as the default branch.
+ push:
+ branches: [master]
+
+ # Allows you to run this workflow manually from the Actions tab
+ workflow_dispatch:
+
+# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+permissions:
+ contents: read
+ pages: write
+ id-token: write
+
+# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
+# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
+concurrency:
+ group: pages
+ cancel-in-progress: false
+
+jobs:
+ # Build job
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0 # Not needed if lastUpdated is not enabled
+ # - uses: pnpm/action-setup@v3 # Uncomment this if you're using pnpm
+ # - uses: oven-sh/setup-bun@v1 # Uncomment this if you're using Bun
+ - name: Setup Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: 20
+ cache: npm # or pnpm / yarn
+ cache-dependency-path: docs/package-lock.json
+ - name: Setup Pages
+ uses: actions/configure-pages@v4
+ - name: Install dependencies
+ run: npm ci --prefix docs # or pnpm install / yarn install / bun install
+ - name: Build with VitePress
+ run: npm run --prefix docs docs:build # or pnpm docs:build / yarn docs:build / bun run docs:build
+ - name: Upload artifact
+ uses: actions/upload-pages-artifact@v3
+ with:
+ path: docs/.vitepress/dist
+
+ # Deployment job
+ deploy:
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+ needs: build
+ runs-on: ubuntu-latest
+ name: Deploy
+ steps:
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v4
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..bbc27129
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,149 @@
+*.vscode
+*.ruff_cache
+workspace
+node_modules
+testbed
+repos
+test_db
+*.db
+output/*
+*.bak
+*.dat
+*.dir
+tmp
+catboost_info
+outputs
+logs
+runs
+langchain
+*.DS_Store
+repos_offline
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..2ff622c1
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,30 @@
+default_stages: [ commit ]
+exclude: '.*__init__\.py$'
+
+# Install
+# 1. pip install l2mac[dev]
+# 2. pre-commit install
+# 3. pre-commit run --all-files # make sure all files are clean
+repos:
+ - repo: https://github.com/pycqa/isort
+ rev: 5.11.5
+ hooks:
+ - id: isort
+ args: ['--profile', 'black']
+ exclude: >-
+ (?x)^(
+ .*__init__\.py$
+ )
+
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ # Ruff version.
+ rev: v0.0.284
+ hooks:
+ - id: ruff
+ args: [ --fix ]
+
+ - repo: https://github.com/psf/black
+ rev: 23.3.0
+ hooks:
+ - id: black
+ args: ['--line-length', '120']
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..ba724df7
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Samuel Holt
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..ac2bea04
--- /dev/null
+++ b/README.md
@@ -0,0 +1,132 @@
+
+# L2MAC: Large Language Model Automatic Computer
+
+
+
+
+
+
+Pioneering the first practical LLM-based general-purpose stored-program automatic computer (von Neumann architecture) framework in an LLM-based multi-agent system, for solving complex tasks by generating extensive and consistent outputs, unbounded by the LLM's fixed context window constraint.
+
+
+
+
+
+
+
+
+## News
+🌟 May 7 - 11th, 2024: We will present L2MAC at the International Conference on Learning Representations (ICLR) 2024. Come meet us at ICLR in Vienna, Austria! Please reach out to me at sih31 (at) cam.ac.uk so we can meet; virtual meetings are welcome as well!
+
+🌟 April. 13, 2024: L2MAC is fully open-sourced with the initial version released.
+
+🚀 Jan. 16, 2024: The paper [L2MAC: Large Language Model Automatic Computer for Extensive Code Generation
+](https://arxiv.org/abs/2310.02003) is accepted for presentation at ICLR 2024!
+
+## The LLM-Automatic Computer (L2MAC) framework excels at solving large, complex tasks: it is state-of-the-art for generating large codebases, and it can even write entire books, all while bypassing the traditional constraints of the LLM's fixed context window.
+
+* The LLM-Automatic Computer can take a **one-line input prompt** and generate an extensive and large output, for example, an entire complex codebase.
+  * Internally, the LLM-Automatic Computer uses a **prompt-program**, which is a series of prompts, each providing an instruction step to execute. Unless explicitly given, the **prompt-program** is self-generated (bootstrapped) and executed. Specifically, each instruction step of the **prompt-program** is loaded into a new LLM agent to execute, whose context is managed by a control unit and which is provided with tools so that it can read and write to a persistent memory, here a file store, containing the final and intermediate outputs. This enables it to automatically execute general-purpose prompt programs to solve complex tasks that require extensive cohesive outputs, where the output is unbounded and not constrained by the LLM's underlying context window.
+
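+For example, a minimal sketch of supplying your own **prompt-program** explicitly rather than letting L2MAC bootstrap one (the list-as-string format follows the `prompt_program` parameter described in the docs; the step wording is illustrative):
+
+```python
+from l2mac import run_l2mac, Domain
+
+# Illustrative three-step prompt program; each step is executed by a fresh
+# LLM agent that reads from and writes to the shared file store.
+prompt_program = [
+    "Write a design document for a CLI blackjack game.",
+    "Implement the game in Python, following the design document.",
+    "Write unit tests for the game logic and fix any failures.",
+]
+output = run_l2mac(
+    "Create a simple playable blackjack cli game",
+    domain=Domain.codebase,
+    prompt_program=str(prompt_program),  # passed as a string in list format
+)
+```
+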
+![BlockDiagram](docs/public/l2mac-block-diagram.png)
+
+LLM-Automatic Computer (L2MAC) instantiation for coding a large complex codebase for an entire application based on a single user prompt. Here we provide L2MAC with additional tools to check for any syntax errors within the code and run any unit tests if they exist, and call this instantiation Code-L2MAC.
+
+
+
+
+
+
diff --git a/docs/generated_examples/url_shortener_web_application/tests/test_app.py b/docs/generated_examples/url_shortener_web_application/tests/test_app.py
new file mode 100644
index 00000000..00eaca8d
--- /dev/null
+++ b/docs/generated_examples/url_shortener_web_application/tests/test_app.py
@@ -0,0 +1,58 @@
+from datetime import datetime, timedelta
+
+import pytest
+from app import app as flask_app
+from app import generate_short_url, urls_db, users_db
+
+
+@pytest.fixture
+def client():
+ flask_app.config["TESTING"] = True
+ with flask_app.test_client() as client:
+ yield client
+
+
+# Test admin dashboard functionality
+def test_admin_access(client):
+ # Attempt to access admin routes without being logged in
+ response = client.get("/admin/urls")
+ assert response.status_code == 401
+
+ # Login as a regular user
+ client.post("/register", data={"username": "regular", "password": "user"})
+ client.post("/login", data={"username": "regular", "password": "user"})
+ response = client.get("/admin/urls")
+ assert response.status_code == 401
+
+ # Login as admin
+ client.post("/login", data={"username": "admin", "password": "admin"})
+ response = client.get("/admin/urls")
+ assert response.status_code == 200
+
+ # Test deleting a URL as admin
+ short_url = generate_short_url()
+ urls_db[short_url] = {"url": "http://example.com", "expiration": datetime.now() + timedelta(days=1)}
+ response = client.delete(f"/admin/delete/url/{short_url}")
+ assert response.status_code == 200
+ assert short_url not in urls_db
+
+ # Test deleting a user as admin
+ response = client.delete("/admin/delete/user/regular")
+ assert response.status_code == 200
+ assert "regular" not in users_db
+
+
+# Test URL expiration functionality
+def test_url_expiration(client):
+ # Create a URL that expires in the past
+ expired_short_url = generate_short_url()
+ urls_db[expired_short_url] = {"url": "http://expired.com", "expiration": datetime.now() - timedelta(days=1)}
+ response = client.get(f"/{expired_short_url}")
+ assert response.status_code == 410
+ assert "URL has expired" in response.get_data(as_text=True)
+
+ # Create a URL that does not expire
+ non_expired_short_url = generate_short_url()
+ urls_db[non_expired_short_url] = {"url": "http://nonexpired.com", "expiration": datetime.now() + timedelta(days=1)}
+ response = client.get(f"/{non_expired_short_url}")
+ assert response.status_code == 302 # Redirect status code
diff --git a/docs/guide/api.md b/docs/guide/api.md
new file mode 100644
index 00000000..e9d06e5f
--- /dev/null
+++ b/docs/guide/api.md
@@ -0,0 +1,58 @@
+# API
+
+## `run_l2mac`
+
+Generate output from the input prompt for the specified domain and settings.
+
+**Parameters:**
+
+- `prompt_task` (str): The input prompt to generate, such as 'Create a playable snake game in PyGame'.
+- `domain` (Domain): Domain to generate. Existing options are 'codebase', 'book'. Default is 'codebase'.
+- `run_tests` (bool): Whether to run self-generated unit-tests when generating code. Default is `False`.
+- `project_name` (Optional[str]): Unique project name, such as 'snakegame'.
+- `steps` (int): Number of internal steps to use when creating the prompt program internally. Default is 10.
+- `prompt_program` (Optional[str]): Path to the prompt program to use, or the prompt program as a string in list format.
+- `prompts_file_path` (Optional[str]): Overrides the existing prompts to be used. Useful when creating a new prompt set for a new task.
+- `tools_enabled` (Optional[str]): List of functions that the agents can use, separated by commas. Defaults to use all tools available.
+- `debugging_level` (DebuggingLevel): Whether to print full context-windows out. Default is `info`.
+- `init_config` (bool): Initialize the configuration file for L2MAC. Default is `False`.
+
+**Examples:**
+
+```bash
+python core.py run_l2mac --prompt_task "Create a simple blog in Django" --domain codebase
+```
+
+Equivalent Python code:
+
+```python
+from l2mac import run_l2mac, Domain
+
+code = run_l2mac("Create a beautiful, playable and simple snake game with pygame. Make the snake and food be aligned to the same 10-pixel grid.", domain=Domain.codebase)
+```
+
+### Utility Functions
+
+#### `generate_codebase`
+
+Helper function to generate output specifically for codebase domain.
+
+#### `generate_book`
+
+Helper function to generate output specifically for book domain.
+
+#### `generate_custom`
+
+Helper function to generate output for custom domains as specified.
+
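+For instance, a minimal sketch of calling these helpers (assuming they accept the same keyword arguments as `run_l2mac`; the prompts are illustrative):
+
+```python
+from l2mac import generate_book, generate_codebase
+
+# Generate a complete codebase; run_tests also runs the self-generated unit tests.
+codebase: dict = generate_codebase(
+    "Create a playable snake game in PyGame",
+    run_tests=True,
+)
+
+# Generate a book, using more internal steps for a longer prompt program.
+book: dict = generate_book(
+    "Write a short recipe book of classic Italian pasta dishes",
+    steps=30,
+)
+```
+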
+## Configuration
+
+To manage configurations, modify the configuration files located at `config/config.yaml`. Initial setup can be triggered using the `--init-config` flag when running commands.
+
+## Logging
+
+Logging is handled via the `l2mac.utils.logging` module. Logs are stored in the `logs` directory within the local folder, as specified in the configuration file.
+
+## Error Handling
+
+Errors are logged with detailed stack traces to assist in debugging. Errors during the generation process are captured and logged, with partial outputs stored if possible.
\ No newline at end of file
diff --git a/docs/guide/contribute/contribute_guide.md b/docs/guide/contribute/contribute_guide.md
new file mode 100644
index 00000000..d5cbf0d2
--- /dev/null
+++ b/docs/guide/contribute/contribute_guide.md
@@ -0,0 +1,88 @@
+# Contribution Guide
+
+We invite developers to join our effort in enhancing L2MAC by contributing to our open-source community. Below are various ways you can contribute to the project:
+
+## How to Contribute
+
+### Code Contributions
+- **Implement Features:** Refer to our [`docs/roadmap.md`](../roadmap) for a list of planned features. Implement these features and submit your pull requests (PRs).
+- **Beyond the Roadmap:** Feel free to contribute new features, bug fixes, use cases, and even code annotations not listed on the roadmap.
+
+### Documentation
+- **Enhance Documentation:** Help improve our documentation by adding tutorials, new exciting use cases, examples, advanced guides, and more.
+- **Contribute to Our Docs Site:** Submit supplementary documentation that supports existing or new features.
+
+### Issues
+- **Report Bugs:** If you find any bugs while using L2MAC, please submit an issue.
+- **Request Features:** Suggest new features that you would like to see implemented.
+- **Community Discussions:** Engage in discussions about implementations and applications of L2MAC by joining our Discord [dev/contributors channel](https://discord.gg/z27CxnwdhY).
+
+## Submitting Pull Requests (PR)
+
+Please use [fork and pull request](https://docs.github.com/en/get-started/exploring-projects-on-github/contributing-to-a-project) to submit updates to the code or documentation.
+
+1. Fork the repository.
+2. Create a new branch for your feature or fix.
+3. Implement your changes.
+4. Ensure your code adheres to the established standards and guidelines.
+5. Submit a pull request to our main branch.
+
+After a PR with more than 10 lines of code is merged, the submitter can join the `L2MAC-dev` group.
+We encourage the submission of small, frequent code contributions. Large pull requests often demand a deeper understanding of context and require more time to review. It would be helpful if you could include additional supporting materials with large submissions.
+
+### Before Submission
+
+Please follow these steps to ensure your code is ready for submission:
+
+- **Unit Tests:** Ensure that all unit tests related to modified code pass successfully. Run the specific command, such as `pytest tests/*`, to execute the tests.
+- **New Code Testing:** If you are adding new code files, ensure they have corresponding unit tests that pass.
+- **Documentation:** All submitted code should include comprehensive `Google Docstring` descriptions and detailed comments explaining the functionality (see the example below).
+
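+For reference, a minimal sketch of the expected `Google Docstring` style (the function itself is purely illustrative):
+
+```python
+def shorten_url(url: str, expires_in_days: int = 30) -> str:
+    """Create a short URL slug for the given address.
+
+    Args:
+        url: The full URL to shorten.
+        expires_in_days: Number of days until the short URL expires.
+
+    Returns:
+        The generated short URL slug.
+    """
+    raise NotImplementedError
+```
+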
+Before committing your code, set up `pre-commit` to check that your code meets our standards:
+
+```bash
+pip3 install pre-commit
+pre-commit install
+pre-commit run --all-files
+```
+
+This will automatically modify your local code to meet our coding standards. Remember to `git add` the changes again after running pre-commit.
+
+### During Submission
+
+Our project uses a `PULL_REQUEST_TEMPLATE` by default. When you submit a pull request, include the following necessary information:
+
+- **Features:** Describe the features added or issues fixed by your PR. Required.
+- **Documentation:** If applicable, link to the corresponding documentation on our docs site. Optional.
+- **Impact:** Discuss any potential impacts this PR may have on the project. Optional.
+- **Results:** Include results from tests or logs that demonstrate the functionality of your changes. Required.
+- **Additional Materials:** Any other supplementary materials can be included here. Optional.
+
+Providing this information helps our reviewers understand your PR better and speeds up the review process.
+
+### After Submission
+
+Once submitted, our default GitHub CI workflow will automatically check your code's compliance and unit tests. If your submission fails, you'll need to make the necessary modifications until it passes. Thus, running `pre-commit` checks and reviewing test results beforehand can greatly enhance the efficiency of this process.
+
+## Issues
+
+When submitting issues, you can include:
+
+- **Bug Reports:** Use the `show_me_the_bug` template to provide a detailed description of the bug, how you might solve it (if known), environment details (like system version, Python version, dependencies), and any relevant screenshots or logs.
+- **Feature Requests:** Describe new features you want supported or propose optimizations for existing features. We'll discuss these in the community and potentially add them to our ROADMAP.
+
+## Documentation Contribution
+
+The documentation site is available at [docs](/).
+
+We currently support documentation in English; however, we welcome contributions that translate the documentation into other languages. Please ensure any new content is also available in English:
+
+- **Adding Content:** Follow the structure of existing documents. Save new content as Markdown files in the appropriate directory, e.g., `docs/get_started/`.
+- **Media Files:** Store media files like images or videos in `docs/public`. Ensure they are correctly referenced in your documents.
+- **Sidebar Updates:** If you add or change documentation, update the sidebar configuration in `docs/.vitepress/config.mts` accordingly.
+
+To verify the appearance of new documents, you can deploy the documentation site locally by running `cd docs && npm ci && npm run docs:dev`.
+
+For any issues or discussions, please join our [Discord Channel](https://discord.gg/z27CxnwdhY).
+
+We look forward to your contributions, and thank you for helping us improve L2MAC!
\ No newline at end of file
diff --git a/docs/guide/contribute/rfc_template.md b/docs/guide/contribute/rfc_template.md
new file mode 100644
index 00000000..d24a92ab
--- /dev/null
+++ b/docs/guide/contribute/rfc_template.md
@@ -0,0 +1 @@
+Will be updated soon, stay tuned.
diff --git a/docs/guide/faq.md b/docs/guide/faq.md
new file mode 100644
index 00000000..3f349a1f
--- /dev/null
+++ b/docs/guide/faq.md
@@ -0,0 +1,39 @@
+Our vision empowers users to achieve 10x more with AI. The LLM Automatic Computer Framework (L2MAC) transforms cutting-edge LLMs into user-directed, reusable, and efficient AI assistants capable of autonomously executing complex real-world tasks, revolutionizing productivity and code generation.
+
+### Convenient Link for Sharing this Document:
+
+```
+- FAQ https://github.com/samholt/L2MAC/blob/master/docs/faq.md
+```
+
+### Links
+
+1. Code: https://github.com/samholt/l2mac/
+2. Roadmap: https://github.com/samholt/L2MAC/blob/master/docs/roadmap.md
+
+### How do I become a contributor?
+
+1. Choose a task from the Roadmap (or you can propose one). By submitting a PR, you can become a contributor and join the dev team.
+2. Current contributors come from backgrounds including Oxford/Cambridge Universities and companies.
+
+### Become the Chief Evangelist of the L2MAC Community
+
+Join us as the Chief Evangelist, a dynamic role that changes hands every month, fueling continuous innovation and fresh ideas within our community. Here's what you'll do:
+
+- **Community Leadership and Support:** Take charge of maintaining essential community resources such as FAQ documents, announcements, and GitHub READMEs. Ensure that every community member has the information they need to thrive.
+- **Rapid Response:** Act as the first point of contact for community inquiries. Your goal will be to respond to questions on platforms like GitHub Issues and Discord within 30 minutes, ensuring our community remains informed and engaged.
+- **Foster Positive Engagement:** Cultivate an environment that is not only enthusiastic and genuine but also welcoming. We aim to make every member feel valued and supported.
+- **Encourage Active Participation:** Inspire community members to contribute to projects that push the boundaries of tools that 10x people's work productivity. Your encouragement will help harness the collective expertise and passion of our community.
+- **Event Coordination (Optional):** Have a flair for event planning? You can choose to organize small-scale events, such as hackathons, which are crucial for sparking innovation and collaboration within the community.
+
+**Why Join Us?**
+
+This role offers the unique opportunity to be at the forefront of the AI revolution, engage with like-minded individuals, and play a pivotal part in steering our community towards significant contributions in the field of AGI. If you are passionate about AI, eager to help others, and ready to lead, the Chief Evangelist position is your platform to shine and make an impact. Interested applicants should email `sih31 (at) cam.ac.uk`.
+
+
+### FAQ
+
+1. Code tests are failing due to an import error:
+   1. At present, any package the LLM agent tries to use must already be installed in the virtualenv from which you run L2MAC. Therefore, it is best to find out which packages L2MAC is trying to use and install them, and/or the specific versions of those packages. We plan to fix this in the future with self-created virtualenvs per codebase generation; see the [roadmap](./roadmap). We welcome contributions on this.
+2. Want to join the contributor team? How to proceed?
+   1. Merging a PR will get you into the contributor team. The main ongoing tasks are all listed on the [roadmap](./roadmap).
\ No newline at end of file
diff --git a/docs/guide/get_started/configuration.md b/docs/guide/get_started/configuration.md
new file mode 100644
index 00000000..46b03f13
--- /dev/null
+++ b/docs/guide/get_started/configuration.md
@@ -0,0 +1,8 @@
+# Configuration
+
+## [LLM API Configuration](./configuration/llm_api_configuration.md)
+
+## Others (Optional)
+
+Check [config.yaml](https://github.com/samholt/L2MAC/blob/master/config/config.yaml) and
+[config.py](https://github.com/samholt/L2MAC/blob/master/l2mac/config.py) for more details.
diff --git a/docs/guide/get_started/configuration/llm_api_configuration.md b/docs/guide/get_started/configuration/llm_api_configuration.md
new file mode 100644
index 00000000..b6d24384
--- /dev/null
+++ b/docs/guide/get_started/configuration/llm_api_configuration.md
@@ -0,0 +1,54 @@
+# LLM API Configuration
+
+After installation, follow these steps to configure the LLM API, using the OpenAI API as an example. This process is similar to other LLM APIs.
+
+## Steps
+
+1. **Initialize Configuration**:
+
+   - Execute `l2mac --init-config` to generate `~/.l2mac/config.yaml`. Edit this user-level file with your API configuration, which avoids accidentally committing and sharing your API key.
+
+2. **Edit Configuration**:
+
+ - Update `~/.l2mac/config.yaml` according to the [example](https://github.com/samholt/L2MAC/blob/master/config/config.yaml) and [configuration code](https://github.com/samholt/L2MAC/blob/master/l2mac/config.py):
+
+```yaml
+llm:
+ api_type: "openai" # or azure etc. Check ApiType for more options
+ model: "gpt-4-turbo-preview" # or "gpt-4-turbo"
+ base_url: "https://api.openai.com/v1" # or forward url / other llm url
+ api_key: "YOUR_API_KEY"
+```
+
+> **Note**:
+> Configuration priority is `~/.l2mac/config.yaml > config/config.yaml`.
+
+With these steps, your setup is complete. To start with L2MAC, check out the [Quickstart guide](../quickstart) or our [Tutorials](../../tutorials/concepts).
+
+L2MAC supports a range of LLM models. Configure your model API keys as needed.
+
+
+## OpenAI API
+
+Check [config.py](https://github.com/samholt/L2MAC/blob/master/l2mac/config.py)
+
+```yaml
+llm:
+ api_type: 'openai'
+ model: 'YOUR_MODEL_NAME'
+ base_url: 'YOUR_BASE_URL'
+ api_key: 'YOUR_API_KEY'
+```
+
+## Azure OpenAI API
+
+Check [config.py](https://github.com/samholt/L2MAC/blob/master/l2mac/config.py)
+
+```yaml
+llm:
+ api_type: 'azure'
+ model: 'YOUR_MODEL_NAME'
+ base_url: 'YOUR_AZURE_BASE_URL'
+ api_version: 'YOUR_API_VERSION'
+ api_key: 'YOUR_API_KEY'
+```
diff --git a/docs/guide/get_started/installation.md b/docs/guide/get_started/installation.md
new file mode 100644
index 00000000..b6e35af2
--- /dev/null
+++ b/docs/guide/get_started/installation.md
@@ -0,0 +1,53 @@
+# Installation
+
+We provide several ways to install L2MAC; please select the most appropriate way for your use case.
+
+## Supported systems and versions
+
+| System Version | Python Version | Supported |
+| -------------- | -------------- | --------- |
+| macOS 13.x | python 3.9 | Yes |
+| Windows 11 | python 3.9 | Yes |
+| Ubuntu 22.04 | python 3.9 | Yes |
+
+Ensure that Python 3.9+ is installed on your system. You can check this by using:
+
+```
+python3 --version
+```
+
+## Install stable version
+
+This is recommended for most users. You can import L2MAC like any Python package, create complete code applications, generate extensive text-based output for tasks, and fully customize the generation, including which tools are available and the generation settings for your application.
+
+```
+pip install l2mac
+```
+
+### Install submodules
+
+- Code testing: `pip install l2mac[all]`. On macOS/zsh: `pip install 'l2mac[all]'`
+
+
+
+## Install the latest development version
+
+Best for experiencing the newest features.
+
+```
+pip install git+https://github.com/samholt/l2mac
+```
+
+## Install in development mode
+
+This is recommended for developers and researchers looking to customize the framework for their unique requirements, contribute new tools, or extend the framework through core contributions.
+
+```
+git clone https://github.com/samholt/L2MAC.git
+cd ./L2MAC
+pip install -e .
+```
+
+### Install submodules
+
+- Code testing: `pip install -e .[all]`
\ No newline at end of file
diff --git a/docs/guide/get_started/introduction.md b/docs/guide/get_started/introduction.md
new file mode 100644
index 00000000..67283866
--- /dev/null
+++ b/docs/guide/get_started/introduction.md
@@ -0,0 +1,26 @@
+# L2MAC: Large Language Model Automatic Computer
+
+
+Pioneering the first practical LLM-based general-purpose stored-program automatic computer (von Neumann architecture) framework in an LLM-based multi-agent system, for solving complex tasks by generating extensive and consistent outputs, unbounded by the LLM's fixed context window constraint.
+
+
+
+## The LLM-Automatic Computer (L2MAC) framework excels at solving large, complex tasks: it is state-of-the-art for generating large codebases, and it can even write entire books, all while bypassing the traditional constraints of the LLM's fixed context window.
+
+* The LLM-Automatic Computer can take a **one-line input prompt** and generate an extensive and large output, for example, an entire complex codebase.
+  * Internally, the LLM-Automatic Computer uses a **prompt-program**, which is a series of prompts, each providing an instruction step to execute. Unless explicitly given, the **prompt-program** is self-generated (bootstrapped) and executed. Specifically, each instruction step of the **prompt-program** is loaded into a new LLM agent to execute, whose context is managed by a control unit and which is provided with tools so that it can read and write to a persistent memory, here a file store, containing the final and intermediate outputs. This enables it to automatically execute general-purpose prompt programs to solve complex tasks that require extensive cohesive outputs, where the output is unbounded and not constrained by the LLM's underlying context window.
+
+![BlockDiagram](/l2mac-block-diagram.png)
+
+LLM-Automatic Computer (L2MAC) instantiation for coding a large complex codebase for an entire application based on a single user prompt. Here we provide L2MAC with additional tools to check for any syntax errors within the code and run any unit tests if they exist, and call this instantiation Code-L2MAC.
+
+## L2MAC's Abilities
+
+LLM-Automatic Computer (L2MAC) is an LLM-agent framework created within the University of Cambridge van der Schaar research lab, originating from the peer-reviewed paper published at [ICLR 2024](https://openreview.net/forum?id=EhrzQwsV4K). You can use this multi-agent framework to solve your complex task and create your own full code application or large text outputs, such as writing books or reports. For more details, you can refer to [CodeBase Generator](../use_cases/codebase_generator) and [Book Generator](../use_cases/book_generator) under **Use Cases**. Let us start with a complete example.
+
+## Examples (fully generated by GPT-4)
+
+For example, if you type `l2mac "Create a beautiful, playable and simple snake game with pygame. Make the snake and food be aligned to the same 10-pixel grid."`, you would get a complete codebase for a fully playable game. See the generated codebase at [CodeBase Generator](../use_cases/codebase_generator).
+
+![Snake Game Gameplay](/images/snake_game_gameplay.png)
+
+This example costs around **$0.16** for the complete codebase repository.
diff --git a/docs/guide/get_started/quickstart.md b/docs/guide/get_started/quickstart.md
new file mode 100644
index 00000000..f89193e0
--- /dev/null
+++ b/docs/guide/get_started/quickstart.md
@@ -0,0 +1,66 @@
+# Quickstart
+
+## Installation
+
+```
+pip install l2mac
+```
+
+Available installation methods can be found in the [Installation](./installation) section.
+
+## Configuration
+
+Variations for setting up the LLM API (OpenAI, Azure, etc.) and other components can be found in the [Configuration](./configuration/llm_api_configuration) section.
+
+## Create an entire codebase with a single user prompt
+
+> Note:
+>
+> Below is a breakdown of the [codebase generator example](https://github.com/samholt/L2MAC/blob/master/examples/generate_codebase_simple_blackjack.py). If you installed L2MAC with the git clone approach, simply run
+>
+> ```
+> l2mac "Create a simple playable blackjack cli game"
+> ```
+>
+> Now, let's get started! We will create an LLM-automatic computer of sequential LLM agents to write all the software based on our initial prompt.
+
+First, import the library
+
+```python
+from l2mac import generate
+```
+
+Next, run it to generate the codebase
+
+```python
+generate("Create a simple playable blackjack cli game")
+```
+
+You can expect output similar to that shown in [CodeBase Generator](../use_cases/codebase_generator).
+
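+You can also customize the run; for example, a sketch using parameters documented in the [API](../api) section:
+
+```python
+from l2mac import run_l2mac, Domain
+
+code = run_l2mac(
+    "Create a simple playable blackjack cli game",
+    domain=Domain.codebase,
+    run_tests=True,            # run the self-generated unit tests
+    project_name="blackjack",  # unique project name
+)
+```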
+
+---
+
+## Usage
+
+```
+ Usage: l2mac [OPTIONS] PROMPT_TASK
+
+ Generate based on the input prompt with LLM-automatic Computer (L2MAC).
+
+╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ * prompt_task TEXT Your input prompt to generate for such as 'Create a playable snake game in PyGame' [default: None] [required] │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --domain [codebase|book|custom] Domain to generate, existing options are 'codebase', 'book'. [default: codebase] │
+│ --run-tests --no-run-tests Whether to run self-generated unit-tests when generating code. [default: no-run-tests] │
+│ --project-name TEXT Unique project name, such as 'snakegame'. [default: None] │
+│ --steps INTEGER Number of internal steps to use when creating the prompt program internally. [default: 10] │
+│ --prompt-program TEXT Path to the prompt program to use, or the prompt program as a string in a list format. [default: None] │
+│ --prompts-file-path TEXT Overrides the existing prompts to be used. Useful when creating a new prompt set for a new task. [default: None] │
+│ --tools-enabled TEXT List of functions that the agents can use, separated by commas. Defaults to use all tools available. [default: None] │
+│ --debugging-level [debug|info|warn|error] Whether to print full context-windows out. [default: info] │
+│ --init-config --no-init-config Initialize the configuration file for L2MAC. [default: no-init-config] │
+│ --help Show this message and exit. │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+```
diff --git a/docs/guide/mission.md b/docs/guide/mission.md
new file mode 100644
index 00000000..9b2c5994
--- /dev/null
+++ b/docs/guide/mission.md
@@ -0,0 +1,27 @@
+> “You want to wake up in the morning and open up your computer to find it continued working on your exact work task progressing it for you automatically, perfectly aligned to your workflow and exactly how you would like it done – and that’s what being in a world of AI assistants is all about. It’s about empowering humans to significantly achieve and produce more, than we have ever been able to in the past. And I can’t think of anything more exciting than defining and making this future a reality.” – Sam Holt (L2MAC's Creator).
+
+## Making AI Assistants Directed
+
+Building on all the incredible advancements with Large Language Models, L2MAC aims to make AI assistants that align exactly with user control, handle nearly infinite task lengths and high-complexity tasks, and perform highly repeatable operations akin to complex business processes. We envisage our AI assistants empowering LLMs to become the most powerful compound AI system to achieve a specific complex real-world task, such as coding an entire application's codebase, which was previously only possible by a team of professional software engineers taking months; we aim to replicate the same level of quality in minutes, generating the code from scratch or working with an existing codebase to implement new functionality.
+
+## Creating the Most Re-usable General Purpose Task Automatic Computer LLM Framework
+
+We believe a fully and rapidly reusable general-purpose LLM framework is the pivotal breakthrough needed to substantially increase the adoption of LLM-based assistants. The majority of previous LLM frameworks are specialized for one particular task, comparable to specialized computing machines in the early days of computing.
+
+Continuing with the computing analogy, with the invention of a general-purpose computer, the computer architecture, framework, and tooling stack can be developed and progressed independently of the explicit program being run on it, and it can be easily re-programmed to execute any general-purpose task. We believe such a separation is crucial for the advancement of AI assistants and seek to build the most powerful LLM-based computing task framework.
+
+## Real-world Work Tasks Follow Business Processes
+
+On the quest to tackle real-world business tasks, we believe that an AI assistant framework should be made up of the highest-performing components that can be re-used to solve general-purpose tasks. In the world of economic repeatable processes, or business processes that power businesses internationally, we believe that AI assistants should execute tasks in a fully controllable, repeatable manner, following a “prompt program”, if you will.
+
+### Prompt Programs
+
+The LLM-automatic computer introduces the first prompt-program-driven automatic LLM-based computer for solving general-purpose tasks. Progressing with such a paradigm is crucial, as it allows complex tasks to be decomposed into complex prompt-flows that condition the underlying LLM as a general-purpose, yet context-limited, powerful task solver for some human tasks. This is further augmented by tools, leveraging the distinct benefits of explicitly created tools for computation or for compiling and running code, providing a powerful framework that blends the strengths of LLMs with those of tools. We envisage future prompt programs allowing complex control flow patterns, such as loops, conditional while loops, for loops, and if statements, amongst other control flows, opening up a rich possibility for complex control of such AI assistants that will be needed when solving complex tasks.
+
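+As a purely hypothetical sketch (no such control-flow syntax exists in L2MAC today), a future prompt program might be expressed along these lines:
+
+```python
+# Hypothetical future prompt program with control flow; illustrative only.
+prompt_program = [
+    "Draft the module architecture for the application.",
+    {"for_each": "module in the architecture",
+     "do": "Implement the module and its unit tests."},
+    {"while": "any unit test fails",
+     "do": "Diagnose the failing test and fix the offending file."},
+    "Write a README documenting the completed application.",
+]
+```
+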
+## Making History
+
+We have already introduced the first LLM-based automatic computer framework and published it in a [seminal paper](https://openreview.net/forum?id=EhrzQwsV4K) in the competitive peer-reviewed conference of the International Conference on Learning Representations in 2024. We have open-sourced the framework and now invite contributors to advance the cutting edge with us and help make the world’s most powerful yet user-directed, fully controllable AI assistants.
+
+### Enter your email to sign up for LLM-automatic Computer Updates.
+
+
diff --git a/docs/guide/roadmap.md b/docs/guide/roadmap.md
new file mode 100644
index 00000000..12dbe276
--- /dev/null
+++ b/docs/guide/roadmap.md
@@ -0,0 +1,43 @@
+## Roadmap
+
+### Long-term Objectives
+
+Enable LLM-Automatic Computer (L2MAC) to run prompt programs with advanced control flows (for example, `while` or `for` loops and conditional `if` statements, combined with additional checks) and re-program its prompt program at runtime. Working towards making the [vision](./faq) and [mission](./mission) a reality.
+
+### Short-term Objectives
+
+1. Solve user tasks with the highest ROI.
+2. Fully implement complete high-quality codebases (repositories) for projects.
+3. Implement the most crucial [future work](https://openreview.net/pdf?id=EhrzQwsV4K).
+
+### Tasks
+
+* Add support for programming languages other than Python
+* Add support for additional tools, such as searching online
+* In Python, when generating a codebase and the LLM recommends specific new package versions, create an on-the-fly Python virtualenv and install the packages in the requirements file as it is updated. We are looking for contributors for this feature, so we warmly welcome any PRs implementing this.
+
+1. Usability
+   1. Write documentation describing the current features and usage at all levels (ongoing, continuously adding content to the [documentation site](https://samholt.github.io/l2mac/))
+   2. Support Docker
+   3. Support multiple languages
+2. Features
+   1. Improve test coverage of all code
+   2. Implement more LLM API types and locally running LLMs, mapping the file read and write tools to work with their associated providers.
+   3. Support loading an existing directory of files.
+   4. Implement reflecting on the original proposed prompt program n times to improve it (partially implemented).
+   5. Support re-planning the prompt program at runtime
+   6. Support ToT planning
+3. Tools
+   1. Implement a web search tool
+   2. Implement a code debugger tool
+   3. Implement local data collection for training your own custom local LLM
+4. Use cases
+   1. Real-world use-cases (please submit your ideas; just open a GitHub issue)
+   2. Web Researcher, creating a survey paper or a document summarizing a particular topic or answering a particular question by searching online and synthesizing content.
+   3. Analyzing data
+5. Evaluation
+   1. Reproduce a complete survey paper, e.g. "multi-agent LLM frameworks and systems".
+   2. Evaluate on the [GAIA benchmark](https://arxiv.org/abs/2311.12983).
+6. LLM
+   1. Support streaming versions of all APIs
+   2. Support more APIs
\ No newline at end of file
diff --git a/docs/guide/tutorials/concepts.md b/docs/guide/tutorials/concepts.md
new file mode 100644
index 00000000..47ca271d
--- /dev/null
+++ b/docs/guide/tutorials/concepts.md
@@ -0,0 +1,109 @@
+# Concepts
+
+After this tutorial, you will be able to:
+
+- Understand L2MAC's concept of a prompt program and how it works
+- Understand how L2MAC uses memory to store the intermediate outputs from completing each instruction step in the prompt program, and how it re-uses this growing memory for subsequent instruction steps
+
+The goal is to provide an intuitive and simplified explanation of the concepts so that users have a background to further explore the tutorial series. While we aim for clarity, we also recognize that simplifications can introduce inaccuracies or omissions. Therefore, we encourage you to read the subsequent documents for complete comprehension.
+
+You may also jump to [L2MAC 101](./l2mac_101) if you want hands-on coding first.
+
+Check out our [ICLR 2024 paper](https://openreview.net/forum?id=EhrzQwsV4K) for a complete, rigorous explanation.
+
+## High-Level Overview
+
+![BlockDiagram](/l2mac-block-diagram.png)
+
+**L2MAC Overview**. Here the LLM-automatic Computer Framework is instantiated for extensive code generation tasks. First, it takes in a **single user input prompt**, for example, `"Create an online chat app..."`, and the underlying LLM understands the user task and breaks it down into sequential instructions, a **prompt program** to execute to complete the task. We note that self-generating a prompt program is a form of bootstrapping, and L2MAC also supports being given the prompt program explicitly as input. Mirroring the operation of a computer, each instruction of the prompt program is sequentially loaded into a new instance of an LLM agent, where we make the LLM an agent by providing it with tools and the ability to perform actions sequentially.
+
+The **Control Unit (CU)** manages the LLM agent's *context window* for each instruction (agent) and empowers the LLM agent to interact with an *external memory file store* through read, write, and evaluate tools. Crucially, this *external memory file store* stores the *prompt program*, as well as the intermediate and final outputs of executing the prompt program to solve the given task; this follows the stored-program computer, or von Neumann architecture, approach of storing both the program and the program output in memory. The CU identifies and reads relevant files from the memory to generate or update files per instruction. This ensures proper conditioning on existing files without losing vital context. Automatic checks evaluate the LLM's outputs for correctness and completion, with iterative error corrections involving both syntactical checks of the code and running self-generated unit tests to check desired functionality. Overall, this produces, in the file store, a complete large codebase that fulfills the detailed user task.
+
+## Benefits of the LLM-automatic Computer
+
+By using the LLM-automatic computer framework, we gain the immediate strong benefits of:
+
+* Strong empirical performance, beating existing LLMs and other leading LLM multi-agent and LLM compound AI frameworks for [codebase generation tasks](https://arxiv.org/pdf/2310.02003.pdf) and [single coding tasks, such as HumanEval](https://paperswithcode.com/sota/code-generation-on-humaneval).
+* Can augment *any existing LLM* with the ability to follow a near-infinite length and complex *prompt program* to solve a given task.
+* Can generate a near-infinite amount of output for a given task when following a prompt program.
+* Can generate intermediate outputs, either from thinking through and solving parts of the difficult task already or from the use of tools, and re-use these intermediate thoughts at later stages of its operation to solve more complex and difficult tasks that require many LLM thinking steps, for example, generating an entire codebase for a complex application, which requires conditioning on, understanding, and potentially modifying many of the previously generated code files in the codebase.
+* By creating and following a prompt program, we can create large unbounded outputs that align exactly with what the user desires, rather than autonomously thinking and forgetting what the original user input requirements were, as is the case with [AutoGPT (page 8)](https://arxiv.org/pdf/2310.02003.pdf).
+* By breaking a large task into a sequential prompt program, we can generate the final output one part at a time, enabling LLMs with fixed context windows to generate unbounded outputs significantly greater than their underlying context window. We note that this also helps large-context LLM models, as prior work has shown that even when using a large-context LLM model, its attention is largely restricted to the most recent [small percentage of the context window](https://arxiv.org/abs/2307.03172).
+* [Mirroring the development of the computer](../mission), we believe that the advancement of the LLM-automatic Computer Framework enables a general-purpose task-solving framework that can solve any task by simply re-programming the prompt program, whereas many existing multi-agent systems today specialize in only one task, which is reminiscent of the first computing machines, where the [breakthrough was a re-programmable automatic or universal automatic computing machine](https://arxiv.org/pdf/2310.02003.pdf).
+
+
+## Low-Level Details
+
+All transformer-based LLMs have a fixed context window, limiting the number of tokens and characters they can process. This restricts a single LLM from generating any output larger than its fixed context window, such as a large codebase or an entire book. A natural solution is to extend an LLM agent with external memory. However, existing methods use overly simplistic memory stores, such as an external corpus of previous summarizations that is append-only, or databases and dictionaries that maintain precise values for variables without any provision for in-place updates. Compounding this, existing works do not include mechanisms for maintaining syntactic or semantic consistency within the memory store, a vital requirement for generating coherent and interdependent large code structures.
+
+Considering these issues, we introduce the LLM-automatic computer (L2MAC) framework, which is the first practical LLM-based general-purpose stored-program automatic computer (von Neumann architecture) framework, an LLM-based multi-agent system for long and consistent output generation. Here we mean *automatic* in the sense that it can automatically follow the internal prompt program without human intervention, mirroring early computers, such as [Turing's (Automatic-)machine (page 17)](https://arxiv.org/pdf/2310.02003.pdf).
+
+A Control Unit (CU) orchestrates the execution of the individual LLM agents and their interaction with the memory store. As outlined in the above Figure, an LLM agent first generates a task-oriented *prompt program* from a detailed user-specified task. The CU tailors the LLM agent's context, so it always includes the next unresolved instruction in the *prompt program* and information about the execution of past iterations (agents), and declutters the context when approaching its limit. It also endows the LLM agent with the ability to read and update any existing region of the memory store or extend it with new outputs. Furthermore, the CU plays a crucial role in checking the generated output. It feeds the LLM agent with syntactical checker errors and requests the LLM agent to generate checks alongside generating output, here unit tests when generating code, which are verified at each update of the memory file store to trigger corrective actions if needed, thereby ensuring that the extensive output in memory is both syntactically and functionally consistent.
+
+### L2MAC Framework
+
+Now we outline the L2MAC framework for the first practical LLM-based stored-program computer, with an instantiation for coding illustrated in the above Figure. L2MAC consists of three main components: the LLM processor, the memory file store, and the Control Unit (CU) that controls the flow of the execution, thus endowing the LLM agent with read-and-write capabilities, among other capacities; this is illustrated in the figure below.
+
+#### LLM-based Processor
+
+An LLM can be viewed as a more complex atomic unit of computation with a fixed context window input; this allows for a flexible and powerful computation unit that can be used to solve a range of different tasks. Additionally, the LLM is empowered with tools forming an LLM agent, where it can select which action to execute next or provide an output. Critically, the LLM produces a probabilistic output; it is regarded as a hallucination when it makes an erroneous output. Thus, crucial to effectively updating an interrelated memory is the ability to enforce periodic checks on the LLM output to ensure correctness and consistency.
+
+#### Memory
+
+Following a stored-program computer, we define two types of memory: that of the prompt program (or instructions) and that of the file store. Here, the file store stores information relevant for the processor to read, write, and evaluate, with the final output ultimately stored in the file store.
+
+#### Control Unit
+
+The control unit is responsible for managing the context window for the LLM, encompassing both its inputs and outputs, executing the LLM, checking its outputs for errors, and enabling it to call tools (functions), which include reading and writing to the memory file store. We provide the following figure to detail its operation.
+
+![ControlFlow](/control_flow.png)
+**Control Unit**. Control flow diagram for one dialog turn $t$. Here the CU executes one current instruction $\mathcal{I}^{(k)}$ of the *prompt program*. It starts by loading the first instruction into the context window $C^0\leftarrow \{\mathcal{I}^{(0)}\}$ and iterates automatically until all instructions in the *prompt program* $\mathcal{I}$ have been executed. First, the LLM's context window $C^t$ is processed by the LLM Processor $\mathcal{P}_{\text{LLM}}(C^t)$ to output a response $M_r$. The CU stores this in a buffer $\Delta_{C^{t+1}} \leftarrow \{M_r\}$ and checks whether $M_r$ has called a tool; if so, it executes the tool with the specified input in $M_r$, which includes reading, writing, and evaluating $\mathcal{E}(\mathcal{D})$ the file store $\mathcal{D}$, outputting the tool response $M_f$, which is appended to the buffer $\Delta_{C^{t+1}}$. The CU performs additional control flow (as outlined below) to check if an instruction has been completed, continue an instruction beyond the context window, and continue executing the current instruction.
+
+##### Task-Oriented Context Management
+
+The Control Unit (CU) uses the LLM as a multi-turn dialog system, filling its context window $C$ with a combination of messages $m$ which can come from the user $M_u$, an LLM response $M_r$, a function (tool) output $M_f$, or the CU $M_c$, so that $m \in \{ M_u, M_r, M_f, M_c\}$.
+Consequently, at turn $t$ the context window $C^t\in \text{List}(M)$ takes the form $C^t = (m^1, m^2, \dots, m^{n_t})$.
+
+
+To make L2MAC an automatic computer, the CU prompts the LLM to fill the initially empty instruction registry $\mathcal{I}$ with a list of instructions $\{\mathcal{I}^{(1)},\dots,\mathcal{I}^{(K)}\}$, where each will be executed in the LLM processor. We consider the simplest case of a prompt program with sequential instructions; however, we realize that more complicated control flow paradigms are possible and leave these for [contributors to add](../roadmap). L2MAC then loads an empty context window of an LLM agent with the first instruction $C^0\leftarrow\{\mathcal{I}^{(0)}\}$ and iterates the CU control flow loop until all instructions have been achieved.
+The LLM can signal when the current instruction $\mathcal{I}^{(i)}$ has been completed by calling a special tool `step_complete`, at which point the CU evaluates the file store $\mathcal{D}$ using its evaluator module $\mathcal{E}$ (discussed below) to check for any introduced errors. If none are found, it asks the LLM to summarize the generated output in the current context window $C^t$ as a message $M_{rs}$ and resets the context window as $C^{t+1}\leftarrow \{\mathcal{I}^{(k+1)},M_{rs}\}$.
+
+
+**Overcoming the fixed context window constraint**. The input to the LLM cannot exceed the context window constraint $c$: the combined length of the initial context $C^t$ and the additional messages buffer $\Delta_{C^{t+1}}=\{m^0,\dots,m^n\}$ must fit in the context window, that is, $|C^t \oplus \Delta_{C^{t+1}}| \leq c$ (where $\oplus: \text{List}(A)\times \text{List}(A) \rightarrow \text{List}(A)$ denotes the concatenation of two lists on the set $A$; we abuse notation by considering any $a\in A$ as a singleton $\{a\}$).
+However, the length of $\Delta_{C^{t+1}}$ is not known a priori, so the CU should have a way of handling the cases where $\Delta_{C^{t+1}}$ exceeds the context margin $c-|C^{t}|$.
+This can be achieved through a combination of three different strategies: (1) minimize the occurrence by promoting the task at each time step to be small enough and economizing the filling of the context $C$;
+and if the situation occurs, (2) store in the file store $\mathcal{D}$ as much relevant output as possible from the current $C^t$ and (3) update or include a new summary message with $\mathcal{I}^{(k)}$ as in-context tuning for the next iteration.
+
+Regarding (1), through appropriately crafting $C^t$, the CU can prompt the LLM to plan sub-steps for the current instruction (most likely the original task prompt given by the user) and then target each sub-step in the following iterations. For illustration, in a coding setting, (2) can be achieved by storing the generated code so far to avoid rewriting it in the next iteration, and (3) by initializing a new prompt with a summary $M_{rs}$ of the current progress and helpful information to complete the current instruction, e.g., which files should be read or modified, or the current progress made fixing errors; (3) is further detailed at the bottom right of the above Figure.
+
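+A schematic Python sketch of this control loop follows; all helper functions (`calls_tool`, `run_tool`, `evaluate`, `tokens`, and so on) are illustrative stand-ins, not the framework's actual API:
+
+```python
+def control_unit(instructions, llm, file_store, context_limit):
+    """Schematic sketch of the CU loop executing a sequential prompt program."""
+    context = [instructions[0]]                       # C^0 <- {I^(0)}
+    k = 0
+    while k < len(instructions):
+        response = llm(context)                       # M_r = P_LLM(C^t)
+        buffer = [response]                           # Delta_{C^{t+1}}
+        if calls_tool(response):                      # read/write/evaluate D
+            buffer.append(run_tool(response, file_store))
+        if signals_step_complete(response):
+            errors = evaluate(file_store)             # E(D): syntax checks, tests
+            if errors:
+                buffer.append(errors)                 # feed errors back to the LLM
+            else:
+                k += 1                                # instruction I^(k) completed
+                if k == len(instructions):
+                    break
+                summary = llm(context + ["Summarize the progress made."])
+                context = [instructions[k], summary]  # C <- {I^(k+1), M_rs}
+                continue
+        if tokens(context + buffer) > context_limit:  # context overflow
+            write_partial_output(buffer, file_store)  # strategy (2)
+            context = [instructions[k], summarize(llm, context)]  # strategy (3)
+        else:
+            context = context + buffer                # C^{t+1} <- C^t concat Delta
+    return file_store
+```
+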
+##### Precise Read/Write tools for entire memory
+
+The need for a reading mechanism that retrieves the relevant information at each iteration is evident and has been reasonably explored in previous literature.
+In contrast, previous work on memory has paid little attention to the writing component, which gets mostly reduced to the [appending of new prompts and LLM outputs](https://arxiv.org/abs/2307.08191) or updating the values of very structured and thus [restrictive forms of memory](https://arxiv.org/abs/2305.14322), e.g., variables or [tables](https://arxiv.org/abs/2306.03901).
+
+These approaches make sense for summarization, dialogue, and database manipulation tasks, but they are not suitable for long, interconnected output generation tasks, such as generating large codebases for system design. Indeed, in such settings, downstream subtasks $\mathcal{I}^{(j)}$ may demand extensions of previous outputs (such as modules in a codebase) due to imperfect planning, and this, together with the non-determinism and possible hallucination of LLMs, makes it likely that previously stored memories $\mathcal{D}$ will need to be modified to rectify such defects.
+
+In L2MAC it is thus key to implement read/write interactions with any part of the memory. We want the agent to be able to scan $\mathcal{D}$ on demand, retrieve the parts of memory it considers relevant, and potentially update them. In the next section, we detail our implementation of an LLM with a write component that allows it not only to add new information to $\mathcal{D}$ but also to delete and update any of its contents, an essential element that allows L2MAC to succeed in long output generation tasks.
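+
+As an illustrative sketch of what such precise read/write tools over the file store $\mathcal{D}$ could look like (modelled here as a simple dict of path to text; these signatures are for illustration, not the exact L2MAC tool definitions):
+
+```python
+# Illustrative tools letting the agent read, write, update, and delete
+# any part of the file-store memory D.
+def read_file(store: dict, path: str) -> str:
+    return store[path]
+
+def write_file(store: dict, path: str, content: str) -> None:
+    store[path] = content  # creates the file or fully overwrites it
+
+def rewrite_lines(store: dict, path: str, start: int, end: int, new: str) -> None:
+    lines = store[path].splitlines()
+    lines[start:end] = new.splitlines()  # update only the targeted span
+    store[path] = "\n".join(lines)
+
+def delete_file(store: dict, path: str) -> None:
+    del store[path]
+```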
+
+##### Checking the generated output
+
+
+As discussed in LLM-based Processor above, the intrinsic stochasticity of LLMs and the well-known phenomenon of hallucination make it likely that incoherent or erroneous outputs occur during long interactions, which can be disastrous, for example, in coding. More profoundly, changes (e.g., to a function) made to satisfy a given instruction $\mathcal{I}^{(j)}$ can hamper the solution to formerly completed instructions $\mathcal{I}^{(i)}$, $i<j$.
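+
+In a coding setting, for example, such a check can amount to running a syntax check and the project's tests over the file store after each change; a minimal sketch, assuming `pytest` as the test runner:
+
+```python
+# Minimal sketch of an evaluator E for a coding setting: report syntax
+# errors or failing tests introduced by the latest change.
+import subprocess
+
+def evaluate(workspace_dir: str) -> str:
+    result = subprocess.run(
+        ["python", "-m", "pytest", workspace_dir, "-q"],
+        capture_output=True, text=True,
+    )
+    return "" if result.returncode == 0 else result.stdout  # "" means no errors found
+```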
\ No newline at end of file
diff --git a/docs/guide/use_cases/book_generator.md b/docs/guide/use_cases/book_generator.md
new file mode 100644
index 00000000..1fd65d72
--- /dev/null
+++ b/docs/guide/use_cases/book_generator.md
@@ -0,0 +1,53 @@
+# Book Generator
+
+The LLM Automatic Computer Framework (L2MAC) excels at generating user-directed text output of any length; it can effectively write books or long documents from a single input prompt.
+
+The following provides a complete example of creating an entire book. Run:
+
+```python
+from l2mac import generate_book
+
+book: dict = generate_book(
+ r"""
+Write a complete recipe book for the following book title of "Twirls & Tastes: A Journey Through Italian Pasta"
+
+Description: "Twirls & Tastes" invites you on a flavorful expedition across Italy, exploring the diverse pasta landscape from the sun-drenched hills of Tuscany to the bustling streets of Naples. Discover regional specialties, learn the stories behind each dish, and master the art of pasta making with easy-to-follow recipes that promise to delight your senses.
+""",
+ steps=30,
+)
+
+print(book) # it will print the book folder complete with all the files as a dictionary
+```
+
+This will create a new `workspace` folder in the local directory where you run this script, containing all the files generated during the run and upon completion. The final output book folder will be inside the newly generated folder in the local `workspace` directory, with the time and date of the run as the sub-folder name.
+
+The output should look something like this.
+
+```bash
+italian_pasta_recipe_book
+├── title_page.txt
+├── dedication_page.txt
+├── detailed_outline.txt
+├── table_of_contents.txt
+├── introduction.txt
+├── chapter_1_tuscany.txt
+├── chapter_2_campania.txt
+├── chapter_3_sicily.txt
+├── chapter_4_lombardy.txt
+├── chapter_5_pasta_making_techniques.txt
+├── chapter_6_sauces_and_accompaniments.txt
+├── conclusion.txt
+└── index.txt
+```
+
+The complete raw output generated by the above is given below.
+
+## Entire Book | Italian Pasta Recipe Book
+
+
+
+Click here for the [complete book](/TwirlsAndTastesAJourneyThroughItalianPasta.pdf); L2MAC produced all the text for the book, and all images were created with DALLE.
+
+The complete output text files are on [github](https://github.com/samholt/L2MAC/tree/master/docs/generated_examples/italian_pasta_recipe_book); you can [download them here](/italian_pasta_recipe_book.zip). The code and prompt to generate this are [here](https://github.com/samholt/L2MAC/blob/master/examples/generate_book.py).
diff --git a/docs/guide/use_cases/codebase_generator.md b/docs/guide/use_cases/codebase_generator.md
new file mode 100644
index 00000000..3b124d3d
--- /dev/null
+++ b/docs/guide/use_cases/codebase_generator.md
@@ -0,0 +1,219 @@
+# Codebase Generator
+
+The LLM Automatic Computer Framework (L2MAC) is state-of-the-art for generating large complex codebases with a Large Language Model.
+
+The following provides a complete example of creating a simple codebase. Run:
+
+```python
+from l2mac import generate_codebase
+
+# run pip install pygame==2.1.2
+codebase: dict = generate_codebase(
+ "Create a beautiful, playable and simple snake game with pygame. Make the snake and food be aligned to the same 10-pixel grid.",
+ steps=2,
+)
+
+print(codebase) # it will print the codebase (repo) complete with all the files as a dictionary
+```
+
+This will create a new `workspace` folder in the local directory where you run this script, containing all the files generated during the run and upon completion. The final output codebase will be inside the newly generated folder in the local `workspace` directory, with the time and date of the run as the sub-folder name.
+
+The output should look something like this.
+
+```
+snake_game
+├── main.py
+├── requirements.txt
+└── test_main.py
+```
+
+The complete raw output generated by the above is given below.
+
+main.py
+```python
+import random
+import sys
+
+import pygame
+
+# Initialize Pygame
+pygame.init()
+
+# Screen dimensions
+SCREEN_WIDTH = 800
+SCREEN_HEIGHT = 600
+
+# Colors
+BLACK = (0, 0, 0)
+GREEN = (0, 255, 0)
+RED = (255, 0, 0)
+WHITE = (255, 255, 255)
+
+# Set up the display
+screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
+pygame.display.set_caption("Snake Game")
+
+# Frame rate
+clock = pygame.time.Clock()
+FPS = 10
+
+# Grid size
+GRID_SIZE = 10
+
+
+# Snake class
+class Snake:
+ def __init__(self):
+ self.length = 1
+ self.positions = [((SCREEN_WIDTH // 2), (SCREEN_HEIGHT // 2))]
+ self.direction = random.choice(["up", "down", "left", "right"])
+ self.color = GREEN
+ self.score = 0
+ self.speed = 10
+
+ def move(self):
+ cur_x, cur_y = self.positions[0]
+ if self.direction == "up":
+ new_head = (cur_x, cur_y - GRID_SIZE)
+ elif self.direction == "down":
+ new_head = (cur_x, cur_y + GRID_SIZE)
+ elif self.direction == "left":
+ new_head = (cur_x - GRID_SIZE, cur_y)
+ elif self.direction == "right":
+ new_head = (cur_x + GRID_SIZE, cur_y)
+ self.positions.insert(0, new_head)
+ if len(self.positions) > self.length:
+ self.positions.pop()
+
+ def grow(self):
+ self.length += 1
+ self.score += 1
+ self.speed += 1
+ global FPS
+ FPS = min(60, self.speed) # Cap the FPS at 60 to prevent it from getting too fast.
+
+
+# Food class
+class Food:
+ def __init__(self):
+ self.position = (0, 0)
+ self.color = RED
+ self.randomize_position()
+
+ def randomize_position(self):
+ self.position = (
+ random.randint(0, (SCREEN_WIDTH // GRID_SIZE) - 1) * GRID_SIZE,
+ random.randint(0, (SCREEN_HEIGHT // GRID_SIZE) - 1) * GRID_SIZE,
+ )
+
+
+# Main game loop
+def main():
+ # Game initialization
+ snake = Snake()
+ food = Food()
+
+ while True:
+ for event in pygame.event.get():
+ if event.type == pygame.QUIT:
+ pygame.quit()
+ sys.exit()
+ elif event.type == pygame.KEYDOWN:
+ if event.key == pygame.K_UP and snake.direction != "down":
+ snake.direction = "up"
+ elif event.key == pygame.K_DOWN and snake.direction != "up":
+ snake.direction = "down"
+ elif event.key == pygame.K_LEFT and snake.direction != "right":
+ snake.direction = "left"
+ elif event.key == pygame.K_RIGHT and snake.direction != "left":
+ snake.direction = "right"
+
+ # Game logic
+ snake.move()
+ if snake.positions[0] == food.position:
+ snake.grow()
+ food.randomize_position()
+
+ # Check for collisions
+ if snake.positions[0] in snake.positions[1:]:
+ # Game over logic
+ print("Game Over! Your score was:", snake.score)
+ pygame.quit()
+ sys.exit()
+
+ # Render the game state
+ screen.fill(BLACK)
+ for pos in snake.positions:
+ pygame.draw.rect(screen, snake.color, pygame.Rect(pos[0], pos[1], GRID_SIZE, GRID_SIZE))
+ pygame.draw.rect(screen, food.color, pygame.Rect(food.position[0], food.position[1], GRID_SIZE, GRID_SIZE))
+ # Display the score
+ font = pygame.font.SysFont("arial", 20)
+ score_text = font.render("Score: " + str(snake.score), True, WHITE)
+ screen.blit(score_text, [0, 0])
+
+ pygame.display.update()
+ clock.tick(FPS)
+
+
+if __name__ == "__main__":
+ main()
+```
+
+test_main.py
+```python
+import pygame
+import pytest
+from main import Food, Snake
+
+# Mock pygame to run headless
+pygame.display.set_mode = lambda x: None
+pygame.init = lambda: None
+pygame.quit = lambda: None
+
+
+@pytest.fixture
+def snake():
+ return Snake()
+
+
+@pytest.fixture
+def food():
+ return Food()
+
+
+@pytest.mark.parametrize(
+ "direction, expected_position",
+ [("up", (400, 290)), ("down", (400, 310)), ("left", (390, 300)), ("right", (410, 300))],
+)
+def test_snake_movement(snake, direction, expected_position):
+ snake.direction = direction
+ snake.move()
+ assert snake.positions[0] == expected_position
+
+
+@pytest.mark.parametrize("initial_score, expected_score", [(0, 1), (5, 6)])
+def test_snake_eating(snake, food, initial_score, expected_score):
+ snake.score = initial_score
+ snake.positions[0] = food.position # Simulate snake eating the food
+ snake.grow()
+ assert snake.score == expected_score
+
+
+@pytest.mark.parametrize("initial_length, expected_length", [(1, 2), (3, 4)])
+def test_snake_growing(snake, initial_length, expected_length):
+ snake.length = initial_length
+ snake.grow()
+ assert snake.length == expected_length
+```
+
+requirements.txt
+```
+pygame==2.1.2
+pytest
+```
+
+## Playable Snake Game
+
+[![Snake Game Gameplay](/images/snake_game_gameplay.png)](https://github.com/samholt/L2MAC/tree/master/docs/generated_examples/snake_game)
+
+Click here for the complete files on [github](https://github.com/samholt/L2MAC/tree/master/docs/generated_examples/snake_game) or [download them here](/snake_game.zip). The code and prompt to generate this are [here](https://github.com/samholt/L2MAC/blob/master/examples/generate_codebase_simple_playable_snake.py).
diff --git a/docs/guide/use_cases/gallery.md b/docs/guide/use_cases/gallery.md
new file mode 100644
index 00000000..e68b6bf1
--- /dev/null
+++ b/docs/guide/use_cases/gallery.md
@@ -0,0 +1,39 @@
+# Gallery of Examples
+
+We include a gallery of example applications entirely produced by the LLM Automatic Computer (L2MAC) from a single input prompt. L2MAC excels at solving large, complex tasks: it is state-of-the-art for generating large codebases and can even write entire books, all while bypassing the traditional fixed context window constraint of LLMs.
+
+## Playable Snake Game
+
+[![Snake Game Gameplay](/images/snake_game_gameplay.png)](https://github.com/samholt/L2MAC/tree/master/docs/generated_examples/snake_game)
+
+Click here for the complete files on [github](https://github.com/samholt/L2MAC/tree/master/docs/generated_examples/snake_game) or [download them here](/snake_game.zip). The code and prompt to generate this are [here](https://github.com/samholt/L2MAC/blob/master/examples/generate_codebase_simple_playable_snake.py).
+
+## Large Complex URL Shortener Web Application
+
+[![Url Shortener Web Application](/images/url_shortener_1.png)](https://github.com/samholt/L2MAC/tree/master/docs/generated_examples/url_shortener_web_application)
+
+[![Url Shortener Web Application 2](/images/url_shortener_2.png)](https://github.com/samholt/L2MAC/tree/master/docs/generated_examples/url_shortener_web_application)
+
+Click here for the complete files on [github](https://github.com/samholt/L2MAC/tree/master/docs/generated_examples/url_shortener_web_application) or [download them here](/url_shortener_web_application.zip). The code and prompt to generate this are [here](https://github.com/samholt/L2MAC/blob/master/examples/generate_codebase_complex_url_shortener_app.py).
+
+
+## Entire Book | Italian Pasta Recipe Book
+
+
+
+Click here for the [complete book](/TwirlsAndTastesAJourneyThroughItalianPasta.pdf); L2MAC produced all the text for the book, and all images were created with DALLE.
+
+The complete output text files are on [github](https://github.com/samholt/L2MAC/tree/master/docs/generated_examples/italian_pasta_recipe_book); you can [download them here](/italian_pasta_recipe_book.zip). The code and prompt to generate this are [here](https://github.com/samholt/L2MAC/blob/master/examples/generate_book.py).
+
+
+## Playable Blackjack CLI Game
+
+[![Blackjack Game Gameplay](/images/blackjack_gameplay.png)](https://github.com/samholt/L2MAC/tree/master/docs/generated_examples/blackjack_game)
+
+Click here for the complete files on [github](https://github.com/samholt/L2MAC/tree/master/docs/generated_examples/blackjack_game) or [download them here](/blackjack_game.zip). The code and prompt to generate this are [here](https://github.com/samholt/L2MAC/blob/master/examples/generate_codebase_simple_blackjack.py).
+
+## Contribute Your Own Created Applications
+
+We actively encourage you to contribute your own awesome applications by submitting a PR with the application you made, sharing it in a GitHub issue, or posting it on the [Discord channel](https://github.com/samholt/l2mac).
\ No newline at end of file
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 00000000..031fa909
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,35 @@
+---
+# https://vitepress.dev/reference/default-theme-home-page
+layout: home
+
+hero:
+ name: "L2MAC"
+ text: "The LLM Automatic Computer Framework"
+ tagline: A collaborative LLM-based framework for complex tasks, bypassing the fixed context limit of LLMs
+ image:
+ src: /l2mac-icon.png
+    alt: The first practical LLM-based general-purpose stored-program automatic computer (von Neumann architecture) framework, an LLM-based multi-agent system, for extensive and consistent output generation.
+ actions:
+ - theme: brand
+ text: Get Started
+ link: /guide/get_started/introduction
+ - theme: alt
+ text: Gallery of Examples
+ link: /guide/use_cases/gallery
+
+features:
+ - title: "Multi-Agent Collaboration"
+ details: "Harness multiple LLM agents operating in tandem to perform complex tasks, bypassing the fixed context limits of individual models."
+ - title: "Extensive Output Generation"
+ details: "Generate extensive outputs like complete codebases or entire books, all from a single prompt, powered by our innovative LLM Automatic Computer."
+ - title: "Advanced Memory Handling"
+ details: "Utilize advanced memory systems that allow agents to store, recall, and utilize past interactions and outputs, enhancing consistency and depth in task handling."
+ - title: "Self-Generating Prompt Programs"
+ details: "Automatically generate and execute sequential prompt programs that guide LLM agents through complex tasks, eliminating the need for manual intervention."
+ - title: "Tool Integration and Error Handling"
+ details: "Incorporate external tools for syntax checking and test running of code, ensuring high-quality and error-free outputs."
+ - title: "Customizable Task Execution"
+ details: "Adapt the framework to various domains by customizing the task execution steps, ensuring versatility and broad applicability."
+
+---
+
diff --git a/docs/package-lock.json b/docs/package-lock.json
new file mode 100644
index 00000000..5cc3307c
--- /dev/null
+++ b/docs/package-lock.json
@@ -0,0 +1,2076 @@
+{
+ "name": "docs",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "devDependencies": {
+ "markdown-it-mathjax3": "^4.3.2",
+ "vitepress": "^1.1.0"
+ }
+ },
+ "node_modules/@algolia/autocomplete-core": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.9.3.tgz",
+ "integrity": "sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/autocomplete-plugin-algolia-insights": "1.9.3",
+ "@algolia/autocomplete-shared": "1.9.3"
+ }
+ },
+ "node_modules/@algolia/autocomplete-plugin-algolia-insights": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.9.3.tgz",
+ "integrity": "sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/autocomplete-shared": "1.9.3"
+ },
+ "peerDependencies": {
+ "search-insights": ">= 1 < 3"
+ }
+ },
+ "node_modules/@algolia/autocomplete-preset-algolia": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.9.3.tgz",
+ "integrity": "sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/autocomplete-shared": "1.9.3"
+ },
+ "peerDependencies": {
+ "@algolia/client-search": ">= 4.9.1 < 6",
+ "algoliasearch": ">= 4.9.1 < 6"
+ }
+ },
+ "node_modules/@algolia/autocomplete-shared": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.9.3.tgz",
+ "integrity": "sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==",
+ "dev": true,
+ "peerDependencies": {
+ "@algolia/client-search": ">= 4.9.1 < 6",
+ "algoliasearch": ">= 4.9.1 < 6"
+ }
+ },
+ "node_modules/@algolia/cache-browser-local-storage": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.23.3.tgz",
+ "integrity": "sha512-vRHXYCpPlTDE7i6UOy2xE03zHF2C8MEFjPN2v7fRbqVpcOvAUQK81x3Kc21xyb5aSIpYCjWCZbYZuz8Glyzyyg==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/cache-common": "4.23.3"
+ }
+ },
+ "node_modules/@algolia/cache-common": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.23.3.tgz",
+ "integrity": "sha512-h9XcNI6lxYStaw32pHpB1TMm0RuxphF+Ik4o7tcQiodEdpKK+wKufY6QXtba7t3k8eseirEMVB83uFFF3Nu54A==",
+ "dev": true
+ },
+ "node_modules/@algolia/cache-in-memory": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.23.3.tgz",
+ "integrity": "sha512-yvpbuUXg/+0rbcagxNT7un0eo3czx2Uf0y4eiR4z4SD7SiptwYTpbuS0IHxcLHG3lq22ukx1T6Kjtk/rT+mqNg==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/cache-common": "4.23.3"
+ }
+ },
+ "node_modules/@algolia/client-account": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.23.3.tgz",
+ "integrity": "sha512-hpa6S5d7iQmretHHF40QGq6hz0anWEHGlULcTIT9tbUssWUriN9AUXIFQ8Ei4w9azD0hc1rUok9/DeQQobhQMA==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/client-common": "4.23.3",
+ "@algolia/client-search": "4.23.3",
+ "@algolia/transporter": "4.23.3"
+ }
+ },
+ "node_modules/@algolia/client-analytics": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.23.3.tgz",
+ "integrity": "sha512-LBsEARGS9cj8VkTAVEZphjxTjMVCci+zIIiRhpFun9jGDUlS1XmhCW7CTrnaWeIuCQS/2iPyRqSy1nXPjcBLRA==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/client-common": "4.23.3",
+ "@algolia/client-search": "4.23.3",
+ "@algolia/requester-common": "4.23.3",
+ "@algolia/transporter": "4.23.3"
+ }
+ },
+ "node_modules/@algolia/client-common": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.23.3.tgz",
+ "integrity": "sha512-l6EiPxdAlg8CYhroqS5ybfIczsGUIAC47slLPOMDeKSVXYG1n0qGiz4RjAHLw2aD0xzh2EXZ7aRguPfz7UKDKw==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/requester-common": "4.23.3",
+ "@algolia/transporter": "4.23.3"
+ }
+ },
+ "node_modules/@algolia/client-personalization": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.23.3.tgz",
+ "integrity": "sha512-3E3yF3Ocr1tB/xOZiuC3doHQBQ2zu2MPTYZ0d4lpfWads2WTKG7ZzmGnsHmm63RflvDeLK/UVx7j2b3QuwKQ2g==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/client-common": "4.23.3",
+ "@algolia/requester-common": "4.23.3",
+ "@algolia/transporter": "4.23.3"
+ }
+ },
+ "node_modules/@algolia/client-search": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.23.3.tgz",
+ "integrity": "sha512-P4VAKFHqU0wx9O+q29Q8YVuaowaZ5EM77rxfmGnkHUJggh28useXQdopokgwMeYw2XUht49WX5RcTQ40rZIabw==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/client-common": "4.23.3",
+ "@algolia/requester-common": "4.23.3",
+ "@algolia/transporter": "4.23.3"
+ }
+ },
+ "node_modules/@algolia/logger-common": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.23.3.tgz",
+ "integrity": "sha512-y9kBtmJwiZ9ZZ+1Ek66P0M68mHQzKRxkW5kAAXYN/rdzgDN0d2COsViEFufxJ0pb45K4FRcfC7+33YB4BLrZ+g==",
+ "dev": true
+ },
+ "node_modules/@algolia/logger-console": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.23.3.tgz",
+ "integrity": "sha512-8xoiseoWDKuCVnWP8jHthgaeobDLolh00KJAdMe9XPrWPuf1by732jSpgy2BlsLTaT9m32pHI8CRfrOqQzHv3A==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/logger-common": "4.23.3"
+ }
+ },
+ "node_modules/@algolia/recommend": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/recommend/-/recommend-4.23.3.tgz",
+ "integrity": "sha512-9fK4nXZF0bFkdcLBRDexsnGzVmu4TSYZqxdpgBW2tEyfuSSY54D4qSRkLmNkrrz4YFvdh2GM1gA8vSsnZPR73w==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/cache-browser-local-storage": "4.23.3",
+ "@algolia/cache-common": "4.23.3",
+ "@algolia/cache-in-memory": "4.23.3",
+ "@algolia/client-common": "4.23.3",
+ "@algolia/client-search": "4.23.3",
+ "@algolia/logger-common": "4.23.3",
+ "@algolia/logger-console": "4.23.3",
+ "@algolia/requester-browser-xhr": "4.23.3",
+ "@algolia/requester-common": "4.23.3",
+ "@algolia/requester-node-http": "4.23.3",
+ "@algolia/transporter": "4.23.3"
+ }
+ },
+ "node_modules/@algolia/requester-browser-xhr": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.23.3.tgz",
+ "integrity": "sha512-jDWGIQ96BhXbmONAQsasIpTYWslyjkiGu0Quydjlowe+ciqySpiDUrJHERIRfELE5+wFc7hc1Q5hqjGoV7yghw==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/requester-common": "4.23.3"
+ }
+ },
+ "node_modules/@algolia/requester-common": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.23.3.tgz",
+ "integrity": "sha512-xloIdr/bedtYEGcXCiF2muajyvRhwop4cMZo+K2qzNht0CMzlRkm8YsDdj5IaBhshqfgmBb3rTg4sL4/PpvLYw==",
+ "dev": true
+ },
+ "node_modules/@algolia/requester-node-http": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.23.3.tgz",
+ "integrity": "sha512-zgu++8Uj03IWDEJM3fuNl34s746JnZOWn1Uz5taV1dFyJhVM/kTNw9Ik7YJWiUNHJQXcaD8IXD1eCb0nq/aByA==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/requester-common": "4.23.3"
+ }
+ },
+ "node_modules/@algolia/transporter": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.23.3.tgz",
+ "integrity": "sha512-Wjl5gttqnf/gQKJA+dafnD0Y6Yw97yvfY8R9h0dQltX1GXTgNs1zWgvtWW0tHl1EgMdhAyw189uWiZMnL3QebQ==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/cache-common": "4.23.3",
+ "@algolia/logger-common": "4.23.3",
+ "@algolia/requester-common": "4.23.3"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.24.4",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.4.tgz",
+ "integrity": "sha512-zTvEBcghmeBma9QIGunWevvBAp4/Qu9Bdq+2k0Ot4fVMD6v3dsC9WOcRSKk7tRRyBM/53yKMJko9xOatGQAwSg==",
+ "dev": true,
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@docsearch/css": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.6.0.tgz",
+ "integrity": "sha512-+sbxb71sWre+PwDK7X2T8+bhS6clcVMLwBPznX45Qu6opJcgRjAp7gYSDzVFp187J+feSj5dNBN1mJoi6ckkUQ==",
+ "dev": true
+ },
+ "node_modules/@docsearch/js": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/@docsearch/js/-/js-3.6.0.tgz",
+ "integrity": "sha512-QujhqINEElrkIfKwyyyTfbsfMAYCkylInLYMRqHy7PHc8xTBQCow73tlo/Kc7oIwBrCLf0P3YhjlOeV4v8hevQ==",
+ "dev": true,
+ "dependencies": {
+ "@docsearch/react": "3.6.0",
+ "preact": "^10.0.0"
+ }
+ },
+ "node_modules/@docsearch/react": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.6.0.tgz",
+ "integrity": "sha512-HUFut4ztcVNmqy9gp/wxNbC7pTOHhgVVkHVGCACTuLhUKUhKAF9KYHJtMiLUJxEqiFLQiuri1fWF8zqwM/cu1w==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/autocomplete-core": "1.9.3",
+ "@algolia/autocomplete-preset-algolia": "1.9.3",
+ "@docsearch/css": "3.6.0",
+ "algoliasearch": "^4.19.1"
+ },
+ "peerDependencies": {
+ "@types/react": ">= 16.8.0 < 19.0.0",
+ "react": ">= 16.8.0 < 19.0.0",
+ "react-dom": ">= 16.8.0 < 19.0.0",
+ "search-insights": ">= 1 < 3"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "react": {
+ "optional": true
+ },
+ "react-dom": {
+ "optional": true
+ },
+ "search-insights": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.20.2.tgz",
+ "integrity": "sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.20.2.tgz",
+ "integrity": "sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.20.2.tgz",
+ "integrity": "sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.20.2.tgz",
+ "integrity": "sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.20.2.tgz",
+ "integrity": "sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.20.2.tgz",
+ "integrity": "sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.20.2.tgz",
+ "integrity": "sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.20.2.tgz",
+ "integrity": "sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.20.2.tgz",
+ "integrity": "sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.20.2.tgz",
+ "integrity": "sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.20.2.tgz",
+ "integrity": "sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.20.2.tgz",
+ "integrity": "sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.20.2.tgz",
+ "integrity": "sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.20.2.tgz",
+ "integrity": "sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.20.2.tgz",
+ "integrity": "sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.20.2.tgz",
+ "integrity": "sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.20.2.tgz",
+ "integrity": "sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.20.2.tgz",
+ "integrity": "sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.20.2.tgz",
+ "integrity": "sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.20.2.tgz",
+ "integrity": "sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.20.2.tgz",
+ "integrity": "sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.20.2.tgz",
+ "integrity": "sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.20.2.tgz",
+ "integrity": "sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.4.15",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
+ "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==",
+ "dev": true
+ },
+ "node_modules/@rollup/rollup-android-arm-eabi": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.14.2.tgz",
+ "integrity": "sha512-ahxSgCkAEk+P/AVO0vYr7DxOD3CwAQrT0Go9BJyGQ9Ef0QxVOfjDZMiF4Y2s3mLyPrjonchIMH/tbWHucJMykQ==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-android-arm64": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.14.2.tgz",
+ "integrity": "sha512-lAarIdxZWbFSHFSDao9+I/F5jDaKyCqAPMq5HqnfpBw8dKDiCaaqM0lq5h1pQTLeIqueeay4PieGR5jGZMWprw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-arm64": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.14.2.tgz",
+ "integrity": "sha512-SWsr8zEUk82KSqquIMgZEg2GE5mCSfr9sE/thDROkX6pb3QQWPp8Vw8zOq2GyxZ2t0XoSIUlvHDkrf5Gmf7x3Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-x64": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.14.2.tgz",
+ "integrity": "sha512-o/HAIrQq0jIxJAhgtIvV5FWviYK4WB0WwV91SLUnsliw1lSAoLsmgEEgRWzDguAFeUEUUoIWXiJrPqU7vGiVkA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.14.2.tgz",
+ "integrity": "sha512-nwlJ65UY9eGq91cBi6VyDfArUJSKOYt5dJQBq8xyLhvS23qO+4Nr/RreibFHjP6t+5ap2ohZrUJcHv5zk5ju/g==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-gnu": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.14.2.tgz",
+ "integrity": "sha512-Pg5TxxO2IVlMj79+c/9G0LREC9SY3HM+pfAwX7zj5/cAuwrbfj2Wv9JbMHIdPCfQpYsI4g9mE+2Bw/3aeSs2rQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-musl": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.14.2.tgz",
+ "integrity": "sha512-cAOTjGNm84gc6tS02D1EXtG7tDRsVSDTBVXOLbj31DkwfZwgTPYZ6aafSU7rD/4R2a34JOwlF9fQayuTSkoclA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-powerpc64le-gnu": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.14.2.tgz",
+ "integrity": "sha512-4RyT6v1kXb7C0fn6zV33rvaX05P0zHoNzaXI/5oFHklfKm602j+N4mn2YvoezQViRLPnxP8M1NaY4s/5kXO5cw==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-gnu": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.14.2.tgz",
+ "integrity": "sha512-KNUH6jC/vRGAKSorySTyc/yRYlCwN/5pnMjXylfBniwtJx5O7X17KG/0efj8XM3TZU7raYRXJFFReOzNmL1n1w==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-s390x-gnu": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.14.2.tgz",
+ "integrity": "sha512-xPV4y73IBEXToNPa3h5lbgXOi/v0NcvKxU0xejiFw6DtIYQqOTMhZ2DN18/HrrP0PmiL3rGtRG9gz1QE8vFKXQ==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-gnu": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.14.2.tgz",
+ "integrity": "sha512-QBhtr07iFGmF9egrPOWyO5wciwgtzKkYPNLVCFZTmr4TWmY0oY2Dm/bmhHjKRwZoGiaKdNcKhFtUMBKvlchH+Q==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-musl": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.14.2.tgz",
+ "integrity": "sha512-8zfsQRQGH23O6qazZSFY5jP5gt4cFvRuKTpuBsC1ZnSWxV8ZKQpPqOZIUtdfMOugCcBvFGRa1pDC/tkf19EgBw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-arm64-msvc": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.14.2.tgz",
+ "integrity": "sha512-H4s8UjgkPnlChl6JF5empNvFHp77Jx+Wfy2EtmYPe9G22XV+PMuCinZVHurNe8ggtwoaohxARJZbaH/3xjB/FA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-ia32-msvc": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.14.2.tgz",
+ "integrity": "sha512-djqpAjm/i8erWYF0K6UY4kRO3X5+T4TypIqw60Q8MTqSBaQNpNXDhxdjpZ3ikgb+wn99svA7jxcXpiyg9MUsdw==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-msvc": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.14.2.tgz",
+ "integrity": "sha512-teAqzLT0yTYZa8ZP7zhFKEx4cotS8Tkk5XiqNMJhD4CpaWB1BHARE4Qy+RzwnXvSAYv+Q3jAqCVBS+PS+Yee8Q==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@shikijs/core": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-1.3.0.tgz",
+ "integrity": "sha512-7fedsBfuILDTBmrYZNFI8B6ATTxhQAasUHllHmjvSZPnoq4bULWoTpHwmuQvZ8Aq03/tAa2IGo6RXqWtHdWaCA==",
+ "dev": true
+ },
+ "node_modules/@shikijs/transformers": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/@shikijs/transformers/-/transformers-1.3.0.tgz",
+ "integrity": "sha512-3mlpg2I9CjhjE96dEWQOGeCWoPcyTov3s4aAsHmgvnTHa8MBknEnCQy8/xivJPSpD+olqOqIEoHnLfbNJK29AA==",
+ "dev": true,
+ "dependencies": {
+ "shiki": "1.3.0"
+ }
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz",
+ "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==",
+ "dev": true
+ },
+ "node_modules/@types/linkify-it": {
+ "version": "3.0.5",
+ "resolved": "https://registry.npmjs.org/@types/linkify-it/-/linkify-it-3.0.5.tgz",
+ "integrity": "sha512-yg6E+u0/+Zjva+buc3EIb+29XEg4wltq7cSmd4Uc2EE/1nUVmxyzpX6gUXD0V8jIrG0r7YeOGVIbYRkxeooCtw==",
+ "dev": true
+ },
+ "node_modules/@types/markdown-it": {
+ "version": "13.0.7",
+ "resolved": "https://registry.npmjs.org/@types/markdown-it/-/markdown-it-13.0.7.tgz",
+ "integrity": "sha512-U/CBi2YUUcTHBt5tjO2r5QV/x0Po6nsYwQU4Y04fBS6vfoImaiZ6f8bi3CjTCxBPQSO1LMyUqkByzi8AidyxfA==",
+ "dev": true,
+ "dependencies": {
+ "@types/linkify-it": "*",
+ "@types/mdurl": "*"
+ }
+ },
+ "node_modules/@types/mdurl": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@types/mdurl/-/mdurl-1.0.5.tgz",
+ "integrity": "sha512-6L6VymKTzYSrEf4Nev4Xa1LCHKrlTlYCBMTlQKFuddo1CvQcE52I0mwfOJayueUC7MJuXOeHTcIU683lzd0cUA==",
+ "dev": true
+ },
+ "node_modules/@types/web-bluetooth": {
+ "version": "0.0.20",
+ "resolved": "https://registry.npmjs.org/@types/web-bluetooth/-/web-bluetooth-0.0.20.tgz",
+ "integrity": "sha512-g9gZnnXVq7gM7v3tJCWV/qw7w+KeOlSHAhgF9RytFyifW6AF61hdT2ucrYhPq9hLs5JIryeupHV3qGk95dH9ow==",
+ "dev": true
+ },
+ "node_modules/@vitejs/plugin-vue": {
+ "version": "5.0.4",
+ "resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-5.0.4.tgz",
+ "integrity": "sha512-WS3hevEszI6CEVEx28F8RjTX97k3KsrcY6kvTg7+Whm5y3oYvcqzVeGCU3hxSAn4uY2CLCkeokkGKpoctccilQ==",
+ "dev": true,
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ },
+ "peerDependencies": {
+ "vite": "^5.0.0",
+ "vue": "^3.2.25"
+ }
+ },
+ "node_modules/@vue/compiler-core": {
+ "version": "3.4.21",
+ "resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.4.21.tgz",
+ "integrity": "sha512-MjXawxZf2SbZszLPYxaFCjxfibYrzr3eYbKxwpLR9EQN+oaziSu3qKVbwBERj1IFIB8OLUewxB5m/BFzi613og==",
+ "dev": true,
+ "dependencies": {
+ "@babel/parser": "^7.23.9",
+ "@vue/shared": "3.4.21",
+ "entities": "^4.5.0",
+ "estree-walker": "^2.0.2",
+ "source-map-js": "^1.0.2"
+ }
+ },
+ "node_modules/@vue/compiler-dom": {
+ "version": "3.4.21",
+ "resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.4.21.tgz",
+ "integrity": "sha512-IZC6FKowtT1sl0CR5DpXSiEB5ayw75oT2bma1BEhV7RRR1+cfwLrxc2Z8Zq/RGFzJ8w5r9QtCOvTjQgdn0IKmA==",
+ "dev": true,
+ "dependencies": {
+ "@vue/compiler-core": "3.4.21",
+ "@vue/shared": "3.4.21"
+ }
+ },
+ "node_modules/@vue/compiler-sfc": {
+ "version": "3.4.21",
+ "resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.4.21.tgz",
+ "integrity": "sha512-me7epoTxYlY+2CUM7hy9PCDdpMPfIwrOvAXud2Upk10g4YLv9UBW7kL798TvMeDhPthkZ0CONNrK2GoeI1ODiQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/parser": "^7.23.9",
+ "@vue/compiler-core": "3.4.21",
+ "@vue/compiler-dom": "3.4.21",
+ "@vue/compiler-ssr": "3.4.21",
+ "@vue/shared": "3.4.21",
+ "estree-walker": "^2.0.2",
+ "magic-string": "^0.30.7",
+ "postcss": "^8.4.35",
+ "source-map-js": "^1.0.2"
+ }
+ },
+ "node_modules/@vue/compiler-ssr": {
+ "version": "3.4.21",
+ "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.4.21.tgz",
+ "integrity": "sha512-M5+9nI2lPpAsgXOGQobnIueVqc9sisBFexh5yMIMRAPYLa7+5wEJs8iqOZc1WAa9WQbx9GR2twgznU8LTIiZ4Q==",
+ "dev": true,
+ "dependencies": {
+ "@vue/compiler-dom": "3.4.21",
+ "@vue/shared": "3.4.21"
+ }
+ },
+ "node_modules/@vue/devtools-api": {
+ "version": "7.0.27",
+ "resolved": "https://registry.npmjs.org/@vue/devtools-api/-/devtools-api-7.0.27.tgz",
+ "integrity": "sha512-BFCFCusSDcw2UcOFD/QeK7OxD1x2C/m+uAN30Q7jLKECSW53hmz0urzJmX834GuWDZX/hIxkyUKnLLfEIP1c/w==",
+ "dev": true,
+ "dependencies": {
+ "@vue/devtools-kit": "^7.0.27"
+ }
+ },
+ "node_modules/@vue/devtools-kit": {
+ "version": "7.0.27",
+ "resolved": "https://registry.npmjs.org/@vue/devtools-kit/-/devtools-kit-7.0.27.tgz",
+ "integrity": "sha512-/A5xM38pPCFX5Yhl/lRFAzjyK6VNsH670nww2WbjFKWqlu3I+lMxWKzQkCW6A1V8bduITgl2kHORfg2gTw6QaA==",
+ "dev": true,
+ "dependencies": {
+ "@vue/devtools-shared": "^7.0.27",
+ "hookable": "^5.5.3",
+ "mitt": "^3.0.1",
+ "perfect-debounce": "^1.0.0",
+ "speakingurl": "^14.0.1"
+ },
+ "peerDependencies": {
+ "vue": "^3.0.0"
+ }
+ },
+ "node_modules/@vue/devtools-shared": {
+ "version": "7.0.27",
+ "resolved": "https://registry.npmjs.org/@vue/devtools-shared/-/devtools-shared-7.0.27.tgz",
+ "integrity": "sha512-4VxtmZ6yjhiSloqZZq2UYU0TBGxOJ8GxWvp5OlAH70zYqi0FIAyWGPkOhvfoZ7DKQyv2UU0mmKzFHjsEkelGyQ==",
+ "dev": true,
+ "dependencies": {
+ "rfdc": "^1.3.1"
+ }
+ },
+ "node_modules/@vue/reactivity": {
+ "version": "3.4.21",
+ "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.4.21.tgz",
+ "integrity": "sha512-UhenImdc0L0/4ahGCyEzc/pZNwVgcglGy9HVzJ1Bq2Mm9qXOpP8RyNTjookw/gOCUlXSEtuZ2fUg5nrHcoqJcw==",
+ "dev": true,
+ "dependencies": {
+ "@vue/shared": "3.4.21"
+ }
+ },
+ "node_modules/@vue/runtime-core": {
+ "version": "3.4.21",
+ "resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.4.21.tgz",
+ "integrity": "sha512-pQthsuYzE1XcGZznTKn73G0s14eCJcjaLvp3/DKeYWoFacD9glJoqlNBxt3W2c5S40t6CCcpPf+jG01N3ULyrA==",
+ "dev": true,
+ "dependencies": {
+ "@vue/reactivity": "3.4.21",
+ "@vue/shared": "3.4.21"
+ }
+ },
+ "node_modules/@vue/runtime-dom": {
+ "version": "3.4.21",
+ "resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.4.21.tgz",
+ "integrity": "sha512-gvf+C9cFpevsQxbkRBS1NpU8CqxKw0ebqMvLwcGQrNpx6gqRDodqKqA+A2VZZpQ9RpK2f9yfg8VbW/EpdFUOJw==",
+ "dev": true,
+ "dependencies": {
+ "@vue/runtime-core": "3.4.21",
+ "@vue/shared": "3.4.21",
+ "csstype": "^3.1.3"
+ }
+ },
+ "node_modules/@vue/server-renderer": {
+ "version": "3.4.21",
+ "resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.4.21.tgz",
+ "integrity": "sha512-aV1gXyKSN6Rz+6kZ6kr5+Ll14YzmIbeuWe7ryJl5muJ4uwSwY/aStXTixx76TwkZFJLm1aAlA/HSWEJ4EyiMkg==",
+ "dev": true,
+ "dependencies": {
+ "@vue/compiler-ssr": "3.4.21",
+ "@vue/shared": "3.4.21"
+ },
+ "peerDependencies": {
+ "vue": "3.4.21"
+ }
+ },
+ "node_modules/@vue/shared": {
+ "version": "3.4.21",
+ "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.4.21.tgz",
+ "integrity": "sha512-PuJe7vDIi6VYSinuEbUIQgMIRZGgM8e4R+G+/dQTk0X1NEdvgvvgv7m+rfmDH1gZzyA1OjjoWskvHlfRNfQf3g==",
+ "dev": true
+ },
+ "node_modules/@vueuse/core": {
+ "version": "10.9.0",
+ "resolved": "https://registry.npmjs.org/@vueuse/core/-/core-10.9.0.tgz",
+ "integrity": "sha512-/1vjTol8SXnx6xewDEKfS0Ra//ncg4Hb0DaZiwKf7drgfMsKFExQ+FnnENcN6efPen+1kIzhLQoGSy0eDUVOMg==",
+ "dev": true,
+ "dependencies": {
+ "@types/web-bluetooth": "^0.0.20",
+ "@vueuse/metadata": "10.9.0",
+ "@vueuse/shared": "10.9.0",
+ "vue-demi": ">=0.14.7"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antfu"
+ }
+ },
+ "node_modules/@vueuse/core/node_modules/vue-demi": {
+ "version": "0.14.7",
+ "resolved": "https://registry.npmjs.org/vue-demi/-/vue-demi-0.14.7.tgz",
+ "integrity": "sha512-EOG8KXDQNwkJILkx/gPcoL/7vH+hORoBaKgGe+6W7VFMvCYJfmF2dGbvgDroVnI8LU7/kTu8mbjRZGBU1z9NTA==",
+ "dev": true,
+ "hasInstallScript": true,
+ "bin": {
+ "vue-demi-fix": "bin/vue-demi-fix.js",
+ "vue-demi-switch": "bin/vue-demi-switch.js"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antfu"
+ },
+ "peerDependencies": {
+ "@vue/composition-api": "^1.0.0-rc.1",
+ "vue": "^3.0.0-0 || ^2.6.0"
+ },
+ "peerDependenciesMeta": {
+ "@vue/composition-api": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@vueuse/integrations": {
+ "version": "10.9.0",
+ "resolved": "https://registry.npmjs.org/@vueuse/integrations/-/integrations-10.9.0.tgz",
+ "integrity": "sha512-acK+A01AYdWSvL4BZmCoJAcyHJ6EqhmkQEXbQLwev1MY7NBnS+hcEMx/BzVoR9zKI+UqEPMD9u6PsyAuiTRT4Q==",
+ "dev": true,
+ "dependencies": {
+ "@vueuse/core": "10.9.0",
+ "@vueuse/shared": "10.9.0",
+ "vue-demi": ">=0.14.7"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antfu"
+ },
+ "peerDependencies": {
+ "async-validator": "*",
+ "axios": "*",
+ "change-case": "*",
+ "drauu": "*",
+ "focus-trap": "*",
+ "fuse.js": "*",
+ "idb-keyval": "*",
+ "jwt-decode": "*",
+ "nprogress": "*",
+ "qrcode": "*",
+ "sortablejs": "*",
+ "universal-cookie": "*"
+ },
+ "peerDependenciesMeta": {
+ "async-validator": {
+ "optional": true
+ },
+ "axios": {
+ "optional": true
+ },
+ "change-case": {
+ "optional": true
+ },
+ "drauu": {
+ "optional": true
+ },
+ "focus-trap": {
+ "optional": true
+ },
+ "fuse.js": {
+ "optional": true
+ },
+ "idb-keyval": {
+ "optional": true
+ },
+ "jwt-decode": {
+ "optional": true
+ },
+ "nprogress": {
+ "optional": true
+ },
+ "qrcode": {
+ "optional": true
+ },
+ "sortablejs": {
+ "optional": true
+ },
+ "universal-cookie": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@vueuse/integrations/node_modules/vue-demi": {
+ "version": "0.14.7",
+ "resolved": "https://registry.npmjs.org/vue-demi/-/vue-demi-0.14.7.tgz",
+ "integrity": "sha512-EOG8KXDQNwkJILkx/gPcoL/7vH+hORoBaKgGe+6W7VFMvCYJfmF2dGbvgDroVnI8LU7/kTu8mbjRZGBU1z9NTA==",
+ "dev": true,
+ "hasInstallScript": true,
+ "bin": {
+ "vue-demi-fix": "bin/vue-demi-fix.js",
+ "vue-demi-switch": "bin/vue-demi-switch.js"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antfu"
+ },
+ "peerDependencies": {
+ "@vue/composition-api": "^1.0.0-rc.1",
+ "vue": "^3.0.0-0 || ^2.6.0"
+ },
+ "peerDependenciesMeta": {
+ "@vue/composition-api": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@vueuse/metadata": {
+ "version": "10.9.0",
+ "resolved": "https://registry.npmjs.org/@vueuse/metadata/-/metadata-10.9.0.tgz",
+ "integrity": "sha512-iddNbg3yZM0X7qFY2sAotomgdHK7YJ6sKUvQqbvwnf7TmaVPxS4EJydcNsVejNdS8iWCtDk+fYXr7E32nyTnGA==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/sponsors/antfu"
+ }
+ },
+ "node_modules/@vueuse/shared": {
+ "version": "10.9.0",
+ "resolved": "https://registry.npmjs.org/@vueuse/shared/-/shared-10.9.0.tgz",
+ "integrity": "sha512-Uud2IWncmAfJvRaFYzv5OHDli+FbOzxiVEQdLCKQKLyhz94PIyFC3CHcH7EDMwIn8NPtD06+PNbC/PiO0LGLtw==",
+ "dev": true,
+ "dependencies": {
+ "vue-demi": ">=0.14.7"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antfu"
+ }
+ },
+ "node_modules/@vueuse/shared/node_modules/vue-demi": {
+ "version": "0.14.7",
+ "resolved": "https://registry.npmjs.org/vue-demi/-/vue-demi-0.14.7.tgz",
+ "integrity": "sha512-EOG8KXDQNwkJILkx/gPcoL/7vH+hORoBaKgGe+6W7VFMvCYJfmF2dGbvgDroVnI8LU7/kTu8mbjRZGBU1z9NTA==",
+ "dev": true,
+ "hasInstallScript": true,
+ "bin": {
+ "vue-demi-fix": "bin/vue-demi-fix.js",
+ "vue-demi-switch": "bin/vue-demi-switch.js"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antfu"
+ },
+ "peerDependencies": {
+ "@vue/composition-api": "^1.0.0-rc.1",
+ "vue": "^3.0.0-0 || ^2.6.0"
+ },
+ "peerDependenciesMeta": {
+ "@vue/composition-api": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/algoliasearch": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.23.3.tgz",
+ "integrity": "sha512-Le/3YgNvjW9zxIQMRhUHuhiUjAlKY/zsdZpfq4dlLqg6mEm0nL6yk+7f2hDOtLpxsgE4jSzDmvHL7nXdBp5feg==",
+ "dev": true,
+ "dependencies": {
+ "@algolia/cache-browser-local-storage": "4.23.3",
+ "@algolia/cache-common": "4.23.3",
+ "@algolia/cache-in-memory": "4.23.3",
+ "@algolia/client-account": "4.23.3",
+ "@algolia/client-analytics": "4.23.3",
+ "@algolia/client-common": "4.23.3",
+ "@algolia/client-personalization": "4.23.3",
+ "@algolia/client-search": "4.23.3",
+ "@algolia/logger-common": "4.23.3",
+ "@algolia/logger-console": "4.23.3",
+ "@algolia/recommend": "4.23.3",
+ "@algolia/requester-browser-xhr": "4.23.3",
+ "@algolia/requester-common": "4.23.3",
+ "@algolia/requester-node-http": "4.23.3",
+ "@algolia/transporter": "4.23.3"
+ }
+ },
+ "node_modules/ansi-colors": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz",
+ "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/boolbase": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
+ "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==",
+ "dev": true
+ },
+ "node_modules/cheerio": {
+ "version": "1.0.0-rc.10",
+ "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.10.tgz",
+ "integrity": "sha512-g0J0q/O6mW8z5zxQ3A8E8J1hUgp4SMOvEoW/x84OwyHKe/Zccz83PVT4y5Crcr530FV6NgmKI1qvGTKVl9XXVw==",
+ "dev": true,
+ "dependencies": {
+ "cheerio-select": "^1.5.0",
+ "dom-serializer": "^1.3.2",
+ "domhandler": "^4.2.0",
+ "htmlparser2": "^6.1.0",
+ "parse5": "^6.0.1",
+ "parse5-htmlparser2-tree-adapter": "^6.0.1",
+ "tslib": "^2.2.0"
+ },
+ "engines": {
+ "node": ">= 6"
+ },
+ "funding": {
+ "url": "https://github.com/cheeriojs/cheerio?sponsor=1"
+ }
+ },
+ "node_modules/cheerio-select": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-1.6.0.tgz",
+ "integrity": "sha512-eq0GdBvxVFbqWgmCm7M3XGs1I8oLy/nExUnh6oLqmBditPO9AqQJrkslDpMun/hZ0yyTs8L0m85OHp4ho6Qm9g==",
+ "dev": true,
+ "dependencies": {
+ "css-select": "^4.3.0",
+ "css-what": "^6.0.1",
+ "domelementtype": "^2.2.0",
+ "domhandler": "^4.3.1",
+ "domutils": "^2.8.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/fb55"
+ }
+ },
+ "node_modules/commander": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.1.tgz",
+ "integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/css-select": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz",
+ "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==",
+ "dev": true,
+ "dependencies": {
+ "boolbase": "^1.0.0",
+ "css-what": "^6.0.1",
+ "domhandler": "^4.3.1",
+ "domutils": "^2.8.0",
+ "nth-check": "^2.0.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/fb55"
+ }
+ },
+ "node_modules/css-what": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz",
+ "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/fb55"
+ }
+ },
+ "node_modules/csstype": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz",
+ "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==",
+ "dev": true
+ },
+ "node_modules/dom-serializer": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz",
+ "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==",
+ "dev": true,
+ "dependencies": {
+ "domelementtype": "^2.0.1",
+ "domhandler": "^4.2.0",
+ "entities": "^2.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1"
+ }
+ },
+ "node_modules/dom-serializer/node_modules/entities": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz",
+ "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/domelementtype": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz",
+ "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/fb55"
+ }
+ ]
+ },
+ "node_modules/domhandler": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz",
+ "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==",
+ "dev": true,
+ "dependencies": {
+ "domelementtype": "^2.2.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/domhandler?sponsor=1"
+ }
+ },
+ "node_modules/domutils": {
+ "version": "2.8.0",
+ "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz",
+ "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==",
+ "dev": true,
+ "dependencies": {
+ "dom-serializer": "^1.0.1",
+ "domelementtype": "^2.2.0",
+ "domhandler": "^4.2.0"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/domutils?sponsor=1"
+ }
+ },
+ "node_modules/entities": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz",
+ "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.12"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/esbuild": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.20.2.tgz",
+ "integrity": "sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==",
+ "dev": true,
+ "hasInstallScript": true,
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.20.2",
+ "@esbuild/android-arm": "0.20.2",
+ "@esbuild/android-arm64": "0.20.2",
+ "@esbuild/android-x64": "0.20.2",
+ "@esbuild/darwin-arm64": "0.20.2",
+ "@esbuild/darwin-x64": "0.20.2",
+ "@esbuild/freebsd-arm64": "0.20.2",
+ "@esbuild/freebsd-x64": "0.20.2",
+ "@esbuild/linux-arm": "0.20.2",
+ "@esbuild/linux-arm64": "0.20.2",
+ "@esbuild/linux-ia32": "0.20.2",
+ "@esbuild/linux-loong64": "0.20.2",
+ "@esbuild/linux-mips64el": "0.20.2",
+ "@esbuild/linux-ppc64": "0.20.2",
+ "@esbuild/linux-riscv64": "0.20.2",
+ "@esbuild/linux-s390x": "0.20.2",
+ "@esbuild/linux-x64": "0.20.2",
+ "@esbuild/netbsd-x64": "0.20.2",
+ "@esbuild/openbsd-x64": "0.20.2",
+ "@esbuild/sunos-x64": "0.20.2",
+ "@esbuild/win32-arm64": "0.20.2",
+ "@esbuild/win32-ia32": "0.20.2",
+ "@esbuild/win32-x64": "0.20.2"
+ }
+ },
+ "node_modules/escape-goat": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-3.0.0.tgz",
+ "integrity": "sha512-w3PwNZJwRxlp47QGzhuEBldEqVHHhh8/tIPcl6ecf2Bou99cdAt0knihBV0Ecc7CGxYduXVBDheH1K2oADRlvw==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/esm": {
+ "version": "3.2.25",
+ "resolved": "https://registry.npmjs.org/esm/-/esm-3.2.25.tgz",
+ "integrity": "sha512-U1suiZ2oDVWv4zPO56S0NcR5QriEahGtdN2OR6FiOG4WJvcjBVFB0qI4+eKoWFH483PKGuLuu6V8Z4T5g63UVA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/estree-walker": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz",
+ "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==",
+ "dev": true
+ },
+ "node_modules/focus-trap": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/focus-trap/-/focus-trap-7.5.4.tgz",
+ "integrity": "sha512-N7kHdlgsO/v+iD/dMoJKtsSqs5Dz/dXZVebRgJw23LDk+jMi/974zyiOYDziY2JPp8xivq9BmUGwIJMiuSBi7w==",
+ "dev": true,
+ "dependencies": {
+ "tabbable": "^6.2.0"
+ }
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/hookable": {
+ "version": "5.5.3",
+ "resolved": "https://registry.npmjs.org/hookable/-/hookable-5.5.3.tgz",
+ "integrity": "sha512-Yc+BQe8SvoXH1643Qez1zqLRmbA5rCL+sSmk6TVos0LWVfNIB7PGncdlId77WzLGSIB5KaWgTaNTs2lNVEI6VQ==",
+ "dev": true
+ },
+ "node_modules/htmlparser2": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz",
+ "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==",
+ "dev": true,
+ "funding": [
+ "https://github.com/fb55/htmlparser2?sponsor=1",
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/fb55"
+ }
+ ],
+ "dependencies": {
+ "domelementtype": "^2.0.1",
+ "domhandler": "^4.0.0",
+ "domutils": "^2.5.2",
+ "entities": "^2.0.0"
+ }
+ },
+ "node_modules/htmlparser2/node_modules/entities": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz",
+ "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/juice": {
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/juice/-/juice-8.1.0.tgz",
+ "integrity": "sha512-FLzurJrx5Iv1e7CfBSZH68dC04EEvXvvVvPYB7Vx1WAuhCp1ZPIMtqxc+WTWxVkpTIC2Ach/GAv0rQbtGf6YMA==",
+ "dev": true,
+ "dependencies": {
+ "cheerio": "1.0.0-rc.10",
+ "commander": "^6.1.0",
+ "mensch": "^0.3.4",
+ "slick": "^1.12.2",
+ "web-resource-inliner": "^6.0.1"
+ },
+ "bin": {
+ "juice": "bin/juice"
+ },
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/magic-string": {
+ "version": "0.30.9",
+ "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.9.tgz",
+ "integrity": "sha512-S1+hd+dIrC8EZqKyT9DstTH/0Z+f76kmmvZnkfQVmOpDEF9iVgdYif3Q/pIWHmCoo59bQVGW0kVL3e2nl+9+Sw==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.4.15"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/mark.js": {
+ "version": "8.11.1",
+ "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz",
+ "integrity": "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==",
+ "dev": true
+ },
+ "node_modules/markdown-it-mathjax3": {
+ "version": "4.3.2",
+ "resolved": "https://registry.npmjs.org/markdown-it-mathjax3/-/markdown-it-mathjax3-4.3.2.tgz",
+ "integrity": "sha512-TX3GW5NjmupgFtMJGRauioMbbkGsOXAAt1DZ/rzzYmTHqzkO1rNAdiMD4NiruurToPApn2kYy76x02QN26qr2w==",
+ "dev": true,
+ "dependencies": {
+ "juice": "^8.0.0",
+ "mathjax-full": "^3.2.0"
+ }
+ },
+ "node_modules/mathjax-full": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/mathjax-full/-/mathjax-full-3.2.2.tgz",
+ "integrity": "sha512-+LfG9Fik+OuI8SLwsiR02IVdjcnRCy5MufYLi0C3TdMT56L/pjB0alMVGgoWJF8pN9Rc7FESycZB9BMNWIid5w==",
+ "dev": true,
+ "dependencies": {
+ "esm": "^3.2.25",
+ "mhchemparser": "^4.1.0",
+ "mj-context-menu": "^0.6.1",
+ "speech-rule-engine": "^4.0.6"
+ }
+ },
+ "node_modules/mensch": {
+ "version": "0.3.4",
+ "resolved": "https://registry.npmjs.org/mensch/-/mensch-0.3.4.tgz",
+ "integrity": "sha512-IAeFvcOnV9V0Yk+bFhYR07O3yNina9ANIN5MoXBKYJ/RLYPurd2d0yw14MDhpr9/momp0WofT1bPUh3hkzdi/g==",
+ "dev": true
+ },
+ "node_modules/mhchemparser": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/mhchemparser/-/mhchemparser-4.2.1.tgz",
+ "integrity": "sha512-kYmyrCirqJf3zZ9t/0wGgRZ4/ZJw//VwaRVGA75C4nhE60vtnIzhl9J9ndkX/h6hxSN7pjg/cE0VxbnNM+bnDQ==",
+ "dev": true
+ },
+ "node_modules/mime": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz",
+ "integrity": "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg==",
+ "dev": true,
+ "bin": {
+ "mime": "cli.js"
+ },
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/minisearch": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/minisearch/-/minisearch-6.3.0.tgz",
+ "integrity": "sha512-ihFnidEeU8iXzcVHy74dhkxh/dn8Dc08ERl0xwoMMGqp4+LvRSCgicb+zGqWthVokQKvCSxITlh3P08OzdTYCQ==",
+ "dev": true
+ },
+ "node_modules/mitt": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/mitt/-/mitt-3.0.1.tgz",
+ "integrity": "sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==",
+ "dev": true
+ },
+ "node_modules/mj-context-menu": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/mj-context-menu/-/mj-context-menu-0.6.1.tgz",
+ "integrity": "sha512-7NO5s6n10TIV96d4g2uDpG7ZDpIhMh0QNfGdJw/W47JswFcosz457wqz/b5sAKvl12sxINGFCn80NZHKwxQEXA==",
+ "dev": true
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.7",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz",
+ "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/node-fetch": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
+ "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
+ "dev": true,
+ "dependencies": {
+ "whatwg-url": "^5.0.0"
+ },
+ "engines": {
+ "node": "4.x || >=6.0.0"
+ },
+ "peerDependencies": {
+ "encoding": "^0.1.0"
+ },
+ "peerDependenciesMeta": {
+ "encoding": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/nth-check": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz",
+ "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==",
+ "dev": true,
+ "dependencies": {
+ "boolbase": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/nth-check?sponsor=1"
+ }
+ },
+ "node_modules/parse5": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz",
+ "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==",
+ "dev": true
+ },
+ "node_modules/parse5-htmlparser2-tree-adapter": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz",
+ "integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==",
+ "dev": true,
+ "dependencies": {
+ "parse5": "^6.0.1"
+ }
+ },
+ "node_modules/perfect-debounce": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/perfect-debounce/-/perfect-debounce-1.0.0.tgz",
+ "integrity": "sha512-xCy9V055GLEqoFaHoC1SoLIaLmWctgCUaBaWxDZ7/Zx4CTyX7cJQLJOok/orfjZAh9kEYpjJa4d0KcJmCbctZA==",
+ "dev": true
+ },
+ "node_modules/picocolors": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
+ "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==",
+ "dev": true
+ },
+ "node_modules/postcss": {
+ "version": "8.4.38",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz",
+ "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "nanoid": "^3.3.7",
+ "picocolors": "^1.0.0",
+ "source-map-js": "^1.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/preact": {
+ "version": "10.20.2",
+ "resolved": "https://registry.npmjs.org/preact/-/preact-10.20.2.tgz",
+ "integrity": "sha512-S1d1ernz3KQ+Y2awUxKakpfOg2CEmJmwOP+6igPx6dgr6pgDvenqYviyokWso2rhHvGtTlWWnJDa7RaPbQerTg==",
+ "dev": true,
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/preact"
+ }
+ },
+ "node_modules/rfdc": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.3.1.tgz",
+ "integrity": "sha512-r5a3l5HzYlIC68TpmYKlxWjmOP6wiPJ1vWv2HeLhNsRZMrCkxeqxiHlQ21oXmQ4F3SiryXBHhAD7JZqvOJjFmg==",
+ "dev": true
+ },
+ "node_modules/rollup": {
+ "version": "4.14.2",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.14.2.tgz",
+ "integrity": "sha512-WkeoTWvuBoFjFAhsEOHKRoZ3r9GfTyhh7Vff1zwebEFLEFjT1lG3784xEgKiTa7E+e70vsC81roVL2MP4tgEEQ==",
+ "dev": true,
+ "dependencies": {
+ "@types/estree": "1.0.5"
+ },
+ "bin": {
+ "rollup": "dist/bin/rollup"
+ },
+ "engines": {
+ "node": ">=18.0.0",
+ "npm": ">=8.0.0"
+ },
+ "optionalDependencies": {
+ "@rollup/rollup-android-arm-eabi": "4.14.2",
+ "@rollup/rollup-android-arm64": "4.14.2",
+ "@rollup/rollup-darwin-arm64": "4.14.2",
+ "@rollup/rollup-darwin-x64": "4.14.2",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.14.2",
+ "@rollup/rollup-linux-arm64-gnu": "4.14.2",
+ "@rollup/rollup-linux-arm64-musl": "4.14.2",
+ "@rollup/rollup-linux-powerpc64le-gnu": "4.14.2",
+ "@rollup/rollup-linux-riscv64-gnu": "4.14.2",
+ "@rollup/rollup-linux-s390x-gnu": "4.14.2",
+ "@rollup/rollup-linux-x64-gnu": "4.14.2",
+ "@rollup/rollup-linux-x64-musl": "4.14.2",
+ "@rollup/rollup-win32-arm64-msvc": "4.14.2",
+ "@rollup/rollup-win32-ia32-msvc": "4.14.2",
+ "@rollup/rollup-win32-x64-msvc": "4.14.2",
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/search-insights": {
+ "version": "2.13.0",
+ "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.13.0.tgz",
+ "integrity": "sha512-Orrsjf9trHHxFRuo9/rzm0KIWmgzE8RMlZMzuhZOJ01Rnz3D0YBAe+V6473t6/H6c7irs6Lt48brULAiRWb3Vw==",
+ "dev": true,
+ "peer": true
+ },
+ "node_modules/shiki": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/shiki/-/shiki-1.3.0.tgz",
+ "integrity": "sha512-9aNdQy/etMXctnPzsje1h1XIGm9YfRcSksKOGqZWXA/qP9G18/8fpz5Bjpma8bOgz3tqIpjERAd6/lLjFyzoww==",
+ "dev": true,
+ "dependencies": {
+ "@shikijs/core": "1.3.0"
+ }
+ },
+ "node_modules/slick": {
+ "version": "1.12.2",
+ "resolved": "https://registry.npmjs.org/slick/-/slick-1.12.2.tgz",
+ "integrity": "sha512-4qdtOGcBjral6YIBCWJ0ljFSKNLz9KkhbWtuGvUyRowl1kxfuE1x/Z/aJcaiilpb3do9bl5K7/1h9XC5wWpY/A==",
+ "dev": true,
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz",
+ "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/speakingurl": {
+ "version": "14.0.1",
+ "resolved": "https://registry.npmjs.org/speakingurl/-/speakingurl-14.0.1.tgz",
+ "integrity": "sha512-1POYv7uv2gXoyGFpBCmpDVSNV74IfsWlDW216UPjbWufNf+bSU6GdbDsxdcxtfwb4xlI3yxzOTKClUosxARYrQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/speech-rule-engine": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/speech-rule-engine/-/speech-rule-engine-4.0.7.tgz",
+ "integrity": "sha512-sJrL3/wHzNwJRLBdf6CjJWIlxC04iYKkyXvYSVsWVOiC2DSkHmxsqOhEeMsBA9XK+CHuNcsdkbFDnoUfAsmp9g==",
+ "dev": true,
+ "dependencies": {
+ "commander": "9.2.0",
+ "wicked-good-xpath": "1.3.0",
+ "xmldom-sre": "0.1.31"
+ },
+ "bin": {
+ "sre": "bin/sre"
+ }
+ },
+ "node_modules/speech-rule-engine/node_modules/commander": {
+ "version": "9.2.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-9.2.0.tgz",
+ "integrity": "sha512-e2i4wANQiSXgnrBlIatyHtP1odfUp0BbV5Y5nEGbxtIrStkEOAAzCUirvLBNXHLr7kwLvJl6V+4V3XV9x7Wd9w==",
+ "dev": true,
+ "engines": {
+ "node": "^12.20.0 || >=14"
+ }
+ },
+ "node_modules/tabbable": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz",
+ "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==",
+ "dev": true
+ },
+ "node_modules/tr46": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
+ "dev": true
+ },
+ "node_modules/tslib": {
+ "version": "2.6.2",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz",
+ "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==",
+ "dev": true
+ },
+ "node_modules/valid-data-url": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/valid-data-url/-/valid-data-url-3.0.1.tgz",
+ "integrity": "sha512-jOWVmzVceKlVVdwjNSenT4PbGghU0SBIizAev8ofZVgivk/TVHXSbNL8LP6M3spZvkR9/QolkyJavGSX5Cs0UA==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/vite": {
+ "version": "5.2.8",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-5.2.8.tgz",
+ "integrity": "sha512-OyZR+c1CE8yeHw5V5t59aXsUPPVTHMDjEZz8MgguLL/Q7NblxhZUlTu9xSPqlsUO/y+X7dlU05jdhvyycD55DA==",
+ "dev": true,
+ "dependencies": {
+ "esbuild": "^0.20.1",
+ "postcss": "^8.4.38",
+ "rollup": "^4.13.0"
+ },
+ "bin": {
+ "vite": "bin/vite.js"
+ },
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/vitejs/vite?sponsor=1"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ },
+ "peerDependencies": {
+ "@types/node": "^18.0.0 || >=20.0.0",
+ "less": "*",
+ "lightningcss": "^1.21.0",
+ "sass": "*",
+ "stylus": "*",
+ "sugarss": "*",
+ "terser": "^5.4.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "less": {
+ "optional": true
+ },
+ "lightningcss": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ },
+ "stylus": {
+ "optional": true
+ },
+ "sugarss": {
+ "optional": true
+ },
+ "terser": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/vitepress": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/vitepress/-/vitepress-1.1.0.tgz",
+ "integrity": "sha512-G+NS5I2OETxC0SfGAMDO75JWNkrcir0UCptuhQMNoaZhhlqvYtTDQhph4qGc5dtiTtZkcFa/bCcSx+A2gSS3lA==",
+ "dev": true,
+ "dependencies": {
+ "@docsearch/css": "^3.6.0",
+ "@docsearch/js": "^3.6.0",
+ "@shikijs/core": "^1.2.4",
+ "@shikijs/transformers": "^1.2.4",
+ "@types/markdown-it": "^13.0.7",
+ "@vitejs/plugin-vue": "^5.0.4",
+ "@vue/devtools-api": "^7.0.25",
+ "@vueuse/core": "^10.9.0",
+ "@vueuse/integrations": "^10.9.0",
+ "focus-trap": "^7.5.4",
+ "mark.js": "8.11.1",
+ "minisearch": "^6.3.0",
+ "shiki": "^1.2.4",
+ "vite": "^5.2.8",
+ "vue": "^3.4.21"
+ },
+ "bin": {
+ "vitepress": "bin/vitepress.js"
+ },
+ "peerDependencies": {
+ "markdown-it-mathjax3": "^4",
+ "postcss": "^8"
+ },
+ "peerDependenciesMeta": {
+ "markdown-it-mathjax3": {
+ "optional": true
+ },
+ "postcss": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/vue": {
+ "version": "3.4.21",
+ "resolved": "https://registry.npmjs.org/vue/-/vue-3.4.21.tgz",
+ "integrity": "sha512-5hjyV/jLEIKD/jYl4cavMcnzKwjMKohureP8ejn3hhEjwhWIhWeuzL2kJAjzl/WyVsgPY56Sy4Z40C3lVshxXA==",
+ "dev": true,
+ "dependencies": {
+ "@vue/compiler-dom": "3.4.21",
+ "@vue/compiler-sfc": "3.4.21",
+ "@vue/runtime-dom": "3.4.21",
+ "@vue/server-renderer": "3.4.21",
+ "@vue/shared": "3.4.21"
+ },
+ "peerDependencies": {
+ "typescript": "*"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/web-resource-inliner": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/web-resource-inliner/-/web-resource-inliner-6.0.1.tgz",
+ "integrity": "sha512-kfqDxt5dTB1JhqsCUQVFDj0rmY+4HLwGQIsLPbyrsN9y9WV/1oFDSx3BQ4GfCv9X+jVeQ7rouTqwK53rA/7t8A==",
+ "dev": true,
+ "dependencies": {
+ "ansi-colors": "^4.1.1",
+ "escape-goat": "^3.0.0",
+ "htmlparser2": "^5.0.0",
+ "mime": "^2.4.6",
+ "node-fetch": "^2.6.0",
+ "valid-data-url": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/web-resource-inliner/node_modules/domhandler": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-3.3.0.tgz",
+ "integrity": "sha512-J1C5rIANUbuYK+FuFL98650rihynUOEzRLxW+90bKZRWB6A1X1Tf82GxR1qAWLyfNPRvjqfip3Q5tdYlmAa9lA==",
+ "dev": true,
+ "dependencies": {
+ "domelementtype": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 4"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/domhandler?sponsor=1"
+ }
+ },
+ "node_modules/web-resource-inliner/node_modules/entities": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz",
+ "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/web-resource-inliner/node_modules/htmlparser2": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-5.0.1.tgz",
+ "integrity": "sha512-vKZZra6CSe9qsJzh0BjBGXo8dvzNsq/oGvsjfRdOrrryfeD9UOBEEQdeoqCRmKZchF5h2zOBMQ6YuQ0uRUmdbQ==",
+ "dev": true,
+ "dependencies": {
+ "domelementtype": "^2.0.1",
+ "domhandler": "^3.3.0",
+ "domutils": "^2.4.2",
+ "entities": "^2.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/htmlparser2?sponsor=1"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
+ "dev": true
+ },
+ "node_modules/whatwg-url": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+ "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+ "dev": true,
+ "dependencies": {
+ "tr46": "~0.0.3",
+ "webidl-conversions": "^3.0.0"
+ }
+ },
+ "node_modules/wicked-good-xpath": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/wicked-good-xpath/-/wicked-good-xpath-1.3.0.tgz",
+ "integrity": "sha512-Gd9+TUn5nXdwj/hFsPVx5cuHHiF5Bwuc30jZ4+ronF1qHK5O7HD0sgmXWSEgwKquT3ClLoKPVbO6qGwVwLzvAw==",
+ "dev": true
+ },
+ "node_modules/xmldom-sre": {
+ "version": "0.1.31",
+ "resolved": "https://registry.npmjs.org/xmldom-sre/-/xmldom-sre-0.1.31.tgz",
+ "integrity": "sha512-f9s+fUkX04BxQf+7mMWAp5zk61pciie+fFLC9hX9UVvCeJQfNHRHXpeo5MPcR0EUf57PYLdt+ZO4f3Ipk2oZUw==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.1"
+ }
+ }
+ }
+}
diff --git a/docs/package.json b/docs/package.json
new file mode 100644
index 00000000..4b91e416
--- /dev/null
+++ b/docs/package.json
@@ -0,0 +1,11 @@
+{
+ "devDependencies": {
+ "markdown-it-mathjax3": "^4.3.2",
+ "vitepress": "^1.1.0"
+ },
+ "scripts": {
+ "docs:dev": "vitepress dev .",
+ "docs:build": "vitepress build .",
+ "docs:preview": "vitepress preview ."
+ }
+}
diff --git a/docs/public/TwirlsAndTastesAJourneyThroughItalianPasta.pdf b/docs/public/TwirlsAndTastesAJourneyThroughItalianPasta.pdf
new file mode 100644
index 00000000..42a4ff81
Binary files /dev/null and b/docs/public/TwirlsAndTastesAJourneyThroughItalianPasta.pdf differ
diff --git a/docs/public/android-chrome-192x192.png b/docs/public/android-chrome-192x192.png
new file mode 100644
index 00000000..510d74e0
Binary files /dev/null and b/docs/public/android-chrome-192x192.png differ
diff --git a/docs/public/android-chrome-512x512.png b/docs/public/android-chrome-512x512.png
new file mode 100644
index 00000000..e3238d43
Binary files /dev/null and b/docs/public/android-chrome-512x512.png differ
diff --git a/docs/public/apple-touch-icon.png b/docs/public/apple-touch-icon.png
new file mode 100644
index 00000000..c82cb89a
Binary files /dev/null and b/docs/public/apple-touch-icon.png differ
diff --git a/docs/public/blackjack_game.zip b/docs/public/blackjack_game.zip
new file mode 100644
index 00000000..a9879050
Binary files /dev/null and b/docs/public/blackjack_game.zip differ
diff --git a/docs/public/browserconfig.xml b/docs/public/browserconfig.xml
new file mode 100644
index 00000000..b3930d0f
--- /dev/null
+++ b/docs/public/browserconfig.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="utf-8"?>
+<browserconfig>
+    <msapplication>
+        <tile>
+            <square150x150logo src="/mstile-150x150.png"/>
+            <TileColor>#da532c</TileColor>
+        </tile>
+    </msapplication>
+</browserconfig>
diff --git a/docs/public/control_flow.png b/docs/public/control_flow.png
new file mode 100644
index 00000000..4e982638
Binary files /dev/null and b/docs/public/control_flow.png differ
diff --git a/docs/public/favicon-16x16.png b/docs/public/favicon-16x16.png
new file mode 100644
index 00000000..b1bf3658
Binary files /dev/null and b/docs/public/favicon-16x16.png differ
diff --git a/docs/public/favicon-32x32.png b/docs/public/favicon-32x32.png
new file mode 100644
index 00000000..34b5421f
Binary files /dev/null and b/docs/public/favicon-32x32.png differ
diff --git a/docs/public/favicon.ico b/docs/public/favicon.ico
new file mode 100644
index 00000000..a5057c0b
Binary files /dev/null and b/docs/public/favicon.ico differ
diff --git a/docs/public/images/blackjack_gameplay.png b/docs/public/images/blackjack_gameplay.png
new file mode 100644
index 00000000..7fc5d0bf
Binary files /dev/null and b/docs/public/images/blackjack_gameplay.png differ
diff --git a/docs/public/images/snake_game_gameplay.png b/docs/public/images/snake_game_gameplay.png
new file mode 100644
index 00000000..ac440555
Binary files /dev/null and b/docs/public/images/snake_game_gameplay.png differ
diff --git a/docs/public/images/url_shortener_1.png b/docs/public/images/url_shortener_1.png
new file mode 100644
index 00000000..0897a866
Binary files /dev/null and b/docs/public/images/url_shortener_1.png differ
diff --git a/docs/public/images/url_shortener_2.png b/docs/public/images/url_shortener_2.png
new file mode 100644
index 00000000..6b79c472
Binary files /dev/null and b/docs/public/images/url_shortener_2.png differ
diff --git a/docs/public/italian_pasta_recipe_book.zip b/docs/public/italian_pasta_recipe_book.zip
new file mode 100644
index 00000000..8c25199d
Binary files /dev/null and b/docs/public/italian_pasta_recipe_book.zip differ
diff --git a/docs/public/l2mac-block-diagram.png b/docs/public/l2mac-block-diagram.png
new file mode 100644
index 00000000..31a81b50
Binary files /dev/null and b/docs/public/l2mac-block-diagram.png differ
diff --git a/docs/public/l2mac-icon-white.png b/docs/public/l2mac-icon-white.png
new file mode 100644
index 00000000..16189528
Binary files /dev/null and b/docs/public/l2mac-icon-white.png differ
diff --git a/docs/public/l2mac-icon.png b/docs/public/l2mac-icon.png
new file mode 100644
index 00000000..0415edbf
Binary files /dev/null and b/docs/public/l2mac-icon.png differ
diff --git a/docs/public/mstile-150x150.png b/docs/public/mstile-150x150.png
new file mode 100644
index 00000000..3231c077
Binary files /dev/null and b/docs/public/mstile-150x150.png differ
diff --git a/docs/public/safari-pinned-tab.svg b/docs/public/safari-pinned-tab.svg
new file mode 100644
index 00000000..07f1337e
--- /dev/null
+++ b/docs/public/safari-pinned-tab.svg
@@ -0,0 +1,71 @@
+
+
+
diff --git a/docs/public/site.webmanifest b/docs/public/site.webmanifest
new file mode 100644
index 00000000..b20abb7c
--- /dev/null
+++ b/docs/public/site.webmanifest
@@ -0,0 +1,19 @@
+{
+ "name": "",
+ "short_name": "",
+ "icons": [
+ {
+ "src": "/android-chrome-192x192.png",
+ "sizes": "192x192",
+ "type": "image/png"
+ },
+ {
+ "src": "/android-chrome-512x512.png",
+ "sizes": "512x512",
+ "type": "image/png"
+ }
+ ],
+ "theme_color": "#ffffff",
+ "background_color": "#ffffff",
+ "display": "standalone"
+}
diff --git a/docs/public/snake_game.zip b/docs/public/snake_game.zip
new file mode 100644
index 00000000..55254e18
Binary files /dev/null and b/docs/public/snake_game.zip differ
diff --git a/docs/public/url_shortener_web_application.zip b/docs/public/url_shortener_web_application.zip
new file mode 100644
index 00000000..235da17d
Binary files /dev/null and b/docs/public/url_shortener_web_application.zip differ
diff --git a/examples/generate_book.py b/examples/generate_book.py
new file mode 100644
index 00000000..fc0afd3d
--- /dev/null
+++ b/examples/generate_book.py
@@ -0,0 +1,12 @@
+from l2mac import generate_book
+
+book: dict = generate_book(
+ r"""
+Write a complete recipe book for the following book title: "Twirls & Tastes: A Journey Through Italian Pasta"
+
+Description: "Twirls & Tastes" invites you on a flavorful expedition across Italy, exploring the diverse pasta landscape from the sun-drenched hills of Tuscany to the bustling streets of Naples. Discover regional specialties, learn the stories behind each dish, and master the art of pasta making with easy-to-follow recipes that promise to delight your senses.
+""",
+ steps=30,
+)
+
+print(book)  # prints the generated book as a dictionary mapping file names to file contents
diff --git a/examples/generate_codebase_complex_url_shortener_app.py b/examples/generate_codebase_complex_url_shortener_app.py
new file mode 100644
index 00000000..8f032dd1
--- /dev/null
+++ b/examples/generate_codebase_complex_url_shortener_app.py
@@ -0,0 +1,44 @@
+from l2mac import generate_codebase
+
+codebase: dict = generate_codebase(
+ r"""
+**Online URL Shortening Service**
+
+**Overview**:
+A service that allows users to submit long URLs and then receive a shortened version of that URL for ease of sharing.
+
+**Functional Requirements to implement**:
+
+1. **URL Shortening**:
+ - [ ] 1.1. Users can input a URL to be shortened.
+ - [ ] 1.2. The system validates that the URL is active and legitimate.
+ - [ ] 1.3. The system generates a unique shortened URL.
+ - [ ] 1.4. Users can choose custom short links (subject to availability).
+
+2. **Redirection**:
+ - [ ] 2.1. Accessing the shortened URL redirects to the original URL.
+
+3. **Analytics**:
+ - [ ] 3.1. Users can view statistics about their shortened URLs.
+ - [ ] 3.2. View number of clicks.
+ - [ ] 3.3. View date/time of each click.
+ - [ ] 3.4. View geographical location of the clicker.
+
+4. **User Accounts**:
+ - [ ] 4.1. Users can create accounts.
+ - [ ] 4.2. Account holders can view all their shortened URLs.
+ - [ ] 4.3. Account holders can edit or delete their shortened URLs.
+ - [ ] 4.4. Account holders can view analytics for all their shortened URLs.
+
+5. **Admin Dashboard**:
+ - [ ] 5.1. Administrators can view all shortened URLs.
+ - [ ] 5.2. Administrators can delete any URL or user account.
+ - [ ] 5.3. Administrators can monitor system performance and analytics.
+
+6. **Expiration**:
+ - [ ] 6.1. Users can set an expiration date/time for the shortened URL.
+""",
+ steps=10,
+)
+
+print(codebase)  # prints the generated codebase (repo) as a dictionary mapping file names to file contents
diff --git a/examples/generate_codebase_simple_blackjack.py b/examples/generate_codebase_simple_blackjack.py
new file mode 100644
index 00000000..e35dc134
--- /dev/null
+++ b/examples/generate_codebase_simple_blackjack.py
@@ -0,0 +1,6 @@
+from l2mac import generate_codebase
+
+# run pip install pygame==2.1.2 (for best results)
+codebase: dict = generate_codebase("Create a simple playable blackjack cli game", steps=2, run_tests=True)
+
+print(codebase)  # prints the generated codebase (repo) as a dictionary mapping file names to file contents
diff --git a/examples/generate_codebase_simple_playable_snake.py b/examples/generate_codebase_simple_playable_snake.py
new file mode 100644
index 00000000..e84b2778
--- /dev/null
+++ b/examples/generate_codebase_simple_playable_snake.py
@@ -0,0 +1,10 @@
+from l2mac import generate_codebase
+
+# run pip install pygame==2.1.2
+codebase: dict = generate_codebase(
+ "Create a beautiful, playable and simple snake game with pygame. Make the snake and food be aligned to the same 10-pixel grid.",
+ steps=2,
+ run_tests=True,
+)
+
+print(codebase)  # prints the generated codebase (repo) as a dictionary mapping file names to file contents
diff --git a/l2mac/__init__.py b/l2mac/__init__.py
new file mode 100644
index 00000000..b80ca373
--- /dev/null
+++ b/l2mac/__init__.py
@@ -0,0 +1,5 @@
+from l2mac.core import generate_codebase, generate_book, generate_custom, run_l2mac
+from l2mac.utils.run import (
+ DebuggingLevel,
+ Domain,
+)
\ No newline at end of file
diff --git a/l2mac/config.py b/l2mac/config.py
new file mode 100644
index 00000000..7cf37af8
--- /dev/null
+++ b/l2mac/config.py
@@ -0,0 +1,120 @@
+from enum import Enum
+from pathlib import Path
+from typing import Optional
+
+import yaml
+from pydantic import BaseModel, ValidationError
+
+
+class OpenAIRateLimitTier(str, Enum):
+ free = "free"
+ tier1 = "tier1"
+ tier2 = "tier2"
+ tier3 = "tier3"
+ tier4 = "tier4"
+ tier5 = "tier5"
+
+
+class ApiType(str, Enum):
+ openai = "openai"
+ azure = "azure"
+
+
+class LLMCoreConfig(BaseModel):
+ api_type: ApiType = ApiType.openai
+ model: str = "gpt-4-1106-preview"
+ base_url: Optional[str] = "https://api.openai.com/v1"
+ api_key: str
+ api_version: Optional[str] = None
+
+
+class LLMSettingsConfig(BaseModel):
+ temperature: float = 0.01
+ top_p: float = 1
+ frequency_penalty: float = 0
+ presence_penalty: float = 0
+ stop: str = ""
+ rate_limit_tier: OpenAIRateLimitTier = OpenAIRateLimitTier.tier3
+ rate_limit_requests_per_minute: float = 3000
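+    # Retry behaviour sketch (assumed from the parameter names, not shown here): on a failed
+    # request, wait initial_delay seconds, then multiply the delay by exponential_base (plus
+    # random jitter, if enabled) on each subsequent retry, up to max_retries attempts.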
+ api_retry_with_exponential_backoff__initial_delay: float = 1
+ api_retry_with_exponential_backoff__exponential_base: float = 2
+ api_retry_with_exponential_backoff__jitter: bool = True
+ api_retry_with_exponential_backoff__max_retries: float = 10
+ api_stream: bool = False
+
+
+class SetupConfig(BaseModel):
+ debug_mode: bool = True
+ log_dir: str = "logs"
+ enable_tests: bool = True
+ log_path: str = ""
+ seed: int = 0
+
+
+class WandbConfig(BaseModel):
+ project: str = "l2mac"
+ track: bool = False
+
+
+class L2MACConfig(BaseModel):
+ llm: LLMCoreConfig
+ llm_settings: LLMSettingsConfig = LLMSettingsConfig()
+ setup: SetupConfig = SetupConfig()
+ wandb: WandbConfig = WandbConfig()
+
+
+def find_config_file() -> Path:
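+    """Return the first config file found, preferring ~/.l2mac/config.yaml over ./config/config.yaml."""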
+ home_config = Path.home() / ".l2mac" / "config.yaml"
+ local_config = Path.cwd() / "config" / "config.yaml"
+
+ if home_config.exists():
+ return home_config
+ elif local_config.exists():
+ return local_config
+ else:
+ raise FileNotFoundError(
+ "No config file can be loaded. Please create one at '~/.l2mac/config.yaml' or './config/config.yaml'."
+ )
+
+
+def load_config() -> L2MACConfig:
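+    """Load the YAML config file and validate it into an L2MACConfig."""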
+ config_path = find_config_file()
+ with open(config_path, "r") as file:
+ config_data = yaml.safe_load(file)
+ try:
+ return L2MACConfig(**config_data)
+ except ValidationError as e:
+ print("Invalid configuration:", e)
+ raise e
+
+
+DEFAULT_CONFIG = """# Full Example: https://github.com/samholt/L2MAC/blob/master/config/config.yaml
+# Reflected Code: https://github.com/samholt/L2MAC/blob/master/l2mac/config.py
+llm:
+ api_type: "openai" # or azure etc. Check ApiType for more options
+ model: "gpt-4-turbo-preview" # or "gpt-4-turbo"
+ base_url: "https://api.openai.com/v1" # or forward url / other llm url
+ api_key: "YOUR_API_KEY"
+"""
+
+
+def copy_config_to_home(home_config=Path.home() / ".l2mac" / "config.yaml"):
+ """Initialize the configuration file for L2MAC."""
+
+ home_config.parent.mkdir(parents=True, exist_ok=True)
+
+ if home_config.exists():
+ backup_path = home_config.with_suffix(".bak")
+ home_config.rename(backup_path)
+ print(f"Existing configuration file backed up at {backup_path}")
+
+ home_config.write_text(DEFAULT_CONFIG, encoding="utf-8")
+ print(f"Configuration file initialized at {home_config}")
+
+
+if __name__ == "__main__":
+ try:
+ config = load_config()
+ print("Configuration loaded successfully:", config)
+ except Exception as e:
+ print("Failed to load configuration:", e)
diff --git a/l2mac/core.py b/l2mac/core.py
new file mode 100644
index 00000000..8cdf0501
--- /dev/null
+++ b/l2mac/core.py
@@ -0,0 +1,250 @@
+import traceback
+from typing import Optional
+
+import typer
+from typing_extensions import Annotated
+
+from l2mac.config import L2MACConfig, WandbConfig, copy_config_to_home, load_config
+from l2mac.envs.general import get_env
+from l2mac.l2mac import L2MAC
+from l2mac.llm_providers.general import setup_chat_rate_limiter
+from l2mac.llm_providers.rate_limiter import ChatRateLimiter
+from l2mac.prompts.load_prompts import L2MACPrompts, get_l2mac_prompts
+from l2mac.utils.logging import create_logger_in_process, generate_log_file_path
+from l2mac.utils.run import (
+ DebuggingLevel,
+ Domain,
+ load_prompt_program,
+ seed_all,
+ to_dotdict,
+)
+
+app = typer.Typer(help="Generate based on the prompt with LLM-automatic Computer")
+
+
+@app.command()
+def run_l2mac(
+ prompt_task: Annotated[
+ str, typer.Argument(help="Your input prompt to generate for such as 'Create a playable snake game in PyGame'")
+ ],
+ domain: Annotated[
+ Domain, typer.Option(help="Domain to generate, existing options are 'codebase', 'book'.")
+ ] = Domain.codebase,
+ run_tests: Annotated[
+ bool, typer.Option(help="Whether to run self-generated unit-tests when generating code.")
+ ] = False,
+ project_name: Annotated[Optional[str], typer.Option(help="Unique project name, such as 'snakegame'.")] = None,
+ steps: Annotated[
+        int, typer.Option(help="Number of steps to use when creating the prompt program internally.")
+ ] = 10,
+ prompt_program: Annotated[
+ Optional[str],
+ typer.Option(help="Path to the prompt program to use, or the prompt program as a string in a list format."),
+ ] = None,
+ prompts_file_path: Annotated[
+ Optional[str],
+ typer.Option(
+ help="Overrides the existing prompts to be used. Useful when creating a new prompt set for a new task."
+ ),
+ ] = None,
+ tools_enabled: Annotated[
+ Optional[str],
+ typer.Option(
+            help="List of functions that the agents can use, separated by commas. Defaults to using all available tools."
+ ),
+ ] = None,
+ debugging_level: Annotated[
+        DebuggingLevel, typer.Option(help="Whether to print out full context windows.")
+ ] = DebuggingLevel.info,
+ init_config: Annotated[bool, typer.Option(help="Initialize the configuration file for L2MAC.")] = False,
+):
+ """
+ Generate based on the input prompt with LLM-automatic Computer (L2MAC).
+ """
+ if init_config:
+ print("Initializing configuration file...")
+ copy_config_to_home()
+ return None
+ # Process inputs
+ if prompt_program is not None:
+ prompt_program = load_prompt_program(prompt_program)
+ l2mac_prompts = get_l2mac_prompts(prompts_file_path, domain)
+ config = load_config()
+ log_path = generate_log_file_path(__file__, log_folder=config.setup.log_dir, config=config)
+ config.setup.log_path = log_path
+ logger = create_logger_in_process(log_path)
+ rate_limiter = setup_chat_rate_limiter(config)
+ if config.wandb.track:
+ import wandb
+
+ wandb.init(
+ project=config.wandb.project,
+ config=to_dotdict(config),
+ )
+ else:
+ wandb = None
+ seed_all(config.setup.seed)
+ if debugging_level == DebuggingLevel.debug:
+ logger.info(f"Starting run \t | See log at : {log_path}")
+ logger.info(f"[Main Config] {config}")
+ if config.setup.debug_mode:
+ output_file_store = l2mac_internal(
+ prompt_task,
+ domain,
+ run_tests,
+ project_name,
+ steps,
+ prompt_program,
+ prompts_file_path,
+ tools_enabled,
+ debugging_level,
+ config,
+ rate_limiter,
+ wandb,
+ l2mac_prompts,
+ logger,
+ )
+ else:
+ try:
+ output_file_store = l2mac_internal(
+ prompt_task,
+ domain,
+ run_tests,
+ project_name,
+ steps,
+ prompt_program,
+ prompts_file_path,
+ tools_enabled,
+ debugging_level,
+ config,
+ rate_limiter,
+ wandb,
+ l2mac_prompts,
+ logger,
+ )
+ except Exception as e:
+ logger.exception(f"[Error] {e}")
+ logger.info(f"[Failed running]\t| error={e}")
+ traceback.print_exc()
+ output_file_store = {"errored": True}
+ if config.wandb.track:
+ wandb.finish()
+ logger.info("Run completed.")
+ return output_file_store
+
+
+def l2mac_internal(
+ prompt_task: str,
+ domain: Domain,
+ run_tests: bool,
+ project_name: Optional[str],
+ steps: int,
+ prompt_program: Optional[str],
+ prompts_file_path: Optional[str],
+ tools_enabled: Optional[str],
+ debugging_level: DebuggingLevel,
+ config: L2MACConfig,
+ rate_limiter: ChatRateLimiter,
+ wandb: Optional[WandbConfig],
+ l2mac_prompts: L2MACPrompts,
+ logger=None,
+):
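+    """Construct the environment and the L2MAC agent from the resolved options, run the agent, and return the generated file store."""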
+ env = get_env(domain=domain, config=config, logger=logger, seed=config.setup.seed)
+ env.set_seed(seed=config.setup.seed)
+ env.reset()
+ l2mac = L2MAC(
+ prompt_task=prompt_task,
+ env=env,
+ config=config,
+ logger=logger,
+ rate_limiter=rate_limiter,
+ l2mac_prompts=l2mac_prompts,
+ run_tests=run_tests,
+ project_name=project_name,
+ prompt_program=prompt_program,
+ prompts_file_path=prompts_file_path,
+ tools_enabled=tools_enabled,
+ debugging_level=debugging_level,
+ wandb=wandb,
+ )
+ output_file_store = l2mac.run(steps=steps)
+ return output_file_store
+
+
+def generate_codebase(
+ prompt_task: str,
+ run_tests: bool = False,
+ project_name: Optional[str] = None,
+ steps: int = 10,
+ prompt_program: Optional[str] = None,
+ prompts_file_path: Optional[str] = None,
+ tools_enabled: Optional[str] = None,
+ debugging_level: DebuggingLevel = DebuggingLevel.info,
+ init_config: bool = False,
+):
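+    """Generate a codebase from a natural-language prompt; thin wrapper around run_l2mac with domain=codebase."""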
+ return run_l2mac(
+ prompt_task=prompt_task,
+ domain=Domain.codebase,
+ run_tests=run_tests,
+ project_name=project_name,
+ steps=steps,
+ prompt_program=prompt_program,
+ prompts_file_path=prompts_file_path,
+ tools_enabled=tools_enabled,
+ debugging_level=debugging_level,
+ init_config=init_config,
+ )
+
+
+def generate_book(
+ prompt_task: str,
+ run_tests: bool = False,
+ project_name: Optional[str] = None,
+ steps: int = 10,
+ prompt_program: Optional[str] = None,
+ prompts_file_path: Optional[str] = None,
+ tools_enabled: Optional[str] = None,
+ debugging_level: DebuggingLevel = DebuggingLevel.info,
+ init_config: bool = False,
+):
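+    """Generate a book from a natural-language prompt; thin wrapper around run_l2mac with domain=book."""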
+ return run_l2mac(
+ prompt_task=prompt_task,
+ domain=Domain.book,
+ run_tests=run_tests,
+ project_name=project_name,
+ steps=steps,
+ prompt_program=prompt_program,
+ prompts_file_path=prompts_file_path,
+ tools_enabled=tools_enabled,
+ debugging_level=debugging_level,
+ init_config=init_config,
+ )
+
+
+def generate_custom(
+ prompt_task: str,
+ run_tests: bool = False,
+ project_name: Optional[str] = None,
+ steps: int = 10,
+ prompt_program: Optional[str] = None,
+ prompts_file_path: Optional[str] = None,
+ tools_enabled: Optional[str] = None,
+ debugging_level: DebuggingLevel = DebuggingLevel.info,
+ init_config: bool = False,
+):
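+    """Generate output for a custom domain; thin wrapper around run_l2mac with domain=custom."""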
+ return run_l2mac(
+ prompt_task=prompt_task,
+ domain=Domain.custom,
+ run_tests=run_tests,
+ project_name=project_name,
+ steps=steps,
+ prompt_program=prompt_program,
+ prompts_file_path=prompts_file_path,
+ tools_enabled=tools_enabled,
+ debugging_level=debugging_level,
+ init_config=init_config,
+ )
+
+
+if __name__ == "__main__":
+ app()
diff --git a/l2mac/envs/__init__.py b/l2mac/envs/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/l2mac/envs/general.py b/l2mac/envs/general.py
new file mode 100644
index 00000000..74d95b57
--- /dev/null
+++ b/l2mac/envs/general.py
@@ -0,0 +1,55 @@
+import random
+
+import numpy as np
+
+from l2mac.config import L2MACConfig
+from l2mac.utils.run import Domain
+
+
+def get_env(domain: Domain, config: L2MACConfig, logger, seed: int):
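+    """Factory returning the environment that matches the requested domain."""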
+ if domain == Domain.codebase:
+ return GeneralEnvironment(config=config, logger=logger, seed=seed, env_name="Codebase")
+ elif domain == Domain.book:
+ return GeneralEnvironment(config=config, logger=logger, seed=seed, env_name="Book")
+ else:
+ raise Exception(
+ f'Domain {domain} environment not found, please use "codebase" or "book", or implement a new domain for your task.'
+ )
+
+
+class Environment:
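+    """Minimal environment interface; subclasses implement reset/step for a concrete domain."""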
+ def __init__(self, config, logger, env_name, seed):
+ self.config = config
+ self.logger = logger
+ self.env_name = env_name
+ self.seed = seed
+
+ def log(self, message):
+ if self.logger is not None:
+ self.logger.info(f"[Environment: {self.env_name}] {message}")
+
+ def reset(self):
+ pass
+
+ def step(self, action):
+ pass
+
+
+class GeneralEnvironment(Environment):
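+    """Shared environment used for both the codebase and book generation domains."""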
+ def __init__(self, config, logger, seed, env_name="Codebase"):
+ super().__init__(config, logger, env_name, seed)
+ self.seed = None
+ self.description = None
+ self.attribute_names = None
+ self.prepend_code_libraries = ""
+
+ def set_seed(self, seed):
+ self.seed = seed
+ random.seed(seed)
+ np.random.seed(seed)
+
+ def reset(self):
+ pass
+
+ def get_obs(self):
+ pass
diff --git a/l2mac/l2mac.py b/l2mac/l2mac.py
new file mode 100644
index 00000000..9cefb9dd
--- /dev/null
+++ b/l2mac/l2mac.py
@@ -0,0 +1,408 @@
+import json
+import random
+import traceback
+from copy import deepcopy
+from pathlib import Path
+from typing import Optional
+
+import numpy as np
+import openai
+from openai import APIError
+
+from l2mac.config import L2MACConfig
+from l2mac.envs.general import Environment
+from l2mac.llm_providers.general import (
+ chat_completion_rl,
+ get_llm_config,
+ get_model_max_tokens,
+)
+from l2mac.llm_providers.openai import num_tokens_consumed_by_chat_request
+from l2mac.llm_providers.rate_limiter import ChatRateLimiter
+from l2mac.llm_providers.utils import pretty_print_chat_messages
+from l2mac.prompts.load_prompts import L2MACPrompts
+from l2mac.tools.core import (
+ available_functions_factory,
+ function_definition_list_factory,
+ process_function_call_and_return_message,
+ process_functions_into_function_names,
+)
+from l2mac.tools.utils import write_files_from_dict
+from l2mac.utils.l2mac import clean_string, detect_cycles, hash_messages
+from l2mac.utils.run import DebuggingLevel
+
+
+class L2MAC:
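+    """LLM-automatic Computer: plans a prompt program for the given task and executes it
+    step by step with tool calls, writing the generated files to a workspace folder."""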
+ def __init__(
+ self,
+ prompt_task: str,
+ env: Environment,
+ config: L2MACConfig,
+ logger: object,
+ rate_limiter: ChatRateLimiter,
+ l2mac_prompts: L2MACPrompts,
+ run_tests: bool = True,
+ project_name: Optional[str] = None,
+ steps: int = 10,
+ prompt_program: Optional[str] = None,
+ prompts_file_path: Optional[str] = None,
+ tools_enabled: Optional[str] = None,
+ debugging_level: DebuggingLevel = DebuggingLevel.info,
+ wandb: Optional[object] = None,
+ ):
+ self.env = env
+ self.prompt_task = prompt_task
+ self.config = config
+ self.seed_value = None
+ self.logger = logger
+ self.rate_limiter = rate_limiter
+ self.l2mac_prompts = l2mac_prompts
+ self.name = "L2MAC"
+ self.run_tests = run_tests
+ self.project_name = project_name
+ self.steps = steps
+ self.prompt_program = prompt_program
+ self.prompts_file_path = prompts_file_path
+ self.tools_enabled = tools_enabled
+ self.debugging_level = debugging_level
+ self.wandb = wandb
+ # Internal state
+ self.file_dict = {}
+ self.step = None
+ self.meta_messages = []
+ self.base_dialog = []
+ self.sub_messages = []
+ self.responses = []
+ self.message_hash = hash_messages([])
+ self.reset()
+
+ def seed(self, seed_value):
+ self.seed_value = seed_value
+ random.seed(seed_value)
+ np.random.seed(seed_value)
+
+ def get_llm_config(self):
+ return get_llm_config(self.config, self.logger, self.name, self.rate_limiter)
+
+ def reset(self):
+ self.load_from_checkpoint = ""
+ self.replay_llm_responses_path = ""
+ self.replay_llm_responses_path_index = 0
+ self.responses = []
+ self.message_hash_same_increase_temperature = 0
+ self.step_idx = 0
+ self.max_re_tries = 30
+ self.re_tries = 0
+ if self.load_from_checkpoint:
+ with open(self.load_from_checkpoint, "r") as f:
+ data = json.load(f)
+ self.file_dict = data["file_dict"]
+ self.prompt_program = data["prompt_program"]
+ self.steps = data["steps"]
+ self.step = data["step"]
+ self.meta_messages = data["meta_messages"]
+ self.base_dialog = data["base_dialog"]
+ self.sub_messages = data["sub_messages"]
+ self.responses = data["responses"]
+ self.message_hash = data["message_hash"]
+
+ self.log_folder_path = f"{self.config.setup.log_path.split('.txt')[0]}_{self.env.env_name}_{self.env.seed}/"
+ self.folder_path = f"workspace/{self.project_name + '_' if self.project_name else ''}{self.config.setup.log_path.split('_')[0].split('/')[1]}/"
+ Path(self.folder_path).mkdir(parents=True, exist_ok=True)
+ Path(self.log_folder_path).mkdir(parents=True, exist_ok=True)
+ write_files_from_dict(self.file_dict, base_dir=f"{self.folder_path}")
+ self.max_tokens = get_model_max_tokens(self.config)
+ self.functions = function_definition_list_factory()
+ if self.tools_enabled is not None:
+ self.functions = [
+ tool for tool in self.functions if tool["function"]["name"] in self.tools_enabled.split(",")
+ ]
+ system_message = self.l2mac_prompts.system
+ self.system_message = {"role": "system", "content": system_message}
+
+ def print_dialog(self, messages, response_msg=False):
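+        """Pretty-print the given messages together with their token usage."""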
+ num_tokens = num_tokens_consumed_by_chat_request(messages=messages, functions=self.functions)
+ pretty_print_chat_messages(
+ messages,
+ num_tokens,
+ self.max_tokens,
+ logger=self.logger,
+ response_msg=response_msg,
+ step_idx=self.step_idx,
+ total_steps=0 if self.prompt_program is None else len(self.prompt_program),
+ max_re_tries=self.max_re_tries,
+ re_tries=self.re_tries,
+ )
+
+ def save_agent_state(self, messages, beginning_step=""):
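+        """Checkpoint the agent state (messages, file store, and prompt program) to a JSON file."""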
+ data_to_save = {
+ "messages": messages,
+ "file_dict": self.file_dict,
+ "steps": self.steps,
+ "step": self.step,
+ "meta_messages": self.meta_messages,
+ "base_dialog": self.base_dialog,
+ "sub_messages": self.sub_messages,
+ "message_hash": self.message_hash,
+ }
+ if not beginning_step:
+ path = f"{self.log_folder_path}current_{self.name}_state.json"
+ else:
+ path = f"{self.log_folder_path}L2MAC_state_beginning_step_{self.step_idx}.json"
+ with open(path, "w") as f:
+ json.dump(data_to_save, f)
+
+ def get_llm_response(self, messages, max_tokens=None, tool_choice="auto"):
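+        """Query the LLM with the current dialog, adjusting temperature when the dialog is cycling, and return the response message."""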
+ self.print_dialog(messages)
+ self.save_agent_state(messages)
+ llm_config = self.get_llm_config()
+ if max_tokens is not None:
+ llm_config["max_tokens"] = max_tokens
+ llm_config["messages"] = messages
+        # If the dialog is cycling (repeating the same messages), raise the sampling temperature to break the loop; otherwise anneal it back towards zero.
+ tmp_messages = [clean_string(str(msg)) for msg in messages]
+ if detect_cycles(tmp_messages):
+ self.message_hash_same_increase_temperature += 0.4
+ if self.message_hash_same_increase_temperature >= 1:
+ self.message_hash_same_increase_temperature = 1
+ self.logger.info(f"[Increasing LLM temperature to {self.message_hash_same_increase_temperature}]")
+ else:
+ if self.message_hash_same_increase_temperature > 0:
+ self.logger.info(f"[Annealing LLM temperature to {self.message_hash_same_increase_temperature}]")
+ self.message_hash_same_increase_temperature -= 0.1
+ if self.message_hash_same_increase_temperature <= 0:
+ self.message_hash_same_increase_temperature = 0
+ llm_config["temperature"] = self.message_hash_same_increase_temperature
+ llm_config["tools"] = self.functions
+ if tool_choice == "auto":
+ llm_config["tool_choice"] = "auto"
+ elif tool_choice is not None:
+ llm_config["tool_choice"] = {"type": "function", "function": {"name": tool_choice}}
+ else:
+ llm_config["tool_choice"] = "none"
+ if self.replay_llm_responses_path:
+ with open(self.replay_llm_responses_path, "r") as f:
+ responses = json.load(f)
+ response = responses[self.replay_llm_responses_path_index]
+ self.replay_llm_responses_path_index += 1
+ if "error" in response:
+ raise APIError(response["error"], "")
+ else:
+ try:
+ # Check number of tokens
+ num_tokens = num_tokens_consumed_by_chat_request(messages=messages, functions=self.functions)
+ if num_tokens > self.max_tokens:
+ raise APIError("InvalidRequestError", "SelfGeneratedErrorOverTokenLimit")
+ response = chat_completion_rl(**llm_config)
+ self.responses.append(response)
+ with open(f"{self.log_folder_path}{self.name}_llm_responses.json", "w") as f:
+ json.dump(self.responses, f)
+ except APIError as e:
+ self.responses.append({"error": "InvalidRequestError"})
+ self.logger.error("Error: InvalidRequestError")
+ self.logger.error(traceback.format_exc())
+                self.logger.info(f"Error: {e.__dict__}")
+ raise e
+ message_response = response["choices"][0]["message"]
+ self.print_dialog([message_response], response_msg=True)
+ return message_response
+
+ def get_function_names_as_str(self):
+ fns = process_functions_into_function_names(self.functions)
+ return ", ".join([f"`{fn}`" for fn in fns])
+
+ def get_file_names(self):
+        # Simple implementation for now; could be improved.
+ return list(self.file_dict.keys())
+
+ def run(self, steps: int = 10):
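+        """Plan a prompt program of about `steps` instructions (unless one was provided), execute it, and return the file store."""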
+ return self._run(steps=steps)
+
+ def _run(self, steps: int = 10):
+ self.reset()
+ if not self.load_from_checkpoint:
+ self.meta_messages = [self.system_message]
+ first_message = self.l2mac_prompts.first_message.format(prompt_task=self.prompt_task, steps=steps)
+ self.meta_messages.append({"role": "user", "content": first_message})
+ prompt_program = []
+        # Loop until we get a multi-step plan; sometimes the first plan contains only a single step.
+ max_reflections = 1
+ current_reflection = 0
+ current_dialog = deepcopy(self.meta_messages)
+ if self.prompt_program is None:
+            # Bootstrap: generate the prompt program that is then followed internally to complete the user's input prompt task.
+ while len(prompt_program) <= 50 and current_reflection < max_reflections:
+ current_reflection += 1
+ initial_response_message = self.get_llm_response(
+ current_dialog, tool_choice="provide_detailed_sub_task_steps_for_sub_agents"
+ )
+ current_dialog.append(initial_response_message)
+ current_dialog.append(
+ {
+ "role": "user",
+ "content": self.l2mac_prompts.reflect_on_prompt_program,
+ }
+ )
+            # The plan could be reflected on and improved a few more times here.
+            function_response = initial_response_message["tool_calls"][0]["function"]
+            function_name = function_response["name"]
+            # The LLM occasionally returns truncated JSON arguments; attempt a few simple repairs before giving up.
+            try:
+                function_args = json.loads(function_response["arguments"])
+            except json.decoder.JSONDecodeError:
+                try:
+                    function_args = json.loads(function_response["arguments"].replace("\n", ""))
+                except json.decoder.JSONDecodeError:
+                    try:
+                        function_args = json.loads(function_response["arguments"] + '"]}')
+                    except json.decoder.JSONDecodeError:
+                        function_args = json.loads(function_response["arguments"] + "]}")
+            function_to_call = available_functions_factory()[function_name]
+            prompt_program = function_to_call(**function_args)
+ self.prompt_program = prompt_program
+ # self.base_dialog = deepcopy(current_dialog)
+ self.base_dialog = deepcopy([self.system_message, {"role": "user", "content": first_message}])
+ # Remove provide_detailed_sub_task_steps_for_sub_agents function from functions list
+ self.functions = [
+ tool
+ for tool in self.functions
+ if tool["function"]["name"] != "provide_detailed_sub_task_steps_for_sub_agents"
+ ]
+ previous_step_output_summary = ""
+ for step_idx, step in enumerate(self.prompt_program):
+ self.step = deepcopy(step)
+ self.step_idx = step_idx
+ self.sub_messages = deepcopy(self.base_dialog)
+ self.save_agent_state(self.sub_messages, beginning_step=self.step)
+ self.sub_messages.append(
+ {
+ "role": "user",
+ "content": self.l2mac_prompts.control_unit_execute_instruction.format(
+ step=step,
+ file_names=self.get_file_names(),
+ test_writing_advice=self.l2mac_prompts.test_writing_advice,
+ previous_step_output_summary=previous_step_output_summary,
+ functions_provided=self.get_function_names_as_str(),
+ ),
+ }
+ )
+ has_completed_sub_step = False
+ task_step_complete = False
+ self.max_re_tries = 30
+ self.re_tries = 0
+ while not has_completed_sub_step:
+ try:
+ response_message = self.get_llm_response(self.sub_messages)
+ if task_step_complete:
+ previous_step_output_summary = response_message["content"]
+ has_completed_sub_step = True
+ break
+ except openai.BadRequestError:
+                    # The request overflowed the context window; pop older messages until the request fits again with ~700 tokens of headroom for a reply.
+ while (
+ self.max_tokens
+ - num_tokens_consumed_by_chat_request(messages=self.sub_messages, functions=self.functions)
+ ) < 700:
+ self.sub_messages.pop(3)
+ self.sub_messages.append(
+ {
+ "role": "user",
+ "content": self.l2mac_prompts.control_unit_exhaust_context_window,
+ }
+ )
+ response_message = self.get_llm_response(self.sub_messages, tool_choice=None)
+ summary_step_message = response_message["content"]
+ self.re_tries += 1
+ if self.re_tries > self.max_re_tries:
+ self.logger.warning(
+ f"[WARNING] Maximum re-tries reached: {self.re_tries}/{self.max_re_tries}"
+ )
+ raise ValueError(
+ f"[ERROR] Maximum re-tries reached: {self.re_tries}/{self.max_re_tries}, stopping run."
+ )
+ # Restart the step from the base dialog; the retry counter above bounds how many restarts are allowed.
+ self.sub_messages = deepcopy(self.base_dialog)
+ self.sub_messages.append(
+ {
+ "role": "user",
+ "content": self.l2mac_prompts.control_unit_execute_instruction.format(
+ step=step,
+ file_names=self.get_file_names(),
+ test_writing_advice=self.l2mac_prompts.test_writing_advice,
+ previous_step_output_summary=summary_step_message,
+ functions_provided=self.get_function_names_as_str(),
+ ),
+ }
+ )
+ response_message = self.get_llm_response(self.sub_messages)
+ self.sub_messages.append(response_message)
+ if response_message.get("tool_calls"):
+ function_return_messages, self.file_dict = process_function_call_and_return_message(
+ response_message["tool_calls"],
+ self.file_dict,
+ tools=self.functions,
+ enable_tests=self.run_tests,
+ )
+ for function_return_message in function_return_messages:
+ self.sub_messages.append(function_return_message)
+ if (
+ "status" in json.loads(function_return_message["content"])
+ and json.loads(function_return_message["content"])["status"] == "TASK_STEP_COMPLETE"
+ ):
+ task_step_complete = True
+ self.sub_messages.append(
+ {
+ "role": "user",
+ "content": self.l2mac_prompts.control_unit_instruction_complete_summarize_output,
+ }
+ )
+ elif (
+ "name" in function_return_message
+ and function_return_message["name"] == "sub_task_step_complete"
+ and json.loads(function_return_message["content"])["status"] == "error"
+ ):
+ self.sub_messages.append(
+ {
+ "role": "user",
+ "content": self.l2mac_prompts.control_unit_instruction_erroring_fix_the_code.format(
+ error_message=json.loads(function_return_message["content"])["message"],
+ file_names=self.get_file_names(),
+ test_writing_advice=self.l2mac_prompts.test_writing_advice,
+ functions_provided=self.get_function_names_as_str(),
+ ),
+ }
+ )
+ else:
+ self.sub_messages.append(
+ {
+ "role": "user",
+ "content": self.l2mac_prompts.control_unit_cycle_message_to_check_if_instruction_complete.format(
+ step=step,
+ file_names=self.get_file_names(),
+ functions_provided=self.get_function_names_as_str(),
+ ),
+ }
+ )
+ else:
+ self.sub_messages.append(
+ {
+ "role": "user",
+ "content": self.l2mac_prompts.control_unit_cycle_message_to_check_if_instruction_complete.format(
+ step=step,
+ file_names=self.get_file_names(),
+ functions_provided=self.get_function_names_as_str(),
+ ),
+ }
+ )
+ write_files_from_dict(self.file_dict, base_dir=f"{self.folder_path}")
+ self.logger.info("[STEP COMPLETE] sub step completed")
+ self.logger.info("[TASK COMPLETE SUCCESSFULLY] All steps of prompt program complete")
+ self.logger.info(f"You can run your new code at: {self.folder_path}")
+ write_files_from_dict(self.file_dict, base_dir=f"{self.folder_path}")
+ self.save_agent_state(self.sub_messages)
+ return self.file_dict
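
The control unit above repairs truncated JSON tool-call arguments by appending likely missing closing tokens, one nested `try/except` per candidate repair. A minimal sketch of the same strategy as a standalone helper (`parse_tool_arguments` is a hypothetical name, not part of this diff):

```python
import json

def parse_tool_arguments(raw: str) -> dict:
    # Best-effort parse of a possibly truncated JSON argument string,
    # trying the same candidate repairs used in `_run` before giving up.
    candidates = [raw, raw.replace("\n", ""), raw + '"]}', raw + "]}"]
    for candidate in candidates:
        try:
            return json.loads(candidate)
        except json.JSONDecodeError:
            continue
    raise ValueError(f"Could not repair tool-call arguments: {raw[:80]!r}")
```
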
diff --git a/l2mac/llm_providers/__init__.py b/l2mac/llm_providers/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/l2mac/llm_providers/general.py b/l2mac/llm_providers/general.py
new file mode 100644
index 00000000..5eba9c60
--- /dev/null
+++ b/l2mac/llm_providers/general.py
@@ -0,0 +1,277 @@
+import asyncio
+import json
+import random
+import re
+from copy import deepcopy
+from time import perf_counter, sleep
+
+import openai
+from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI
+
+from l2mac.config import ApiType
+from l2mac.llm_providers.openai import (
+ openai_models,
+ openai_rate_limits_per_tier_per_model,
+)
+from l2mac.llm_providers.rate_limiter import ChatRateLimiter
+from l2mac.llm_providers.utils import find_best_match
+
+
+def setup_chat_rate_limiter(config) -> ChatRateLimiter:
+ request_limit, token_limit = setup_chat_rate_limiter_internal(config)
+ return ChatRateLimiter(request_limit=request_limit, token_limit=token_limit)
+
+
+def remove_nulls(d):
+ return {k: v for k, v in d.items() if v is not None}
+
+
+def setup_chat_rate_limiter_internal(config: dict):
+ if config.llm.api_type in (ApiType.azure, ApiType.openai):
+ model = config.llm.model
+ model_details = find_best_match(
+ openai_rate_limits_per_tier_per_model[config.llm_settings.rate_limit_tier], model
+ )
+ request_limit = model_details["RPM"]
+ token_limit = model_details["TPM"]
+ else:
+ raise ValueError(
+ f"API type {config.llm.api_type} not yet supported, please use 'openai' or 'azure' as the API type, or contribute your own LLM API"
+ )
+ return request_limit, token_limit
+
+
+def get_model_max_tokens(config: dict):
+ if config.llm.api_type in (ApiType.azure, ApiType.openai):
+ model = config.llm.model
+ model_details = find_best_match(openai_models, model)
+ max_tokens = model_details["context_window"]
+ else:
+ raise ValueError(
+ f"API type {config.llm.api_type} not yet supported, please use 'openai' or 'azure' as the API type, or contribute your own LLM API"
+ )
+ return max_tokens
+
+
+def get_llm_config(config, logger, name, rate_limiter):
+ llm_config_dict = {
+ "model": config.llm.model,
+ "api_type": config.llm.api_type,
+ "temperature": config.llm_settings.temperature,
+ "top_p": config.llm_settings.top_p,
+ "frequency_penalty": config.llm_settings.frequency_penalty,
+ "presence_penalty": config.llm_settings.presence_penalty,
+ "stop": config.llm_settings.stop,
+ "stream": config.llm_settings.api_stream,
+ "api_key": config.llm.api_key,
+ "_open_ai_rate_limit_requests_per_minute": config.llm_settings.rate_limit_requests_per_minute,
+ "_logger": logger,
+ "_name": name,
+ "_rate_limiter": rate_limiter,
+ "_retry_with_exponential_backoff__initial_delay": config.llm_settings.api_retry_with_exponential_backoff__initial_delay,
+ "_retry_with_exponential_backoff__exponential_base": config.llm_settings.api_retry_with_exponential_backoff__exponential_base,
+ "_retry_with_exponential_backoff__jitter": config.llm_settings.api_retry_with_exponential_backoff__jitter,
+ "_retry_with_exponential_backoff__max_retries": config.llm_settings.api_retry_with_exponential_backoff__max_retries,
+ }
+ if config.llm.api_type == ApiType.azure:
+ llm_config_dict.update({"azure_endpoint": config.llm.base_url, "api_version": config.llm.api_version})
+ return deepcopy(llm_config_dict)
+
+
+def chat_completion_rl(**kwargs):
+ # Implements retry_with_exponential_backoff
+ initial_delay = kwargs.pop("_retry_with_exponential_backoff__initial_delay", 1)
+ exponential_base = kwargs.pop("_retry_with_exponential_backoff__exponential_base", 2)
+ jitter = kwargs.pop("_retry_with_exponential_backoff__jitter", True)
+ max_retries = kwargs.pop("_retry_with_exponential_backoff__max_retries", 10)
+ stream = kwargs.get("stream", False)
+
+ logger = kwargs.get("_logger", None)
+ name = kwargs.get("_name", None)
+ if kwargs.get("config", None) and kwargs.get("config", None).llm.api_type == "azure":
+ use_azure_api = True
+ else:
+ use_azure_api = False
+
+ errors: tuple = (openai.RateLimitError, openai.APIError, openai.APITimeoutError, openai.APIConnectionError)
+
+ # Initialize variables
+ num_retries = 0
+ delay = initial_delay
+
+ # Loop until a successful response or max_retries is hit or an exception is raised
+ while True:
+ try:
+ if stream:
+ return asyncio.run(async_chat_completion_rl_inner(**kwargs))
+ else:
+ return chat_completion_rl_inner(**kwargs)
+
+ # Retry on specified errors
+ except errors as e:
+ # Increment retries
+ if logger:
+ logger.info(
+ f"[{name}][OpenAI API Request Error] {type(e)} {e.args} | num_retries: {num_retries} / {max_retries}"
+ )
+ else:
+ print(
+ f"[{name}][OpenAI API Request Error] {type(e)} {e.args} | num_retries: {num_retries} / {max_retries}"
+ )
+ num_retries += 1
+
+ # Check if max retries has been reached
+ if num_retries > max_retries:
+ if logger:
+ logger.info(
+ f"[{name}][OpenAI API Request Error] Exception Maximum number of retries ({max_retries}) exceeded. | num_retries: {num_retries} / {max_retries}"
+ )
+ else:
+ print(
+ f"[{name}][OpenAI API Request Error] Exception Maximum number of retries ({max_retries}) exceeded. | num_retries: {num_retries} / {max_retries}"
+ )
+ raise Exception(f"Maximum number of retries ({max_retries}) exceeded.")
+
+ # Increment the delay
+ delay *= exponential_base * (1 + jitter * random.random())
+
+ # Sleep for the delay
+ if use_azure_api:
+ match = re.search(r"Please retry after (\d+) seconds", e.args[0])
+ if match:
+ delay = int(match.group(1))
+ if logger:
+ logger.info(
+ f"[{name}][OpenAI API Request Error] {type(e)} {e.args} | num_retries: {num_retries} / {max_retries} | Now sleeping for {delay} seconds"
+ )
+ else:
+ print(
+ f"[{name}][OpenAI API Request Error] {type(e)} {e.args} | num_retries: {num_retries} / {max_retries} | Now sleeping for {delay} seconds"
+ )
+ # sleep(delay // 2.0)
+ if delay > 60:
+ delay = 60
+ sleep(delay)
+
+ # Raise exceptions for any errors not specified
+ except Exception as e:
+ raise e
+
+
+async def async_chat_completion_rl_inner(**kwargs):
+ kwargs.get("_logger", None)
+ kwargs.get("_name", None)
+ rate_limiter = kwargs.get("_rate_limiter", None)
+ api_type = kwargs.get("api_type", ApiType.openai)
+ if api_type == ApiType.openai:
+ aclient = AsyncOpenAI(api_key=kwargs["api_key"])
+ elif api_type == ApiType.azure:
+ aclient = AsyncAzureOpenAI(
+ api_key=kwargs["api_key"], api_version=kwargs["api_version"], azure_endpoint=kwargs["azure_endpoint"]
+ )
+ keys_to_remove = {
+ "_open_ai_rate_limit_requests_per_minute",
+ "_logger",
+ "_name",
+ "api_key",
+ "api_version",
+ "azure_endpoint",
+ "_rate_limiter",
+ "stream",
+ "api_type",
+ }
+ kwargs = {k: v for k, v in kwargs.items() if k not in keys_to_remove}
+ t0 = perf_counter() # retained for the commented-out timing log below
+ # if logger:
+ # logger.info(f"[{name}][OpenAI API Request] {kwargs}")
+ # pretty_print_chat_messages(kwargs['messages'])
+
+ if rate_limiter:
+ rate_limiter.consume(**kwargs)
+ responses = await aclient.chat.completions.create(**kwargs)
+ else:
+ responses = await aclient.chat.completions.create(**kwargs)
+ response = {}
+ chunks = []
+ async for chunk in responses:
+ chunk = chunk.model_dump() # pydantic chunk object -> plain dict
+ if not chunk.get("choices"):
+ continue
+ chunk_message = chunk["choices"][0]["delta"] # extract the message delta
+ chunks.append(chunk_message)
+ for k, v in chunk_message.items():
+ if v is None:
+ continue
+ if k in response:
+ if isinstance(response[k], dict):
+ for k2, v2 in v.items():
+ if k2 in response[k]:
+ response[k][k2] += v2
+ else:
+ response[k][k2] = v2
+ else:
+ response[k] += v
+ else:
+ response[k] = v
+ return_response = {"choices": [{"message": response}]}
+ # if logger:
+ # logger.info(f"[{name}][OpenAI API Returned] Elapsed request time: {perf_counter() - t0}s | response: {response}")
+ return return_response
+
+
+def chat_completion_rl_inner(**kwargs):
+ kwargs.get("_logger", None)
+ kwargs.get("_name", None)
+ rate_limiter = kwargs.get("_rate_limiter", None)
+ api_type = kwargs.get("api_type", ApiType.openai)
+ if api_type == ApiType.openai:
+ client = OpenAI(api_key=kwargs["api_key"])
+ elif api_type == ApiType.azure:
+ client = AzureOpenAI(
+ api_key=kwargs["api_key"], api_version=kwargs["api_version"], azure_endpoint=kwargs["azure_endpoint"]
+ )
+ keys_to_remove = {
+ "_open_ai_rate_limit_requests_per_minute",
+ "_logger",
+ "_name",
+ "api_key",
+ "api_version",
+ "azure_endpoint",
+ "_rate_limiter",
+ "stream",
+ "api_type",
+ }
+ kwargs = {k: v for k, v in kwargs.items() if k not in keys_to_remove}
+ t0 = perf_counter() # retained for the commented-out timing log below
+ # if logger:
+ # logger.info(f"[{name}][OpenAI API Request] {kwargs}")
+ # pretty_print_chat_messages(kwargs['messages'])
+ if rate_limiter:
+ rate_limiter.consume(**kwargs)
+ response = client.chat.completions.create(**kwargs)
+ else:
+ response = client.chat.completions.create(**kwargs)
+ # if logger:
+ # logger.info(f"[{name}][OpenAI API Returned] Elapsed request time: {perf_counter() - t0}s | response: {response}")
+ response = json.loads(
+ response.model_dump_json(), object_hook=remove_nulls
+ ) # Convert to dict, easier to save as a replay buffer for debugging
+ # The OpenAI API expects None values to be removed
+ return response
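
`chat_completion_rl` retries transient API failures with exponential backoff and jitter, capping the sleep at 60 seconds and, on Azure, honouring the "Please retry after N seconds" hint embedded in the error message. A condensed sketch of just the backoff schedule (illustrative only; the real function retries only the OpenAI error types listed above):

```python
import random
import time

def retry_with_backoff(call, max_retries=10, initial_delay=1.0,
                       exponential_base=2.0, jitter=True, max_delay=60.0):
    # Retry `call` on any exception, growing the delay multiplicatively
    # with random jitter, and capping each sleep at `max_delay` seconds.
    delay = initial_delay
    for attempt in range(max_retries + 1):
        try:
            return call()
        except Exception:
            if attempt == max_retries:
                raise
            delay *= exponential_base * (1 + jitter * random.random())
            time.sleep(min(delay, max_delay))
```
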
diff --git a/l2mac/llm_providers/openai.py b/l2mac/llm_providers/openai.py
new file mode 100644
index 00000000..39fa0d7c
--- /dev/null
+++ b/l2mac/llm_providers/openai.py
@@ -0,0 +1,294 @@
+import tiktoken
+
+# Tokenizer
+CL100K_ENCODER = tiktoken.get_encoding("cl100k_base")
+P50K_ENCODER = tiktoken.get_encoding("p50k_base")
+
+
+# OpenAI model details
+
+
+openai_models = { # Updated from https://platform.openai.com/docs/models/continuous-model-upgrades on 16th April 2024
+ "gpt-4-turbo": {
+ "description": "New GPT-4 Turbo with Vision. The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling. Currently points to gpt-4-turbo-2024-04-09.",
+ "context_window": 128_000,
+ "training_data": "Up to Dec 2023",
+ },
+ "gpt-4-turbo-2024-04-09": {
+ "description": "GPT-4 Turbo with Vision model. Vision requests can now use JSON mode and function calling. gpt-4-turbo currently points to this version.",
+ "context_window": 128_000,
+ "training_data": "Up to Dec 2023",
+ },
+ "gpt-4-turbo-preview": {
+ "description": "GPT-4 Turbo preview model. Currently points to gpt-4-0125-preview.",
+ "context_window": 128_000,
+ "training_data": "Up to Dec 2023",
+ },
+ "gpt-4-0125-preview": {
+ "description": "GPT-4 Turbo preview model intended to reduce cases of “laziness” where the model doesn’t complete a task. Returns a maximum of 4,096 output tokens. Learn more.",
+ "context_window": 128_000,
+ "training_data": "Up to Dec 2023",
+ },
+ "gpt-4-1106-preview": {
+ "description": "GPT-4 Turbo preview model featuring improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Returns a maximum of 4,096 output tokens. This is a preview model. Learn more.",
+ "context_window": 128_000,
+ "training_data": "Up to Apr 2023",
+ },
+ "gpt-4-vision-preview": {
+ "description": "GPT-4 model with the ability to understand images, in addition to all other GPT-4 Turbo capabilities. This is a preview model, we recommend developers to now use gpt-4-turbo which includes vision capabilities. Currently points to gpt-4-1106-vision-preview.",
+ "context_window": 128_000,
+ "training_data": "Up to Apr 2023",
+ },
+ "gpt-4-1106-vision-preview": {
+ "description": "GPT-4 model with the ability to understand images, in addition to all other GPT-4 Turbo capabilities. This is a preview model, we recommend developers to now use gpt-4-turbo which includes vision capabilities. Returns a maximum of 4,096 output tokens. Learn more.",
+ "context_window": 128_000,
+ "training_data": "Up to Apr 2023",
+ },
+ "gpt-4": {
+ "description": "Currently points to gpt-4-0613. See continuous model upgrades.",
+ "context_window": 8_192,
+ "training_data": "Up to Sep 2021",
+ },
+ "gpt-4-0613": {
+ "description": "Snapshot of gpt-4 from June 13th 2023 with improved function calling support.",
+ "context_window": 8_192,
+ "training_data": "Up to Sep 2021",
+ },
+ "gpt-4-32k": {
+ "description": "Currently points to gpt-4-32k-0613. See continuous model upgrades. This model was never rolled out widely in favor of GPT-4 Turbo.",
+ "context_window": 32_768,
+ "training_data": "Up to Sep 2021",
+ },
+ "gpt-4-32k-0613": {
+ "description": "Snapshot of gpt-4-32k from June 13th 2023 with improved function calling support. This model was never rolled out widely in favor of GPT-4 Turbo.",
+ "context_window": 32_768,
+ "training_data": "Up to Sep 2021",
+ },
+ "gpt-3.5-turbo-0125": {
+ "description": "New Updated GPT 3.5 Turbo. The latest GPT-3.5 Turbo model with higher accuracy at responding in requested formats and a fix for a bug which caused a text encoding issue for non-English language function calls. Returns a maximum of 4,096 output tokens. Learn more.",
+ "context_window": 16_385,
+ "training_data": "Up to Sep 2021",
+ },
+ "gpt-3.5-turbo": {
+ "description": "Currently points to gpt-3.5-turbo-0125.",
+ "context_window": 16_385,
+ "training_data": "Up to Sep 2021",
+ },
+ "gpt-3.5-turbo-1106": {
+ "description": "GPT-3.5 Turbo model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Returns a maximum of 4,096 output tokens. Learn more.",
+ "context_window": 16_385,
+ "training_data": "Up to Sep 2021",
+ },
+ "gpt-3.5-turbo-instruct": {
+ "description": "Similar capabilities as GPT-3 era models. Compatible with legacy Completions endpoint and not Chat Completions.",
+ "context_window": 4_096,
+ "training_data": "Up to Sep 2021",
+ },
+ "gpt-3.5-turbo-16k": {
+ "description": "Legacy Currently points to gpt-3.5-turbo-16k-0613.",
+ "context_window": 16_385,
+ "training_data": "Up to Sep 2021",
+ },
+ "gpt-3.5-turbo-0613": {
+ "description": "Legacy Snapshot of gpt-3.5-turbo from June 13th 2023. Will be deprecated on June 13, 2024.",
+ "context_window": 4_096,
+ "training_data": "Up to Sep 2021",
+ },
+ "gpt-3.5-turbo-16k-0613": {
+ "description": "Legacy Snapshot of gpt-3.5-16k-turbo from June 13th 2023. Will be deprecated on June 13, 2024.",
+ "context_window": 16_385,
+ "training_data": "Up to Sep 2021",
+ },
+}
+
+openai_rate_limits_per_tier_per_model = { # Updated from https://platform.openai.com/docs/guides/rate-limits/usage-tiers?context=tier-free on 16th April 2024
+ "free": {
+ "gpt-3.5-turbo": {"RPM": 3, "RPD": 200, "TPM": 40000, "Batch Queue Limit": 200000},
+ "text-embedding-3-small": {"RPM": 3, "RPD": 200, "TPM": 150000, "Batch Queue Limit": None},
+ "whisper-1": {"RPM": 3, "RPD": 200, "TPM": None, "Batch Queue Limit": None},
+ "tts-1": {"RPM": 3, "RPD": 200, "TPM": None, "Batch Queue Limit": None},
+ "dall-e-2": {"RPM": "5 img/min", "RPD": None, "TPM": None, "Batch Queue Limit": None},
+ "dall-e-3": {"RPM": "1 img/min", "RPD": None, "TPM": None, "Batch Queue Limit": None},
+ },
+ "tier1": {
+ "gpt-4-turbo": {"RPM": 500, "RPD": None, "TPM": 300000, "Batch Queue Limit": 900000},
+ "gpt-4": {"RPM": 500, "RPD": 10000, "TPM": 10000, "Batch Queue Limit": 100000},
+ "gpt-3.5-turbo": {"RPM": 3500, "RPD": 10000, "TPM": 60000, "Batch Queue Limit": 200000},
+ "text-embedding-3-large": {"RPM": 500, "RPD": 10000, "TPM": 1000000, "Batch Queue Limit": None},
+ "whisper-1": {"RPM": 50, "RPD": None, "TPM": None, "Batch Queue Limit": None},
+ "tts-1": {"RPM": 50, "RPD": None, "TPM": None, "Batch Queue Limit": None},
+ "tts-1-hd": {"RPM": 3, "RPD": None, "TPM": None, "Batch Queue Limit": None},
+ "dall-e-2": {"RPM": "5 img/min", "RPD": None, "TPM": None, "Batch Queue Limit": None},
+ "dall-e-3": {"RPM": "5 img/min", "RPD": None, "TPM": None, "Batch Queue Limit": None},
+ },
+ "tier2": {
+ "gpt-4-turbo": {"RPM": 5000, "TPM": 450000, "Batch Queue Limit": 1350000},
+ "gpt-4": {"RPM": 5000, "TPM": 40000, "Batch Queue Limit": 200000},
+ "gpt-3.5-turbo": {"RPM": 3500, "TPM": 80000, "Batch Queue Limit": 400000},
+ "text-embedding-3-large": {"RPM": 500, "TPM": 1000000, "Batch Queue Limit": None},
+ "whisper-1": {"RPM": "50", "TPM": None, "Batch Queue Limit": None},
+ "tts-1": {"RPM": "50", "TPM": None, "Batch Queue Limit": None},
+ "tts-1-hd": {"RPM": "5", "TPM": None, "Batch Queue Limit": None},
+ "dall-e-2": {"RPM": "50 img/min", "TPM": None, "Batch Queue Limit": None},
+ "dall-e-3": {"RPM": "7 img/min", "TPM": None, "Batch Queue Limit": None},
+ },
+ "tier3": {
+ "gpt-4-turbo": {"RPM": 5000, "TPM": 600000, "Batch Queue Limit": 40000000},
+ "gpt-4": {"RPM": 5000, "TPM": 80000, "Batch Queue Limit": 5000000},
+ "gpt-3.5-turbo": {"RPM": 3500, "TPM": 160000, "Batch Queue Limit": 10000000},
+ "text-embedding-3-large": {"RPM": 5000, "TPM": 5000000, "Batch Queue Limit": None},
+ "whisper-1": {"RPM": 100, "TPM": None, "Batch Queue Limit": None},
+ "tts-1": {"RPM": 100, "TPM": None, "Batch Queue Limit": None},
+ "tts-1-hd": {"RPM": 7, "TPM": None, "Batch Queue Limit": None},
+ "dall-e-2": {"RPM": "100 img/min", "TPM": None, "Batch Queue Limit": None},
+ "dall-e-3": {"RPM": "7 img/min", "TPM": None, "Batch Queue Limit": None},
+ },
+ "tier4": {
+ "gpt-4-turbo": {"RPM": 10000, "TPM": 800000, "Batch Queue Limit": 80000000},
+ "gpt-4": {"RPM": 10000, "TPM": 300000, "Batch Queue Limit": 30000000},
+ "gpt-3.5-turbo": {"RPM": 10000, "TPM": 1000000, "Batch Queue Limit": 100000000},
+ "text-embedding-3-large": {"RPM": 10000, "TPM": 5000000, "Batch Queue Limit": None},
+ "whisper-1": {"RPM": 100, "TPM": None, "Batch Queue Limit": None},
+ "tts-1": {"RPM": 100, "TPM": None, "Batch Queue Limit": None},
+ "tts-1-hd": {"RPM": 10, "TPM": None, "Batch Queue Limit": None},
+ "dall-e-2": {"RPM": "100 img/min", "TPM": None, "Batch Queue Limit": None},
+ "dall-e-3": {"RPM": "15 img/min", "TPM": None, "Batch Queue Limit": None},
+ },
+ "tier5": {
+ "gpt-4-turbo": {"RPM": 10000, "TPM": 1500000, "Batch Queue Limit": 250000000},
+ "gpt-4": {"RPM": 10000, "TPM": 300000, "Batch Queue Limit": 45000000},
+ "gpt-3.5-turbo": {"RPM": 10000, "TPM": 2000000, "Batch Queue Limit": 300000000},
+ "text-embedding-3-large": {"RPM": 10000, "TPM": 10000000, "Batch Queue Limit": None},
+ "whisper-1": {"RPM": 500, "TPM": None, "Batch Queue Limit": None},
+ "tts-1": {"RPM": 500, "TPM": None, "Batch Queue Limit": None},
+ "tts-1-hd": {"RPM": 20, "TPM": None, "Batch Queue Limit": None},
+ "dall-e-2": {"RPM": "500 img/min", "TPM": None, "Batch Queue Limit": None},
+ "dall-e-3": {"RPM": "50 img/min", "TPM": None, "Batch Queue Limit": None},
+ },
+}
+
+
+def num_tokens_consumed_by_chat_request(messages, max_tokens=15, n=1, functions="", **kwargs):
+ num_tokens = num_tokens_from_messages(messages)
+
+ if functions:
+ function_tokens = num_tokens_from_functions(functions)
+ num_tokens += function_tokens
+
+ return num_tokens
+
+
+def num_tokens_from_messages(messages, model="gpt-4-0613"):
+ """Return the number of tokens used by a list of messages."""
+ try:
+ encoding = tiktoken.encoding_for_model(model)
+ except KeyError:
+ print("Warning: model not found. Using cl100k_base encoding.")
+ encoding = tiktoken.get_encoding("cl100k_base")
+ if model in {
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-16k-0613",
+ "gpt-4-0314",
+ "gpt-4-32k-0314",
+ "gpt-4-0613",
+ "gpt-4-32k-0613",
+ }:
+ tokens_per_message = 3
+ tokens_per_name = 1
+ elif model == "gpt-3.5-turbo-0301":
+ tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
+ tokens_per_name = -1 # if there's a name, the role is omitted
+ elif "gpt-3.5-turbo" in model:
+ print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
+ return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
+ elif "gpt-4" in model:
+ print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
+ return num_tokens_from_messages(messages, model="gpt-4-0613")
+ else:
+ raise NotImplementedError(
+ f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
+ )
+ num_tokens = 0
+ for message in messages:
+ num_tokens += tokens_per_message
+ for key, value in message.items():
+ try:
+ num_tokens += len(encoding.encode(value))
+ except TypeError:
+ num_tokens += len(encoding.encode(str(value)))
+ if key == "name":
+ num_tokens += tokens_per_name
+ num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
+ return num_tokens
+
+
+def num_tokens_from_functions(tools, model="gpt-3.5-turbo-0613"):
+ """Return the number of tokens used by a list of functions."""
+ num_tokens = 0
+ for tool in tools:
+ function_tokens = len(CL100K_ENCODER.encode(tool["function"]["name"]))
+ function_tokens += len(CL100K_ENCODER.encode(tool["function"]["description"]))
+
+ if "parameters" in tool["function"]:
+ parameters = tool["function"]["parameters"]
+ if "properties" in parameters:
+ for propertiesKey in parameters["properties"]:
+ function_tokens += len(CL100K_ENCODER.encode(propertiesKey))
+ v = parameters["properties"][propertiesKey]
+ for field in v:
+ if field == "type":
+ function_tokens += 2
+ function_tokens += len(CL100K_ENCODER.encode(v["type"]))
+ elif field == "description":
+ function_tokens += 2
+ function_tokens += len(CL100K_ENCODER.encode(v["description"]))
+ elif field == "enum":
+ function_tokens -= 3
+ for o in v["enum"]:
+ function_tokens += 3
+ function_tokens += len(CL100K_ENCODER.encode(o))
+ elif field == "items":
+ # function_tokens += 2
+ # function_tokens += 2
+ function_tokens += len(CL100K_ENCODER.encode(v["type"]))
+ if "properties" in v[field]:
+ NestedParameters = v[field]
+ for NestedpropertiesKey in NestedParameters["properties"]:
+ function_tokens += len(CL100K_ENCODER.encode(NestedpropertiesKey))
+ Nestedv = NestedParameters["properties"][NestedpropertiesKey]
+ for Nestedfield in Nestedv:
+ if Nestedfield == "type":
+ # function_tokens += 2
+ function_tokens += len(CL100K_ENCODER.encode(Nestedv["type"]))
+ elif Nestedfield == "description":
+ # function_tokens += 2
+ function_tokens += len(CL100K_ENCODER.encode(Nestedv["description"]))
+ elif Nestedfield == "enum":
+ function_tokens -= 3
+ for Nestedo in Nestedv["enum"]:
+ # function_tokens += 3
+ function_tokens += len(CL100K_ENCODER.encode(Nestedo))
+ elif field == "items":
+ # function_tokens += 2
+ # function_tokens += 2
+ function_tokens += len(CL100K_ENCODER.encode(Nestedv["type"]))
+
+ print("")
+ else:
+ print(f"Warning: not supported field {field} : {v[field]}")
+ function_tokens += 11
+
+ num_tokens += function_tokens
+
+ num_tokens += 12
+ return num_tokens
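
`num_tokens_from_messages` follows OpenAI's published tiktoken counting recipe (a fixed per-message overhead plus the encoded content), and `num_tokens_consumed_by_chat_request` adds an estimate for any tool schemas on top. A quick usage sketch, assuming `tiktoken` is installed:

```python
from l2mac.llm_providers.openai import num_tokens_consumed_by_chat_request

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarise the plan in one sentence."},
]
# Estimated prompt tokens for this request (messages only, no tools).
print(num_tokens_consumed_by_chat_request(messages=messages))
```
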
diff --git a/l2mac/llm_providers/rate_limiter.py b/l2mac/llm_providers/rate_limiter.py
new file mode 100644
index 00000000..144eacd7
--- /dev/null
+++ b/l2mac/llm_providers/rate_limiter.py
@@ -0,0 +1,62 @@
+import time
+
+from l2mac.llm_providers.openai import num_tokens_consumed_by_chat_request
+
+
+class TokenBucket:
+ def __init__(self, rate, capacity):
+ self.rate = rate # Tokens per second
+ self.capacity = capacity
+ self.tokens = self.capacity
+ self.last_time = time.time()
+
+ def consume(self, tokens=1):
+ if tokens < 0:
+ return False
+
+ current_time = time.time()
+ time_delta = current_time - self.last_time
+ self.last_time = current_time
+
+ # Add tokens based on the time passed
+ self.tokens += time_delta * self.rate
+
+ # Ensure the tokens do not exceed the capacity
+ self.tokens = min(self.tokens, self.capacity)
+
+ if self.tokens >= tokens:
+ self.tokens -= tokens
+ return True
+ return False
+
+
+class RateLimiter(object):
+ def __init__(self, request_limit_per_minute, token_limit_per_minute, token_counter):
+ # Rate limits
+ self.request_limit = request_limit_per_minute
+ self.token_limit = token_limit_per_minute
+
+ # Token counter
+ self.token_counter = token_counter
+
+ # Buckets
+ self._request_bucket = TokenBucket(self.request_limit / 60.0, self.request_limit)
+ self._token_bucket = TokenBucket(self.token_limit / 60.0, self.token_limit)
+
+ def consume(self, **kwargs):
+ num_tokens = self.token_counter(**kwargs)
+ while not self._token_bucket.consume(num_tokens):
+ if num_tokens > self.token_limit:
+ num_tokens = self.token_limit // 2
+ time.sleep(1 / self._token_bucket.rate)
+ while not self._request_bucket.consume():
+ time.sleep(1 / self._request_bucket.rate)
+
+
+class ChatRateLimiter(RateLimiter):
+ def __init__(self, request_limit=3500, token_limit=90000):
+ super().__init__(
+ request_limit_per_minute=request_limit,
+ token_limit_per_minute=token_limit,
+ token_counter=num_tokens_consumed_by_chat_request,
+ )
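
`ChatRateLimiter` pairs two token buckets, one metering requests per minute and one metering tokens per minute, each refilled continuously at `limit / 60` per second; `consume` blocks until both buckets can cover the call. A usage sketch (the limits shown are illustrative tier-1 style values):

```python
from l2mac.llm_providers.rate_limiter import ChatRateLimiter

limiter = ChatRateLimiter(request_limit=500, token_limit=300_000)

messages = [{"role": "user", "content": "Hello"}]
# Blocks (sleeping in small increments) until the request bucket and the
# token bucket both have capacity, then returns so the API call can be made.
limiter.consume(messages=messages)
```
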
diff --git a/l2mac/llm_providers/utils.py b/l2mac/llm_providers/utils.py
new file mode 100644
index 00000000..9cd7963b
--- /dev/null
+++ b/l2mac/llm_providers/utils.py
@@ -0,0 +1,94 @@
+from difflib import get_close_matches
+
+
+def find_best_match(data_dict, model_name, n=1):
+ """
+ Find the best matching model name from the provided dictionary keys.
+
+ Args:
+ - data_dict (dict): Dictionary of models with their details.
+ - model_name (str): The model name to match.
+ - n (int): Number of close matches to return; default is 1.
+
+ Returns:
+ - dict or None: Returns the dictionary of the closest match, or None if no match is found.
+ """
+ # Retrieve close matches from the dictionary keys
+ matches = get_close_matches(model_name, data_dict.keys(), n=n, cutoff=0.0)
+ if matches:
+ return data_dict[matches[0]] # Return the best match's details
+ return None # Return None if no matches found
+
+
+def pretty_print_chat_messages(
+ messages,
+ num_tokens=None,
+ max_tokens=None,
+ logger=None,
+ response_msg=False,
+ step_idx=None,
+ total_steps=None,
+ max_re_tries=None,
+ re_tries=None,
+):
+ COLORS = {
+ "system": "\033[95m", # Light Magenta
+ "user": "\033[94m", # Light Blue
+ "assistant": "\033[92m", # Light Green
+ "tokens": "\033[91m", # Light Red
+ }
+
+ if response_msg:
+ print("[LLM RESPONSE MESSAGE]") # Reset color at the end
+ if logger:
+ logger.info("[LLM RESPONSE MESSAGE]")
+
+ for msg in messages:
+ role = msg["role"]
+ color = COLORS.get(role, COLORS["system"]) # Default to system color if role not found
+ formatted_role = role.capitalize()
+ if role == "assistant" and msg.get("tool_calls", False):
+ formatted_role = "Function Call"
+ for tool_call in msg["tool_calls"]:
+ print(
+ f"{color}[{formatted_role}] [{tool_call['function']['name']}] {tool_call['function']['arguments']}\033[0m"
+ ) # Reset color at the end
+ if logger:
+ logger.info(
+ f"[{formatted_role}] [{tool_call['function']['name']}] {tool_call['function']['arguments']}"
+ )
+ else:
+ content = msg.get("content", None)
+ print(f"{color}[{formatted_role}] {content}\033[0m") # Reset color at the end
+ if logger:
+ logger.info(f"[{formatted_role}] {content}")
+
+ if not response_msg:
+ if step_idx is not None and total_steps is not None:
+ if num_tokens and max_tokens:
+ if max_re_tries is not None and re_tries is not None:
+ print(
+ f"{COLORS['tokens']}[Progress: Step {step_idx + 1}/{total_steps} | Retries: {re_tries}/{max_re_tries} | Token Capacity Used: {((num_tokens / max_tokens) * 100.0):.2f}% | Tokens remaining {max_tokens - num_tokens}]\033[0m"
+ )
+ if logger:
+ logger.info(
+ f"[Progress: Step {step_idx + 1}/{total_steps} | Retries: {re_tries}/{max_re_tries} | Token Capacity Used: {((num_tokens / max_tokens) * 100.0):.2f}% | Tokens remaining {max_tokens - num_tokens}]"
+ )
+ else:
+ print(
+ f"{COLORS['tokens']}[Progress: Step {step_idx + 1}/{total_steps} | Token Capacity Used: {((num_tokens / max_tokens) * 100.0):.2f}% | Tokens remaining {max_tokens - num_tokens}]\033[0m"
+ )
+ if logger:
+ logger.info(
+ f"[Progress: Step {step_idx + 1}/{total_steps} | Token Capacity Used: {((num_tokens / max_tokens) * 100.0):.2f}% | Tokens remaining {max_tokens - num_tokens}]"
+ )
+
+ else:
+ if num_tokens and max_tokens:
+ print(
+ f"{COLORS['tokens']}[Token Capacity Used: {((num_tokens / max_tokens) * 100.0):.2f}% | Tokens remaining {max_tokens - num_tokens}]\033[0m"
+ )
+ if logger:
+ logger.info(
+ f"[Token Capacity Used: {((num_tokens / max_tokens) * 100.0):.2f}% | Tokens remaining {max_tokens - num_tokens}]"
+ )
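
`find_best_match` relies on `difflib.get_close_matches` with `cutoff=0.0`, so any non-empty dictionary always yields its closest key; this is how dated snapshot names resolve to the base model's limits above. A small sketch:

```python
from l2mac.llm_providers.utils import find_best_match

models = {
    "gpt-4": {"context_window": 8_192},
    "gpt-4-32k": {"context_window": 32_768},
}
# The dated snapshot is fuzzily matched to the closest key, "gpt-4-32k".
details = find_best_match(models, "gpt-4-32k-0613")
print(details["context_window"])  # 32768
```
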
diff --git a/l2mac/prompts/__init__.py b/l2mac/prompts/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/l2mac/prompts/book.yaml b/l2mac/prompts/book.yaml
new file mode 100644
index 00000000..aeaa5e2a
--- /dev/null
+++ b/l2mac/prompts/book.yaml
@@ -0,0 +1,54 @@
+system: |
+ Objective: Write large amounts of text for the following task.
+ Please note that the generated text should be fully complete. No placeholders.
+ Only use the functions you have been provided with.
+ Only use the `write_files` to output text files.
+
+ You must act autonomously and you will receive no human input at any stage. You have to return as output the complete text for completing this task, and correctly incorporate it into the existing text files.
+ You always write out the whole file contents.
+ Please always view the files before writing to them, to make sure you are writing to the correct files.
+
+ Provide the text to achieve the task conditioned on the existing generated text---including changing the existing generated text where necessary.
+
+ You cannot visualize any graphical output. You exist within an Actor Model machine, and when you list out steps, each step will be taken by a new separate sub-ChatGPT model. When you list out sub-task steps, you can optionally specify the sub-task validation to check that it has been completed successfully.
+
+ No data saved to disk will persist between steps or write operations.
+
+ Use the functions provided. When calling functions only provide a RFC8259 compliant JSON request following this format without deviation.
+first_message: |
+ You will get instructions for a book to write.
+ First lay out the structure outline of the book, and the chapters with detailed descriptions of what each chapter will contain. Feel free to make a quick comment on the purpose of each chapter.
+ Do not comment on what every piece of text does. Please note that the text should be fully complete. No placeholders.
+
+ You will start with the "detailed_outline" file, then go to the chapters in chronological order, and so on.
+ Please note that the text should be fully complete. No placeholders.
+
+ Follow the best practices for writing a book, including its naming conventions.
+ Make sure that files are correctly conditioned on the subsequent chapters and outline(s). The text should be fully complete. Make sure that text in different files are compatible with each other.
+ When writing text if you are unsure, write the most plausible text.
+
+ Useful to know:
+
+ It is helpful to write a detailed outline of the book first, and then write the chapters in order.
+ Always add a comment briefly describing the purpose of each file.
+ Always follow the best practices for the requested structure and how to package the completed book.
+
+ Objective:```
+ {prompt_task}
+ ```
+
+ Understand the problem by creating an extremely detailed step-by-step plan, where each step is long (multiple sentences) and in total includes every single feature requirement specified above, feel free to copy directly from it. Use no more than {steps} steps in the plan. Perform additional checks and evaluation at each step when applicable to help make an excellent coherent book, where all the text is fully complete. Use best book design practices, and you can output very large amounts of text at once, i.e., complete book chapters of many pages in length. Please include a last sentence to perform checks when implementing or writing text in that same step. You will receive no human input at any stage, so you cannot use a human to perform any checks. Only create a detailed plan to begin with, which includes performing consistency checks. Please be sure to include all of the specified feature requirements in the following plan.
+reflect_on_prompt_program: |
+ Please reflect on the plan, and increase the number of generated steps to around 100 very detailed steps that include all the feature requirements.
+test_writing_advice: |
+ You can output very large amounts of text at once, i.e., complete book chapters of many pages in length.
+control_unit_execute_instruction: |
+ Objective: Execute sub task step:```{step}```.\n\n Note: Condition any new text files on the existing text files: {file_names}. Fully implement in text, no placeholders. You can now optionally view the existing files if you need to view them to complete the current task step. You have a limited context window so be selective about which files you view, only view the files you think you might need to view. {test_writing_advice}\n\nSummary output of previous step: ""{previous_step_output_summary}""\n\nRespond now only with a function call of one of the following functions provided: {functions_provided}, and if you want to output text only use the `write_files` function to output text.
+control_unit_exhaust_context_window: |
+ You have exhausted your context window. Reflect on your progress. Provide a short concise response, of two sentences maximum, this will be used to restart this step from the beginning without the previous messages.
+control_unit_instruction_complete_summarize_output: |
+ Please provide a one or two sentence summary of the output of this step, which is useful for the next step. Your response will be used when starting the next step without any of the previous messages.
+control_unit_instruction_erroring_fix_the_code: |
+ {error_message}\n\nReflect and write the full complete corrected text to correct the text. Condition it on existing text: {file_names}.\n{test_writing_advice}\n\nRespond now only with a function call of one of the following functions provided: {functions_provided}, and if you want to output text only use the `write_files` function to output text.
+control_unit_cycle_message_to_check_if_instruction_complete: |
+ Has the sub task step been completed of:```{step}```.\n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full text to complete the task. Note: Condition any new text files on the existing text files: {file_names}. Fully implement these features in the text, no placeholders. {test_writing_advice}\n Respond now only with a function call of one of the following functions provided: {functions_provided}, and if you want to output text only use the `write_files` function to output text.
\ No newline at end of file
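
Each `control_unit_*` entry above is a plain `str.format` template that the control unit fills in at run time. A sketch of how `control_unit_execute_instruction` might be instantiated outside the framework (all values illustrative):

```python
import yaml

with open("l2mac/prompts/book.yaml") as f:
    prompts = yaml.safe_load(f)

# Illustrative values; at run time these come from the control unit's state.
message = prompts["control_unit_execute_instruction"].format(
    step="Write the detailed_outline file for the book.",
    file_names=["detailed_outline.txt"],
    test_writing_advice=prompts["test_writing_advice"],
    previous_step_output_summary="",
    functions_provided="`view_files`, `write_files`, `sub_task_step_complete`",
)
print(message)
```
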
diff --git a/l2mac/prompts/codebase.yaml b/l2mac/prompts/codebase.yaml
new file mode 100644
index 00000000..a3659ea0
--- /dev/null
+++ b/l2mac/prompts/codebase.yaml
@@ -0,0 +1,73 @@
+system: |
+ Objective: Write code for a large system design task.
+ Please note that the code should be fully functional. No placeholders.
+ Only use the functions you have been provided with.
+ Only use the `write_files` to output code.
+
+ You must act autonomously and you will receive no human input at any stage. You have to return as output the complete code for completing this task, and correctly incorporate it into the existing code base.
+ You always write out the whole file contents. You always indent code with tabs.
+ Please always view the files before writing to them, to make sure you are writing to the correct files.
+ When writing a test, make the filename start with the prefix 'test_'.
+
+ Provide the minimal code necessary to achieve the task conditioned on the existing generated code---including changing the existing generated code.
+
+ You cannot visualize any graphical output. You exist within an Actor Model machine, and when you list out steps, each step will be taken by a new separate sub-ChatGPT model. When you list out sub-task steps, you can optionally specify the sub-task validation to check that it has been completed successfully.
+
+ You cannot use any databases as none are setup in the local environment, instead mock a database with an in memory dictionary to store data. No data saved to disk will persist between steps or write operations.
+
+ If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.
+
+ Use the functions provided. When calling functions only provide a RFC8259 compliant JSON request following this format without deviation.
+first_message: |
+ You will get instructions for code to write.
+ First lay out the names of the core classes, functions, and methods that will be necessary, as well as a quick comment on their purpose.
+ Do not comment on what every file does. Please note that the code should be fully functional. No placeholders.
+
+ You will start with the "entrypoint" file, then go to the ones that are imported by that file, and so on.
+ Please note that the code should be fully functional. No placeholders.
+
+ Follow a language- and framework-appropriate best-practice file naming convention.
+ Make sure that files contain all imports, types etc. The code should be fully functional. Make sure that code in different files are compatible with each other.
+ When writing code if you are unsure, write a plausible implementation.
+ Include module dependency or package manager dependency definition file.
+
+ Useful to know:
+
+ For Python, you always create an appropriate requirements.txt file.
+ Always implement the simplest implementation possible, and concise code, unless explicitly stated otherwise.
+ Always add a comment briefly describing the purpose of the function definition.
+ Add comments explaining very complex bits of logic.
+ Always follow the best practices for the requested languages for folder/file structure and how to package the project.
+ You can use any package and any other packages you wish to install.
+ You cannot use any databases as none are setup in the local environment, instead mock a database with an in memory dictionary to store data. No data saved to disk will persist between steps or write operations.
+ When writing a test, make the filename start with the prefix 'test_'.
+ When putting files in folders, always be sure to include a file called __init__.py where relevant, or put all files in the same working directory. Always prefer the simplest approach.
+ Always add a readme on how to run the code, or a .sh file to run the code.
+ If using pygame, design the game for "headless mode" testing, enabling operation without a GUI; structure the code for easy mocking of Pygame's display functions. As you cannot use any human input to test.
+ All tests should always be interpretable and you should be able to clearly reason what the correct answer is without any doubt.
+
+ Python toolbelt preferences:
+ - pytest
+ - dataclasses
+ - flask
+ - pygame==2.1.2
+
+ Objective:```
+ {prompt_task}
+ ```
+
+ Understand the problem by creating an extremely detailed step-by-step plan, where each step is long (multiple sentences) and in total includes every single feature requirement specified above, feel free to copy directly from it. Use no more than {steps} steps in the plan. Create additional tests, checks, and evaluation at each step when applicable to help make an excellent code implementation, where all the code is fully functional. Use best software design practices, and you can output large amounts of code at once. Please include a last sentence to create and run tests when implementing or writing code in that same step. You will receive no human input at any stage, so you cannot use a human to test. Only create a detailed plan to begin with, which includes designing and running tests to check that they all pass. Add to the last step in the plan to add a readme on how to run the code, or a .sh file to run the code. Please be sure to include all of the specified feature requirements in the following plan.
+reflect_on_prompt_program: |
+ Please reflect on the plan, and increase the number of generated steps to around 100 very detailed steps that include all the feature requirements.
+test_writing_advice: |
+ All tests should always be interpretable and you should be able to clearly reason what the correct answer is without any doubt. Do not write tests for large numbers and large inputs, if they exist delete them. If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass. Avoid making complicated tests. If a test repeatedly fails delete the test.
+control_unit_execute_instruction: |
+ Objective: Execute sub task step:```{step}```.\n\n Note: Condition any new code files on the existing code files: {file_names}. Fully implement these features in the code, no placeholders. You can now optionally view the existing files if you need to view them to complete the current task step. You have a limited context window so be selective about which files you view, only view the files you think you might need to view. {test_writing_advice}\n\nSummary output of previous step: ""{previous_step_output_summary}""\n\nRespond now only with a function call of one of the following functions provided: {functions_provided}, and if you want to output code only use the `write_files` function to output code.
+control_unit_exhaust_context_window: |
+ You have exhausted your context window. Reflect on your progress. Provide a short concise response, of two sentences maximum, this will be used to restart this step from the beginning without the previous messages.
+control_unit_instruction_complete_summarize_output: |
+ Please provide a one or two sentence summary of the output of this step, which is useful for the next step. Your response will be used when starting the next step without any of the previous messages.
+control_unit_instruction_erroring_fix_the_code: |
+ {error_message}\n\nReflect and write the full complete corrected code to correct the code. Condition it on existing code: {file_names}.\n{test_writing_advice}\n\nRespond now only with a function call of one of the following functions provided: {functions_provided}, and if you want to output code only use the `write_files` function to output code.
+control_unit_cycle_message_to_check_if_instruction_complete: |
+ Has the sub task step been completed of:```{step}```.\n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Note: Condition any new code files on the existing code files: {file_names}. Fully implement these features in the code, no placeholders.\n Respond now only with a function call of one of the following functions provided: {functions_provided}, and if you want to output code only use the `write_files` function to output code.
\ No newline at end of file
diff --git a/l2mac/prompts/custom.yaml b/l2mac/prompts/custom.yaml
new file mode 100644
index 00000000..daead87d
--- /dev/null
+++ b/l2mac/prompts/custom.yaml
@@ -0,0 +1,54 @@
+system: |
+ Objective: Write large amounts of text for the following task.
+ Please note that the generated text should be fully complete. No placeholders.
+ Only use the functions you have been provided with.
+ Only use the `write_files` to output text files.
+
+ You must act autonomously and you will receive no human input at any stage. You have to return as output the complete text for completing this task, and correctly incorporate it into the existing text files.
+ You always write out the whole file contents.
+ Please always view the files before writing to them, to make sure you are writing to the correct files.
+
+ Provide the text to achieve the task conditioned on the existing generated text---including changing the existing generated text where necessary.
+
+ You cannot visualize any graphical output. You exist within an Actor Model machine, and when you list out steps, each step will be taken by a new separate sub-ChatGPT model. When you list out sub-task steps, you can optionally specify the sub-task validation to check that it has been completed successfully.
+
+ No data saved to disk will persist between steps or write operations.
+
+ Use the functions provided. When calling functions only provide a RFC8259 compliant JSON request following this format without deviation.
+first_message: |
+ You will get instructions for a task to write.
+ First lay out the structure outline of the task, and the chapters with detailed descriptions of what each chapter will contain. Feel free to make a quick comment on the purpose of each chapter.
+ Do not comment on what every piece of text does. Please note that the text should be fully complete. No placeholders.
+
+ You will start with the "detailed_outline" file, then go to the chapters in chronological order, and so on.
+ Please note that the text should be fully complete. No placeholders.
+
+ Follow the best practices for writing a task, including its naming conventions.
+ Make sure that files are correctly conditioned on the subsequent chapters and outline(s). The text should be fully complete. Make sure that text in different files are compatible with each other.
+ When writing text if you are unsure, write the most plausible text.
+
+ Useful to know:
+
+ It is helpful to write a detailed outline of the task first, and then write the chapters in order.
+ Always add a comment briefly describing the purpose of each file.
+ Always follow the best practices for the requested structure and how to package the completed task.
+
+ Objective:```
+ {prompt_task}
+ ```
+
+ Understand the problem by creating an extremely detailed step-by-step plan, where each step is long (multiple sentences) and in total includes every single feature requirement specified above, feel free to copy directly from it. Use no more than 10 steps in the plan. Perform additional checks and evaluation at each step when applicable to help make an excellent coherent task, where all the text is fully complete. Use best task design practices, and you can output large amounts of text at once. Please include a last sentence to perform checks when implementing or writing text in that same step. You will receive no human input at any stage, so you cannot use a human to perform any checks. Only create a detailed plan to begin with, which includes performing consistency checks. Please be sure to include all of the specified feature requirements in the following plan.
+reflect_on_prompt_program: |
+ Please reflect on the plan, and increase the number of generated steps to around 100 very detailed steps that include all the feature requirements.
+test_writing_advice: |
+ .
+control_unit_execute_instruction: |
+ Objective: Execute sub task step:```{step}```.\n\n Note: Condition any new text files on the existing text files: {file_names}. Fully implement in text, no placeholders. You can now optionally view the existing files if you need to view them to complete the current task step. You have a limited context window so be selective about which files you view, only view the files you think you might need to view. {test_writing_advice}\n\nSummary output of previous step: ""{previous_step_output_summary}""\n\nRespond now only with a function call of one of the following functions provided: {functions_provided}, and if you want to output text only use the `write_files` function to output text.
+control_unit_exhaust_context_window: |
+ You have exhausted your context window. Reflect on your progress. Provide a short concise response, of two sentences maximum, this will be used to restart this step from the beginning without the previous messages.
+control_unit_instruction_complete_summarize_output: |
+ Please provide a one or two sentence summary of the output of this step, which is useful for the next step. Your response will be used when starting the next step without any of the previous messages.
+control_unit_instruction_erroring_fix_the_code: |
+ {error_message}\n\nReflect and write the full complete corrected text to correct the text. Condition it on existing text: {file_names}.\n{test_writing_advice}\n\nRespond now only with a function call of one of the following functions provided: {functions_provided}, and if you want to output text only use the `write_files` function to output text.
+control_unit_cycle_message_to_check_if_instruction_complete: |
+ Has the sub task step been completed of:```{step}```.\n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full text to complete the task. Note: Condition any new text files on the existing text files: {file_names}. Fully implement these features in the text, no placeholders.\n Respond now only with a function call of one of the following functions provided: {functions_provided}, and if you want to output text only use the `write_files` function to output text.
\ No newline at end of file
diff --git a/l2mac/prompts/load_prompts.py b/l2mac/prompts/load_prompts.py
new file mode 100644
index 00000000..0c546a9f
--- /dev/null
+++ b/l2mac/prompts/load_prompts.py
@@ -0,0 +1,56 @@
+import os
+from importlib import resources
+from typing import Optional
+
+import yaml
+from pydantic import BaseModel, ValidationError
+
+from l2mac.utils.run import Domain
+
+
+class L2MACPrompts(BaseModel):
+ system: str
+ first_message: str
+ reflect_on_prompt_program: str
+ test_writing_advice: str
+ control_unit_execute_instruction: str
+ control_unit_exhaust_context_window: str
+ control_unit_instruction_complete_summarize_output: str
+ control_unit_instruction_erroring_fix_the_code: str
+ control_unit_cycle_message_to_check_if_instruction_complete: str
+
+
+def get_l2mac_prompts(prompts_file_path: Optional[str], domain: Domain) -> L2MACPrompts:
+ """
+ Loads the L2MAC prompts from a given file path.
+
+ Args:
+ prompts_file_path Optional(str): The path to the L2MAC prompts file.
+
+ Returns:
+ list: The loaded L2MAC prompts as L2MAC prompt objects
+ """
+ if prompts_file_path is not None:
+ if os.path.isfile(prompts_file_path):
+ with open(prompts_file_path, "r") as file:
+ prompts_data = yaml.safe_load(file)
+ try:
+ return L2MACPrompts(**prompts_data)
+ except ValidationError as e:
+ print(f"Invalid prompts file at `{prompts_file_path}`:", e)
+ raise e
+ else:
+ raise FileNotFoundError(f"File not found at `prompts_file_path` of {prompts_file_path}")
+ elif domain == Domain.codebase:
+ prompts_file = "codebase.yaml"
+ elif domain == Domain.book:
+ prompts_file = "book.yaml"
+ elif domain == Domain.custom:
+ prompts_file = "custom.yaml"
+ else:
+ raise ValueError(f"Unsupported domain `{domain}`; no bundled prompts file is available")
+ with resources.open_text("l2mac.prompts", prompts_file) as file:
+ prompts_data = yaml.safe_load(file)
+ try:
+ return L2MACPrompts(**prompts_data)
+ except ValidationError as e:
+ print(f"Invalid bundled prompts file `{prompts_file}`:", e)
+ raise e
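
Typical usage falls back to the bundled per-domain defaults when no custom file is supplied; a custom YAML file is validated against the same `L2MACPrompts` schema. A short sketch (`my_prompts.yaml` is a hypothetical path):

```python
from l2mac.prompts.load_prompts import get_l2mac_prompts
from l2mac.utils.run import Domain

# Load the bundled codebase prompts (no custom file supplied).
prompts = get_l2mac_prompts(prompts_file_path=None, domain=Domain.codebase)
print(prompts.system.splitlines()[0])

# Or validate a custom prompts file against the same schema.
custom = get_l2mac_prompts("my_prompts.yaml", domain=Domain.custom)
```
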
diff --git a/l2mac/tools/__init__.py b/l2mac/tools/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/l2mac/tools/code_analysis.py b/l2mac/tools/code_analysis.py
new file mode 100644
index 00000000..ab5782ba
--- /dev/null
+++ b/l2mac/tools/code_analysis.py
@@ -0,0 +1,254 @@
+import json
+import re
+import subprocess
+import tempfile
+import unittest
+import xml.etree.ElementTree as ET
+from pathlib import Path
+from typing import List, Optional
+
+import numpy as np
+from timeout_decorator import timeout
+
+from l2mac.tools.read import load_code_files_into_dict
+from l2mac.tools.utils import write_files_from_dict
+
+
+def check_syntax_with_timeout(file_dict):
+ try:
+ syntax_results = check_syntax(file_dict)
+ except StopIteration:
+ # check_syntax timed out; treat it as a pass rather than blocking the run.
+ syntax_results = "Manual tests passed."
+ return syntax_results
+
+
+def check_pytest_with_timeout(file_dict):
+ try:
+ test_results = check_pytest(file_dict)
+ except StopIteration:
+ # check_pytest timed out; treat it as a pass rather than blocking the run.
+ test_results = "Manual tests passed."
+ return test_results
+
+
+@timeout(60, timeout_exception=StopIteration)
+def check_syntax(file_dict: dict):
+ external_modules = find_external_modules(file_dict)
+ ignored_modules = ",".join(external_modules)
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ write_files_from_dict(file_dict, tmpdirname)
+ # Look for top level folders
+ top_folders = list(set([file_name.split("/")[0] for file_name in file_dict.keys() if "/" in file_name]))
+ top_modules = list(
+ set([top_folder for top_folder in top_folders if f"{top_folder}/__init__.py" in file_dict.keys()])
+ )
+ top_modules_extra = list(
+ set(
+ [
+ file_name.split("/")[0]
+ for file_name in file_dict.keys()
+ if file_name.count("/") >= 2 and ".py" in file_name
+ ]
+ )
+ )
+ top_modules.extend(top_modules_extra)
+ top_folder_with_code = [
+ top_folder
+ for top_folder in top_folders
+ if (f"{top_folder}/__init__.py" not in file_dict.keys())
+ and (len([f for f in file_dict.keys() if top_folder in f and f.endswith(".py")]) >= 1)
+ ]
+ top_code_files = [f for f in file_dict.keys() if f.endswith(".py") and "/" not in f]
+ pylint_args = []
+ pylint_args.extend(top_modules)
+ pylint_args.extend([f"{f}/*.py" for f in top_folder_with_code])
+ pylint_args.extend(top_code_files)
+ command = [
+ "python3",
+ "-m",
+ "pylint",
+ "--disable=all,E0401", # Import module error bug
+ "--enable=E",
+ "--score=no",
+ "--ignored-modules=" + ignored_modules,
+ ]
+ if len(pylint_args) == 0:
+ # No python files found, therefore skipping
+ return ""
+ command.extend(pylint_args)
+ result = subprocess.run(command, capture_output=True, text=True, cwd=tmpdirname)
+ stdout = result.stdout
+ return stdout
+
+
+@timeout(60, timeout_exception=StopIteration)
+def check_pytest(file_dict: dict):
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ write_files_from_dict(file_dict, tmpdirname)
+ command = ["python3", "-m", "pytest", "--junitxml=result_pytest.xml"]
+ result = subprocess.run(command, capture_output=True, text=True, cwd=tmpdirname)
+ stdout = result.stdout
+ try:
+ results = parse_and_print_junit_xml(Path(tmpdirname) / "result_pytest.xml")
+ total_tests = np.sum([r["total"] for r in results])
+ total_passed = np.sum([r["passed"] for r in results])
+ if total_tests == total_passed:
+ output = "All tests passed."
+ else:
+ output = f"Failed tests. Please check the output below. {stdout}"
+ except FileNotFoundError:
+ output = "No tests found."
+ # print(stdout)
+ # print(stderr)
+ # print(results)
+ # print('=====================')
+ return output
+
+
+def pytest_files(files_to_test: Optional[List[str]] = None, file_dict: dict = {}, enable_tests=True):
+ if not enable_tests:
+ output = "Manual tests passed."
+ return json.dumps({"output": output})
+ try:
+ output = pytest_code_base(file_dict, files_to_test)
+ # if len(output) > 6000:
+ # output = output[:6000]
+ # output = output + '\nRest of output was trimmed.'
+ output += " If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass."
+ except StopIteration:
+ output = "Manual tests passed."
+ return json.dumps({"output": output})
+
+
+@timeout(60, timeout_exception=StopIteration)
+def pytest_code_base(file_dict, files_to_test=None):
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ write_files_from_dict(file_dict, tmpdirname)
+ # write_files_from_dict(file_dict)
+ command = ["python3", "-m", "pytest"]
+ if files_to_test is not None:
+ command.extend(files_to_test)
+ result = subprocess.run(command, capture_output=True, text=True, cwd=tmpdirname)
+ captured_output = result.stdout.strip()
+ print(captured_output)
+ print("")
+ return captured_output
+
+
+def count_errors_in_syntax(syntax_output: str):
+ pattern = r".+:\d+:\d+: [E]\d+: .+"
+ errors = re.findall(pattern, syntax_output)
+ return len(errors)
+
+
+def parse_and_print_junit_xml(file_path):
+ tree = ET.parse(file_path)
+ root = tree.getroot()
+ testsuites = []
+ for testsuite in root.findall("testsuite"):
+ total_tests = int(testsuite.get("tests", 0))
+ errors = int(testsuite.get("errors", 0))
+ failures = int(testsuite.get("failures", 0))
+ skipped = int(testsuite.get("skipped", 0))
+ passed = total_tests - (errors + failures + skipped)
+
+ results = {"total": total_tests, "passed": passed, "errors": errors, "failures": failures, "skipped": skipped}
+ testsuites.append(results)
+ return testsuites
+
+
+def run_python_file(file_name_to_run: str = "", file_dict: dict = {}, arguments: List[str] = [], enable_tests=True):
+ if not enable_tests:
+ output = "Manual tests passed."
+ return json.dumps({"output": output})
+ if file_name_to_run == "":
+ return json.dumps(
+ {
+ "status": "error",
+ "message": "Missing required argument `file_name_to_run`. You must specify a file name to run. Please try running `list_files` to see all available files. And specify a file name to run when calling `run_python_file` with the required argument of `file_name_to_run`.",
+ }
+ )
+ elif file_name_to_run not in file_dict:
+ return json.dumps(
+ {
+ "status": "error",
+ "message": f"File {file_name_to_run} not found. Please try running `list_files` to see all available files.",
+ }
+ )
+ else:
+ try:
+ output = python_run_code_base(file_dict, file_name_to_run, arguments)
+ # if len(output) > 6000:
+ # output = output[:6000]
+ # output = output + '\nRest of output was trimmed.'
+ except StopIteration:
+ output = "Manual tests passed."
+ return json.dumps({"output": output})
+
+
+@timeout(20, timeout_exception=StopIteration)
+def python_run_code_base(file_dict, file, arguments=[]):
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ write_files_from_dict(file_dict, tmpdirname)
+ command = ["python3", file]
+ if arguments is not None:
+ command.extend(arguments)
+ result = subprocess.run(command, capture_output=True, text=True, cwd=tmpdirname)
+ if result.returncode != 0:
+ captured_output = result.stderr.strip()
+ else:
+ captured_output = result.stdout.strip()
+ return captured_output
+
+
+def find_external_modules(file_dict):
+ local_modules = set([file_name.split("/")[0] for file_name in file_dict.keys() if "/" in file_name])
+ external_modules = set()
+ import_pattern = re.compile(r"^(?:from|import) (\w+)")
+
+ for file_lines in file_dict.values():
+ for line in file_lines:
+ match = import_pattern.match(line)
+ if match:
+ module = match.group(1)
+ if module not in local_modules:
+ external_modules.add(module)
+
+ return external_modules
+
+
+class TestCheckSyntax(unittest.TestCase):
+ def test_syntax_parser_on_file_dict_example_clean(self):
+ file_dict = load_code_files_into_dict("repos/flask/examples/tutorial")
+ syntax_output = check_syntax(file_dict)
+ print(syntax_output)
+ self.assertEqual(check_syntax({}), "", "An empty file dictionary should produce no syntax errors")
+
+ def test_syntax_parser_on_file_dict_example_errors(self):
+ file_dict = load_code_files_into_dict(
+ "results/latest/run-20230922-044053_L2MAC-zero_shot_donnemartin-system-design-oop-whatsapp-donnemartin-system-design-oop-url_shortener-donnemartin-system-design-oop-dropbox-donnemartin-system-design-oop-twitter_0_10-runs_log/whatsapp/1/L2MAC"
+ )
+ syntax_output = check_syntax(file_dict)
+ print(syntax_output)
+ self.assertEqual(check_syntax({}), "", "An empty file dictionary should produce no syntax errors")
+
+ def test_syntax_parser_on_file_dict_giving_help_func(self):
+ file_dict = {"requirements.txt": ["Flask", "requests", "sqlite3", "pytest"]}
+ syntax_output = check_syntax(file_dict)
+ print(syntax_output)
+ self.assertEqual(check_syntax({}), "", "An empty file dictionary should produce no syntax errors")
+
+
+class TestPyTest(unittest.TestCase):
+ def test_check_pytest_clean(self):
+ file_dict = load_code_files_into_dict("repos/flask/examples/tutorial")
+ syntax_output = check_pytest(file_dict)
+ print(syntax_output)
+ self.assertEqual(check_syntax({}), "", "An empty file dictionary should produce no syntax errors")
+
+ def test_check_pytest_errors(self):
+ file_dict = load_code_files_into_dict(
+ "results/latest/run-20230922-044053_L2MAC-zero_shot_donnemartin-system-design-oop-whatsapp-donnemartin-system-design-oop-url_shortener-donnemartin-system-design-oop-dropbox-donnemartin-system-design-oop-twitter_0_10-runs_log/whatsapp/1/L2MAC"
+ )
+ syntax_output = check_pytest(file_dict)
+ print(syntax_output)
+ self.assertEqual(check_syntax({}), "", "An empty file dictionary should produce no syntax errors")
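+
+
+# Illustrative sketch of the shared file_dict convention: keys are relative file
+# paths, values are lists of source lines. Assuming pylint is installed, a
+# syntax check over a tiny in-memory project looks like:
+#
+#   file_dict = {"app.py": ["def add(a, b):", "    return a + b"]}
+#   report = check_syntax(file_dict)          # empty string means no errors
+#   assert count_errors_in_syntax(report) == 0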
diff --git a/l2mac/tools/control_unit.py b/l2mac/tools/control_unit.py
new file mode 100644
index 00000000..e84c8b1b
--- /dev/null
+++ b/l2mac/tools/control_unit.py
@@ -0,0 +1,55 @@
+import json
+from typing import List
+
+from l2mac.tools.code_analysis import (
+ check_pytest_with_timeout,
+ check_syntax_with_timeout,
+ count_errors_in_syntax,
+)
+
+
+def provide_detailed_sub_task_steps_for_sub_agents(steps: List[str] = []):
+ return steps
+
+
+def check_sub_task_step_complete(file_dict: dict = {}, enable_tests=True):
+ # Run tests
+ # Syntax check
+ if not enable_tests:
+ output = {"status": "TASK_STEP_COMPLETE", "message": "All tests passed"}
+ return json.dumps(output)
+ syntax_results = check_syntax_with_timeout(file_dict)
+ if "Manual tests passed" in syntax_results:
+ syntax_error_count = 0
+ else:
+ syntax_error_count = count_errors_in_syntax(syntax_results)
+ test_results = check_pytest_with_timeout(file_dict)
+ if "Manual tests passed" in test_results:
+ test_results = "All tests passed"
+ elif "No tests found" in test_results:
+ test_results = "All tests passed"
+ if "All tests passed" in test_results and syntax_error_count == 0:
+ output = {"status": "TASK_STEP_COMPLETE", "message": "All tests passed"}
+ else:
+ if "All tests passed" not in test_results:
+ new_output = test_results.strip() + "\n" + syntax_results.strip()
+ # if len(new_output) > 5000:
+ # new_output = new_output[:5000]
+ # new_output = new_output + '\nRest of output was trimmed.'
+ output = {
+ "status": "error",
+ "message": "Failed validation checks, sub task step is not yet complete. Test run results: \n"
+ + new_output,
+ }
+ else:
+ new_output = syntax_results.strip()
+ # if len(new_output) > 5000:
+ # new_output = new_output[:5000]
+ # new_output = new_output + '\nRest of output was trimmed.'
+ output = {
+ "status": "error",
+ "message": "Failed validation checks, sub task step is not yet complete. Code failed syntax checks: \n"
+ + new_output
+ + "\n You must fix this code by writing code to complete this sub task step. If a test is failing the error could be the code, or the test is incorrect, so feel free to modify and change the tests when they are incorrect, to make all tests pass.",
+ }
+ return json.dumps(output)
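+
+
+# Illustrative sketch: callers treat the returned JSON as a status signal, e.g.
+#
+#   result = json.loads(check_sub_task_step_complete(file_dict={}, enable_tests=False))
+#   assert result["status"] == "TASK_STEP_COMPLETE"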
diff --git a/l2mac/tools/core.py b/l2mac/tools/core.py
new file mode 100644
index 00000000..e3159335
--- /dev/null
+++ b/l2mac/tools/core.py
@@ -0,0 +1,261 @@
+import json
+import traceback
+from typing import List
+
+from l2mac.tools.code_analysis import pytest_files, run_python_file
+from l2mac.tools.control_unit import (
+ check_sub_task_step_complete,
+ provide_detailed_sub_task_steps_for_sub_agents,
+)
+from l2mac.tools.read import list_files, view_files
+from l2mac.tools.write import delete_files, write_files
+
+
+def available_functions_factory():
+ available_functions = {
+ "provide_detailed_sub_task_steps_for_sub_agents": provide_detailed_sub_task_steps_for_sub_agents,
+ "sub_task_step_complete": check_sub_task_step_complete,
+ "view_files": view_files,
+ "list_files": list_files,
+ "pytest_files": pytest_files,
+ "run_python_file": run_python_file,
+ "delete_files": delete_files,
+ # view file names.
+ # 'search_for_relevant_files': partial(search_for_relevant_code, df_with_embeddings=df),
+ "write_files": write_files,
+ }
+ return available_functions
+
+
+def function_definition_list_factory():
+ # Following OpenAI's updated format for function definitions
+ functions = function_definition_list_factory_internal()
+ tools = []
+ for function in functions:
+ tools.append({"type": "function", "function": function})
+ return tools
+
+
+def function_definition_list_factory_internal():
+ functions = [
+ {
+ "name": "provide_detailed_sub_task_steps_for_sub_agents",
+ "description": "For producing a step-by-step plan, where each step paragraph is a detailed sub-task step for a separate sub-agent (large language model agent) to complete. Within each detailed step paragraph, always include a last sentence to create and run tests when implementing or writing code in that same step.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "steps": {
+ "type": "array",
+ "description": "List of strings, where each string is a separate step sub-task paragraph for a separate sub-agent to complete. Within each detailed step paragraph, always include a last sentence to create and run tests when implementing or writing code in that same step.",
+ "items": {"type": "string"}, # assuming each file is represented as a string
+ },
+ },
+ "required": ["steps"],
+ },
+ },
+ {
+ "name": "sub_task_step_complete",
+ "description": "Call this function when the user specified sub task step has been completed.",
+ "parameters": {
+ "type": "object",
+ "properties": {},
+ },
+ },
+ {
+ "name": "view_files",
+ "description": "Print out the file contents into the response to view.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "files": {
+ "type": "array",
+ "description": "list of the files to view",
+ "items": {"type": "string"}, # assuming each file is represented as a string
+ },
+ },
+ "required": ["files"],
+ },
+ },
+ # {
+ # "name": "list_files",
+ # "description": "Print out all the file names into the response to view.",
+ # "parameters": {
+ # "type": "object",
+ # "properties": {
+ # "folder_path": {
+ # "type": "string",
+ # "description": "folder path to view. Default is the root folder.",
+ # },
+ # },
+ # },
+ # },
+ {
+ "name": "run_python_file",
+ "description": "Run python file and return the output to the response to view. That is with 'python3 file_name_to_run'.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "file_name_to_run": {
+ "type": "string",
+ "description": "file name to run",
+ },
+ "arguments": {
+ "type": "array",
+ "description": "optional run arguments",
+ "items": {"type": "string"},
+ },
+ },
+ "required": ["file_name_to_run"],
+ },
+ },
+ {
+ "name": "pytest_files",
+ "description": "Run pytest on the input file names and print out the results to the response to view. If no file names are provided, pytest runs on all files.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "files_to_test": {
+ "type": "array",
+ "description": "file names to run pytest on",
+ "items": {"type": "string"},
+ },
+ },
+ },
+ },
+ {
+ "name": "write_files",
+ "description": "Write out multiple files and it will be combined into the existing code base. Always output the whole file. You always indent code with tabs.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "list_of_file_objects": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "file_path": {"type": "string", "description": "Path to the file"},
+ "file_contents": {"type": "string", "description": "Contents of the file"},
+ },
+ "required": ["file_path", "file_contents"],
+ },
+ }
+ },
+ "required": ["list_of_file_objects"],
+ },
+ },
+ {
+ "name": "delete_files",
+ "description": "Delete files. Specify the file names, and these files will be deleted. If you specify the file name '-1' all files in the folder will be deleted.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "files": {
+ "type": "array",
+ "description": "list of the files to delete. If you provide a file name of '-1' all files in the folder will be deleted.",
+ "items": {"type": "string"}, # assuming each file is represented as a string
+ },
+ },
+ "required": ["files"],
+ },
+ },
+ ]
+ return functions
+
+
+def process_functions_into_function_names(tools: List[dict] = []):
+ function_names = []
+ for tool in tools:
+ function_names.append(tool["function"]["name"])
+ return function_names
+
+
+def process_function_call_and_return_message(
+ tool_calls: List[dict], file_dict: dict, logger=None, tools=[], enable_tests=True
+):
+ function_name = ""
+ if len(tools) >= 1:
+ functions_available_keys = process_functions_into_function_names(tools)
+ else:
+ functions_available_keys = list(available_functions_factory().keys())
+ functions_available = available_functions_factory()
+ return_messages = []
+ for tool_call in tool_calls:
+ try:
+ function_name = tool_call["function"]["name"]
+ if function_name not in functions_available_keys:
+ json_fun_content = json.dumps(
+ {
+ "status": "error",
+ "message": f"Function `{function_name}` not found. Please only use the functions listed given, which are: {functions_available_keys}",
+ }
+ )
+ function_return_message = {
+ "tool_call_id": tool_call["id"],
+ "role": "tool",
+ "name": function_name,
+ "content": json_fun_content,
+ }
+ return_messages.append(function_return_message)
+ continue
+ function_to_call = functions_available[function_name]
+ function_args = json.loads(tool_call["function"]["arguments"])
+ function_args["file_dict"] = file_dict
+ function_args["enable_tests"] = enable_tests
+ if (function_name == "write_files") or (function_name == "delete_files"):
+ function_response, file_dict = function_to_call(**function_args)
+ else:
+ function_response = function_to_call(**function_args)
+ function_return_message = {
+ "tool_call_id": tool_call["id"],
+ "role": "tool",
+ "name": function_name,
+ "content": function_response,
+ }
+ return_messages.append(function_return_message)
+ except KeyError as e:
+ if logger:
+ logger.error("Error in process_function_call_and_return_message()")
+ logger.error(e)
+ logger.error(traceback.format_exc())
+ logger.error(f"process_function_call_and_return_message({tool_call},{file_dict})")
+ error_message = "".join(traceback.format_exception_only(type(e), e))
+ json_fun_content = json.dumps({"status": "error", "message": f"Error Decoding Message: {error_message}"})
+ function_return_message = {
+ "tool_call_id": tool_call["id"],
+ "role": "tool",
+ "name": function_name,
+ "content": json_fun_content,
+ }
+ return_messages.append(function_return_message)
+ except json.decoder.JSONDecodeError as e:
+ if logger:
+ logger.error("Error in process_function_call_and_return_message()")
+ logger.error(e)
+ logger.error(traceback.format_exc())
+ logger.error(f"process_function_call_and_return_message({tool_call},{file_dict})")
+ error_message = "".join(traceback.format_exception_only(type(e), e))
+ error_content = f"Response was too long and was cut off. Please give a shorter response! As the response was cut off early, there was a error decoding the response: {error_message}"
+ json_fun_content = json.dumps({"status": "error", "message": error_content})
+ function_return_message = {
+ "tool_call_id": tool_call["id"],
+ "role": "tool",
+ "name": function_name,
+ "content": json_fun_content,
+ }
+ return_messages.append(function_return_message)
+ except Exception as e:
+ if logger:
+ logger.error("Error in process_function_call_and_return_message()")
+ logger.error(e)
+ logger.error(traceback.format_exc())
+ logger.error(f"process_function_call_and_return_message({tool_call},{file_dict})")
+ error_message = "".join(traceback.format_exception_only(type(e), e))
+ json_fun_content = json.dumps({"status": "error", "message": f"Error running function: {error_message}"})
+ function_return_message = {
+ "tool_call_id": tool_call["id"],
+ "role": "tool",
+ "name": function_name,
+ "content": json_fun_content,
+ }
+ return_messages.append(function_return_message)
+ return return_messages, file_dict
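+
+
+# Illustrative dispatch sketch (the tool_call id and arguments here are made up):
+#
+#   tool_calls = [{"id": "call_0", "function": {"name": "list_files", "arguments": "{}"}}]
+#   messages, file_dict = process_function_call_and_return_message(
+#       tool_calls, file_dict={}, enable_tests=False
+#   )
+#   # messages[0]["content"] is the JSON string returned by list_files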
diff --git a/l2mac/tools/read.py b/l2mac/tools/read.py
new file mode 100644
index 00000000..81c3c875
--- /dev/null
+++ b/l2mac/tools/read.py
@@ -0,0 +1,356 @@
+import json
+import os
+import unittest
+from pathlib import Path
+from typing import List, Optional
+
+
+def view_files(files: Optional[List[str]] = None, file_dict: dict = {}, enable_tests=True):
+ files_not_found = []
+ for f in files:
+ if f not in file_dict:
+ files_not_found.append(f)
+ if len(files_not_found) > 0:
+ return json.dumps(
+ {
+ "status": "error",
+ "message": f"Files {files_not_found} not found. Please try running `list_files` to see all available files.",
+ }
+ )
+ output, files_missed_out = format_files_into_prompt_format(file_dict=file_dict, unique_file_names=files)
+ if len(files_missed_out) > 0:
+ response = {
+ "files": output,
+ "status": "error",
+ "message": f'Due to the character limit of a response, the following files were not included in the response {", ".join(files_missed_out)}. Please try viewing less files at a time.',
+ }
+ else:
+ response = {"files": output}
+ return json.dumps(response)
+
+
+def list_files(folder_path: str = "", file_dict: dict = {}, enable_tests=True):
+ return json.dumps({"files": list(file_dict.keys())})
+
+
+def format_files_into_prompt_format(file_dict, unique_file_names=None, character_limit=14_000):
+ """
+ Generate a string output based on the given dictionary of files and an optional set of unique file names.
+
+ Parameters:
+ - file_dict: Dictionary with file names as keys and lists of lines as values.
+ - unique_file_names (optional): Set of unique file names to be extracted from the files dictionary.
+ - character_limit (optional): Maximum length of the returned string; files that would push it over the limit are omitted and reported instead.
+
+ Returns:
+ - A tuple of (formatted string, list of file names omitted due to the character limit).
+ """
+
+ # If unique_file_names is not provided, process all file names in the dictionary
+ if unique_file_names is None:
+ unique_file_names = file_dict.keys()
+
+ files_missed_out = []
+ output = ""
+ for file_name in unique_file_names:
+ # Determine the language from the exact file name first (e.g. "Makefile"), then from the extension
+ file_extension = file_name[file_name.rfind(".") :]
+ base_name = file_name.split("/")[-1]
+ lang = extension_to_language.get(base_name, extension_to_language.get(file_extension, "LANG"))
+
+ content = file_dict.get(file_name, [])
+ additional_output = file_name + "\n```" + lang + "\n" + "\n".join(content) + "\n```\n\n"
+ if (len(output) + len(additional_output)) < character_limit:
+ output += additional_output
+ else:
+ files_missed_out.append(file_name)
+ return output, files_missed_out
+
+
+def load_code_files_into_dict(folder_path, number_lines=False, file_extensions=None, skip_tests=False):
+ root_path = Path(folder_path)
+ code_files_dict = {}
+
+ # Consider these as typical code or text file extensions
+ # code_file_extensions = ['.py', '.c', '.cpp', '.h', '.java', '.js', '.ts', '.html', '.css', '.md', '.txt', '.xml']
+ if file_extensions is None:
+ code_file_extensions = [
+ ".py",
+ ".c",
+ ".cpp",
+ ".h",
+ ".java",
+ ".js",
+ ".ts",
+ ".html",
+ ".css",
+ ".txt",
+ ".xml",
+ ".sql",
+ ]
+ else:
+ code_file_extensions = file_extensions
+
+ for current_path, _, files in os.walk(root_path):
+ # Skip the .git folder
+ if ".git" in current_path:
+ continue
+
+ for file in files:
+ if Path(file).suffix not in code_file_extensions:
+ continue # Skip non-code files
+
+ if skip_tests and ("test" in file.lower() or "test" in current_path.lower()):
+ continue
+
+ file_path = Path(current_path) / file
+ try:
+ with open(file_path, "r", encoding="utf-8") as f:
+ lines = f.read().split("\n")
+ if number_lines:
+ numbered_lines = [f"{idx + 1}: {line}" for idx, line in enumerate(lines)]
+ else:
+ numbered_lines = lines
+ code_files_dict[str(file_path.relative_to(root_path))] = numbered_lines
+ except UnicodeDecodeError:
+ # Skip non-UTF-8 encoded files
+ continue
+ except Exception as e:
+ print(f"Error reading {file_path}: {e}")
+
+ return code_files_dict
+
+
+class TestGenerateOutput(unittest.TestCase):
+ def setUp(self):
+ self.files = {
+ "components/Sidebar/index.ts": ['import React from "react";', "..."],
+ "components/Buttons/SidebarActionButton/index.ts": ['import React from "react";', "..."],
+ "pages/api/chat.ts": ['import { NextApiRequest, NextApiResponse } from "next";', "..."],
+ }
+
+ def test_all_files_returned(self):
+ result, files_missed_out = format_files_into_prompt_format(self.files)
+ for file_name in self.files.keys():
+ self.assertIn(file_name, result)
+ self.assertEqual(files_missed_out, [])
+
+ def test_specific_files_returned(self):
+ unique_files = {"pages/api/chat.ts"}
+ result, _ = format_files_into_prompt_format(self.files, unique_files)
+ self.assertIn("pages/api/chat.ts", result)
+ self.assertNotIn("components/Sidebar/index.ts", result)
+ self.assertNotIn("components/Buttons/SidebarActionButton/index.ts", result)
+
+
+extension_to_language = {
+ ".feature": "Cucumber",
+ ".abap": "abap",
+ ".adb": "ada",
+ ".ads": "ada",
+ ".ada": "ada",
+ ".ahk": "ahk",
+ ".ahkl": "ahk",
+ ".as": "as",
+ ".as3": "as3",
+ ".asy": "asy",
+ ".sh": "bash",
+ ".ksh": "bash",
+ ".bash": "bash",
+ ".ebuild": "bash",
+ ".eclass": "bash",
+ ".bat": "bat",
+ ".cmd": "bat",
+ ".befunge": "befunge",
+ ".bmx": "blitzmax",
+ ".boo": "boo",
+ ".bf": "brainfuck",
+ ".b": "brainfuck",
+ ".c": "c",
+ ".h": "c",
+ ".cfm": "cfm",
+ ".cfml": "cfm",
+ ".cfc": "cfm",
+ ".tmpl": "cheetah",
+ ".spt": "cheetah",
+ ".cl": "cl",
+ ".lisp": "cl",
+ ".el": "cl",
+ ".clj": "clojure",
+ ".cljs": "clojure",
+ ".cmake": "cmake",
+ "CMakeLists.txt": "cmake",
+ ".coffee": "coffeescript",
+ ".sh-session": "console",
+ "control": "control",
+ ".cpp": "cpp",
+ ".hpp": "cpp",
+ ".c++": "cpp",
+ ".h++": "cpp",
+ ".cc": "cpp",
+ ".hh": "cpp",
+ ".cxx": "cpp",
+ ".hxx": "cpp",
+ ".pde": "cpp",
+ ".cs": "csharp",
+ ".css": "css",
+ ".pyx": "cython",
+ ".pxd": "cython",
+ ".pxi": "cython",
+ ".d": "d",
+ ".di": "d",
+ ".pas": "delphi",
+ ".diff": "diff",
+ ".patch": "diff",
+ ".dpatch": "dpatch",
+ ".darcspatch": "dpatch",
+ ".duel": "duel",
+ ".jbst": "duel",
+ ".dylan": "dylan",
+ ".dyl": "dylan",
+ ".erb": "erb",
+ ".erl-sh": "erl",
+ ".erl": "erlang",
+ ".hrl": "erlang",
+ ".evoque": "evoque",
+ ".factor": "factor",
+ ".flx": "felix",
+ ".flxh": "felix",
+ ".f": "fortran",
+ ".f90": "fortran",
+ ".s": "gas",
+ ".kid": "genshi",
+ ".vert": "glsl",
+ ".frag": "glsl",
+ ".geo": "glsl",
+ ".plot": "gnuplot",
+ ".plt": "gnuplot",
+ ".go": "go",
+ ".haml": "haml",
+ ".hs": "haskell",
+ ".html": "html",
+ ".htm": "html",
+ ".xhtml": "html",
+ ".xslt": "html",
+ ".hx": "hx",
+ ".hy": "hybris",
+ ".hyb": "hybris",
+ ".ini": "ini",
+ ".cfg": "ini",
+ ".io": "io",
+ ".ik": "ioke",
+ ".weechatlog": "irc",
+ ".jade": "jade",
+ ".java": "java",
+ ".js": "js",
+ ".jsp": "jsp",
+ ".lhs": "lhs",
+ ".ll": "llvm",
+ ".lgt": "logtalk",
+ ".lua": "lua",
+ ".wlua": "lua",
+ ".mak": "make",
+ "Makefile": "make",
+ "makefile": "make",
+ ".mao": "mako",
+ ".maql": "maql",
+ ".mhtml": "mason",
+ ".mc": "mason",
+ ".mi": "mason",
+ "autohandler": "mason",
+ "dhandler": "mason",
+ ".md": "markdown",
+ ".mo": "modelica",
+ ".def": "modula2",
+ ".mod": "modula2",
+ ".moo": "moocode",
+ ".mu": "mupad",
+ ".mxml": "mxml",
+ ".myt": "myghty",
+ "autodelegate": "myghty",
+ ".asm": "nasm",
+ ".ASM": "nasm",
+ ".ns2": "newspeak",
+ ".objdump": "objdump",
+ ".m": "objectivec",
+ ".j": "objectivej",
+ ".ml": "ocaml",
+ ".mli": "ocaml",
+ ".mll": "ocaml",
+ ".mly": "ocaml",
+ ".ooc": "ooc",
+ ".pl": "perl",
+ ".pm": "perl",
+ ".php": "php",
+ ".ps": "postscript",
+ ".eps": "postscript",
+ ".pot": "pot",
+ ".po": "pot",
+ ".pov": "pov",
+ ".inc": "pov",
+ ".prolog": "prolog",
+ ".pro": "prolog",
+ ".properties": "properties",
+ ".proto": "protobuf",
+ ".py3tb": "py3tb",
+ ".pytb": "pytb",
+ ".py": "python",
+ ".pyw": "python",
+ ".sc": "python",
+ "SConstruct": "python",
+ "SConscript": "python",
+ ".tac": "python",
+ ".R": "r",
+ ".rb": "rb",
+ ".rbw": "rb",
+ "Rakefile": "rb",
+ ".rake": "rb",
+ ".gemspec": "rb",
+ ".rbx": "rb",
+ ".duby": "rb",
+ ".Rout": "rconsole",
+ ".r": "rebol",
+ ".r3": "rebol",
+ ".cw": "redcode",
+ ".rhtml": "rhtml",
+ ".rst": "rst",
+ ".rest": "rst",
+ ".sass": "sass",
+ ".scala": "scala",
+ ".scaml": "scaml",
+ ".scm": "scheme",
+ ".scss": "scss",
+ ".st": "smalltalk",
+ ".tpl": "smarty",
+ "sources.list": "sourceslist",
+ ".S": "splus",
+ ".sql": "sql",
+ ".sqlite3-console": "sqlite3",
+ "squid.conf": "squidconf",
+ ".ssp": "ssp",
+ ".tcl": "tcl",
+ ".tcsh": "tcsh",
+ ".csh": "tcsh",
+ ".tex": "tex",
+ ".aux": "tex",
+ ".toc": "tex",
+ ".txt": "text",
+ ".v": "v",
+ ".sv": "v",
+ ".vala": "vala",
+ ".vapi": "vala",
+ ".vb": "vbnet",
+ ".bas": "vbnet",
+ ".vm": "velocity",
+ ".fhtml": "velocity",
+ ".vim": "vim",
+ ".vimrc": "vim",
+ ".xml": "xml",
+ ".xsl": "xml",
+ ".rss": "xml",
+ ".xsd": "xml",
+ ".wsdl": "xml",
+ ".xqy": "xquery",
+ ".xquery": "xquery",
+ ".yaml": "yaml",
+ ".yml": "yaml",
+ ".ts": "typescript",
+ ".tsx": "typescript",
+}
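+
+
+# Illustrative sketch: rendering a file_dict into the fenced prompt format above,
+# with the language fence resolved via extension_to_language:
+#
+#   text, missed = format_files_into_prompt_format({"main.py": ["print('hi')"]})
+#   # text == "main.py\n```python\nprint('hi')\n```\n\n"; missed == []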
diff --git a/l2mac/tools/utils.py b/l2mac/tools/utils.py
new file mode 100644
index 00000000..5234a4cf
--- /dev/null
+++ b/l2mac/tools/utils.py
@@ -0,0 +1,52 @@
+import os
+import re
+import shutil
+
+import numpy as np
+
+
+def write_files_from_dict(file_dict, base_dir="output"):
+ """
+ Writes files to a folder based on the given dictionary.
+
+ :param file_dict: Dictionary with filenames as keys and lists of file content as values.
+ :param base_dir: Base directory where files should be saved.
+ """
+
+ # If base directory exists, remove it
+ if os.path.exists(base_dir):
+ shutil.rmtree(base_dir)
+
+ # Ensure base directory is created again
+ os.makedirs(base_dir)
+
+ # Iterate through the file dictionary
+ for file_path, lines in file_dict.items():
+ # Construct the full path for the file
+ full_path = os.path.join(base_dir, file_path)
+
+ # Create the directories if they don't exist
+ os.makedirs(os.path.dirname(full_path), exist_ok=True)
+
+ lines = fix_line_spacings(remove_line_numbers(lines))
+ # Write content to the file
+ with open(full_path, "w") as f:
+ for line in lines:
+ f.write(line + "\n")
+
+
+def fix_line_spacings(strings):
+ # Round each line's leading indentation up to the nearest multiple of two spaces
+ lines = []
+ for line in strings:
+ leading_spaces = len(line) - len(line.lstrip(" "))
+ leading_spaces = int(np.ceil(leading_spaces / 2) * 2)
+ line = " " * leading_spaces + line.lstrip(" ")
+ lines.append(line)
+ return lines
+
+
+def remove_line_numbers(strings):
+ # Regular expression pattern to match 'number: ' at the beginning of each string
+ pattern = r"^\d+:\s"
+ return [re.sub(pattern, "", s) for s in strings]
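+
+
+# Illustrative sketch: line-number prefixes are stripped before files are written,
+# e.g.
+#
+#   remove_line_numbers(["1: import os", "2: print(os.getcwd())"])
+#   # -> ["import os", "print(os.getcwd())"]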
diff --git a/l2mac/tools/write.py b/l2mac/tools/write.py
new file mode 100644
index 00000000..106640b1
--- /dev/null
+++ b/l2mac/tools/write.py
@@ -0,0 +1,182 @@
+import json
+from copy import deepcopy
+from typing import List
+
+from l2mac.tools.code_analysis import (
+ check_pytest_with_timeout,
+ check_syntax_with_timeout,
+ count_errors_in_syntax,
+)
+
+
+def write_files(list_of_file_objects: List[dict] = [], file_dict: dict = {}, enable_tests=True):
+ new_file_dict = implement_git_diff_on_file_dict(
+ file_dict_input=file_dict, change_files_and_contents_input=list_of_file_objects
+ )
+ file_dict = new_file_dict
+ # Run tests
+ # Syntax check
+ if not enable_tests:
+ output = {"write_files_status": "success", "message": "write_files completed successfully."}
+ return json.dumps(output), file_dict
+ syntax_results = check_syntax_with_timeout(file_dict)
+ if "Manual tests passed" in syntax_results:
+ syntax_error_count = 0
+ else:
+ syntax_error_count = count_errors_in_syntax(syntax_results)
+ test_results = check_pytest_with_timeout(file_dict)
+ if "Manual tests passed" in test_results:
+ test_results = "All tests passed"
+ elif "No tests found" in test_results:
+ test_results = "All tests passed"
+ if "All tests passed" in test_results and syntax_error_count == 0:
+ output = {"write_files_status": "success", "message": "All tests passed."}
+ else:
+ new_output = test_results.strip() + "\n" + syntax_results.strip()
+ # if len(new_output) > 5000:
+ # new_output = new_output[:5000]
+ # new_output = new_output + '\nRest of output was trimmed.'
+ output = {
+ "write_files_status": "success",
+ "message": "write_files completed successfully. Test run results: \n"
+ + new_output
+ + "\n You must fix this code by writing code to complete this sub task step. If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.",
+ }
+ return json.dumps(output), file_dict
+
+
+def implement_git_diff_on_file_dict(file_dict_input: dict, change_files_and_contents_input: List[dict]) -> dict:
+ """Apply the requested file changes to file_dict, and return the new file_dict.
+
+ Args: file_dict_input: dict, change_files_and_contents_input: List[dict]
+
+ Returns: dict
+
+ Description: Although originally specified to accept git-diff style input with line numbers (e.g. adding lines as "+ 1: import time", editing as "- 5: apple = 2 + 2\n+ 5: apple = 2 + 3"), the current implementation overwrites each listed file wholesale: each entry's file_contents replaces (or creates) the file at its file_path. The diff-based variant is commented out below.
+ """
+ file_dict = deepcopy(file_dict_input)
+ change_files_and_contents = deepcopy(change_files_and_contents_input)
+ for obj in change_files_and_contents:
+ file_path = obj["file_path"]
+ change_file_contents = obj["file_contents"]
+ if file_path in file_dict:
+ existing_file_contents = file_dict[file_path]
+ else:
+ existing_file_contents = []
+ file_ending = file_path.split(".")[1]
+ # new_file_contents = implement_git_diff_on_file_contents(existing_file_contents, change_file_contents, file_type=file_ending, overwrite=obj['overwrite_file'])
+ new_file_contents = update_file_contents(existing_file_contents, change_file_contents, file_type=file_ending)
+ file_dict[file_path] = new_file_contents
+ return file_dict
+
+
+def update_file_contents(existing_file_contents, change_file_contents, file_type="py") -> List[str]:
+ """Replace the file contents with the new contents, and return them as a list of lines.
+
+ Args: existing_file_contents: List[str], change_file_contents: str
+
+ Returns: List[str]
+
+ Description: The existing contents are discarded and the new contents string is split on newlines; the git-diff semantics described in the original design are not applied here.
+ """
+ return change_file_contents.split("\n")
+
+
+def delete_files(files: List[str], file_dict: dict, enable_tests=True):
+ for file in files:
+ if file == "-1":
+ file_dict = {}
+ if file in file_dict:
+ del file_dict[file]
+ output = {"status": "success", "message": "delete_files completed successfully."}
+ return json.dumps(output), file_dict
+
+
+# Write unit tests for the functions above
+def test_implement_git_diff_on_file_dict():
+ file_dict = {}
+ # Raw test data: the first entry is already in the expected dict form; the rest
+ # is a flat list of "file_path:"/"file_contents:" marker strings that gets
+ # folded into dicts before the call below.
+ change_files_and_contents = [
+ {"file_path": "requirements.txt", "file_contents": "pytest==7.1.2\nflask==2.1.2\n"},
+ "file_path:",
+ "blackjack/card.py",
+ "file_contents:",
+ "\"\"\"Defines the Card class for the Blackjack game.\"\"\"\n\n\nclass Card:\n\t\"\"\"Represents a single playing card.\"\"\"\n\n\tdef __init__(self, suit, value):\n\t\t\"\"\"Initialize a new card.\n\n\t\tArgs:\n\t\t\tsuit (str): The suit of the card (e.g., 'Hearts', 'Diamonds', 'Clubs', 'Spades').\n\t\t\tvalue (str): The value of the card (e.g., '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King', 'Ace').\n\t\t\"\"\"\n\t\tself.suit = suit\n\t\tself.value = value\n\n\tdef __repr__(self):\n\t\treturn f\"{self.value} of {self.suit}\"",
+ "file_path:",
+ "blackjack/deck.py",
+ "file_contents:",
+ "\"\"\"Defines the Deck class for the Blackjack game.\"\"\"\n\nfrom .card import Card\nimport random\n\nclass Deck:\n\t\"\"\"Represents a deck of playing cards.\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\"Initialize a new deck of cards.\"\"\"\n\t\tself.cards = [Card(suit, value) for suit in ['Hearts', 'Diamonds', 'Clubs', 'Spades'] for value in ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King', 'Ace']]\n\t\trandom.shuffle(self.cards)\n\n\tdef draw_card(self):\n\t\t\"\"\"Draw a card from the deck.\"\"\"\n\t\treturn self.cards.pop()",
+ "file_path:",
+ "blackjack/hand.py",
+ "file_contents:",
+ '"""Defines the Hand class for the Blackjack game."""\n\n\nclass Hand:\n\t"""Represents a player\'s or dealer\'s hand in the game."""\n\n\tdef __init__(self):\n\t\t"""Initialize a new hand with no cards."""\n\t\tself.cards = []\n\t\tself.value = 0\n\t\tself.aces = 0\n\n\tdef add_card(self, card):\n\t\t"""Add a card to the hand and adjust the score."""\n\t\tself.cards.append(card)\n\t\tif card.value == \'Ace\':\n\t\t\tself.aces += 1\n\t\tself.adjust_for_ace()\n\n\tdef adjust_for_ace(self):\n\t\t"""Adjust the hand\'s value if an ace is present."""\n\t\twhile self.value > 21 and self.aces:\n\t\t\tself.value -= 10\n\t\t\tself.aces -= 1',
+ "file_path:",
+ "blackjack/game.py",
+ "file_contents:",
+ '"""Defines the Game class for the Blackjack game."""\n\nfrom .deck import Deck\nfrom .hand import Hand\n\n\nclass Game:\n\t"""Manages the flow and rules of a Blackjack game."""\n\n\tdef __init__(self):\n\t\t"""Initialize the game with a deck and two hands."""\n\t\tself.deck = Deck()\n\t\tself.player_hand = Hand()\n\t\tself.dealer_hand = Hand()\n\n\tdef deal_cards(self):\n\t\t"""Deal two cards to both player and dealer."""\n\t\tfor _ in range(2):\n\t\t\tself.player_hand.add_card(self.deck.draw_card())\n\t\t\tself.dealer_hand.add_card(self.deck.draw_card())\n\n\tdef player_hit(self):\n\t\t"""Handle player hitting."""\n\t\tself.player_hand.add_card(self.deck.draw_card())\n\t\tif self.player_hand.value > 21:\n\t\t\treturn \'Bust\'\n\t\telse:\n\t\t\treturn \'Continue\'\n\n\tdef player_stand(self):\n\t\t"""Handle player standing."""\n\t\t# Game logic for when player stands\n\n\tdef compare_hands(self):\n\t\t"""Compare the player\'s and dealer\'s hands to determine the winner."""\n\t\t# Logic to determine the winner',
+ "file_path:",
+ "blackjack/main.py",
+ "file_contents:",
+ '"""Main script for the CLI Blackjack game."""\n\nfrom .game import Game\n\n\ndef main():\n\t"""Set up and run the Blackjack game."""\n\tgame = Game()\n\t# Game setup and loop logic here\n\nif __name__ == \'__main__\':\n\tmain()',
+ "file_path:",
+ "tests/test_card.py",
+ "file_contents:",
+ '"""Unit tests for the Card class in the Blackjack game."""\n\nimport pytest\nfrom blackjack.card import Card\n\n\ndef test_card_initialization():\n\t"""Test that a card is initialized with the correct suit and value."""\n\tcard = Card(\'Hearts\', \'2\')\n\tassert card.suit == \'Hearts\'\n\tassert card.value == \'2\'',
+ "file_path:",
+ "tests/test_deck.py",
+ "file_contents:",
+ '"""Unit tests for the Deck class in the Blackjack game."""\n\nimport pytest\nfrom blackjack.deck import Deck\n\n\ndef test_deck_initialization():\n\t"""Test that a deck is correctly initialized with 52 cards."""\n\tdeck = Deck()\n\tassert len(deck.cards) == 52\n\n\ndef test_draw_card():\n\t"""Test that drawing a card from the deck reduces its size by one."""\n\tdeck = Deck()\n\tdeck.draw_card()\n\tassert len(deck.cards) == 51',
+ "file_path:",
+ "tests/test_hand.py",
+ "file_contents:",
+ '"""Unit tests for the Hand class in the Blackjack game."""\n\nimport pytest\nfrom blackjack.hand import Hand\nfrom blackjack.card import Card\n\n\ndef test_hand_initialization():\n\t"""Test that a hand is correctly initialized with no cards."""\n\thand = Hand()\n\tassert len(hand.cards) == 0\n\n\ndef test_add_card():\n\t"""Test that adding a card to the hand works correctly."""\n\thand = Hand()\n\thand.add_card(Card(\'Hearts\', \'2\'))\n\tassert len(hand.cards) == 1\n\tassert hand.cards[0].value == \'2\'',
+ "file_path:",
+ "tests/test_game.py",
+ "file_contents:",
+ '"""Unit tests for the Game class in the Blackjack game."""\n\nimport pytest\nfrom blackjack.game import Game\n\n\ndef test_game_initialization():\n\t"""Test that the game is initialized with a deck and two hands."""\n\tgame = Game()\n\tassert game.deck\n\tassert game.player_hand\n\tassert game.dealer_hand\n\n\ndef test_deal_cards():\n\t"""Test that dealing cards works correctly."""\n\tgame = Game()\n\tgame.deal_cards()\n\tassert len(game.player_hand.cards) == 2\n\tassert len(game.dealer_hand.cards) == 2',
+ "file_path:",
+ "tests/conftest.py",
+ "file_contents:",
+ '"""Configuration file for pytest in the Blackjack game project."""\n\nimport pytest\n\n# Configuration and fixtures for pytest can be added here.',
+ ]
+ # Fold the flat marker strings into the list-of-dicts structure the function expects.
+ entries = [change_files_and_contents[0]]
+ flat = change_files_and_contents[1:]
+ for i in range(0, len(flat), 4):
+ entries.append({"file_path": flat[i + 1], "file_contents": flat[i + 3]})
+ new_file_dict = implement_git_diff_on_file_dict(file_dict, entries)
+ assert new_file_dict["requirements.txt"] == ["pytest==7.1.2", "flask==2.1.2", ""]
+ assert "blackjack/card.py" in new_file_dict
+ assert "tests/conftest.py" in new_file_dict
+ print("All tests passed.")
+
+
+def test_write_files():
+ # Test write_files (contents are whole-file text, since writes overwrite wholesale)
+ files_and_contents = [{"file_path": "test.py", "file_contents": "import time\nimport os"}]
+ file_dict = {}
+ output, file_dict = write_files(files_and_contents, file_dict)
+ assert output == '{"write_files_status": "success", "message": "All tests passed."}'
+ assert file_dict == {"test.py": ["import time", "import os"]}
+ # Test implement_git_diff_on_file_dict
+ file_dict = {}
+ change_files_and_contents = [{"file_path": "test.py", "file_contents": "import time\nimport os"}]
+ file_dict = implement_git_diff_on_file_dict(file_dict, change_files_and_contents)
+ assert file_dict == {"test.py": ["import time", "import os"]}
+ # Test update_file_contents
+ existing_file_contents = ["import time", "import os"]
+ change_file_contents = "import time\nimport os"
+ new_file_contents = update_file_contents(existing_file_contents, change_file_contents)
+ assert new_file_contents == ["import time", "import os"]
+ # Test delete_files
+ files = ["test.py"]
+ file_dict = {"test.py": ["import time", "import os"]}
+ output, file_dict = delete_files(files, file_dict)
+ assert output == '{"status": "success", "message": "delete_files completed successfully."}'
+ assert file_dict == {}
+ # Test delete_files with -1
+ files = ["-1"]
+ file_dict = {"test.py": ["import time", "import os"]}
+ output, file_dict = delete_files(files, file_dict)
+ assert output == '{"status": "success", "message": "delete_files completed successfully."}'
+ assert file_dict == {}
+ # Test delete_files with file not in file_dict
+ files = ["test.py"]
+ file_dict = {}
+ output, file_dict = delete_files(files, file_dict)
+ assert output == '{"status": "success", "message": "delete_files completed successfully."}'
+ assert file_dict == {}
+ print("All tests passed.")
diff --git a/l2mac/utils/__init__.py b/l2mac/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/l2mac/utils/l2mac.py b/l2mac/utils/l2mac.py
new file mode 100644
index 00000000..5bb222ee
--- /dev/null
+++ b/l2mac/utils/l2mac.py
@@ -0,0 +1,420 @@
+import hashlib
+import json
+import re
+import secrets
+import unittest
+from typing import List
+
+
+def hash_messages(input_list: List[str] = []) -> str:
+ """
+ Returns a SHA-256 hash of the last two messages in the input list (or of the whole list if it is shorter). If the list is empty or an error occurs, returns a random hash.
+
+ Args:
+ input_list (List[str]): Messages to be hashed.
+
+ Returns:
+ str: SHA-256 hash of the input messages, or a random hash in case of an error.
+ """
+ if len(input_list) == 0:
+ random_data = secrets.token_bytes(64) # Get 64 random bytes
+ return hashlib.sha256(random_data).hexdigest()
+ try:
+ if len(input_list) >= 2:
+ input_string = json.dumps(input_list[-2:])
+ else:
+ input_string = json.dumps(input_list)
+ sha256 = hashlib.sha256()
+ sha256.update(input_string.encode("utf-8"))
+ return sha256.hexdigest()
+ except Exception:
+ # In case of any error, generate a random hash
+ random_data = secrets.token_bytes(64) # Get 64 random bytes
+ return hashlib.sha256(random_data).hexdigest()
+
+
+def detect_cycles(lst, cycle_lengths=[2, 3, 4]):
+ """
+ Detect if the list has repeating cycles of specified lengths.
+
+ Args:
+ lst (list): The list to be checked.
+ cycle_lengths (list): Lengths of cycles to be detected.
+
+ Returns:
+ bool: True if a cycle of specified lengths is found, False otherwise.
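+
+ Example:
+ >>> detect_cycles(["a", "b", "a", "b"])
+ True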
+ """
+ for cycle_length in cycle_lengths:
+ if len(lst) >= 2 * cycle_length:
+ # Extract the last 2 segments of the list for comparison
+ last_segment = lst[-cycle_length:]
+ comparison_segment = lst[-2 * cycle_length : -cycle_length]
+
+ # Check if these segments are identical
+ if last_segment == comparison_segment:
+ return True
+
+ return False
+
+
+def clean_string(input_string):
+ """
+ Clean the input string by removing temporary directory paths using regex.
+
+ Args:
+ input_string (str): The string to be cleaned.
+
+ Returns:
+ str: The cleaned string.
+ """
+ # Regex pattern to match temporary directory paths
+ temp_dir_pattern = r"/tmp/[^/\\]+"
+
+ # Replace temporary directory paths with an empty string
+ cleaned_string = re.sub(temp_dir_pattern, "", input_string)
+
+ return cleaned_string
+
+
+# =========================================================================
+# Unit tests
+
+
+# Unit tests for the function
+class TestDetectCycles(unittest.TestCase):
+ def test_detect_cycles(self):
+ self.assertTrue(detect_cycles(["a", "b", "c", "d", "c", "d"]))
+ self.assertTrue(detect_cycles(["a", "b", "c", "d", "e", "c", "d", "e"]))
+ self.assertFalse(detect_cycles(["a", "b", "c", "d", "e", "c", "d", "e", "f"]))
+ self.assertFalse(detect_cycles(["x", "y", "z"]))
+ self.assertTrue(detect_cycles(["1", "2", "1", "2", "1", "2"]))
+ self.assertFalse(detect_cycles(["1", "2", "3", "4", "5"]))
+
+
+class TestHashMessages(unittest.TestCase):
+ def test_hashmessages_two_messages_match(self):
+ messages_latest = [
+ {
+ "role": "system",
+ "content": "\nObjective: Write code for a large system design task.\nPlease note that the code should be fully functional. No placeholders.\nOnly use the functions you have been provided with.\nOnly use the `write_files` to output code.\n\nYou must act autonomously and you will receive no human input at any stage. You have to return as output the complete code for completing this task, and correctly incorporate it into the existing code base.\nYou always write out the whole file contents. You always indent code with tabs.\nPlease always view the files before writing to them, to make sure you are writing to the correct files.\nWhen writing a test, make the filename start with the prefix 'test_'.\n\nProvide the full exhaustive code to achieve the task conditioned on the existing generated code---including changing the existing generated code.\n\nYou cannot visualize any graphical output. You exist within a Actor Model machine, and when you list out steps, each step will be taken by a new separate sub-ChatGPT model. When you list out a sub-task steps, you can optionally specify the sub-task validation to check that it has been completed successfully.\n\nYou cannot use any databases as none are setup in the local environment, instead mock a database with an in memory dictionary to store data. No data saved to disk will persist between steps or write operations.\n \nIf a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\n\nUse the functions provided. When calling functions only provide a RFC8259 compliant JSON request following this format without deviation.\n",
+ },
+ {
+ "role": "user",
+ "content": "\nObjective: Execute sub task step: Create a method named 'set_language_preference' in the 'User' class to allow users to set their language preference. This method should take input parameters such as 'user_id', 'language_code', etc. and update the 'preferences' attribute of the user in the database..\n\n Note: Condition any new code files on the existing code files: ['course.py', 'user.py', 'language.py']. Fully implement these features in the code, no placeholders. You can now optionally view the existing files if you need to view them to complete the current task step. You have a limited context window so be selective about which files you view, only view the files you think you might need to view.\n\nSummary output of previous step: \"\"The sub task step of creating a method named 'translate_content' in the 'Language' class to translate content from one language to another has been completed.\"\"\n\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "function_call": {"name": "python", "arguments": "functions.sub_task_step_complete()"},
+ "content": None,
+ },
+ {
+ "role": "function",
+ "name": "python",
+ "content": "{\"status\": \"error\", \"message\": \"Function `python` not found. Please only use the functions listed given, which are: ['sub_task_step_complete', 'view_files', 'run_python_file', 'pytest_files', 'write_files', 'delete_files']\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\nCreate a method named 'set_language_preference' in the 'User' class to allow users to set their language preference. This method should take input parameters such as 'user_id', 'language_code', etc. and update the 'preferences' attribute of the user in the database.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['course.py', 'user.py', 'language.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "function_call": {"name": "view_files", "arguments": '{\n "files": ["user.py"]\n}'},
+ "content": None,
+ },
+ {
+ "role": "function",
+ "name": "view_files",
+ "content": "{\"files\": \"user.py\\n```python\\nclass User:\\n def __init__(self, username, email, password, courses_enrolled=None, progress=None, preferences=None):\\n self.username = username\\n self.email = email\\n self.password = password\\n self.courses_enrolled = courses_enrolled if courses_enrolled is not None else []\\n self.progress = progress if progress is not None else {}\\n self.preferences = preferences if preferences is not None else {}\\n\\n def register_user(self, username, email, password):\\n new_user = User(username, email, password)\\n # Store the user details in the database\\n # Add code here to store the user details in the database\\n\\n def update_user_profile(self, user_id, username=None, email=None, password=None):\\n # Find the user with the given user_id in the database\\n # Update the corresponding user details with the provided parameters\\n # Add code here to update the user profile in the database\\n pass\\n\\n def enroll_course(self, user_id, course_id):\\n # Find the user with the given user_id in the database\\n # Find the course with the given course_id in the database\\n # Add the course_id to the user's courses_enrolled attribute\\n # Add code here to update the user's courses_enrolled attribute in the database\\n pass\\n\\n def submit_assignment(self, user_id, course_id, assignment_id, submission):\\n # Find the user with the given user_id in the database\\n # Find the course with the given course_id in the database\\n # Find the assignment with the given assignment_id in the database\\n # Update the user's progress for the assignment with the provided submission\\n # Add code here to update the user's progress attribute in the database\\n pass\\n\\n```\\n\\n\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\nCreate a method named 'set_language_preference' in the 'User' class to allow users to set their language preference. This method should take input parameters such as 'user_id', 'language_code', etc. and update the 'preferences' attribute of the user in the database.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['course.py', 'user.py', 'language.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "function_call": {"name": "view_files", "arguments": '{\n "files": ["user.py"]\n}'},
+ "content": None,
+ },
+ {
+ "role": "function",
+ "name": "view_files",
+ "content": "{\"files\": \"user.py\\n```python\\nclass User:\\n def __init__(self, username, email, password, courses_enrolled=None, progress=None, preferences=None):\\n self.username = username\\n self.email = email\\n self.password = password\\n self.courses_enrolled = courses_enrolled if courses_enrolled is not None else []\\n self.progress = progress if progress is not None else {}\\n self.preferences = preferences if preferences is not None else {}\\n\\n def register_user(self, username, email, password):\\n new_user = User(username, email, password)\\n # Store the user details in the database\\n # Add code here to store the user details in the database\\n\\n def update_user_profile(self, user_id, username=None, email=None, password=None):\\n # Find the user with the given user_id in the database\\n # Update the corresponding user details with the provided parameters\\n # Add code here to update the user profile in the database\\n pass\\n\\n def enroll_course(self, user_id, course_id):\\n # Find the user with the given user_id in the database\\n # Find the course with the given course_id in the database\\n # Add the course_id to the user's courses_enrolled attribute\\n # Add code here to update the user's courses_enrolled attribute in the database\\n pass\\n\\n def submit_assignment(self, user_id, course_id, assignment_id, submission):\\n # Find the user with the given user_id in the database\\n # Find the course with the given course_id in the database\\n # Find the assignment with the given assignment_id in the database\\n # Update the user's progress for the assignment with the provided submission\\n # Add code here to update the user's progress attribute in the database\\n pass\\n\\n```\\n\\n\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\nCreate a method named 'set_language_preference' in the 'User' class to allow users to set their language preference. This method should take input parameters such as 'user_id', 'language_code', etc. and update the 'preferences' attribute of the user in the database.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['course.py', 'user.py', 'language.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ ]
+ tmp_messages = [clean_string(str(msg)) for msg in messages_latest]
+ self.assertEqual(detect_cycles(tmp_messages), True)
+ hash1 = hash_messages(messages_latest)
+ hash2 = hash_messages(messages_latest)
+ self.assertEqual(hash1, hash2)
+ print("")
+
+ def test_hashmessages_three_cycles_match(self):
+ messages_latest = [
+ {
+ "role": "system",
+ "content": "\nObjective: Write code for a large system design task.\nPlease note that the code should be fully functional. No placeholders.\nOnly use the functions you have been provided with.\nOnly use the `write_files` to output code.\n\nYou must act autonomously and you will receive no human input at any stage. You have to return as output the complete code for completing this task, and correctly incorporate it into the existing code base.\nYou always write out the whole file contents. You always indent code with tabs.\nPlease always view the files before writing to them, to make sure you are writing to the correct files.\nWhen writing a test, make the filename start with the prefix 'test_'.\n\nProvide the full exhaustive code to achieve the task conditioned on the existing generated code---including changing the existing generated code.\n\nYou cannot visualize any graphical output. You exist within a Actor Model machine, and when you list out steps, each step will be taken by a new separate sub-ChatGPT model. When you list out a sub-task steps, you can optionally specify the sub-task validation to check that it has been completed successfully.\n\nYou cannot use any databases as none are setup in the local environment, instead mock a database with an in memory dictionary to store data. No data saved to disk will persist between steps or write operations.\n \nIf a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\n\nUse the functions provided. When calling functions only provide a RFC8259 compliant JSON request following this format without deviation.\n",
+ },
+ {
+ "role": "user",
+ "content": "\nObjective: Execute sub task step: 16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly. \n\n Note: Condition any new code files on the existing code files: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. You can now optionally view the existing files if you need to view them to complete the current task step. You have a limited context window so be selective about which files you view, only view the files you think you might need to view. \n\n The TranslationService class has been implemented and tests have been written, but the tests are failing. The issue seems to be with the translation function not correctly translating the words.\n\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "function_call": {
+ "name": "view_files",
+ "arguments": '{\n "files": ["translation_service.py", "tests/test_translation_service.py"]\n}',
+ },
+ "content": None,
+ },
+ {
+ "role": "function",
+ "name": "view_files",
+ "content": "{\"files\": \"translation_service.py\\n```python\\nclass TranslationService:\\n\\tdef __init__(self):\\n\\t\\tself.dictionary = {\\n\\t\\t\\t'en': {'hello': 'hola', 'world': 'mundo'},\\n\\t\\t\\t'es': {'hola': 'hello', 'mundo': 'world'},\\n\\t\\t\\t'fr': {'bonjour': 'hello', 'monde': 'world'},\\n\\t\\t\\t'de': {'hallo': 'hello', 'welt': 'world'}\\n\\t\\t}\\n\\n\\tdef translate(self, text, from_lang, to_lang):\\n\\t\\twords = text.split()\\n\\t\\ttranslated_words = []\\n\\t\\tfor word in words:\\n\\t\\t\\tif word in self.dictionary[from_lang]:\\n\\t\\t\\t\\ttranslated_words.append(self.dictionary[to_lang][self.dictionary[from_lang][word]])\\n\\t\\t\\telse:\\n\\t\\t\\t\\ttranslated_words.append(word)\\n\\t\\treturn ' '.join(translated_words)\\n```\\n\\ntests/test_translation_service.py\\n```python\\nimport pytest\\nfrom translation_service import TranslationService\\n\\ndef test_translation_service():\\n\\tts = TranslationService()\\n\\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\n\\tassert ts.translate('hola mundo', 'es', 'fr') == 'bonjour monde'\\n\\tassert ts.translate('bonjour monde', 'fr', 'de') == 'hallo welt'\\n\\tassert ts.translate('hallo welt', 'de', 'en') == 'hello world'\\n\\tassert ts.translate('hello world', 'en', 'fr') == 'bonjour monde'\\n\\tassert ts.translate('hello world', 'en', 'de') == 'hallo welt'\\n\\tassert ts.translate('hola mundo', 'es', 'en') == 'hello world'\\n\\tassert ts.translate('bonjour monde', 'fr', 'en') == 'hello world'\\n\\tassert ts.translate('hallo welt', 'de', 'en') == 'hello world'\\n\\tassert ts.translate('hola mundo', 'es', 'de') == 'hallo welt'\\n\\tassert ts.translate('bonjour monde', 'fr', 'de') == 'hallo welt'\\n\\tassert ts.translate('hallo welt', 'de', 'fr') == 'bonjour monde'\\n\\tassert ts.translate('hola mundo', 'es', 'fr') == 'bonjour monde'\\n\\tassert ts.translate('bonjour monde', 'fr', 'es') == 'hola mundo'\\n\\tassert ts.translate('hallo welt', 'de', 'es') == 'hola mundo'\\n\\tassert ts.translate('hola mundo', 'es', 'en') == 'hello world'\\n\\tassert ts.translate('hello world', 'en', 'en') == 'hello world'\\n\\tassert ts.translate('hola mundo', 'es', 'es') == 'hola mundo'\\n\\tassert ts.translate('bonjour monde', 'fr', 'fr') == 'bonjour monde'\\n\\tassert ts.translate('hallo welt', 'de', 'de') == 'hallo welt'\\n\\tassert ts.translate('hello world', 'en', 'fr') == 'bonjour monde'\\n\\tassert ts.translate('hola mundo', 'es', 'fr') == 'bonjour monde'\\n\\tassert ts.translate('bonjour monde', 'fr', 'es') == 'hola mundo'\\n```\\n\\n\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "function_call": {
+ "name": "pytest_files",
+ "arguments": '{\n "files_to_test": ["tests/test_translation_service.py"]\n}',
+ },
+ "content": None,
+ },
+ {
+ "role": "function",
+ "name": "pytest_files",
+ "content": "{\"output\": \"============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmpwqdylo3f\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 1 item\\n\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n============================== 1 failed in 0.07s =============================== If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "content": "The test is failing because the translation function is not correctly translating the words. The issue is in the line where we are trying to translate the word. We are trying to translate the word from the source language to the target language using the dictionary of the source language. However, we should be using the dictionary of the target language to translate the word. \n\nLet's correct the code in the `translate` function of the `TranslationService` class.",
+ "function_call": {
+ "name": "write_files",
+ "arguments": "{\n \"files_and_contents\": [\n {\n \"file_path\": \"translation_service.py\",\n \"file_contents\": \"class TranslationService:\\n\\tdef __init__(self):\\n\\t\\tself.dictionary = {\\n\\t\\t\\t'en': {'hello': 'hola', 'world': 'mundo'},\\n\\t\\t\\t'es': {'hola': 'hello', 'mundo': 'world'},\\n\\t\\t\\t'fr': {'bonjour': 'hello', 'monde': 'world'},\\n\\t\\t\\t'de': {'hallo': 'hello', 'welt': 'world'}\\n\\t\\t}\\n\\n\\tdef translate(self, text, from_lang, to_lang):\\n\\t\\twords = text.split()\\n\\t\\ttranslated_words = []\\n\\t\\tfor word in words:\\n\\t\\t\\tif word in self.dictionary[from_lang]:\\n\\t\\t\\t\\ttranslated_words.append(self.dictionary[to_lang][self.dictionary[from_lang][word]])\\n\\t\\t\\telse:\\n\\t\\t\\t\\ttranslated_words.append(word)\\n\\t\\treturn ' '.join(translated_words)\"\n }\n ]\n}",
+ },
+ },
+ {
+ "role": "function",
+ "name": "write_files",
+ "content": "{\"write_files_status\": \"success\", \"message\": \"write_files completed successfully. Test run results: \\nFailed tests. Please check the output below. ============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmpxyk39ki3\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 17 items\\n\\ntest_assignment.py .. [ 11%]\\ntest_course.py ... [ 29%]\\ntests/test_app.py .......... [ 88%]\\ntests/test_recommendation_engine.py . [ 94%]\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n------------ generated xml file: /tmp/tmpxyk39ki3/result_pytest.xml ------------\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n========================= 1 failed, 16 passed in 0.23s =========================\\n\\n You must fix this code by writing code to complete this sub task step. If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
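+            # Note on the fixture: the "fix" below can never pass the tests.
+            # The 'en' entry of the dictionary maps English words to Spanish
+            # ('hello' -> 'hola') while the other entries map foreign words to
+            # English, so the chained lookup
+            # dictionary[to_lang][dictionary[from_lang][word]] returns the
+            # original English word for 'en' -> 'es', leaving the text unchanged.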
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "function_call": {
+ "name": "pytest_files",
+ "arguments": '{\n "files_to_test": ["tests/test_translation_service.py"]\n}',
+ },
+ "content": None,
+ },
+ {
+ "role": "function",
+ "name": "pytest_files",
+ "content": "{\"output\": \"============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmprzckhols\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 1 item\\n\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n============================== 1 failed in 0.08s =============================== If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "content": "The test is still failing, which means the issue has not been resolved. The problem seems to be in the translation logic. The current logic is trying to translate the word from the source language to the target language using the dictionary of the source language. However, this is incorrect. We should be translating the word from the source language to the target language using the dictionary of the target language.\n\nLet's correct the code in the `translate` function of the `TranslationService` class. We need to first find the English equivalent of the word in the source language and then find the equivalent word in the target language.",
+ "function_call": {
+ "name": "write_files",
+ "arguments": "{\n \"files_and_contents\": [\n {\n \"file_path\": \"translation_service.py\",\n \"file_contents\": \"class TranslationService:\\n\\tdef __init__(self):\\n\\t\\tself.dictionary = {\\n\\t\\t\\t'en': {'hello': 'hola', 'world': 'mundo'},\\n\\t\\t\\t'es': {'hola': 'hello', 'mundo': 'world'},\\n\\t\\t\\t'fr': {'bonjour': 'hello', 'monde': 'world'},\\n\\t\\t\\t'de': {'hallo': 'hello', 'welt': 'world'}\\n\\t\\t}\\n\\n\\tdef translate(self, text, from_lang, to_lang):\\n\\t\\twords = text.split()\\n\\t\\ttranslated_words = []\\n\\t\\tfor word in words:\\n\\t\\t\\tif word in self.dictionary[from_lang]:\\n\\t\\t\\t\\tenglish_word = self.dictionary[from_lang][word]\\n\\t\\t\\t\\ttranslated_word = self.dictionary[to_lang][english_word]\\n\\t\\t\\t\\ttranslated_words.append(translated_word)\\n\\t\\t\\telse:\\n\\t\\t\\t\\ttranslated_words.append(word)\\n\\t\\treturn ' '.join(translated_words)\"\n }\n ]\n}",
+ },
+ },
+ {
+ "role": "function",
+ "name": "write_files",
+ "content": "{\"write_files_status\": \"success\", \"message\": \"write_files completed successfully. Test run results: \\nFailed tests. Please check the output below. ============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmpl8yxw9iw\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 17 items\\n\\ntest_assignment.py .. [ 11%]\\ntest_course.py ... [ 29%]\\ntests/test_app.py .......... [ 88%]\\ntests/test_recommendation_engine.py . [ 94%]\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n------------ generated xml file: /tmp/tmpl8yxw9iw/result_pytest.xml ------------\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n========================= 1 failed, 16 passed in 0.23s =========================\\n\\n You must fix this code by writing code to complete this sub task step. If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "function_call": {
+ "name": "pytest_files",
+ "arguments": '{\n "files_to_test": ["tests/test_translation_service.py"]\n}',
+ },
+ "content": None,
+ },
+ {
+ "role": "function",
+ "name": "pytest_files",
+ "content": "{\"output\": \"============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmpbpju825z\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 1 item\\n\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n============================== 1 failed in 0.07s =============================== If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
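+            # From this point on the transcript loops: the assistant re-issues a
+            # byte-identical write_files call, gets the same failing test output,
+            # and is re-prompted with the same question. This repeated block is
+            # the cycle the test is built around.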
+ {
+ "role": "assistant",
+ "content": "The test is still failing, which means the issue has not been resolved. The problem seems to be in the translation logic. The current logic is trying to translate the word from the source language to the target language using the dictionary of the source language. However, this is incorrect. We should be translating the word from the source language to the target language using the dictionary of the target language.\n\nLet's correct the code in the `translate` function of the `TranslationService` class. We need to first find the English equivalent of the word in the source language and then find the equivalent word in the target language.",
+ "function_call": {
+ "name": "write_files",
+ "arguments": "{\n \"files_and_contents\": [\n {\n \"file_path\": \"translation_service.py\",\n \"file_contents\": \"class TranslationService:\\n\\tdef __init__(self):\\n\\t\\tself.dictionary = {\\n\\t\\t\\t'en': {'hello': 'hola', 'world': 'mundo'},\\n\\t\\t\\t'es': {'hola': 'hello', 'mundo': 'world'},\\n\\t\\t\\t'fr': {'bonjour': 'hello', 'monde': 'world'},\\n\\t\\t\\t'de': {'hallo': 'hello', 'welt': 'world'}\\n\\t\\t}\\n\\n\\tdef translate(self, text, from_lang, to_lang):\\n\\t\\twords = text.split()\\n\\t\\ttranslated_words = []\\n\\t\\tfor word in words:\\n\\t\\t\\tif word in self.dictionary[from_lang]:\\n\\t\\t\\t\\tenglish_word = self.dictionary[from_lang][word]\\n\\t\\t\\t\\ttranslated_word = self.dictionary[to_lang][english_word]\\n\\t\\t\\t\\ttranslated_words.append(translated_word)\\n\\t\\t\\telse:\\n\\t\\t\\t\\ttranslated_words.append(word)\\n\\t\\treturn ' '.join(translated_words)\"\n }\n ]\n}",
+ },
+ },
+ {
+ "role": "function",
+ "name": "write_files",
+ "content": "{\"write_files_status\": \"success\", \"message\": \"write_files completed successfully. Test run results: \\nFailed tests. Please check the output below. ============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmplslte_q_\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 17 items\\n\\ntest_assignment.py .. [ 11%]\\ntest_course.py ... [ 29%]\\ntests/test_app.py .......... [ 88%]\\ntests/test_recommendation_engine.py . [ 94%]\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n------------ generated xml file: /tmp/tmplslte_q_/result_pytest.xml ------------\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n========================= 1 failed, 16 passed in 0.22s =========================\\n\\n You must fix this code by writing code to complete this sub task step. If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "content": "The test is still failing, which means the issue has not been resolved. The problem seems to be in the translation logic. The current logic is trying to translate the word from the source language to the target language using the dictionary of the source language. However, this is incorrect. We should be translating the word from the source language to the target language using the dictionary of the target language.\n\nLet's correct the code in the `translate` function of the `TranslationService` class. We need to first find the English equivalent of the word in the source language and then find the equivalent word in the target language.",
+ "function_call": {
+ "name": "write_files",
+ "arguments": "{\n \"files_and_contents\": [\n {\n \"file_path\": \"translation_service.py\",\n \"file_contents\": \"class TranslationService:\\n\\tdef __init__(self):\\n\\t\\tself.dictionary = {\\n\\t\\t\\t'en': {'hello': 'hola', 'world': 'mundo'},\\n\\t\\t\\t'es': {'hola': 'hello', 'mundo': 'world'},\\n\\t\\t\\t'fr': {'bonjour': 'hello', 'monde': 'world'},\\n\\t\\t\\t'de': {'hallo': 'hello', 'welt': 'world'}\\n\\t\\t}\\n\\n\\tdef translate(self, text, from_lang, to_lang):\\n\\t\\twords = text.split()\\n\\t\\ttranslated_words = []\\n\\t\\tfor word in words:\\n\\t\\t\\tif word in self.dictionary[from_lang]:\\n\\t\\t\\t\\tenglish_word = self.dictionary[from_lang][word]\\n\\t\\t\\t\\ttranslated_word = self.dictionary[to_lang][english_word]\\n\\t\\t\\t\\ttranslated_words.append(translated_word)\\n\\t\\t\\telse:\\n\\t\\t\\t\\ttranslated_words.append(word)\\n\\t\\treturn ' '.join(translated_words)\"\n }\n ]\n}",
+ },
+ },
+ {
+ "role": "function",
+ "name": "write_files",
+ "content": "{\"write_files_status\": \"success\", \"message\": \"write_files completed successfully. Test run results: \\nFailed tests. Please check the output below. ============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmp6xfgjxot\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 17 items\\n\\ntest_assignment.py .. [ 11%]\\ntest_course.py ... [ 29%]\\ntests/test_app.py .......... [ 88%]\\ntests/test_recommendation_engine.py . [ 94%]\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n------------ generated xml file: /tmp/tmp6xfgjxot/result_pytest.xml ------------\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n========================= 1 failed, 16 passed in 0.23s =========================\\n\\n You must fix this code by writing code to complete this sub task step. If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "content": "The test is still failing, which means the issue has not been resolved. The problem seems to be in the translation logic. The current logic is trying to translate the word from the source language to the target language using the dictionary of the source language. However, this is incorrect. We should be translating the word from the source language to the target language using the dictionary of the target language.\n\nLet's correct the code in the `translate` function of the `TranslationService` class. We need to first find the English equivalent of the word in the source language and then find the equivalent word in the target language.",
+ "function_call": {
+ "name": "write_files",
+ "arguments": "{\n \"files_and_contents\": [\n {\n \"file_path\": \"translation_service.py\",\n \"file_contents\": \"class TranslationService:\\n\\tdef __init__(self):\\n\\t\\tself.dictionary = {\\n\\t\\t\\t'en': {'hello': 'hola', 'world': 'mundo'},\\n\\t\\t\\t'es': {'hola': 'hello', 'mundo': 'world'},\\n\\t\\t\\t'fr': {'bonjour': 'hello', 'monde': 'world'},\\n\\t\\t\\t'de': {'hallo': 'hello', 'welt': 'world'}\\n\\t\\t}\\n\\n\\tdef translate(self, text, from_lang, to_lang):\\n\\t\\twords = text.split()\\n\\t\\ttranslated_words = []\\n\\t\\tfor word in words:\\n\\t\\t\\tif word in self.dictionary[from_lang]:\\n\\t\\t\\t\\tenglish_word = self.dictionary[from_lang][word]\\n\\t\\t\\t\\ttranslated_word = self.dictionary[to_lang][english_word]\\n\\t\\t\\t\\ttranslated_words.append(translated_word)\\n\\t\\t\\telse:\\n\\t\\t\\t\\ttranslated_words.append(word)\\n\\t\\treturn ' '.join(translated_words)\"\n }\n ]\n}",
+ },
+ },
+ {
+ "role": "function",
+ "name": "write_files",
+ "content": "{\"write_files_status\": \"success\", \"message\": \"write_files completed successfully. Test run results: \\nFailed tests. Please check the output below. ============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmp9d2jil56\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 17 items\\n\\ntest_assignment.py .. [ 11%]\\ntest_course.py ... [ 29%]\\ntests/test_app.py .......... [ 88%]\\ntests/test_recommendation_engine.py . [ 94%]\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n------------ generated xml file: /tmp/tmp9d2jil56/result_pytest.xml ------------\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n========================= 1 failed, 16 passed in 0.22s =========================\\n\\n You must fix this code by writing code to complete this sub task step. If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "content": "The test is still failing, which means the issue has not been resolved. The problem seems to be in the translation logic. The current logic is trying to translate the word from the source language to the target language using the dictionary of the source language. However, this is incorrect. We should be translating the word from the source language to the target language using the dictionary of the target language.\n\nLet's correct the code in the `translate` function of the `TranslationService` class. We need to first find the English equivalent of the word in the source language and then find the equivalent word in the target language.",
+ "function_call": {
+ "name": "write_files",
+ "arguments": "{\n \"files_and_contents\": [\n {\n \"file_path\": \"translation_service.py\",\n \"file_contents\": \"class TranslationService:\\n\\tdef __init__(self):\\n\\t\\tself.dictionary = {\\n\\t\\t\\t'en': {'hello': 'hola', 'world': 'mundo'},\\n\\t\\t\\t'es': {'hola': 'hello', 'mundo': 'world'},\\n\\t\\t\\t'fr': {'bonjour': 'hello', 'monde': 'world'},\\n\\t\\t\\t'de': {'hallo': 'hello', 'welt': 'world'}\\n\\t\\t}\\n\\n\\tdef translate(self, text, from_lang, to_lang):\\n\\t\\twords = text.split()\\n\\t\\ttranslated_words = []\\n\\t\\tfor word in words:\\n\\t\\t\\tif word in self.dictionary[from_lang]:\\n\\t\\t\\t\\tenglish_word = self.dictionary[from_lang][word]\\n\\t\\t\\t\\ttranslated_word = self.dictionary[to_lang][english_word]\\n\\t\\t\\t\\ttranslated_words.append(translated_word)\\n\\t\\t\\telse:\\n\\t\\t\\t\\ttranslated_words.append(word)\\n\\t\\treturn ' '.join(translated_words)\"\n }\n ]\n}",
+ },
+ },
+ {
+ "role": "function",
+ "name": "write_files",
+ "content": "{\"write_files_status\": \"success\", \"message\": \"write_files completed successfully. Test run results: \\nFailed tests. Please check the output below. ============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmpi4590_cu\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 17 items\\n\\ntest_assignment.py .. [ 11%]\\ntest_course.py ... [ 29%]\\ntests/test_app.py .......... [ 88%]\\ntests/test_recommendation_engine.py . [ 94%]\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n------------ generated xml file: /tmp/tmpi4590_cu/result_pytest.xml ------------\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n========================= 1 failed, 16 passed in 0.23s =========================\\n\\n You must fix this code by writing code to complete this sub task step. If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "content": "The test is still failing, which means the issue has not been resolved. The problem seems to be in the translation logic. The current logic is trying to translate the word from the source language to the target language using the dictionary of the source language. However, this is incorrect. We should be translating the word from the source language to the target language using the dictionary of the target language.\n\nLet's correct the code in the `translate` function of the `TranslationService` class. We need to first find the English equivalent of the word in the source language and then find the equivalent word in the target language.",
+ "function_call": {
+ "name": "write_files",
+ "arguments": "{\n \"files_and_contents\": [\n {\n \"file_path\": \"translation_service.py\",\n \"file_contents\": \"class TranslationService:\\n\\tdef __init__(self):\\n\\t\\tself.dictionary = {\\n\\t\\t\\t'en': {'hello': 'hola', 'world': 'mundo'},\\n\\t\\t\\t'es': {'hola': 'hello', 'mundo': 'world'},\\n\\t\\t\\t'fr': {'bonjour': 'hello', 'monde': 'world'},\\n\\t\\t\\t'de': {'hallo': 'hello', 'welt': 'world'}\\n\\t\\t}\\n\\n\\tdef translate(self, text, from_lang, to_lang):\\n\\t\\twords = text.split()\\n\\t\\ttranslated_words = []\\n\\t\\tfor word in words:\\n\\t\\t\\tif word in self.dictionary[from_lang]:\\n\\t\\t\\t\\tenglish_word = self.dictionary[from_lang][word]\\n\\t\\t\\t\\ttranslated_word = self.dictionary[to_lang][english_word]\\n\\t\\t\\t\\ttranslated_words.append(translated_word)\\n\\t\\t\\telse:\\n\\t\\t\\t\\ttranslated_words.append(word)\\n\\t\\treturn ' '.join(translated_words)\"\n }\n ]\n}",
+ },
+ },
+ {
+ "role": "function",
+ "name": "write_files",
+ "content": "{\"write_files_status\": \"success\", \"message\": \"write_files completed successfully. Test run results: \\nFailed tests. Please check the output below. ============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmpdfz9ol22\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 17 items\\n\\ntest_assignment.py .. [ 11%]\\ntest_course.py ... [ 29%]\\ntests/test_app.py .......... [ 88%]\\ntests/test_recommendation_engine.py . [ 94%]\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n------------ generated xml file: /tmp/tmpdfz9ol22/result_pytest.xml ------------\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n========================= 1 failed, 16 passed in 0.22s =========================\\n\\n You must fix this code by writing code to complete this sub task step. If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "content": "The test is still failing, which means the issue has not been resolved. The problem seems to be in the translation logic. The current logic is trying to translate the word from the source language to the target language using the dictionary of the source language. However, this is incorrect. We should be translating the word from the source language to the target language using the dictionary of the target language.\n\nLet's correct the code in the `translate` function of the `TranslationService` class. We need to first find the English equivalent of the word in the source language and then find the equivalent word in the target language.",
+ "function_call": {
+ "name": "write_files",
+ "arguments": "{\n \"files_and_contents\": [\n {\n \"file_path\": \"translation_service.py\",\n \"file_contents\": \"class TranslationService:\\n\\tdef __init__(self):\\n\\t\\tself.dictionary = {\\n\\t\\t\\t'en': {'hello': 'hola', 'world': 'mundo'},\\n\\t\\t\\t'es': {'hola': 'hello', 'mundo': 'world'},\\n\\t\\t\\t'fr': {'bonjour': 'hello', 'monde': 'world'},\\n\\t\\t\\t'de': {'hallo': 'hello', 'welt': 'world'}\\n\\t\\t}\\n\\n\\tdef translate(self, text, from_lang, to_lang):\\n\\t\\twords = text.split()\\n\\t\\ttranslated_words = []\\n\\t\\tfor word in words:\\n\\t\\t\\tif word in self.dictionary[from_lang]:\\n\\t\\t\\t\\tenglish_word = self.dictionary[from_lang][word]\\n\\t\\t\\t\\ttranslated_word = self.dictionary[to_lang][english_word]\\n\\t\\t\\t\\ttranslated_words.append(translated_word)\\n\\t\\t\\telse:\\n\\t\\t\\t\\ttranslated_words.append(word)\\n\\t\\treturn ' '.join(translated_words)\"\n }\n ]\n}",
+ },
+ },
+ {
+ "role": "function",
+ "name": "write_files",
+ "content": "{\"write_files_status\": \"success\", \"message\": \"write_files completed successfully. Test run results: \\nFailed tests. Please check the output below. ============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmpmxetqnm5\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 17 items\\n\\ntest_assignment.py .. [ 11%]\\ntest_course.py ... [ 29%]\\ntests/test_app.py .......... [ 88%]\\ntests/test_recommendation_engine.py . [ 94%]\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n------------ generated xml file: /tmp/tmpmxetqnm5/result_pytest.xml ------------\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n========================= 1 failed, 16 passed in 0.24s =========================\\n\\n You must fix this code by writing code to complete this sub task step. If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "content": "The test is still failing, which means the issue has not been resolved. The problem seems to be in the translation logic. The current logic is trying to translate the word from the source language to the target language using the dictionary of the source language. However, this is incorrect. We should be translating the word from the source language to the target language using the dictionary of the target language.\n\nLet's correct the code in the `translate` function of the `TranslationService` class. We need to first find the English equivalent of the word in the source language and then find the equivalent word in the target language.",
+ "function_call": {
+ "name": "write_files",
+ "arguments": "{\n \"files_and_contents\": [\n {\n \"file_path\": \"translation_service.py\",\n \"file_contents\": \"class TranslationService:\\n\\tdef __init__(self):\\n\\t\\tself.dictionary = {\\n\\t\\t\\t'en': {'hello': 'hola', 'world': 'mundo'},\\n\\t\\t\\t'es': {'hola': 'hello', 'mundo': 'world'},\\n\\t\\t\\t'fr': {'bonjour': 'hello', 'monde': 'world'},\\n\\t\\t\\t'de': {'hallo': 'hello', 'welt': 'world'}\\n\\t\\t}\\n\\n\\tdef translate(self, text, from_lang, to_lang):\\n\\t\\twords = text.split()\\n\\t\\ttranslated_words = []\\n\\t\\tfor word in words:\\n\\t\\t\\tif word in self.dictionary[from_lang]:\\n\\t\\t\\t\\tenglish_word = self.dictionary[from_lang][word]\\n\\t\\t\\t\\ttranslated_word = self.dictionary[to_lang][english_word]\\n\\t\\t\\t\\ttranslated_words.append(translated_word)\\n\\t\\t\\telse:\\n\\t\\t\\t\\ttranslated_words.append(word)\\n\\t\\treturn ' '.join(translated_words)\"\n }\n ]\n}",
+ },
+ },
+ {
+ "role": "function",
+ "name": "write_files",
+ "content": "{\"write_files_status\": \"success\", \"message\": \"write_files completed successfully. Test run results: \\nFailed tests. Please check the output below. ============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmpatpx_2yc\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 17 items\\n\\ntest_assignment.py .. [ 11%]\\ntest_course.py ... [ 29%]\\ntests/test_app.py .......... [ 88%]\\ntests/test_recommendation_engine.py . [ 94%]\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n------------ generated xml file: /tmp/tmpatpx_2yc/result_pytest.xml ------------\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n========================= 1 failed, 16 passed in 0.23s =========================\\n\\n You must fix this code by writing code to complete this sub task step. If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ {
+ "role": "assistant",
+ "content": "The test is still failing, which means the issue has not been resolved. The problem seems to be in the translation logic. The current logic is trying to translate the word from the source language to the target language using the dictionary of the source language. However, this is incorrect. We should be translating the word from the source language to the target language using the dictionary of the target language.\n\nLet's correct the code in the `translate` function of the `TranslationService` class. We need to first find the English equivalent of the word in the source language and then find the equivalent word in the target language.",
+ "function_call": {
+ "name": "write_files",
+ "arguments": "{\n \"files_and_contents\": [\n {\n \"file_path\": \"translation_service.py\",\n \"file_contents\": \"class TranslationService:\\n\\tdef __init__(self):\\n\\t\\tself.dictionary = {\\n\\t\\t\\t'en': {'hello': 'hola', 'world': 'mundo'},\\n\\t\\t\\t'es': {'hola': 'hello', 'mundo': 'world'},\\n\\t\\t\\t'fr': {'bonjour': 'hello', 'monde': 'world'},\\n\\t\\t\\t'de': {'hallo': 'hello', 'welt': 'world'}\\n\\t\\t}\\n\\n\\tdef translate(self, text, from_lang, to_lang):\\n\\t\\twords = text.split()\\n\\t\\ttranslated_words = []\\n\\t\\tfor word in words:\\n\\t\\t\\tif word in self.dictionary[from_lang]:\\n\\t\\t\\t\\tenglish_word = self.dictionary[from_lang][word]\\n\\t\\t\\t\\ttranslated_word = self.dictionary[to_lang][english_word]\\n\\t\\t\\t\\ttranslated_words.append(translated_word)\\n\\t\\t\\telse:\\n\\t\\t\\t\\ttranslated_words.append(word)\\n\\t\\treturn ' '.join(translated_words)\"\n }\n ]\n}",
+ },
+ },
+ {
+ "role": "function",
+ "name": "write_files",
+ "content": "{\"write_files_status\": \"success\", \"message\": \"write_files completed successfully. Test run results: \\nFailed tests. Please check the output below. ============================= test session starts ==============================\\nplatform linux -- Python 3.9.7, pytest-7.4.2, pluggy-1.3.0\\nrootdir: /tmp/tmpwjdmt2gx\\nplugins: hydra-core-1.3.2, anyio-3.7.0\\ncollected 17 items\\n\\ntest_assignment.py .. [ 11%]\\ntest_course.py ... [ 29%]\\ntests/test_app.py .......... [ 88%]\\ntests/test_recommendation_engine.py . [ 94%]\\ntests/test_translation_service.py F [100%]\\n\\n=================================== FAILURES ===================================\\n___________________________ test_translation_service ___________________________\\n\\n def test_translation_service():\\n \\tts = TranslationService()\\n> \\tassert ts.translate('hello world', 'en', 'es') == 'hola mundo'\\nE AssertionError: assert 'hello world' == 'hola mundo'\\nE - hola mundo\\nE + hello world\\n\\ntests/test_translation_service.py:6: AssertionError\\n------------ generated xml file: /tmp/tmpwjdmt2gx/result_pytest.xml ------------\\n=========================== short test summary info ============================\\nFAILED tests/test_translation_service.py::test_translation_service - Assertio...\\n========================= 1 failed, 16 passed in 0.23s =========================\\n\\n You must fix this code by writing code to complete this sub task step. If a test is failing the error could be the code, or the test is incorrect, so feel free to overwrite and change the tests when they are incorrect, to make all tests pass.\"}",
+ },
+ {
+ "role": "user",
+ "content": "\nHas the sub task step been completed of: ```\n16. Implement the interface translation feature. This involves creating a TranslationService class that can translate text from one language to another. The TranslationService class should use a simple dictionary-based approach for translation. After implementing this feature, write tests to ensure that the TranslationService class can translate text correctly.\n``` \n\n If yes, call the function `sub_task_step_complete`, otherwise reflect and correct the full code to complete the task. Only use the functions you have been provided with, and if you want to output code only use the `write_files` function to output code. Condition it on existing code: ['app.py', 'requirements.txt', 'tests/test_app.py', 'course.py', 'test_course.py', 'assignment.py', 'test_assignment.py', 'recommendation_engine.py', 'tests/test_recommendation_engine.py', 'translation_service.py', 'tests/test_translation_service.py'] Fully implement these features in the code, no placeholders. If you have not viewed the files before writing to them, please view them, to make sure you are writing to the correct files.\nRespond now only with a function call of one of the following functions provided: `sub_task_step_complete`, `view_files`, `run_python_file`, `pytest_files`, `write_files`, `delete_files`, and if you want to output code only use the `write_files` function to output code.\n",
+ },
+ ]
+ tmp_messages = [clean_string(str(msg)) for msg in messages_latest]
+ self.assertEqual(detect_cycles(tmp_messages), True)
+ # hash1 = hash_messages(messages_latest)
+ # hash2 = hash_messages(messages_latest)
+ # hash3 = hash_messages(messages_latest)
+ # self.assertEqual(hash1, hash2)
+ # self.assertEqual(hash2, hash3)
+
+ def test_hashmessages_four_cycles_match(self):
+ input_list = ["message1", "message2"]
+ hash1 = hash_messages(input_list)
+ hash2 = hash_messages(input_list)
+ hash3 = hash_messages(input_list)
+ hash4 = hash_messages(input_list)
+ self.assertEqual(hash1, hash2)
+ self.assertEqual(hash2, hash3)
+ self.assertEqual(hash3, hash4)
diff --git a/l2mac/utils/logging.py b/l2mac/utils/logging.py
new file mode 100644
index 00000000..2dfbeaa9
--- /dev/null
+++ b/l2mac/utils/logging.py
@@ -0,0 +1,37 @@
+import logging
+import multiprocessing
+import time
+from pathlib import Path
+
+
+def generate_log_file_path(file, log_folder="logs", config=None):
+    """Create the log folder if needed and return a timestamped log file path.
+
+    `file` and `config` are currently unused; they are kept for call-site
+    compatibility.
+    """
+    Path(f"./{log_folder}").mkdir(parents=True, exist_ok=True)
+    path_run_name = time.strftime("%Y%m%d-%H%M%S")
+    return f"{log_folder}/{path_run_name}_log.txt"
+
+
+def create_logger_in_process(log_file_path):
+    """Return the shared multiprocessing logger, attaching handlers only once."""
+    logger = multiprocessing.get_logger()
+ if not logger.hasHandlers():
+ formatter = logging.Formatter("%(processName)s| %(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s")
+ stream_handler = logging.StreamHandler()
+ file_handler = logging.FileHandler(log_file_path)
+ stream_handler.setFormatter(formatter)
+ file_handler.setFormatter(formatter)
+ logger.addHandler(stream_handler)
+ logger.addHandler(file_handler)
+ logger.setLevel(logging.INFO)
+ return logger
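+
+
+# Minimal usage sketch (illustrative only; assumes just this module's two helpers):
+#
+#     log_file_path = generate_log_file_path(__file__)
+#     logger = create_logger_in_process(log_file_path)
+#     logger.info("Starting run")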
diff --git a/l2mac/utils/run.py b/l2mac/utils/run.py
new file mode 100644
index 00000000..01f5e2c3
--- /dev/null
+++ b/l2mac/utils/run.py
@@ -0,0 +1,112 @@
+import ast
+import os
+import random
+import time
+from enum import Enum
+
+import numpy as np
+from pydantic import BaseModel
+
+
+class Domain(str, Enum):
+ codebase = "codebase"
+ book = "book"
+ custom = "custom" # Contributors add your custom domains here
+
+
+class DebuggingLevel(str, Enum):
+ debug = "debug"
+ info = "info"
+ warn = "warn"
+ error = "error"
+
+
+def seed_all(seed=None):
+    """
+    Seed the numpy and random module RNGs. If `seed` is None, a
+    time-based seed is used instead.
+    """
+    # Default uses current time in milliseconds, modulo 1e9
+    if seed is None:
+        seed = round(time.time() * 1000) % int(1e9)
+
+    # Seed numpy and the random module with the same value
+    np.random.seed(seed)
+    random.seed(seed)
+
+
+class DotDict(dict):
+ """Dictionary subclass that supports dot notation access to keys."""
+
+ def __getattr__(self, name):
+ return self.get(name, None)
+
+ def __setattr__(self, name, value):
+ self[name] = value
+
+ def __delattr__(self, name):
+ del self[name]
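+
+    # Illustrative sketch of the dot-notation access this enables:
+    #   d = DotDict({"a": 1}); d.a == 1; missing keys return None (d.b is None).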
+
+
+def to_dotdict(obj):
+ """Function to convert Pydantic models to DotDict recursively."""
+ if isinstance(obj, dict):
+ return DotDict({k: to_dotdict(v) for k, v in obj.items()})
+ elif isinstance(obj, BaseModel):
+ return DotDict({k: to_dotdict(v) for k, v in obj.dict().items()})
+ elif isinstance(obj, list):
+ return [to_dotdict(item) for item in obj]
+ else:
+ return obj
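+
+# Illustrative sketch (`Config` is a hypothetical pydantic model):
+#   cfg = to_dotdict(Config(lr=0.1, layers=[64, 64]))
+#   cfg.lr == 0.1 and cfg.layers == [64, 64]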
+
+
+def load_prompt_program(input_string: str):
+ """
+ Loads a prompt program from a given file path or parses it from a string in list format.
+
+ Args:
+ input_string (str): The path to the prompt program file or the prompt program as a string in a list format.
+
+    Returns:
+        list: The loaded or parsed prompt program as a list.
+
+    Raises:
+        SyntaxError, ValueError, IOError: If the input cannot be read or
+            parsed as a valid list.
+    """
+    # If the input is already a list, return it as-is
+ if isinstance(input_string, list) and len(input_string) >= 1:
+ return input_string
+ # Check if the input string is a path to a file
+ if os.path.isfile(input_string):
+ try:
+ with open(input_string, "r", encoding="utf-8") as file:
+ # Read the file content and attempt to parse it
+ file_content = file.read()
+ return ast.literal_eval(file_content)
+ except (SyntaxError, ValueError, IOError) as e:
+ print(f"Error reading or parsing `prompt_program` file path of {input_string} | Error: {e}")
+ raise e
+ else:
+ # Try to parse it directly as a list from the string
+ try:
+ result = ast.literal_eval(input_string)
+ if isinstance(result, list):
+ return result
+ else:
+ raise ValueError("Input of `prompt_program` is not a list.")
+ except (SyntaxError, ValueError) as e:
+ print(f"Error reading or parsing `prompt_program` string encoded of {input_string} | Error: {e}")
+ raise e
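+
+
+# Illustrative usage sketch (the file path below is hypothetical):
+#   load_prompt_program('["step 1", "step 2"]')  # -> ["step 1", "step 2"]
+#   load_prompt_program("prompts/program.txt")   # -> list parsed from that file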
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..87d62347
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,7 @@
+pyyaml
+pydantic
+typer>=0.9.0
+numpy
+openai
+tiktoken
+timeout-decorator
\ No newline at end of file
diff --git a/ruff.toml b/ruff.toml
new file mode 100644
index 00000000..21de5ee1
--- /dev/null
+++ b/ruff.toml
@@ -0,0 +1,40 @@
+select = ["E", "F"]
+ignore = ["E501", "E712", "E722", "F821", "E731"]
+
+# Allow autofix for all enabled rules (when `--fix` is provided).
+fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
+unfixable = []
+
+# Exclude a variety of commonly ignored directories.
+exclude = [
+ ".bzr",
+ ".direnv",
+ ".eggs",
+ ".git",
+ ".git-rewrite",
+ ".hg",
+ ".mypy_cache",
+ ".nox",
+ ".pants.d",
+ ".pytype",
+ ".ruff_cache",
+ ".svn",
+ ".tox",
+ ".venv",
+ "__pypackages__",
+ "_build",
+ "buck-out",
+ "build",
+ "dist",
+ "node_modules",
+ "venv",
+]
+
+# Same as Black.
+line-length = 120
+
+# Allow unused variables when underscore-prefixed.
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+
+# Assume Python 3.9
+target-version = "py39"
\ No newline at end of file
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..ffc4868c
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,34 @@
+from pathlib import Path
+
+from setuptools import find_packages, setup
+
+here = Path(__file__).resolve().parent
+long_description = (here / "README.md").read_text(encoding="utf-8")
+requirements = (here / "requirements.txt").read_text(encoding="utf-8").splitlines()
+
+
+extras_require = {"dev": ["pylint~=3.0.3", "black~=23.3.0", "isort~=5.12.0", "pre-commit~=3.6.0"]}
+
+setup(
+ name="l2mac",
+ version="0.0.1",
+ description="The LLM Automatic Computer Framework",
+ long_description=long_description,
+ long_description_content_type="text/markdown",
+ url="https://github.com/samholt/l2mac",
+ author="Sam Holt",
+ author_email="samuel.holt.direct@gmail.com",
+ license="MIT",
+ keywords="l2mac multi-agent programming gpt llm metaprogramming automatic computer llm-automatic",
+ packages=find_packages(exclude=["contrib", "docs", "examples", "tests*"]),
+ include_package_data=True,
+ package_data={
+ "": ["*.yaml"],
+ },
+ python_requires=">=3.9",
+ install_requires=requirements,
+ extras_require=extras_require,
+ entry_points={"console_scripts": ["l2mac = l2mac.core:app"]},
+)
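+
+# For local development (illustrative): pip install -e ".[dev]"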
diff --git a/tests/test_l2mac.py b/tests/test_l2mac.py
new file mode 100644
index 00000000..cf1848bd
--- /dev/null
+++ b/tests/test_l2mac.py
@@ -0,0 +1,2 @@
+def test_l2mac():
+ assert True
diff --git a/tests/test_l2mac_tools.py b/tests/test_l2mac_tools.py
new file mode 100644
index 00000000..e69de29b