Skip to content

Commit

Permalink
Add YAML support
Browse files Browse the repository at this point in the history
  • Loading branch information
manstis committed Jan 14, 2025
1 parent ae9853c commit de5afa1
Show file tree
Hide file tree
Showing 9 changed files with 246 additions and 63 deletions.
22 changes: 21 additions & 1 deletion ansible_ai_connect/ai/api/model_pipelines/config_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from json import JSONDecodeError

import yaml
from django.conf import settings
from yaml import YAMLError

from ansible_ai_connect.ai.api.model_pipelines.config_providers import Configuration
from ansible_ai_connect.ai.api.model_pipelines.config_serializers import (
Expand All @@ -22,8 +25,25 @@


def load_config() -> Configuration:
    """Load ``settings.ANSIBLE_AI_MODEL_MESH_CONFIG`` into a validated Configuration.

    The setting may be written in either JSON or YAML. JSON is attempted
    first with the real JSON parser (``yaml.safe_load`` also accepts most
    JSON, but not every corner case), falling back to YAML only when JSON
    parsing fails.

    Returns:
        The saved ``Configuration`` instance produced by the serializer.

    Raises:
        TypeError: if the setting is undefined (``None``).
        ValidationError: if the parsed data does not satisfy the serializer.
    """
    # Explicit None check rather than `load_json() or load_yaml()`: a JSON
    # document that parses to a falsy value (e.g. "{}", "[]", "0") is valid
    # JSON and must not trigger a redundant YAML re-parse.
    source = load_json()
    if source is None:
        source = load_yaml()
    serializer = ConfigurationSerializer(data=source)
    serializer.is_valid(raise_exception=True)
    serializer.save()
    return serializer.instance


def load_json():
    """Parse the ANSIBLE_AI_MODEL_MESH_CONFIG setting as JSON.

    Returns:
        The parsed value, or ``None`` when the setting is not valid JSON.
    """
    raw = settings.ANSIBLE_AI_MODEL_MESH_CONFIG
    try:
        parsed = json.loads(raw)
    except JSONDecodeError:
        # Not JSON; the caller will fall back to the YAML loader.
        return None
    return parsed


def load_yaml():
    """Parse the ANSIBLE_AI_MODEL_MESH_CONFIG setting as YAML.

    Returns:
        The parsed value, or ``None`` when the setting is not valid YAML.
    """
    raw = settings.ANSIBLE_AI_MODEL_MESH_CONFIG
    try:
        # safe_load avoids constructing arbitrary Python objects from the
        # configuration string.
        parsed = yaml.safe_load(raw)
    except YAMLError:
        return None
    return parsed
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
# Copyright Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import yaml
from django.test import override_settings

from ansible_ai_connect.ai.api.model_pipelines.config_loader import load_config
from ansible_ai_connect.ai.api.model_pipelines.config_providers import Configuration
from ansible_ai_connect.ai.api.model_pipelines.pipelines import MetaData
from ansible_ai_connect.ai.api.model_pipelines.registry import REGISTRY_ENTRY
from ansible_ai_connect.ai.api.model_pipelines.tests import mock_config
from ansible_ai_connect.test_utils import WisdomTestCase


def _convert_json_to_yaml(json_config: str):
    """Re-serialise a JSON document as equivalent YAML text (test fixture helper)."""
    return yaml.safe_dump(yaml.safe_load(json_config))


class TestConfigLoader(WisdomTestCase):
    """Exercises load_config() against undefined, JSON, and YAML settings."""

    @override_settings(ANSIBLE_AI_MODEL_MESH_CONFIG=None)
    def test_config_undefined(self):
        # An undefined (None) setting reaches json.loads(None), which raises
        # TypeError rather than JSONDecodeError, so it propagates out.
        with self.assertRaises(TypeError):
            load_config()

    def assert_config(self):
        # Shared assertion: every pipeline registered as a MetaData subclass
        # must appear in the loaded Configuration.
        config: Configuration = load_config()
        pipelines = [i for i in REGISTRY_ENTRY.keys() if issubclass(i, MetaData)]
        for k in pipelines:
            self.assertTrue(k.__name__ in config)

    @override_settings(ANSIBLE_AI_MODEL_MESH_CONFIG=mock_config("ollama"))
    def test_config_json(self):
        # mock_config() yields JSON; the JSON loader path should handle it.
        self.assert_config()

    @override_settings(ANSIBLE_AI_MODEL_MESH_CONFIG=_convert_json_to_yaml(mock_config("ollama")))
    def test_config_yaml(self):
        # Same configuration converted to YAML; the YAML fallback should handle it.
        self.assert_config()
69 changes: 58 additions & 11 deletions docs/config/examples/README-ANSIBLE_AI_MODEL_MESH_CONFIG.md
Original file line number Diff line number Diff line change
@@ -1,40 +1,87 @@
# Example `ANSIBLE_AI_MODEL_MESH_CONFIG` configuration

Pay close attention to the formatting of the blocks.
`ANSIBLE_AI_MODEL_MESH_CONFIG` can be defined with either JSON or YAML.

Each ends with `}},` otherwise conversion of the multi-line setting to a `str` can fail.
## JSON Configuration

```text
ANSIBLE_AI_MODEL_MESH_CONFIG="{
```json
{
"ModelPipelineCompletions": {
"provider": "ollama",
"config": {
"inference_url": "http://host.containers.internal:11434",
"model_id": "mistral:instruct"}},
"model_id": "mistral:instruct"
}
},
"ModelPipelineContentMatch": {
"provider": "ollama",
"config": {
"inference_url": "http://host.containers.internal:11434",
"model_id": "mistral:instruct"}},
"model_id": "mistral:instruct"
}
},
"ModelPipelinePlaybookGeneration": {
"provider": "ollama",
"config": {
"inference_url": "http://host.containers.internal:11434",
"model_id": "mistral:instruct"}},
"model_id": "mistral:instruct"
}
},
"ModelPipelineRoleGeneration": {
"provider": "ollama",
"config": {
"inference_url": "http://host.containers.internal:11434",
"model_id": "mistral:instruct"}},
"model_id": "mistral:instruct"
}
},
"ModelPipelinePlaybookExplanation": {
"provider": "ollama",
"config": {
"inference_url": "http://host.containers.internal:11434",
"model_id": "mistral:instruct"}},
"model_id": "mistral:instruct"
}
},
"ModelPipelineChatBot": {
"provider": "http",
"config": {
"inference_url": "http://localhost:8000",
"model_id": "granite3-8b"}}
}"
"model_id": "granite3-8b"
}
}
}
```

## YAML Configuration

```yaml
MetaData:
provider: ollama
config:
inference_url: http://localhost
model_id: a-model-id
ModelPipelineCompletions:
provider: ollama
config:
inference_url: http://localhost
model_id: a-model-id
ModelPipelinePlaybookGeneration:
provider: ollama
config:
inference_url: http://localhost
model_id: a-model-id
ModelPipelineRoleGeneration:
provider: ollama
config:
inference_url: http://localhost
model_id: a-model-id
ModelPipelinePlaybookExplanation:
provider: ollama
config:
inference_url: http://localhost
model_id: a-model-id
ModelPipelineChatBot:
    provider: http
config:
inference_url: http://localhost
model_id: granite3-8b
```
30 changes: 21 additions & 9 deletions docs/config/examples/README-example-dummy.md
Original file line number Diff line number Diff line change
@@ -1,46 +1,58 @@
# Example `ANSIBLE_AI_MODEL_MESH_CONFIG` configuration for `dummy`

```text
ANSIBLE_AI_MODEL_MESH_CONFIG="{
```json
{
"ModelPipelineCompletions": {
"provider": "dummy",
"config": {
"inference_url": "http://localhost:8000",
"body": "{\"predictions\":[\"ansible.builtin.apt:\\n name: nginx\\n update_cache: true\\n state: present\\n\"]}",
"latency_max_msec": "3000",
"latency_use_jitter": "False"}},
"latency_use_jitter": "False"
}
},
"ModelPipelineContentMatch": {
"provider": "dummy",
"config": {
"inference_url": "http://localhost:8000",
"body": "{\"predictions\":[\"ansible.builtin.apt:\\n name: nginx\\n update_cache: true\\n state: present\\n\"]}",
"latency_max_msec": "3000",
"latency_use_jitter": "False"}},
"latency_use_jitter": "False"
}
},
"ModelPipelinePlaybookGeneration": {
"provider": "dummy",
"config": {
"inference_url": "http://localhost:8000",
"body": "{\"predictions\":[\"ansible.builtin.apt:\\n name: nginx\\n update_cache: true\\n state: present\\n\"]}",
"latency_max_msec": "3000",
"latency_use_jitter": "False"}},
"latency_use_jitter": "False"
}
},
"ModelPipelineRoleGeneration": {
"provider": "dummy",
"config": {
"inference_url": "http://localhost:8000",
"body": "{\"predictions\":[\"ansible.builtin.apt:\\n name: nginx\\n update_cache: true\\n state: present\\n\"]}",
"latency_max_msec": "3000",
"latency_use_jitter": "False"}},
"latency_use_jitter": "False"
}
},
"ModelPipelinePlaybookExplanation": {
"provider": "dummy",
"config": {
"inference_url": "http://localhost:8000",
"body": "{\"predictions\":[\"ansible.builtin.apt:\\n name: nginx\\n update_cache: true\\n state: present\\n\"]}",
"latency_max_msec": "3000",
"latency_use_jitter": "False"}},
"latency_use_jitter": "False"
}
},
"ModelPipelineChatBot": {
"provider": "http",
"config": {
"inference_url": "<CHATBOT_URL>",
"model_id": "<CHATBOT_DEFAULT_MODEL>"}}
}"
"model_id": "<CHATBOT_DEFAULT_MODEL>"
}
}
}
```
18 changes: 12 additions & 6 deletions docs/config/examples/README-example-hybrid.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,15 @@ Separating the configuration for each pipeline allows different settings to be u

For example the following configuration uses `ollama` for "Completions" however `wca-onprem` for "Playbook Generation". "ContentMatches", "Playbook Explanation" and "Role Generation" are not configured and would fall back to a "No Operation" implementation. The "Chat Bot" uses a plain `http` implementation to another service.

```text
ANSIBLE_AI_MODEL_MESH_CONFIG="{
```json
{
"ModelPipelineCompletions": {
"provider": "ollama",
"config": {
"inference_url": "http://localhost:8000",
"model_id": "ollama-model"}},
"model_id": "ollama-model"
}
},
"ModelPipelinePlaybookGeneration": {
"provider": "wca-onprem",
"config": {
Expand All @@ -22,11 +24,15 @@ ANSIBLE_AI_MODEL_MESH_CONFIG="{
"enable_ari_postprocessing": "False",
"health_check_api_key": "<api_key>",
"health_check_model_id": "<model_id>",
"username": "<username>"}},
"username": "<username>"
}
},
"ModelPipelineChatBot": {
"provider": "http",
"config": {
"inference_url": "<CHATBOT_URL>",
"model_id": "<CHATBOT_DEFAULT_MODEL>"}}
}"
"model_id": "<CHATBOT_DEFAULT_MODEL>"
}
}
}
```
30 changes: 21 additions & 9 deletions docs/config/examples/README-example-ollama.md
Original file line number Diff line number Diff line change
@@ -1,36 +1,48 @@
# Example `ANSIBLE_AI_MODEL_MESH_CONFIG` configuration for `ollama`

```text
ANSIBLE_AI_MODEL_MESH_CONFIG="{
```json
{
"ModelPipelineCompletions": {
"provider": "ollama",
"config": {
"inference_url": "http://localhost:8000",
"model_id": "ollama-model"}},
"model_id": "ollama-model"
}
},
"ModelPipelineContentMatch": {
"provider": "ollama",
"config": {
"inference_url": "http://localhost:8000",
"model_id": "ollama-model"}},
"model_id": "ollama-model"
}
},
"ModelPipelinePlaybookGeneration": {
"provider": "ollama",
"config": {
"inference_url": "http://localhost:8000",
"model_id": "ollama-model"}},
"model_id": "ollama-model"
}
},
"ModelPipelineRoleGeneration": {
"provider": "ollama",
"config": {
"inference_url": "http://localhost:8000",
"model_id": "ollama-model"}},
"model_id": "ollama-model"
}
},
"ModelPipelinePlaybookExplanation": {
"provider": "ollama",
"config": {
"inference_url": "http://localhost:8000",
"model_id": "ollama-model"}},
"model_id": "ollama-model"
}
},
"ModelPipelineChatBot": {
"provider": "http",
"config": {
"inference_url": "<CHATBOT_URL>",
"model_id": "<CHATBOT_DEFAULT_MODEL>"}}
}"
"model_id": "<CHATBOT_DEFAULT_MODEL>"
}
}
}
```
Loading

0 comments on commit de5afa1

Please sign in to comment.