diff --git a/Summit.md b/Summit.md
index a4e16a0a..d8edeff4 100644
--- a/Summit.md
+++ b/Summit.md
@@ -13,6 +13,12 @@ Then you will receive 2 invitation to join the :
 - Github MLOpsPython team.
 - AzureML CatsDogsOthers project : https://ml.azure.com/home (you should see project name: Cats-Dogs)
 
+## 0. Prerequisite
+
+- Pycharm (https://www.jetbrains.com/pycharm/)
+- Download and install python 3.11.x on your laptop.
+https://www.python.org/downloads/
+
 ## 1. Introduction
 
 [README.md](README.md)
diff --git a/packages/inference/src/mlopspython_inference/inference_pillow.py b/packages/inference/src/mlopspython_inference/inference_pillow.py
index 63e5a9d3..6dfa4c9d 100644
--- a/packages/inference/src/mlopspython_inference/inference_pillow.py
+++ b/packages/inference/src/mlopspython_inference/inference_pillow.py
@@ -1,4 +1,6 @@
+from dataclasses import dataclass
 from io import BytesIO
+from typing import List
 
 import numpy as np
 from tensorflow.keras.preprocessing.image import load_img, img_to_array
@@ -22,16 +24,20 @@ def load_image(filename: str|BytesIO):
 
 BASE_PATH = Path(__file__).resolve().parent
 
+@dataclass
+class InferenceOutput:
+    prediction: str
+    values: List[float]
 
 class Inference:
     def __init__(self, logging, model_path: str):
         self.logger = logging.getLogger(__name__)
         self.model = load_model(model_path)
 
-    def execute(self, filepath:str|BytesIO):
+    def execute(self, filepath:str|BytesIO) -> InferenceOutput:
         img = load_image(filepath)
         result = self.model.predict(img)
         values = [float(result[0][0]), float(result[0][1]), float(result[0][2])]
         switcher = ['Cat', 'Dog', 'Other']
         prediction = np.argmax(result[0])
-        return {"prediction": switcher[prediction], "values": values}
+        return InferenceOutput(switcher[prediction], values)
diff --git a/packages/inference/src/tests/inference_tests.py b/packages/inference/src/tests/inference_tests.py
index 96d3122a..fb013f41 100644
--- a/packages/inference/src/tests/inference_tests.py
+++ b/packages/inference/src/tests/inference_tests.py
@@ -11,12 +11,14 @@
 
 class TestInference(unittest.TestCase):
 
     def test_inference(self):
+        # Arrange
         inference = Inference(logging, str(input_directory / "model" / "final_model.h5"))
+        # Act
         inference_result = inference.execute(str(input_directory / "images" / "cat.png"))
-
+        # Assert
         expected_result = {'prediction': 'Cat', 'values': [1.0, 2.370240289845506e-30, 0.0]}
-        self.assertEqual(inference_result['prediction'], expected_result['prediction'])
-        self.assertEqual(len(inference_result['values']), len(expected_result['values']))
+        self.assertEqual(inference_result.prediction, expected_result['prediction'])
+        self.assertEqual(len(inference_result.values), len(expected_result['values']))
 
 if __name__ == "__main__":
diff --git a/production/api/core/model/inference_pillow.py b/production/api/core/model/inference_pillow.py
index b4f9c4db..9be2c6b9 100644
--- a/production/api/core/model/inference_pillow.py
+++ b/production/api/core/model/inference_pillow.py
@@ -1,3 +1,4 @@
+import dataclasses
 import io
 from pathlib import Path
 from mlopspython_inference.inference_pillow import Inference as InferencePillow
@@ -11,4 +12,5 @@ def __init__(self, logging, app_settings):
         self.inference = InferencePillow(logging, str(model_path))
 
     def execute(self, file, filename, settings=None):
-        return self.inference.execute(io.BytesIO(file.read()))
+        prediction = self.inference.execute(io.BytesIO(file.read()))
+        return dataclasses.asdict(prediction)
diff --git a/step_0_setup.md b/step_0_setup.md
index b3df03c2..16c1a015 100644
--- a/step_0_setup.md
+++ b/step_0_setup.md
@@ -1,6 +1,6 @@
 # Setup
 
-## 1. Download ans install python 3.11.x on your laptop.
+## 1. Download and install python 3.11.x on your laptop.
 https://www.python.org/downloads/
 
 It is a good practice to always use the lastest version with supports:
diff --git a/train/evaluate/evaluate.py b/train/evaluate/evaluate.py
index 57b253d6..6e5b7ef6 100644
--- a/train/evaluate/evaluate.py
+++ b/train/evaluate/evaluate.py
@@ -21,7 +21,7 @@ def evaluate(logging, input_model_directory: Path, input_images_directory: Path,
             continue
 
         model_result = model.execute(str(path))
-        prediction = model_result["prediction"]
+        prediction = model_result.prediction
         prediction_truth = path.parent.name.lower().replace("s", "")
         status = prediction_truth == prediction.lower()
         statistics["ok" if status else "ko"] += 1
@@ -29,7 +29,7 @@ def evaluate(logging, input_model_directory: Path, input_images_directory: Path,
                   "ok": status,
                   "prediction": prediction,
                   "prediction_truth": prediction_truth,
-                  "values": model_result["values"]}
+                  "values": model_result.values}
         results.append(result)
         statistics["total"] = statistics["ok"] + statistics["ko"]
 
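
For orientation (not part of the patch itself): the core change replaces the dict previously returned by `Inference.execute` with the `InferenceOutput` dataclass, so callers read attributes instead of dict keys and only convert back to a dict at the API boundary. Below is a minimal, self-contained sketch of that consumption pattern; the sample values are made up for illustration and stand in for a real model prediction.

```python
import dataclasses
from dataclasses import dataclass
from typing import List


@dataclass
class InferenceOutput:
    prediction: str
    values: List[float]


# Callers now read attributes instead of dict keys.
output = InferenceOutput(prediction="Cat", values=[1.0, 0.0, 0.0])
assert output.prediction == "Cat"
assert len(output.values) == 3

# Where a plain dict is still needed (e.g. to build a JSON response, as the
# production API layer does above), dataclasses.asdict converts it back.
assert dataclasses.asdict(output) == {"prediction": "Cat", "values": [1.0, 0.0, 0.0]}
```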