diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b740863 --- /dev/null +++ b/.gitignore @@ -0,0 +1,117 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class +.pytest_cache +tests/.cache + +# C extensions +*.so + +# neptune, pycharm +.cache +.cache/ +.idea/ +.idea_modules/ +out/ +output +output/ +*.log +target/ +devbook.ipynb +devbook_local.ipynb + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +nosetests.xml +coverage.xml +*.cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# Jupyter Notebook +Untitled*.ipynb +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# dotenv +.env + +# virtualenv +.venv +venv/ +ENV/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +# Working directories +examples/cache/ +tutorials/examples/cache/ diff --git a/tutorials/1-getting-started.ipynb b/tutorials/1-getting-started.ipynb new file mode 100644 index 0000000..dc7340b --- /dev/null +++ b/tutorials/1-getting-started.ipynb @@ -0,0 +1,332 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Getting started with steps\n", + "\n", + "This notebook shows how to **create** steps, **fit** them to data, **transform** new data and take advantage of persistence" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", + "\n", + "from steppy.base import Step, BaseTransformer" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# By default pipelines will cache some results so we delete the cache to ba sure we're starting from scratch\n", + "!rm -r ./cache" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Grabbing some data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll import a dataset from scikit-learn for our experiments and divide it into training and test sets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.datasets import load_digits\n", + "from sklearn.model_selection import train_test_split\n", + "\n", + "digits = load_digits()\n", + "X_digits, y_digits = digits.data, digits.target\n", + "\n", + "X_train, X_test, y_train, y_test = train_test_split(X_digits, y_digits, test_size=0.2, stratify=y_digits, random_state=42)\n", + "\n", + "print('{} samples for training'.format(len(y_train)))\n", + "print('{} samples for test'.format(len(y_test)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Steps 
communicate data between each other with plain **Python dictionaries**. This makes it easy to pass collections of **arbitrary data types** (Numpy arrays, Pandas dataframes, etc.). The basic structure is as follows (you can get much more fancy but we leave that to the next example)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data_train = {'input':\n", + " {\n", + " 'X': X_train,\n", + " 'y': y_train,\n", + " }\n", + " }\n", + "\n", + "data_test = {'input':\n", + " {\n", + " 'X': X_test,\n", + " 'y': y_test,\n", + " }\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating steps" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The main component of a step is a transformer. You just have to define a class following a **simple API** of ` BaseTransformer` and then it's up to you to be as creative as you want!\n", + "\n", + "... or you can just **wrap you favorite Scikit-learn estimator** like we do here:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.ensemble import RandomForestClassifier\n", + "from sklearn.externals import joblib\n", + "\n", + "class RandomForestTransformer(BaseTransformer):\n", + " def __init__(self):\n", + " self.estimator = RandomForestClassifier()\n", + " \n", + " def fit(self, X, y):\n", + " self.estimator.fit(X, y)\n", + " return self\n", + "\n", + " def transform(self, X, **kwargs):\n", + " y_pred = self.estimator.predict(X)\n", + " return {'y_pred': y_pred}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump(self.estimator, filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.estimator = joblib.load(filepath)\n", + " return self" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "So what does the transformer do? It must be able to:\n", + "* **initialize** itself\n", + "* **fit** and **transform** the incoming data prepared by the adapter; when transforming, the result should be returned as a **dictionary** that can be **passed on to the next step**\n", + "* **save** and **load** its parameters; this is handy when you're trying to avoid re-computing things over and over.\n", + "\n", + "See how flexible this is? You can just as easily wrap your Keras or Pytorch models." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's turn our transformer into a step:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "classifier_step = Step(name='classifier',\n", + " transformer=RandomForestTransformer(),\n", + " input_data=['input'], \n", + " cache_dirpath='./cache'\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And that's our one-step pipeline finished. You can visualize it too:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "classifier_step" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is just about the simplest pipeline you can imagine. Now let's train it!" 
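+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before we train it, here is a minimal transformer skeleton that you can use as a starting point for your own steps. This is only a sketch of the `BaseTransformer` interface shown above - the identity behaviour and the empty `save`/`load` are placeholders, not part of steppy itself:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class IdentityTransformer(BaseTransformer):\n", + " # A do-nothing transformer showing the minimal interface: fit, transform, save, load\n", + " def fit(self, X, y=None):\n", + " return self # nothing to learn here\n", + "\n", + " def transform(self, X, **kwargs):\n", + " return {'X': X} # always return a dict so the next step can consume it\n", + "\n", + " def save(self, filepath):\n", + " joblib.dump({}, filepath) # nothing to persist\n", + "\n", + " def load(self, filepath):\n", + " return self"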
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Training\n", + "\n", + "Training a pipeline is a one-liner:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "preds_train = classifier_step.fit_transform(data_train);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's see how well we do on our training data:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.metrics import accuracy_score\n", + "acc_train = accuracy_score(data_train['input']['y'], preds_train['y_pred'])\n", + "print('Training accuracy = {:.4f}'.format(acc_train))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Generating test predictions" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Running test data through our pipeline is a one-liner too:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "preds_test = classifier_step.transform(data_test)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "How good is our test score?" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "acc_test = accuracy_score(data_test['input']['y'], preds_test['y_pred'])\n", + "print('Test accuracy = {:.4f}'.format(acc_test))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That's pretty good for a first attempt!\n", + "\n", + "Let's have a look at some predictions to make sure they're sensible" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fix, axs = plt.subplots(4, 8, figsize=(10, 6))\n", + "for i, ax in enumerate(axs.ravel()):\n", + " ax.imshow(data_test['input']['X'][i].reshape(8, 8), cmap='gray')\n", + " ax.axis('off')\n", + " ax.set_title('pred = {}'.format(preds_test['y_pred'][i]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And that's about it for a start! As you can see:\n", + "* It's easy to create steps by inheriting from `BaseTransformer`\n", + "* Transferring data between steps with Python dicts gives you a lot of flexibility\n", + "* Steps wrap easily around Scikit-learn estimators\n", + "* You can display a graph showing the structure of your pipeline\n", + "* Training and testing are pretty much one-liners\n", + "\n", + "At this point it may seem like a lot of work for not much benefit but once we start moving towards more complex pipelines, the reasoning behind all the components will become more clear. Have a look at the next notebook for a more advanced, multi-step pipeline!" 
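+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For quick reference, the whole minimal workflow from this notebook can be condensed into a few lines. This recap simply re-uses the `RandomForestTransformer`, data dictionaries and metrics defined above:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Condensed recap: define a step, fit on training data, predict on test data\n", + "recap_step = Step(name='classifier_recap',\n", + " transformer=RandomForestTransformer(),\n", + " input_data=['input'],\n", + " cache_dirpath='./cache')\n", + "recap_train_preds = recap_step.fit_transform(data_train)\n", + "recap_test_preds = recap_step.transform(data_test)\n", + "print('Recap test accuracy = {:.4f}'.format(accuracy_score(data_test['input']['y'], recap_test_preds['y_pred'])))"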
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tutorials/2-multi-step.ipynb b/tutorials/2-multi-step.ipynb new file mode 100644 index 0000000..326eb12 --- /dev/null +++ b/tutorials/2-multi-step.ipynb @@ -0,0 +1,405 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Adapters: Creating steps with multiple inputs\n", + "\n", + "This notebook shows how to create a more complex pipeline, including steps with multiple inputs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", + "\n", + "from steppy.base import Step, BaseTransformer\n", + "from steppy.adapter import Adapter, E" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# By default pipelines will cache some results so we delete the cache to ba sure we're starting from scratch\n", + "!rm -r ./cache" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As before, we'll import a dataset from Scikit-learn for our experiments and divide it into training and test sets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.datasets import load_breast_cancer\n", + "from sklearn.model_selection import train_test_split\n", + "\n", + "dset = load_breast_cancer()\n", + "X_dset, y_dset = dset.data, dset.target\n", + "\n", + "X_train, X_test, y_train, y_test = train_test_split(X_dset, y_dset, test_size=0.2, stratify=y_dset, random_state=42)\n", + "\n", + "print('{} samples for training'.format(len(y_train)))\n", + "print('{} samples for test'.format(len(y_test)))\n", + "\n", + "data_train = {'input':\n", + " {\n", + " 'X': X_train,\n", + " 'y': y_train,\n", + " }\n", + " }\n", + "\n", + "data_test = {'input':\n", + " {\n", + " 'X': X_test,\n", + " 'y': y_test,\n", + " }\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating pipeline components\n", + "\n", + "This time we want to build a more fancy pipeline. We'll normalize our data, run PCA to compute some features of a different flavour and then combine them with our original features in a final logistic regression step." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our first step will be a normalization step. 
We could use the one from Scikit-learn but we'll write a pure Numpy implementation just to show how this could be done:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.externals import joblib\n", + "\n", + "class NormalizationTransformer(BaseTransformer):\n", + " def __init__(self):\n", + " self.mean = None\n", + " self.std = None\n", + " \n", + " # Having only X as input ensures that we don't accidentally fit y\n", + " def fit(self, X):\n", + " self.mean = np.mean(X, axis=0)\n", + " self.std = np.std(X, axis=0)\n", + " return self\n", + "\n", + " def transform(self, X, **kwargs):\n", + " X_tfm = (X - self.mean) / self.std\n", + " return {'X': X_tfm}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump([self.mean, self.std], filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.mean, self.std = joblib.load(filepath)\n", + " return self" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll also construct a PCA transformer for our normalized features:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.decomposition import PCA\n", + "\n", + "class PCATransformer(BaseTransformer):\n", + " def __init__(self):\n", + " self.estimator = PCA(n_components=10)\n", + " \n", + " def fit(self, X):\n", + " self.estimator.fit(X)\n", + " return self\n", + "\n", + " def transform(self, X, **kwargs):\n", + " X_tfm = self.estimator.transform(X)\n", + " return {'X': X_tfm}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump(self.estimator, filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.estimator = joblib.load(filepath)\n", + " return self" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we'll use logistic regression as our classifier:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.linear_model import LogisticRegression\n", + "\n", + "class LogRegTransformer(BaseTransformer):\n", + " def __init__(self):\n", + " self.estimator = LogisticRegression()\n", + " \n", + " def fit(self, X, y):\n", + " self.estimator.fit(X, y)\n", + " return self\n", + "\n", + " def transform(self, X, **kwargs):\n", + " y_pred = self.estimator.predict(X)\n", + " return {'y_pred': y_pred}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump(self.estimator, filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.estimator = joblib.load(filepath)\n", + " return self" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Assembling the pipeline\n", + "Now we'll create steps from our transformers and link them all together:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our normalization step will only require the features from the input, not the labels. In fact, we would like to *avoid* giving it the labels just in case there could be data leak in the implementation (the first rule of data science is you don't trust anyone). To achieve this, we will use a special `adapter` argument to the step constructors, which allows us to extract just the required variables from the data dictionary." 
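+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For intuition, the adapter used below is just a declarative way of doing the following renaming by hand. This cell is only a sanity check of what the mapping selects; it is not how steppy works internally:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# What Adapter({'X': E('input', 'X')}) selects, written out by hand:\n", + "# take the value stored under data['input']['X'] and hand it to the transformer as X\n", + "X_for_normalizer = data_train['input']['X']\n", + "print(X_for_normalizer.shape)"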
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "norm_step = Step(name='Normalizer',\n", + " transformer=NormalizationTransformer(),\n", + " input_data=['input'],\n", + " adapter=Adapter({\n", + " 'X': E('input', 'X')\n", + " }),\n", + " cache_dirpath='./cache')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pca_step = Step(name='PCA',\n", + " transformer=PCATransformer(),\n", + " input_steps=[norm_step], \n", + " cache_dirpath='./cache')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our classifier step will have to combine two data flows: the features processed by PCA, and the labels fed directly from input. Therefore, we will have to use the `adapter` argument to specify how to map those inputs to transformer arguments." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "lr_step = Step(name='LogReg',\n", + " transformer=LogRegTransformer(),\n", + " input_steps=[pca_step],\n", + " input_data=['input'],\n", + " adapter=Adapter({\n", + " 'X': E('PCA', 'X'),\n", + " 'y': E('input', 'y')\n", + " }),\n", + " cache_dirpath='./cache')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "One may think it's a bit cumbersome to create your transformers and then have to wrap them with steps. However, there is an advantage to this - think about it:\n", + "* The **transformer** is the ***implementation*** of a machine learning algorithm. It has an input and outputs but it doesn't even know what these are connected to.\n", + "* The **steps** define the ***connections*** between different transformers. At this level of abstraction, all the algorithmic details are hidden. The code that defines steps and connects them together is compact and it's easier to see what is connected to what." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "So what does our pipeline look like?" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "lr_step" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This looks about right - let's move on to training!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Training\n", + "\n", + "Training a pipeline is a one-liner. When we fit the final logistic regression step, it will go back to its input steps and fit them too (assuming there's no cache or persistent outputs - that's why we delete any leftover cache at the start of the notebook). This also works recursively, so the parent steps will ask the grandparent steps to fit etc." 
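+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Conceptually, the recursion works roughly like the sketch below. This is a simplification for illustration only - the function and the `adapt` helper are hypothetical names, not steppy's actual implementation:\n", + "\n", + "```python\n", + "def fit_transform_recursively(step, data):\n", + " # 1. fit and transform all parent steps first (recursively)\n", + " parent_outputs = [fit_transform_recursively(parent, data) for parent in step.input_steps]\n", + " # 2. combine parent outputs and raw input data via the adapter, if one is defined\n", + " arguments = adapt(step, parent_outputs, data)\n", + " # 3. fit this step's transformer and pass its output downstream\n", + " return step.transformer.fit_transform(**arguments)\n", + "```"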
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "preds_train = lr_step.fit_transform(data_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's see how well we do on our training data:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.metrics import accuracy_score\n", + "acc_train = accuracy_score(data_train['input']['y'], preds_train['y_pred'])\n", + "print('Training accuracy = {:.4f}'.format(acc_train))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Generating test predictions" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Running test data through our pipeline is a one-liner too:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "preds_test = lr_step.transform(data_test)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "What is our test score?" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "acc_test = accuracy_score(data_test['input']['y'], preds_test['y_pred'])\n", + "print('Test accuracy = {:.4f}'.format(acc_test))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That seems pretty good. Have a look at the next notebook for even more complex pipelines with parallel branches." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tutorials/3-adapter_advanced.ipynb b/tutorials/3-adapter_advanced.ipynb new file mode 100644 index 0000000..d41c485 --- /dev/null +++ b/tutorials/3-adapter_advanced.ipynb @@ -0,0 +1,708 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Adapters in bigger pipelines" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this tutorial we show how to use adapters to create more complicated pipelines in Steps." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import xgboost\n", + "import traceback\n", + "\n", + "from sklearn.datasets import load_digits\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.ensemble import RandomForestClassifier\n", + "from sklearn.externals import joblib\n", + "from sklearn.metrics import log_loss\n", + "\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from steppy.base import Step, BaseTransformer, NoOperation, make_transformer\n", + "from steppy.adapter import Adapter, E" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The problem" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's recreate the pipeline for digits recognition from notebook #1.\n", + "\n", + "We start off by fetching the data. In the latter part of this notebook we will create a model ensembling, hence this time we split the data into three parts." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "CACHE_DIR = './cache'\n", + "digits = load_digits()\n", + "X_digits, y_digits = digits.data, digits.target\n", + "\n", + "X_train, X_test, y_train, y_test = train_test_split(X_digits, y_digits, test_size=0.15, stratify=y_digits, random_state=643793)\n", + "X_train, X_ens, y_train, y_ens = train_test_split(X_train, y_train, test_size=0.35, stratify=y_train, random_state=976542)\n", + "\n", + "print('{} samples for training'.format(len(y_train)))\n", + "print('{} samples for ensembling'.format(len(y_ens)))\n", + "print('{} samples for test'.format(len(y_test)))\n", + "\n", + "data_train = {\n", + " 'input': {\n", + " 'images': X_train,\n", + " 'labels': y_train,\n", + " }\n", + "}\n", + "\n", + "data_ensembling = {\n", + " 'input': {\n", + " 'images': X_ens,\n", + " 'labels': y_ens\n", + " }\n", + "}\n", + "\n", + "data_test = {\n", + " 'input': {\n", + " 'images': X_test,\n", + " 'labels': y_test\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!rm -r ./cache" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We define `RandomForestTransformer` in similar manner as before. With one difference, though. `Transform` will use RandomForest's `predict_proba` instead of `predict` which will be useful in the latter part of this notebook." 
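+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As a reminder of the difference (this is just the standard scikit-learn behaviour): `predict` returns one label per sample, while `predict_proba` returns one probability per class per sample, which is what we will need for ensembling later on. A quick check on a throwaway forest:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "demo_forest = RandomForestClassifier(random_state=0).fit(X_train, y_train)\n", + "print(demo_forest.predict(X_test[:3])) # shape (3,): one class label per sample\n", + "print(demo_forest.predict_proba(X_test[:3]).shape) # (3, 10): one probability per class per sample"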
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class RandomForestTransformer(BaseTransformer):\n", + " def __init__(self, random_state=None):\n", + " self.estimator = RandomForestClassifier(random_state=random_state)\n", + " \n", + " def fit(self, X, y):\n", + " self.estimator.fit(X, y)\n", + " return self\n", + "\n", + " def transform(self, X, **kwargs):\n", + " y_proba = self.estimator.predict_proba(X)\n", + " return {'y_proba': y_proba}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump(self.estimator, filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.estimator = joblib.load(filepath)\n", + " return self" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rf_step = Step(name='random_forest',\n", + " transformer=RandomForestTransformer(),\n", + " input_data=['input'], \n", + " cache_dirpath=CACHE_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rf_step" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The graph looks just like in notebook #1. Let's try to execute it!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "try:\n", + " preds_train_rf = rf_step.fit_transform(data_train)\n", + "except:\n", + " traceback.print_exc()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, something went wrong. The problem is that the `input` dictionary in `data_train` contains the fields `images` and `labels`, whereas `RandomForestTransformer` expects arguments `X` and `y`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The solution: adapter" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To handle such issues, `Step`'s initializer has an `adapter` argument. `Adapter` describes how to reshape the data from the input nodes into the form expected by the transformer or further steps. \n", + "\n", + "The basic usage is as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rf_step = Step(name='random_forest',\n", + " transformer=RandomForestTransformer(),\n", + " input_data=['input'],\n", + " adapter=Adapter({\n", + " 'X': E('input', 'images'),\n", + " 'y': E('input', 'labels')\n", + " }),\n", + " cache_dirpath=CACHE_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rf_step" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We created a new step which gets its data from the `input` node.\n", + "\n", + "When the program flow gets to `random_forest`, the `adapter`-related code is executed first. `RandomForestTransformer`'s `fit_transform` and `transform` methods expect arguments `X` and `y`. The `adapter` is basically a dictionary which, for each expected argument, tells the step how to get it. For instance `'X': E('input', 'images')` tells the step that the value for `X` is stored under the `images` key in the dictionary returned by the `input` node.\n", + "\n", + "Let's try to fit the Random Forest again!"
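+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before we do, it is worth noting that the adapter simply performs the renaming we would otherwise do by hand before calling the transformer. A hand-written equivalent of the mapping above is shown below, only for intuition:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# The manual equivalent of Adapter({'X': E('input', 'images'), 'y': E('input', 'labels')})\n", + "manual_arguments = {\n", + " 'X': data_train['input']['images'],\n", + " 'y': data_train['input']['labels']\n", + "}\n", + "print(manual_arguments['X'].shape, manual_arguments['y'].shape)"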
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rf_step.fit_transform(data_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This time it worked like a charm - we see class probabilities for the training cases." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Pipeline with model ensembling" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Very often when we have multiple models which perform on the same level it makes sense to combine them. The resulting ensemble tends to be more stable and can even improve results a little.\n", + "\n", + "To take advantage of that fact, we will train a couple of forests. Thanks to different random seeds, each forest will make somewhat different predictions, and therefore their combination will improve the performance of the entire pipeline." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "NR_OF_FORESTS = 4\n", + "random_seeds = [np.random.randint(1000000) for _ in range(NR_OF_FORESTS)]\n", + "\n", + "rf_steps = [Step(name='random_forest_{}'.format(i),\n", + " transformer=RandomForestTransformer(random_state=seed),\n", + " input_data=['input'], \n", + " adapter=Adapter({\n", + " 'X': E('input', 'images'),\n", + " 'y': E('input', 'labels')\n", + " }), \n", + " cache_dirpath=CACHE_DIR)\n", + " for i, seed in enumerate(random_seeds)]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rf_steps[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For ensembling we will use boosting trees. First we need to create a transformer that will wrap XGBoost. What we need to do is really analogous to what we did for Random Forests."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class XGBoostTransformer(BaseTransformer):\n", + " def __init__(self, xgb_params, num_boost_round):\n", + " self.estimator = None\n", + " self.xgb_params = xgb_params\n", + " self.num_boost_round = num_boost_round\n", + " \n", + " def fit(self, X, y):\n", + " tr_mat = xgboost.DMatrix(X, label=y)\n", + " evals = [(tr_mat, 'train')]\n", + " self.estimator = xgboost.train(self.xgb_params,\n", + " tr_mat,\n", + " num_boost_round=self.num_boost_round,\n", + " verbose_eval=False,\n", + " evals=evals)\n", + " return self\n", + "\n", + " def transform(self, X, **kwargs):\n", + " test_mat = xgboost.DMatrix(X)\n", + " y_proba = self.estimator.predict(test_mat)\n", + " return {'y_proba': y_proba}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump({'estimator': self.estimator,\n", + " 'xgb_params': self.xgb_params,\n", + " 'num_boost_round': self.num_boost_round},\n", + " filepath)\n", + " \n", + " def load(self, filepath):\n", + " d = joblib.load(filepath)\n", + " self.estimator = d['estimator']\n", + " self.xgb_params = d['xgb_params']\n", + " self.num_boost_round = d['num_boost_round']\n", + " return self\n", + " \n", + "def get_xgb_params():\n", + " return {\n", + " 'objective': 'multi:softprob',\n", + " \"num_class\": 10,\n", + " 'eta': 0.5,\n", + " 'max_depth': 4,\n", + " 'silent': True,\n", + " 'nthread': -1,\n", + " 'lambda': 2.0,\n", + " 'eval_metric': [\"mlogloss\", \"merror\"]\n", + " }\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To connect ensembling step with random forests we need to do some more advanced adapting." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "gather_step = Step(\n", + " name='gather_step',\n", + " transformer=make_transformer(lambda lst, y: {'X': np.hstack(lst), 'y': y}),\n", + " input_steps=rf_steps,\n", + " input_data=['input'],\n", + " adapter=Adapter({\n", + " 'lst': [E(rf_step.name, 'y_proba') for rf_step in rf_steps],\n", + " 'y': E('input', 'labels')\n", + " }),\n", + " cache_dirpath=CACHE_DIR\n", + ")\n", + "\n", + "ensemble_step = Step(name='ensembler',\n", + " transformer=XGBoostTransformer(xgb_params=get_xgb_params(), num_boost_round=10),\n", + " input_steps=[gather_step],\n", + " cache_dirpath=CACHE_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ensemble_step" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We used a little different syntax in `adapter` this time. Recipe for `X` consists of two things:\n", + "- a list of objects returned by input steps that should be used to construct `X`,\n", + "- a function which merges them into a final `X` object.\n", + "\n", + "So `[(rf_step.name, 'y_proba') for rf_step in rf_steps]` tells the adapter to extract `y_proba` arrays from dictionaries returned by all random forests. All these `y_proba`s are put on a list which is then passed to `lambda lst: np.hstack(lst))`. This function will merge outputs of all forests into one big array, which is eventually passed to the `XGBoostTransformer`.\n", + "\n", + "An adapter is actually a description of how to build arguments for `fit_transform` and `transform`. Let _brick description_ mean a pair of node name and key in the dictionary returned by that node. 
An adapter is a dictionary, where:\n", + "- keys must agree with the transformer's `fit_transform` and `transform` arguments,\n", + "- values must be either:\n", + " 1. a brick description,\n", + " 2. a list of brick descriptions,\n", + " 3. a pair of:\n", + " - a list of brick descriptions,\n", + " - a function that adjusts the objects extracted according to the above list.\n", + "\n", + "A step with an adapter proceeds like this:\n", + "1. It gathers results from the preceding nodes.\n", + "2. It builds a dictionary with the same keys as the adapter and with values built according to the descriptions:\n", + " - if the key in the adapter maps to a single brick description, the appropriate object is extracted from the results of the input nodes,\n", + " - if a list of brick descriptions is given, objects are extracted according to the brick descriptions and added to a list,\n", + " - if a function is also passed, it will be applied to the list from the previous step, and its returned value will be assigned to the key.\n", + "3. Arguments of `fit_transform` and `transform` are filled using the above dictionary." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's check if our ensembling works. To properly fit the pipeline we have to fit the random forests first using the training data, and then fit the ensembler using the part of the data held out for this purpose." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for rf_step in rf_steps:\n", + " rf_step.fit_transform(data_train)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ensemble_step.fit_transform(data_ensembling)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Looks fine! However, often we are interested only in the class with the highest probability. Let's make a step that will find this class for us." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class GuessesTransformer(BaseTransformer):\n", + " def transform(self, y_proba):\n", + " return {'y_pred': np.argmax(y_proba, axis=1)}\n", + "\n", + "guesses_step = Step(name='guesses_maker',\n", + " transformer=GuessesTransformer(),\n", + " input_steps=[ensemble_step], \n", + " cache_dirpath=CACHE_DIR\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "guesses_step" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You should already be familiar with everything that happened here. The new step, `guesses_maker`, takes its input from `ensembler`. No adapter is needed this time: `ensembler` returns a dictionary with the key `y_proba`, which matches the argument expected by `GuessesTransformer.transform`. The transformer simply performs a row-wise `argmax` on the probabilities and returns the most likely class as `y_pred`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "guesses_step.fit_transform(data_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We have created quite a complicated pipeline, so everyone is surely anxious to see how it performs. Our final step will carry out the evaluation."
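+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before we do, here is a small, self-contained illustration of the three adapter value forms described earlier (a single brick description, a list of brick descriptions, and a list plus a merging function). It uses a toy results dictionary and plain Python instead of real steps:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Toy illustration of how adapter values are resolved (plain Python, no steppy involved)\n", + "toy_results = {'node_a': {'out': np.array([1, 2])},\n", + " 'node_b': {'out': np.array([3, 4])}}\n", + "\n", + "single = toy_results['node_a']['out'] # 1. single brick description -> the object itself\n", + "as_list = [toy_results['node_a']['out'], # 2. list of brick descriptions -> list of objects\n", + " toy_results['node_b']['out']]\n", + "merged = np.hstack(as_list) # 3. list + function -> the function applied to that list\n", + "print(single, as_list, merged)"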
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class EvaluationTransformer(BaseTransformer):\n", + " def transform(self, y_true, y_proba, y_pred):\n", + " return {'Log-loss': log_loss(y_pred=y_proba, y_true=y_true),\n", + " 'Acc:': '{:.2f}'.format(sum(y_true == y_pred) / len(y_pred))\n", + " }\n", + "\n", + "evaluation_step = Step(name='evaluator',\n", + " transformer=EvaluationTransformer(),\n", + " input_steps=[ensemble_step, guesses_step],\n", + " input_data=['input'],\n", + " adapter=Adapter({\n", + " 'y_proba': E(ensemble_step.name, 'y_proba'),\n", + " 'y_pred': E(guesses_step.name, 'y_pred'),\n", + " 'y_true': E('input', 'labels')\n", + " }),\n", + " cache_dirpath=CACHE_DIR\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "evaluation_step" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "evaluation_step.fit_transform(data_train)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "evaluation_step.transform(data_test)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "scrolled": false + }, + "source": [ + "As we can see thanks to ensembling we improved in comparison to a single model." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Peek on pipeline predictions" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Comparing images with model's predictions is always a very rewarding feeling. As a last example we show a step that displays a few images with the predicted probability distributions!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model_names = [rf_step.name for rf_step in rf_steps] + [ensemble_step.name]\n", + "class LookAtPredictions(BaseTransformer):\n", + " def transform(self, probas, images): \n", + " pd.options.display.float_format = '{:5.2f}'.format\n", + " for img_nr in range(5):\n", + " df = pd.DataFrame({model_names[j]: probas[j][img_nr]\n", + " for j in range(len(model_names))\n", + " },\n", + " index=list(range(10)))\n", + " df = df[model_names]\n", + " plt.figure(figsize=(6,2))\n", + " left = plt.subplot(1, 2, 1)\n", + " right = plt.subplot(1, 2, 2)\n", + " left.imshow(images[img_nr].reshape(8, 8), cmap='gray')\n", + " right.axis('off')\n", + " right.text(0, 0.3, str(df.T), fontsize=14, fontname='monospace')\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "display_step = Step(\n", + " name='display',\n", + " transformer=LookAtPredictions(),\n", + " input_steps=[ensemble_step] + rf_steps,\n", + " input_data=['input'],\n", + " adapter=Adapter({\n", + " 'probas': [E(rf_step.name, 'y_proba') for rf_step in rf_steps] +\n", + " [E(ensemble_step.name, 'y_proba')],\n", + " 'images': E('input', 'images')\n", + " }),\n", + " cache_dirpath=CACHE_DIR\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "display_step" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "display_step.fit_transform(data_train)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + 
"display_step.transform(data_test)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tutorials/4-caching-persistence.ipynb b/tutorials/4-caching-persistence.ipynb new file mode 100644 index 0000000..d402240 --- /dev/null +++ b/tutorials/4-caching-persistence.ipynb @@ -0,0 +1,778 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Data persistence and data caching" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook presents data persistence and data caching features in steps.\n", + "* Persistence helps to avoid re-running early steps of a pipeline when subsequent steps are changed\n", + "* Caching makes it possible to run complex, multi-path pipelines without re-computing the results of early steps\n", + "\n", + "Note that the features presented here are different from *model persistence*, which saves the transformers as the steps are trained." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "\n", + "from sklearn.externals import joblib\n", + "from sklearn.metrics import log_loss\n", + "\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from steps.base import Step, BaseTransformer\n", + "from steps.adapter import Adapter, E\n", + "CACHE_DIR = './cache'\n", + "CACHE_DIR_2 = './cache_2'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# By default pipelines will cache some results so we delete the cache to ba sure we're starting from scratch\n", + "import os\n", + "import shutil\n", + "\n", + "shutil.rmtree(CACHE_DIR, ignore_errors=True)\n", + "shutil.rmtree(CACHE_DIR_2, ignore_errors=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This time we'll hava a go at text classification. We'll use the classic 20newsgroups dataset, but without the headers, footers or quotes which would make the task too easy." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.datasets import fetch_20newsgroups\n", + "\n", + "newsgroups_train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))\n", + "newsgroups_test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.model_selection import train_test_split\n", + "\n", + "X_train, y_train = newsgroups_train.data, newsgroups_train.target\n", + "\n", + "X_fit, X_val, y_fit, y_val = train_test_split(X_train, y_train, test_size=0.1, stratify=y_train, random_state=42)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's use a label encoder to ensure out labels are well-behaved" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.preprocessing import LabelEncoder\n", + "input_label_enc = LabelEncoder().fit(newsgroups_train.target)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This time we have pre-defined training and test sets but we would like to have a hold-out set of training data available for ensembling" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data_fit = {'input':\n", + " {\n", + " 'text': X_fit,\n", + " 'label': input_label_enc.transform(y_fit),\n", + " }\n", + " }\n", + "\n", + "data_val = {'input':\n", + " {\n", + " 'text': X_val,\n", + " 'label': input_label_enc.transform(y_val),\n", + " }\n", + " }\n", + "\n", + "data_test = {'input':\n", + " {\n", + " 'text': newsgroups_test.data,\n", + " 'label': input_label_enc.transform(newsgroups_test.target),\n", + " }\n", + " }\n", + "\n", + "def print_data_summary(data, title):\n", + " print(title)\n", + " print(' Num. documents: {}'.format(len(data['input']['text'])))\n", + " print(' Num. 
categories: {}'.format(len(np.unique(data['input']['label']))))\n", + "\n", + "for data, title in [(data_fit, 'Model fitting data'), (data_val, 'Validation data'), (data_test, 'Testing data')]:\n", + " print_data_summary(data, title)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Text processing transformers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We define a transformer that does count vectorization on our documents - again, we can just wrap the one from sklearn:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.feature_extraction.text import CountVectorizer\n", + "\n", + "class CountVecTransformer(BaseTransformer):\n", + " def __init__(self, max_features):\n", + " self.estimator = CountVectorizer(max_features=max_features)\n", + " \n", + " def fit(self, X):\n", + " self.estimator.fit(X)\n", + " return self\n", + "\n", + " def transform(self, X, **kwargs):\n", + " X_tfm = self.estimator.transform(X)\n", + " return {'X': X_tfm}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump(self.estimator, filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.estimator = joblib.load(filepath)\n", + " return self" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Similarly for the IDFs in our TF-IDF model:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.feature_extraction.text import TfidfTransformer\n", + "\n", + "class StepsTfidfTransformer(BaseTransformer):\n", + " def __init__(self):\n", + " self.estimator = TfidfTransformer()\n", + " \n", + " def fit(self, X):\n", + " self.estimator.fit(X)\n", + " return self\n", + "\n", + " def transform(self, X, **kwargs):\n", + " X_tfm = self.estimator.transform(X)\n", + " return {'X': X_tfm}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump(self.estimator, filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.estimator = joblib.load(filepath)\n", + " return self" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This will give us a bunch of features to train on." 
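+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To get a feeling for what these two wrapped estimators produce, you can run the plain scikit-learn versions directly on the raw documents. This is just a sanity check outside of the pipeline; the number of columns follows from the `max_features` value we will also use in the steps:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Sanity check of the count -> TF-IDF chain with plain scikit-learn (outside of steppy)\n", + "demo_counts = CountVectorizer(max_features=1000).fit_transform(X_fit)\n", + "demo_tfidf = TfidfTransformer().fit_transform(demo_counts)\n", + "print('Count matrix shape: {}'.format(demo_counts.shape))\n", + "print('TF-IDF matrix shape: {}'.format(demo_tfidf.shape))"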
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Linear model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As a first attempt, we'll try to do our predictions with (sparse) logistic regression" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.linear_model import LogisticRegression\n", + "\n", + "class SparseLogRegProbaTransformer(BaseTransformer):\n", + " def __init__(self):\n", + " self.estimator = LogisticRegression(penalty='l1')\n", + " \n", + " def fit(self, X, y):\n", + " self.estimator.fit(X, y)\n", + " return self\n", + "\n", + " def transform(self, X, **kwargs):\n", + " y_proba = self.estimator.predict_proba(X)\n", + " return {'y_proba': y_proba}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump(self.estimator, filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.estimator = joblib.load(filepath)\n", + " return self" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "count_vec_step = Step(name='CountVec',\n", + " transformer=CountVecTransformer(max_features=1000),\n", + " input_data=['input'],\n", + " adapter=Adapter({'X': E('input', 'text')}),\n", + " cache_dirpath=CACHE_DIR)\n", + "\n", + "tfidf_step = Step(name='TF-IDF',\n", + " transformer=StepsTfidfTransformer(),\n", + " input_steps=[count_vec_step], \n", + " cache_dirpath=CACHE_DIR,\n", + " save_output=True,\n", + " load_saved_output=True # This breaks when switching from training data to val data or test data!\n", + " )\n", + "\n", + "logreg_step = Step(name='SparseLogReg',\n", + " transformer=SparseLogRegProbaTransformer(),\n", + " input_steps=[tfidf_step],\n", + " input_data=['input'],\n", + " adapter=Adapter({'X': E('TF-IDF', 'X'),\n", + " 'y': E('input', 'label')\n", + " }),\n", + " cache_dirpath=CACHE_DIR)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that we have passed `save_output=True` to the `tfidf_step` constructor. This will make this step save its output so that once it's been computed once, it can later just be loaded from disk. Therefore, we will be able to work on the logistic regression classifier without having to re-compute the outputs of its ancestor nodes." 
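+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once the pipeline has been fitted (a couple of cells below), the persisted output should appear under the step's name inside the cache directory; you can re-run the check below after fitting to confirm. The layout assumed here (an `outputs` subdirectory keyed by step name) is the same path used by the workaround cells later in this notebook:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Where the persisted TF-IDF output ends up on disk\n", + "tfidf_output_path = os.path.join(CACHE_DIR, 'outputs', 'TF-IDF')\n", + "print('Saved TF-IDF output exists: {}'.format(os.path.exists(tfidf_output_path)))"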
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "logreg_step" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "preds_linear_fit = logreg_step.fit_transform(data_fit)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.metrics import accuracy_score\n", + "\n", + "acc_linear_fit = accuracy_score(y_true=data_fit['input']['label'], y_pred=np.argmax(preds_linear_fit['y_proba'], axis=1))\n", + "print('Model fitting accuracy: {:.4f}'.format(acc_linear_fit))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Bug workaround: manually delete saved output when switching datasets\n", + "os.remove(os.path.join(CACHE_DIR, 'outputs', 'TF-IDF'))\n", + "preds_linear_val = logreg_step.transform(data_val)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "acc_linear_val = accuracy_score(y_true=data_val['input']['label'], y_pred=np.argmax(preds_linear_val['y_proba'], axis=1))\n", + "print('Validation accuracy: {:.4f}'.format(acc_linear_val))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Random forest model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As an alternative, we'll also build a random forest model on top of the same TF-IDF features, using the `RandomForestClassifier` available in Scikit-learn." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.ensemble import RandomForestClassifier\n", + "\n", + "class RfClfTransformer(BaseTransformer):\n", + " def __init__(self, n_estimators, max_depth):\n", + " self.estimator = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth)\n", + " \n", + " def fit(self, X, y):\n", + " self.estimator.fit(X, y)\n", + " return self\n", + "\n", + " def transform(self, X, **kwargs):\n", + " y_proba = self.estimator.predict_proba(X)\n", + " return {'y_proba': y_proba}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump(self.estimator, filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.estimator = joblib.load(filepath)\n", + " return self" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rf_step = Step(name='RF',\n", + " transformer=RfClfTransformer(n_estimators=200, max_depth=8),\n", + " input_steps=[tfidf_step],\n", + " input_data=['input'],\n", + " adapter=Adapter({'X': E('TF-IDF', 'X'),\n", + " 'y': E('input', 'label')\n", + " }),\n", + " cache_dirpath=CACHE_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rf_step" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "OK, so it was easy to add a different model on top of the TF-IDF features. Indeed, this time we will be able to use the **saved** TF-IDF output, so we can get straight to fitting the random forest."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Bug workaround: manually delete saved output when switching datasets\n", + "os.remove(os.path.join(CACHE_DIR, 'outputs', 'TF-IDF'))\n", + "\n", + "preds_rf_fit = rf_step.fit_transform(data_fit)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "acc_rf_fit = accuracy_score(y_true=data_fit['input']['label'], y_pred=np.argmax(preds_rf_fit['y_proba'], axis=1))\n", + "print('Model fitting accuracy: {:.4f}'.format(acc_rf_fit))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Bug workaround: manually delete saved output when switching datasets\n", + "os.remove(os.path.join(CACHE_DIR, 'outputs', 'TF-IDF'))\n", + "\n", + "preds_rf_val = rf_step.transform(data_val)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "acc_rf_val = accuracy_score(y_true=data_val['input']['label'], \n", + " y_pred=np.argmax(preds_rf_val['y_proba'], axis=1))\n", + "print('Validation accuracy: {:.4f}'.format(acc_rf_val))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Ensembling" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll do simple ensembling by averaging predictions:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class AvgTransformer(BaseTransformer):\n", + " def __init__(self):\n", + " pass\n", + " \n", + " def fit(self, y_proba_1, y_proba_2):\n", + " return self\n", + "\n", + " def transform(self, y_proba_1, y_proba_2, **kwargs):\n", + " y_proba = (y_proba_1 + y_proba_2) / 2\n", + " return {'y_proba': y_proba}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump({}, filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.estimator = joblib.load(filepath)\n", + " return self" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ens_step = Step(name='Ensembler',\n", + " transformer=AvgTransformer(),\n", + " input_steps=[logreg_step, rf_step],\n", + " adapter=Adapter({'y_proba_1': E('SparseLogReg', 'y_proba'),\n", + " 'y_proba_2': E('RF', 'y_proba'),\n", + " }),\n", + " cache_dirpath=CACHE_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ens_step" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the output of the TF-IDF step is used by both RF and SparseLogReg. This means that when we run the Ensembler node on some data, it will in turn call RF and SparseLogReg, which will both call TF-IDF. Without saving or caching, this would mean we're computing the output of the TF-IDF step twice, which is definitely a waste of precious compute time and could possibly lead to inconsistencies in the data (e.g. if the TF-IDF step was randomized in some way). Saving the output, as we did with `save_output=True`, or caching it with `cache_output=True` (shown in the next section) solves both problems without keeping anything in memory - the data is stored on disk, not in RAM."
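+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The idea behind both saving and caching is simply disk-level memoisation: compute a result once, write it to disk, and re-load it the next time it is requested. The snippet below is a generic illustration of that idea and is not how steppy implements it internally:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Generic disk-memoisation sketch (illustration only, not steppy internals)\n", + "def cached_compute(path, compute_fn):\n", + " if os.path.exists(path):\n", + " return joblib.load(path) # later callers re-load the stored result\n", + " result = compute_fn() # the first caller pays the computation cost\n", + " joblib.dump(result, path)\n", + " return result"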
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Bug workaround: manually delete saved output when switching datasets\n", + "os.remove(os.path.join(CACHE_DIR, 'outputs', 'TF-IDF'))\n", + "\n", + "# This is just a dummy step to \"activate\" the ensembler\n", + "preds_ens_fit = ens_step.fit_transform(data_val)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Bug workaround: manually delete saved output when switching datasets\n", + "os.remove(os.path.join(CACHE_DIR, 'outputs', 'TF-IDF'))\n", + "\n", + "preds_ens_val = ens_step.transform(data_val)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Bug workaround: manually delete saved output when switching datasets\n", + "os.remove(os.path.join(CACHE_DIR, 'outputs', 'TF-IDF'))\n", + "\n", + "preds_ens_test = ens_step.transform(data_test)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "acc_ens_val = accuracy_score(y_true=data_val['input']['label'], y_pred=np.argmax(preds_ens_val['y_proba'], axis=1))\n", + "print('Validation accuracy: {:.4f}'.format(acc_ens_val))\n", + "\n", + "acc_ens_test = accuracy_score(y_true=data_test['input']['label'], y_pred=np.argmax(preds_ens_test['y_proba'], axis=1))\n", + "print('Test accuracy: {:.4f}'.format(acc_ens_test))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Caching: saving output within one run only" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Sometimes you want to keep your output within one run of your pipeline but discard it at the end. This use case is handled by **caching**. 
Let's build a new pipeline that uses caching instead of saving to avoid re-computing results:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_count_vec_step = Step(name='CountVec',\n", + " transformer=CountVecTransformer(max_features=1000),\n", + " input_data=['input'],\n", + " adapter=Adapter({'X': E('input', 'text')}),\n", + " cache_dirpath=CACHE_DIR_2)\n", + "\n", + "new_tfidf_step = Step(name='TF-IDF',\n", + " transformer=StepsTfidfTransformer(),\n", + " input_steps=[new_count_vec_step], \n", + " cache_dirpath=CACHE_DIR_2,\n", + " cache_output=True)\n", + "\n", + "new_logreg_step = Step(name='SparseLogReg',\n", + " transformer=SparseLogRegProbaTransformer(),\n", + " input_steps=[new_tfidf_step],\n", + " input_data=['input'],\n", + " adapter=Adapter({'X': E('TF-IDF', 'X'),\n", + " 'y': E('input', 'label')\n", + " }),\n", + " cache_dirpath=CACHE_DIR_2)\n", + "\n", + "new_rf_step = Step(name='RF',\n", + " transformer=RfClfTransformer(n_estimators=200, max_depth=8),\n", + " input_steps=[new_tfidf_step],\n", + " input_data=['input'],\n", + " adapter=Adapter({'X': E('TF-IDF', 'X'),\n", + " 'y': E('input', 'label')\n", + " }),\n", + " cache_dirpath=CACHE_DIR_2)\n", + "\n", + "new_ens_step = Step(name='Ensembler',\n", + " transformer=AvgTransformer(),\n", + " input_steps=[new_logreg_step, new_rf_step],\n", + " adapter=Adapter({'y_proba_1': E('SparseLogReg', 'y_proba'),\n", + " 'y_proba_2': E('RF', 'y_proba')\n", + " }),\n", + " cache_dirpath=CACHE_DIR_2)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_ens_step" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_ens_step.clean_cache()\n", + "new_preds_ens_fit = new_ens_step.fit_transform(data_fit)\n", + "new_ens_step.clean_cache()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you look carefully at the training log above, you should see that when training the second branch, TF-IDF just loaded outputs instead of re-computing them." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_ens_step.clean_cache()\n", + "new_preds_ens_val = new_ens_step.transform(data_val)\n", + "new_ens_step.clean_cache()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_ens_step.clean_cache()\n", + "new_preds_ens_test = new_ens_step.transform(data_test)\n", + "new_ens_step.clean_cache()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_acc_ens_fit = accuracy_score(y_true=data_fit['input']['label'], y_pred=np.argmax(new_preds_ens_fit['y_proba'], axis=1))\n", + "print('New fitting accuracy: {:.4f}'.format(new_acc_ens_fit))\n", + "\n", + "new_acc_ens_val = accuracy_score(y_true=data_val['input']['label'], y_pred=np.argmax(new_preds_ens_val['y_proba'], axis=1))\n", + "print('New validation accuracy: {:.4f}'.format(new_acc_ens_val))\n", + "\n", + "new_acc_ens_test = accuracy_score(y_true=data_test['input']['label'], y_pred=np.argmax(new_preds_ens_test['y_proba'], axis=1))\n", + "print('New test accuracy: {:.4f}'.format(new_acc_ens_test))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now you should be familiar with data persistence features. 
The next few notebooks will focus on building deep learning pipelines with steps." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tutorials/5-steps-with-keras.ipynb b/tutorials/5-steps-with-keras.ipynb new file mode 100644 index 0000000..bf6f4cd --- /dev/null +++ b/tutorials/5-steps-with-keras.ipynb @@ -0,0 +1,548 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "scrolled": false + }, + "source": [ + "# Using Keras and Steps" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "scrolled": false + }, + "source": [ + "In this notebook we show how a Keras model for image recognition can be incorporated into Steps pipeline." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "from pathlib import Path\n", + "\n", + "from sklearn.datasets import load_digits\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.metrics import log_loss\n", + "from sklearn.externals import joblib\n", + "\n", + "from keras.models import Sequential, Model, load_model\n", + "from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n", + "from keras import optimizers, regularizers\n", + "from keras.preprocessing.image import ImageDataGenerator\n", + "from keras.callbacks import ModelCheckpoint\n", + "from keras.optimizers import Adam\n", + "\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from steppy.base import Step, BaseTransformer\n", + "from steppy.adapter import Adapter, E\n", + "from steppy_toolkit.keras.models import ClassifierGenerator" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We start off by loading our favourite dataset for digits recognition." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "digits = load_digits()\n", + "X_digits, y_digits = digits.data, digits.target\n", + "\n", + "X_train, X_valid, y_train, y_valid = train_test_split(X_digits, y_digits, test_size=0.2, stratify=y_digits, random_state=643793)\n", + "\n", + "print('{} samples for training'.format(len(y_train)))\n", + "print('{} samples for test'.format(len(y_valid)))\n", + "\n", + "data = {\n", + " 'input': {\n", + " 'images': X_train,\n", + " 'labels': y_train,\n", + " },\n", + " 'input_valid': {\n", + " 'images': X_valid,\n", + " 'labels': y_valid\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For convenience let's define a few constants." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "TARGET_SHAPE = (8, 8, 1) # Shape of images. In this dataset we have 8x8 pictures.\n", + " # Third dimension stands for the number of channels. 
We use grayscale images, so 1 channel only.\n",
+ "N_CLASSES = 10 # Number of categories in this classification problem\n",
+ "CACHE_DIR = './cache' # directory for saved transformers and outputs"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "scrolled": false
+ },
+ "source": [
+ "To ensure that each run of the notebook trains the net from scratch (instead of just loading a previously trained model), we start by removing the cache."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!rm -r ./cache"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "scrolled": false
+ },
+ "source": [
+ "## Data loader"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before we train a neural net, we have to prepare the data properly.\n",
+ "\n",
+ "Sklearn keeps the digit images as one-dimensional vectors. That is fine for models like XGBoost or RandomForest, because they ignore the two-dimensional nature of images anyway. However, CNNs don't. That's why the first transformer that we define recovers this structure."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ReshapeData(BaseTransformer):\n",
+ "    def transform(self, X, y, **kwargs):\n",
+ "        X = X.reshape((X.shape[0], ) + TARGET_SHAPE)\n",
+ "        return {\n",
+ "            'X': X,\n",
+ "            'y': y\n",
+ "        }"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next we use a Keras utility, ImageDataGenerator, to prepare the image stream. It takes care of mundane tasks like standardization, shuffling, augmentation and splitting the stream into batches. Let's create a generator with quite a few online augmentations."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class PrepareDatagen(BaseTransformer):\n",
+ "    def fit(self, X, **kwargs):\n",
+ "        self.datagen = ImageDataGenerator(\n",
+ "            featurewise_center=True,\n",
+ "            featurewise_std_normalization=True,\n",
+ "            rotation_range=10,\n",
+ "            width_shift_range=0.1,\n",
+ "            height_shift_range=0.1)\n",
+ "        self.datagen.fit(X)\n",
+ "        return self\n",
+ "\n",
+ "    def transform(self, X, **kwargs):\n",
+ "        return {\n",
+ "            'datagen': self.datagen,\n",
+ "        }\n",
+ "\n",
+ "    def save(self, filepath):\n",
+ "        joblib.dump(self.datagen, filepath)\n",
+ "\n",
+ "    def load(self, filepath):\n",
+ "        self.datagen = joblib.load(filepath)\n",
+ "        return self"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we can put together the first steps of the pipeline."
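+ , "\n",
+ "\n",
+ "Before we do, a quick sanity check of the reshape arithmetic (64 = 8 x 8 x 1). This throwaway snippet assumes only `numpy` (imported above as `np`) and the `TARGET_SHAPE` constant:\n",
+ "\n",
+ "```python\n",
+ "# Each 64-element digit vector becomes an 8x8 image with a single channel.\n",
+ "dummy = np.zeros((2, 64))\n",
+ "print(dummy.reshape((dummy.shape[0],) + TARGET_SHAPE).shape)  # (2, 8, 8, 1)\n",
+ "```"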
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "reshape_step = Step(\n",
+ "    name=\"reshape\",\n",
+ "    transformer=ReshapeData(),\n",
+ "    input_data=['input'],\n",
+ "    adapter=Adapter({\n",
+ "        'X': E('input', 'images'),\n",
+ "        'y': E('input', 'labels')\n",
+ "    }),\n",
+ "    cache_dirpath=CACHE_DIR\n",
+ ")\n",
+ "\n",
+ "reshape_valid_step = Step(\n",
+ "    name=\"reshape_valid\",\n",
+ "    transformer=ReshapeData(),\n",
+ "    input_data=['input_valid'],\n",
+ "    adapter=Adapter({\n",
+ "        'X': E('input_valid', 'images'),\n",
+ "        'y': E('input_valid', 'labels')\n",
+ "    }),\n",
+ "    cache_dirpath=CACHE_DIR\n",
+ ")\n",
+ "\n",
+ "datagen_step = Step(\n",
+ "    name=\"loader\",\n",
+ "    transformer=PrepareDatagen(),\n",
+ "    input_steps=[reshape_step],\n",
+ "    cache_dirpath=CACHE_DIR\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First, we created a step that reshapes vector representations of the train images into two-dimensional arrays. Later we will need validation images in the same form, so we also created an analogous step for them. The third step creates an instance of ImageDataGenerator. It takes as input the reshaped train images, so that it can calculate means and variances for standardization.\n",
+ "\n",
+ "To check that what we did actually works, let's define an auxiliary step that displays the generated image data stream."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class DataDisplay(BaseTransformer):\n",
+ "    def transform(self, datagen, X, y, **kwargs):\n",
+ "        img_batch, lbl_batch = datagen.flow(X, y, batch_size=32).next()\n",
+ "        n_row = 4\n",
+ "        fig, axs = plt.subplots(n_row, 8, figsize=(8, 2 * n_row))\n",
+ "        for i, ax in enumerate(axs.ravel()):\n",
+ "            ax.imshow(img_batch[i].reshape(8, 8), cmap='gray')\n",
+ "            ax.axis('off')\n",
+ "            ax.set_title('lbl = {}'.format(lbl_batch[i]))\n",
+ "\n",
+ "display_step = Step(\n",
+ "    name=\"display\",\n",
+ "    transformer=DataDisplay(),\n",
+ "    input_steps=[reshape_step, datagen_step],\n",
+ "    adapter=Adapter({\n",
+ "        'datagen': E(datagen_step.name, 'datagen'),\n",
+ "        'X': E(reshape_step.name, 'X'),\n",
+ "        'y': E(reshape_step.name, 'y')\n",
+ "    }),\n",
+ "    cache_dirpath=CACHE_DIR\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "display_step"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "display_step.fit_transform(data)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Steps for CNN training"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We proceed to the crux of this notebook: a step/transformer wrapping a Keras model. The Steps library contains classes that facilitate this task. We will use `ClassifierGenerator`, which extends `KerasModelTransformer`. Their design follows a _template method pattern_, which means that the main part of the code is defined in abstract classes and the user derives from them and implements some auxiliary methods, in this case: `_build_optimizer`, `_build_loss`, `_build_model`, `_create_callbacks`. That's what we do below."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class KerasCnn(ClassifierGenerator):\n", + " def _build_optimizer(self, **kwargs):\n", + " return Adam(lr=kwargs['learning_rate'])\n", + "\n", + " def _build_loss(self, **kwargs):\n", + " return 'sparse_categorical_crossentropy'\n", + " \n", + " def _build_model(self, **kwargs):\n", + " dropout_ratio = kwargs['dropout_ratio']\n", + " regularization = kwargs['regularization']\n", + " \n", + " input_img = Input(shape=TARGET_SHAPE)\n", + "\n", + " layer = Conv2D(8, kernel_size=(3, 3), padding='same', activation='relu')(input_img)\n", + " layer = Conv2D(8, kernel_size=(3, 3), padding='same', activation='relu')(layer)\n", + " layer = MaxPooling2D((2, 2), padding='same')(layer)\n", + "\n", + " layer = Conv2D(16, kernel_size=(3, 3), padding='same', activation='relu')(layer)\n", + " layer = Conv2D(16, kernel_size=(3, 3), padding='same', activation='relu')(layer)\n", + " layer = MaxPooling2D((2, 2), padding='same')(layer)\n", + "\n", + " layer = Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu')(layer)\n", + " layer = Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu')(layer)\n", + " layer = MaxPooling2D((2, 2), padding='same')(layer)\n", + "\n", + " layer = Flatten()(layer)\n", + " layer = Dense(64, activation='relu', kernel_regularizer=regularizers.l2(regularization))(layer)\n", + " if dropout_ratio > 0:\n", + " layer = Dropout(dropout_ratio)(layer)\n", + " predictions = Dense(N_CLASSES, activation='softmax')(layer)\n", + "\n", + " model = Model(input_img, predictions)\n", + " return model\n", + "\n", + " def _create_callbacks(self, **kwargs):\n", + " checkpoint_filepath = kwargs['model_checkpoint']['filepath']\n", + " Path(checkpoint_filepath).parents[0].mkdir(parents=True, exist_ok=True)\n", + " model_checkpoint = ModelCheckpoint(**kwargs['model_checkpoint'])\n", + " return [model_checkpoint]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`KerasModelTransformer`'s initializer takes 3 arguments.\n", + "1. `architecture_config` - contains model and optimizer parameters.\n", + "2. `training_config` - contains parameters for model's `fit_generator` and generator's `flow` methods.\n", + "3. `callbacks_config` - contains parameters for callbacks instantiated in `_create_callbacks` methods.\n", + "\n", + "The exact structure of these arguments is best explained on an example." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "architecture_config = {\n", + " 'model_params': {\n", + " 'dropout_ratio': 0.5,\n", + " 'regularization': 0.01\n", + " },\n", + " 'optimizer_params': {\n", + " 'learning_rate': 1e-3\n", + " }\n", + "}\n", + "\n", + "training_config = {\n", + " 'fit_args': {\n", + " 'epochs': 100,\n", + " 'verbose': True\n", + " },\n", + " 'flow_args': {\n", + " 'batch_size': 64,\n", + " }\n", + "}\n", + "\n", + "callbacks_config = {\n", + " 'model_checkpoint': {\n", + " 'filepath': str(Path(CACHE_DIR) / 'checkpoints' / 'best_model.hdf5'),\n", + " 'save_best_only': True\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we have all dependencies necessary to add the crucial step." 
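+ , "\n",
+ "\n",
+ "One optional extra, not wired up in this notebook: `_create_callbacks` may return any list of Keras callbacks, since the transformer simply passes that list to `fit_generator`. A hedged sketch of adding early stopping - the `early_stopping` key is hypothetical, and you would also have to add it to `callbacks_config` yourself (e.g. `{'patience': 5}`):\n",
+ "\n",
+ "```python\n",
+ "from keras.callbacks import EarlyStopping\n",
+ "\n",
+ "# Sketch of an extended _create_callbacks for the KerasCnn class above.\n",
+ "def _create_callbacks(self, **kwargs):\n",
+ "    checkpoint_filepath = kwargs['model_checkpoint']['filepath']\n",
+ "    Path(checkpoint_filepath).parents[0].mkdir(parents=True, exist_ok=True)\n",
+ "    model_checkpoint = ModelCheckpoint(**kwargs['model_checkpoint'])\n",
+ "    early_stopping = EarlyStopping(**kwargs['early_stopping'])\n",
+ "    return [model_checkpoint, early_stopping]\n",
+ "```"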
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cnn_step = Step(\n", + " name=\"CNN\",\n", + " transformer=KerasCnn(architecture_config, training_config, callbacks_config),\n", + " input_steps=[datagen_step, reshape_step, reshape_valid_step],\n", + " cache_dirpath=CACHE_DIR,\n", + " adapter=Adapter({\n", + " 'datagen': E(datagen_step.name, 'datagen'),\n", + " 'X': E(reshape_step.name, 'X'),\n", + " 'y': E(reshape_step.name, 'y'),\n", + " 'X_valid': E(reshape_valid_step.name, 'X'),\n", + " 'y_valid': E(reshape_valid_step.name, 'y')\n", + " }),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cnn_step" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Since we didn't specify `datagen_valid` the same generator will be used for train and validation data. In particular it means that validation images are augmented as well." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "result = cnn_step.fit_transform(data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A short function below summarizes the results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def eval_pred(title, y_true, y_pred):\n", + " print(title)\n", + " print(\" Log-loss: \", log_loss(y_true=y_true, y_pred=y_pred))\n", + " choices = np.argmax(y_pred, axis=1)\n", + " print(\" Accuracy: {:.2%}\".format(np.sum(choices == y_true) / len(y_true)))\n", + " \n", + "eval_pred(\"Results on training\", y_true=y_train, y_pred=result['output'])\n", + "eval_pred(\"Results on validation\", y_true=y_valid, y_pred=result['output_valid'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Because we do test time augmentation, it makes sense to run prediction phase a few times and average the results.\n", + "As we can see below it improves the overall score." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "results_valid = []\n", + "for i in range(10):\n", + " print(\"Iteration {}/10\".format(i+1))\n", + " results_valid.append(cnn_step.transform(data)['output_valid'])\n", + "y_avg_pred = np.mean(np.array(results_valid), axis=0)\n", + "eval_pred(\"Results on averaged predictions\", y_true=y_valid, y_pred=y_avg_pred)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tutorials/intro.ipynb b/tutorials/intro.ipynb new file mode 100644 index 0000000..e9582d4 --- /dev/null +++ b/tutorials/intro.ipynb @@ -0,0 +1,904 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Why?\n", + "\n", + "* Sklearn Pipelines are awesome... 
\n", + "\n", + "```python\n", + "import numpy as np\n", + "import pandas as pd\n", + "from sklearn.pipeline import Pipeline\n", + "\n", + "pipeline = Pipeline([\n", + " ('vect', CountVectorizer()),\n", + " ('tfidf', TfidfTransformer()),\n", + " ('clf', SGDClassifier()),\n", + "])\n", + "\n", + "...\n", + "\n", + "grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)\n", + "grid_search.fit(data.data, data.target)\n", + "\n", + "\n", + "```\n", + "http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_text_feature_extraction.html" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEABALDA4MChAODQ4SERATGCgaGBYWGDEjJR0oOjM9PDkzODdASFxOQERXRTc4UG1RV19iZ2hnPk1xeXBkeFxlZ2MBERISGBUYLxoaL2NCOEJjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY//AABEIAWgB4AMBIgACEQEDEQH/xAAbAAEAAgMBAQAAAAAAAAAAAAAABAUBBgcDAv/EAEAQAQABAgMFBAgEBQMCBwAAAAABAgMEERUSIVKR0QUTMVQGNUFRcXOTsSIyYaEUMzRywSOBsiRCJURTYoLh8P/EABgBAQEBAQEAAAAAAAAAAAAAAAABAgME/8QAIhEBAQEBAAMAAgIDAQAAAAAAAAERAgMhMRIyE0EiQlEE/9oADAMBAAIRAxEAPwDn4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAsNIxHHa5z0NIxHHa5z0BXiw0jEcdrnPQ0jEcdrnPQFeLDSMRx2uc9DSMRx2uc9AV4sNIxHHa5z0NIxHHa5z0BXiw0jEcdrnPQ0jEcdrnPQFeLDSMRx2uc9DSMRx2uc9AV4sNIxHHa5z0NIxHHa5z0BXiw0jEcdrnPQ0jEcdrnPQFeLDSMRx2uc9DSMRx2uc9AV4sNIxHHa5z0NIxHHa5z0BXiw0jEcdrnPQ0jEcdrnPQFeLDSMRx2uc9DSMRx2uc9AV4sNIxHHa5z0NIxHHa5z0BXiw0jEcdrnPQ0jEcdrnPQFeLDSMRx2uc9DSMRx2uc9AV4sNIxHHa5z0NIxHHa5z0BXiw0jEcdrnPQ0jEcdrnPQFeLDSMRx2uc9DSMRx2uc9AV4sNIxHHa5z0NIxHHa5z0BXiw0jEcdrnPQ0jEcdrnPQFeLDSMRx2uc9DSMRx2uc9AV4sNIxHHa5z0NIxHHa5z0BXiw0jEcdrnPQ0jEcdrnPQFeLDSMRx2uc9DSMRx2uc9AV4sNIxHHa5z0NIxHHa5z0BXiw0jEcdrnPQ0jEcdrnPQFeLDSMRx2uc9DSMRx2uc9AV4sNIxHHa5z0NIxHHa5z0BXiw0jEcdrnPQ0jEcdrnPQFeLDSMRx2uc9DSMRx2uc9AXIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJWF7OxeLomvD2KrlMTlMx73tofafk7n7Ni9DvVt35s/aGwJqOe6H2n5O5+xofafk7n7Ohhprnmh9p+TufsaH2n5O5+zoYaOeaH2n5O5+xofafk7n7Ohho55ofafk7n7Gh9p+Tufs6GGjnmh9p+TufsaH2n5O5+zoYaOeaH2n5O5+xofafk7n7Ohho55ofafk7n7M6H2n5O5+zoQaOfR2D2pP8A5OvnHU0HtTydfOOroIaa59oPank6+cdTQe1PJ1846ughprn2g9qeTr5x1NB7U8nXzjq6CGmufaD2p5OvnHU0HtTydfOOroIaa59oPank6+cdTQe1PJ1846ughprn2g9qeTr5x1NB7U8nXzjq6CGmufaD2p5OvnHU0HtTydfOOroIaa59oPank6+cdTQe1PJ1846ughprn2g9qeTr5x1NB7U8nXzjq6CGmufaD2p5OvnHU0HtTydfOOroIaa59oPank6+cdTQe1PJ1846ughprn2g9qeTr5x1NB7U8nXzjq6CGmufaD2p5OvnHVDt4a9dvVWrduarlOedPuydNaP2X68xXxr/AOTXPu4mq+ezMZHjYq5w8ruFv2ZiLluac/DfDc9iJjwUnbtOxds/CXXriSaS6pO7r4Tuq+FKh9ZOLSJ3VfCd1XwpmW5iZiPGQRO6ucJ3VzhlKpqpq/LVE/B9Ah9zc4Tuq+FMDRD7q5wndV8KWGiH3VfCd1XwpjGSaInd18J3dfuSpYyNEXYq9zExMeMJTwvfmj4KNu9DvVt35s/aGwNf9DvVt35s/aGwJWWQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABpHZW/tzFfGv/k3dpHZM/8AjuJj3zX/AMm/H+w2GIUPpDP+tZ/tlfexrvpDP/U2v7Z+70eT9U5+oES9IeVMvV5m0HtTFzh7cUUVZV1e33QqO+u3d1G1PvmEvtmiasba/wDdTEfut+z6LFmiKZ2c/wBS3FnOtem5XTOcxXTPv8F5gcRGIsRV/wB0bpfXaP8AC1z3ecbUx4RHgi9kW5td/bq/7ao+ybq3nFgMgywM5GQMMPrImAfGRMPvJiYB55PC/wDnj4JWSLiPzx8Abb6HerbvzZ+0Nga/6HerbvzZ+0NgKyyAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANE7NiZ7fu5RO6uv7t7aT2P69xXxr/5Nce6Nhnwa36Qf1Vr+z/LZfY1r0i3Yy1/Z/l6fJ+qc/VdT7HvS8KHvHg80bRO0MP3vdXY8bdX7PmrAU1z3m3ujf+qdMRMZSjVTNEzETlKVvmvuMPhr1ym5tZzERnk99mimqqaIyznejWaa6c5ndCVRTM25ry3RMRKSL1WBkVzAAYZZyYAfMw+iYB8IuJ/PHwTJhExX8yPgDa/Q71bd+bP2hsDX/Q71bd+bP2hsBWWQEAAFb21i8Tg7VurDRFVVc7ERMZ7/AGfaUCx27iKae9v26Zt1xtUeyYzzmI8PdDYZiJ8YfM0Uz40
xyBS2O1sTjJpm1FFvarpimnPaziYznP8AddxM+1iKKI8KaY/2fQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADnU4q5hO0sRctTlVNdUeH6uiuZ4z+tv/ADKvu1x9PifruM4o5Qh4rGXcZdiu7OcxGUbkcp3y7d85Dnq170PenweFETOURGcz4LvC9md1am7iNnajLKiZ3R8XPnm9fFtxEs4Wu7Ttb6aYjPPLxQ+0LFdu7NUROzM5Z/r7my3Iiua6YiqY2N0UxlCtxWCqud3PdzFMxPtz3u98UzGZ17UdumuqYp3/AO8tkwFq3bt91Xs1R7c4zir9VfRg5t3oqypmMvCZW2GiaJiiq9TlaoyjKE44z3V6v/HnX2ZZvURVZuRRVM5ZT4K7EYW9h6pi5RMRn4+yV5YrzmimLtMz+aNqNz673bs5XKKK4rqy3HXjl+MzqxrbKZ2jhe4ubdNMU0VT4R7ENwvP43K6S6DIyMewZYBjJDxf8yPgmSiYv+ZHwUbV6HerbvzZ+0Nga/6HerbvzZ+0NgSssgIAAPmquij89UU/GcnnGKw81TTF6jOPH8Tx7Rw+FvWoqxVua6ad0bPjv3K+LHZNU0RGHr3eEznGXMF3ExMZxLKHh8VY7muqibmzRGc7Web7tY+zeu93Rt7X60zkCSPKvEUUW9uYryzy/K8rWPs3a4ppivf76cgShF/jrfezbiiuZics8t3s6vq5i7dqrZriqJ/SMxZLfiQPKnEUVU01ZVRTV4TMPKe0LMV1UzFf4Zy8BEoQ6e0sPVMRG3vnKPwy9f4u33c1ztZRET4A9xCjtOxMxERc/wB6cvu+6MfZuVbNO3nnl+X9cgSh8264uURVTnlMZvoAAAAAAAAAAAAAAAAAABzPG7sbiPmVfd0xze7h7mJ7QxFFumapiuqco+Lfj/ZL8Q5l9W53puk4r/0av2R72GuYa5FF2maZmM97v5Ob+Kc32seybUVXpuVUzNNHu965i7FW1OztRXTnt1exV9nfgwucV7NUxM/pKRXiJpqpoqpznPKI9k5w6ePnOTr3U6iumuvfcqmJo8Iec0xNijZs11TteMy+rV2dujauUURseFMEV0TZp/HcnKr2N1FPRax9XbUzct0UWaad9ETuzn/K9/FRVeiK7VP4UTKiqNum3VVNUzO+d/juSIombl7OxEZ0e9iQKq6oixXF2iZiN8T7YedFzZqqommmjOmfxR8Xxeoq7u3HdUzunLfCDg7lVeJ2opmIomI/FPhKidfo7+xsxROc057Uz7YVC22orznOqrZr/LT4Kq5GzcqjLLe4+af23ywywexwaZYAGJQ8X/Mj4JiHi/5kfAG1+h3q2782ftDYGv8Aod6tu/Nn7Q2CErLICAAAADE00zMTMRMx4ZwzERHhAAMTETGUxEx+rIDGUZZZRl7jKPcyAZQABERHhGRlEewAAAAAAAAAAAAAAAAAAAAAAAGjdm+u8T8a/wDk3loWDvW7PbGJquVRTG1XGf8A8m+Po2DNr3b8/wDWUf2f5XFOMw9c5U3aZmVJ2/nOOtxHB/l6O/fKT6m4XajBxGUTGxG6Xhia6IuZTtUxFVP4Xzhq4pommuvPdGW/exdxERnnVFv9fGqSeWTnGvw9pdrERYromKIjfMfiSK8TEYSZ76mnKfBUWr9q5ezmYqzndEzuWFFmnY/FNrOa/bPhENTrWbHtVdtTbinvq5ypy3fF9W7turE91nVnXTlG1OWT0vTRFP8AOtxMR7Kf1VnaNM270XIu7UTEb4hOurJ6OZtT7kbEWqaqLntnON8T8JVdi7FV65TFNcx7s/bEkdpXqbdNE3d1PhRV/iXlRdjvtqImmJ/NMOf822N/x5F5TXcj82zbt1055Qq8TTlemYmZirfEykYGum5ambdOWxvivxzfPaFFUV03Nuaoqj2t+SbyxPVRR87/AHxyN/vjk87b6Hzv4o5G/wB8ckCUTF/zI+CVv98ckXFfzI+ANr9DvVt35s/aGwQ1/wBDvVt35s/aGwJWWQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABzLHTljb/AM2r7y6a5h2j/V398R/q1ePxleRK7Nibl+n4vXtmvu8fRX7qOry7Ev2reJ/1a6aYy8ZlN7T7Nqx9cXLGLwkxFOWU3d7t/rSfVVbu5VTVE7MUxnmh3cVRt5xGefjm88VNzC0TbuxNNVSDTVnDjXTU+cVbnKIpyj9E3A9od3VTTt010zumKozyUec5MW7uxVMEtnxL7dDuxX3UVU02IiafZ8UTFRcrqi3drpiNn2Ruh8YbH1U4aiK7VOUxHjGfsS7eKs3Jt7Vm3NUxllm9v47HL4prnZ9dVMVW8TbmJ9/vQbt6vCVbMxFzON2zVubBesXbtMfhooo35RCoxGE2q9mctnL2OXXiz43O3hg8R3dERn7E6nFV3o2JmZop8M/Yr4wFcTuuRl8Eyxb7qjLPOfbLhPVbtmPUfObOass+xhjMzQEbE/nj4JGaPiPzx8AbZ6HerbvzZ+0Nghr/AKHerbvzZ+0NgSssgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADlHbs5Xb/zqvvLq7mHaNum5i79NUZx3tX3lYsUEVT4xL7pvXI8K55rCcDYzz2Zj4SfwVj3TzaEDFV112bdVVW1MzP+yPRXVG6IzW84GxVGUxPNiMBYic4pnmiqzvapjLZ+L4p8YmFv/A2PdPNj+AscM8wXdjtCqmzTbm3M0RHjNuZjw96RGLtXKqd+FnKnjhT0V10UTTTcriKqdmcp9iP/AAlrLwnm78+Wxm8rivF4W3TTt3rFHj+W50eFGNw92dmzXFdWXulWVYGxV40zzetizRh5mbcTGfvkvmp+KxzM0Xvq/wBDvq/0cWkrNhG76v8AQ76v9ASszNF76v8AQ76v9EEnNHv/AJ4+DHfV/o+aqpqnOQbh6HerbvzZ+0Nga/6HerbvzZ+0NgSssgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADmeN/rb/zKvu6Y5njf62/8yr7rFjwAVQAAS8LhIxFmqra2aonL9Ee7aqs3Joqyzj3Mzvm9Xn+2+vH1zzOr8r4AaYAAAAAAAAAAbl6HerbvzZ+0Nga/wCh3q2782ftDYErLICAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5njf62/8yr7umOZ43+tv/Mq+6xY8AFUe1ixN+vKJimPfLxTcDszRVTOW+fCVnN69S4ze5x/lZqZasfw9GVFWftnaV+KnvsRVNPsjfnKTX+aaZmZpjwiZRa5iq5dymMtnx5OHj8F46vXV2u/l/wDTO+JzxMjxmiYjPdMe+JzfL7/DTTOU5zMZM7MTVFWX4cs//p6McNKbVVVO1E0xGeW+qIfNdFVGW1Hj4T733V/T0f31faGaN+Grz8NqNn4ueumR4s5Ts55bp3JNNNM1d3VVRnvjKKPCfi85rmMLRGVP5pj8se6D8j8XiJNERTNNFc0b8s6djOd/6vCuNm5VT7pmFl1LMfICoAA3L0O9W3fmz9obA1/0O9W3fm
z9obAlZZAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJnLxAczxn9bf+ZV93THM8b/W4j5lX3WLHgAqspmBimq3XTVlO/wAEJmJmJzicpa5uXWO+fymPfF503ppiqcso8ZeETMZ5e2Mma66q5zqnOXyW7dXmZMo+5qiKNmJnfOc5vgRXrarqyiiIpy3zvjP/APeDMxcuxtZ5xTu3bsnlE5TuZ2qs5nanOfHemRdqRT39UxNOznVOW1ERnLziK6f9Gdnf74zyfEXbkTnFc8yLlcTnFdUTllnE+xMi7XtE3Kct9G1T4Tlv3Rm8Lme3Vtfmz3m1VnE7U5x4b/BhckTbWAAAAbl6HerbvzZ+0Nga/wCh3q2782ftDYErLICAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiUX4xdy9Zm1coiiYjOrdmlvi7TVVRMU1bNXslYPGMTsYuML3VycqYnbiM4/wB3PMZ/W3/mVfd0umMoiJnOfe5pjP63EfMq+6rHgAKAAAAAAAAAAAAAAAA97OLxOHpmmzfuW6ZnOYpqmHpqWO85f+pKICJWp47zl/6ks6njvOX/AKkogCXqeP8AOX/qSanjvOX/AKkogCXqeO85f+pJqeO85f8AqSiAJep47zl/6kmp47zl/wCpKIAl6njvOX/qSanjvOX/AKkogCXqeO85f+pJqeO85f8AqSiAJep47zl/6kmp47zl/wCpKIAl6njvOX/qSanjvOX/AKkogCXqeO85f+pJqeO85f8AqSiAJep47zl/6kmp47zl/wCpKIAl6njvOX/qSanjvOX/AKkogCXqeO85f+pJqeO85f8AqSiAJep47zl/6kmp47zl/wCpKIAl6njvOX/qSanjvOX/AKkogCXqeO85f+pJqeO85f8AqSiAJep47zl/6kmp47zl/wCpKIAl6njvOX/qSanjvOX/AKkogCXqeO85f+pJqeO85f8AqSiAJep47zl/6kmp47zl/wCpKIAl6ljvOX/qSizM1VTVVOczOczLAAAKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACm1fEcFrlPU1fEcFrlPUFyKbV8RwWuU9TV8RwWuU9QXIptXxHBa5T1NXxHBa5T1Bcim1fEcFrlPU1fEcFrlPUFyKbV8RwWuU9TV8RwWuU9QXIptXxHBa5T1NXxHBa5T1Bcim1fEcFrlPU1fEcFrlPUFyKbV8RwWuU9TV8RwWuU9QXIptXxHBa5T1NXxHBa5T1Bcim1fEcFrlPU1fEcFrlPUFyKbV8RwWuU9TV8RwWuU9QXIptXxHBa5T1NXxHBa5T1Bcim1fEcFrlPU1fEcFrlPUFyKbV8RwWuU9TV8RwWuU9QXIptXxHBa5T1NXxHBa5T1Bcim1fEcFrlPU1fEcFrlPUFyKbV8RwWuU9TV8RwWuU9QXIptXxHBa5T1NXxHBa5T1Bcim1fEcFrlPU1fEcFrlPUFyKbV8RwWuU9TV8RwWuU9QXIptXxHBa5T1NXxHBa5T1Bcim1fEcFrlPU1fEcFrlPUFyKbV8RwWuU9TV8RwWuU9QXIptXxHBa5T1NXxHBa5T1Bcim1fEcFrlPU1fEcFrlPUFyKbV8RwWuU9TV8RwWuU9QXIptXxHBa5T1NXxHBa5T1Bcim1fEcFrlPU1fEcFrlPUFyKbV8RwWuU9TV8RwWuU9QXIptXxHBa5T1NXxHBa5T1BXgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA//2Q==\n", + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from IPython.display import YouTubeVideo, HTML\n", + "YouTubeVideo(\"URdnFlZnlaE\", width=600,height=400)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* ... 
but sometimes not enough\n", + " * wrapping keras/pytorch models in transformers is tricky\n", + " * caching/saving intermediate outputs is not easy\n", + " * it has to be X,y input" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* Airflow does it all but is just to much\n", + "\n", + "\n", + "\n", + "\n", + "https://airflow.apache.org/" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Why not build one?\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Transformer\n", + "\n", + "* Almost like sklearn transformers\n", + "* Every transformer has `fit_transform` and `transform`\n", + "\n", + "```python\n", + "def fit_transform(self, X, y):\n", + " return\n", + "\n", + "def transform(self, X):\n", + " return\n", + "```\n", + "\n", + "* Those methods return `dict`\n", + "* Inputs can be named **however** you like and can be **whatever** you like\n", + "* Every transformer implements `save` and `load` methods\n", + "\n", + "```python\n", + "from keras.models import load_model\n", + "\n", + "def save(self, filepath):\n", + " self.model.save(filepath)\n", + "\n", + "def load(self, filepath):\n", + " self.model = load_model(filepath)\n", + " return self\n", + "```\n", + "\n", + "* They can do much **more than** just **transform data**." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[nltk_data] Downloading package wordnet to\n", + "[nltk_data] /home/jakub.czakon/nltk_data...\n", + "[nltk_data] Package wordnet is already up-to-date!\n", + "[nltk_data] Downloading package stopwords to\n", + "[nltk_data] /home/jakub.czakon/nltk_data...\n", + "[nltk_data] Package stopwords is already up-to-date!\n" + ] + } + ], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "from steps.preprocessing import TextCounter" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "text_counter = TextCounter()\n", + "\n", + "outputs = text_counter.fit_transform(['calculate featueres for this text',\n", + " 'Get Some Features For This As Well !!!'])\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'X'" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
[HTML table rendering was stripped here; the same dataframe is shown as text/plain below]\n",
+ "
" + ], + "text/plain": [ + " char_count digit_count lower_case_count newline_count \\\n", + "0 33 0 29 0 \n", + "1 38 0 21 0 \n", + "\n", + " punctuation_count space_count upper_case_count word_count \\\n", + "0 0 4 0 5 \n", + "1 3 7 7 8 \n", + "\n", + " caps_vs_length num_symbols num_words num_unique_words words_vs_unique \\\n", + "0 0.000000 0 5 5 1.0 \n", + "1 0.184211 0 8 8 1.0 \n", + "\n", + " mean_word_len \n", + "0 5.800 \n", + "1 3.875 " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "for key, output in outputs.items():\n", + " display(key)\n", + " display(output)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# %load steps/keras/models.py\n", + "import shutil\n", + "\n", + "from keras.models import load_model\n", + "\n", + "from ..base import BaseTransformer\n", + "from .contrib import AttentionWeightedAverage\n", + "from .architectures import vdcnn, scnn, dpcnn, cudnn_gru, cudnn_lstm\n", + "\n", + "\n", + "class KerasModelTransformer(BaseTransformer):\n", + " \"\"\"\n", + " Todo:\n", + " load the best model at the end of the fit and save it\n", + " \"\"\"\n", + "\n", + " def __init__(self, architecture_config, training_config, callbacks_config):\n", + " self.architecture_config = architecture_config\n", + " self.training_config = training_config\n", + " self.callbacks_config = callbacks_config\n", + "\n", + " def reset(self):\n", + " self.model = self._build_model(**self.architecture_config)\n", + "\n", + " def _compile_model(self, model_params, optimizer_params):\n", + " model = self._build_model(**model_params)\n", + " optimizer = self._build_optimizer(**optimizer_params)\n", + " loss = self._build_loss()\n", + " model.compile(optimizer=optimizer, loss=loss)\n", + " return model\n", + "\n", + " def _create_callbacks(self, **kwargs):\n", + " return NotImplementedError\n", + "\n", + " def _build_model(self, **kwargs):\n", + " return NotImplementedError\n", + "\n", + " def _build_optimizer(self, **kwargs):\n", + " return NotImplementedError\n", + "\n", + " def _build_loss(self, **kwargs):\n", + " return NotImplementedError\n", + "\n", + " def save(self, filepath):\n", + " checkpoint_callback = self.callbacks_config.get('model_checkpoint')\n", + " if checkpoint_callback:\n", + " checkpoint_filepath = checkpoint_callback['filepath']\n", + " shutil.copyfile(checkpoint_filepath, filepath)\n", + " else:\n", + " self.model.save(filepath)\n", + "\n", + " def load(self, filepath):\n", + " self.model = load_model(filepath,\n", + " custom_objects={'AttentionWeightedAverage': AttentionWeightedAverage})\n", + " return self\n", + "\n", + "\n", + "class ClassifierXY(KerasModelTransformer):\n", + " def fit(self, X, y, validation_data, *args, **kwargs):\n", + " self.callbacks = self._create_callbacks(**self.callbacks_config)\n", + " self.model = self._compile_model(**self.architecture_config)\n", + "\n", + " self.model.fit(X, y,\n", + " validation_data=validation_data,\n", + " callbacks=self.callbacks,\n", + " verbose=1,\n", + " **self.training_config)\n", + " return self\n", + "\n", + " def transform(self, X, y=None, validation_data=None, *args, **kwargs):\n", + " predictions = self.model.predict(X, verbose=1)\n", + " return {'prediction_probability': predictions}\n", + "\n", + "\n", + "class ClassifierGenerator(KerasModelTransformer):\n", + " def fit(self, datagen, validation_datagen, *args, **kwargs):\n", + " self.callbacks = self._create_callbacks(**self.callbacks_config)\n", + " 
self.model = self._compile_model(**self.architecture_config)\n", + "\n", + " train_flow, train_steps = datagen\n", + " valid_flow, valid_steps = validation_datagen\n", + " self.model.fit_generator(train_flow,\n", + " steps_per_epoch=train_steps,\n", + " validation_data=valid_flow,\n", + " validation_steps=valid_steps,\n", + " callbacks=self.callbacks,\n", + " verbose=1,\n", + " **self.training_config)\n", + " return self\n", + "\n", + " def transform(self, datagen, validation_datagen=None, *args, **kwargs):\n", + " test_flow, test_steps = datagen\n", + " predictions = self.model.predict_generator(test_flow, test_steps, verbose=1)\n", + " return {'prediction_probability': predictions}\n", + "\n", + "\n", + "class PretrainedEmbeddingModel(ClassifierXY):\n", + " def fit(self, X, y, validation_data, embedding_matrix):\n", + " X_valid, y_valid = validation_data\n", + " self.callbacks = self._create_callbacks(**self.callbacks_config)\n", + " self.architecture_config['model_params']['embedding_matrix'] = embedding_matrix\n", + " self.model = self._compile_model(**self.architecture_config)\n", + " self.model.fit(X, y,\n", + " validation_data=[X_valid, y_valid],\n", + " callbacks=self.callbacks,\n", + " verbose=1,\n", + " **self.training_config)\n", + " return self\n", + "\n", + " def transform(self, X, y=None, validation_data=None, embedding_matrix=None):\n", + " predictions = self.model.predict(X, verbose=1)\n", + " return {'prediction_probability': predictions}\n", + "\n", + "\n", + "class CharVDCNNTransformer(ClassifierXY):\n", + " def _build_model(self, embedding_size, maxlen, max_features,\n", + " filter_nr, kernel_size, repeat_block,\n", + " dense_size, repeat_dense, output_size, output_activation,\n", + " max_pooling, mean_pooling, weighted_average_attention, concat_mode,\n", + " dropout_embedding, conv_dropout, dense_dropout, dropout_mode,\n", + " conv_kernel_reg_l2, conv_bias_reg_l2,\n", + " dense_kernel_reg_l2, dense_bias_reg_l2,\n", + " use_prelu, use_batch_norm, batch_norm_first):\n", + " return vdcnn(embedding_size, maxlen, max_features,\n", + " filter_nr, kernel_size, repeat_block,\n", + " dense_size, repeat_dense, output_size, output_activation,\n", + " max_pooling, mean_pooling, weighted_average_attention, concat_mode,\n", + " dropout_embedding, conv_dropout, dense_dropout, dropout_mode,\n", + " conv_kernel_reg_l2, conv_bias_reg_l2,\n", + " dense_kernel_reg_l2, dense_bias_reg_l2,\n", + " use_prelu, use_batch_norm, batch_norm_first)\n", + "\n", + "\n", + "class WordSCNNTransformer(PretrainedEmbeddingModel):\n", + " def _build_model(self, embedding_matrix, embedding_size, trainable_embedding, maxlen, max_features,\n", + " filter_nr, kernel_size, repeat_block,\n", + " dense_size, repeat_dense, output_size, output_activation,\n", + " max_pooling, mean_pooling, weighted_average_attention, concat_mode,\n", + " dropout_embedding, conv_dropout, dense_dropout, dropout_mode,\n", + " conv_kernel_reg_l2, conv_bias_reg_l2,\n", + " dense_kernel_reg_l2, dense_bias_reg_l2,\n", + " use_prelu, use_batch_norm, batch_norm_first):\n", + " return scnn(embedding_matrix, embedding_size, trainable_embedding, maxlen, max_features,\n", + " filter_nr, kernel_size, repeat_block,\n", + " dense_size, repeat_dense, output_size, output_activation,\n", + " max_pooling, mean_pooling, weighted_average_attention, concat_mode,\n", + " dropout_embedding, conv_dropout, dense_dropout, dropout_mode,\n", + " conv_kernel_reg_l2, conv_bias_reg_l2,\n", + " dense_kernel_reg_l2, dense_bias_reg_l2,\n", + " use_prelu, 
use_batch_norm, batch_norm_first)\n", + "\n", + "\n", + "class WordDPCNNTransformer(PretrainedEmbeddingModel):\n", + " def _build_model(self, embedding_matrix, embedding_size, trainable_embedding, maxlen, max_features,\n", + " filter_nr, kernel_size, repeat_block,\n", + " dense_size, repeat_dense, output_size, output_activation,\n", + " max_pooling, mean_pooling, weighted_average_attention, concat_mode,\n", + " dropout_embedding, conv_dropout, dense_dropout, dropout_mode,\n", + " conv_kernel_reg_l2, conv_bias_reg_l2,\n", + " dense_kernel_reg_l2, dense_bias_reg_l2,\n", + " use_prelu, use_batch_norm, batch_norm_first):\n", + " \"\"\"\n", + " Implementation of http://ai.tencent.com/ailab/media/publications/ACL3-Brady.pdf\n", + " \"\"\"\n", + " return dpcnn(embedding_matrix, embedding_size, trainable_embedding, maxlen, max_features,\n", + " filter_nr, kernel_size, repeat_block,\n", + " dense_size, repeat_dense, output_size, output_activation,\n", + " max_pooling, mean_pooling, weighted_average_attention, concat_mode,\n", + " dropout_embedding, conv_dropout, dense_dropout, dropout_mode,\n", + " conv_kernel_reg_l2, conv_bias_reg_l2,\n", + " dense_kernel_reg_l2, dense_bias_reg_l2,\n", + " use_prelu, use_batch_norm, batch_norm_first)\n", + "\n", + "\n", + "class WordCuDNNLSTMTransformer(PretrainedEmbeddingModel):\n", + " def _build_model(self, embedding_matrix, embedding_size, trainable_embedding,\n", + " maxlen, max_features,\n", + " unit_nr, repeat_block,\n", + " dense_size, repeat_dense, output_size, output_activation,\n", + " max_pooling, mean_pooling, weighted_average_attention, concat_mode,\n", + " dropout_embedding, rnn_dropout, dense_dropout, dropout_mode,\n", + " rnn_kernel_reg_l2, rnn_recurrent_reg_l2, rnn_bias_reg_l2,\n", + " dense_kernel_reg_l2, dense_bias_reg_l2,\n", + " use_prelu, use_batch_norm, batch_norm_first):\n", + " return cudnn_lstm(embedding_matrix, embedding_size, trainable_embedding,\n", + " maxlen, max_features,\n", + " unit_nr, repeat_block,\n", + " dense_size, repeat_dense, output_size, output_activation,\n", + " max_pooling, mean_pooling, weighted_average_attention, concat_mode,\n", + " dropout_embedding, rnn_dropout, dense_dropout, dropout_mode,\n", + " rnn_kernel_reg_l2, rnn_recurrent_reg_l2, rnn_bias_reg_l2,\n", + " dense_kernel_reg_l2, dense_bias_reg_l2,\n", + " use_prelu, use_batch_norm, batch_norm_first)\n", + "\n", + "\n", + "class WordCuDNNGRUTransformer(PretrainedEmbeddingModel):\n", + " def _build_model(self, embedding_matrix, embedding_size, trainable_embedding,\n", + " maxlen, max_features,\n", + " unit_nr, repeat_block,\n", + " dense_size, repeat_dense, output_size, output_activation,\n", + " max_pooling, mean_pooling, weighted_average_attention, concat_mode,\n", + " dropout_embedding, rnn_dropout, dense_dropout, dropout_mode,\n", + " rnn_kernel_reg_l2, rnn_recurrent_reg_l2, rnn_bias_reg_l2,\n", + " dense_kernel_reg_l2, dense_bias_reg_l2,\n", + " use_prelu, use_batch_norm, batch_norm_first):\n", + " return cudnn_gru(embedding_matrix, embedding_size, trainable_embedding,\n", + " maxlen, max_features,\n", + " unit_nr, repeat_block,\n", + " dense_size, repeat_dense, output_size, output_activation,\n", + " max_pooling, mean_pooling, weighted_average_attention, concat_mode,\n", + " dropout_embedding, rnn_dropout, dense_dropout, dropout_mode,\n", + " rnn_kernel_reg_l2, rnn_recurrent_reg_l2, rnn_bias_reg_l2,\n", + " dense_kernel_reg_l2, dense_bias_reg_l2,\n", + " use_prelu, use_batch_norm, batch_norm_first)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": 
{}, + "source": [ + "## Step\n", + "\n", + "```python\n", + "glove_dpcnn = Step(name='glove_dpcnn',\n", + " transformer=WordDPCNN(**config.dpcnn_network),\n", + " input_data = [],\n", + " input_steps=[word_tokenizer, \n", + " preprocessed_input, \n", + " glove_embeddings],\n", + " adapter={'X': ([('word_tokenizer', 'X')]),\n", + " 'y': ([('cleaning_output', 'y')]),\n", + " 'embedding_matrix': ([('glove_embeddings', 'embeddings_matrix')]),\n", + " 'validation_data': (\n", + " [('word_tokenizer', 'X_valid'), ('cleaning_output', 'y_valid')],\n", + " to_tuple_inputs),\n", + " },\n", + " cache_dirpath=config.env.cache_dirpath,\n", + " cache_output = True,\n", + " save_output=False, \n", + " load_saved_output=False,\n", + " force_fitting=True\n", + " )\n", + "```\n", + "\n", + "* Building block of pipelines\n", + "* Wraps around transformer and adds functionality\n", + "* easy to plug in outputs from other steps and data sources with `input_steps`, `input_data` and `adapter`\n", + "* transformers are cached/persisted as the pipeline trains (not only after it has trained)\n", + "* outputs are cached by default but you can save outputs for debugging/inspection with `save_output`\n", + "* if you want to always fit step even if it was fit before use `force_fitting`\n", + "* objects are stored in the `cache_dirpath` folder" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "binary_fill\t drop_smaller\t output\t\t watershed_contour\r\n", + "contour_resize\t loader\t\t reader_inference\r\n", + "contour_thresholding mask_resize\t reader_train\r\n", + "detached\t mask_thresholding unet_multitask\r\n" + ] + } + ], + "source": [ + "! ls /mnt/ml-team/dsb_2018/kuba/trained_pipelines/weighted_loss/transformers/" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Pipeline\n", + "\n", + "DAG of steps" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "from steps.base import Step\n", + "from steps.preprocessing import XYSplit, TextCleaner\n", + "from steps.keras.loaders import Tokenizer\n", + "from steps.keras.embeddings import GloveEmbeddingsMatrix\n", + "from steps.keras.models import WordDPCNNTransformer" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "CACHE_DIR = '/mnt/ml-team/minerva/debug/ml_seminar'\n", + "\n", + "xy_train = Step(name='xy_train',\n", + " transformer=XYSplit(x_columns=['comment_text'],\n", + " y_columns=['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\n", + " ),\n", + " input_data=['input'],\n", + " adapter={'meta': ([('input', 'meta')]),\n", + " 'train_mode': ([('input', 'train_mode')])\n", + " },\n", + " cache_dirpath=CACHE_DIR)\n", + "\n", + "text_cleaner = Step(name='text_cleaner_train',\n", + " transformer=TextCleaner(drop_punctuation=True,\n", + " drop_newline=True,\n", + " drop_multispaces=True,\n", + " all_lower_case=True,\n", + " fill_na_with='',\n", + " deduplication_threshold=10,\n", + " anonymize=False,\n", + " apostrophes=False,\n", + " use_stopwords=True),\n", + " input_steps=[xy_train],\n", + " adapter={'X': ([('xy_train', 'X')])},\n", + " cache_dirpath=CACHE_DIR)\n", + "\n", + "word_tokenizer = Step(name='word_tokenizer',\n", + " transformer=Tokenizer(char_level=False,\n", + " maxlen=200,\n", + " num_words=10000),\n", + " input_steps=[text_cleaner],\n", + " adapter={'X': 
([(text_cleaner.name, 'X')]),\n", + " 'train_mode': ([('cleaning_output', 'train_mode')])\n", + " },\n", + " cache_dirpath=CACHE_DIR)\n", + "\n", + "glove_embeddings = Step(name='glove_embeddings',\n", + " transformer=GloveEmbeddingsMatrix(pretrained_filepath='glove.840B.300d.txt',\n", + " max_features=10000,\n", + " embedding_size=300),\n", + " input_steps=[word_tokenizer],\n", + " adapter={'tokenizer': ([(word_tokenizer.name, 'tokenizer')]),\n", + " },\n", + " cache_dirpath=CACHE_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "dpcnn_config = {\n", + " 'architecture_config': {'model_params': {'max_features': 300,\n", + " 'maxlen': 200,\n", + " 'embedding_size': 300,\n", + " 'trainable_embedding': True,\n", + " 'filter_nr': 64,\n", + " 'kernel_size': 3,\n", + " 'repeat_block': 6,\n", + " 'dense_size': 256,\n", + " 'repeat_dense': 2,\n", + " 'output_size': 6,\n", + " 'output_activation': 'sigmoid',\n", + " 'max_pooling': True,\n", + " 'mean_pooling': True,\n", + " 'weighted_average_attention': False,\n", + " 'concat_mode': 'concat',\n", + " 'dropout_embedding': 0.5,\n", + " 'conv_dropout': 0.25,\n", + " 'dense_dropout': 0.25,\n", + " 'dropout_mode': 'spatial',\n", + " 'conv_kernel_reg_l2': 0.0,\n", + " 'conv_bias_reg_l2': 0.0,\n", + " 'dense_kernel_reg_l2': 0.0,\n", + " 'dense_bias_reg_l2': 0.0,\n", + " 'use_prelu': True,\n", + " 'use_batch_norm': True,\n", + " 'batch_norm_first': True,\n", + " },\n", + " 'optimizer_params': {'lr': 0.01,\n", + " 'momentum': 0.9,\n", + " 'nesterov': True\n", + " },\n", + " },\n", + " 'training_config': {'epochs': 10,\n", + " 'shuffle': True,\n", + " 'batch_size': 128,\n", + " },\n", + " 'callbacks_config': {'model_checkpoint': {\n", + " 'filepath': os.path.join(CACHE_DIR, 'checkpoints', 'dpcnn_network', 'best_model.h5'),\n", + " 'save_best_only': True,\n", + " 'save_weights_only': False},\n", + " 'lr_scheduler': {'gamma': 0.95},\n", + " 'unfreeze_layers': {'unfreeze_on_epoch': 10},\n", + " 'early_stopping': {'patience': 5},\n", + " 'neptune_monitor': {'model_name': 'dpcnn'},\n", + " },\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "glove_dpcnn = Step(name='glove_dpcnn',\n", + " transformer=WordDPCNNTransformer(**dpcnn_config),\n", + " input_steps=[word_tokenizer, xy_train, glove_embeddings],\n", + " adapter={'X': ([('word_tokenizer', 'X')]),\n", + " 'y': ([('xy_train', 'y')]),\n", + " 'embedding_matrix': ([('glove_embeddings', 'embeddings_matrix')]),\n", + " },\n", + " cache_dirpath=CACHE_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAUgAAAIbCAYAAACJ5gf3AAAABmJLR0QA/wD/AP+gvaeTAAAgAElEQVR4nOzdeViU5f4G8HvYZQfZV0EFWUREBQzEDTtuqGVqYGFpmh4ty720LNe0UDu2mMfU3LPUXLLc0gIFBFRQQERl35F9mWGW5/eHh/mJihsD77wz3891zSXMDM97zyD3PO/MuwgYYwyEEEIeocF1AEIIUVZUkIQQ0goqSEIIaYUW1wHUSX19PQoKClBVVYWamhoAQGVlJQDAwMAAOjo60NPTg4mJCaysrGBlZQWBQMBlZELUGhWkgkkkEly7dg0pKSm4efMm0tLSkJmZiYKCAtTX1z/XWFpaWrCysoKrqys8PDzg4eEBT09P9OvXD+bm5u30CAghzQT0KXbbSKVSXLx4EWfOnEFMTAwuX76MhoYG6Ovro0ePHvKLo6MjrKysYGdnB3NzcxgZGQEATE1NIRAIUF9fj6amJohEIlRVVaGkpAQFBQUoLS1FZmamvGyLi4shEAjg4eGBoKAgDBo0CMOHD6fCJKQdUEG+gKamJpw4cQKHDx/GH3/8gYqKCnTr1g3BwcEIDg5GUFAQ3N3d22X1uLy8HLGxsbh48SIuXryIy5cvQyaTITg4GGFhYQgPD4etra3Cl0uIOqKCfA5paWnYtm0b9uzZg3v37mHAgAEICwtDWFgY3NzcOMlUXV2N06dP4/jx4zhx4gRqa2sxYsQITJ06FWFhYdDU1OQkFyGqgAryGcTExGDdunX4/fff4eDggIiICLz77rtwcXHhOloLIpEIx44dw65du/Dnn3/C0dERc+fOxYwZM9CpUyeu4xHCP4y0KjExkQ0cOJABYEFBQezYsWNMJpNxHeuZ3L59m82cOZPp6ekxGxsbtmXLFiaRSLiORQiv0HaQj1FSUoJp06bB398fEokE0dHRiImJQVhYGG82u+natSu+//57ZGdn4/XXX8d7770HPz8/nD9/nutohPAGFeRDDh8+jJ49e+Ls2bPYu3cvoqOjERwczHWsF2ZtbY2NGzfixo0bcHJywtChQzFnzhw0NDRwHY0QpUfvQf5PQ0MDZs2ahV27dmH69OmIioqSb4qjSg4cOIDZs2fDwsICP//8M3x9fbmORIjSooIEUFhYiHHjxuHu3bv46aefMGrUKK4jtauCggK8+eabSEhIwJ49ezB27FiuIxGilNR+FfvmzZsICAhAbW0t4uLiVL4cAcDe3h6nTp3C5MmT8eqrr2Lz5s1cRyJEKan1roa3b99GaGgonJ2d8fvvv8PU1JTrSB1GW1sbW7ZsgaurK+bOnQstLS3MmjWL61iEKBW1Lcji4mIMHToUdnZ2OHnyJExMTLiOxIlFixZBKpVi9uzZMDY2xuTJk7mORIjSUMv3IKVSKUJDQ5Gfn4/4+HjajxnA/PnzsWXLFsTFxaFnz55cxyFEKahlQX7yySeIiopCbGwsevXqxXUcpSCRSDB06FAUFxfj6tWr0NfX5zoSIZxTuw9pMjIysH79eqxfv57K8QFaWlrYv38/SkpK8MUXX3AdhxCloHYzyBEjRqC4uBiJiYl0IIfH2LBhA5YtW4a0tDR06dKF6ziEcEqtZpBJSUn4888/ERUVpdByDAgIwMKFCxU2Hpfee+892NnZYcOGDVxHIYRzalWQW7ZsgYeHBwYPHqzQcV1cXKCnp6fQMZ9HXl6ewsbS1taW71H0vEdAJ0TVqM0qtlgshrm5OVavXo3333+f6zgKk5WVhcjISERHRytszHv37sHOzg47d+5EeHi4wsYlhG/UZgaZmJiIuro6jBgxgusoCpOfn4/Ro0ejrKxMoeN27twZ/fr1w99//63QcQnhG7UpyOjoaNja2qJ79+4KG1MqleLgwYOYMmUKQkJCwBjD0aNHMWPGDDg4OKCyshJTpkxB586d4e3tjcTERDDGEBsbi/nz56NLly4oLi7G+PHjYW5uDm9vbxw6dAgA8MMPP0AgEMgPr1ZTU4OoqKgW1+3cuVN+npqZM2cq7HEBwMCBA/HPP/8odExCeIeLg1ByYebMmWzw4MEKHzcnJ4cBYO7u7kwmk7G8vDxmYGDAALBVq1ax7Oxstnv3bgaA+fv7M4lEwo4fP8709PQYADZnzhz2999/s7179zJDQ0MGgMXExDDGGHN1dWUP/4oevq552Yq2a9cupqenp/BxCeETtZlBlpeXw9LSUuHjOjo6yr8WCARwcHCAvb09AGDp0qVwdnbG5MmTYWVlhWvXrkFTUxOjR4+W/9wXX3yBkJAQREREYOXKlQAgP3iEtrb2I8t73HXtwdLSEkKhELW1tR2yPEKUkdoUZH19fbvsHfK4I4w/fJ1AIICZmRmamprk12lo3H/qDQwM5NeNGTMGAJCZmanwnM+rOVddXR3HSQjhjtoUZOfOnVFRUcF1jCeys7MD0HJWypXy8nIA9583QtSV2hSkhYUFSkpKuI7xRPfu3QMAhIaGAvj/mahIJAIAyGQyVFdXAwDYA1tnSSQShWcpLS2FiYkJdHR0FD42IXyhNgXZs2dPpKSktFjNVYTm9+hqamrk1wmFQgAtS6z5fmKxuMXPP1hu586dg5+fH959910AQI8ePQAAq1atQmZmJr7++mt5WZ46dQpSqRRdu3ZFUVERcnNzFfq4EhISaF91ovbUpiAHDhyIxsZGJCQkKGzM+vp6rFmzBgBQVFSEDRs2YO3atcjJyQEArF69GtXV1di0aRMKCwsB3D+SUGNjo3yMTZs2oby8HKWlpSgsLMTff/8t/yBm3bp18Pf3x4YNGzB79myMGjUKXl5eeOONN1BVVQWJRIIJEybA2NhYoY8LuL9Z1IABAxQ6JiF8ozZ70gBAt27dEBYWho0bN3IdBT169EBGRgaU8elPTk6Gr68v/vnnHypJotbUZgYJANOnT8fOnTtpH+On+O677+Dh4cHr090SoghqVZBTp06FUCjE1q1buY4iL2ll24wmPz8fe/fuxb///e/HbsJEiDpRq4K0tLTE/Pnz8dlnn6G4uJiTDHV1dfj444+Rn58PAHj//fcRGxvLSZbHWbRoEaytrTF9+nSuoxDCObV6DxK4P3Pr0aMHgoODsX//fq7jKJVTp05hxIgROHLkCJ0rmxCo2QwSuL+HyI8//oiDBw/iu+++4zqO0sjNzcUbb7yBiIgIKkdC/kftZpDNPv/8c6xduxZ//PGHwg+gyzc1NTUYOnQoGhsbER8f32L3R0LUmdoWpEwmQ0REBH7//Xf8+eefCAoK4joSJ+rr6zF8+HDcuXMH//zzD7p168Z1JEKUhtqtYjfT0NDAnj17MGzYMIwcORJnzpzhOlKHKy8vx/Dhw3Hr1i2cO3eOypGQh6htQQL3T3V64MABhIWFYeTIkWr1nmRaWhoCAwORn5+Pv/76Cx4eHlxHIkTpqHVBAoCOjg52796N5cuXY86cOfLd+FTZ9u3bERgYCGtra8THx8PLy4vrSIQoJbUvSOD+UXOWLVuGEydO4Ny5c/Dx8cHp06e5jqVwRUVFGDduHKZPn44ZM2bgr7/+gp
WVFdexCFFaVJAPGDlyJK5fv46AgAD861//wrhx45Ti4LVtJRQKsXbtWri7uyM5ORnnzp3DV199BV1dXa6jEaLUqCAfYmFhgV9++QV//vknMjMz4e3tjdmzZyMrK4vraM9NKBRiy5Yt6NGjB1avXo2FCxciNTUVgwYN4joaIbygtpv5PAuJRIJt27Zh3bp1yM/Px6RJk/Dee+8hICCA62hPVFJSgh07duDrr79GVVUV3nrrLSxbtkx+rhxCyLOhgnwGEokEBw4cwFdffYXk5GR4enri7bffRnh4uNKUjlAoxKlTp7B9+3b88ccfMDAwwLvvvosPPvgANjY2XMcjhJeoIJ/T5cuXsWPHDhw4cADV1dXo06cPwsLCMGrUKPj6+kJTU7PDshQWFuL06dM4fvw4Tp06hcbGRgwdOhRvvfUWXn31Vejp6XVYFkJUERXkCxIKhTh37hyOHz+O48ePo7CwEEZGRggICEBQUBB8fX3h5eUFFxcXaGlptXl5paWlSE1NRWpqKuLi4hATE4OcnBzo6elhyJAhGDNmDMLCwuQn/iKEtB0VpAIwxpCamoqLFy/i0qVLuHTpEu7cuQPGGHR0dNCtWzc4ODjAxsYGNjY2MDc3h6mpKQQCAYyMjKClpYWamhpIpVLU1dWhtrYWxcXFKCwsRElJCTIzM+VnZDQzM0NgYCD69++PoKAgBAQE0L7ThLQTKsh20tDQgJs3byI9PR0ZGRkoKChAaWkpioqKUFlZierqashkMnkxNhelkZERDA0NYW1tDTs7O1hZWaFr167w8PCAp6cnvZ9ISAeiguTY2rVr8eOPP+L27dtcRyGEPIS2gySEkFZQQRJCSCuoIAkhpBVUkIQQ0goqSEIIaQUVJCGEtIIKkhBCWkEFSQghraCCJISQVlBBEkJIK6ggCSGkFVSQhBDSCipIQghpBRUkIYS0ggqSEEJaQQVJCCGtoIIkhJBWUEESQkgrqCAJIaQVVJCEENIKKkhCCGkFFSQhhLSCCpIQQlpBBUkIIa2ggiSEkFZQQRJCSCuoIAkhpBVUkIQQ0goqSEIIaQUVJCGEtIIKkhBCWkEFSQghrRAwxhjXIdRFbW0tRowYgerqavl1FRUVqK6uhouLi/w6gUCAFStWYNy4cVzEJIT8jxbXAdSJrq4url+/jpqamkduu3HjRovvRSJRR8UihLSCVrE7kI6ODiZOnAhtbe0n3k9PTw+jR4/uoFSEkNZQQXawiIgIiMXiVm/X1tbGa6+9BgMDgw5MRQh5HCrIDjZw4EBYWVm1ertYLEZEREQHJiKEtIYKsoNpaGjgjTfegI6OzmNvNzExQWhoaAenIoQ8DhUkB8LDw9HU1PTI9dra2oiIiHjqe5SEkI5BBcmBvn37ttisp5lYLEZ4eDgHiQghj0MFyZHIyMhHZoo2NjYICgriKBEh5GFUkBwJDw9v8Wm2jo4OIiMjoaFBvxJClAX9NXLE3d0dPXv2hEAgAAA0NTXR6jUhSoYKkkORkZHQ1NQEALi6usLX15fjRISQB1FBcuj111+HVCqFQCDAlClTuI5DCHkI7YvdASoqKlBZWYmqqirU19fLN/ERCoVwc3NDRkYGzM3NcfbsWQCApqYmjI2Noa+vD3Nzc5iZmbW63SQhpP3Q0XzaQCqVIisrCxkZGcjLy5NfcnJyUFRUhHv37qGiokIhyzIwMIC5uTmsra3h6OgIJycnODk5wcHBAa6urvDw8KDdEwlRMCrIZ1RZWYn4+HhcuXIFN27cQHp6Om7evAmhUAgAMDMzg4ODA5ydneHo6Ag7OztYWFjAzMxMPgs0NzeHnp4eOnXqBOD+J9fNpSaRSFBbW9tieQ0NDfLZZ/O/hYWFyM/PlxdxcXGxfDXd2dkZnp6e8PLygo+PD/z9/eHm5tbxTxYhKoIKshW3b9/GmTNnEBsbi8uXL+PWrVtgjMHZ2Rne3t7w8vKCh4cHvLy80KNHDxgZGXGSUywWIzs7G6mpqUhPT5eXd2pqKpqammBubo6AgAD4+/tj8ODBeOmll2hPHUKeERXk/9TV1eH06dM4ffo0zpw5g7t378LIyAiBgYHyggkICHjigSaUiUgkwtWrVxEfH4/4+HjExsYiOzsbhoaGGDJkCIYNG4aRI0fC1dWV66iEKC21LsjGxkacPXsWv/zyC44cOYKGhgb07t0boaGhCA0NRUhIiEp9OHL37l2cPXtWfqmsrISnpycmTJiAiIgIWh0n5CFqWZCXLl3Cli1bcOjQITQ1NWHw4MGYMGECXnnlFVhYWHAdr0NIJBKcP38ev/zyCw4fPox79+4hMDAQM2bMwKRJk6Cvr891REI4pzYF2dDQgF27dmHLli1ITk6Gn58fpk+fjtdee01tSrE1EokE586dw08//YRDhw7BwMAAkZGRmD17Nrp37851PEI4o/IFWV9fj23btmH9+vW4d+8exowZgxkzZtAxF1tRWVmJXbt24T//+Q+ys7Mxfvx4fP755/Dw8OA6GiEdj6mopqYmFhUVxSwtLZmhoSFbuHAhKykp4ToWb0gkErZ3717m4eHBNDQ0WHh4OMvOzuY6FiEdSiVnkOfPn8ecOXOQlZWFDz74APPmzVP71egXJZPJ8Msvv+DTTz9Ffn4+li5digULFqjUh1eEtEalCrKmpgZz5szB7t27ERYWhk2bNtFmLArS1NSEqKgorF69Gg4ODti1axf8/f25jkVIu1KZg1UkJibCz88Pp0+fxtGjR3Hs2DEqRwXS0dHBRx99hLS0NHTp0gXBwcGIioqCCr2+EvIIlSjIH3/8EUFBQXBxccG1a9cwZswYriOpLCcnJ/zxxx9YuXIllixZgrFjx6KhoYHrWIS0C94X5FdffYXp06dj0aJFOHXqFGxsbLiOpPIEAgEWL16Mv//+G7GxsfjXv/6FqqoqrmMRonC8fg9yxYoV+Oyzz7Bp0ya8//77XMdRS+np6Xj55ZdhYWGB8+fPw9TUlOtIhCgMbwty//79mDx5Mn744QdMnz6d6zhqLScnB0FBQfDx8cHx48flR0knhO94uYqdlJSEadOmYd68eVSOSsDZ2RlHjhzB+fPnsXTpUq7jEKIwvJtBymQy9OvXDyYmJjhz5gzNVh4QEBCAkJAQfPnll5wsf8eOHXjnnXeQmJiI3r17c5KBEEXi3Qxyx44dSElJwebNm1WiHPPy8hQ2louLC/T09BQ23vN6++23ERQUhDlz5tDmP0Ql8GoGyRiDi4sLRo8ejW+++YbrOG2WlZWFyMhIREdHcx1FYRITE9GvXz+cO3cOQ4YM4ToOIW3CqxnkpUuXkJOTg1mzZnEdpc3y8/MxevRolJWVcR1Fofr27Yt+/fph//79XEchpM14VZC//vorvLy84OXl1S7jJycnY9iwYRAIBAgLC8O9e/ewcOFCODo6YteuXQCAPXv2QF9fHwKBAF988QUkEgkAYO/evdDR0cFPP/30TMvauXMn0tLSUFxcjJkzZ0IqleLChQv44IMP0KVLFxQUFGDgwIFwcnJCZWUlbt26hddeew2LFy/Gm2++iQEDBiAlJQXA/
ZOHHTx4EFOmTEFISAgYYzh69ChmzJgBBwcHVFZWYsqUKejcuTO8vb2RmJjYLs9fs0mTJuHIkSOQSqXtuhxC2l3HHx/jxYWGhrJ33323XZdRV1fHPDw8mIuLCxMKhSwsLIxlZGS0uM/SpUsZAHbjxg35dTk5OWzcuHHPtSwAzN3dnTHGmFAoZBcvXmSdOnViANiaNWvYmTNn2LRp01htbS3r1q0bc3V1ZYzdP1KRiYkJ8/LyarH85vFkMhnLy8tjBgYGDABbtWoVy87OZrt372YAmL+//4s+Pc8kNjaWAWB5eXntuhxC2huvCtLd3Z2tWLGi3Zdz+fJlpqmpyQIDA9n27dsfub28vJwZGhqyadOmya9bs2YNO378+HMt58GCbObm5sYAsHv37rW4Pioqiu3bt48xxphUKmWurq5MS0tLfrtMJntkvOaxHryPlZUV09HRea6czys/P58BYJcuXWrX5RDS3ni1il1TUwMTE5N2X06/fv2wePFixMfHw9fX95HbO3fujPfeew+7du1CQUEBGGM4d+4chg8f3uZlCwQCAIC5uXmL6+fNm4ewsDB8++23WL16NUQikXz1/sGfe9xYD35vZmaGpqamNud8kubfEe1+SPiOVwVpa2uLoqKidl+OTCbDnTt34OjoiMjISIhEokfuM2/ePOjo6GDjxo1ISkqCv78/tLS02i3T5cuX0bNnT7i6uuKTTz6BoaFhuy2rrQoLCwEA9vb2HCchpG14VZAODg7Izs5u9+WsX78er776KrZv344bN25g+fLlj9zHwsICs2bNwg8//ID//Oc/mDp16gst68FZ4JNERkZCLBZjxIgRAO6XOACl3N6w+Xfk4ODAbRBC2ohXBTlgwACcOnWqXVcR4+LikJycjIkTJ2Lo0KGYNWsWvvzyS/z999+P3Hf+/PloampCbm4uunXr9tzL6tq1K4qKipCbmyu/TigUArh/nu4HFRUVoaCgAGfOnMHevXvlq6+XL19GXl4eamtrAdx/G+LhsR4s0eb7icXi5877rE6cOAFvb+9H3iYghG94VZCTJk1CVVUVzp492y7jHzp0CGFhYS2OSGNqagqZTIaxY8dix44dLe5vY2ODYcOGYdq0aS+0vAkTJsDY2BgJCQmor6/HihUrkJOTA+D+KvzVq1fl912zZg2MjY2xdOlSdO3aFUuXLoWpqSnWrFkDxhjWrFkD4H6RbtiwAWvXrpWPtXr1alRXV2PTpk3y1d9PPvkEjY2NL5T7SaRSKX799VdMnDhR4WMT0tF4tScNAAwbNgyNjY2Ijo5+7AcTHam+vh69evVCSkoKnUf6f/773/9i9uzZuHnzJh3RnfAer2aQAPDll18iLi4OP//8M9dR8O233+K99957pBwFAsFTLzdv3uQodfupqanBp59+ijlz5lA5EpXAuxkkAEyfPh0nTpxAQkJCh38QEBcXhxkzZqChoQFSqRQ3b96Erq5uh2ZQVpMnT8aZM2eQkZEBMzMzruMQ0ma8m0ECwMaNG2FpaYkxY8Z0+PlQDAwMUFNTAw0NDezbt4/K8X/Wr1+Pn3/+GT/99BOVI1EZvJxBAsCdO3fg7++PoKAgHDx4kNPDfKm73bt34+2338b69esxb948ruMQojC8nEEC9zeR+f333xETE4ORI0fKN18hHWvz5s2YMmUKFixYQOVIVA5vCxIAAgMDceHCBaSnpyMkJAS3b9/mOpLaEIvFmD9/PubOnYt169bhiy++4DoSIQrH64IEAB8fH1y8eBEaGhro06cPHYewA2RlZSE4OBhbt27Fnj17sHDhQq4jEdIueF+QAODq6opLly5hypQpmDx5MiIiIjpkn211I5VK8e2336J3795oampCYmIiIiIiuI5FSLtRiYIEAF1dXfznP//B8ePHERcXhx49emDjxo3PvK8zebJLly6hX79+mDdvHmbNmoXY2Fi4u7tzHYuQdqUyBdls1KhRSE1NxYcffoiPP/4Ybm5u2Lp1KxXlC7p+/TomTpyI4OBgmJiY4MqVK1i7di1tNUDUgsoVJAB06tQJn332GVJTUzFo0CDMmTMH7u7u2LZtW7vsf6yKYmNjMXbsWPTq1Qt3797Fb7/9hvPnz7fb6S4IUUYqWZDNXF1dsX37dmRkZGDIkCGYPXs2HBwcMG/ePGRkZHAdT+nU1tbihx9+QO/evfHSSy+hqKgIx44dQ0JCAsaMGcN1PEI6HG83FH8RJSUl2L59O7Zu3YqcnByEhIRg0qRJGD9+PKysrLiOxwmxWIyzZ8/i4MGDOHToECQSCSZOnIiZM2ciMDCQ63iEcEqtCrKZTCbDn3/+iV27duHEiRMQCoUYOHAgXnvtNfzrX/9S+QMt1NXV4fz58zh69CiOHDmCyspKBAQE4PXXX8ebb75Jx3Ek5H/UsiAf1NDQgJMnT+LgwYP4448/UFdXh27dumHYsGEYNmwYgoODYWlpyXXMNhGJRLhy5QrOnTuH06dPIy4uDlKpFH379sWECRMwYcIEODs7cx2TEKWj9gX5oKamJsTGxuL06dM4c+YMkpKSIJPJ0LVrVwQGBiIgIAD9+vWDl5cXjIyMuI77WBKJBHfv3kVSUhLi4+MRHx+Pq1evQiQSwd7eHsOGDcPLL7+M0NBQ3hc/Ie2NCvIJKisrERcXJy+a+Ph4VFZWAgCcnZ3RtWtX+Pr6wt3dHU5OTvJLe59QSyKRoKCgAHl5ecjJycHt27eRnp6O9PR0ZGRkQCQSQUtLCz4+PvJiDwwMhJubW7vmIkTVUEE+B8YY7ty5g7S0NMTExODrr79Gly5dUFxc3OJcMGZmZnBwcEDnzp1hZmYGc3Nz+b/a2towNjYGAGhqasq/FolE8kO3NX9dW1uLyspKVFRUyP8tLCxEcXExpFIpAEBLSwuampoYMmQIevXqBQ8PD3h6esLT05OOck5IG1FBvgCZTIYhQ4agvLwciYmJ0NPTQ1VVFfLz85GdnY28vDwUFhaioqJCXm7NBSeVSuUn3JJIJPKjEOnq6soLrVOnTtDT04ORkVGLgjUzM4ONjQ0cHR3h6OgIZ2dnaGhowM/PD8HBwTh48CBnzwkhqqj9TuSswtauXYu4uDhcvnxZvkeJqakpTE1N4e3t3eF5du7ciREjRmD79u0vfPpZQsijaAb5nK5cuYL+/fvjiy++wIcffsh1HLlFixbhu+++Q0JCAjw8PLiOQ4hKoIJ8Do2NjejTpw9sbW1x5swZaGgoz45IEokEAwYMQF1dHS5fvoxOnTpxHYkQ3lOev3AeWLZsGYqKirBz506lKkfg/oc1e/fuRW5uLpYsWcJ1HEJUgnL9lSux+Ph4fP3119iwYQMcHR25jvNYrq6u+O9//4vNmzfj6NGjXMchhPdoFfsZiEQi9OnTBzY2Njhz5gwEAgHXkZ7orbfewrFjx3Dt2jU4OTlxHYcQ3qKCfAYfffQRNm/ejJSUFF7sp11fX4++ffvCysoKf/31FzQ1NbmORAgv0Sr2U1y9ehVRUVH46quveFGOwP1zdx88eBCXL1+mk2kR0gY0g3wCsViMvn37wtzcHH/9
9ZfSr1o/bOPGjVi0aBEuXLiAoKAgruMQwjtUkE+wfv16fPbZZ0hJSUG3bt24jvPcGGMYN24crl69imvXrtFhzAh5TrSK3Yrc3FysXLkSH330ES/LEQAEAgG2bdsGqVSKGTNmcB2HEN6hGWQrxo4di/T0dKSkpPD+BFV///03hg4diu+//x7Tp0/nOg4hvEEzyMf47bffcOzYMXz//fe8L0cAGDhwIBYtWoT3338fKSkpXMchhDdoBvmQhoYGeHl5ITg4GLt37+Y6jsJIJBIMHDgQ1dXVSEhIoF0RCXkGNIN8yIoVK1BdXY2vvvqK6ygKpaWlhQMHDqCoqAgLF5DMHbYAACAASURBVC7kOg4hvEAF+YC7d+9i06ZN+Pzzz2Ftbc11HIVzdHTEDz/8gO+++w6//fYb13EIUXq0iv2A8ePHIy0tDSkpKdDW1uY6Trt55513cPjwYVy9epVO1kXIE1BB/s+lS5cQHByMEydOYOTIkVzHaVcNDQ3o27cvLCwscP78edoVkZBWUEHi/ikUAgMDYWRkhHPnznEdp0PcuHED/v7+WLx4MZYvX851HEKUEp1yAcDu3btx5coVXL16lesoHcbb2xvr1q3DBx98gJCQEAwePJjrSIQoHbWfQTY0NMDNzQ2jR4/Gli1buI7ToRhjeOWVV5CUlIRr166hc+fOXEciRKmo/afY3377Laqrq7FixQquo3Q4gUCAHTt2QENDg/awIeQx1Log6+rq8NVXX2Hu3LmwsrLiOg4nzMzMsHv3bhw7dkztZtCEPI1aF+SGDRsgEokwb948rqNwKiQkBB9//DE+/PBDJCcncx2HEKWhtu9BVlVVwdXVFR9++CE++eQTruNwTiqVIjQ0FKWlpUhISIC+vj7XkQjhnNrOIL/88ktoaGhg7ty5XEdRCpqamti9ezeKi4sxf/58ruMQohTUsiDLy8uxefNmLFq0CMbGxlzHURoODg7473//iy1btuDAgQNcxyGEc2q5iv3xxx9j+/btuHv3Lq1KPsbMmTPx888/4+rVq+jSpQvXcQjhjNoVZG1tLZycnLBo0SJ89NFHXMdRSkKhEAEBAejUqROio6NVer90Qp5E7Vaxt23bBrFYTKcgeAI9PT3s27cPKSkpWLVqFddxCOGMWs0gJRIJunXrhnHjxmHTpk1cx1F633//PebMmYMzZ85gyJAhXMchpMOpVUHu3bsXU6ZMwa1bt3hzjmuuvf7664iJicG1a9dgYWHBdRxCOpRaFaSfnx969OiBffv2cR2FN6qqquDr6wsvLy+cOHGCd+cGJ6Qt1OY9yHPnzuHq1au0jd9zMjU1xZ49e3D69Gl8++238utlMhnWrVsHMzMz5ObmcpiQkPajNgW5ZcsWBAcHo0+fPlxH4Z3g4GAsW7YMCxYswLVr11BcXIzQ0FB8/PHHqKmpodM3EJWlFqvY5eXlcHBwwNatWxEZGcl1HF6SSqUYOnQoSkpKUF5ejurqaojFYmhoaCAkJATnz5/nOiIhCqcWM8idO3dCT08Pr732GtdReIsxBj8/P2RkZKCiogJisRjA/VXt6OhoVFVVcZyQEMVTi4LcsWMHJk+eTHvNvKDs7GwEBgZi8+bNYIxBJpO1uJ0xhtOnT3OUjpD2o/IFGR0djbS0NEybNo3rKLx05MgReHl5ISUlBRKJ5LH30dTUxLFjxzo4GSHtT+UL8scff4SPjw/8/Py4jsJLV65cQUNDQ6vlCABisRjHjx9/4n0I4SOVLsj6+nr88ssvePfdd7mOwlsrV67EwYMHYWxs/MR9smtqanDp0qUOTEZI+1Ppgvzjjz8gEokwYcIErqPw2oQJE5CRkYFhw4a1uqG4jo4Ojh8/3sHJCGlfKr2Zz+TJk1FQUIALFy5wHUVl7Nq1CzNnzoREIpF/kt3MxcUFd+/e5SgZIYqnsjNIsViMP/74A6+88grXUVRKZGQkUlNT0bdvX2hqara4LSsrC7du3eIoGSGKp7IF+ddff6GyshJjxozhOorKcXFxQXR0NFavXg0tLS1oaWkBALS1tXHixAmO0xGiOCpbkEeOHIGfnx9cXFy4jqKSNDU1sXjxYsTHx8PV1RVaWloQi8U4fPgw19EIURiVfA9SJpPBwcEB//73v7Fs2TKu46g8oVCIpUuXYuPGjdDQ0EBZWRk0NDQgkUhQXV2NpqYm1NfXA7h/LvKH37sE7h816HH/FQ0NDR/76bmpqSkEAgG0tbVhaGgIPT09dOrUqdX7E/IitLgO0B6uXLmCoqIijB07lusovFRVVYXi4mKUlZWhoqICVVVV8kt1dfVjvxYKhTA0NERdXR3Mzc25fggwNTWFpqYmTExM0KlTJ5iamsovJiYmLb43NTWFmZkZrKysYGlpCUtLS/nbBkS9qeT/gnPnzsHa2hre3t5cR1EqJSUlyMvLQ15eHnJzc1FSUoLi4mKUlpaitLQURUVFKCsrg0gkavFz+vr6jy0WGxsb+fWdOnWCnp6efAZnYmICTU1NmJqaQktLC0ZGRgAAXV3dx+7y2drM73EzS6lUipqaGgCQz04bGxshFArlM9Tq6mpIpVJUVVWhsbGxRckXFBS0+P7h/cgFAoG8KK2srGBrawtLS0vY2NjA3t4ezs7OcHR0hL29PXR0dNr0OyHKTSVXsV9++WVYWlpi7969XEfpUOXl5cjMzERGRgays7ORk5PTohCFQqH8vjY2NrC2toatra185mRnZ/dIKXTu3FktSqCiogKlpaUoKyuTv3CUlZWhuLgYJSUlKC0tRXFxMQoLC+UvIBoaGrCxsZEXpqOjI5ydneHm5obu3bvD2dn5kU/6Cb+oXEE2NTXB3NwcX3/9tUrufy0Wi5Geno6bN2/Ky/DWrVvIzMxERUUFgPsn3XJ1dYWTk1OLP9zmrx0dHaGrq8vxI+EnxhiKi4uRm5uLvLw85OfnIzs7W/5ClJ2djbKyMgD3N57v2rUr3N3d0b17d7i5ucHNzQ09e/aEmZkZx4+EPAuVK8gLFy5g8ODBuHv3Lu8/wa6srERqaiqSkpKQlpYm/1ooFEJLSwtOTk5wdXWVXzw9PeHl5UUzF45VVVXhzp07uHv3rvySmpqK69evy98asLW1hZeXFzw9PdGnTx94eXnB29ubXriUjMoV5Keffoq9e/fizp07XEd5LrW1tbh8+TJiY2MRFxeHhIQElJaWAgDs7e3h4+MDHx8f9OrVCz179oS7uzt9WstDBQUFuH79OpKTk5GSkoKUlBTcvHkTEokEenp68PHxQWBgIAIDA9G/f3906dKF68hqTeUKMigoCF5eXti6dSvXUZ7o7t27+OeffxAbG4vY2FikpaVBKpXC2dkZ/fv3h7+/P3r16oVevXqhc+fOXMcl7aipqQmpqalISUlBUlISYmNjkZycDLFYDBsbG3lZBgUFISAggD5h70AqVZC1tbXo3Lkzdu3ahddff53rOC3cu3cPf/31F2JiYnDx4kUkJSVBW1sbPj4+CAoKQp8+fRASEkIzBgLg/nvNKSkpiImJQVJSEqKjo5GdnQ0DAwP0798foaGhCA0NhZ+
fH51psh2pVEH+/vvvCAsLQ2FhIWxsbLiOg6SkJBw6dAinTp3CtWvXIBAI0K9fP/l/7v79+6vFJ8REMTIzM3H27FmcPXsW58+fR2VlJWxtbTFs2DCMHTsWw4cPp6PmK5hKFeTChQvx559/4vr165wsXyaTIT4+HocOHcKhQ4eQnZ2NLl26YNSoUQgNDcXgwYNhYmLCSTaiWqRSKZKSknD27Fn8+eefuHjxIvT09DBy5EiMHz8eo0aNkm97Sl6cShXkSy+9hF69euH777/v0OXeunUL27Ztw759+1BQUIDu3btj/PjxGD9+PPr27duhWYh6KikpwZEjR3Do0CFcuHABWlpaGD58ON555x0MHz6ctmp4QSpTkCKRCKampvjhhx865NSuIpEIR44cwdatW3HhwgU4ODjgrbfewmuvvQYfH592Xz4hrbl37x6OHj2KPXv2yP9vvvPOO5g6dSocHBy4jscvTEVcunSJAWCZmZntupzKykq2fPlyZmFhwbS0tNiYMWPYiRMnmEQiadflEvIiMjIy2IIFC5ilpSXT1NRkr7zyCrt69SrXsXhDZQryq6++YhYWFkwmk7XL+NXV1WzFihXM1NSUmZmZseXLl7P8/Px2WRYhiiYSidiBAwdYnz59mEAgYK+88gpLTk7mOpbSU5njQcbGxuKll15S+CYPUqkUGzduhIuLCzZs2IAPPvgAWVlZ+Oyzz2Bvb6/QZRHSXnR0dDBp0iQkJCTgt99+Q3Z2Nnx9fTFp0iQUFhZyHU9pqUxBxsXFoX///godMz09HQMGDMCSJUswa9YsZGVlYfny5fRJNOEtgUCAMWPGICkpCYcPH8aVK1fg7e2Nn376ietoSkklCjInJwcFBQV46aWXFDIeYwxffvkl/Pz8IJFIkJSUhFWrVsHU1FQh46uakpISHDx4EKtXr+Y6CnlGAoEA48aNQ3JyMqZMmYKpU6di1KhRKCkp4TqacuF6HV8R9u3bx7S1tVl9fX2bxxIKhSwiIoJpa2uzL774gonFYgUkVF1paWns3//+NwPA3N3duY6jtPz9/dmCBQu4jtGqmJgY1rVrV9alSxd248YNruMoDZWYQcbHx8PHx6fNexFIJBKEh4fj999/x8mTJ7F48WLO9nvNy8vjxbgeHh6IiopS6JjKQpHPlYuLC/T09BQ2nqIFBQUhPj4e9vb2GDJkCG7evMl1JOXAdUMrQlBQEJs5c2abx5k/fz7T19dnMTExCkj14u7evcuCg4N5My5jTOVmkO35XCmz2tpa9tJLLzFXV1dWVVXFdRzO8X4GKZPJkJKSgt69e7dpnH/++QcbNmzAli1bEBQUpKB0zy8/Px+jR4+WH3RV2cdVRer8XBkaGuLIkSNobGzEvHnzuI7DPa4buq0yMjIYAHb58uU2jTNw4ED28ssvKyjVi1u5ciUDwExMTNi7774rv76hoYF98cUXbOrUqaxPnz5s6NChLCUlhTHG2LVr11hoaCgDwEaPHs3Ky8vZggULmIODA/vpp5+eOO6zqK2tZStWrGCTJ09m7733HgsJCWEbN25ssc0pHppBPikvY/d/b+PHj2eLFi1ib7zxBgsODmbJyclMJpOx3377jU2fPp3Z29uziooKFhkZyczNzZmXlxdLSEh46jIkEgk7f/48mzt3LnN2dmb5+fksJCSEOTo6soqKiuf+HTxtvNYeC2OMSSQS9vPPP7PIyEg2YMCA53p8XNq7dy/T1NRkt27d4joKp3hfkAcOHGBaWlqssbHxhcfIy8tjAoGAnTx5UoHJXtzDZcMYY++88w5LT0+Xfz9s2DBmZWXFqqurGWOM1dXVMQ8PD+bi4sKEQiELCwtjGRkZTx33aZqamtjAgQPZG2+8waRSKWOMse3btzMA7NixY62O/bS83bp1Y66urvJlmJiYMC8vLyaTyVheXh4zMDBgANiqVatYdnY22717NwPA/P39n7qM0tJSdvHiRdapUycGgK1Zs4adOXOGTZs2jdXW1j7T437w8QiFwieO19pjaZaTkyMf73keH5ekUimzs7NjK1eu5DoKp3hfkIsXL2Y9e/Zs0xhHjx5lAoGANTQ0KChV2zxcNnFxcQzAYy/Hjx+X3+/y5ctMU1OTBQYGsu3btz913GcRFRXFALCbN2/KrxOLxWz79u0tZmMPjv0seaOioti+ffsYY/f/GF1dXZmWlpZ8PDc3N/bgCo5MJmNWVlZMR0fnmZfRPMa9e/ee6zE//HgezvTweE97LDKZ7JHxnvb4lEF4eDgbN24c1zE4xftDE1+9erXN7z/W1NRAR0cHnTp1UlAqxUpISICnpydSU1OfeL9+/fph8eLFWLt2Lb777juFLPvChQsA0OIgB1paWnj77bfblHfevHmoq6vDt99+i4qKCohEIkgkEvntD+8RJRAIYGZmJj8NxbMso3kMRZ2nu7XxnvexPO66hx+fMjA1NUVxcTHXMTjF+w9prl271uaCtLOzg0gkQlFRkYJSKda9e/eQlZWF+vr6R26TSqXyr2UyGe7cuQNHR0dERkY+cn7rF9G84XBmZqZC816+fBk9e/aEq6srPvnkExgaGj5Xrmd9TjpCWx+LssrKyoKdnR3XMTjF64LMz89HaWlpmwsyMDAQ+vr6OHLkiIKStd2DM5AePXqgsbER69ata3GftLQ0fPPNN/Lv169fj1dffRXbt2/HjRs3sHz58ieO+yx69eoFAFi9ejVkMpn8+uzsbJw8efKxP/MseSMjIyEWizFixAgAkI/NnvHoe8/6nLTFsz5XbX0syqiiogIXLlxAaGgo11G4xfU6flscP36cCQQCVllZ2eaxZs2axZycnFhdXZ0CkrVN165dmb6+PsvJyWGMMdbY2MhcXFwYADZ16lS2Z88etnTpUjZs2DD5hx6xsbHs9ddfl48xa9YspqGhwS5cuNDquM/izp07TF9fnwFggwcPZt988w1btmwZmzFjhvxDm/r6egaAOTs7P3NeY2NjBoCdPn2a7dmzh1laWjIALC4ujuXm5jJnZ2cGoMUn5XZ2dgwAa2pqeqZlNI/xrB/MPOl38KTxnvZYampqGABma2v7yFitPT6uzZs3j1laWr7Qc6dKeF2Qa9askf9RtlVhYSHr3LkzmzJlikLGa4slS5YwGxsb9uuvv8qvy8rKYmFhYczMzIxZW1uz6dOns9LSUsYYY7/++iuzsLBosbH8Rx99JN9UpfkDm8eN+yxSUlLYyy+/zExNTZmdnR2bO3eufCPiO3fusPfee0/+AcnGjRtZRUXFE/Myxtg333zDjI2NWb9+/VhsbCzbtGkTMzU1ZWPGjGGff/65fLyVK1eyqqoqtnHjRvl1ixcvZg0NDa0uo66ursUY06dPZ1euXHnh38HTxnvSY8nJyWFLliyR/2xUVBRbs2bNMz0+rpw9e5ZpaGiwrVu3cpZBWfD6iOKRkZEoLy9vdVXveZ08eRJjxozBokWLsGbNGoWMSQifJCQkIDQ0FCNHjsT+/fu5jsM5Xr8HmZ6eDg8PD4WNN3LkSGzbtg3r1q3Du+++C7FYrLCxlZFAIHjqRdX2yV
XHx/ysfv/9dwwZMgTBwcF0+LP/4W1BMsaQkZGh0IIEgLfeegsnTpzA/v370bdvX1y5ckWh4ysTdv8tlideevTowXVMhVLHx/w0jY2NWLJkCcaMGYOJEyfit99+o9MR/w9vCzIvLw+1tbUKL0gAGDFiBK5duwZzc3MEBgZiyZIlaGpqUvhyCOHaxYsX4evrix9++AHff/89fvzxR2hra3MdS2nwtiDT09MBoN1e7V1dXXHu3DmsW7cOmzdvhq+vLw4cONBiUxdC+Co9PR3h4eEICQlBjx49kJaWhhkzZnAdS+nwuiCtrKzQuXPndluGhoYGPvzwQ6SkpMDPzw9vvPEGfHx8cPDgQSpKwkvp6emIiIiAt7c3bty4gYMHD+Lo0aOwtbXlOppS4nVBtsfq9eN07doVe/bswfXr1+Hj44Pw8HB4eXlh06ZNqKio6JAMhLwomUyGkydP4pVXXoG3tzdSUlKwf/9+JCcnY/z48VzHU2pUkM/Bw8MD+/btw/Xr1xESEoJPP/0U9vb2ePPNNxEdHd2hWQh5moKCAqxcuRKurq4YPXo0qqqqcODAAaSkpGDixInQ0ODtn3+H4e12kNbW1vj4448xd+5czjLU1tZi//792Lp1K5KSkuDm5obx48dj/Pjx6NOnD2e5iPoqLS3FkSNHcOjQIZw/fx6mpqaYMmUKpk+fDnd3d67j8Q4vC7Kurg5GRkY4fvw4Ro8ezXUcAEBSUhL27t2Lw4cPIycnB126dJGXZUBAAL1ak3aTn58vL8WYmBjo6upi+PDhmDhxIsaNGwddXV2uI/IWLwsyOTkZvr6+SE1NhaenJ9dxHpGamopffvkFP//8M27evAkLCwsMHjwYoaGhGDZsGFxcXLiOSHisoaEBly5dwtmzZ3H27FlcuXIFnTp1wpAhQzBhwgS88sorMDIy4jqmSuBlQR45cgTjx49HfX290h7Dsdn169dx6tQpnD17FtHR0WhoaED37t0RGhqKIUOGoH///rC3t+c6JlFiDQ0NSExMxD///IMzZ84gLi4OYrEY3t7e8hfdIUOG0EyxHfCyIKOiorBx40bk5+dzHeW5iEQiXLx4scUrv1QqhaOjI/r374/AwEAEBgbCz8+P/rOrsdu3byMuLg5xcXGIjY1FSkoKJBIJHBwcEBoaitDQUAwdOhQ2NjZcR1V5vCzIOXPmICUlBf/88w/XUdqktrYWCQkJiI2NRXx8POLi4lBWVgYdHR34+vqiV69e8PHxkV9MTU25jkwUSCwWIyMjAykpKUhOTkZycjKuXLmCsrIy6Orqonfv3ggMDERAQAD69+8PZ2dnriOrHV4W5MiRI2FlZYWdO3dyHUXhmmcPiYmJ8j+c5m0tu3Tpgp49e8LHxwdeXl7o3r07unfvDhMTE45TkycRi8XIyspCZmYmbt68ievXryMlJQWpqaloamqCjo4OPD094ePjg969eyMgIIDWIpQELwuyR48eCA8Pf+wRs1VRfn4+UlJSkJKSgmvXruH69evIzMyUH23IysoK7u7u8sJ0c3ND165d4ejoqLDzsZAnE4lEyM3NRU5ODjIzM3Hr1i3cunULmZmZyMrKkh+d3M7ODt7e3vD19ZW/2Hl4eND+z0qKdwUpk8lgYGCArVu34s033+Q6DmckEgmys7Mf+WPMzMxEbm6ufFdIQ0NDODk5wdnZGY6OjnB0dISzszOcnJxgbW0NGxsbWnV/CqFQiLKyMhQWFqKgoAB5eXnIzs5GXl4e8vLykJub2+LkVubm5vIXqwdfuLp3706fLvMM7woyLy8PTk5OiImJQVBQENdxlJJIJEJWVhZyc3Plf8A5OTny7/Py8lqc0EtXVxeWlpawsbGBtbV1i6/NzMxgamr6yIWvq/WNjY2oqqpqcamurkZFRQXKyspQVlaGoqIilJaWyr+uqamR/7xAIICNjQ2cnJzg6OgIJycn+QtQ87/teXwA0rF4V5CXLl1CUFAQ8vLyWpyKlDyfoqKix5ZB89fFxcUoLS1FZWUlGhoaHvl5gUAAU1NTeYFqa2vDyMgIurq60NfXh76+PnR1dWFkZAQtLS2YmZnJf9bQ0PCRVUptbe1HzgbY1NT02LMWVlVVyU+IVV9fj6amJtTU1EAqlaKyshJSqRQ1NTXyn6+rq5OX4ePO9KipqQkzMzNYWlrCysoKNjY2sLKyeuyLhp2dHb03qEZ4d17s/Px8aGpq0tFH2sjW1ha2trbw8fF56n2bmpoemXU9fBGLxaipqYFIJEJDQwPKysrkxSWRSFBVVSUf78GCa9bY2AihUNjiOg0NjcfOVB8s2McVsa6uLlxdXeVlbWho2Oos2NTUlFZ7Sat4V5AFBQWwtraGpqYm11HUho6ODqysrGBlZdXuy1q7di1+/PFH3L59u92XRcjT8G4H4YKCAtrzhBDSIaggCSGkFVSQhBDSCipIQghpBe8KsqioiAqSENIheFWQ9+7dQ2NjI+zs7LiOQghRA7wqyKKiIgCggiSEdAheFWRJSQmA++ejIYSQ9sargiwvL5fvFkYIIe2NVwVZVlYGc3NzOgEWIaRD8KppysvLYWlpyXUMQoia4F1BWlhYcB2DEKImeFeQNIMkhHQUXhVkWVkZzSAJIR2GVwVJM0hCSEfiVUHSDJIQ0pF4VZCVlZV0lj5CSIfhTUGKRCIIhULeniyKEMI/vCnI5jPLGRsbc5yEEKIueFeQNIMkhHQU3hRkdXU1AJpBEkI6Dm8KklaxCSEdjQqSEEJawZuCrK6uhp6eHnR1dbmOQghRE7wpyJqaGpo9EkI6FG8Ksra2lgqSENKheFOQ9fX1MDAw4DoGIUSN8KYgGxsb0alTJ65jEELUCK8KUk9Pj+sYhBA1wquCpBkkIaQjUUESQkgreFOQQqGQVrEJIR1Ki+sAz6qxsRGdO3fmOgZRoNraWowYMUK+nz0AVFRUoLq6Gj179pRfJxAIsGLFCowbN46LmESN8aogaQapWnR1dXH9+nX5bqQPunHjRovvRSJRR8UiRI43q9j0HqTq0dHRwcSJE6Gtrf3E++np6WH06NEdlIqQ/8ebghSJRLQftgqKiIiAWCxu9XZtbW289tprtJMA4QRvClIsFj91pkH4Z+DAgbCysmr1drFYjIiIiA5MRMj/401BSqVSaGpqch2DKJiGhgbeeOMN6OjoPPZ2ExMThIaGdnAqQu6jgiScCw8PR1NT0yPXa2trIyIigtYcCGd4U5ASiQRaWrz50J08h759+8LFxeWR68ViMcLDwzlIRMh9vClImkGqtsjIyEdmijY2NggKCuIoESFUkERJhIeHt/g0W0dHB5GRkdDQ4M1/UaKCePO/jwpStbm7u6Nnz54QCAQAgKamJlq9JpyjgiRKIzIyUv47dnV1ha+vL8eJiLrjTUHShzSq7/XXX4dUKoVAIMCUKVO4jkMIf/bFBiBf/SL8IJFIUFtbi/r6eohEIlRVVQEA6urqHtl7pqGhASKRCG5ubsjIyICpqSl+/fVXmJqaPjKuqakpBAIB9PX1oaurCzMzM+jq6kJfX79DHhdRH7wpSIFAAJlMx
nUMtSORSFBSUoLCwkKUlpaioqICFRUVqKyslH/94HUNDQ2or69HbW0tJBLJCy937ty5L/RzxsbG0NXVhZGREYyNjWFubt7qxcLCAtbW1nBwcKByJY/Fm4LU0NCgglQwiUSC/Px8ZGVl4e7du8jLy0NhYSGKiork/5aUlLR43nV1dR8pGhsbG3h4eMDc3BwGBgbQ19eHkZERdHV1YWxsLJ/pNc/89PT0HjnwiI6OziP7WzfPQB8klUrlR/95cGYqFArR2NiImpoaiEQi1NbWoqamRl7ed+7cQUJCgrzIHz6CkLGxMezt7WFjYwN7e3vY2trC3t4eLi4ucHV1hYuLC+0ProaoIFWcVCpFVlYWbty4gZs3b+Lu3bvIyspCVlYWcnNz5au6+vr6cHZ2lheDp6cn7OzsYGtrCzs7O9jZ2cHa2rpDS0JLSwtmZmaPXG9hYdHmsSUSCcrKylq8GBQWFsovaWlpKCgoQElJifxnrKys5GXp6uqKbt26wdvbGz169IChoWGbMxHlw5uCFAgEYIxxHUOpZWdn49q1a0hPT8eNGzeQnp6O9PR0CIVCCAQCODk5yf/ABw0aBBcXF/kfu7W1NdfxO5SWlhZsuCxBUgAAIABJREFUbW1ha2sLPz+/Vu/X0NAgf1F58N+jR4/i9u3b8ue2S5cu8PT0hJeXFzw9PeHt7Q0fHx/aTZLneFOQNINsqbCwEElJSfJLQkKCfLZja2sLLy8vDBgwADNnzoSnpyd69eoFIyMjjlPzj76+Pry9veHt7f3Y25tnm6mpqUhLS8PFixfxzTffoKGhAVpaWnBzc0OfPn3kF39//1YPzEGUDxUkT6Snp+PChQv4+++/ER0djcLCQggEAri5uaFv375YsmQJ+vbtC19fX1rd60DNbz88eMQhqVSKW7duITExUX45dOgQGhoaoK+vj379+mHQoEEYNGgQAgIC6EDQSkzAeLLe6uDggPnz5+PDDz/kOkqHuHPnDk6fPi0vxZKSEhgZGWHAgAEYMGAAAgIC4OfnBxMTE66jkmcgkUiQlpaGxMRExMTE4MKFC8jKyoKuri4CAgIwaNAgDB48GMHBwbS9rxLhTUE6OTlh7ty5mD9/PtdR2oVMJsPVq1dx/PhxnDhxAklJSTAwMED//v0RFBSE4OBghISE0OqZCikqKkJMTAzOnj2LmJgYpKWlwcDAAIMHD0ZYWBjGjh2rdu8NKxveFGSXLl0we/ZsLFy4kOsoCiOVSnH69Gns378fJ0+exL1799C9e3eEhYVh9OjRGDBgAM0m1Mjt27flL5DR0dFgjCE4OBjjx49HeHg4ndWTA7wpyG7dumHq1Kn4+OOPuY7SZikpKfjpp5+wf/9+FBcX46WXXsK4ceMQFhYGd3d3ruMRJVBVVYVTp07h2LFjOHbsGJqamjBixAhERkZi1KhRdH6mDsKbfbF1dXUfe9RpvhAKhdi2bRt8fX3Rq1cv/Pbbb5gxYwYyMzMRExODBQsWUDkSOVNTU0yaNAl79+5FcXExtm3bhoaGBkyYMAF2dnb48MMPkZ2dzXVMlcerguTjuZHLy8uxcuVKdOnSBXPmzEHv3r0RHR2N27dv47PPPkPXrl25jkiUnIGBAd58802cPn0aubm5WLRoEQ4fPozu3btj0qRJSEhI4DqiyqKCbCeVlZWYN28enJyc8PXXX+Odd95BdnY2duzYgeDgYDrwBnkh9vb2WLx4Me7cuYNdu3bhzp078Pf3x6BBg6go2wEVpIJJJBJ888036N69O/bu3Yv169cjNzcXq1atgo2NDdfxiIrQ0tJCeHg4EhMTcf78eQBAQEAAIiMjkZ+fz3E61UEFqUDx8fHw8fHBggULMG3aNGRmZmLOnDl0pBjSrgYNGoQLFy7gl19+wcWLF9GjRw+sX79erXesUBQqSAVgjCEqKgoDBgyAs7MzUlNTsW7dOhgbG3MdjaiR8ePHIy0tDcuWLcMnn3yCkSNHorS0lOtYvEYF2UbV1dUYM2YMlixZglWrVuHkyZNq9cFL80Fw21tTUxNiYv6PvfMOi+Jq2/i9S+8dVJo0qUZBRUURQdAYsUVNYjQxrxJLjI3EWGKi2DUaTTRGozExlhhjfFExxooNG4qFIgjSVXqHhYXd5/uDj3nZAAq67LC787uuuXRnz5xzzy57zzPnnHnONZm0Jc9oaGhg8eLFiIqKQnJyMjw9PbnP7TWQG4NUV1fvcAZZVFSEIUOGICYmBleuXMEXX3yhFIMv1dXVWLNmDfr379/uk5eLioqwZMkSGBkZwdfXt13baqBv375y/0BC7969ERMTgz59+mDo0KE4e/Ys25LkErkxSE1NTVRXV7Mtg0EoFGLs2LEoKCjA1atX0b9/f7YlyQxNTU2EhoYiKSnplfq5srKyWl3W2NgYa9eulWkeSjs7O2hqasqsvfbCwMAAf/31FyZMmIC3334bMTExbEuSO+TmOTYdHR1UVVWxLYPhyy+/xL1793Djxg3Y29uzLUfmaGlpwdzcHMXFxW06Li0tDR9++CGuXr3a6mN4PB6MjY2Rn5/fVpmvxOHDh2XSjixQUVHBzz//jOfPn2PcuHGIjY3lsj21AbmJIHV1dVFZWcm2DABAXFwctmzZgm+//Rbu7u5sy5EbsrOzERwcLDOj46hHVVUV+/fvR3l5OVavXs22HLlCbgxSR0cHFRUVbMsAAGzevBmurq6YNm0aK+1fvHgR6urq0NXVxZUrV1BSUoLJkyeDx+Nh8ODBiIuLAwDExMSgc+fO2LVrF4D6AaWFCxdi8eLFCA0NxdChQxEaGori4mKIRCJcunQJ8+fPR9euXfH06VP4+fnBxsaGWYwrNDQU06dPx7Jly7BkyZI2X7B+/fVXJCQkICcnBzNnzmT2v0hXS2zatAkaGhr47LPPmEEIgUCADRs2YNq0aejduzcCAwMRGxsLIsLx48cxffp0WFlZobi4GFOmTIGJiQk8PDxw584dAPXJQ44cOYIpU6Zg0KBBTFtGRkZ49913MX/+fMyfPx8WFhbg8/m4ffv2C9t92WcqSywsLLB48WLs2LGjyTo/HC+A5IQtW7ZQly5d2JZBdXV1ZGxsTFu2bGFVx6xZs0hDQ4NKSkqIiKiqqorMzc1p0qRJTJna2lry9fUlsVhMZWVl5OTkRMuXL2fez83NJScnJ7Kzs6OcnByKiooiLS0tAkBr166lc+fO0bRp06i4uJi8vb0pJCSExGIxERGlpKSQiooKtfVPCAA5Ozszr1+mq7i4mIiInJ2dmbYKCwtp8uTJ9ODBA4m6Q0JC6NGjR8zroKAgMjc3p5KSEsrKyiIdHR0CQKtXr6b09HTav38/ASBvb2/mmIyMDAmNtbW1EtpOnz5NAOiLL754abt5eXktfqbl5eVt+tykQUFBAamoqNCxY8dk3ra8IjcGuXv3bjIwMGBbBvMDun79Oqs64uPjCQD98MMPzL6RI0eSjo4OlZWVERHR8ePHaefOnUREtHTpUgJAz549k6hn3759BIAWLlxIRETdunUjAFRYWMiU2bZtGwGg+Ph4iWOdnJxe2yBbq6vB
IJ88eUJTp06lvLw8ifI3b94kAM1uJ0+elDi3BsRiMZmbm5O6urrEvsYaxWIxVVVVERFRUVERdenShTw8PKi6urrN7Tb+TNnC1dWVVqxYwbYMuYG7xW4jDbcnbE8Cd3Nzg7+/P3766ScQEdLS0iASiSAUCvH7778DAPbv34/JkycDAKKiogCgybo0DbeS169fBwBmmpKxsTFTpmGKiJ2dncSxfP7r//m0VlcDI0aMQGVlZZOVDaOjo+Hm5gaqv+hLbMHBwQDQZAoWj8eDkZGRRJao5so0LIkwd+5c5OfnY//+/Uy6sba02/gzZQt9ff0mS95ytIzcGKSuri5EIhHrcyEbMjx3hOddP/30Uzx48ADR0dHYuHEjNm7ciLfffhu7d+9GfHw8bG1tmekxDWb27xRZDefzoqUbnj59CgAoLCyU+jm0VdemTZvwxx9/YMOGDRL7CwsLkZaW1my/qEgkem2dx44dw4EDBxAWFoaePXvKrF1pk5WVxeUEaANyY5ANP3S2o0hTU1O4uLjgzJkzrOoAgFGjRsHKygorVqxAZWUl3N3dMXPmTNy5cwezZ8/GJ598wpRtiMhOnTolUUfDnMTGi079GxcXl2aPfVXq6upeWdeIESOwdOlSLF26FH///beExobBksYkJCRg+/btr6U3Ly8PM2fORP/+/SUmkN+/f79d25U2sbGxePbsGQYOHMi2FPmBpVv7NnPr1i0CQOnp6WxLoXXr1pGhoSEVFRWxLYVWrVpFPB6PYmNjiai+z8zZ2ZmCg4MlylVWVpK7uztZWlpK9PfNnTuXfHx8SCgUEhGRra0tAZAYRLh37x6pqKiQsbExnT59mqqqqujChQukp6dHACg1NbXVeh0cHEhbW5syMjLapKtr164EgEQiEdXW1pK/vz8ZGBhQTEwMEREJBAKys7MjADR16lQ6cOAAffnllxQUFESlpaUS59Yw0ERE1KVLFwLAtFNWVkYAqHPnzszn+fbbb5O2tjY9fvyYOa6wsJAWLVrUpnbZGJhpzIcffkguLi4kEolY1SFPyI1BPnr0iADQw4cP2ZZCJSUl1LlzZ/roo4/YlkL5+fm0YMECiX179+6lGzduNClbVlZGCxcupKCgIAoNDaWFCxfSypUrqbq6mioqKigsLIwZYPj4448Z8yEiunz5Mvn4+JCuri7Z29vTunXryNfXl2bMmEHnz5+nurq6VuldvHgxderUiY4ePdoqXYWFhbRy5UpG15o1ayg7O5sZxNHT06O1a9dScXExpaWl0ciRI8nIyIgsLCzo448/ZgZztm/fztSxatUqKikpoS1btjD7Fi1aRPn5+bR48WJm3+bNm+nnn38mAOTu7k7z5s2jefPm0dSpU8nBwYE2b95MRNRiuy/7TGVJZGQk8Xg8+uOPP1hpX16RmzVpnj9/ji5duuDq1asd4hbhxIkTGDNmDHbs2CExp4+Do6ORnp6Ofv36YcCAAfjrr7/YliNXyE0fpKGhIQDZZY95GaNGjcLKlSvx6aefYu/evWzLYR0ej/fSLTExkW2ZSkdqair8/f3RpUsX7Nu3j205cofcPIutpaUFDQ2NDmOQALBs2TLU1tYiJCQEcXFxWL9+vdKuWy0nNyJKxT///IMPP/wQNjY2OHPmDPcM9isgNxEkUB9FdiSDBICwsDD8+eef2Lt3LwYMGIDU1FS2JXEoOSKRCCtWrMCIESMwbNgwXLp0iVtT+xXhDFIKjBs3Drdu3YJQKISXlxc2b94s10vUcsgv165dQ79+/fDNN99g9+7d2L9/Pxc5vgZyZ5ClpaVsy2gWZ2dn3Lx5E3PmzMFXX30Fd3d3hIeHsy2LQ0lIS0vDO++8A19fXxgbG+POnTuYOnUq27LkHrkzyI4YQTagpaWFVatWITExEd7e3nj77bcxcOBAhIeHcwsocbQLSUlJmDlzJlxdXREbG4tTp07hzJkzcHV1ZVuaQsAZZDtgY2ODgwcP4saNGzAxMcG4cePg4uKCH3/8sUMl/eWQX65cuYLRo0fDzc0NFy9exHfffYeHDx/irbfeYluaQiF3BinrPHqvQ9++fXH8+HEkJCQgICAAn332GaytrTFnzhzcunWLbXkcckZubi62bt0KT09P+Pn5oaioCH/99RcSExMxY8YMqKmpsS1R4ZArgzQzM5PLbNTOzs7YuXMnMjIysHDhQly4cAH9+vWDi4sL1qxZg4yMDLYlcnRQBAIBfv/9d4wYMYJ57r5Xr164efMmrl69ijFjxkglqxJH88jNkzQA8P3332PdunV4/vw521Jem/j4eOzfvx+//vorcnNz4ebmhpEjRyI4OBgDBgxQitUROZqnoKAAf//9NyIiIvDPP/+gqqoK/v7++OCDDzBu3DiZLmCm7MiVQR4+fBiTJ0+GUChUmKtmbW0tzp8/jxMnTiAiIgLZ2dmwtLREcHAw3nrrLQwaNIh5iohDMamrq0NMTAzOnTuHkydPIjo6GhoaGggICMDIkSMxZswYJv0bh2yRK4O8ePEihgwZgvz8/CYJUxUBIsK9e/dw8uRJnDx5EjExMeDxeOjRowf8/Pzg7+8PX19fGBkZsS2V4zWoq6vD3bt3cfnyZVy+fBlXr15FeXk5OnXqhODgYAQHByMoKAja2tpsS1V65Mog4+Li0L17d8TFxSnFaoINa25funQJly9fRmxsLACge/fu8Pb2Ru/evdG7d290796d66DvwGRmZuLOnTuIjo7GnTt3cPPmTVRUVKBTp04YNGgQ/Pz8MHjwYLi6unJdKx0MuTLIvLw8WFhY4OLFi/D392dbjswpKirC1atXcfXqVdy5cwcxMTEoLy+HpqYmevTogd69e8PT0xPu7u5wdXV9YZZwDulTV1eHlJQUxMfH4+HDh7hz5w7u3LmDvLw8qKiowMXFBb1790b//v3h5+fHJCLm6LjIlUGKRCJoaGjg4MGDePfdd9mWwzpisRiJiYnMDzE6OhoPHz5k5lpaW1vD1dUV7u7ucHNzg6urKxwcHLiU+69JZWUlUlNTkZSUhISEBMTHxyMhIQGPHz9m+scdHByYCL9Pnz7w9PTkHvmTQ+TKIIH6tUqWLVuGOXPmsC2lQyIWi5Geno6EhASJH++jR4+YdVO0tbVhZ2cHe3t72NvbM//v2rUrLCwsYG5uzvJZsEtVVRWePn2Kp0+fIjU1FWlpaUhNTWW2vLw8AICKigrs7OyYC1BD5O7q6sos9MUh38idQXbv3h1jx47FypUr2ZYiVxARMjMzm/zYG143/OgBQF1dHRYWFrCyspL4t3PnzjA2Nm6ydXQzEIlEKCoqarIVFhbi6dOnyMnJkfi38ap/WlpaTS4kDZuDgwM0NTVZPDOO9kbuDHLIkCFwcnLCzp072ZaiUFRUVCAzM7OJWeTk5ODZs2d4/vw5cnNzmWVvG6OlpSVhlvr6+tDS0oKmpiYMDQ2hrq4OXV1d6OrqMoNJenp6UFWVTEf679H5qqqqJqtYlpWVMasFlpSUQCgUoqKiAhUVFRAKhSgpKUF1dTUEAgFjhM0lONHQ0ICJiQm6dOmCzp07M/82/n/
DvxzKi9wkzG3A3NxcItrhkA66urpwc3ODm5tbi2UiIyMxYsQIvP/++/j888+bjcqqq6tRWloKgUCA6upqpKeno6amBpWVlSgvL2dWNPz3I6MikajJes0aGhpNprro6OgwSYn19fWhoaEBHR0d3L9/H87OzujevTs0NTWhpaUFIyMjmJiYwMjIqEnUy0225mgNcmmQd+/eZVuG0nHr1i2MHj0aw4cPx86dO5tEf2wzdepUnDt3DufOnePMj0NqyN3jKGZmZlwEKWNiY2Px1ltvoX///jh06FCHM0cAWL9+PcrLy7Fx40a2pXAoEHJnkNwttmxJTk7G0KFD0bNnTxw/fhwaGhpsS2oWc3NzfPXVV9i4cSPS0tLYlsOhIMilQZaWlqK6upptKQpPZmYmAgICYGdnh+PHj3f4Edt58+bB0dERn3/+OdtSOBQEuTRIAHKZ9kyeKCwsxJtvvgljY2P8/fffcjHJWVVVFVu2bMGxY8dw5swZtuVwKABya5DcbXb7IRAIMHr0aJSXlyMiIkKusgkFBgZi9OjRWLBgAWpra9mWwyHncAbJIYFIJMKkSZOQmJiIc+fOwdramm1Jbebbb79FWloafvjhB7alcMg5cmeQ+vr60NbWVoikuR0NIsKMGTPwzz//4MSJE3KbTMHe3h6fffYZli9fjpycHLblcMgxcmeQQH0ShqysLLZlKBzLli3Dvn37cPToUfj4+LAt57VYunQpjIyM8NVXX7EthUOOkUuDtLGx4QxSyuzZswfr1q3D7t27FWJlPG1tbaxbtw579+7F7du32ZbDIafI3bPYADBt2jRkZ2dzI5VS4vLlyxg6dCgWL16MsLAwtuVIlcGDB0MoFCIqKopLRsvRZuQygrS2tkZmZibbMhSCxMREjB07FqNHj8aKFSvYliN1tm/fjujoaOzfv59tKRxyiFwapI2NDWeQUqCwsBAjR46Eo6Mjfv31V4WMsDw8PPDxxx/jiy++aDarDwfHi5BLg7S2tkZVVRWKiorYliK3VFdXY9SoURCJRIiIiFDoBaJWr14NkUiEtWvXsi2FQ86QW4MEwEWRrwgRYdq0aUhISMCpU6cUPoO4sbExVqxYga1btyIpKYltORxyhFwapI2NDQBwI9mvyKZNm3DkyBH8+eefcHV1ZVuOTJg5cyZcXV25pTo42oRcGqS2tjZMTEw4g3wFLly4gCVLlmDjxo0IDAxkW47MUFFRwfbt23H+/HlERESwLYdDTpBLgwS4yeKvQkZGBt577z28++67WLBgAdtyZM7AgQPxzjvvYP78+U2WcuDgaA65NUhuJLttCAQCvP3227C0tMTu3bvZlsMamzZtQk5ODrZu3cq2FA45QG4Nkosg28bUqVORnp6OY8eOKfSI9cuwsrLCokWLsHr1ajx79oxtORwdHLk2SC6CbB2bN2/G0aNHceTIEdjb27Mth3W++OILdO7cGYsXL2ZbCkcHR24N0sbGBs+ePWOWAOVonhs3bmDJkiVYu3YthgwZwracDoGGhgY2bNiAAwcO4OrVq2zL4ejAyOWz2ABw7do1+Pr6Ijs7G5aWlmzL6ZCUlJTA09MTrq6uOHXqlEI+KfM6DB8+HLm5uYiOjoaKigrbcjg6IHIdQQLcZPGWICJMnToVtbW1+O233zhzbIZvv/0WcXFx2Lt3L9tSODoocmuQXbp0gaqqKjdQ0wLff/89jh8/jt9++w2mpqZsy+mQuLq6Yvbs2ViyZAkKCwvZlsPRAZFbg1RVVUXnzp2RkZHBtpQOx927d7Fo0SKEhYUhICCAbTkdmrCwMKipqWHVqlVsS+HogMitQQKAg4MDnjx5wraMDkVFRQUmTZqE/v37Y8mSJWzL6fDo6+tj1apV+OGHHxAbG8u2HI4OhlwbpJOTE5KTk9mW0aGYM2cOSkpK8Pvvv3MDD61k6tSp8PLywvz589mWwtHBkGuDdHR0REpKCtsyOgwnT57Er7/+ih07dqBTp05sy5Eb+Hw+tm7disjISPz1119sy+HoQMjtNB8AOHbsGCZMmICKigpoaWmxLYdVCgsL4eHhgWHDhuHXX39lW45cMmXKFERGRiIxMVGpnzbi+B9yHUE6OTlBLBYjNTWVbSmsM3v2bKioqGDLli1sS5FbNm7ciLKyMnzzzTdsS+HoIMi1QTo4OIDH4yn9bXZ4eDiOHDmC3bt3w8jIiG05couFhQWWLl2KDRs2ID09nW05HB0AuTZIbW1tdOnSRakHavLz8zFjxgxMnz4dw4cPZ1uO3DN//nzY2Njgiy++kNhfVFSE9evXIzc3lyVlHGwg1wYJ1N9mK3MEOXPmTOjo6HC3hVJCXV0d27Ztw59//omzZ89CJBJh165dsLe3x5IlS3Ds2DG2JXLIEFW2Bbwujo6OShtBRkRE4NixYzh37hz09PTYlqMwBAUFITg4GDNmzICOjg4ePXoEIoKqqipiYmLYlschQxTCIM+ePcu2DJkjEAgwb948TJw4UamWTpAFz549A5/PR3p6OlRVVSEWiwEAdXV1uHnzJsvqOGSJQtxiZ2dnQyAQsC1FpqxZswb5+fncrbUUqa2txXfffQcnJyf8888/AOpNsTGJiYnccg1KhEJEkA1Tfdzd3dmWIxOSk5OxefNmrF+/nkv1JiXq6urg6uqK9PT0F+YYraurQ0JCAjw9PWWojoMt5D6CdHR0BI/HU6p+yHnz5sHJyQmzZ89mW4rCoKqqioCAAOZ2uiVUVFS4fkglQu4NsmGqj7KMZB8+fBhnzpzBrl27oKoq9zcAHYqffvoJW7ZsAY/HA5/f/E+Dz+dzBqlEyL1BAsrzTLZAIMDnn3+O//znP+jfvz/bchSSefPm4dSpU9DQ0Gj2AlRbW8sN1CgRCmGQypLV5/vvv0dJSQlWr17NthSFZvjw4bh58yZMTU2bNcm4uLgmgzcciolCGKQyRJAlJSXYuHEjQkNDuUw9MuCNN97A3bt34ebm1sQkhUIhEhMTWVLGIUsUwiCdnZ2RlZWFyspKtqW0Gxs3bgQRYcGCBWxLURq6dOmC69evY/jw4RJ9klw/pPKgEAbp5uYGIsKjR4/YltIuPH/+HN9//z2+/PJLLhmFjNHR0cHx48exbNkyZp+qqiru3bvHoioOWaEQBung4AAtLS3Ex8ezLaVdWLVqFQwNDfHJJ5+wLUUp4fF4CAsLw86dO8Hn8yEUCnHr1i22ZXHIAIWYJ6KiooJu3bohISGBbSlSJy0tDT///DN27Nih9EmB2WbGjBno1KkTJk2ahPv37yM5ORllZWUA6vslm+viaWk/ABgYGDQ7ncjQ0BA8Hg8qKirQ19eHmpoadHV1oampyf0NyBiFMEgAcHd3V8gIctWqVejatSumTJnCthS5prq6Gvn5+cjNzUVhYSFKS0tRUlKCkpISlJaWNruVl5ejqqoKNTU1qKioQG1trUSd3bp1Y+VctLW1oaGhAV1dXairq8PQ0BCGhoYwMDBocTM2NoaZmRlMTU1hZmbGrVfUShTKIHfv3s22DKmSnZ2NgwcPYufOndyk8BbIycnB06dPkZ2djczMTO
Tl5SEvLw+5ubnIz89Hfn4+cnJyUF5eLnEcj8dr1lgMDQ1ha2sLAwMD6OnpQUdHB+rq6tDV1YWamhr09fWhoqICQ0ND8Pl8GBoaAqgfuDEwMGiir6X9IpGIiT5b2l9XV4fy8nImCq2uroZAIIBAIEB1dTUqKytRU1MjYfQ5OTlISkpqYvT/PvfGZtmpUyeYm5vD1NQUlpaWsLS0hLW1NaytraGvr//K340ioDC/Ojc3N2RkZKC8vFxhUn9t2rQJ5ubmmDRpEttSWEEkEiEzMxMpKSlISUlBZmYmY4QNptg4cYSZmRksLCxgbm6OTp06oWvXrowBmJmZwczMDObm5jAzM2P9h6+iotLigJupqalU2xKLxSgqKmIuGLm5ucjNzUVBQQHy8/Px/Plz3Lt3D/n5+cjOzpboEtDX12fMssE4u3btCicnJzg6OsLc3FyqWjsacr1oV2OSk5PRrVs33L59G3369GFbzmtTWFiIrl27YtWqVQq/HGlWVhYePXrEGGFycjKSk5ORlpYGoVAIADAyMoKtrW2TH6u1tTWsrKxgZWUFTU1Nls9EMSgpKWlyIWr8/7S0NFRXVwOoN1BHR0fGMB0dHdGtWze4ubkx0bU8ozAGKRKJoKenhx07duCjjz5iW85rs3z5cmzfvh0ZGRnQ1dVlW45UKC0tRUpKCuLj43H37l0kJCTg4cOHyMvLA1Bvgvb29i1uHB2H4uJipKamIj4+HgkJCUhNTUVqaiqSkpJQUVEBAOjcuTPc3d3h5ubG/Ovl5SVXK0YqjEECgKenJ4KCgrBx40a2pbwWAoEANjY2mD17NlasWMG2nFeirKwM0dHRuHXrFm7fvo27d+8iOzsbQP0orbu7Ozw8PPDGG28w/zcxMWFDh8u2AAAgAElEQVRZNcfrQkTIyMhAfHw8YmNjERcXh7i4ODx69AhCoRCqqqro1q0b+vTpA29vb/Tt2xdvvPEG1NTU2JbeLAplkJMnT0ZxcTFOnTrFtpTXYs+ePZg9ezYyMjLk4rFCIkJcXByuXbuG27dv4/bt20hMTIRYLIaVlRW8vb3h7e3NmKGNjQ3bkjlkTF1dHR4/foy4uDjcv38ft2/fRnR0NMrKyqCpqQlPT0/m78TPz6/D5DlVKINct24ddu3aJfdLdvbs2RM9evTAvn372JbSIqmpqTh//jyuXbuGixcv4unTp9DV1UWPHj3Qq1cv9OrVC76+vrCzs2NbKkcHJjU1FdeuXcPdu3eZrbq6Gvb29ggMDMSAAQMQEBAAKysrVvQplEGeOHECY8aMQWlpqdyOZF+6dAn+/v4dbrCptLQUp0+fRkREBC5evIjnz59DT08PgwYNgr+/P/z9/dGzZ88W8yhycLSGqqoqREVFITIyEpGRkbhz5w5EIhHc3d0RGBiIUaNGwdfXV2bT3hTKIFNSUuDk5IRbt27B29ubbTmvxLhx45CTk4OoqCi2peDZs2c4ceIEwsPDERkZCSLCoEGDMGTIEPj7+6N3797c/EyOdqW8vBxXrlxBZGQkzpw5g7i4OJiYmGDEiBEYM2YMhg0b1r6DPqRAiEQi0tbWpr1797It5ZV4+vQpqaqq0sGDB1nTUFxcTD/88AP169ePeDwe6ejo0Pjx42n//v1UVFTEmi4ODiKilJQU+uabb2jgwIHE5/NJS0uLxo4dSydOnKDa2lqpt6dQBklE5OnpSZ9//jnbMl6JVatWkYmJCQkEApm2KxKJ6Ny5c/T++++TpqYm6ejo0IcffkgnT56UuRYOjtaSm5tLu3fvpoCAAOLz+dS5c2f64osvKDExUWptKJxBTp48mYYPH862jDYjFovJwcGBFixYILM2KysracuWLdS1a1cCQP3796effvqJSktLZaaBg0MapKam0tdff002NjYEgHx9fSkiIoLEYvFr1atwBrlhwwaytLRkW0abOXPmDAGguLi4dm+rrKyM1q9fT+bm5qSjo0Pz5s2jhISEdm+Xg6O9EYlEdObMGQoODiYej0eenp509OhREolEr1Sfwhlkg9Hk5OSwLaVNjB8/ngYMGNCubdTW1tLGjRvJ2NiY9PX1acmSJZSXl9eubXJwsMW9e/do/PjxxOfzyc3Njf7+++8216FwBpmfn08A6MyZM2xLaTUFBQWkrq5Ov/zyS7u1ER0dTT179iQtLS36+uuvuQEXDqUhISGBJkyYQADo/fffp9zc3FYfq3CT1hpSNslTSvw//vgDqqqqGD9+vNTrrqmpQWhoKPr16wcjIyM8ePAAYWFhHWrphtzcXBw5cgRr1qxhWworlJaWtku9JSUlUikj77i6uuLIkSOIiIjAtWvX4ObmhoMHD7bu4HY0btYIDg6m9957j20ZrcbHx4cmTZok9XpzcnLIx8eH9PX1ae/eva/dYd0eJCQk0CeffEIAyNnZmW05MqO2tpbWrVtHAwYMIBUVFanVKxAIaPXq1dSvXz/i8/ltLuPt7S23s0BaQ3l5Oc2ZM4d4PB7Nnz+f6urqXlheIQ3yq6++kpsfW0pKCvF4PDp9+rRU683JySFXV1dydHTs8AMwAoFA6QySiKiqqoqMjIxI2nFKa+ptqcy7775Ly5Ytk6qejsjvv/9OmpqaNGXKlBcO4CjkYxA9e/bEmjVrUFFR0eFThR08eBBmZmYIDAyUWp01NTUYNWoU6urqcPnyZXTp0kVqdbcHyprHUUtLC+bm5iguLpZ5vS2VOXz4sFS1dFTee+89GBkZYcyYMejcuTPWrVvXbDmF64ME6tOeicVixMbGsi3lpRw6dAgTJ06U6iN7K1asQGJiIiIiIjq8OXJwsMWwYcPwww8/YMOGDbh8+XKzZRTSILt27QojI6MOP1ATGxuLpKQkvPvuu1Kr8/nz5/juu++wevVq1haVagwRYdu2bZg8eTJmzZoFDQ0N8Hg8ZmuJ0tJSLFy4EIsXL0ZoaCiGDh2K0NBQJuL5888/YWxsDB6PJ7Fm9Y4dO8Dn8/HTTz8BqM+tuWHDBkybNg29e/dGYGBgmy+cL6qjsrISBw4cwMSJE+Hj44MbN27A09MTtra2uHbtGpKSkjBmzBiYmprCxcUFd+7cabaN5ORkjBw5EkZGRujTpw8iIyNb1T5Qn+AhNDQU06dPx7Jly7BkyZImKym+rIxIJMKRI0cwZcoUDBo0CESE48ePY/r06bCyskJxcTGmTJkCExMTeHh4SJxHa77j6Oho9O3bF7Nnz8ZXX30FVVXVJmvlsMHUqVMxbNgwLFmypPkCsrrnlzWDBw+mkJAQtmW8kLCwMOrUqdMrT2Jtju+++44MDAyourpaanW+Dt9//z3x+XwqKCggIqK1a9cSAAoNDZUoh0Z9kGVlZeTk5ETLly9n3s/NzSUnJyeys7Oj4uJipm4AEvPbMjIyaOLEiczrkJAQevToEfM6KCiIzM3N2/S00IvqEIlElJycTABIX1+fIiIiKD4+ngCQra0tbdy4kUpKSigmJoYAkJ+fn0Tdzs7OBIDmzZtHZ8+epZ07d5K2tjbx+Xx68ODBS9uvra0lb29vCgkJYQbhUlJSSEVFhelfbE2Zhs+u4
XsQi8WUlZVFOjo6BIBWr15N6enptH//fgJA3t7ezHGt+Y6dnJzIyMiIaf+dd95p03Sb9iQyMpIA0OPHj5u8p7AGuWDBAurduzfbMl5Iz5496ZNPPpFqnR988AGNGDFCqnW+DiNHjiQej0c1NTVERBQbG0sAqG/fvhLlGhvk0qVLCQA9e/ZMosy+ffsIAC1cuJCIiGpqasja2ppGjhzJlFm2bBnFxMQQEdHNmzcJQLPbyZMnW6W/NXWIxeImg0xdunSRMB+xWEympqZkYGAgUX+DQTY27K1btxIA+vDDD1/a/rZt2wgAxcfHS9Tr5OTEtN+aMi2dR7du3ZqUMTc3J3V1dWZfa75jU1NTAkBbt24lkUhEsbGxHeaR1rq6OlJXV282SYxC3mID9QM1sbGxTdYy7iikp6fj/v37GDt2rFTrLS0t7VCLJQUFBYGImCzvDQMyAQEBLR7TkOrt3zk9Bw0aBAC4fv06AEBdXR3z5s1DREQEnjx5AqFQiKSkJHh6egKov61zc3MD1QcCEltwcHCr9Lemjua6Cv6tncfjwcTEpMU5j41XWRwzZgwAICEh4aXtnz17FgCaJCZunJezNWVaOo9/7+PxeDAyMmIWUwNa9x3/+OOP0NXVxfz58+Ht7Y2KigrWV5ZsQEVFBfr6+s3OCVXIUWygfqCmpqYGiYmJ6N69O9tymnD06FEYGhrCz89PqvVaWlriwYMHUq3zdfj000+hpaWFadOmISoqCsnJyQgLC8PSpUtbPKbhh5ueng4PDw9mv4WFBQBIrDMdEhKCFStWYPv27ejfv7/EZPvCwkKkpaWhsrISOjo6Em2IRCKoqKi8VL806mgrDedpY2Pz0vafPn3K6Gwp63ZryrwOrfmOx48fD09PT3zyySc4e/YsfH19sXv37g6xwF5JSUmLn43CRpCurq7Q1NTssAM1//3vfzF69GipL1Y0dOhQ3Lp1C1lZWVKt91URiUSIi4vDzZs3sWnTJhw/fhxff/31C0ftGyLFf68t1HBOjadEGRgYICQkBHv37sUff/whEZG7uLgwAxyNSUhIwPbt21ulXxp1tJWG8wwODn5p+y4uLgCaflaNaU2Z16E13/HXX38NBwcHnDlzBocOHUJdXZ3E4Bqb/PXXX1BXV28+WGn/O3z26N27t0zTh7WWnJwc4vP5FB4eLvW6a2pqyN7enj744AOp1/0qhIWFkb29Pe3Zs4dOnz5NUVFRlJSUJJHctLKykhnUaHjt7u5OlpaWEv2Qc+fOJR8fHxIKhRJtpKamEp/Pp1WrVknsFwgEZGdnRwBo6tSpdODAAfryyy8pKCio1f1framjqqqKAFC3bt2Y4+zt7QkAlZWVMftsbW0JgMTTGy4uLgSACgsLiai+j2/WrFk0atQoEovFL23/3r17pKKiQsbGxnT69GmqqqqiCxcukJ6eHgGg1NTUVpUhqh8cA0CdO3duornxU1gN/asN30NrvmMtLS3m+X+hUEj6+voSAz1sUV5eTra2tvTxxx83+75CG2RISAgNHjyYbRlN2LFjB2lra1NlZWW71H/ixAni8Xi0b9++dqm/LZw9e5bMzc2bDDCYmprS0aNH6cmTJzRnzhxm/5YtW6ioqIjKyspo4cKFFBQURKGhobRw4UJauXJli6Pz8+bNY0ZRG5OWlkYjR44kIyMjsrCwoI8//rjNGYxeVEdOTg4tWLCAAJC6ujqdO3eO/vnnH2aEeM6cOVRQUMCMuAOgDRs2UH5+PvP5BAcHk5+fH4WEhNCcOXNo+/btEib6snO4fPky+fj4kK6uLtnb29O6devI19eXZsyYQefPn6e6urqXliktLaXFixczGjdv3syMRgOgVatWUUlJCW3ZsoXZt2jRIqqqqnrpd0xUPwjn6elJ69ato/fff59GjBjBGDNbiMVimjhxIpmZmTUZEGxAoQ1yx44dZGBgINVpNNIgKCiIxo8f365tLFq0iNTU1Oivv/5q13ZehFgspp9//pnWr1/P7Kurq6PMzEzat28fmZmZsaaNQzrI63csEolo1qxZpK6uTmfPnm2xnEIbZHR0NAGQmEPGNsXFxS1OKZAmYrGYZs+eTSoqKvTNN9+0a1stsW7dOgLQbGT35MkT6tmzJwuq/se/I57mto70t9MR6ejfcXOUlZXRmDFjSF1dnf773/++sKxCG6RQKCQtLa0OcavZwKFDh0hNTY1KSkpk0t6uXbtIVVWV/Pz8KCkpSSZtNjBixAhmknHDLSUR0d27d2nChAkyyZ7O0b7I23d86dIlcnJyIlNTU4qMjHxpeYU2SCKifv360ezZs9mWwfCf//yHfH19ZdpmTEwMeXl5kZaWFi1fvrzJIEd7UVBQQJ9++inZ2dmRhoYG9e/fn8aPH08//fQTM6mYQ76Rl++4qKiIpk+fTjwej4KDgyk7O7tVxym8Qc6dO5f69OnDtgwGa2trWrlypczbFQqFtGrVKtLU1CQ3Nzc6cODAS3PhcXDIO0VFRRQWFkbGxsZkZWVFJ06caNPxCm+Q+/fvJ3V19Q7xbHJCQgIBoBs3brCmISkpiSZPnkyqqqrk6OhIe/bs6VBXeg4OaZCbm0uLFy8mfX19MjIyouXLl7/So40Kb5BJSUkEgG7evMm2FPruu+/I0NCwXRY4byspKSkUEhJC6urqZGVlRV9//TXr0y44OF4HsVhMV69epY8++oi0tbXJ3Nyc1q9fLzEXta0o7JM0DTg5OcHExAS3b99mWwrOnTuHgIAAqeZ+fFUcHBywe/dupKSkYNKkSdi9ezccHR0xZMgQHDx4EAKBgG2JHByt4vnz51i/fj1cXFzg6+uLhw8fYtOmTUhLS8OiRYuaPBffFnhERFLU2iEZNmwYLCws8Ntvv7Gmoba2FqamptiwYQNmzpzJmo6WqKurw+nTp7F3716cOnUK2traCA4OxpgxY/Dmm292+MzsHMpFVlYWTpw4gfDwcFy6dAn6+vqYNGkSpk6dip49e0qtHaUwyK+++gp//vknEhMTWdNw5coV+Pn5ISUlBQ4ODqzpaA25ubk4fPgwwsPDcfXqVaipqWHIkCEYPXo0Ro0axSRT4OCQJXFxcQgPD0d4eDhiYmKgp6eHN998E+PHj8eoUaOgoaEh9TaVwiBPnDiBMWPGoLCwkLXlTr/66iscOHAAaWlprLT/qhQVFeHChQs4efIkwsPDUV5eDnt7ewQGBiIwMBBDhgyBsbEx2zI5FJDnz5/j2rVrOH/+PM6cOYOMjAyYmppi+PDhmDBhAoYOHdouptgYpTDIvLw8WFhY4OzZswgKCmJFQ79+/dCjRw/s2rWLlfalQVVVFSIjI3HhwgVERkbi4cOH4PF48PLygr+/PwYNGgRvb2+YmZmxLZVDDklJScGtW7cQGRmJyMhIpKamQktLCz4+PggICMCQIUPQp0+fJnks2xOlMEgAsLW1xccff8xKiqWSkhKYmpri8OHDEvkK5Z3CwkJcunQJkZGRuHjxIh49egSgPjFr37594e3tjT59+sDLywva2tosq+XoSOTl5SE6Ohq3b99mtqKiIqipqaFv374I
CAiAv78/+vfv3+5R4otQGoOcMGEChEIhjh8/LvO2T548idGjRyM/Px8mJiYyb19WFBUVSfzB3759G/n5+VBVVYW7uzveeOMNeHh4oHv37vDw8IC1tTXbkjnambq6Ojx+/BhxcXGIjY1FXFwc7t+/j/T0dAD1s0y8vb2Zi6mnp2eHWgZYaQxy48aN2LJlC54/fy7zthctWoRTp04hLi5O5m2zTVpaGm7duoWYmBg8fPgQcXFxTIZrQ0NDeHh4MJuTkxMcHR1hY2PTIaZCcbQegUCAlJQUpKSkMIYYFxeHhIQECIVCqKqqwtHREd27d0f37t3Rp08feHt7d/j+a6UxyMuXL2Pw4MFIT0+Hra2tTNseOHAg3N3d5br/UZoUFRUhNjYW8fHxjGkmJCQwS7qqqanBzs4Ojo6OcHR0lDBOW1vbJksPcMiG/Px8ZGdnIzU1lTHDhi07OxtA/Zo11tbWcHNzY+4UPDw84Obm1qEiw9aiNAYpEAhgaGiIX3/9FRMnTpRZuzU1NTA0NMSuXbvw4YcfyqxdeaSwsBApKSl48uQJUlJSkJyczPwACwoKmHJGRkawtLSEra0trKysYGVlBRsbG1hZWaFTp04wMzODqanpC9fd5vgfQqEQBQUFjAFmZWXh6dOnyMzMRFZWFrOvuroaQP2aQdbW1swFrPHm4OAALS0tls9IeiiNQQJg+jraay2R5oiKisLAgQPlYv5jR6a0tBSZmZnMj/ZFP2CgfqU6MzMzZjM1NYWZmRljoEZGRjAwMJDYDA0N5X4wqbS0FCUlJSgtLZXYiouLkZ+fj/z8fOTk5CA/Px8FBQXIzc1lIvcGDA0NYWVlBVtbW1haWjL/b7gY2drasjpwIkuUqqNnwIABuHTpkkzbjIqKgoWFBWeOr4mBgQHTf9USeXl5yMvLa2ICDx48wKlTp2BiYgJNTU0UFBSguLgYzcUGampqEoaprq4OHR0daGpqQktLC1paWtDU1ISOjg7U1dWhp6fH9Jc2lP03urq6TRZnEwgEEobeQElJCaOroUxlZSWEQiHKy8tRV1eHsrIyiEQilJSUoLa2VsIEm0NdXR1GRkYSFwlPT0/m4tGpUyfmPUtLS+6pqUYonUFu27YNpaWlEkuHticNESRH+2Nubg5zc3PmtUAgQFhYGE6dOoWgoCDs3r1bYmnPsrKyJpFW4+irwYAqKipQU1ODqqoqFBYWoqamBhUVFaitrWXMCqifJ1pTU9NEV2PTa6A1Ztpgytra2tDQ0GDes7a2Bp/Ph5GREVRVVRlDNzIygqGhYZPIWJFueWWNUhnkwIEDIRKJcPv2bZlMGCci3LhxA0uWLGn3tjgkuX79OqZOnYqcnBzs2LED06dPb1JGX18f+vr63HQjjhZR+Gw+jenUqRPs7e0RFRUlk/aSkpKQn5+PAQMGyKQ9jvqocfHixRg0aBDs7OwQFxfXrDlycLQGpYoggfrbbFkZZFRUFLS0tKSaXYSjZVoTNXJwtAWliiCBeoO8ceMG6urq2r2tGzduwNvbG+rq6u3eljLDRY0c7YVSGmRlZSUePnzY7m1FR0ejb9++7d6OMnP9+nV4enpi586d2LFjB06fPi0xEMPB8ToonUG6u7vD2Ni43W+zq6ur8ejRI3h5ebVrO8oKFzVyyAKlM0gej4e+ffu2u0E+ePAAtbW1nEG2A1zUyCErlM4ggfrb7GvXrrVrGzExMdDX1+cmiEsRLmrkkDVKaZADBw5kHlVrL2JiYuDp6SnT5J6KDBc1crCBUv56+/TpAzU1tXaNImNiYtCrV692q19Z4KJGDjZRSoPU1tZGr169cOXKlXapXygUIj4+Hp6enu1Sv7LARY0cbKOUBgkAgwcPxuXLl9ul7ri4ONTU1HAR5CvCRY0cHQWlNUg/Pz8kJibi2bNnUq87JiYG2tra6Natm9TrVnS4qJGjI6G0Bjlw4ECoqanh6tWrUq+7YYBGRUVF6nUrKlzUyNERUVqD1NXVhZeXV7vcZj948AA9evSQer2KChc1cnRUlNYggfp+yPZIoJuQkAAPDw+p16tocFEjR0dHqQ2yoR8yNzdXanU+ffoUJSUlcHd3l1qdiggXNXLIA0ptkL6+vlBRUZHqbXZ8fDwAwM3NTWp1KhJc1MghTyi1QbZHP2R8fDwsLCxgamoqtToVBS5q5JA3lNogAen3Q8bHx3O31/+Cixo55BWlN0g/Pz88evRIav2Q8fHx3O11I7iokUOeUXqDbOiHlMZjh0SER48ecREkuKiRQzFQeoPU09ODp6enVG6zs7OzUVpaqvQGyUWNHIqC0hskAAQEBODixYuvXU/DCLarq+tr1yWPcFEjh6LBGSSAIUOGIDExEVlZWa9VT2JiIszNzZVyBJuLGjkUEc4gUd8PqaWlhfPnz79WPSkpKUqXoIKLGjkUGc4gAWhqasLHxwcXLlx4rXpSUlLg6OgoJVUdHy5q5FB0OIP8fwIDA3H+/HkQ0SvXkZKSohRr0HBRI4eywBnk/xMYGIjc3FzExcW90vF1dXXIzMxU+AiSixo5lAnOIP8fLy8vmJqavnI/ZFpaGmpra+Hk5CRlZR0DLmrkUEY4g/x/+Hw+Bg8e/Mr9kCkpKQCgkLfYXNTIoaxwBtmIIUOG4NKlSxAKhW0+NiUlBWZmZjA0NGwHZezARY0cyg5nkI0IDAxEZWUlbt26BQAQiUS4fv06tm7dCoFAwJQrLS3FwYMHcfHiRTx+/BgCgUDhRrC5qJGDA+DR6wzbKiC2trbo2bMn1NXVcebMGZSXlwP43zozABAeHo6xY8dKHKepqQk9PT307dsXXbt2hbW1Nezs7DBu3Djw+fJzHRIIBAgLC8OmTZsQFBSE3bt3c8bIobQovUHW1tbi+vXr+Oeff3Dy5EnEx8eDz+eDz+ejrq6OKZecnMxEiJmZmbC1tW2xTnV1dYjFYtTV1Ukc19G5fv06pk6dipycHGzcuJG7neZQelTZFsAm1dXVsLGxQX5+PtTV1Zm+R7FYDLFYLFG2cd+ijY0NzM3NkZeX12y9QqEQqqqqGDx4sFyY47+jxvPnz3NRIwcHlLwPsuEJGlVV1ZcOzBgYGEi8HjRo0AuXda2rq8Py5culorM94foaOThaRqkNEgD27NkDIyOjF5qdpqYm1NTUJPYNGDCgxb5FFRUV9OnTB4MHD5am1DaRkpKC33//vcX3uRFqDo5WQBx09uxZ4vF4BKDZzczMrMkxt27darE8ADp9+jQLZ1JPRUUFOTk5EY/HowsXLjR5PyoqipydncnAwIB27drFgkIODvlA6SNIAAgKCsLs2bOhqtp8l+y/b68BwNPTExoaGk32q6iowMPDA8OGDZO6ztYya9YspKeng8fjYcqUKaioqADARY0cHG2GbYfuKAgEAnJxcSFVVdUm0WDv3r2bPaZ///5NyvJ4PDp27JiM1f+PPXv2SOhRU1OjWbNmcVEjB8crwEWQ/4+mpiYOHTrU7HsmJibN7h88eDDU1dWZ13w+Hw4ODhg9enS7aHwZcXFxmD17tsS+2tpa/Pjjjxg4cCAcHBwQHx/PRY0
cHK2EM8hGeHp6YvXq1RKDLzweD8bGxs2W79+/f5PR77CwMFYmhldUVGDs2LEQiURN3uPz+TA2Nsbhw4dhaWkpc20cHPIKZ5D/YuHChRgwYAAzaq2qqtri89U+Pj7g8XgA6o3UysoK77zzjsy0NmbmzJnIyMiQmNzegFgsRllZGRYtWsSCMg4O+YUzyH/B5/Nx6NAhaGpqgsfjgc/nNztIA9Tfenft2hVAvUEuX768xYGe9mTXrl04dOgQamtrWyxTW1uLnTt34uzZszJUxsEh33AG2QxWVlbYsWMHiAi1tbXQ19dvsWzDXEcLCwtMnjxZRgr/x/379zFnzpxWZ0KfNm1as7fhHBwcTVHqRw2bQyAQoLKyEj4+PggKCsK5c+eQl5cnkUi3urqaye7TMEjz9ttv48qVK0wZTU1NaGlpQVNTEzo6OjAwMICenp5UI8yysjKMHTv2heaopqYGkUgEsVgsl8kzODjYRKGTVdTV1eHp06fIzMxEbm4u8vLykJ+fj4KCAhQUFCAnJwcFBQUoLy9HWVkZSktLmzyDLW00NDSgq6sLAwMDGBkZMcvEmpqawszMDBYWFjA1NUXnzp1hY2MDCwuLFuuaOHEijh49KtHvqKKiAiKCWCyGpaUlAgICMHDgQAwfPhzW1tbtem4cHIqG3BtkXl4eHj16hMePHyMtLQ2ZmZnIyMhARkYGnj17JnE72WBCDYbUqVMnmJqaMtGdvr4+dHR0mkR82traEpPC1dTUoKurK6GjuLhY4nVlZSWEQiETkZaVlaGsrAyVlZWoqqpCSUkJioqKkJ+fz5h2Xl4e8vLymIndQH0kamtrCxsbG2ZzcnLC/fv3sXHjRqafVCQSwczMDMOGDUNgYCD8/f1hY2PTTp86B4dyIDcGmZeXh7t37+LBgwdISkpiTLHBmPT09GBvbw8bGxt07doVNjY2sLa2ho2NDWxtbWFhYfHC5607EtXV1Uzkm5mZifT0dGRlZTHm37D+DQBoa2vD1tYWXl5e8PHxQc+ePdGjRw/o6OiwfBYcHPJPhzTI/Px83Lx5EzExMcyWnZ0NoD6hrYuLC5ydnZl/nZ2dlWp+X11dHRITE/HkyRM8fvwYSUlJSExMREJCAoqLi6GiogJnZ2f06tULXl5e6NWrF/r06QNNTS9WTuAAABDlSURBVE22pXNwyBUdwiCfP3+Oa9eu4dq1a4iKikJMTAyICJ07d0avXr2YrW/fvjA3N2dbbofm2bNnuHv3LrNFR0cjNzcXqqqq6NGjBwYMGICBAwciMDAQRkZGbMvl4OjQsGKQAoEAkZGRiIiIwD///IO0tDSoqamhd+/e8PX1ha+vLwYMGMD9gKVEeno6rl69iitXruDq1atISkqCiooKvLy8MGLECIwYMQK9evViJr1zcHDUIzODzM3NRXh4OCIiInDx4kUIBAJ4enrirbfegr+/P/r16wdtbW1ZSFF6cnNzcfXqVZw/fx6nTp1CdnY2OnXqxJjl8OHDudtxDg60s0EKBAJERETgt99+w5kzZ6CmpgYfHx8EBwdj3LhxXObqDkJqaipOnjyJiIgIXL58Gdra2hg1ahQmTJiA4cOHs/J0EAdHR6BdDPLixYvYtWsXTp48CbFYjOHDh+P9999HcHAwtLS0pN0chxTJycnBH3/8gUOHDuH27dvo3LkzJk6ciE8++QQODg5sy+PgkClSM0iBQICDBw/i+++/R2xsLAYMGICPPvoI48aN4/oS5ZTk5GQcOnQIv/zyC7KyshAcHIy5c+ciICCA66/kUApe2yArKiqwZcsWfPfdd6ioqMB7772HuXPnwsvLS1oaOVhGJBIhPDwc27Ztw+XLl+Hh4YHly5dj3LhxnFFyKDSvbJBCoRA7d+7EmjVrUF1djQULFmDWrFkvfDSOQ/65f/8+vvnmGxw+fBheXl5Yu3YtgoKC2JbFwdEuvFLWgtOnT8PZ2RmLFi3CBx98gNTUVKxYsYIzRyWgZ8+eOHjwIO7duwcLCwsMHToUQ4cORWpqKtvSODikTpsMsrS0FCEhIXjrrbfQr18/PH78GJs2bWpxSQIOxeWNN95AREQErly5gpycHPTo0YNJEcfBoSi0+hb7xo0bePfdd1FTU4Mff/wRb7/9dntr45AThEIhVq5ciQ0bNsDPzw9//PEHd9HkUAhaZZD//e9/MWnSJAQEBOCXX36BmZmZLLRxyBl37tzBO++8AzU1NZw+fRr29vZsS+LgeC1eeou9Y8cOTJgwAR999BGOHz/Oqjnm5ubiyJEjWLNmDWsaXoWSkhK2JciE3r1748aNG9DX10f//v0RExPDtiQOjtfjRWvCRkREEJ/Pp9WrV0tnkdnXICEhgT755BMCQM7OzmzLeSkCgYBWr15N/fr1Iz6fz7YcmVJRUUFDhw6lLl260PPnz9mWw8HxyrR4i52SkoI+ffpg5MiR+O2332Rs281TXV0NLS0tODs7IzExkW05L0UgEMDS0hLFxcVKN3hRXl6Ofv36wcDAAJcuXZJYP5yDQ15o9habiPDBBx/AyckJP/30k6w1tYi8JVDQ0tJS2vRsenp6OHr0KOLi4rB+/Xq25XBwvBLNGmR4eDhu376Nn376Se5MiaPj4OrqiqVLl+Kbb75psiQFB4c80KxB7tmzB2+++SZ69uwpMyFEhG3btmHy5MmYNWsWNDQ0wOPxmK0lSktLsXDhQixevBihoaEYOnQoQkNDmR/kn3/+CWNjY/B4PCxbtow5bseOHeDz+UyELBAIsGHDBkybNg29e/dGYGAgYmNj23QOVVVVCA0NxfTp07Fs2TIsWbIElZWVzPnduHEDn332Gbp27YqcnByMGzcOxsbG8PDwwF9//cXUU1FRgVWrVmHy5MmYO3cu/Pz8sHXrVojFYhw/fhzTp0+HlZUViouLMWXKFJiYmMDDwwN37twBEb20jCyZPXs2AODQoUMybZeDQyr8u1NSKBSStrY27dmzR6adod9//z3x+XwqKCggIqK1a9cSAAoNDZUoh0aDNGVlZeTk5ETLly9n3s/NzSUnJyeys7Oj4uJipm4A9PfffzPlMjIyaOLEiczrkJAQevToEfM6KCiIzM3NqbS0tFX6a2trydvbm0JCQkgsFhMRUUpKCqmoqBAAqquro5MnT5KmpiYBoE8//ZQuX75MBw8eJF1dXQJA165dI6FQSH5+fjR58mQSiURERLR3714CQMePH6esrCzS0dEhALR69WpKT0+n/fv3EwDy9vYmsVj80jKyZty4cTRmzBiZt8vB8bo0McgnT54QAIqOjpapkJEjRxKPx6OamhoiIoqNjSUA1LdvX4lyjQ1y6dKlBICePXsmUWbfvn0EgBYuXEhERDU1NWRtbU0jR45kyixbtoxiYmKIiOjmzZsEoNnt5MmTrdK/bds2AkDx8fES+52cnKjxdajhdUVFBbNvy5YtBIDeffdd2rx5MwGgxMRE5v3a2lrau3cvFRUVERFRt27dJOoUi8Vkbm5O6urqzL7WlJEVK1euJBcXF5m3y8HxujS5xa6qqgIAma+KFxQUBCLCqVOnAPxvQC
YgIKDFY6KiogDUDwg0ZtCgQQCA69evAwDU1dUxb948RERE4MmTJxAKhUhKSoKnpycAIDo6Gm5ubqD6C4bEFhwc3Cr9Z8+eBQDY2dlJ7Ofz+c2+bvz5jho1CkB9erFLly4BgEQyYVVVVfznP/9h0sb9u8uBx+PByMgIQqFQYt/LysgKXV1diaVsOTjkhSYG2fCIWG5urkyFfPrpp9i9ezemTZuGzz//HJ999hnCwsKwcuXKFo9pMJv09HSJ/Q1JMwwMDJh9ISEh0NHRwfbt2xEeHo7x48cz7xUWFiItLY3pL2xM43W1X8TTp0+ZutpKly5dAADW1tbM556cnNzmejoqz58/556+4pBLmhhk586d0aVLFyY6kxUikQhxcXG4efMmNm3ahOPHj+Prr79+Ybr/hkixIepsICsrCwAQGBjI7DMwMEBISAj27t2LP/74A2PHjmXec3FxYQZpGpOQkIDt27e3Sr+Li0uzWlpDg6kGBgaiR48eAIA1a9ZALBYzZdLT0/H333+3ue6OQFRUFHr16sW2DA6OttPcfffs2bPJ2dmZGSSQBWFhYWRvb0979uyh06dPU1RUFCUlJVFtbS1TprKykgCQra0t89rd3Z0sLS0l+iHnzp1LPj4+JBQKJdpITU0lPp9Pq1atktgvEAjIzs6OANDUqVPpwIED9OWXX1JQUFCrB2nu3btHKioqZGxsTKdPn6aqqiq6cOEC6enpEQBKTU0lIiJnZ2cCIHFe+/btIy8vLxIKhfTkyRPS1tYmAOTv70/bt2+nZcuW0fTp05nvw9bWlgAwg0FERF26dCEAzDm3powsSExMJB6PR6dOnZJZmxwc0qJZg0xISCBVVVX65ZdfZCbk7NmzZG5u3mSQxNTUlI4ePUpPnjyhOXPmMPu3bNlCRUVFVFZWRgsXLqSgoCAKDQ2lhQsX0sqVK6m6urrZdubNm8eMlDcmLS2NRo4cSUZGRmRhYUEff/wx5eXltekcLl++TD4+PqSrq0v29va0bt068vX1pRkzZtD58+eprq6OMchvvvmG8vPzKTc3l9atW0fl5eVMPQ8fPvy/9u4vpMkujgP4d6mpm6IN50RwUzHdEqJcN5ZNIeaFC4pIy8IgiIKUvCsjAqGLAqMLa5GFkJR28UJYWFld5J+h2chAITcs2J4JZVtuui0xp+e9eNnzusoy23ym/T7w4Cbbzm/K891h5zznsNLSUpacnMzS09NZbW0tc7vdjDHGrl27xv8NLly4wNxuNz/IA4CdOXOGNTQ0/PIxX758+a33tlzl5eVMrVYzv9+/Iu0REkqLXotdU1PDpFIps9vtYS9ifn6eNTc3s0uXLvG/8/v9jOM41tLSwmQyWdhrWCmBgPwbNDc3M5FIxJ48eSJ0KYQsy6JnqtfrZWq1mmk0mqDeTThcvHiRAfhhz+79+/dsy5YtYW3/V77t1f7oWDiH8mf+loAcGBhgcXFx7Pz580KXQsiyLbrcmUQiQUdHB8bGxqDT6eB0OkP1ted3jEYjAODGjRtB7QwODqKurg53794NW9tLwX4w/efbIzBI8yuBkfK1PO2lv78fer0eu3btQn19vdDlELJ8v0rQd+/esY0bN7Ls7GxmsVjCktJOp5PV1NSwrKwsFhsbywoLC9n+/fvZzZs3+Ynjq53H42Fnz57le5xHjx5lfX19QpcVcu3t7UwsFrM9e/Ywn88ndDmE/JElrSg+Pj6O3bt3w2q1wmAwoKKiIvzJTVaVmZkZ1NfXo6GhASdOnEBjYyOioqKELouQP7KkTbvkcjlevHiBffv24eDBg6ioqIDD4Qh3bWSVMJlM0Gg0MBgMuH79OgwGA4UjWROWvKthQkICmpqa8PTpUwwMDCA/Px9Xr14V5NI1Ehk+fvyI6upqbN++HWlpaRgeHsbx48eFLouQkPntfbF1Oh2Gh4dx5MgRnD59GiqVCnfu3Am66oOsbZOTkzh37hxycnLw8OFDNDU14fnz51AqlUKXRkhILXnb1x+x2+2or69HS0sLcnNzUVtbi6qqKojF4lDWSCKE3W6HwWDArVu3IBKJUFdXh+rqasTHxwtdGiFh8UcBGTAyMoLLly+jra0N8fHxOHbsGKqrq6lHsUb09vaisbER7e3tSE1NxcmTJ1FTUxO0GAgha1FIAjLA7XajpaUFV65cwdjYGAoLC1FeXo7Dhw8jJSUlVM2QFcBxHO7du4fbt2/DbDZDo9Hg1KlTqKysRExMjNDlEbIiQhqQAbOzs+jo6EBrayu/uo1er0dlZSVKS0u/W7+RRAa73Y4HDx6gra0NL1++RGpqKg4cOICqqips27ZN6PIIWXFhCciFJicncf/+fbS2tqKrqwtRUVHQarUoKyuDXq9Hbm5uOJsnPzE3N4f+/n48evQIjx8/xtDQEBITE7F3714cOnQIOp2OpuuQv1rYA3Khz58/o7OzEx0dHXj27BkmJiaQk5ODkpISaLVaaLVa+t4yjObm5jA0NISenh709PSgq6uL/x/o9XqUlZWhuLgYsbGxQpdKSERY0YBcaG5uDn19fejs7ER3dzdMJhO+fv0KhUIBrVaLHTt2QKPRYPPmzXTCLpPL5cLr169hMplgNBphNBoxNTUFqVSKoqIilJSUUC+ekJ8QLCC/NT09jVevXqG7uxu9vb0YGBiAx+NBTEwMNm3ahIKCAhQUFGDr1q1Qq9WQSqVClxwxGGPgOA5v377F4OAgfwS2okhPT0dRURF27tyJ4uJi5Ofnf7dXDiHkexETkN+an5/H6OgoBgcH8ebNG/6kD+x3LZPJoFarkZeXh7y8PKjVamRnZ0OpVK7ZeXkTExOw2WwYHR2FxWLByMgILBYLLBYLv0pQZmYm/0ES+FBJS0sTuHJCVqeIDcjF2Gw2WCwWmM1mPiDMZjM+fPjAP0YmkyEjIwMKhQIKhQJKpRJyuRwymYz/mZKSgvXr1wv4Tv7n8/ngcDgwPj4Oh8MBp9OJsbEx2O12cBwHjuNgs9n4EIyOjkZmZibUajVUKhXy8vKgUqmoZ01IiK26gFzM5OQkrFYrOI6D1Wrlw8Vut8NqtcLhcGB2djboOUlJSZDL5UhMTMSGDRsgkUggFouRmJiIpKQkSCQSxMXFQSQSITk5Oei5gS1Ygf+mNS1c39Hv98Pj8QAAPB4PfD4ffD4f3G43f9vj8cDpdMLhcGB6ejrotcViMdLT06FQKJCRkYHMzMygwM/KyoqYcCdkLVszAbkULpcLnz594oMp0Gvzer1wuVx8eHm9Xj7MZmZmggIP+G+AaWpqir+/bt26oKtKFgZqQkICJBIJJBIJH8ISiQQJCQl8TzYlJSWod0uXahISGf6qgCSEkN9BQ5mEELIICkhCCFkEBSQhhCwiGsA/QhdBCCGR6F9S9Wq/Eql0EwAAAABJRU5ErkJggg==\n", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "" + ] + }, + 
"execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "glove_dpcnn" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAANAAAAFbCAYAAACkpADbAAAABmJLR0QA/wD/AP+gvaeTAAAgAElEQVR4nO3deVxU9R438M8wMMywDAzCyL4KsrmlAm6QCqZXUG9mpaamZtp2K59My8pbmlld01vabXvM0iwrLdN7TcRCUxERDWVxZd+XYR1mYJbv84dxHhAQdAbOgL/36zUv4Mzh9/uemfmcbc4iICICwzB3xYzvAhimL2MBYhgDsAAxjAHM+S6Auam+vh5FRUWora1FfX09iAg1NTUAABsbG1hYWEAikcDOzg4DBw6Ek5MTzxUzAAtQr2pubsb58+dx6dIlXL58GZmZmbh+/TqKi4vR2Nh4R22JRCLI5XIMGjQIgYGBCAoKQnBwMMLCwiCVSntoCphbCdheuJ6j0Whw4sQJHD16FKdOncK5c+egVqtha2vLfegDAgLg7u4OuVwONzc3yGQy2NjYAABkMhmAm0snrVYLtVqN6upqlJWVobi4GKWlpbh27RoXxoqKCgiFQoSEhGD8+PG4//778cADD7BA9SAWICNTq9X46aef8NNPPyE+Ph61tbUICgrC+PHjMW7cOIwdOxb+/v490ndZWRmSkpLwxx9/4PTp00hJSYFQKERkZCRmzpyJRx99FI6Ojj3S972KBchILly4gC+++AJ79uxBQ0MDJk6ciLi4OMTGxsLHx4eXmhQKBQ4fPoyDBw/i8OHDUKvVmDFjBpYsWYIHHngAZmZsH5LBiDHIH3/8QbGxsSQQCGjw4MG0bt06ysvL47usdlQqFX3//fcUHR1NAoGA/Pz8aOvWraRSqfgurU9jAbpLJ0+epLCwMAJA0dHRdPToUb5L6rbMzEx6/PHHSSQSkYeHB3399dek1+v5LqtPYgG6Q/n5+fToo4+SQCCg6OhoSklJ4buku5afn0/Lly8noVBIYWFhlJSUxHdJfQ4L0B3YuXMn2dnZkb+/Px04cIDvcowmLS2NJk2aREKhkF5++WVSq9V8l9RnsAB1Q01NDf39738nMzMzevHFF/vtdsPnn39Otra2NHToULp8+TLf5fQJbC9cF3JychAXF4fq6mp88803uP/++/kuqUfl5ORg7ty5uHr1Kr7//ntER0fzXZJJY/sxb+P8+fMIDw+HSCRCcnJyvw8PAPj4+CAxMRHTpk3DtGnT8PXXX/Ndkkljh/J04uLFi5gyZQpGjRqFffv2wdramu+Seo1YLMbu3bvh5eWFJUuWwNzcHPPmzeO7LJPEAtSBnJwcxMTEYOjQofjpp58gkUj4LqnXCQQCbNy4EVqtFosWLYJUKkVsbCzfZZkctg10C7VajXHjxkGv1+PkyZP31JKnM0uXLsX+/fuRmpoKX19fvssxKSxAt3jmmWfwzTff4Ny5cxg0aBDf5ZgEtVqNsWPHQiAQ4MyZM7CwsOC7JJPBdiK0cvbsWXzyySf4+OOPWXhaEYvF+P7775GZmYlt27bxXY5JYUugvxARxowZA7FYjMTERL7LMUlvvPEGPvzwQ1y9ehVyuZzvckwCWwL95dixY0hOTsaWLVuM1mZ4eDhWrVpltPb4tmbNGojFYmzfvp3vUkwGC9Bf/vOf/yAyMhIjRowwWps+Pj4Qi8VGa+9OFRQUGLU9KysrPPHEE/j888+h0WiM2nafxdsxECakpqaGzM3NadeuXXyXYjTZ2dk0fvx4o7ebm5tLAoGADh8+bPS2+yK2BAJw8uRJ6HQ6TJkyhe9SjKKwsBCxsbGoqKgwetteXl4ICgrC8ePHjd52X8QCBOCPP/5AUFCQ0TaMdTodvv/+eyxatAiRkZEgIhw4cABPPvkk3N3dUV1djUWLFmHAgAEIDQ3FuXPnQERISkrC//k//wfe3t4oLS3F7Nmz4eDggNDQUOzbtw8A8Omnn0IgEEAgEAAA6urqsHnz5jbDdu7ciczMTJSWlmLFihVGmabWoqKicOLECaO32yfxvQg0BQ8//DDNnj3bqG3m5eURABo8eDDp9XoqKCgga2trAkAbNmyg3Nxc2rVrFwGgsLAw0mq1dPDgQRKLxQSAnn32WTp+/Dh98803ZGNjQwDo5MmTRETk6+tLt751tw5r6bsn/Pvf/yYXF5ceabuvYQEiokmTJtGKFSuM2qZer2/3IQ4ICGjzIdfr9SSXy0kkEnHD/P39CQA1NDRww7Zs2UIA6JFHHiEiosGDB7cL0K3DejJAe/bsIXNzc3YWK7FtIACAUqmElZWVUdtsWZ263TCBQACZTIbm5mZuWMuFPlofQjRjxgwAwLVr14xa492ytraGVqtFU1MT36XwjgUIwIABA6BQKPguo1Ourq4AAA8PD54ruamyshLW1ta87qI3FSxAABwdHVFWVsZ3GZ2qqqoCAO7ktpYlWcsSQK/Xo7a2FsDNIypaaLXaHqmnvLycXVr4LyxAAIYMGcLtCTOW+vp6ADf3krVQq9UA2n7IW8a79YvJ1h/+Y8eO4b777sPy5csBAIGBgQCADRs24Nq1a/j3v//NhenIkSPQ6XTw8/NDSUkJ8vPzjTZNLVJSUjBs2DCjt9sXsQDh5m7ZiooKXL582SjtKZVKbNy4EQBQUlKCDz74AO+88w7y8vIAAG+//TZqa2uxdetWFBcXAwBef/11qFQqro2tW7eisrIS5eXlKC4uxvHjx7mjoN99912EhYXhgw8+wDPPPIPp06cjJCQEjz32GGpqaqDVajFnzhxIpVKkpKQYZZpaEBFOnjyJCRMmGLXdvoodTIqbc3tXV1c8++yzeOONN3itJTAwEFeuXDHq0tCYEhMTMXHiRFy6dAmhoaF8l8M7tgQCYG5ujsWLF+PTTz9lx3h14eOPP8a4ceNYeP7CAvSX5cuXo6ysDHv27OG1DqVSCQBoaGjgtY6OXLlyBT///DOeeuopvksxHTx+B2VynnrqKRo4cCDV1NT0et/19fX0yiuvEAACQIsXL6bTp0/3eh23M23aNBo+fDhptVq+SzEZbBuolaqqKgQEBODRRx9l57zcYu/evXj00UeRmJiIqKgovssxHXwn2NTs3buXBAIBffvtt3yXYjIuX75Mtra29Oyzz/JdislhS6AOPP/889ixYwd+//13jBo1iu9yeFVZWYnIyEhIpVKcOHECIpGI75JMCgtQBzQaDWbOnInk5GT89ttv9+yXhtXV1Zg8eTJqa2tx4sQJuLm58V2SyWF74TpgYWGBffv2YcSIEYiJiUFSUhLfJfW64uJiREdHo6qqCseOHWPh6QQLUCckEgkOHDiAMWPGYNKkSdi9ezffJfWa1NRUhIWFobGxEb///ju8vb35Lsl08bs
JZvp0Oh299NJLJBAIaMWKFW3O0+lv9Ho9ffjhhySRSCgmJoaqq6v5LsnksQB10969e8nBwYEGDRpEf/zxB9/lGF1ubi5FR0eTubk5rVu3jjQaDd8l9QksQHegqKiIpk6dSgKBgObOnWuSNxO+U/X19fTaa6+RRCKhwYMHU3JyMt8l9SksQHdh//795OfnRxKJhFauXEmFhYV8l3THGhoaaMuWLeTq6kp2dnb0/vvvU1NTE99l9TksQHdJrVbTBx98QK6uriQSiWjp0qV04cIFvsvqUmFhIa1bt44GDBhA1tbW9MILL1B5eTnfZfVZLEAGUqvV9Pnnn3MX9RgxYgT9+9//prKyMr5L4yiVStq7dy9NmzaNhEIhOTk50bp166iyspLv0vo89kWqkdBfJ5p9+eWX+OGHH9DY2Ijw8HDMmDEDf/vb3xAaGspdMKQ35OXl4ciRIzh48CCOHTsGjUaDqVOn4vHHH0dcXBw7osBIWIB6gFKpRHx8PA4dOoRDhw6hvLwcdnZ2GDt2LMaMGYPhw4cjODgY3t7eEAqFBvdXXFyMrKwsXLp0CUlJSTh16hSKiopgZWWFKVOmIC4uDrGxseyOCj2ABaiH6fV6pKWl4dSpU0hKSkJSUhJycnIA3LzvzqBBg+Dh4QG5XA5XV1fY2dlBJpMBAOzs7GBmZoba2lro9XrU19ejvr4excXFKC0tRUlJCa5fv46amhoAgJOTE8LDwzF27FiMGzcOo0ePvidvT9mbWIB4UF9fj8uXLyMzMxPXr19HYWEhd+2Duro61NTUgIi44EilUgiFQkilUtjY2MDZ2RkuLi5wdnaGn58fAgMDERISwq6UwwMWIBP28ssvIzExEWfPnuW7FKYT7Fg4hjEACxDDGIAFiGEMwALEMAZgAWIYA7AAMYwBWIAYxgAsQAxjABYghjEACxDDGIAFiGEMwALEMAZgAWIYA7AAMYwBWIAYxgAsQAxjABYghjEACxDDGIAFiGEMwALEMAZgAWIYA7AAMYwBWIAYxgAsQAxjABYghjEACxDDGIAFiGEMwALEMAZgAWIYA7AAMYwBWIAYxgAsQAxjAHaDLRNRVlaGmTNnQqlUcsMqKiqgUqng6enJDRMKhdi8eTMmT57MR5nMLcz5LoC5SSKR4Pz589BoNO2eS09Pb/O3TqfrrbKYLrBVOBMhlUoRGxsLc/Pbz9NkMhkmTZrUS1UxXWEBMiHz58+/7dJFJBJhwYIFXYaM6T1sG8iEqNVqODo6ttkOulVSUhIiIiJ6sSrmdtgSyISIxWI89NBDEIlEHT7v6uqK8PDwXq6KuR0WIBMzb948NDc3txsuEomwePFiCAQCHqpiOsNW4UyMTqeDXC6HQqFo99ylS5cQGhrKQ1VMZ9gSyMQIhULMnz+/3WpcYGAgC48JYgEyQXPnzm2zGmdhYYFFixbxWBHTGbYKZ4KICJ6enigsLAQACAQC3LhxAz4+PjxXxtyKLYFMkEAgwMKFC2FhYQGBQIDRo0ez8JgoFiATNXfuXO6wngULFvBcDdMZ9pU2j4gICoUC1dXVqK2tRUNDAxeaxsZGuLq6orS0FNbW1khISAAAmJubw9bWFtbW1nBwcIBMJoOFhQWfk3FPY9tAPUSr1eLGjRu4evUq8vPzUVhYiIKCAuTl5aG0tBSVlZWoqakxSl82NjZwcHCAs7MzPDw84OHhAS8vL3h4eMDX1xeBgYGQSCRG6YtpiwXICCorK5GcnIzz588jIyMDmZmZuHLlCrcnbcCAAW0+2M7OznB0dOSWIA4ODrC3t4dEIoFYLAYAWFpawsrKCgCg0WjQ0NAA4OZSq6amBkqlklt6KRQKKBQKFBcXo6CgAIWFhcjPz0dJSQmICGZmZvDx8UFwcDCCg4MxbNgwREREsO0qI2ABugtZWVlISEjAmTNnkJycjBs3bgAA/Pz8EBISguDgYO7n4MGDYW1tzUudzc3NyM7ORkZGBrKyspCeno6srCxkZmZCq9VCLpcjLCwM4eHhmDx5MsLCwiAUCnmpta9iAeqG2tpaHDlyBPHx8YiPj0dBQQHs7e0xZswY7gMYFhaGAQMG8F1qtzQ2NuL8+fNITk5GcnIykpKSUFhYCHt7e0yaNAlTpkzB3/72N3h4ePBdqsljAepEY2Mjjh07hh9++AH79u1DU1MThg8fjujoaERHRyMqKqpfbbxnZ2cjISEBCQkJiI+PR21tLYKDgzFnzhwsWLAAfn5+fJdomojh6PV6+u233+jhhx8msVhMIpGIpk+fTjt37qTq6mq+y+s1TU1NdOjQIVq0aBHZ29uTQCCgyMhI+uabb0itVvNdnklhASKiuro62rp1KwUGBhIAGjNmDO3YsYMUCgXfpfFOrVbTL7/8Qg8++CCZm5uTk5MTrV69mnJzc/kuzSTc0wGqq6ujTZs2kYODA4nFYlqwYAFduHCB77JMVklJCW3atIm8vLzIwsKCFixYQFevXuW7LF7dkwFSqVS0fv16sre3J3t7e3rjjTeoqqqK77L6jObmZvriiy/I19eXLCwsaMmSJVRSUsJ3Wby45wJ06NAh8vPzI1tbW3rrrbeopqaG75L6LI1GQzt37iQvLy+ys7OjrVu3kkaj4busXnXPBKiyspIefPBBAkCPPPIIFRYW8l1Sv6FUKmnt2rVkaWlJw4YNo0uXLvFdUq+5JwJ04sQJcnd3J09PTzp69Cjf5fRbV69epfHjx5NEIqFPP/2U73J6Rb8P0AcffEBCoZBmzpzJtnN6gUajobVr15KZmRnNnz+fmpqa+C6pR/XrAK1du5YEAgG99957pNfr+S7nnnLkyBGSSqU0depUUiqVfJfTY/ptgF544QUyNzenL7/8ku9S7lkpKSnk6OhI48ePp8bGRr7L6RH9MkDbt28nMzMz2rt3L9+l3PMyMzNpwIABNHfu3H65FtDvApSYmEgWFha0fv16vkth/pKQkEDm5ub07rvv8l2K0fWrADU1NZG/vz/NmjWrX87t7lZYWBi99NJLvNbwr3/9i0QiEV27do3XOoytX10T4cMPP0RhYSG2bt3a56/gWVBQYLS2fHx8uBP1+PLCCy9g8ODBWLVqFa91GB3fCTaWpqYmGjBgAL322mt8l2Kw7OxsGj9+PN9lGN2RI0cIQL863rDfLIHi4+OhUCiwbNkyvksxSGFhIWJjY1FRUcF3KUY3ZcoU+Pv749tvv+W7FKPpNwH68ccfMW7cuDa3QzSWtLQ0xMTEQCAQIC4uDlVVVVi1ahU8PDzw9ddfAwB2794NKysrCAQCbNq0CVqtFgDwzTffQCQS4auvvupWXzt37kRmZiZKS0uxYsUK6HQ6JCYm4oUXXoC3tzeKiooQFRUFT09PVFdX4+rVq3jooYewevVqLFiwABMmTMDFixcB3LzO9vfff49FixYhMjISRIQDBw7gySefhLu7O6qrq7Fo0SIMGDAAoaGhOHfunNFfu1vNmTMHP/74Y4/302v4XgQay8iRI2nNmjU91n5DQwMFBQWRj48PqdVqiouLoytXrrQZZ+3atQSA0tPTuWF5eXk0a9asO+
oLAA0ePJiIbp6Pc+rUKZJIJASANm7cSEePHqWlS5dSfX09DRo0iHx9fYno5lHSdnZ2FBIS0qb/lvb0ej0VFBSQtbU1AaANGzZQbm4u7dq1iwBQWFjY3b483fbf//6XAFB9fX2P99Ub+k2A5HI5ffTRRz3ax9mzZ0koFFJERATt2LGj3fOVlZVkY2NDS5cu5YZt3LiRDh48eEf9tA5Qi4CAAALQ7nCkzZs30549e4iISKfTka+vL5mbm3PP6/X6du21tNV6HLlcTiKR6I7qvBsXL14kAHT58uUe76s39JtVuLq6OtjZ2fVoH6NHj8bq1auRnJyM4cOHt3t+wIABeO655/D111+jqKgIRIRjx45h6tSpBvfdslfRwcGhzfCVK1ciLi4O27dvx9tvv42mpiZu9bH1/3XUVuu/ZTJZh/clMraW98hY18TjW78JkIuLC4qLi3u0D71ejxs3bsDDwwMLFy5EU1NTu3FWrlwJkUiELVu2IDU1FWFhYT16T9OzZ89iyJAh8PX1xeuvvw4bG5se68sYWt4jNzc3nisxjn4TIHd3d+Tl5fVoH++99x4efPBB7NixA+np6Vi3bl27cRwdHfHUU0/h008/xYcffoglS5bcVV+tlyK3s3DhQmg0GkybNg3AzZADNy/AaIpyc3MhFArh7OzMdynGwfc6pLGsXbuWPDw8euwIhKSkJHr00Ue5v5966ikyMzOjxMTEduOWlJSQSCSiqKiou+rLz8+PrKysKC8vjxvm5eXV4ca3VColABQfH0+7d+8mJycnAkBnzpyh/Px8qqurIwDk4uLSrq3Wr5WrqysBoObm5ruqubvmz59PkZGRPdpHb+o3S6BHHnkEBQUFSEpKMnrb+/btQ1xcHOzt7blh9vb20Ov1mDlzJr788ss24zs7OyMmJgZLly69q/7mzJkDqVSKlJQUKJVKvPXWW9zSdeXKlbhw4QI37saNGyGVSrF27Vr4+flh7dq1sLe3x8aNG0FE2LhxIwCgpKQEH3zwAd555x2urbfffhu1tbXYunUrt2r1+uuvQ6VS3VXdXVGpVPjll1/w8MMP90j7fOhXF1YcNWoUnJ2dcejQIV7rUCqVGDZsGC5evMhd35oB3n33Xaxfvx7Z2dmQy+V8l2MU/WYJBAD/+te/8N///he//vorr3Vs374dzz33XLvwCASCLh+XL1/mqeqeVVZWho0bN2LNmjX9JjxAP1sCAcBDDz2E1NRUpKSkwNHRsdf6PXPmDJ588kk0NjZCp9Ph8uXLsLS07LX+TZler0dcXBx3kfv+dKuVfrUEAoDPPvsMQqEQf//733vle40W1tbWqKurg5mZGfbs2cPC08qrr76K3377DXv37u1X4QHQf/bCtZaWlkY2NjY0f/78e+46Zabmo48+IoFA0G9Pre+XASIiOnr0KNnY2FBcXFy/PR/f1L355pskEAho06ZNfJfSY/ptgIhufnfj4OBAY8eOpfz8fL7LuWc0NjbS0qVLSSgU0meffcZ3OT2q320DtRYREYE//vgD1dXVGDFiBA4ePMh3Sf1eVlYWwsLCsH//fvz00099/vysrvTrAAFAcHAwUlJSMGPGDMycORPLli1DZWUl32X1OxqNBu+99x5GjRoFW1tb/Pnnn4iLi+O7rJ7H9yKwN+3du5dcXV3JwcGBPvnkE9LpdHyX1C8kJCRQYGAgSSQSWr9+/T214+aeChDRzRPj1q1bRyKRiIKDg+mrr74irVbLd1l90unTpyk2NpYEAgHFxsZSdnY23yX1unsuQC0yMzNp7ty5ZGZmRkFBQbRr165+fx1nY/ntt98oJiaGANC4cePu6Qv237MBapGVlUWPPfYYmZubk1wupzVr1tyTc9KuVFdX09atWykoKIgAUGRkJCUkJPBdFu/u+QC1KCgooHXr1pGbmxuZmZlRTEwMffHFF/f0HR1UKhX9/PPPNG/ePLKysiIbGxt68skn6fz583yXZjL63bFwhtJqtTh48CB2796Nw4cPQ6vVYvLkyZg9ezYeeOABeHh48F1ij6qpqcFvv/2Gn3/+GQcOHEBDQwPGjRuHefPmYd68eZBKpXyXaFJYgG6jvr4eBw8exA8//IAjR45ApVIhMDAQMTExmDJlCsaNGweZTMZ3mQZRqVQ4d+4cEhISEB8fj5SUFADAmDFjMGfOHDz00ENwdXXluUrTxQLUTWq1GidPnsTRo0cRHx+PtLQ0AEBAQADCw8MRHh6OsLAwBAUFwdramudqO6bRaHD9+nWkpqbizJkzSE5ORlpaGjQaDby9vbkZw6RJk9pdvITpGAvQXaqoqEBycjL3QTx79izq6uogEAjg7e2NoKAghISEICAgAJ6envDw8ICXl1ePn2DX3NyMoqIiFBQUIC8vD9evX0dmZiYyMzNx7do1aDQaiEQijBgxggt+REQEfH19e7Su/ooFyEj0ej2uX7+O9PR0XL58mTv35erVq1Aqldx4AwYMgJubGwYMGACZTAYHBwfup4WFBWxtbQEA5ubm3O9qtZo7zVqlUkGtVqOurg7V1dVQKBTco6SkBKWlpdyFRSwtLeHt7Y2QkBAEBgYiJCQEQUFBCA4OZqdbGAkLUC9QKBQoKChAfn4+8vPzUVJSwn3oW0JQXV0NnU6H2tpaADdXtxoaGgAAYrGYO4/GysoKlpaWkEqlbQIok8ng4uICDw8PeHh4wNPTE87Ozn3+LhWmjgXIhL388stITEzE2bNn+S6F6US/P5iUYXoSCxDDGIAFiGEMwALEMAZgAWIYA7AAMYwBWIAYxgAsQAxjABYghjEACxDDGIAFiGEMwALEMAZgAWIYA7AAMYwBWIAYxgAsQAxjABYghjEACxDDGIAFiGEMwALEMAZgAWIYA7AAMYwBWIAYxgAsQAxjABYghjEACxDDGIAFiGEMwALEMAZgAWIYA7AAMYwBWIAYxgAsQAxjAHaDLRNRVlaGmTNntrkdZEVFBVQqFTw9PblhQqEQmzdvxuTJk/kok7mFOd8FMDdJJBKcP38eGo2m3XPp6elt/tbpdL1VFtMFtgpnIqRSKWJjY2Fufvt5mkwmw6RJk3qpKqYrLEAmZP78+bdduohEIixYsKDLkDG9h20DmRC1Wg1HR8c220G3SkpKQkRERC9WxdwOWwKZELFYjIceeggikajD511dXREeHt7LVTG3wwJkYubNm4fm5uZ2w0UiERYvXgyBQMBDVUxn2CqcidHpdJDL5VAoFO2eu3TpEkJDQ3moiukMWwKZGKFQiPnz57dbjQsMDGThMUEsQCZo7ty5bVbjLCwssGjRIh4rYjrDVuFMEBHB09MThYWFAACBQIAbN27Ax8eH58qYW7ElkAkSCARYuHAhLCwsIBAIMHr0aBYeE8UCZKLmzp3LHdazYMECnqthOsO+0u4F1dXV0Ol0qKurQ1NTExobGwEA9fX10Gq1HY4P3Pzep7S0FGKxGD/88ANsbW07PApBJpMBuLmr29raGlZWVrC0tOx0fMZ42DZQFxQKBcrKylBRUQGFQoGamhruUVtb2+7v2tpaqFQqqNXqTgPS22QyG
czNzWFrawtra2vY2dnB3t6ee9z6t4ODA5ycnODk5AS5XA4zM7ai0pl7NkAlJSUoKCjgHqWlpSgtLUVFRQVKS0u50Nz6paa1tXWnHzx7e3tIpdJ2SwB7e3uYm5tDKpVySwng5pEHEomkXW2dLTlalkytabVa1NfXAwC3dGtsbERTUxMX4NZLwMbGxjahv3VGUFtb26Z9MzMzLkzOzs4YOHAg5HI5nJ2d4ebmBi8vL3h4eMDNze2eXNr12wCVl5fjypUruHr1KvLy8pCXl4f8/HwUFBSgsLAQTU1NAG5usDs7O3MPuVyOgQMHwtnZGU5OTtzvcrkcDg4OsLCw4HnKehYRQaFQoLy8vN3MpKSkhPu9uLgYJSUl3HaaUCiEs7MzvL294eHhAQ8PD3h7e8Pf3x/+/v7w9PTsl0uyPh2gpqYmZGRk4MqVK7h27Rr38+rVq9yc1NraGt7e3vD29oa7uzs8PDzg6ekJLy8vuLu7w93dvdNjz5jb0+v1KC0tRV5eHrckz8/P52ZUubm5qKqqAnBzadsSpoCAAPj7+2Pw4MEIDQ2FnZ0dz1Ny9/pMgKqrq0inOtcAACAASURBVJGRkYHU1FSkpqYiMzMT6enpaGpqgrm5OTw9PeHr6wtfX18EBwcjJCQEvr6+8Pb27pdzvr6iuroa2dnZ3CMjIwOZmZm4evUqt+rp4uKCkJAQBAcHY+TIkRg5ciQCAwMhFAp5rr5rJhmgmpoanDlzhnukpKRwx4Z5enpi6NChGDp0KIYNG4ahQ4di0KBB9+T6d1+Xl5eHS5cu4eLFi0hLS8PFixdx7do16HQ6WFlZYfjw4YiIiMCYMWMQEREBd3d3vktuxyQCdOXKFZw4cQJJSUlITk5GVlYWiAh+fn4YM2YMwsLCuMDY29vzXS7Tg1QqFTIyMpCWlobU1FScPn0a6enp0Ol0cHd35wI1btw4jBo1ivelFC8BqqioQGJiIhISEhAfH4/c3FxYW1tj+PDhGDlyJMaPH4+oqCjI5fLeLo0xQUqlEhcuXEBqaipOnTqF48ePo7y8HDY2NoiIiEB0dDSio6MxcuTIXq+tVwJERDhz5gz279+P+Ph4XLp0Cebm5m0mPiwsjK2GMd2WmZmJhIQEJCQkIDExEfX19fD09ERMTAxmzZqFmJgYWFpa9ngdPRYgnU6HkydPYt++fdi/fz+Kiorg7++P6dOnIzo6GlFRUbCxsemJrpl7jFarRXJyMhISEvDrr78iOTkZtra2mD59OmbPno1p06bBysqqR/o2eoDS09Px+eef47vvvkN5eTlCQkIwe/ZszJ49G0OHDjVmVwzTocLCQuzfvx/79+/HyZMnYWlpibi4ODzxxBOYPHmycc/qJSNQKpW0c+dOGjt2LAEgPz8/Wr9+PWVlZRmjeYa5a6WlpfSf//ynzWdz06ZNVFpaapT2DQpQRUUFvfzyy2Rvb08ikYjmzJlDR48eJb1eb5TiGMaYLl26RM899xzJZDISiUQ0b948yszMNKjNuwpQZWUlvfLKK2RjY0NyuZzeeecdKisrM6gQhuktjY2NtHPnTgoJCSEzMzOaN28eXb58+a7auqMANTc30/r168nW1pacnJzo3XffpYaGhrvqmGH4ptPp6LvvvqPg4GASCoX0+OOPU0VFxR210e0AnT9/noYNG0ZWVlb09ttvU319/R0XzDCmSKfT0Z49e8jDw4Pkcjn98MMP3f7fLgOk1Wrp9ddfJwsLC4qMjKRr164ZVGx/V1paSnv37qUNGzbwXQpzh2pqauiJJ54ggUBAc+bMIYVC0eX/3DZADQ0NFBsbS2KxmLZt28Z2DnQhMzOTnn76aQJAgwcP5rsckxUWFkYvvfQS32V06siRI+Tm5kaBgYF048aN247b6WHKarUaM2fORHJyMn7//Xc888wzvF0Vs6CgoE+0GxQUhM2bNxu1TVNhzNfKx8cHYrHYaO0Z25QpU5CcnAyJRIKJEyciPz+/85E7S9bixYvJ3t6e/vzzT6Mn/E5kZ2fT+PHj+0y7RNTvlkA9+VqZsqqqKho6dCgNGTKEGhsbOxynwyXQgQMHsHPnTuzatQvDhg3roZx3rbCwELGxsaioqOgT7fZH9/Jr5eDggAMHDqCgoABvvPFGxyN1lKohQ4bQo48+2qPp7o7169cTALKzs6Ply5dzwxsbG2nTpk20ZMkSGjlyJE2ePJkuXrxIRER//vknRUdHEwCKjY2lyspKeumll8jd3Z2++uqr27bbHfX19fTWW2/R/Pnz6bnnnqPIyEjasmVLm+1D3LIEul29RERXrlyh2bNn08svv0yPPfYYjR8/ntLS0kiv19PPP/9My5YtIzc3N1IoFLRw4UJycHCgkJAQSklJ6bIPrVZLv//+Oz3//PPk5eVFhYWFFBkZSR4eHt3aSL71teqqvc6mhejmDqm9e/fSwoULacKECXc0fXz66KOPSCwWU0lJSbvn2gXo4sWLBIDOnDnTK8V15dYPIxHRE0880eYwoZiYGJLL5VRbW0tEN3d+BAUFkY+PD6nVaoqLi6MrV6502W5XmpubKSoqih577DHS6XRERLRjxw4CQL/88kunbXdV76BBg8jX15frw87OjkJCQkiv11NBQQFZW1sTANqwYQPl5ubSrl27CACFhYV12Ud5eTmdOnWKJBIJAaCNGzfS0aNHaenSpd3+KqL19KjV6tu219m0tMjLy+Pau5Pp45NarSapVErbt29v91y7AH355ZdkbW1tMnvcbv0wnjlzhgB0+Dh48CA33tmzZ0koFFJERATt2LGjy3a7Y/PmzQSgzbfWGo2GduzY0WZu3rrt7tS7efNm2rNnDxHd/E7C19eXzM3NufYCAgKo9cqCXq8nuVxOIpGo2320tFFVVXVH03zr9Nxa063tdTUter2+XXtdTZ8peOCBB2jJkiXthrc7Aaeurg5SqdRk70OTkpKC4OBgZGRk3Ha80aNHY/Xq1XjnnXfw8ccfG6XvxMREAGhzarG5uTkWL15sUL0rV65EQ0MDtm/fDoVCgaampjbXk7v1vRAIBJDJZCgvL+92Hy1tODg4dD6Bd6Cz9u50Wjoaduv0mQJ7e3vU1NS0G95uJ4KrqysqKipue5tBPlVVVSEnJ6fD+lrfX1Sv1+PGjRvw8PDAwoULuctYGaKsrAwAcO3aNaPWe/bsWQwZMgS+vr54/fXX7/g8qe6+Jr3B0GkxVTk5OXBzc2s3vF2A7r//fhARDh482CuFdUfrOVhgYCBUKhXefffdNuNkZmZi27Zt3N/vvfceHnzwQezYsQPp6elYt27dbdvtjpY9km+//Tb0ej03PDc3F//73/86/J/u1Ltw4UJoNBpMmzYNALi2qZunanX3NTFEd18rQ6fFFBUUFODcuXOIjo5u/2RH63uPPPIIhYSEUFNTU8+uWHaDn58fWVlZUV5eHhERqVQq8vHxIQC0ZMkS2r17N61du5ZiYmK4jfKkpKQ2exGfeuopMjMzo8TExE7b7Y4bN26QlZUVAaCJEyfStm3b6LXXXqMnn3yS26mgVCoJAHl5eXW7XqlUSgAoPj6e
[... remainder of the base64-encoded image/png output (the rendered graph of the word_tokenizer step) omitted ...]\n", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "intermediate_step = glove_dpcnn.get_step('word_tokenizer')\n", + "intermediate_step" + ] + },
+ { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'char_level': False,\n", + " 'maxlen': 200,\n", + " 'num_words': 10000,\n", + " 'tokenizer': }" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "intermediate_step.transformer.__dict__" + ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Training/Inference\n", + "\n", + "Just run `fit_transform` on the very last step and all upstream steps will be fitted recursively:\n", + "```python\n", + " data_train = {'input': {'meta': train,\n", + " 'meta_valid': valid,\n", + " 'train_mode': True,\n", + " },\n", + " }\n", + " train_predictions = glove_dpcnn.fit_transform(data_train)\n", + "```\n", + "\n", + "Prediction is done with `transform`:\n", + "\n", + "```python\n", + " data_inference = {'input': {'meta': test,\n", + " 'meta_valid': None,\n", + " 'train_mode': False,\n", + " },\n", + " }\n", + " test_predictions = 
glove_dpcnn.transform(data_inference)\n", + "```\n" + ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# What is missing?\n", + "\n", + "* tests\n", + "* docstrings\n", + "* automatic sklearn/steps conversion\n", + "\n", + "```python\n", + "\n", + "from steps.base import make_step\n", + "\n", + "step_transformer = make_step(SklearnTransformer())\n", + "step_transformer = make_step(Pipeline())\n", + "```\n", + "\n", + "* automatic grid search\n", + "\n", + "```python\n", + "\n", + "xgboost_ensemble = Step(name='xgboost_ensemble',\n", + " transformer=XGBoostClassifierMultilabel(**config.xgboost_ensemble),\n", + " input_data=['input'],\n", + " cache_dirpath=CACHE_DIR,\n", + " grid_search_params=parameter_space,\n", + " grid_runs=100,\n", + " grid_search_method='hyperopt')\n", + "```\n", + "\n", + "* parallelization\n", + "* automatic multistep bagging" + ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Where is it?\n", + "https://github.com/neptune-ml/steps/tree/dev" + ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Let's talk toxic\n", + "https://github.com/neptune-ml/kaggle-toxic-starter" + ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "dl_py3", + "language": "python", + "name": "dl_py3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}
diff --git a/tutorials/simple_step_example.ipynb b/tutorials/simple_step_example.ipynb new file mode 100644 index 0000000..d903645 --- /dev/null +++ b/tutorials/simple_step_example.ipynb @@ -0,0 +1,288 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import steps\n", + "from steps.base import Step, BaseTransformer, hstack_inputs\n", + "from steps.sklearn.models import make_transformer" + ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.datasets import load_diabetes\n", + "import sklearn.preprocessing as prep\n", + "from sklearn.ensemble import RandomForestRegressor as RFR\n", + "from sklearn.externals import joblib" + ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import pandas as pd\n", + "\n", + "class MinMaxScaler(BaseTransformer):\n", + " def __init__(self):\n", + " self.scaler = prep.MinMaxScaler()\n", + " \n", + " def fit(self, X):\n", + " self.scaler.fit(X)\n", + " return self\n", + "\n", + " def transform(self, X):\n", + " X_ = self.scaler.transform(X)\n", + " return {'X':X_}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump(self.scaler, filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.scaler = joblib.load(filepath)\n", + " return self\n", + " \n", + "class Normalizer(BaseTransformer):\n", + " def __init__(self):\n", + " self.scaler = prep.Normalizer()\n", + " \n", + " def fit(self, X):\n", + " self.scaler.fit(X)\n", + " return self\n", + "\n", + " def transform(self, X):\n", + " X_ = self.scaler.transform(X)\n", + " return {'X':X_}\n", + " \n", + " def save(self, 
filepath):\n", + " joblib.dump(self.scaler, filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.scaler = joblib.load(filepath)\n", + " return self\n", + " \n", + "class RandomForest(BaseTransformer):\n", + " def __init__(self):\n", + " self.estimator = RFR()\n", + " \n", + " def fit(self, X, y):\n", + " self.estimator.fit(X, y)\n", + " return self\n", + "\n", + " def transform(self, X, **kwargs):\n", + " y_pred = self.estimator.predict(X)\n", + " return {'y_pred':y_pred}\n", + " \n", + " def save(self, filepath):\n", + " joblib.dump(self.estimator, filepath)\n", + " \n", + " def load(self, filepath):\n", + " self.estimator = joblib.load(filepath)\n", + " return self\n", + " \n", + "def hstack_vector_inputs(inputs):\n", + " inputs_ = [input_.reshape(-1,1) for input_ in inputs]\n", + " return np.hstack(inputs_)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "X,y = load_diabetes(return_X_y=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!ls /mnt/ml-team/minerva/debug/example_problem/outputs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "CACHE_DIR = '/mnt/ml-team/minerva/debug/example_problem'\n", + "\n", + "scaler = Step(name='scaler',\n", + " transformer=MinMaxScaler(),\n", + " input_data=['input'],\n", + " adapter={'X':[('input','X')]},\n", + " cache_dirpath=CACHE_DIR\n", + " )\n", + "\n", + "normalizer = Step(name='normalizer',\n", + " transformer=Normalizer(),\n", + " input_data=['input'],\n", + " adapter={'X':[('input','X')]},\n", + " cache_dirpath=CACHE_DIR,\n", + " cache_output=True\n", + " )\n", + "\n", + "classifer = Step(name='clf',\n", + " transformer=RandomForest(),\n", + " input_data=['input'],\n", + " input_steps=[scaler, normalizer], \n", + " adapter={'y':([('input','y')]),\n", + " 'X':([('scaler','X'),\n", + " ('normalizer','X')], hstack_inputs)\n", + " },\n", + " cache_dirpath=CACHE_DIR\n", + " )\n", + "\n", + "scaler1 = Step(name='scaler1',\n", + " transformer=MinMaxScaler(),\n", + " input_data=['input'],\n", + " adapter={'X':[('input','X')]},\n", + " cache_dirpath=CACHE_DIR\n", + " )\n", + "\n", + "normalizer = Step(name='normalizer',\n", + " transformer=Normalizer(),\n", + " input_data=['input'],\n", + " adapter={'X':[('input','X')]},\n", + " cache_dirpath=CACHE_DIR\n", + " )\n", + "\n", + "classifer1 = Step(name='clf1',\n", + " transformer=RandomForest(),\n", + " input_data=['input'],\n", + " input_steps=[scaler1, normalizer], \n", + " adapter={'y':([('input','y')]),\n", + " 'X':([('scaler1','X'),\n", + " ('normalizer','X')], hstack_inputs)\n", + " },\n", + " cache_dirpath=CACHE_DIR\n", + " )\n", + "\n", + "ensemble = Step(name='ensemble',\n", + " transformer=RandomForest(),\n", + " input_data=['input'],\n", + " input_steps=[classifer, classifer1], \n", + " adapter={'y':([('input','y')]),\n", + " 'X':([('clf','y_pred'),\n", + " ('clf1','y_pred')], hstack_vector_inputs)\n", + " },\n", + " cache_dirpath=CACHE_DIR,\n", + " force_fitting=True\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ensemble" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data = {'input': {'X': X,\n", + " 'y': y,\n", + " },\n", + " }\n", + "\n", + "ensemble.clean_cache()\n", + "output = ensemble.fit_transform(data)" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ensemble" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "output['y_pred'].shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!ls /mnt/ml-team/minerva/debug/example_problem/outputs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "clf = joblib.load('/mnt/ml-team/minerva/debug/example_problem/outputs/clf')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "clf['y_pred']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "cpu py3", + "language": "python", + "name": "cpu_py3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}