From 58ef31926f4cde766d99955ee5e509f529c40d13 Mon Sep 17 00:00:00 2001 From: David Josephs <42522233+josephsdavid@users.noreply.github.com> Date: Mon, 27 Jun 2022 15:55:02 -0500 Subject: [PATCH 01/24] Add docstrings, todo line formatting --- src/MLJFlux.jl | 898 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 897 insertions(+), 1 deletion(-) diff --git a/src/MLJFlux.jl b/src/MLJFlux.jl index 84bce73f..34c7d95d 100644 --- a/src/MLJFlux.jl +++ b/src/MLJFlux.jl @@ -1,4 +1,4 @@ -module MLJFlux +module MLJFlux export CUDALibs, CPU1 @@ -37,4 +37,900 @@ MLJModelInterface.metadata_pkg.((NeuralNetworkRegressor, export NeuralNetworkRegressor, MultitargetNeuralNetworkRegressor export NeuralNetworkClassifier, ImageClassifier +""" +$(MMI.doc_header(NeuralNetworkRegressor)) + +`NeuralNetworkRegressor`: A neural network model for making deterministic +predictions of a `Continuous` target, given a table of `Continuous` features. + +# Training data + +In MLJ or MLJBase, bind an instance `model` to data with + mach = machine(model, X, y) + +Where + +- `X`: is any table of input features (eg, a `DataFrame`) whose columns + are of scitype `Continuous`; check the scitype with `schema(X)` +- `y`: is the target, which can be any `AbstractVector` whose element + scitype is `Continuous`; check the scitype with `scitype(y)` + + +# Hyper-parameters + +- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. + Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder + using the `@builder` macro, see examples for further information. +- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating + of the weights of the network. For further reference, see either the examples or + [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). + To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to + start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `loss=Flux.mse`: The loss function which the network will optimize. Should be a function + which can be called in the form `loss(yhat, y)`. + Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). + For a regression task, the most natural loss functions are: + - `Flux.mse` + - `Flux.mae` + - `Flux.msle` + - `Flux.huber_loss` +- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents + one pass through the entirety of the training dataset. +- `batch_size::Int=1`: The batch size to be used for training. The batch size represents + the number of samples per update of the networks weights. Typcally, batch size should be + somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, + while larger batch sizes lead towards smoother training loss curves. + In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), + and stick with it, and only tune the learning rate. In most examples, batch size is set + in powers of twos, but this is fairly arbitrary. +- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value + in the range `[0, ∞)`. +- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. + A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. 
+- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a + machine if the associated optimiser has changed. If true, the associated machine will + retrain from scratch on `fit`, otherwise it will not. +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. + For training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. +- `finaliser=Flux.softmax`: The final activation function of the neural network. + Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). + + +# Operations + +- `predict(mach, Xnew)`: return predictions of the target given new + features `Xnew` having the same Scitype as `X` above. Predictions are + deterministic. + + +# Fitted parameters + +The fields of `fitted_params(mach)` are: + +- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. + + +# Report + +The fields of `report(mach)` are: + +- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. + +# Examples + +In this example we build a regression model using the Boston house price dataset +```julia + using MLJ + using MLJFlux + using Flux + using Plots +``` +First, we load in the data, with target `:MEDV`. We load in all features except `:CHAS`: +```julia +data = OpenML.load(531); # Loads from https://www.openml.org/d/531 + +y, X = unpack(data, ==(:MEDV), !=(:CHAS); rng=123); + +scitype(y) +schema(X) +``` +Since MLJFlux models do not handle ordered factos, we can treat `:RAD` as `Continuous`: +```julia +X = coerce(X, :RAD=>Continuous) +``` +Lets also make a test set: +```julia +(X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); +``` +Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. +```julia +builder = MLJFlux.@builder begin + init=Flux.glorot_uniform(rng) + Chain(Dense(n_in, 64, relu, init=init), + Dense(64, 32, relu, init=init), + Dense(32, 1, init=init)) +end +``` +Finally, we can define the model! +```julia +NeuralNetworkRegressor = @load NeuralNetworkRegressor + model = NeuralNetworkRegressor(builder=builder, + rng=123, + epochs=20) +``` +For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! +```julia +pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) +``` +If we fit with a high verbosity (>1), we will see the losses during training. 
We can also see the losses in the output of `report(mach)` + +```julia +mach = machine(pipe, X, y) +fit!(mach, verbosity=2) + +# first element initial loss, 2:end per epoch training losses +report(mach).transformed_target_model_deterministic.training_losses + +``` + +## Experimenting with learning rate + +We can visually compare how the learning rate affects the predictions: +```julia +plt = plot() + +rates = 10. .^ (-5:0) + +foreach(rates) do η + pipe.transformed_target_model_deterministic.model.optimiser.eta = η + fit!(mach, force=true, verbosity=0) + losses = + report(mach).transformed_target_model_deterministic.model.training_losses[3:end] + plot!(1:length(losses), losses, label=η) +end +plt #!md + +savefig(joinpath("assets", "learning_rate.png")) + +pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 +``` + +## Using Iteration Controls + +We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as updte the traces. +```julia +# For initializing or clearing the traces: + +clear() = begin + global losses = [] + global training_losses = [] + global epochs = [] + return nothing +end + + # And to update the traces: + +update_loss(loss) = push!(losses, loss) +update_training_loss(report) = + push!(training_losses, + report.transformed_target_model_deterministic.model.training_losses[end]) +update_epochs(epoch) = push!(epochs, epoch) +``` +For further reference of controls, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: +```julia +controls=[Step(1), + NumberSinceBest(6), + InvalidValue(), + TimeLimit(1/60), + WithLossDo(update_loss), + WithReportDo(update_training_loss), +WithIterationsDo(update_epochs)] + + +iterated_pipe = + IteratedModel(model=pipe, + controls=controls, + resampling=Holdout(fraction_train=0.8), + measure = l2) +``` +Next, we can clear the traces, fit the model, and plot the traces: +```julia +clear() +mach = machine(iterated_pipe, X, y) +fit!(mach) + +plot(epochs, losses, + xlab = "epoch", + ylab = "mean sum of squares error", + label="out-of-sample", + legend = :topleft); +scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md + +savefig(joinpath("assets", "loss.png")) +``` + +### Brief note on iterated models + +Training an `IteratedModel` means holding out some data (80% in this case) so an out-of-sample loss can be tracked and used in the specified stopping criterion, `NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by `IteratedModel` (our pipeline model) is retrained on all data for the same number of iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned parameters. 
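As a quick check on where training actually stopped, we can inspect the trace vectors populated by the controls above (a sketch only; it simply re-uses the `epochs` and `losses` globals defined earlier):
```julia
length(losses)   # number of control cycles for which an out-of-sample loss was recorded
epochs[end]      # training epochs reached when the stop was triggered
```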
+ +## Evaluating Iterated Models + +We can evaluate our model with the `evaluate!` function: +```julia +e = evaluate!(mach, + resampling=CV(nfolds=8), + measures=[l1, l2]) + +using Measurements +l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) +@show l1_loss +``` +We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). + +## Comparison with other models on the test set + +Although we cannot assign them statistical significance, here are comparisons, on the untouched test set, of the eror of our self-iterating neural network regressor with a couple of other models trained on the same data (using default hyperparameters): +```julia +function performance(model) + mach = machine(model, X, y) |> fit! + yhat = predict(mach, Xtest) + l1(yhat, ytest) |> mean +end +performance(iterated_pipe) + +three_models = [(@load EvoTreeRegressor)(), # tree boosting model + (@load LinearRegressor pkg=MLJLinearModels)(), + iterated_pipe] + +errs = performance.(three_models) + +(models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty +``` + +See also +[`MultitargetNeuralNetworkRegressor`](@ref) +""" +NeuralNetworkRegressor + +""" +$(MMI.doc_header(MultitargetNeuralNetworkRegressor)) + +`MultitargetNeuralNetworkRegressor`: A neural network model for making deterministic +predictions of a `Continuous` multi-target, presented as a table, given a table of `Continuous` features. + +# Training data + +In MLJ or MLJBase, bind an instance `model` to data with + mach = machine(model, X, y) + +Where + +- `X`: is any table of input features (eg, a `DataFrame`) whose columns + are of scitype `Continuous`; check the scitype with `schema(X)` +- `y`: is the target, which can be any table of output targets whose element + scitype is `Continuous`; check the scitype with `schema(y)` + + +# Hyper-parameters + +- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. +- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `loss=Flux.mse`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a regression task, the most natural loss functions are: + - `Flux.mse` + - `Flux.mae` + - `Flux.msle` + - `Flux.huber_loss` +- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. +- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. 
In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. +- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. +- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. +- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. +- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). + + +# Operations + +- `predict(mach, Xnew)`: return predictions of the target given new + features `Xnew` having the same Scitype as `X` above. Predictions are + deterministic. + + +# Fitted parameters + +The fields of `fitted_params(mach)` are: + +- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. + + +# Report + +The fields of `report(mach)` are: + +- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. + +# Examples + +In this example we build a regression model using the Boston house price dataset. +```julia +using MLJ +using MLJFlux +using Flux +using Plots +using MLJBase: augment_X +``` +First, we generate some data: +```julia +X = augment_X(randn(10000, 8), true); +θ = randn((9,2)); +y = X * θ; +X = MLJ.table(X) +y = MLJ.table(y) + +schema(y) +schema(X) +``` +Lets also make a test set: +```julia +(X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); +``` +Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. +```julia +builder = MLJFlux.@builder begin + init=Flux.glorot_uniform(rng) + Chain(Dense(n_in, 64, relu, init=init), + Dense(64, 32, relu, init=init), + Dense(32, 1, init=init)) +end +``` +Finally, we can define the model! +```julia +MultitargetNeuralNetworkRegressor = @load MultitargetNeuralNetworkRegressor + model = MultitargetNeuralNetworkRegressor(builder=builder, + rng=123, + epochs=20) +``` +For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! +```julia +pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) +``` +If we fit with a high verbosity (>1), we will see the losses during training. 
We can also see the losses in the output of `report(mach)` + +```julia +mach = machine(pipe, X, y) +fit!(mach, verbosity=2) + +# first element initial loss, 2:end per epoch training losses +report(mach).transformed_target_model_deterministic.training_losses + +``` + +## Experimenting with learning rate + +We can visually compare how the learning rate affects the predictions: +```julia +plt = plot() + +rates = 10. .^ (-5:0) + +foreach(rates) do η + pipe.transformed_target_model_deterministic.model.optimiser.eta = η + fit!(mach, force=true, verbosity=0) + losses = + report(mach).transformed_target_model_deterministic.model.training_losses[3:end] + plot!(1:length(losses), losses, label=η) +end +plt #!md + +savefig(joinpath("assets", "learning_rate.png")) + + +pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 + +``` + +## Using Iteration Controls + +We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as updte the traces. +```julia +# For initializing or clearing the traces: + +clear() = begin + global losses = [] + global training_losses = [] + global epochs = [] + return nothing +end + +# And to update the traces: + +update_loss(loss) = push!(losses, loss) +update_training_loss(report) = + push!(training_losses, + report.transformed_target_model_deterministic.model.training_losses[end]) +update_epochs(epoch) = push!(epochs, epoch) +``` +For further reference of controls, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: +```julia +controls=[Step(1), + NumberSinceBest(6), + InvalidValue(), + TimeLimit(1/60), + WithLossDo(update_loss), + WithReportDo(update_training_loss), +WithIterationsDo(update_epochs)] + +iterated_pipe = + IteratedModel(model=pipe, + controls=controls, + resampling=Holdout(fraction_train=0.8), + measure = l2) +``` +Next, we can clear the traces, fit the model, and plot the traces: +```julia +clear() +mach = machine(iterated_pipe, X, y) +fit!(mach) + +plot(epochs, losses, + xlab = "epoch", + ylab = "mean sum of squares error", + label="out-of-sample", + legend = :topleft); +scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md + +savefig(joinpath("assets", "loss.png")) +``` + +### Brief note on iterated models + +Training an `IteratedModel` means holding out some data (80% in this case) so an out-of-sample loss can be tracked and used in the specified stopping criterion, `NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by `IteratedModel` (our pipeline model) is retrained on all data for the same number of iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned parameters. 
+ +## Evaluating Iterated Models + +We can evaluate our model with the `evaluate!` function: +```julia +e = evaluate!(mach, + resampling=CV(nfolds=8), + measures=[l1, l2]) + +using Measurements +l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) +@show l1_loss +``` +We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). + +## Comparison with other models on the test set + +Although we cannot assign them statistical significance, here are comparisons, on the untouched test set, of the eror of our self-iterating neural network regressor with a couple of other models trained on the same data (using default hyperparameters): +```julia + +function performance(model) + mach = machine(model, X, y) |> fit! + yhat = predict(mach, Xtest) + l1(yhat, ytest) |> mean +end +performance(iterated_pipe) + +three_models = [(@load EvoTreeRegressor)(), # tree boosting model + (@load LinearRegressor pkg=MLJLinearModels)(), + iterated_pipe] + +errs = performance.(three_models) + +(models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty + + +``` +See also +[`NeuralNetworkRegressor`](@ref) +""" +MultitargetNeuralNetworkRegressor +""" +$(MMI.doc_header(NeuralNetworkClassifier)) + +`NeuralNetworkClassifier`: a neural network model for making probabilistic predictions +of a Multiclass or OrderedFactor target, given a table of Continuous features. ) + TODO: + +# Training data + +In MLJ or MLJBase, bind an instance `model` to data with + mach = machine(model, X, y) + +Where + +- `X`: is any table of input features (eg, a `DataFrame`) whose columns + are of scitype `Continuous`; check the scitype with `schema(X)` +- `y`: is the target, which can be any `AbstractVector` whose element + scitype is `Multiclass` or `OrderedFactor` with `n_out` classes; + check the scitype with `scitype(y)` + + +# Hyper-parameters + +- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. +- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification task, the most natural loss functions are: + - `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. + - `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. + - `Flux.binarycrossentropy`: Typically used as loss in binary classification, with labels in a 1-hot encoded format. 
+ - `Flux.logitbinarycrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `sigmoid` and then calculating binary crossentropy. + - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. + - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples. + - `Flux.binary_focal_loss`: Binary version of the above +- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. +- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. +- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. +- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. +- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. +- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a classification task, `softmax` is used for multiclass, single label regression, `sigmoid` is used for either binary classification or multi label classification (when there are multiple possible labels for a given sample). + + +# Operations + +- `predict(mach, Xnew)`: return predictions of the target given new + features `Xnew` having the same Scitype as `X` above. Predictions are + probabilistic. +- `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions + returned above. + + +# Fitted parameters + +The fields of `fitted_params(mach)` are: + +- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. + + +# Report + +The fields of `report(mach)` are: + +- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. + +# Examples + +In this example we build a classification model using the Iris dataset. +```julia +using MLJ +using Flux +import RDatasets + +using Random +Random.seed!(123) + +MLJ.color_off() + +using Plots +pyplot(size=(600, 300*(sqrt(5)-1))); +``` +This is a very basic example, using a default builder and no standardization. +For a more advance illustration, see [`NeuralNetworkRegressor`](@ref) or [`ImageClassifier`](@ref). 
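The example below relies on the default builder. Purely as a sketch (the hyper-parameter values are arbitrary and this object is not used in what follows), an explicit builder could be supplied instead, assuming `NeuralNetworkClassifier` has been loaded as shown further down:
```julia
# illustration only: a non-default builder with arbitrary hyper-parameters
clf_custom = NeuralNetworkClassifier(builder=MLJFlux.Short(n_hidden=5, dropout=0.2))
```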
First, we can load the data: +```julia +iris = RDatasets.dataset("datasets", "iris"); +y, X = unpack(iris, ==(:Species), colname -> true, rng=123); +NeuralNetworkClassifier = @load NeuralNetworkClassifier +clf = NeuralNetworkClassifier() +``` +Next, we can train the model: +```julia +import Random.seed!; seed!(123) +mach = machine(clf, X, y) +fit!(mach) +``` +We can train the model in an incremental fashion with the `optimizer_changes_trigger_retraining` flag set to false (which is by default). Here, we change the number of iterations and the learning rate of the optimiser: +```julia +clf.optimiser.eta = clf.optimiser.eta * 2 +clf.epochs = clf.epochs + 5 + +# note that if the optimizer_changes_trigger_retraining flag was set to true +# the model would be completely retrained from scratch because the optimizer was +# updated +fit!(mach, verbosity=2); +``` +We can inspect the mean training loss using the `cross_entropy` function: +```julia + +training_loss = cross_entropy(predict(mach, X), y) |> mean + +``` +And we can access the Flux chain (model) using `fitted_params`: +```julia +chain = fitted_params(mach).chain +``` +Finally, we can see how the out-of-sample performance changes over time, using the `learning_curve` function +```julia +r = range(clf, :epochs, lower=1, upper=200, scale=:log10) +curve = learning_curve(clf, X, y, + range=r, + resampling=Holdout(fraction_train=0.7), + measure=cross_entropy) +using Plots +plot(curve.parameter_values, + curve.measurements, + xlab=curve.parameter_name, + xscale=curve.parameter_scale, + ylab = "Cross Entropy") + +savefig("iris_history.png") +``` +See also +[`ImageClassifier`](@ref) +""" +NeuralNetworkClassifier +""" +$(MMI.doc_header(ImageClassifier)) + +`ImageClassifier`: A neural network model for making probabilistic +"predictions of a `GrayImage` target, given a table of `Continuous` features. + +# Training data + +In MLJ or MLJBase, bind an instance `model` to data with +mach = machine(model, X, y) +Where +- `X`: is any `AbstractVector` of input features (eg, a `DataFrame`) whose items + are of scitype `GrayImage`; check the scitype with `scitype(X)` +- `y`: is the target, which can be any `AbstractVector` whose element + scitype is `Multiclass` or `OrderedFactor` with `n_out` classes; + check the scitype with `scitype(y)` + + +# Hyper-parameters + +- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. +- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification task, the most natural loss functions are: + - `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. 
+ - `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. + - `Flux.binarycrossentropy`: Typically used as loss in binary classification, with labels in a 1-hot encoded format. + - `Flux.logitbinarycrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `sigmoid` and then calculating binary crossentropy. + - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. + - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples. + - `Flux.binary_focal_loss`: Binary version of the above +- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. +- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. +- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. +- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. +- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. +- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). + + +# Operations + +- `predict(mach, Xnew)`: return predictions of the target given new + features `Xnew` having the same Scitype as `X` above. Predictions are + probabilistic. +- `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions + returned above. + + +# Fitted parameters + +The fields of `fitted_params(mach)` are: +- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. + + +# Report + +The fields of `report(mach)` are: +- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. 
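For orientation, here is a minimal sketch of accessing the items documented above; it assumes a hypothetical machine `mach` that has already been fitted and some new images `Xnew`:
```julia
fitted_params(mach).chain         # the trained Flux chain
report(mach).training_losses      # penalized training-loss history
probs = predict(mach, Xnew)       # probabilistic predictions
modes = predict_mode(mach, Xnew)  # corresponding point predictions
```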
+ +# Examples + +In this example we use MLJ to classify the MNIST image dataset +```julia +using MLJ +using Flux +import MLJFlux +import MLJIteration # for `skip` + +MLJ.color_off() + +using Plots +pyplot(size=(600, 300*(sqrt(5)-1))); +``` +First we want to download the MNIST dataset, and unpack into images and labels +```julia +import MLDatasets: MNIST + +ENV["DATADEPS_ALWAYS_ACCEPT"] = true +images, labels = MNIST.traindata(); +``` +In MLJ, integers cannot be used for encoding categorical data, so we must coerce them into the `Multiclass` [scientific type](https://juliaai.github.io/ScientificTypes.jl/dev/). For more in this, see [Working with Categorical Data](https://alan-turing-institute.github.io/MLJ.jl/dev/working_with_categorical_data/): +```julia +labels = coerce(labels, Multiclass); +images = coerce(images, GrayImage); + +# Checking scientific types: + +@assert scitype(images) <: AbstractVector{<:Image} +@assert scitype(labels) <: AbstractVector{<:Finite} + +images[1] +``` +For general instructions on coercing image data, see [type coercion for image data](https://alan-turing-institute.github.io/ScientificTypes.jl/dev/%23Type-coercion-for-image-data-1) +We start by defining a suitable `builder` object. This is a recipe +for building the neural network. Our builder will work for images of +any (constant) size, whether they be color or black and white (ie, +single or multi-channel). The architecture always consists of six +alternating convolution and max-pool layers, and a final dense +layer; the filter size and the number of channels after each +convolution layer is customisable. +```julia +import MLJFlux + +struct MyConvBuilder + filter_size::Int + channels1::Int + channels2::Int + channels3::Int +end + +make2d(x::AbstractArray) = reshape(x, :, size(x)[end]) + +function MLJFlux.build(b::MyConvBuilder, rng, n_in, n_out, n_channels) + k, c1, c2, c3 = b.filter_size, b.channels1, b.channels2, b.channels3 + mod(k, 2) == 1 || error("`filter_size` must be odd. ") + p = div(k - 1, 2) # padding to preserve image size + init = Flux.glorot_uniform(rng) + front = Chain( + Conv((k, k), n_channels => c1, pad=(p, p), relu, init=init), + MaxPool((2, 2)), + Conv((k, k), c1 => c2, pad=(p, p), relu, init=init), + MaxPool((2, 2)), + Conv((k, k), c2 => c3, pad=(p, p), relu, init=init), + MaxPool((2 ,2)), + make2d) + d = Flux.outputsize(front, (n_in..., n_channels, 1)) |> first + return Chain(front, Dense(d, n_out, init=init)) +end +``` +It is important to note that in our `build` function, there is no final softmax. This is applie by default in all MLJFlux classifiers, using the `finaliser` hyperparameter of the classifier. Now that we have our builder defined, we can define the actual moel. If you have a GPU, you can substitute in `acceleration=CudaLibs()` below. Note that in the case of convolutions, this will **greatly** increase the speed of training. +```julia +ImageClassifier = @load ImageClassifier +clf = ImageClassifier(builder=MyConvBuilder(3, 16, 32, 32), + batch_size=50, + epochs=10, + rng=123) +``` +You can add flux options such as `optimiser` and `loss` in the snippet above. Currently, `loss` must be a flux-compatible loss, and not an MLJ measure. 
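As a sketch (the particular values are illustrative only, and this object is not used below), the same constructor call with an optimiser and loss passed explicitly might look like:
```julia
clf_with_options = ImageClassifier(builder=MyConvBuilder(3, 16, 32, 32),
                                   batch_size=50,
                                   epochs=10,
                                   rng=123,
                                   optimiser=Flux.ADAM(0.001),
                                   loss=Flux.crossentropy)
```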
+Next, we can bind the model with the data in a machine, and fit the first 500 or so images: +```julia +mach = machine(clf, images, labels); + +fit!(mach, rows=1:500, verbosity=2); + +report(mach) + +chain = fitted_params(mach) + +Flux.params(chain)[2] +``` +We can tack on 20 more epochs by modifying the `epochs` field, and iteratively fit some more: +```julia +clf.epochs = clf.epochs + 20 +fit!(mach, rows=1:500); +``` +We can also make predictions and calculate an out-of-sample loss estimate, in two ways! +```julia +predicted_labels = predict(mach, rows=501:1000); +cross_entropy(predicted_labels, labels[501:1000]) |> mean +# alternative one liner! +evaluate!(mach, + resampling=Holdout(fraction_train=0.5), + measure=cross_entropy, + rows=1:1000, + verbosity=0) +``` + +## Wrapping in iteration controls + +Any iterative MLJFlux model can be wrapped in **iteration controls**, as we demonstrate next. For more on MLJ's `IteratedModel` wrapper, see the [MLJ documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/). +The "self-iterating" classifier (`iterated_clf` below) is for iterating the image classifier defined above until a stopping criterion is hit. We use the following stopping criterion: +- `Patience(3)`: 3 consecutive increases in the loss +- `InvalidValue()`: an out-of-sample loss or a training loss that is `NaN` or `±Inf` +- `TimeLimit(t=5/60)`: training time has exceeded 5 minutes. +We can specify how often these checks (and other controls) are applied using the `Step` control. Additionally, we can define controls to +- save a snapshot of the machine every N control cycles (`save_control`) +- record traces of the out-of-sample loss and training losses for plotting (`WithLossDo`) +- record mean value traces of each Flux parameter for plotting (`Callback`) +And other controls. For a full list, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). +First, we define some helper functions and some empty vectors to store traces: +```julia +make2d(x::AbstractArray) = reshape(x, :, size(x)[end]) +make1d(x::AbstractArray) = reshape(x, length(x)); + +# to extract the flux parameters from a machine +parameters(mach) = make1d.(Flux.params(fitted_params(mach))); + +# trace storage +losses = [] +training_losses = [] +parameter_means = Float32[]; +epochs = [] + +# to update traces +update_loss(loss) = push!(losses, loss) +update_training_loss(losses) = push!(training_losses, losses[end]) +update_means(mach) = append!(parameter_means, mean.(parameters(mach))); +update_epochs(epoch) = push!(epochs, epoch) +``` +Next, we can define our controls! 
We store them in a simple vector: +```julia +save_control = + MLJIteration.skip(Save(joinpath(DIR, "mnist.jlso")), predicate=3) + +controls=[Step(2), + Patience(3), + InvalidValue(), + TimeLimit(5/60), + save_control, + WithLossDo(), + WithLossDo(update_loss), + WithTrainingLossesDo(update_training_loss), + Callback(update_means), + WithIterationsDo(update_epochs) +``` +Once the controls are defined, we can instantiate and fit our "self-iterating" classifier: +```julia +iterated_clf = IteratedModel(model=clf, + controls=controls, + resampling=Holdout(fraction_train=0.7), + measure=log_loss) + +mach = machine(iterated_clf, images, labels); +fit!(mach, rows=1:500); +``` +Next we can compare the training and out-of-sample losses, as well as view the evolution of the weights: +```julia +plot(epochs, losses, + xlab = "epoch", + ylab = "root squared error", + label="out-of-sample") +plot!(epochs, training_losses, label="training") + +savefig(joinpath(DIR, "loss.png")) + +n_epochs = length(losses) +n_parameters = div(length(parameter_means), n_epochs) +parameter_means2 = reshape(copy(parameter_means), n_parameters, n_epochs)' +plot(epochs, parameter_means2, + title="Flux parameter mean weights", + xlab = "epoch") +# **Note.** The the higher the number, the deeper the chain parameter. +savefig(joinpath(DIR, "weights.png")) +``` +Since we saved our model every few epochs, we can retrieve the snapshots so we can make predictions! +```julia +mach2 = machine(joinpath(DIR, "mnist3.jlso")) +predict_mode(mach2, images[501:503]) +``` + +## Resuming training + +If we change `iterated_clf.controls` or `clf.epochs`, we can resume training from where it left off. This is very useful for long-running training sessions, where you may be interrupted by for example a bad connection or computer hibernation. +```julia +iterated_clf.controls[2] = Patience(4) +fit!(mach, rows=1:500) + +plot(epochs, losses, + xlab = "epoch", + ylab = "root squared error", + label="out-of-sample") +plot!(epochs, training_losses, label="training") +``` +See also +[`NeuralNetworkClassifier`](@ref) +""" +ImageClassifier + + end #module From 387f88c4f17077be3bc647c960fca7e9399520cc Mon Sep 17 00:00:00 2001 From: David Josephs <42522233+josephsdavid@users.noreply.github.com> Date: Mon, 27 Jun 2022 16:56:33 -0500 Subject: [PATCH 02/24] First model properly indented --- src/MLJFlux.jl | 97 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 73 insertions(+), 24 deletions(-) diff --git a/src/MLJFlux.jl b/src/MLJFlux.jl index 34c7d95d..4fe7d3bd 100644 --- a/src/MLJFlux.jl +++ b/src/MLJFlux.jl @@ -94,7 +94,8 @@ Where - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. - `finaliser=Flux.softmax`: The final activation function of the neural network. - Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). + Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include + `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). # Operations @@ -108,7 +109,8 @@ Where The fields of `fitted_params(mach)` are: -- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. +- `chain`: The trained "chain", or series of layers, functions, and activations which + make up the neural network. 
# Report @@ -116,7 +118,9 @@ The fields of `fitted_params(mach)` are: The fields of `report(mach)` are: - `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. - + all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. + penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. + epoch n-1. # Examples In this example we build a regression model using the Boston house price dataset @@ -135,7 +139,7 @@ y, X = unpack(data, ==(:MEDV), !=(:CHAS); rng=123); scitype(y) schema(X) ``` -Since MLJFlux models do not handle ordered factos, we can treat `:RAD` as `Continuous`: +Since MLJFlux models do not handle ordered factors, we can treat `:RAD` as `Continuous`: ```julia X = coerce(X, :RAD=>Continuous) ``` @@ -144,6 +148,8 @@ Lets also make a test set: (X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); ``` Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. +expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. +random initial weights of the network. ```julia builder = MLJFlux.@builder begin init=Flux.glorot_uniform(rng) @@ -160,11 +166,14 @@ NeuralNetworkRegressor = @load NeuralNetworkRegressor epochs=20) ``` For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! +not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! +magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! +neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! ```julia pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) ``` If we fit with a high verbosity (>1), we will see the losses during training. We can also see the losses in the output of `report(mach)` - +also see the losses in the output of `report(mach)` ```julia mach = machine(pipe, X, y) fit!(mach, verbosity=2) @@ -199,6 +208,8 @@ pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 ## Using Iteration Controls We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as updte the traces. 
+trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as update the traces. +`NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as update the traces. ```julia # For initializing or clearing the traces: @@ -252,7 +263,12 @@ savefig(joinpath("assets", "loss.png")) ### Brief note on iterated models -Training an `IteratedModel` means holding out some data (80% in this case) so an out-of-sample loss can be tracked and used in the specified stopping criterion, `NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by `IteratedModel` (our pipeline model) is retrained on all data for the same number of iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned parameters. +Training an `IteratedModel` means holding out some data (80% in this case) so an +out-of-sample loss can be tracked and used in the specified stopping criterion, +`NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by +`IteratedModel` (our pipeline model) is retrained on all data for the same number of +iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned +parameters. ## Evaluating Iterated Models @@ -266,11 +282,14 @@ using Measurements l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) @show l1_loss ``` -We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). +We take this estimate of the uncertainty of the generalization error with a [grain of +salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). ## Comparison with other models on the test set -Although we cannot assign them statistical significance, here are comparisons, on the untouched test set, of the eror of our self-iterating neural network regressor with a couple of other models trained on the same data (using default hyperparameters): +Although we cannot assign them statistical significance, here are comparisons, on the +untouched test set, of the eror of our self-iterating neural network regressor with a +couple of other models trained on the same data (using default hyperparameters): ```julia function performance(model) mach = machine(model, X, y) |> fit! @@ -297,7 +316,8 @@ NeuralNetworkRegressor $(MMI.doc_header(MultitargetNeuralNetworkRegressor)) `MultitargetNeuralNetworkRegressor`: A neural network model for making deterministic -predictions of a `Continuous` multi-target, presented as a table, given a table of `Continuous` features. +predictions of a `Continuous` multi-target, presented as a table, given a table of +`Continuous` features. # Training data @@ -314,22 +334,47 @@ Where # Hyper-parameters -- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. 
-- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. -- `loss=Flux.mse`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a regression task, the most natural loss functions are: +- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural + network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct + your own builder using the `@builder` macro, see examples for further information. +- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the + updating of the weights of the network. For further reference, see either the examples + or [the Flux optimiser + documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a + learning rate (the update rate of the optimizer), a good rule of thumb is to start out + at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `loss=Flux.mse`: The loss function which the network will optimize. Should be a + function which can be called in the form `loss(yhat, y)`. Possible loss functions are + listed in [the Flux loss function + documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a regression task, + the most natural loss functions are: - `Flux.mse` - `Flux.mae` - `Flux.msle` - `Flux.huber_loss` -- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. -- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. -- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. -- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. -- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. 
For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). - +- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents + one pass through the entirety of the training dataset. +- `batch_size::Int=1`: The batch size to be used for training. The batch size represents + the number of samples per update of the networks weights. Typcally, batch size should be + somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, + while larger batch sizes lead towards smoother training loss curves. In general, it is a + good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and + only tune the learning rate. In most literature, batch size is set in powers of twos, + but this is fairly arbitrary. +- `lambda::Float64=0`: The stregth of the regularization used during training. Can be + any value in the range `[0, ∞)`. +- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of + 0 represents L2 regularization, and a value of 1 represents L1 regularization. +- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during + training. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting + a machine if the associated optimiser has changed. If true, the associated machine will + retrain from scratch on `fit`, otherwise it will not. +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. + For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. +- `finaliser=Flux.softmax`: The final activation function of the neural network. +Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include +`Flux.sigmoid` and the identity function (otherwise known as "linear activation"). # Operations @@ -342,18 +387,22 @@ Where The fields of `fitted_params(mach)` are: -- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. +- `chain`: The trained "chain", or series of layers, functions, and activations which + make up the neural network. # Report The fields of `report(mach)` are: -- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. +- `training_losses`: The history of training losses, a vector containing the history of + all the losses during training. The first element of the vector is the initial + penalized loss. After the first element, the nth element corresponds to the loss of + epoch n-1. # Examples -In this example we build a regression model using the Boston house price dataset. +In this example we build a regression model using a toy dataset. ```julia using MLJ using MLJFlux From 4f3597345389a44afe48b3ec38c43a5a8550ebfa Mon Sep 17 00:00:00 2001 From: "Anthony D. 
Blaom" Date: Thu, 23 Jun 2022 19:07:22 +1200 Subject: [PATCH 03/24] Add preliminary Metalhead.jl integration and fix #162 first attempt Metalhead integration (with hack); tests lacking minor add docstring comment rm invalidated test mv metalhead stuff out to separate src file add show methods for Metalhead wraps add forgotten files with tests fix test rename metal -> image_builder --- Project.toml | 2 + src/MLJFlux.jl | 4 +- src/builders.jl | 8 +- src/metalhead.jl | 152 ++++++++++++++++++++++++++++++++++++ src/types.jl | 4 +- test/builders.jl | 16 +++- test/metalhead.jl | 59 ++++++++++++++ test/mlj_model_interface.jl | 4 - test/runtests.jl | 4 + 9 files changed, 241 insertions(+), 12 deletions(-) create mode 100644 src/metalhead.jl create mode 100644 test/metalhead.jl diff --git a/Project.toml b/Project.toml index a2f70565..dd10ab8b 100644 --- a/Project.toml +++ b/Project.toml @@ -9,6 +9,7 @@ ColorTypes = "3da002f7-5984-5a60-b8a6-cbb66c0b333f" ComputationalResources = "ed09eef8-17a6-5b46-8889-db040fac31e3" Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" MLJModelInterface = "e80e1ace-859a-464e-9ed9-23947d8ae3ea" +Metalhead = "dbeba491-748d-5e0e-a39e-b530a07fa0cc" ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" @@ -19,6 +20,7 @@ CategoricalArrays = "0.10" ColorTypes = "0.10.3, 0.11" ComputationalResources = "0.3.2" Flux = "0.10.4, 0.11, 0.12, 0.13" +Metalhead = "0.7" MLJModelInterface = "1.1.1" ProgressMeter = "1.7.1" Tables = "1.0" diff --git a/src/MLJFlux.jl b/src/MLJFlux.jl index 84bce73f..d3a88064 100644 --- a/src/MLJFlux.jl +++ b/src/MLJFlux.jl @@ -1,4 +1,4 @@ -module MLJFlux +module MLJFlux export CUDALibs, CPU1 @@ -13,10 +13,12 @@ using Statistics using ColorTypes using ComputationalResources using Random +import Metalhead include("penalizers.jl") include("core.jl") include("builders.jl") +include("metalhead.jl") include("types.jl") include("regressor.jl") include("classifier.jl") diff --git a/src/builders.jl b/src/builders.jl index 2c417c20..b106058a 100644 --- a/src/builders.jl +++ b/src/builders.jl @@ -1,4 +1,4 @@ -## BUILDING CHAINS A FROM HYPERPARAMETERS + INPUT/OUTPUT SHAPE +# # BUILDING CHAINS A FROM HYPERPARAMETERS + INPUT/OUTPUT SHAPE # We introduce chain builders as a way of exposing neural network # hyperparameters (describing, architecture, dropout rates, etc) to @@ -9,7 +9,7 @@ # input/output dimensions/shape. # Below n or (n1, n2) etc refers to network inputs, while m or (m1, -# m2) etc refers to outputs. +# m2) etc refers to outputs. abstract type Builder <: MLJModelInterface.MLJType end @@ -38,7 +38,7 @@ using `n_hidden` nodes in the hidden layer and the specified `dropout` (defaulting to 0.5). An activation function `σ` is applied between the hidden and final layers. If `n_hidden=0` (the default) then `n_hidden` is the geometric mean of the number of input and output nodes. The -number of input and output nodes is determined from the data. +number of input and output nodes is determined from the data. The each layer is initialized using `Flux.glorot_uniform(rng)`. 
If `rng` is an integer, it is instead used as the seed for a @@ -96,6 +96,8 @@ function MLJFlux.build(mlp::MLP, rng, n_in, n_out) end +# # BUILER MACRO + struct GenericBuilder{F} <: Builder apply::F end diff --git a/src/metalhead.jl b/src/metalhead.jl new file mode 100644 index 00000000..d0ec1a07 --- /dev/null +++ b/src/metalhead.jl @@ -0,0 +1,152 @@ +#= + +TODO: After https://github.com/FluxML/Metalhead.jl/issues/176: + +- Export and externally document `metal` method + +- Delete definition of `ResNetHack` below + +- Change default builder in ImageClassifier (see /src/types.jl) from + `image_builder(ResNetHack(...))` to `image_builder(Metalhead.ResNet(...))`, + +- Add nicer `show` methods for `MetalheadBuilder` instances + +=# + +const DISALLOWED_KWARGS = [:imsize, :inchannels, :nclasses] +const human_disallowed_kwargs = join(map(s->"`$s`", DISALLOWED_KWARGS), ", ", " and ") +const ERR_METALHEAD_DISALLOWED_KWARGS = ArgumentError( + "Keyword arguments $human_disallowed_kwargs are disallowed "* + "as their values are inferred from data. " +) + +# # WRAPPING + +struct MetalheadWrapper{F} <: MLJFlux.Builder + metalhead_constructor::F +end + +struct MetalheadBuilder{F} <: MLJFlux.Builder + metalhead_constructor::F + args + kwargs +end + +Base.show(io::IO, w::MetalheadWrapper) = + print(io, "image_builder($(repr(w.metalhead_constructor)))") + +function Base.show(io::IO, ::MIME"text/plain", w::MetalheadBuilder) + println(io, "builder wrapping $(w.metalhead_constructor)") + if !isempty(w.args) + println(io, " args:") + for (i, arg) in enumerate(w.args) + println(io, " 1: $arg") + end + end + if !isempty(w.kwargs) + println(io, " kwargs:") + for kwarg in w.kwargs + println(io, " $(first(kwarg)) = $(last(kwarg))") + end + end +end + +Base.show(io::IO, w::MetalheadBuilder) = + print(io, "image_builder($(repr(w.metalhead_constructor)))(…)") + + +""" + image_builder(constructor)(args...; kwargs...) + +Return an MLJFlux builder object based on the Metalhead.jl constructor/type +`constructor` (eg, `Metalhead.ResNet`). Here `args` and `kwargs` are +passed to the `MetalheadType` constructor at "build time", along with +the extra keyword specifiers `imsize=...`, `inchannels=...` and +`nclasses=...`, with values inferred from the data. + +# Example + +If in Metalhead.jl you would do + +```julia +using Metalhead +model = ResNet(50, pretrain=true, inchannels=1, nclasses=10) +``` + +then in MLJFlux, it suffices to do + +```julia +using MLJFlux, Metalhead +builder = image_builder(ResNet)(50, pretrain=true) +``` + +which can be used in `ImageClassifier` as in + +```julia +clf = ImageClassifier( + builder=builder, + epochs=500, + optimiser=Flux.ADAM(0.001), + loss=Flux.crossentropy, + batch_size=5, +) +``` + +The keyord arguments `imsize`, `inchannels` and `nclasses` are +dissallowed in `kwargs` (see above). + +""" +image_builder(metalhead_constructor) = MetalheadWrapper(metalhead_constructor) + +function (pre_builder::MetalheadWrapper)(args...; kwargs...) + kw_names = keys(kwargs) + isempty(intersect(kw_names, DISALLOWED_KWARGS)) || + throw(ERR_METALHEAD_DISALLOWED_KWARGS) + return MetalheadBuilder(pre_builder.metalhead_constructor, args, kwargs) +end + +MLJFlux.build( + b::MetalheadBuilder, + rng, + n_in, + n_out, + n_channels +) = b.metalhead_constructor( + b.args...; + b.kwargs..., + imsize=n_in, + inchannels=n_channels, + nclasses=n_out +) + +# See above "TODO" list. 
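+#
+# `VGGHack` (below) is essentially `Metalhead.VGG` with the `imsize` keyword
+# ignored (the input image size is fixed at (224, 224)) and with optional
+# loading of pretrained weights; it is the constructor wrapped by the default
+# `ImageClassifier` builder (see src/types.jl) until the TODO items at the top
+# of this file are resolved.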
+function VGGHack( + depth::Integer=16; + imsize=nothing, + inchannels=3, + nclasses=1000, + batchnorm=false, + pretrain=false, +) + + # Note `imsize` is ignored, as here: + # https://github.com/FluxML/Metalhead.jl/blob/9edff63222720ff84671b8087dd71eb370a6c35a/src/convnets/vgg.jl#L165 + + @assert( + depth in keys(Metalhead.vgg_config), + "depth must be from one in $(sort(collect(keys(Metalhead.vgg_config))))" + ) + model = Metalhead.VGG((224, 224); + config = Metalhead.vgg_conv_config[Metalhead.vgg_config[depth]], + inchannels, + batchnorm, + nclasses, + fcsize = 4096, + dropout = 0.5) + if pretrain && !batchnorm + Metalhead.loadpretrain!(model, string("VGG", depth)) + elseif pretrain + Metalhead.loadpretrain!(model, "VGG$(depth)-BN)") + end + return model +end diff --git a/src/types.jl b/src/types.jl index bf5674af..6a36c2be 100644 --- a/src/types.jl +++ b/src/types.jl @@ -50,6 +50,8 @@ doc_classifier(model_name) = doc_regressor(model_name)*""" for Model in [:NeuralNetworkClassifier, :ImageClassifier] + default_builder_ex = Model == :ImageClassifier ? :(image_builder(VGGHack)()) : Short() + ex = quote mutable struct $Model{B,F,O,L} <: MLJFluxProbabilistic builder::B @@ -65,7 +67,7 @@ for Model in [:NeuralNetworkClassifier, :ImageClassifier] acceleration::AbstractResource # eg, `CPU1()` or `CUDALibs()` end - function $Model(; builder::B = Short() + function $Model(; builder::B = $default_builder_ex , finaliser::F = Flux.softmax , optimiser::O = Flux.Optimise.ADAM() , loss::L = Flux.crossentropy diff --git a/test/builders.jl b/test/builders.jl index 030cbfa0..cd9d4f00 100644 --- a/test/builders.jl +++ b/test/builders.jl @@ -1,3 +1,11 @@ +# # Helpers + +function an_image(rng, n_in, n_channels) + n_channels == 3 && + return coerce(rand(rng, Float32, n_in..., 3), ColorImage) + return coerce(rand(rng, Float32, n_in...), GreyImage) +end + # to control chain initialization: myinit(n, m) = reshape(convert(Vector{Float32}, (1:n*m)), n , m) @@ -52,9 +60,11 @@ end end @testset_accelerated "@builder" accel begin - builder = MLJFlux.@builder(Flux.Chain(Flux.Dense(n_in, 4, - init = (out, in) -> randn(rng, out, in)), - Flux.Dense(4, n_out))) + builder = MLJFlux.@builder(Flux.Chain(Flux.Dense( + n_in, + 4, + init = (out, in) -> randn(rng, out, in) + ), Flux.Dense(4, n_out))) rng = StableRNGs.StableRNG(123) chain = MLJFlux.build(builder, rng, 5, 3) ps = Flux.params(chain) diff --git a/test/metalhead.jl b/test/metalhead.jl new file mode 100644 index 00000000..8c937e54 --- /dev/null +++ b/test/metalhead.jl @@ -0,0 +1,59 @@ +using StableRNGs +using MLJFlux +const Metalhead = MLJFlux.Metalhead + +@testset "display" begin + io = IOBuffer() + builder = MLJFlux.image_builder(MLJFlux.Metalhead.ResNet)(50, pretrain=false) + show(io, MIME("text/plain"), builder) + @test String(take!(io)) == + "builder wrapping Metalhead.ResNet\n args:\n"* + " 1: 50\n kwargs:\n pretrain = false\n" + show(io, builder) + @test String(take!(io)) == "image_builder(Metalhead.ResNet)(…)" + close(io) +end + +@testset "disallowed kwargs" begin + @test_throws( + MLJFlux.ERR_METALHEAD_DISALLOWED_KWARGS, + MLJFlux.image_builder(MLJFlux.Metalhead.VGG)(imsize=(241, 241)), + ) + @test_throws( + MLJFlux.ERR_METALHEAD_DISALLOWED_KWARGS, + MLJFlux.image_builder(MLJFlux.Metalhead.VGG)(inchannels=2), + ) + @test_throws( + MLJFlux.ERR_METALHEAD_DISALLOWED_KWARGS, + MLJFlux.image_builder(MLJFlux.Metalhead.VGG)(nclasses=10), + ) +end + +@testset "constructors" begin + depth = 16 + imsize = (128, 128) + nclasses = 10 + inchannels = 1 + wrapped = 
MLJFlux.image_builder(Metalhead.VGG) + @test wrapped.metalhead_constructor == Metalhead.VGG + builder = wrapped(depth, batchnorm=true) + @test builder.metalhead_constructor == Metalhead.VGG + @test builder.args == (depth, ) + @test (; builder.kwargs...) == (; batchnorm=true) + ref_chain = Metalhead.VGG( + imsize; + config = Metalhead.vgg_conv_config[Metalhead.vgg_config[depth]], + inchannels, + batchnorm=true, + nclasses, + fcsize = 4096, + dropout = 0.5 + ) + # needs https://github.com/FluxML/Metalhead.jl/issues/176 + # chain = + # MLJFlux.build(builder, StableRNGs.StableRNG(123), imsize, nclasses, inchannels) + # @test length.(MLJFlux.Flux.params(ref_chain)) == + # length.(MLJFlux.Flux.params(chain)) +end + +true diff --git a/test/mlj_model_interface.jl b/test/mlj_model_interface.jl index 6b15aca4..24b9a59e 100644 --- a/test/mlj_model_interface.jl +++ b/test/mlj_model_interface.jl @@ -6,10 +6,6 @@ ModelType = MLJFlux.NeuralNetworkRegressor @test model == clone clone.optimiser.eta *= 10 @test model != clone - - clone = deepcopy(model) - clone.builder.dropout *= 0.5 - @test clone != model end @testset "clean!" begin diff --git a/test/runtests.jl b/test/runtests.jl index ab44a92f..b0e84fd0 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -57,6 +57,10 @@ end include("builders.jl") end +@testset "metalhead" begin + include("metalhead.jl") +end + @testset "mlj_model_interface" begin include("mlj_model_interface.jl") end From 4ed6a8c6c49f07238b21112b989c293cee4ee274 Mon Sep 17 00:00:00 2001 From: "Anthony D. Blaom" Date: Tue, 28 Jun 2022 15:37:17 +1200 Subject: [PATCH 04/24] get rid of intermediate wrapper --- src/metalhead.jl | 31 ++++++++++++------------------- src/types.jl | 2 +- test/metalhead.jl | 18 ++++++++++-------- 3 files changed, 23 insertions(+), 28 deletions(-) diff --git a/src/metalhead.jl b/src/metalhead.jl index d0ec1a07..48602a71 100644 --- a/src/metalhead.jl +++ b/src/metalhead.jl @@ -2,14 +2,12 @@ TODO: After https://github.com/FluxML/Metalhead.jl/issues/176: -- Export and externally document `metal` method +- Export and externally document `image_builder` method - Delete definition of `ResNetHack` below - Change default builder in ImageClassifier (see /src/types.jl) from - `image_builder(ResNetHack(...))` to `image_builder(Metalhead.ResNet(...))`, - -- Add nicer `show` methods for `MetalheadBuilder` instances + `image_builder(ResNetHack)` to `image_builder(Metalhead.ResNet)`. =# @@ -22,19 +20,12 @@ const ERR_METALHEAD_DISALLOWED_KWARGS = ArgumentError( # # WRAPPING -struct MetalheadWrapper{F} <: MLJFlux.Builder - metalhead_constructor::F -end - struct MetalheadBuilder{F} <: MLJFlux.Builder metalhead_constructor::F args kwargs end -Base.show(io::IO, w::MetalheadWrapper) = - print(io, "image_builder($(repr(w.metalhead_constructor)))") - function Base.show(io::IO, ::MIME"text/plain", w::MetalheadBuilder) println(io, "builder wrapping $(w.metalhead_constructor)") if !isempty(w.args) @@ -52,14 +43,14 @@ function Base.show(io::IO, ::MIME"text/plain", w::MetalheadBuilder) end Base.show(io::IO, w::MetalheadBuilder) = - print(io, "image_builder($(repr(w.metalhead_constructor)))(…)") + print(io, "image_builder($(repr(w.metalhead_constructor)), …)") """ - image_builder(constructor)(args...; kwargs...) + image_builder(metalhead_constructor, args...; kwargs...) Return an MLJFlux builder object based on the Metalhead.jl constructor/type -`constructor` (eg, `Metalhead.ResNet`). Here `args` and `kwargs` are +`metalhead_constructor` (eg, `Metalhead.ResNet`). 
Here `args` and `kwargs` are passed to the `MetalheadType` constructor at "build time", along with the extra keyword specifiers `imsize=...`, `inchannels=...` and `nclasses=...`, with values inferred from the data. @@ -77,7 +68,7 @@ then in MLJFlux, it suffices to do ```julia using MLJFlux, Metalhead -builder = image_builder(ResNet)(50, pretrain=true) +builder = image_builder(ResNet, 50, pretrain=true) ``` which can be used in `ImageClassifier` as in @@ -96,13 +87,15 @@ The keyord arguments `imsize`, `inchannels` and `nclasses` are dissallowed in `kwargs` (see above). """ -image_builder(metalhead_constructor) = MetalheadWrapper(metalhead_constructor) - -function (pre_builder::MetalheadWrapper)(args...; kwargs...) +function image_builder( + metalhead_constructor, + args...; + kwargs... +) kw_names = keys(kwargs) isempty(intersect(kw_names, DISALLOWED_KWARGS)) || throw(ERR_METALHEAD_DISALLOWED_KWARGS) - return MetalheadBuilder(pre_builder.metalhead_constructor, args, kwargs) + return MetalheadBuilder(metalhead_constructor, args, kwargs) end MLJFlux.build( diff --git a/src/types.jl b/src/types.jl index 6a36c2be..968dacbf 100644 --- a/src/types.jl +++ b/src/types.jl @@ -50,7 +50,7 @@ doc_classifier(model_name) = doc_regressor(model_name)*""" for Model in [:NeuralNetworkClassifier, :ImageClassifier] - default_builder_ex = Model == :ImageClassifier ? :(image_builder(VGGHack)()) : Short() + default_builder_ex = Model == :ImageClassifier ? :(image_builder(VGGHack)) : Short() ex = quote mutable struct $Model{B,F,O,L} <: MLJFluxProbabilistic diff --git a/test/metalhead.jl b/test/metalhead.jl index 8c937e54..4260ff78 100644 --- a/test/metalhead.jl +++ b/test/metalhead.jl @@ -4,28 +4,28 @@ const Metalhead = MLJFlux.Metalhead @testset "display" begin io = IOBuffer() - builder = MLJFlux.image_builder(MLJFlux.Metalhead.ResNet)(50, pretrain=false) + builder = MLJFlux.image_builder(MLJFlux.Metalhead.ResNet, 50, pretrain=false) show(io, MIME("text/plain"), builder) @test String(take!(io)) == "builder wrapping Metalhead.ResNet\n args:\n"* " 1: 50\n kwargs:\n pretrain = false\n" show(io, builder) - @test String(take!(io)) == "image_builder(Metalhead.ResNet)(…)" + @test String(take!(io)) == "image_builder(Metalhead.ResNet, …)" close(io) end @testset "disallowed kwargs" begin @test_throws( MLJFlux.ERR_METALHEAD_DISALLOWED_KWARGS, - MLJFlux.image_builder(MLJFlux.Metalhead.VGG)(imsize=(241, 241)), + MLJFlux.image_builder(MLJFlux.Metalhead.VGG, imsize=(241, 241)), ) @test_throws( MLJFlux.ERR_METALHEAD_DISALLOWED_KWARGS, - MLJFlux.image_builder(MLJFlux.Metalhead.VGG)(inchannels=2), + MLJFlux.image_builder(MLJFlux.Metalhead.VGG, inchannels=2), ) @test_throws( MLJFlux.ERR_METALHEAD_DISALLOWED_KWARGS, - MLJFlux.image_builder(MLJFlux.Metalhead.VGG)(nclasses=10), + MLJFlux.image_builder(MLJFlux.Metalhead.VGG, nclasses=10), ) end @@ -34,9 +34,11 @@ end imsize = (128, 128) nclasses = 10 inchannels = 1 - wrapped = MLJFlux.image_builder(Metalhead.VGG) - @test wrapped.metalhead_constructor == Metalhead.VGG - builder = wrapped(depth, batchnorm=true) + builder = MLJFlux.image_builder( + Metalhead.VGG, + depth, + batchnorm=true + ) @test builder.metalhead_constructor == Metalhead.VGG @test builder.args == (depth, ) @test (; builder.kwargs...) == (; batchnorm=true) From d04a50cb48198e5a6c2dbcc6b6f3a69e977f74fb Mon Sep 17 00:00:00 2001 From: "Anthony D. 
Blaom" Date: Fri, 8 Jul 2022 09:27:10 +1200 Subject: [PATCH 05/24] rm redundant test helper --- test/builders.jl | 8 -------- 1 file changed, 8 deletions(-) diff --git a/test/builders.jl b/test/builders.jl index cd9d4f00..8aafa862 100644 --- a/test/builders.jl +++ b/test/builders.jl @@ -1,11 +1,3 @@ -# # Helpers - -function an_image(rng, n_in, n_channels) - n_channels == 3 && - return coerce(rand(rng, Float32, n_in..., 3), ColorImage) - return coerce(rand(rng, Float32, n_in...), GreyImage) -end - # to control chain initialization: myinit(n, m) = reshape(convert(Vector{Float32}, (1:n*m)), n , m) From c2df0d56f8c86548cac10e66a013835456aa6837 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Tue, 12 Jul 2022 04:11:56 +0000 Subject: [PATCH 06/24] address some Flux.Optimiser changes; improve image testing fix Flux compat --- Project.toml | 2 +- src/core.jl | 28 +++------------------------- src/metalhead.jl | 2 +- src/types.jl | 6 +++--- test/classifier.jl | 2 +- test/core.jl | 6 +++--- test/image.jl | 40 ++++++++++++++++++++++------------------ test/regressor.jl | 2 +- test/runtests.jl | 29 ++++++++++++++++++++--------- 9 files changed, 55 insertions(+), 62 deletions(-) diff --git a/Project.toml b/Project.toml index dd10ab8b..229b13a9 100644 --- a/Project.toml +++ b/Project.toml @@ -19,7 +19,7 @@ Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" CategoricalArrays = "0.10" ColorTypes = "0.10.3, 0.11" ComputationalResources = "0.3.2" -Flux = "0.10.4, 0.11, 0.12, 0.13" +Flux = "0.13" Metalhead = "0.7" MLJModelInterface = "1.1.1" ProgressMeter = "1.7.1" diff --git a/src/core.jl b/src/core.jl index de2a982d..d94d9f22 100644 --- a/src/core.jl +++ b/src/core.jl @@ -1,30 +1,8 @@ ## EXPOSE OPTIMISERS TO MLJ (for eg, tuning) -# Here we make the optimiser structs "transparent" so that their -# field values are exposed by calls to MLJ.params - -for opt in (:Descent, - :Momentum, - :Nesterov, - :RMSProp, - :ADAM, - :RADAM, - :AdaMax, - :OADAM, - :ADAGrad, - :ADADelta, - :AMSGrad, - :NADAM, - :AdaBelief, - :Optimiser, - :InvDecay, :ExpDecay, :WeightDecay, - :ClipValue, - :ClipNorm) # last updated: Flux.jl 0.12.3 - - @eval begin - MLJModelInterface.istransparent(m::Flux.$opt) = true - end -end +# make the optimiser structs "transparent" so that their field values +# are exposed by calls to MLJ.params: +MLJModelInterface.istransparent(m::Flux.Optimise.AbstractOptimiser) = true ## GENERAL METHOD TO OPTIMIZE A CHAIN diff --git a/src/metalhead.jl b/src/metalhead.jl index 48602a71..f42bb514 100644 --- a/src/metalhead.jl +++ b/src/metalhead.jl @@ -77,7 +77,7 @@ which can be used in `ImageClassifier` as in clf = ImageClassifier( builder=builder, epochs=500, - optimiser=Flux.ADAM(0.001), + optimiser=Flux.Adam(0.001), loss=Flux.crossentropy, batch_size=5, ) diff --git a/src/types.jl b/src/types.jl index 968dacbf..7d3166a0 100644 --- a/src/types.jl +++ b/src/types.jl @@ -13,7 +13,7 @@ Instantiate an MLJFlux model. Available hyperparameters: `MLJFlux.Short(n_hidden=0, dropout=0.5, σ=Flux.σ)` (classifiers) - `optimiser`: The optimiser to use for training. Default = - `Flux.ADAM()` + `Flux.Adam()` - `loss`: The loss function used for training. 
Default = `Flux.mse` (regressors) and `Flux.crossentropy` (classifiers) @@ -69,7 +69,7 @@ for Model in [:NeuralNetworkClassifier, :ImageClassifier] function $Model(; builder::B = $default_builder_ex , finaliser::F = Flux.softmax - , optimiser::O = Flux.Optimise.ADAM() + , optimiser::O = Flux.Optimise.Adam() , loss::L = Flux.crossentropy , epochs = 10 , batch_size = 1 @@ -123,7 +123,7 @@ for Model in [:NeuralNetworkRegressor, :MultitargetNeuralNetworkRegressor] end function $Model(; builder::B = Linear() - , optimiser::O = Flux.Optimise.ADAM() + , optimiser::O = Flux.Optimise.Adam() , loss::L = Flux.mse , epochs = 10 , batch_size = 1 diff --git a/test/classifier.jl b/test/classifier.jl index 135c3020..55bade43 100644 --- a/test/classifier.jl +++ b/test/classifier.jl @@ -19,7 +19,7 @@ end |> categorical; # TODO: replace Short2 -> Short when # https://github.com/FluxML/Flux.jl/issues/1372 is resolved: builder = Short2() -optimiser = Flux.Optimise.ADAM(0.03) +optimiser = Flux.Optimise.Adam(0.03) losses = [] diff --git a/test/core.jl b/test/core.jl index 75e03636..823ca16d 100644 --- a/test/core.jl +++ b/test/core.jl @@ -4,7 +4,7 @@ stable_rng = StableRNGs.StableRNG(123) rowvec(y) = y rowvec(y::Vector) = reshape(y, 1, length(y)) -@test MLJFlux.MLJModelInterface.istransparent(Flux.ADAM(0.1)) +@test MLJFlux.MLJModelInterface.istransparent(Flux.Adam(0.1)) @testset "nrows" begin Xmatrix = rand(stable_rng, 10, 3) @@ -112,7 +112,7 @@ epochs = 10 _chain_yes_drop, history = MLJFlux.fit!(model.loss, penalty, chain_yes_drop, - Flux.Optimise.ADAM(0.001), + Flux.Optimise.Adam(0.001), epochs, 0, data[1], @@ -124,7 +124,7 @@ epochs = 10 _chain_no_drop, history = MLJFlux.fit!(model.loss, penalty, chain_no_drop, - Flux.Optimise.ADAM(0.001), + Flux.Optimise.Adam(0.001), epochs, 0, data[1], diff --git a/test/image.jl b/test/image.jl index 1866b1ed..f3d6837c 100644 --- a/test/image.jl +++ b/test/image.jl @@ -1,4 +1,22 @@ -## BASIC IMAGE TESTS GREY +# # HELPERS + +function make_images(rng; n_classes=33, n_images=50, color=false, noise=0.05) + n_channels = color ? 3 : 1 + image_bag = map(1:n_classes) do _ + rand(stable_rng, Float32, 6, 6, n_channels) + end + labels = rand(stable_rng, 1:3, n_images) + images = map(labels) do j + image_bag[j] + noise*rand(stable_rng, Float32, 6, 6, n_channels) + end + T = color ? 
ColorImage : GrayImage + X = coerce(cat(images...; dims=4), T) + y = coerce(labels, Multiclass) + return X, y +end + + +# # BASIC IMAGE TESTS GREY Random.seed!(123) stable_rng = StableRNGs.StableRNG(123) @@ -18,16 +36,9 @@ function MLJFlux.build(model::MyNeuralNetwork, rng, ip, op, n_channels) end builder = MyNeuralNetwork((2,2), (2,2)) - -# collection of gray images as a 4D array in WHCN format: -raw_images = rand(stable_rng, Float32, 6, 6, 1, 50); - -# as a vector of Matrix{<:AbstractRGB} -images = coerce(raw_images, GrayImage); -@test scitype(images) == AbstractVector{GrayImage{6,6}} -labels = categorical(rand(stable_rng, 1:5, 50)); - +images, labels = make_images(stable_rng) losses = [] + @testset_accelerated "ImageClassifier basic tests" accel begin Random.seed!(123) @@ -136,14 +147,7 @@ reference = losses[1] ## BASIC IMAGE TESTS COLOR builder = MyNeuralNetwork((2,2), (2,2)) - -# collection of color images as a 4D array in WHCN format: -raw_images = rand(stable_rng, Float32, 6, 6, 3, 50); - -images = coerce(raw_images, ColorImage); -@test scitype(images) == AbstractVector{ColorImage{6,6}} -labels = categorical(rand(1:5, 50)); - +images, labels = make_images(stable_rng, color=true) losses = [] @testset_accelerated "ColorImages" accel begin diff --git a/test/regressor.jl b/test/regressor.jl index 0b6c7c7f..0f05ee72 100644 --- a/test/regressor.jl +++ b/test/regressor.jl @@ -6,7 +6,7 @@ X = MLJBase.table(randn(Float32, N, 5)); # TODO: replace Short2 -> Short when # https://github.com/FluxML/Flux.jl/pull/1618 is resolved: builder = Short2(σ=identity) -optimiser = Flux.Optimise.ADAM() +optimiser = Flux.Optimise.Adam() losses = [] diff --git a/test/runtests.jl b/test/runtests.jl index b0e84fd0..fc235899 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -45,38 +45,49 @@ seed!(123) include("test_utils.jl") -@testset "penalizers" begin +# enable conditional testing of modules by providing test_args +# e.g. 
`Pkg.test("MLJBase", test_args=["misc"])` +RUN_ALL_TESTS = isempty(ARGS) +macro conditional_testset(name, expr) + name = string(name) + esc(quote + if RUN_ALL_TESTS || $name in ARGS + @testset $name $expr + end + end) +end +@conditional_testset "penalizers" begin include("penalizers.jl") end -@testset "core" begin +@conditional_testset "core" begin include("core.jl") end -@testset "builders" begin +@conditional_testset "builders" begin include("builders.jl") end -@testset "metalhead" begin +@conditional_testset "metalhead" begin include("metalhead.jl") end -@testset "mlj_model_interface" begin +@conditional_testset "mlj_model_interface" begin include("mlj_model_interface.jl") end -@testset "regressor" begin +@conditional_testset "regressor" begin include("regressor.jl") end -@testset "classifier" begin +@conditional_testset "classifier" begin include("classifier.jl") end -@testset "image" begin +@conditional_testset "image" begin include("image.jl") end -@testset "integration" begin +@conditional_testset "integration" begin include("integration.jl") end From 64825703da71aa9dadeced2cc1f5bc842a824fd4 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Tue, 12 Jul 2022 04:24:47 +0000 Subject: [PATCH 07/24] remove MNIST tests --- Project.toml | 3 +-- test/image.jl | 59 --------------------------------------------------- 2 files changed, 1 insertion(+), 61 deletions(-) diff --git a/Project.toml b/Project.toml index 229b13a9..6ce654f4 100644 --- a/Project.toml +++ b/Project.toml @@ -28,7 +28,6 @@ julia = "1.6" [extras] LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458" MLJBase = "a7f614a8-145f-11e9-1d2a-a57a1082229d" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3" @@ -37,4 +36,4 @@ StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] -test = ["LinearAlgebra", "MLDatasets", "MLJBase", "Random", "StableRNGs", "Statistics", "StatsBase", "Test"] +test = ["LinearAlgebra", "MLJBase", "Random", "StableRNGs", "Statistics", "StatsBase", "Test"] diff --git a/test/image.jl b/test/image.jl index f3d6837c..48fb4fd3 100644 --- a/test/image.jl +++ b/test/image.jl @@ -85,65 +85,6 @@ reference = losses[1] @test all(x->abs(x - reference)/reference < 5e-4, losses[2:end]) -## MNIST IMAGES TEST - -mutable struct MyConvBuilder <: MLJFlux.Builder end - -using MLDatasets - -ENV["DATADEPS_ALWAYS_ACCEPT"] = true -images, labels = MNIST.traindata() -images = coerce(images, GrayImage); -labels = categorical(labels); - -function flatten(x::AbstractArray) - return reshape(x, :, size(x)[end]) -end - -function MLJFlux.build(builder::MyConvBuilder, rng, n_in, n_out, n_channels) - cnn_output_size = [3,3,32] - init = Flux.glorot_uniform(rng) - return Chain( - Conv((3, 3), n_channels=>16, pad=(1,1), relu, init=init), - MaxPool((2,2)), - Conv((3, 3), 16=>32, pad=(1,1), relu, init=init), - MaxPool((2,2)), - Conv((3, 3), 32=>32, pad=(1,1), relu, init=init), - MaxPool((2,2)), - flatten, - Dense(prod(cnn_output_size), n_out, init=init)) -end - -losses = [] - -@testset_accelerated "Image MNIST" accel begin - - Random.seed!(123) - stable_rng = StableRNGs.StableRNG(123) - - model = MLJFlux.ImageClassifier(builder=MyConvBuilder(), - acceleration=accel, - batch_size=50, - rng=stable_rng) - - @time fitresult, cache, _report = - MLJBase.fit(model, 0, images[1:500], labels[1:500]); - first_last_training_loss = _report[1][[1, end]] - push!(losses, first_last_training_loss[2]) -# @show 
first_last_training_loss - - pred = mode.(MLJBase.predict(model, fitresult, images[501:600])); - error = misclassification_rate(pred, labels[501:600]) - @test error < 0.2 - -end - -# check different resources (CPU1, CUDALibs, etc)) give about the same loss: -reference = losses[1] -@info "Losses for each computational resource: $losses" -@test all(x->abs(x - reference)/reference < 0.05, losses[2:end]) - - ## BASIC IMAGE TESTS COLOR builder = MyNeuralNetwork((2,2), (2,2)) From 0b999ceb43141adb79295491a8584f1982503277 Mon Sep 17 00:00:00 2001 From: josephsdavid Date: Mon, 27 Jun 2022 14:59:11 -0500 Subject: [PATCH 08/24] wip --- nn.md | 245 ++++++++++++++++++++++++++++++++++++++++++ nnc.md | 128 ++++++++++++++++++++++ nnclassif.norg | 148 +++++++++++++++++++++++++ nnm.md | 247 ++++++++++++++++++++++++++++++++++++++++++ nnregressor.norg | 273 +++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 1041 insertions(+) create mode 100644 nn.md create mode 100644 nnc.md create mode 100644 nnclassif.norg create mode 100644 nnm.md create mode 100644 nnregressor.norg diff --git a/nn.md b/nn.md new file mode 100644 index 00000000..46641a33 --- /dev/null +++ b/nn.md @@ -0,0 +1,245 @@ +# NeuralNetworkRegressor + +`NeuralNetworkRegressor`: A neural network model for making deterministic +predictions of a `Continuous` target, given a table of `Continuous` features. + +# Training data + +In MLJ or MLJBase, bind an instance `model` to data with +mach = machine(model, X, y) +Where +- `X`: is any table of input features (eg, a `DataFrame`) whose columns + are of scitype `Continuous`; check the scitype with `schema(X)` +- `y`: is the target, which can be any `AbstractVector` whose element + scitype is `Continuous`; check the scitype with `scitype(y)` + + +# Hyper-parameters + +- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. +- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `loss=Flux.mse`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a regression task, the most natural loss functions are: + - `Flux.mse` + - `Flux.mae` + - `Flux.msle` + - `Flux.huber_loss` +- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. +- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. 
In most literature, batch size is set in powers of twos, but this is fairly arbitrary. +- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. +- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. +- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. +- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). + + +# Operations + +- `predict(mach, Xnew)`: return predictions of the target given new + features `Xnew` having the same Scitype as `X` above. Predictions are + deterministic. + + +# Fitted parameters + +The fields of `fitted_params(mach)` are: +- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. + + +# Report + +The fields of `report(mach)` are: +- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. + +# Examples + +In this example we build a regression model using the Boston house price dataset +```julia + + using MLJ + using MLJFlux + using Flux + using Plots + +``` +First, we load in the data, with target `:MEDV`. We load in all features except `:CHAS`: +```julia + + data = OpenML.load(531); # Loads from https://www.openml.org/d/531 + + y, X = unpack(data, ==(:MEDV), !=(:CHAS); rng=123); + + scitype(y) + schema(X) + +``` +Since MLJFlux models do not handle ordered factos, we can treat `:RAD` as `Continuous`: +```julia +X = coerce(X, :RAD=>Continuous) +``` +Lets also make a test set: +```julia + + (X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); + +``` +Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. +```julia +builder = MLJFlux.@builder begin + init=Flux.glorot_uniform(rng) + Chain(Dense(n_in, 64, relu, init=init), + Dense(64, 32, relu, init=init), + Dense(32, 1, init=init)) + end +``` +Finally, we can define the model! +```julia + + NeuralNetworkRegressor = @load NeuralNetworkRegressor + model = NeuralNetworkRegressor(builder=builder, + rng=123, + epochs=20) +``` +For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! +```julia +pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) +``` +If we fit with a high verbosity (>1), we will see the losses during training. 
We can also see the losses in the output of `report(mach)` + +```julia +mach = machine(pipe, X, y) + fit!(mach, verbosity=2) + + # first element initial loss, 2:end per epoch training losses + report(mach).transformed_target_model_deterministic.training_losses + +``` + +## Experimenting with learning rate + +We can visually compare how the learning rate affects the predictions: +```julia +plt = plot() + + rates = 10. .^ (-5:0) + + foreach(rates) do η + pipe.transformed_target_model_deterministic.model.optimiser.eta = η + fit!(mach, force=true, verbosity=0) + losses = + report(mach).transformed_target_model_deterministic.model.training_losses[3:end] + plot!(1:length(losses), losses, label=η) + end + plt #!md + + savefig(joinpath("assets", "learning_rate.png")) + + + pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 + +``` + +## Using Iteration Controls + +We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as updte the traces. +```julia + + # For initializing or clearing the traces: + + clear() = begin + global losses = [] + global training_losses = [] + global epochs = [] + return nothing + end + + # And to update the traces: + + update_loss(loss) = push!(losses, loss) + update_training_loss(report) = + push!(training_losses, + report.transformed_target_model_deterministic.model.training_losses[end]) + update_epochs(epoch) = push!(epochs, epoch) + +``` +For further reference of controls, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: +```julia + + controls=[Step(1), + NumberSinceBest(6), + InvalidValue(), + TimeLimit(1/60), + WithLossDo(update_loss), + WithReportDo(update_training_loss), + WithIterationsDo(update_epochs)] + + + iterated_pipe = + IteratedModel(model=pipe, + controls=controls, + resampling=Holdout(fraction_train=0.8), + measure = l2) + +``` +Next, we can clear the traces, fit the model, and plot the traces: +```julia + + + clear() + mach = machine(iterated_pipe, X, y) + fit!(mach) + + plot(epochs, losses, + xlab = "epoch", + ylab = "mean sum of squares error", + label="out-of-sample", + legend = :topleft); + scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md + + savefig(joinpath("assets", "loss.png")) +``` + +### Brief note on iterated models + +Training an `IteratedModel` means holding out some data (80% in this case) so an out-of-sample loss can be tracked and used in the specified stopping criterion, `NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by `IteratedModel` (our pipeline model) is retrained on all data for the same number of iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned parameters. 
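For example, a quick sanity check on the untouched test set might look like this (a sketch only, reusing the `Xtest`, `ytest` holdout and the machine `mach` fitted in the preceding block):

```julia
yhat = predict(mach, Xtest)  # predictions use the final retrained parameters
l2(yhat, ytest) |> mean      # out-of-sample mean squared error
```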
+ +## Evaluating Iterated Models + +We can evaluate our model with the `evaluate!` function: +```julia + + e = evaluate!(mach, + resampling=CV(nfolds=8), + measures=[l1, l2]) + +#- + + using Measurements + l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) + @show l1_loss + +``` +We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). + +## Comparison with other models on the test set + +Although we cannot assign them statistical significance, here are comparisons, on the untouched test set, of the eror of our self-iterating neural network regressor with a couple of other models trained on the same data (using default hyperparameters): +```julia + + function performance(model) + mach = machine(model, X, y) |> fit! + yhat = predict(mach, Xtest) + l1(yhat, ytest) |> mean + end + performance(iterated_pipe) + + three_models = [(@load EvoTreeRegressor)(), # tree boosting model + (@load LinearRegressor pkg=MLJLinearModels)(), + iterated_pipe] + + errs = performance.(three_models) + + (models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty + + +``` diff --git a/nnc.md b/nnc.md new file mode 100644 index 00000000..a14f2ecc --- /dev/null +++ b/nnc.md @@ -0,0 +1,128 @@ +# NeuralNetworkClassifier + +`NeuralNetworkClassifier`: +- TODO + +# Training data + +In MLJ or MLJBase, bind an instance `model` to data with +mach = machine(model, X, y) +Where +- `X`: is any table of input features (eg, a `DataFrame`) whose columns + are of scitype `Continuous`; check the scitype with `schema(X)` +- `y`: is the target, which can be any `AbstractVector` whose element + scitype is `Finite` with `n_out` classes; check the scitype with `scitype(y)` + + +# Hyper-parameters + +- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. +- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification task, the most natural loss functions are: + - `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. + - `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. + - `Flux.binarycrossentropy`: Typically used as loss in binary classification, with labels in a 1-hot encoded format. + - `Flux.logitbinarycrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `sigmoid` and then calculating binary crossentropy. 
+ - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. + - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples. + - `Flux.binary_focal_loss`: Binary version of the above +- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. +- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. +- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. +- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. +- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. +- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). + + +# Operations + +- `predict(mach, Xnew)`: return predictions of the target given new + features `Xnew` having the same Scitype as `X` above. Predictions are + probabilistic. +- `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions + returned above. + + +# Fitted parameters + +The fields of `fitted_params(mach)` are: +- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. + + +# Report + +The fields of `report(mach)` are: +- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. + +# Examples + +In this example we build a classification model using the Iris dataset. +```julia + + using MLJ + using Flux + import RDatasets + + using Random + Random.seed!(123) + + MLJ.color_off() + + using Plots + pyplot(size=(600, 300*(sqrt(5)-1))); + +``` +This is a very basic example, using a default builder and no standardization. +For a more advance illustration, see [`NeuralNetworkRegressor`](@ref) or [`ImageClassifier`](@ref). 
First, we can load the data: +```julia + + iris = RDatasets.dataset("datasets", "iris"); + y, X = unpack(iris, ==(:Species), colname -> true, rng=123); + NeuralNetworkClassifier = @load NeuralNetworkClassifier + clf = NeuralNetworkClassifier() + +``` +Next, we can train the model: +```julia +import Random.seed!; seed!(123) + mach = machine(clf, X, y) + fit!(mach) +``` +We can train the model in an incremental fashion with the `optimizer_changes_trigger_retraining` flag set to false (which is by default). Here, we change the number of iterations and the learning rate of the optimiser: +```julia +clf.optimiser.eta = clf.optimiser.eta * 2 + clf.epochs = clf.epochs + 5 + + # note that if the optimizer_changes_trigger_retraining flag was set to true + # the model would be completely retrained from scratch because the optimizer was + # updated + fit!(mach, verbosity=2); +``` +We can inspect the mean training loss using the `cross_entropy` function: +```julia + + training_loss = cross_entropy(predict(mach, X), y) |> mean + +``` +And we can access the Flux chain (model) using `fitted_params`: +```julia +training_loss = cross_entropy(predict(mach, X), y) |> mean +``` +Finally, we can see how the out-of-sample performance changes over time, using the `learning_curve` function +```julia +r = range(clf, :epochs, lower=1, upper=200, scale=:log10) + curve = learning_curve(clf, X, y, + range=r, + resampling=Holdout(fraction_train=0.7), + measure=cross_entropy) + using Plots + plot(curve.parameter_values, + curve.measurements, + xlab=curve.parameter_name, + xscale=curve.parameter_scale, + ylab = "Cross Entropy") + + savefig("iris_history.png") +``` diff --git a/nnclassif.norg b/nnclassif.norg new file mode 100644 index 00000000..25ba3847 --- /dev/null +++ b/nnclassif.norg @@ -0,0 +1,148 @@ +* NeuralNetworkClassifier + + `NeuralNetworkClassifier`: + - [ ] TODO + +* Training data + + In MLJ or MLJBase, bind an instance `model` to data with + + mach = machine(model, X, y) + + Where + + - `X`: is any table of input features (eg, a `DataFrame`) whose columns + are of scitype `Continuous`; check the scitype with `schema(X)` + + - `y`: is the target, which can be any `AbstractVector` whose element + scitype is `Finite` with `n_out` classes; check the scitype with `scitype(y)` + + +* Hyper-parameters + + - `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. + - `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or {https://fluxml.ai/Flux.jl/stable/training/optimisers/}[the Flux optimiser documentation]. To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. + - `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in {https://fluxml.ai/Flux.jl/stable/models/losses/}[the Flux loss function documentation]. For a classification task, the most natural loss functions are: + -- `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. 
+ -- `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. + -- `Flux.binarycrossentropy`: Typically used as loss in binary classification, with labels in a 1-hot encoded format. + -- `Flux.logitbinarycrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `sigmoid` and then calculating binary crossentropy. + -- `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. + -- `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples. + -- `Flux.binary_focal_loss`: Binary version of the above + - `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. + - `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. + - `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. + - `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. + - `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. + - `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. + - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. + - `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). + + +* Operations + + - `predict(mach, Xnew)`: return predictions of the target given new + features `Xnew` having the same Scitype as `X` above. Predictions are + probabilistic. + - `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions + returned above. + + + +* Fitted parameters + + The fields of `fitted_params(mach)` are: + + - `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. + + +* Report + + The fields of `report(mach)` are: + + - `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch $n-1$. + +* Examples + + In this example we build a classification model using the Iris dataset. 
+ + @code julia + + using MLJ + using Flux + import RDatasets + + using Random + Random.seed!(123) + + MLJ.color_off() + + using Plots + pyplot(size=(600, 300*(sqrt(5)-1))); + + @end + + This is a very basic example, using a default builder and no standardization. + For a more advance illustration, see [`NeuralNetworkRegressor`](@ref) or [`ImageClassifier`](@ref). First, we can load the data: + + @code julia + + iris = RDatasets.dataset("datasets", "iris"); + y, X = unpack(iris, ==(:Species), colname -> true, rng=123); + NeuralNetworkClassifier = @load NeuralNetworkClassifier + clf = NeuralNetworkClassifier() + + @end + + Next, we can train the model: + @code julia + import Random.seed!; seed!(123) + mach = machine(clf, X, y) + fit!(mach) + @end + + We can train the model in an incremental fashion with the `optimizer_changes_trigger_retraining` flag set to false (which is by default). Here, we change the number of iterations and the learning rate of the optimiser: + + @code julia + clf.optimiser.eta = clf.optimiser.eta * 2 + clf.epochs = clf.epochs + 5 + + # note that if the optimizer_changes_trigger_retraining flag was set to true + # the model would be completely retrained from scratch because the optimizer was + # updated + fit!(mach, verbosity=2); + @end + + We can inspect the mean training loss using the `cross_entropy` function: + + @code julia + + training_loss = cross_entropy(predict(mach, X), y) |> mean + + @end + + And we can access the Flux chain (model) using `fitted_params`: + + @code julia + training_loss = cross_entropy(predict(mach, X), y) |> mean + @end + + Finally, we can see how the out-of-sample performance changes over time, using the `learning_curve` function + + @code julia + r = range(clf, :epochs, lower=1, upper=200, scale=:log10) + curve = learning_curve(clf, X, y, + range=r, + resampling=Holdout(fraction_train=0.7), + measure=cross_entropy) + using Plots + plot(curve.parameter_values, + curve.measurements, + xlab=curve.parameter_name, + xscale=curve.parameter_scale, + ylab = "Cross Entropy") + + savefig("iris_history.png") + @end + diff --git a/nnm.md b/nnm.md new file mode 100644 index 00000000..5c0234dc --- /dev/null +++ b/nnm.md @@ -0,0 +1,247 @@ +# MultitargetNeuralNetworkRegressor + +`MultitargetNeuralNetworkRegressor`: A neural network model for making deterministic +predictions of a `Continuous` multi-target, presented as a table, given a table of `Continuous` features. + +# Training data + +In MLJ or MLJBase, bind an instance `model` to data with +mach = machine(model, X, y) +Where +- `X`: is any table of input features (eg, a `DataFrame`) whose columns + are of scitype `Continuous`; check the scitype with `schema(X)` +- `y`: is the target, which can be any table of output targets whose element + scitype is `Continuous`; check the scitype with `schema(y)` + + +# Hyper-parameters + +- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. +- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. 
+- `loss=Flux.mse`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a regression task, the most natural loss functions are: + - `Flux.mse` + - `Flux.mae` + - `Flux.msle` + - `Flux.huber_loss` +- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. +- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. +- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. +- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. +- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. +- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). + + +# Operations + +- `predict(mach, Xnew)`: return predictions of the target given new + features `Xnew` having the same Scitype as `X` above. Predictions are + deterministic. + + +# Fitted parameters + +The fields of `fitted_params(mach)` are: +- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. + + +# Report + +The fields of `report(mach)` are: +- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. + +# Examples + +In this example we build a regression model using the Boston house price dataset +```julia + + using MLJ + using MLJFlux + using Flux + using Plots + using MLJBase: augment_X + +``` +First, we generate some data: +```julia + + X = augment_X(randn(10000, 8), true); + θ = randn((9,2)); + y = X * θ; + X = MLJ.table(X) + y = MLJ.table(y) + + + + + schema(y) + schema(X) + +``` +Lets also make a test set: +```julia + + (X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); + +``` +Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. 
+```julia +builder = MLJFlux.@builder begin + init=Flux.glorot_uniform(rng) + Chain(Dense(n_in, 64, relu, init=init), + Dense(64, 32, relu, init=init), + Dense(32, 1, init=init)) + end +``` +Finally, we can define the model! +```julia + + MultitargetNeuralNetworkRegressor = @load MultitargetNeuralNetworkRegressor + model = MultitargetNeuralNetworkRegressor(builder=builder, + rng=123, + epochs=20) +``` +For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! +```julia +pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) +``` +If we fit with a high verbosity (>1), we will see the losses during training. We can also see the losses in the output of `report(mach)` + +```julia +mach = machine(pipe, X, y) + fit!(mach, verbosity=2) + + # first element initial loss, 2:end per epoch training losses + report(mach).transformed_target_model_deterministic.training_losses + +``` + +## Experimenting with learning rate + +We can visually compare how the learning rate affects the predictions: +```julia +plt = plot() + + rates = 10. .^ (-5:0) + + foreach(rates) do η + pipe.transformed_target_model_deterministic.model.optimiser.eta = η + fit!(mach, force=true, verbosity=0) + losses = + report(mach).transformed_target_model_deterministic.model.training_losses[3:end] + plot!(1:length(losses), losses, label=η) + end + plt #!md + + savefig(joinpath("assets", "learning_rate.png")) + + + pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 + +``` + +## Using Iteration Controls + +We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as updte the traces. +```julia + + # For initializing or clearing the traces: + + clear() = begin + global losses = [] + global training_losses = [] + global epochs = [] + return nothing + end + + # And to update the traces: + + update_loss(loss) = push!(losses, loss) + update_training_loss(report) = + push!(training_losses, + report.transformed_target_model_deterministic.model.training_losses[end]) + update_epochs(epoch) = push!(epochs, epoch) + +``` +For further reference of controls, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). 
To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: +```julia + + controls=[Step(1), + NumberSinceBest(6), + InvalidValue(), + TimeLimit(1/60), + WithLossDo(update_loss), + WithReportDo(update_training_loss), + WithIterationsDo(update_epochs)] + + + iterated_pipe = + IteratedModel(model=pipe, + controls=controls, + resampling=Holdout(fraction_train=0.8), + measure = l2) + +``` +Next, we can clear the traces, fit the model, and plot the traces: +```julia + + + clear() + mach = machine(iterated_pipe, X, y) + fit!(mach) + + plot(epochs, losses, + xlab = "epoch", + ylab = "mean sum of squares error", + label="out-of-sample", + legend = :topleft); + scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md + + savefig(joinpath("assets", "loss.png")) +``` + +### Brief note on iterated models + +Training an `IteratedModel` means holding out some data (80% in this case) so an out-of-sample loss can be tracked and used in the specified stopping criterion, `NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by `IteratedModel` (our pipeline model) is retrained on all data for the same number of iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned parameters. + +## Evaluating Iterated Models + +We can evaluate our model with the `evaluate!` function: +```julia + + e = evaluate!(mach, + resampling=CV(nfolds=8), + measures=[l1, l2]) + +#- + + using Measurements + l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) + @show l1_loss + +``` +We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). + +## Comparison with other models on the test set + +Although we cannot assign them statistical significance, here are comparisons, on the untouched test set, of the eror of our self-iterating neural network regressor with a couple of other models trained on the same data (using default hyperparameters): +```julia + + function performance(model) + mach = machine(model, X, y) |> fit! + yhat = predict(mach, Xtest) + l1(yhat, ytest) |> mean + end + performance(iterated_pipe) + + three_models = [(@load EvoTreeRegressor)(), # tree boosting model + (@load LinearRegressor pkg=MLJLinearModels)(), + iterated_pipe] + + errs = performance.(three_models) + + (models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty + + +``` diff --git a/nnregressor.norg b/nnregressor.norg new file mode 100644 index 00000000..cdeb2277 --- /dev/null +++ b/nnregressor.norg @@ -0,0 +1,273 @@ +* MultitargetNeuralNetworkRegressor + + `MultitargetNeuralNetworkRegressor`: A neural network model for making deterministic + predictions of a `Continuous` multi-target, presented as a table, given a table of `Continuous` features. + +* Training data + + In MLJ or MLJBase, bind an instance `model` to data with + + mach = machine(model, X, y) + + Where + + - `X`: is any table of input features (eg, a `DataFrame`) whose columns + are of scitype `Continuous`; check the scitype with `schema(X)` + + - `y`: is the target, which can be any table of output targets whose element + scitype is `Continuous`; check the scitype with `schema(y)` + + +* Hyper-parameters + + - `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. 
You can construct your own builder using the `@builder` macro, see examples for further information. + - `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or {https://fluxml.ai/Flux.jl/stable/training/optimisers/}[the Flux optimiser documentation]. To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. + - `loss=Flux.mse`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in {https://fluxml.ai/Flux.jl/stable/models/losses/}[the Flux loss function documentation]. For a regression task, the most natural loss functions are: + -- `Flux.mse` + -- `Flux.mae` + -- `Flux.msle` + -- `Flux.huber_loss` + - `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. + - `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. + - `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. + - `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. + - `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. + - `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. + - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. + - `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). + + +* Operations + + - `predict(mach, Xnew)`: return predictions of the target given new + features `Xnew` having the same Scitype as `X` above. Predictions are + deterministic. + + +* Fitted parameters + + The fields of `fitted_params(mach)` are: + + - `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. + + +* Report + + The fields of `report(mach)` are: + + - `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch $n-1$. 
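+
+  For instance, assuming `mach` is a machine wrapping this model that has already been trained (a hypothetical machine; the Examples below build one from scratch), the loss history could be inspected with:
+
+  @code julia
+  losses = report(mach).training_losses  # vector of losses; first entry is the initial penalized loss
+  losses[end]                            # loss recorded after the final epoch
+  @end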
+ +* Examples + +In this example we build a regression model using the Boston house price dataset + + @code julia + + using MLJ + using MLJFlux + using Flux + using Plots + using MLJBase: augment_X + + @end + + First, we generate some data: + + @code julia + + X = augment_X(randn(10000, 8), true); + θ = randn((9,2)); + y = X * θ; + X = MLJ.table(X) + y = MLJ.table(y) + + + + + schema(y) + schema(X) + + @end + + Lets also make a test set: + + @code julia + + (X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); + + @end + + Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. + + @code julia + builder = MLJFlux.@builder begin + init=Flux.glorot_uniform(rng) + Chain(Dense(n_in, 64, relu, init=init), + Dense(64, 32, relu, init=init), + Dense(32, 1, init=init)) + end + @end + + Finally, we can define the model! + + @code julia + + MultitargetNeuralNetworkRegressor = @load MultitargetNeuralNetworkRegressor + model = MultitargetNeuralNetworkRegressor(builder=builder, + rng=123, + epochs=20) + @end + + For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have {https://www.informit.com/articles/article.aspx?p=3131594&seqNum=2}[saturated neurons] and not train well. Therefore, standardization is key! + + @code julia + pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) + @end + + If we fit with a high verbosity ($>1$), we will see the losses during training. We can also see the losses in the output of `report(mach)` + + + @code julia + mach = machine(pipe, X, y) + fit!(mach, verbosity=2) + + # first element initial loss, 2:end per epoch training losses + report(mach).transformed_target_model_deterministic.training_losses + + @end + +** Experimenting with learning rate + + We can visually compare how the learning rate affects the predictions: + + @code julia + plt = plot() + + rates = 10. .^ (-5:0) + + foreach(rates) do η + pipe.transformed_target_model_deterministic.model.optimiser.eta = η + fit!(mach, force=true, verbosity=0) + losses = + report(mach).transformed_target_model_deterministic.model.training_losses[3:end] + plot!(1:length(losses), losses, label=η) + end + plt #!md + + savefig(joinpath("assets", "learning_rate.png")) + + + pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 + + @end + +** Using Iteration Controls + We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as updte the traces. 
+ + @code julia + + # For initializing or clearing the traces: + + clear() = begin + global losses = [] + global training_losses = [] + global epochs = [] + return nothing + end + + # And to update the traces: + + update_loss(loss) = push!(losses, loss) + update_training_loss(report) = + push!(training_losses, + report.transformed_target_model_deterministic.model.training_losses[end]) + update_epochs(epoch) = push!(epochs, epoch) + + @end + + For further reference of controls, see {https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/#Controls-provided}[the documentation]. To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: + + @code julia + + controls=[Step(1), + NumberSinceBest(6), + InvalidValue(), + TimeLimit(1/60), + WithLossDo(update_loss), + WithReportDo(update_training_loss), + WithIterationsDo(update_epochs)] + + + iterated_pipe = + IteratedModel(model=pipe, + controls=controls, + resampling=Holdout(fraction_train=0.8), + measure = l2) + + @end + + Next, we can clear the traces, fit the model, and plot the traces: + + @code julia + + + clear() + mach = machine(iterated_pipe, X, y) + fit!(mach) + + plot(epochs, losses, + xlab = "epoch", + ylab = "mean sum of squares error", + label="out-of-sample", + legend = :topleft); + scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md + + savefig(joinpath("assets", "loss.png")) + @end + +*** Brief note on iterated models + Training an `IteratedModel` means holding out some data (80% in this case) so an out-of-sample loss can be tracked and used in the specified stopping criterion, `NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by `IteratedModel` (our pipeline model) is retrained on all data for the same number of iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned parameters. + +** Evaluating Iterated Models + We can evaluate our model with the `evaluate!` function: + + @code julia + + e = evaluate!(mach, + resampling=CV(nfolds=8), + measures=[l1, l2]) + +#- + + using Measurements + l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) + @show l1_loss + + @end + +We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). + +** Comparison with other models on the test set + + Although we cannot assign them statistical significance, here are comparisons, on the untouched test set, of the eror of our self-iterating neural network regressor with a couple of other models trained on the same data (using default hyperparameters): + + @code julia + + function performance(model) + mach = machine(model, X, y) |> fit! 
+ yhat = predict(mach, Xtest) + l1(yhat, ytest) |> mean + end + performance(iterated_pipe) + + three_models = [(@load EvoTreeRegressor)(), # tree boosting model + (@load LinearRegressor pkg=MLJLinearModels)(), + iterated_pipe] + + errs = performance.(three_models) + + (models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty + + + @end + From adb2cb8410dceabeb054ad61eb53a90f23de0fce Mon Sep 17 00:00:00 2001 From: josephsdavid Date: Tue, 12 Jul 2022 16:36:33 -0500 Subject: [PATCH 09/24] update with code review suggestions --- src/MLJFlux.jl | 943 ------------------------------------------- src/classifier.jl | 6 +- src/image.jl | 5 +- src/regressor.jl | 13 +- src/types.jl | 996 +++++++++++++++++++++++++++++++++++++++++++--- 5 files changed, 951 insertions(+), 1012 deletions(-) diff --git a/src/MLJFlux.jl b/src/MLJFlux.jl index 4fe7d3bd..d2e63add 100644 --- a/src/MLJFlux.jl +++ b/src/MLJFlux.jl @@ -37,949 +37,6 @@ MLJModelInterface.metadata_pkg.((NeuralNetworkRegressor, export NeuralNetworkRegressor, MultitargetNeuralNetworkRegressor export NeuralNetworkClassifier, ImageClassifier -""" -$(MMI.doc_header(NeuralNetworkRegressor)) - -`NeuralNetworkRegressor`: A neural network model for making deterministic -predictions of a `Continuous` target, given a table of `Continuous` features. - -# Training data - -In MLJ or MLJBase, bind an instance `model` to data with - mach = machine(model, X, y) - -Where - -- `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the scitype with `schema(X)` -- `y`: is the target, which can be any `AbstractVector` whose element - scitype is `Continuous`; check the scitype with `scitype(y)` - - -# Hyper-parameters - -- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. - Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder - using the `@builder` macro, see examples for further information. -- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating - of the weights of the network. For further reference, see either the examples or - [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). - To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to - start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. -- `loss=Flux.mse`: The loss function which the network will optimize. Should be a function - which can be called in the form `loss(yhat, y)`. - Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). - For a regression task, the most natural loss functions are: - - `Flux.mse` - - `Flux.mae` - - `Flux.msle` - - `Flux.huber_loss` -- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents - one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size represents - the number of samples per update of the networks weights. Typcally, batch size should be - somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, - while larger batch sizes lead towards smoother training loss curves. - In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), - and stick with it, and only tune the learning rate. In most examples, batch size is set - in powers of twos, but this is fairly arbitrary. 
-- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value
-  in the range `[0, ∞)`.
-- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`.
-  A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization.
-- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training.
-- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a
-  machine if the associated optimiser has changed. If true, the associated machine will
-  retrain from scratch on `fit`, otherwise it will not.
-- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done.
-  For training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`.
-- `finaliser=Flux.softmax`: The final activation function of the neural network.
-  Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include
-  `Flux.sigmoid` and the identity function (otherwise known as "linear activation").
-
-
-# Operations
-
-- `predict(mach, Xnew)`: return predictions of the target given new
-  features `Xnew` having the same Scitype as `X` above. Predictions are
-  deterministic.
-
-
-# Fitted parameters
-
-The fields of `fitted_params(mach)` are:
-
-- `chain`: The trained "chain", or series of layers, functions, and activations which
-  make up the neural network.
-
-
-# Report
-
-The fields of `report(mach)` are:
-
-- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1.
-
-# Examples
-
-In this example we build a regression model using the Boston house price dataset
-```julia
-    using MLJ
-    using MLJFlux
-    using Flux
-    using Plots
-```
-First, we load in the data, with target `:MEDV`. We load in all features except `:CHAS`:
-```julia
-data = OpenML.load(531); # Loads from https://www.openml.org/d/531
-
-y, X = unpack(data, ==(:MEDV), !=(:CHAS); rng=123);
-
-scitype(y)
-schema(X)
-```
-Since MLJFlux models do not handle ordered factors, we can treat `:RAD` as `Continuous`:
-```julia
-X = coerce(X, :RAD=>Continuous)
-```
-Lets also make a test set:
-```julia
-(X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true);
-```
-Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network.
-```julia
-builder = MLJFlux.@builder begin
-    init=Flux.glorot_uniform(rng)
-    Chain(Dense(n_in, 64, relu, init=init),
-          Dense(64, 32, relu, init=init),
-          Dense(32, 1, init=init))
-end
-```
-Finally, we can define the model!
-```julia
-NeuralNetworkRegressor = @load NeuralNetworkRegressor
-    model = NeuralNetworkRegressor(builder=builder,
-                                   rng=123,
-                                   epochs=20)
-```
-For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key!
-```julia
-pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer)
-```
-If we fit with a high verbosity (>1), we will see the losses during training. We can also see the losses in the output of `report(mach)`
-```julia
-mach = machine(pipe, X, y)
-fit!(mach, verbosity=2)
-
-# first element initial loss, 2:end per epoch training losses
-report(mach).transformed_target_model_deterministic.training_losses
-
-```
-
-## Experimenting with learning rate
-
-We can visually compare how the learning rate affects the predictions:
-```julia
-plt = plot()
-
-rates = 10. .^ (-5:0)
-
-foreach(rates) do η
-  pipe.transformed_target_model_deterministic.model.optimiser.eta = η
-  fit!(mach, force=true, verbosity=0)
-  losses =
-    report(mach).transformed_target_model_deterministic.model.training_losses[3:end]
-  plot!(1:length(losses), losses, label=η)
-end
-plt #!md
-
-savefig(joinpath("assets", "learning_rate.png"))
-
-pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001
-```
-
-## Using Iteration Controls
-
-We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as update the traces.
-```julia -# For initializing or clearing the traces: - -clear() = begin - global losses = [] - global training_losses = [] - global epochs = [] - return nothing -end - - # And to update the traces: - -update_loss(loss) = push!(losses, loss) -update_training_loss(report) = - push!(training_losses, - report.transformed_target_model_deterministic.model.training_losses[end]) -update_epochs(epoch) = push!(epochs, epoch) -``` -For further reference of controls, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: -```julia -controls=[Step(1), - NumberSinceBest(6), - InvalidValue(), - TimeLimit(1/60), - WithLossDo(update_loss), - WithReportDo(update_training_loss), -WithIterationsDo(update_epochs)] - - -iterated_pipe = - IteratedModel(model=pipe, - controls=controls, - resampling=Holdout(fraction_train=0.8), - measure = l2) -``` -Next, we can clear the traces, fit the model, and plot the traces: -```julia -clear() -mach = machine(iterated_pipe, X, y) -fit!(mach) - -plot(epochs, losses, - xlab = "epoch", - ylab = "mean sum of squares error", - label="out-of-sample", - legend = :topleft); -scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md - -savefig(joinpath("assets", "loss.png")) -``` - -### Brief note on iterated models - -Training an `IteratedModel` means holding out some data (80% in this case) so an -out-of-sample loss can be tracked and used in the specified stopping criterion, -`NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by -`IteratedModel` (our pipeline model) is retrained on all data for the same number of -iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned -parameters. - -## Evaluating Iterated Models - -We can evaluate our model with the `evaluate!` function: -```julia -e = evaluate!(mach, - resampling=CV(nfolds=8), - measures=[l1, l2]) - -using Measurements -l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) -@show l1_loss -``` -We take this estimate of the uncertainty of the generalization error with a [grain of -salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). - -## Comparison with other models on the test set - -Although we cannot assign them statistical significance, here are comparisons, on the -untouched test set, of the eror of our self-iterating neural network regressor with a -couple of other models trained on the same data (using default hyperparameters): -```julia -function performance(model) - mach = machine(model, X, y) |> fit! - yhat = predict(mach, Xtest) - l1(yhat, ytest) |> mean -end -performance(iterated_pipe) - -three_models = [(@load EvoTreeRegressor)(), # tree boosting model - (@load LinearRegressor pkg=MLJLinearModels)(), - iterated_pipe] - -errs = performance.(three_models) - -(models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty -``` - -See also -[`MultitargetNeuralNetworkRegressor`](@ref) -""" -NeuralNetworkRegressor - -""" -$(MMI.doc_header(MultitargetNeuralNetworkRegressor)) - -`MultitargetNeuralNetworkRegressor`: A neural network model for making deterministic -predictions of a `Continuous` multi-target, presented as a table, given a table of -`Continuous` features. 
- -# Training data - -In MLJ or MLJBase, bind an instance `model` to data with - mach = machine(model, X, y) - -Where - -- `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the scitype with `schema(X)` -- `y`: is the target, which can be any table of output targets whose element - scitype is `Continuous`; check the scitype with `schema(y)` - - -# Hyper-parameters - -- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural - network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct - your own builder using the `@builder` macro, see examples for further information. -- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the - updating of the weights of the network. For further reference, see either the examples - or [the Flux optimiser - documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a - learning rate (the update rate of the optimizer), a good rule of thumb is to start out - at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. -- `loss=Flux.mse`: The loss function which the network will optimize. Should be a - function which can be called in the form `loss(yhat, y)`. Possible loss functions are - listed in [the Flux loss function - documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a regression task, - the most natural loss functions are: - - `Flux.mse` - - `Flux.mae` - - `Flux.msle` - - `Flux.huber_loss` -- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents - one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size represents - the number of samples per update of the networks weights. Typcally, batch size should be - somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, - while larger batch sizes lead towards smoother training loss curves. In general, it is a - good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and - only tune the learning rate. In most literature, batch size is set in powers of twos, - but this is fairly arbitrary. -- `lambda::Float64=0`: The stregth of the regularization used during training. Can be - any value in the range `[0, ∞)`. -- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of - 0 represents L2 regularization, and a value of 1 represents L1 regularization. -- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during - training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting - a machine if the associated optimiser has changed. If true, the associated machine will - retrain from scratch on `fit`, otherwise it will not. -- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. - For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. -Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include -`Flux.sigmoid` and the identity function (otherwise known as "linear activation"). - -# Operations - -- `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same Scitype as `X` above. Predictions are - deterministic. 
- - -# Fitted parameters - -The fields of `fitted_params(mach)` are: - -- `chain`: The trained "chain", or series of layers, functions, and activations which - make up the neural network. - - -# Report - -The fields of `report(mach)` are: - -- `training_losses`: The history of training losses, a vector containing the history of - all the losses during training. The first element of the vector is the initial - penalized loss. After the first element, the nth element corresponds to the loss of - epoch n-1. - -# Examples - -In this example we build a regression model using a toy dataset. -```julia -using MLJ -using MLJFlux -using Flux -using Plots -using MLJBase: augment_X -``` -First, we generate some data: -```julia -X = augment_X(randn(10000, 8), true); -θ = randn((9,2)); -y = X * θ; -X = MLJ.table(X) -y = MLJ.table(y) - -schema(y) -schema(X) -``` -Lets also make a test set: -```julia -(X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); -``` -Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. -```julia -builder = MLJFlux.@builder begin - init=Flux.glorot_uniform(rng) - Chain(Dense(n_in, 64, relu, init=init), - Dense(64, 32, relu, init=init), - Dense(32, 1, init=init)) -end -``` -Finally, we can define the model! -```julia -MultitargetNeuralNetworkRegressor = @load MultitargetNeuralNetworkRegressor - model = MultitargetNeuralNetworkRegressor(builder=builder, - rng=123, - epochs=20) -``` -For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! -```julia -pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) -``` -If we fit with a high verbosity (>1), we will see the losses during training. We can also see the losses in the output of `report(mach)` - -```julia -mach = machine(pipe, X, y) -fit!(mach, verbosity=2) - -# first element initial loss, 2:end per epoch training losses -report(mach).transformed_target_model_deterministic.training_losses - -``` - -## Experimenting with learning rate - -We can visually compare how the learning rate affects the predictions: -```julia -plt = plot() - -rates = 10. .^ (-5:0) - -foreach(rates) do η - pipe.transformed_target_model_deterministic.model.optimiser.eta = η - fit!(mach, force=true, verbosity=0) - losses = - report(mach).transformed_target_model_deterministic.model.training_losses[3:end] - plot!(1:length(losses), losses, label=η) -end -plt #!md - -savefig(joinpath("assets", "learning_rate.png")) - - -pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 - -``` - -## Using Iteration Controls - -We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as updte the traces. 
-```julia -# For initializing or clearing the traces: - -clear() = begin - global losses = [] - global training_losses = [] - global epochs = [] - return nothing -end - -# And to update the traces: - -update_loss(loss) = push!(losses, loss) -update_training_loss(report) = - push!(training_losses, - report.transformed_target_model_deterministic.model.training_losses[end]) -update_epochs(epoch) = push!(epochs, epoch) -``` -For further reference of controls, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: -```julia -controls=[Step(1), - NumberSinceBest(6), - InvalidValue(), - TimeLimit(1/60), - WithLossDo(update_loss), - WithReportDo(update_training_loss), -WithIterationsDo(update_epochs)] - -iterated_pipe = - IteratedModel(model=pipe, - controls=controls, - resampling=Holdout(fraction_train=0.8), - measure = l2) -``` -Next, we can clear the traces, fit the model, and plot the traces: -```julia -clear() -mach = machine(iterated_pipe, X, y) -fit!(mach) - -plot(epochs, losses, - xlab = "epoch", - ylab = "mean sum of squares error", - label="out-of-sample", - legend = :topleft); -scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md - -savefig(joinpath("assets", "loss.png")) -``` - -### Brief note on iterated models - -Training an `IteratedModel` means holding out some data (80% in this case) so an out-of-sample loss can be tracked and used in the specified stopping criterion, `NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by `IteratedModel` (our pipeline model) is retrained on all data for the same number of iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned parameters. - -## Evaluating Iterated Models - -We can evaluate our model with the `evaluate!` function: -```julia -e = evaluate!(mach, - resampling=CV(nfolds=8), - measures=[l1, l2]) - -using Measurements -l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) -@show l1_loss -``` -We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). - -## Comparison with other models on the test set - -Although we cannot assign them statistical significance, here are comparisons, on the untouched test set, of the eror of our self-iterating neural network regressor with a couple of other models trained on the same data (using default hyperparameters): -```julia - -function performance(model) - mach = machine(model, X, y) |> fit! - yhat = predict(mach, Xtest) - l1(yhat, ytest) |> mean -end -performance(iterated_pipe) - -three_models = [(@load EvoTreeRegressor)(), # tree boosting model - (@load LinearRegressor pkg=MLJLinearModels)(), - iterated_pipe] - -errs = performance.(three_models) - -(models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty - - -``` -See also -[`NeuralNetworkRegressor`](@ref) -""" -MultitargetNeuralNetworkRegressor -""" -$(MMI.doc_header(NeuralNetworkClassifier)) - -`NeuralNetworkClassifier`: a neural network model for making probabilistic predictions -of a Multiclass or OrderedFactor target, given a table of Continuous features. 
) - TODO: - -# Training data - -In MLJ or MLJBase, bind an instance `model` to data with - mach = machine(model, X, y) - -Where - -- `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the scitype with `schema(X)` -- `y`: is the target, which can be any `AbstractVector` whose element - scitype is `Multiclass` or `OrderedFactor` with `n_out` classes; - check the scitype with `scitype(y)` - - -# Hyper-parameters - -- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. -- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. -- `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification task, the most natural loss functions are: - - `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. - - `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. - - `Flux.binarycrossentropy`: Typically used as loss in binary classification, with labels in a 1-hot encoded format. - - `Flux.logitbinarycrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `sigmoid` and then calculating binary crossentropy. - - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. - - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples. - - `Flux.binary_focal_loss`: Binary version of the above -- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. -- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. -- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. -- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. 
-- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. -- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a classification task, `softmax` is used for multiclass, single label regression, `sigmoid` is used for either binary classification or multi label classification (when there are multiple possible labels for a given sample). - - -# Operations - -- `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same Scitype as `X` above. Predictions are - probabilistic. -- `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions - returned above. - - -# Fitted parameters - -The fields of `fitted_params(mach)` are: - -- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. - - -# Report - -The fields of `report(mach)` are: - -- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. - -# Examples - -In this example we build a classification model using the Iris dataset. -```julia -using MLJ -using Flux -import RDatasets - -using Random -Random.seed!(123) - -MLJ.color_off() - -using Plots -pyplot(size=(600, 300*(sqrt(5)-1))); -``` -This is a very basic example, using a default builder and no standardization. -For a more advance illustration, see [`NeuralNetworkRegressor`](@ref) or [`ImageClassifier`](@ref). First, we can load the data: -```julia -iris = RDatasets.dataset("datasets", "iris"); -y, X = unpack(iris, ==(:Species), colname -> true, rng=123); -NeuralNetworkClassifier = @load NeuralNetworkClassifier -clf = NeuralNetworkClassifier() -``` -Next, we can train the model: -```julia -import Random.seed!; seed!(123) -mach = machine(clf, X, y) -fit!(mach) -``` -We can train the model in an incremental fashion with the `optimizer_changes_trigger_retraining` flag set to false (which is by default). 
Here, we change the number of iterations and the learning rate of the optimiser: -```julia -clf.optimiser.eta = clf.optimiser.eta * 2 -clf.epochs = clf.epochs + 5 - -# note that if the optimizer_changes_trigger_retraining flag was set to true -# the model would be completely retrained from scratch because the optimizer was -# updated -fit!(mach, verbosity=2); -``` -We can inspect the mean training loss using the `cross_entropy` function: -```julia - -training_loss = cross_entropy(predict(mach, X), y) |> mean - -``` -And we can access the Flux chain (model) using `fitted_params`: -```julia -chain = fitted_params(mach).chain -``` -Finally, we can see how the out-of-sample performance changes over time, using the `learning_curve` function -```julia -r = range(clf, :epochs, lower=1, upper=200, scale=:log10) -curve = learning_curve(clf, X, y, - range=r, - resampling=Holdout(fraction_train=0.7), - measure=cross_entropy) -using Plots -plot(curve.parameter_values, - curve.measurements, - xlab=curve.parameter_name, - xscale=curve.parameter_scale, - ylab = "Cross Entropy") - -savefig("iris_history.png") -``` -See also -[`ImageClassifier`](@ref) -""" -NeuralNetworkClassifier -""" -$(MMI.doc_header(ImageClassifier)) - -`ImageClassifier`: A neural network model for making probabilistic -"predictions of a `GrayImage` target, given a table of `Continuous` features. - -# Training data - -In MLJ or MLJBase, bind an instance `model` to data with -mach = machine(model, X, y) -Where -- `X`: is any `AbstractVector` of input features (eg, a `DataFrame`) whose items - are of scitype `GrayImage`; check the scitype with `scitype(X)` -- `y`: is the target, which can be any `AbstractVector` whose element - scitype is `Multiclass` or `OrderedFactor` with `n_out` classes; - check the scitype with `scitype(y)` - - -# Hyper-parameters - -- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. -- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. -- `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification task, the most natural loss functions are: - - `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. - - `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. - - `Flux.binarycrossentropy`: Typically used as loss in binary classification, with labels in a 1-hot encoded format. - - `Flux.logitbinarycrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `sigmoid` and then calculating binary crossentropy. 
- - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. - - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples. - - `Flux.binary_focal_loss`: Binary version of the above -- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. -- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. -- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. -- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. -- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). - - -# Operations - -- `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same Scitype as `X` above. Predictions are - probabilistic. -- `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions - returned above. - - -# Fitted parameters - -The fields of `fitted_params(mach)` are: -- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. - - -# Report - -The fields of `report(mach)` are: -- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. - -# Examples - -In this example we use MLJ to classify the MNIST image dataset -```julia -using MLJ -using Flux -import MLJFlux -import MLJIteration # for `skip` - -MLJ.color_off() - -using Plots -pyplot(size=(600, 300*(sqrt(5)-1))); -``` -First we want to download the MNIST dataset, and unpack into images and labels -```julia -import MLDatasets: MNIST - -ENV["DATADEPS_ALWAYS_ACCEPT"] = true -images, labels = MNIST.traindata(); -``` -In MLJ, integers cannot be used for encoding categorical data, so we must coerce them into the `Multiclass` [scientific type](https://juliaai.github.io/ScientificTypes.jl/dev/). 
For more in this, see [Working with Categorical Data](https://alan-turing-institute.github.io/MLJ.jl/dev/working_with_categorical_data/): -```julia -labels = coerce(labels, Multiclass); -images = coerce(images, GrayImage); - -# Checking scientific types: - -@assert scitype(images) <: AbstractVector{<:Image} -@assert scitype(labels) <: AbstractVector{<:Finite} - -images[1] -``` -For general instructions on coercing image data, see [type coercion for image data](https://alan-turing-institute.github.io/ScientificTypes.jl/dev/%23Type-coercion-for-image-data-1) -We start by defining a suitable `builder` object. This is a recipe -for building the neural network. Our builder will work for images of -any (constant) size, whether they be color or black and white (ie, -single or multi-channel). The architecture always consists of six -alternating convolution and max-pool layers, and a final dense -layer; the filter size and the number of channels after each -convolution layer is customisable. -```julia -import MLJFlux - -struct MyConvBuilder - filter_size::Int - channels1::Int - channels2::Int - channels3::Int -end - -make2d(x::AbstractArray) = reshape(x, :, size(x)[end]) - -function MLJFlux.build(b::MyConvBuilder, rng, n_in, n_out, n_channels) - k, c1, c2, c3 = b.filter_size, b.channels1, b.channels2, b.channels3 - mod(k, 2) == 1 || error("`filter_size` must be odd. ") - p = div(k - 1, 2) # padding to preserve image size - init = Flux.glorot_uniform(rng) - front = Chain( - Conv((k, k), n_channels => c1, pad=(p, p), relu, init=init), - MaxPool((2, 2)), - Conv((k, k), c1 => c2, pad=(p, p), relu, init=init), - MaxPool((2, 2)), - Conv((k, k), c2 => c3, pad=(p, p), relu, init=init), - MaxPool((2 ,2)), - make2d) - d = Flux.outputsize(front, (n_in..., n_channels, 1)) |> first - return Chain(front, Dense(d, n_out, init=init)) -end -``` -It is important to note that in our `build` function, there is no final softmax. This is applie by default in all MLJFlux classifiers, using the `finaliser` hyperparameter of the classifier. Now that we have our builder defined, we can define the actual moel. If you have a GPU, you can substitute in `acceleration=CudaLibs()` below. Note that in the case of convolutions, this will **greatly** increase the speed of training. -```julia -ImageClassifier = @load ImageClassifier -clf = ImageClassifier(builder=MyConvBuilder(3, 16, 32, 32), - batch_size=50, - epochs=10, - rng=123) -``` -You can add flux options such as `optimiser` and `loss` in the snippet above. Currently, `loss` must be a flux-compatible loss, and not an MLJ measure. -Next, we can bind the model with the data in a machine, and fit the first 500 or so images: -```julia -mach = machine(clf, images, labels); - -fit!(mach, rows=1:500, verbosity=2); - -report(mach) - -chain = fitted_params(mach) - -Flux.params(chain)[2] -``` -We can tack on 20 more epochs by modifying the `epochs` field, and iteratively fit some more: -```julia -clf.epochs = clf.epochs + 20 -fit!(mach, rows=1:500); -``` -We can also make predictions and calculate an out-of-sample loss estimate, in two ways! -```julia -predicted_labels = predict(mach, rows=501:1000); -cross_entropy(predicted_labels, labels[501:1000]) |> mean -# alternative one liner! -evaluate!(mach, - resampling=Holdout(fraction_train=0.5), - measure=cross_entropy, - rows=1:1000, - verbosity=0) -``` - -## Wrapping in iteration controls - -Any iterative MLJFlux model can be wrapped in **iteration controls**, as we demonstrate next. 
For more on MLJ's `IteratedModel` wrapper, see the [MLJ documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/). -The "self-iterating" classifier (`iterated_clf` below) is for iterating the image classifier defined above until a stopping criterion is hit. We use the following stopping criterion: -- `Patience(3)`: 3 consecutive increases in the loss -- `InvalidValue()`: an out-of-sample loss or a training loss that is `NaN` or `±Inf` -- `TimeLimit(t=5/60)`: training time has exceeded 5 minutes. -We can specify how often these checks (and other controls) are applied using the `Step` control. Additionally, we can define controls to -- save a snapshot of the machine every N control cycles (`save_control`) -- record traces of the out-of-sample loss and training losses for plotting (`WithLossDo`) -- record mean value traces of each Flux parameter for plotting (`Callback`) -And other controls. For a full list, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). -First, we define some helper functions and some empty vectors to store traces: -```julia -make2d(x::AbstractArray) = reshape(x, :, size(x)[end]) -make1d(x::AbstractArray) = reshape(x, length(x)); - -# to extract the flux parameters from a machine -parameters(mach) = make1d.(Flux.params(fitted_params(mach))); - -# trace storage -losses = [] -training_losses = [] -parameter_means = Float32[]; -epochs = [] - -# to update traces -update_loss(loss) = push!(losses, loss) -update_training_loss(losses) = push!(training_losses, losses[end]) -update_means(mach) = append!(parameter_means, mean.(parameters(mach))); -update_epochs(epoch) = push!(epochs, epoch) -``` -Next, we can define our controls! We store them in a simple vector: -```julia -save_control = - MLJIteration.skip(Save(joinpath(DIR, "mnist.jlso")), predicate=3) - -controls=[Step(2), - Patience(3), - InvalidValue(), - TimeLimit(5/60), - save_control, - WithLossDo(), - WithLossDo(update_loss), - WithTrainingLossesDo(update_training_loss), - Callback(update_means), - WithIterationsDo(update_epochs) -``` -Once the controls are defined, we can instantiate and fit our "self-iterating" classifier: -```julia -iterated_clf = IteratedModel(model=clf, - controls=controls, - resampling=Holdout(fraction_train=0.7), - measure=log_loss) - -mach = machine(iterated_clf, images, labels); -fit!(mach, rows=1:500); -``` -Next we can compare the training and out-of-sample losses, as well as view the evolution of the weights: -```julia -plot(epochs, losses, - xlab = "epoch", - ylab = "root squared error", - label="out-of-sample") -plot!(epochs, training_losses, label="training") - -savefig(joinpath(DIR, "loss.png")) - -n_epochs = length(losses) -n_parameters = div(length(parameter_means), n_epochs) -parameter_means2 = reshape(copy(parameter_means), n_parameters, n_epochs)' -plot(epochs, parameter_means2, - title="Flux parameter mean weights", - xlab = "epoch") -# **Note.** The the higher the number, the deeper the chain parameter. -savefig(joinpath(DIR, "weights.png")) -``` -Since we saved our model every few epochs, we can retrieve the snapshots so we can make predictions! -```julia -mach2 = machine(joinpath(DIR, "mnist3.jlso")) -predict_mode(mach2, images[501:503]) -``` - -## Resuming training - -If we change `iterated_clf.controls` or `clf.epochs`, we can resume training from where it left off. 
This is very useful for long-running training sessions, where you may be interrupted by for example a bad connection or computer hibernation. -```julia -iterated_clf.controls[2] = Patience(4) -fit!(mach, rows=1:500) - -plot(epochs, losses, - xlab = "epoch", - ylab = "root squared error", - label="out-of-sample") -plot!(epochs, training_losses, label="training") -``` -See also -[`NeuralNetworkClassifier`](@ref) -""" -ImageClassifier end #module diff --git a/src/classifier.jl b/src/classifier.jl index 82d4efc9..2825dff7 100644 --- a/src/classifier.jl +++ b/src/classifier.jl @@ -31,8 +31,4 @@ end MLJModelInterface.metadata_model(NeuralNetworkClassifier, input=Table(Continuous), target=AbstractVector{<:Finite}, - path="MLJFlux.NeuralNetworkClassifier", - descr="A neural network model for making "* - "probabilistic predictions of a "* - "`Multiclass` or `OrderedFactor` target, "* - "given a table of `Continuous` features. ") + path="MLJFlux.NeuralNetworkClassifier") diff --git a/src/image.jl b/src/image.jl index 5c973eb7..dc8d5637 100644 --- a/src/image.jl +++ b/src/image.jl @@ -29,7 +29,4 @@ end MLJModelInterface.metadata_model(ImageClassifier, input=AbstractVector{<:MLJModelInterface.Image}, target=AbstractVector{<:Multiclass}, - path="MLJFlux.ImageClassifier", - descr="A neural network model for making probabilistic "* - "predictions of a `GrayImage` target, "* - "given a table of `Continuous` features. ") + path="MLJFlux.ImageClassifier") diff --git a/src/regressor.jl b/src/regressor.jl index f932bff7..85a431aa 100644 --- a/src/regressor.jl +++ b/src/regressor.jl @@ -23,11 +23,7 @@ end MLJModelInterface.metadata_model(NeuralNetworkRegressor, input=Table(Continuous), target=AbstractVector{<:Continuous}, - path="MLJFlux.NeuralNetworkRegressor", - descr="A neural network model for making "* - "deterministic predictions of a "* - "`Continuous` target, given a table of "* - "`Continuous` features. ") + path="MLJFlux.NeuralNetworkRegressor") # # MULTITARGET NEURAL NETWORK REGRESSOR @@ -59,9 +55,4 @@ end MLJModelInterface.metadata_model(MultitargetNeuralNetworkRegressor, input=Table(Continuous), target=Table(Continuous), - path="MLJFlux.MultitargetNeuralNetworkRegressor", - descr = "A neural network model for making "* - "deterministic predictions of a "* - "`Continuous` multi-target, presented "* - "as a table, given a table of "* - "`Continuous` features. ") + path="MLJFlux.MultitargetNeuralNetworkRegressor") diff --git a/src/types.jl b/src/types.jl index bf5674af..16c3c295 100644 --- a/src/types.jl +++ b/src/types.jl @@ -3,51 +3,6 @@ abstract type MLJFluxDeterministic <: MLJModelInterface.Deterministic end const MLJFluxModel = Union{MLJFluxProbabilistic,MLJFluxDeterministic} -const doc_regressor(model_name) = """ - - $model_name(; hyparameters...) - -Instantiate an MLJFlux model. Available hyperparameters: - -- `builder`: Default = `MLJFlux.Linear(σ=Flux.relu)` (regressors) or - `MLJFlux.Short(n_hidden=0, dropout=0.5, σ=Flux.σ)` (classifiers) - -- `optimiser`: The optimiser to use for training. Default = - `Flux.ADAM()` - -- `loss`: The loss function used for training. Default = `Flux.mse` - (regressors) and `Flux.crossentropy` (classifiers) - -- `epochs`: Number of epochs to train for. Default = `10` - -- `batch_size`: The batch_size for the data. Default = 1 - -- `lambda`: The regularization strength. Default = 0. Range = [0, ∞) - -- `alpha`: The L2/L1 mix of regularization. Default = 0. 
Range = [0, 1] - -- `rng`: The random number generator (RNG) passed to builders, for - weight intitialization, for example. Can be any `AbstractRNG` or - the seed (integer) for a `MersenneTwister` that is reset on every - cold restart of model (machine) training. Default = - `GLOBAL_RNG`. - -- `acceleration`: Use `CUDALibs()` for training on GPU; default is `CPU1()`. - -- `optimiser_changes_trigger_retraining`: True if fitting an - associated machine should trigger retraining from scratch whenever - the optimiser changes. Default = `false` - -""" - -doc_classifier(model_name) = doc_regressor(model_name)*""" -- `finaliser`: Operation applied to the unnormalized output of the - final layer to obtain probabilities (outputs summing to - one). The shape of the inputs and outputs - of this operator must match. Default = `Flux.softmax`. - -""" - for Model in [:NeuralNetworkClassifier, :ImageClassifier] ex = quote @@ -97,13 +52,416 @@ for Model in [:NeuralNetworkClassifier, :ImageClassifier] return model end - @doc doc_classifier($Model) $Model - end eval(ex) end +""" +$(MMI.doc_header(NeuralNetworkClassifier)) + +`NeuralNetworkClassifier`: a neural network model for making probabilistic predictions +of a Multiclass or OrderedFactor target, given a table of Continuous features. ) + TODO: + +# Training data + +In MLJ or MLJBase, bind an instance `model` to data with + mach = machine(model, X, y) + +Where + +- `X`: is any table of input features (eg, a `DataFrame`) whose columns + are of scitype `Continuous`; check the scitype with `schema(X)` +- `y`: is the target, which can be any `AbstractVector` whose element + scitype is `Multiclass` or `OrderedFactor` with `n_out` classes; + check the scitype with `scitype(y)` + + +# Hyper-parameters + +- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. +- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification task, the most natural loss functions are: + - `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. + - `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. + - `Flux.binarycrossentropy`: Typically used as loss in binary classification, with labels in a 1-hot encoded format. + - `Flux.logitbinarycrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `sigmoid` and then calculating binary crossentropy. + - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. + - `Flux.focal_loss`: Used with highly imbalanced data. 
Weights harder examples more than easier examples. + - `Flux.binary_focal_loss`: Binary version of the above +- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. +- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. +- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. +- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. +- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. +- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a classification task, `softmax` is used for multiclass, single label regression, `sigmoid` is used for either binary classification or multi label classification (when there are multiple possible labels for a given sample). + + +# Operations + +- `predict(mach, Xnew)`: return predictions of the target given new + features `Xnew` having the same Scitype as `X` above. Predictions are + probabilistic. +- `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions + returned above. + + +# Fitted parameters + +The fields of `fitted_params(mach)` are: + +- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. + + +# Report + +The fields of `report(mach)` are: + +- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. + +# Examples + +In this example we build a classification model using the Iris dataset. +```julia +using MLJ +using Flux +import RDatasets + +using Random +Random.seed!(123) + +MLJ.color_off() + +using Plots +pyplot(size=(600, 300*(sqrt(5)-1))); +``` +This is a very basic example, using a default builder and no standardization. +For a more advance illustration, see [`NeuralNetworkRegressor`](@ref) or [`ImageClassifier`](@ref). 
First, we can load the data: +```julia +iris = RDatasets.dataset("datasets", "iris"); +y, X = unpack(iris, ==(:Species), colname -> true, rng=123); +NeuralNetworkClassifier = @load NeuralNetworkClassifier +clf = NeuralNetworkClassifier() +``` +Next, we can train the model: +```julia +import Random.seed!; seed!(123) +mach = machine(clf, X, y) +fit!(mach) +``` +We can train the model in an incremental fashion with the `optimizer_changes_trigger_retraining` flag set to false (which is by default). Here, we change the number of iterations and the learning rate of the optimiser: +```julia +clf.optimiser.eta = clf.optimiser.eta * 2 +clf.epochs = clf.epochs + 5 + +# note that if the optimizer_changes_trigger_retraining flag was set to true +# the model would be completely retrained from scratch because the optimizer was +# updated +fit!(mach, verbosity=2); +``` +We can inspect the mean training loss using the `cross_entropy` function: +```julia + +training_loss = cross_entropy(predict(mach, X), y) |> mean + +``` +And we can access the Flux chain (model) using `fitted_params`: +```julia +chain = fitted_params(mach).chain +``` +Finally, we can see how the out-of-sample performance changes over time, using the `learning_curve` function +```julia +r = range(clf, :epochs, lower=1, upper=200, scale=:log10) +curve = learning_curve(clf, X, y, + range=r, + resampling=Holdout(fraction_train=0.7), + measure=cross_entropy) +using Plots +plot(curve.parameter_values, + curve.measurements, + xlab=curve.parameter_name, + xscale=curve.parameter_scale, + ylab = "Cross Entropy") + +savefig("iris_history.png") +``` +See also +[`ImageClassifier`](@ref) +""" +NeuralNetworkClassifier + +""" +$(MMI.doc_header(ImageClassifier)) + +`ImageClassifier`: A neural network model for making probabilistic +"predictions of a `GrayImage` target, given a table of `Continuous` features. + +# Training data + +In MLJ or MLJBase, bind an instance `model` to data with +mach = machine(model, X, y) +Where +- `X`: is any `AbstractVector` of input features (eg, a `DataFrame`) whose items + are of scitype `GrayImage`; check the scitype with `scitype(X)` +- `y`: is the target, which can be any `AbstractVector` whose element + scitype is `Multiclass` or `OrderedFactor` with `n_out` classes; + check the scitype with `scitype(y)` + + +# Hyper-parameters + +- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. +- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification task, the most natural loss functions are: + - `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. 
+  - `Flux.logitcrossentropy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy.
+  - `Flux.binarycrossentropy`: Typically used as loss in binary classification, with labels encoded as `0` or `1`.
+  - `Flux.logitbinarycrossentropy`: Mathematically equal to binary crossentropy, but computationally more numerically stable than finalising the outputs with `sigmoid` and then calculating binary crossentropy.
+  - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives.
+  - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples.
+  - `Flux.binary_focal_loss`: Binary version of the above.
+- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset.
+- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the network's weights. Typically, the batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead to smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), stick with it, and only tune the learning rate. In most literature, the batch size is set to a power of two, but this is fairly arbitrary.
+- `lambda::Float64=0`: The strength of the regularization used during training. Can be any value in the range `[0, ∞)`.
+- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization.
+- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training.
+- `optimiser_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`; otherwise it will not.
+- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For training on GPU, use `CUDALibs()`; otherwise training defaults to `CPU1()`.
+- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a classification task, `softmax` is used for single-label multiclass problems, while `sigmoid` is appropriate for binary or multi-label problems.
+
+
+# Operations
+
+- `predict(mach, Xnew)`: return predictions of the target given new
+  features `Xnew` having the same Scitype as `X` above. Predictions are
+  probabilistic.
+- `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions
+  returned above.
+
+
+# Fitted parameters
+
+The fields of `fitted_params(mach)` are:
+- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network.
+
+
+# Report
+
+The fields of `report(mach)` are:
+- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1.
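+
+As a quick illustration (a sketch only, assuming `mach` is a machine wrapping a fitted `ImageClassifier`), these items are accessed like so:
+```julia
+fitted_params(mach).chain      # the underlying Flux chain ("Fitted parameters" above)
+report(mach).training_losses   # per-epoch training losses ("Report" above)
+```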
+ +# Examples + +In this example we use MLJ to classify the MNIST image dataset +```julia +using MLJ +using Flux +import MLJFlux +import MLJIteration # for `skip` + +MLJ.color_off() + +using Plots +pyplot(size=(600, 300*(sqrt(5)-1))); +``` +First we want to download the MNIST dataset, and unpack into images and labels +```julia +import MLDatasets: MNIST + +ENV["DATADEPS_ALWAYS_ACCEPT"] = true +images, labels = MNIST.traindata(); +``` +In MLJ, integers cannot be used for encoding categorical data, so we must coerce them into the `Multiclass` [scientific type](https://juliaai.github.io/ScientificTypes.jl/dev/). For more in this, see [Working with Categorical Data](https://alan-turing-institute.github.io/MLJ.jl/dev/working_with_categorical_data/): +```julia +labels = coerce(labels, Multiclass); +images = coerce(images, GrayImage); + +# Checking scientific types: + +@assert scitype(images) <: AbstractVector{<:Image} +@assert scitype(labels) <: AbstractVector{<:Finite} + +images[1] +``` +For general instructions on coercing image data, see [type coercion for image data](https://alan-turing-institute.github.io/ScientificTypes.jl/dev/%23Type-coercion-for-image-data-1) +We start by defining a suitable `builder` object. This is a recipe +for building the neural network. Our builder will work for images of +any (constant) size, whether they be color or black and white (ie, +single or multi-channel). The architecture always consists of six +alternating convolution and max-pool layers, and a final dense +layer; the filter size and the number of channels after each +convolution layer is customisable. +```julia +import MLJFlux + +struct MyConvBuilder + filter_size::Int + channels1::Int + channels2::Int + channels3::Int +end + +make2d(x::AbstractArray) = reshape(x, :, size(x)[end]) + +function MLJFlux.build(b::MyConvBuilder, rng, n_in, n_out, n_channels) + k, c1, c2, c3 = b.filter_size, b.channels1, b.channels2, b.channels3 + mod(k, 2) == 1 || error("`filter_size` must be odd. ") + p = div(k - 1, 2) # padding to preserve image size + init = Flux.glorot_uniform(rng) + front = Chain( + Conv((k, k), n_channels => c1, pad=(p, p), relu, init=init), + MaxPool((2, 2)), + Conv((k, k), c1 => c2, pad=(p, p), relu, init=init), + MaxPool((2, 2)), + Conv((k, k), c2 => c3, pad=(p, p), relu, init=init), + MaxPool((2 ,2)), + make2d) + d = Flux.outputsize(front, (n_in..., n_channels, 1)) |> first + return Chain(front, Dense(d, n_out, init=init)) +end +``` +It is important to note that in our `build` function, there is no final softmax. This is applie by default in all MLJFlux classifiers, using the `finaliser` hyperparameter of the classifier. Now that we have our builder defined, we can define the actual moel. If you have a GPU, you can substitute in `acceleration=CudaLibs()` below. Note that in the case of convolutions, this will **greatly** increase the speed of training. +```julia +ImageClassifier = @load ImageClassifier +clf = ImageClassifier(builder=MyConvBuilder(3, 16, 32, 32), + batch_size=50, + epochs=10, + rng=123) +``` +You can add flux options such as `optimiser` and `loss` in the snippet above. Currently, `loss` must be a flux-compatible loss, and not an MLJ measure. 
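+For example (a sketch only; the particular learning rate and loss shown here are illustrative assumptions, not recommendations):
+```julia
+clf = ImageClassifier(builder=MyConvBuilder(3, 16, 32, 32),
+                      batch_size=50,
+                      epochs=10,
+                      rng=123,
+                      optimiser=Flux.ADAM(0.001),  # Flux optimiser with learning rate 0.001
+                      loss=Flux.crossentropy)      # a Flux-compatible loss
+```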
+Next, we can bind the model with the data in a machine, and fit the first 500 or so images: +```julia +mach = machine(clf, images, labels); + +fit!(mach, rows=1:500, verbosity=2); + +report(mach) + +chain = fitted_params(mach) + +Flux.params(chain)[2] +``` +We can tack on 20 more epochs by modifying the `epochs` field, and iteratively fit some more: +```julia +clf.epochs = clf.epochs + 20 +fit!(mach, rows=1:500); +``` +We can also make predictions and calculate an out-of-sample loss estimate, in two ways! +```julia +predicted_labels = predict(mach, rows=501:1000); +cross_entropy(predicted_labels, labels[501:1000]) |> mean +# alternative one liner! +evaluate!(mach, + resampling=Holdout(fraction_train=0.5), + measure=cross_entropy, + rows=1:1000, + verbosity=0) +``` + +## Wrapping in iteration controls + +Any iterative MLJFlux model can be wrapped in **iteration controls**, as we demonstrate next. For more on MLJ's `IteratedModel` wrapper, see the [MLJ documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/). +The "self-iterating" classifier (`iterated_clf` below) is for iterating the image classifier defined above until a stopping criterion is hit. We use the following stopping criterion: +- `Patience(3)`: 3 consecutive increases in the loss +- `InvalidValue()`: an out-of-sample loss or a training loss that is `NaN` or `±Inf` +- `TimeLimit(t=5/60)`: training time has exceeded 5 minutes. +We can specify how often these checks (and other controls) are applied using the `Step` control. Additionally, we can define controls to +- save a snapshot of the machine every N control cycles (`save_control`) +- record traces of the out-of-sample loss and training losses for plotting (`WithLossDo`) +- record mean value traces of each Flux parameter for plotting (`Callback`) +And other controls. For a full list, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). +First, we define some helper functions and some empty vectors to store traces: +```julia +make2d(x::AbstractArray) = reshape(x, :, size(x)[end]) +make1d(x::AbstractArray) = reshape(x, length(x)); + +# to extract the flux parameters from a machine +parameters(mach) = make1d.(Flux.params(fitted_params(mach))); + +# trace storage +losses = [] +training_losses = [] +parameter_means = Float32[]; +epochs = [] + +# to update traces +update_loss(loss) = push!(losses, loss) +update_training_loss(losses) = push!(training_losses, losses[end]) +update_means(mach) = append!(parameter_means, mean.(parameters(mach))); +update_epochs(epoch) = push!(epochs, epoch) +``` +Next, we can define our controls! 
We store them in a simple vector: +```julia +save_control = + MLJIteration.skip(Save(joinpath(DIR, "mnist.jlso")), predicate=3) + +controls=[Step(2), + Patience(3), + InvalidValue(), + TimeLimit(5/60), + save_control, + WithLossDo(), + WithLossDo(update_loss), + WithTrainingLossesDo(update_training_loss), + Callback(update_means), + WithIterationsDo(update_epochs) +``` +Once the controls are defined, we can instantiate and fit our "self-iterating" classifier: +```julia +iterated_clf = IteratedModel(model=clf, + controls=controls, + resampling=Holdout(fraction_train=0.7), + measure=log_loss) + +mach = machine(iterated_clf, images, labels); +fit!(mach, rows=1:500); +``` +Next we can compare the training and out-of-sample losses, as well as view the evolution of the weights: +```julia +plot(epochs, losses, + xlab = "epoch", + ylab = "root squared error", + label="out-of-sample") +plot!(epochs, training_losses, label="training") + +savefig(joinpath(DIR, "loss.png")) + +n_epochs = length(losses) +n_parameters = div(length(parameter_means), n_epochs) +parameter_means2 = reshape(copy(parameter_means), n_parameters, n_epochs)' +plot(epochs, parameter_means2, + title="Flux parameter mean weights", + xlab = "epoch") +# **Note.** The the higher the number, the deeper the chain parameter. +savefig(joinpath(DIR, "weights.png")) +``` +Since we saved our model every few epochs, we can retrieve the snapshots so we can make predictions! +```julia +mach2 = machine(joinpath(DIR, "mnist3.jlso")) +predict_mode(mach2, images[501:503]) +``` + +## Resuming training + +If we change `iterated_clf.controls` or `clf.epochs`, we can resume training from where it left off. This is very useful for long-running training sessions, where you may be interrupted by for example a bad connection or computer hibernation. +```julia +iterated_clf.controls[2] = Patience(4) +fit!(mach, rows=1:500) + +plot(epochs, losses, + xlab = "epoch", + ylab = "root squared error", + label="out-of-sample") +plot!(epochs, training_losses, label="training") +``` +See also +[`NeuralNetworkClassifier`](@ref) +""" +ImageClassifier + for Model in [:NeuralNetworkRegressor, :MultitargetNeuralNetworkRegressor] ex = quote @@ -149,12 +507,552 @@ for Model in [:NeuralNetworkRegressor, :MultitargetNeuralNetworkRegressor] return model end - @doc $doc_regressor($Model) $Model - end eval(ex) end + +""" +$(MMI.doc_header(NeuralNetworkRegressor)) + +`NeuralNetworkRegressor`: A neural network model for making deterministic +predictions of a `Continuous` target, given a table of `Continuous` features. + +# Training data + +In MLJ or MLJBase, bind an instance `model` to data with + mach = machine(model, X, y) + +Where + +- `X`: is any table of input features (eg, a `DataFrame`) whose columns + are of scitype `Continuous`; check the scitype with `schema(X)` +- `y`: is the target, which can be any `AbstractVector` whose element + scitype is `Continuous`; check the scitype with `scitype(y)` + + +# Hyper-parameters + +- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. + Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder + using the `@builder` macro, see examples for further information. +- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating + of the weights of the network. For further reference, see either the examples or + [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). 
+  To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to
+  start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`.
+- `loss=Flux.mse`: The loss function which the network will optimize. Should be a function
+  which can be called in the form `loss(yhat, y)`.
+  Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/).
+  For a regression task, the most natural loss functions are:
+    - `Flux.mse`
+    - `Flux.mae`
+    - `Flux.msle`
+    - `Flux.huber_loss`
+- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents
+  one pass through the entirety of the training dataset.
+- `batch_size::Int=1`: The batch size to be used for training. The batch size represents
+  the number of samples per update of the network's weights. Typically, the batch size
+  should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training
+  loss curves, while larger batch sizes lead to smoother training loss curves.
+  In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128),
+  stick with it, and only tune the learning rate. In most examples, the batch size is set
+  to a power of two, but this is fairly arbitrary.
+- `lambda::Float64=0`: The strength of the regularization used during training. Can be any value
+  in the range `[0, ∞)`.
+- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`.
+  A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization.
+- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training.
+- `optimiser_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a
+  machine if the associated optimiser has changed. If true, the associated machine will
+  retrain from scratch on `fit`; otherwise it will not.
+- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done.
+  For training on GPU, use `CUDALibs()`; otherwise training defaults to `CPU1()`.
+- `finaliser=Flux.softmax`: The final activation function of the neural network.
+  Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include
+  `Flux.sigmoid` and the identity function (otherwise known as "linear activation").
+
+
+# Operations
+
+- `predict(mach, Xnew)`: return predictions of the target given new
+  features `Xnew` having the same Scitype as `X` above. Predictions are
+  deterministic.
+
+
+# Fitted parameters
+
+The fields of `fitted_params(mach)` are:
+
+- `chain`: The trained "chain", or series of layers, functions, and activations which
+  make up the neural network.
+
+
+# Report
+
+The fields of `report(mach)` are:
+
+- `training_losses`: The history of training losses, a vector containing the history of
+  all the losses during training. The first element of the vector is the initial
+  penalized loss. After the first element, the nth element corresponds to the loss of
+  epoch n-1.
+
+# Examples
+
+In this example we build a regression model using the Boston house price dataset.
+```julia
+using MLJ
+using MLJFlux
+using Flux
+using Plots
+```
+First, we load in the data, with target `:MEDV`. We load in all features except `:CHAS`:
+```julia
+data = OpenML.load(531); # Loads from https://www.openml.org/d/531
+
+y, X = unpack(data, ==(:MEDV), !=(:CHAS); rng=123);
+
+scitype(y)
+schema(X)
+```
+Since MLJFlux models do not handle ordered factors, we can treat `:RAD` as `Continuous`:
+```julia
+X = coerce(X, :RAD=>Continuous)
+```
+Let's also make a test set:
+```julia
+(X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true);
+```
+Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and `rng` is an RNG. `init` is the function used to generate the random initial weights of the network.
+```julia
+builder = MLJFlux.@builder begin
+    init=Flux.glorot_uniform(rng)
+    Chain(Dense(n_in, 64, relu, init=init),
+          Dense(64, 32, relu, init=init),
+          Dense(32, 1, init=init))
+end
+```
+Finally, we can define the model!
+```julia
+NeuralNetworkRegressor = @load NeuralNetworkRegressor
+model = NeuralNetworkRegressor(builder=builder,
+                               rng=123,
+                               epochs=20)
+```
+For our neural network, since different features likely have different scales, if we do not standardize them the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key!
+```julia
+pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer)
+```
+If we fit with a high verbosity (>1), we will see the losses during training. We can also see the losses in the output of `report(mach)`:
+```julia
+mach = machine(pipe, X, y)
+fit!(mach, verbosity=2)
+
+# first element initial loss, 2:end per epoch training losses
+report(mach).transformed_target_model_deterministic.training_losses
+```
+
+## Experimenting with learning rate
+
+We can visually compare how the learning rate affects the predictions:
+```julia
+plt = plot()
+
+rates = 10. .^ (-5:0)
+
+foreach(rates) do η
+    pipe.transformed_target_model_deterministic.model.optimiser.eta = η
+    fit!(mach, force=true, verbosity=0)
+    losses =
+        report(mach).transformed_target_model_deterministic.model.training_losses[3:end]
+    plot!(1:length(losses), losses, label=η)
+end
+plt #!md
+
+savefig(joinpath("assets", "learning_rate.png"))
+
+pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001
+```
+
+## Using Iteration Controls
+
+We can also wrap the model with MLJ iteration controls. Suppose we want a model that trains until the out-of-sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criteria, `InvalidValue()` and `TimeLimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces, as well as update them:
+```julia
+# For initializing or clearing the traces:
+
+clear() = begin
+    global losses = []
+    global training_losses = []
+    global epochs = []
+    return nothing
+end
+
+# And to update the traces:
+
+update_loss(loss) = push!(losses, loss)
+update_training_loss(report) =
+    push!(training_losses,
+          report.transformed_target_model_deterministic.model.training_losses[end])
+update_epochs(epoch) = push!(epochs, epoch)
+```
+For further reference on controls, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). To apply the controls, we simply stack them in a vector and then make an `IteratedModel`:
+```julia
+controls=[Step(1),
+          NumberSinceBest(6),
+          InvalidValue(),
+          TimeLimit(1/60),
+          WithLossDo(update_loss),
+          WithReportDo(update_training_loss),
+          WithIterationsDo(update_epochs)]
+
+iterated_pipe =
+    IteratedModel(model=pipe,
+                  controls=controls,
+                  resampling=Holdout(fraction_train=0.8),
+                  measure = l2)
+```
+Next, we can clear the traces, fit the model, and plot the traces:
+```julia
+clear()
+mach = machine(iterated_pipe, X, y)
+fit!(mach)
+
+plot(epochs, losses,
+     xlab = "epoch",
+     ylab = "mean sum of squares error",
+     label="out-of-sample",
+     legend = :topleft);
+scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md
+
+savefig(joinpath("assets", "loss.png"))
+```
+
+### Brief note on iterated models
+
+Training an `IteratedModel` means holding out some data (80% in this case) so an
+out-of-sample loss can be tracked and used in the specified stopping criterion,
+`NumberSinceBest(6)`. However, once the stop is triggered, the model wrapped by
+`IteratedModel` (our pipeline model) is retrained on all data for the same number of
+iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned
+parameters.
+
+## Evaluating Iterated Models
+
+We can evaluate our model with the `evaluate!` function:
+```julia
+e = evaluate!(mach,
+              resampling=CV(nfolds=8),
+              measures=[l1, l2])
+
+using Measurements
+l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7)
+@show l1_loss
+```
+We take this estimate of the uncertainty of the generalization error with a [grain of
+salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing).
+
+## Comparison with other models on the test set
+
+Although we cannot assign them statistical significance, here are comparisons, on the
+untouched test set, of the error of our self-iterating neural network regressor with a
+couple of other models trained on the same data (using default hyperparameters):
+```julia
+function performance(model)
+    mach = machine(model, X, y) |> fit!
+ yhat = predict(mach, Xtest) + l1(yhat, ytest) |> mean +end +performance(iterated_pipe) + +three_models = [(@load EvoTreeRegressor)(), # tree boosting model + (@load LinearRegressor pkg=MLJLinearModels)(), + iterated_pipe] + +errs = performance.(three_models) + +(models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty +``` + +See also +[`MultitargetNeuralNetworkRegressor`](@ref) +""" +NeuralNetworkRegressor + +""" +$(MMI.doc_header(MultitargetNeuralNetworkRegressor)) + +`MultitargetNeuralNetworkRegressor`: A neural network model for making deterministic +predictions of a `Continuous` multi-target, presented as a table, given a table of +`Continuous` features. + +# Training data + +In MLJ or MLJBase, bind an instance `model` to data with + mach = machine(model, X, y) + +Where + +- `X`: is any table of input features (eg, a `DataFrame`) whose columns + are of scitype `Continuous`; check the scitype with `schema(X)` +- `y`: is the target, which can be any table of output targets whose element + scitype is `Continuous`; check the scitype with `schema(y)` + + +# Hyper-parameters + +- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural + network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct + your own builder using the `@builder` macro, see examples for further information. +- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the + updating of the weights of the network. For further reference, see either the examples + or [the Flux optimiser + documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a + learning rate (the update rate of the optimizer), a good rule of thumb is to start out + at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `loss=Flux.mse`: The loss function which the network will optimize. Should be a + function which can be called in the form `loss(yhat, y)`. Possible loss functions are + listed in [the Flux loss function + documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a regression task, + the most natural loss functions are: + - `Flux.mse` + - `Flux.mae` + - `Flux.msle` + - `Flux.huber_loss` +- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents + one pass through the entirety of the training dataset. +- `batch_size::Int=1`: The batch size to be used for training. The batch size represents + the number of samples per update of the networks weights. Typcally, batch size should be + somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, + while larger batch sizes lead towards smoother training loss curves. In general, it is a + good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and + only tune the learning rate. In most literature, batch size is set in powers of twos, + but this is fairly arbitrary. +- `lambda::Float64=0`: The stregth of the regularization used during training. Can be + any value in the range `[0, ∞)`. +- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of + 0 represents L2 regularization, and a value of 1 represents L1 regularization. +- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during + training. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting + a machine if the associated optimiser has changed. If true, the associated machine will + retrain from scratch on `fit`, otherwise it will not. 
+- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. + For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. +- `finaliser=Flux.softmax`: The final activation function of the neural network. +Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include +`Flux.sigmoid` and the identity function (otherwise known as "linear activation"). + +# Operations + +- `predict(mach, Xnew)`: return predictions of the target given new + features `Xnew` having the same Scitype as `X` above. Predictions are + deterministic. + + +# Fitted parameters + +The fields of `fitted_params(mach)` are: + +- `chain`: The trained "chain", or series of layers, functions, and activations which + make up the neural network. + + +# Report + +The fields of `report(mach)` are: + +- `training_losses`: The history of training losses, a vector containing the history of + all the losses during training. The first element of the vector is the initial + penalized loss. After the first element, the nth element corresponds to the loss of + epoch n-1. + +# Examples + +In this example we build a regression model using a toy dataset. +```julia +using MLJ +using MLJFlux +using Flux +using Plots +using MLJBase: augment_X +``` +First, we generate some data: +```julia +X = augment_X(randn(10000, 8), true); +θ = randn((9,2)); +y = X * θ; +X = MLJ.table(X) +y = MLJ.table(y) + +schema(y) +schema(X) +``` +Lets also make a test set: +```julia +(X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); +``` +Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. +```julia +builder = MLJFlux.@builder begin + init=Flux.glorot_uniform(rng) + Chain(Dense(n_in, 64, relu, init=init), + Dense(64, 32, relu, init=init), + Dense(32, 1, init=init)) +end +``` +Finally, we can define the model! +```julia +MultitargetNeuralNetworkRegressor = @load MultitargetNeuralNetworkRegressor + model = MultitargetNeuralNetworkRegressor(builder=builder, + rng=123, + epochs=20) +``` +For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! +```julia +pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) +``` +If we fit with a high verbosity (>1), we will see the losses during training. We can also see the losses in the output of `report(mach)` + +```julia +mach = machine(pipe, X, y) +fit!(mach, verbosity=2) + +# first element initial loss, 2:end per epoch training losses +report(mach).transformed_target_model_deterministic.training_losses + +``` + +## Experimenting with learning rate + +We can visually compare how the learning rate affects the predictions: +```julia +plt = plot() + +rates = 10. 
.^ (-5:0) + +foreach(rates) do η + pipe.transformed_target_model_deterministic.model.optimiser.eta = η + fit!(mach, force=true, verbosity=0) + losses = + report(mach).transformed_target_model_deterministic.model.training_losses[3:end] + plot!(1:length(losses), losses, label=η) +end +plt #!md + +savefig(joinpath("assets", "learning_rate.png")) + + +pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 + +``` + +## Using Iteration Controls + +We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as updte the traces. +```julia +# For initializing or clearing the traces: + +clear() = begin + global losses = [] + global training_losses = [] + global epochs = [] + return nothing +end + +# And to update the traces: + +update_loss(loss) = push!(losses, loss) +update_training_loss(report) = + push!(training_losses, + report.transformed_target_model_deterministic.model.training_losses[end]) +update_epochs(epoch) = push!(epochs, epoch) +``` +For further reference of controls, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: +```julia +controls=[Step(1), + NumberSinceBest(6), + InvalidValue(), + TimeLimit(1/60), + WithLossDo(update_loss), + WithReportDo(update_training_loss), +WithIterationsDo(update_epochs)] + +iterated_pipe = + IteratedModel(model=pipe, + controls=controls, + resampling=Holdout(fraction_train=0.8), + measure = l2) +``` +Next, we can clear the traces, fit the model, and plot the traces: +```julia +clear() +mach = machine(iterated_pipe, X, y) +fit!(mach) + +plot(epochs, losses, + xlab = "epoch", + ylab = "mean sum of squares error", + label="out-of-sample", + legend = :topleft); +scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md + +savefig(joinpath("assets", "loss.png")) +``` + +### Brief note on iterated models + +Training an `IteratedModel` means holding out some data (80% in this case) so an out-of-sample loss can be tracked and used in the specified stopping criterion, `NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by `IteratedModel` (our pipeline model) is retrained on all data for the same number of iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned parameters. + +## Evaluating Iterated Models + +We can evaluate our model with the `evaluate!` function: +```julia +e = evaluate!(mach, + resampling=CV(nfolds=8), + measures=[l1, l2]) + +using Measurements +l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) +@show l1_loss +``` +We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). 
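+The same fold-wise calculation can be repeated for each measure; a minimal sketch, assuming the evaluation object `e` and the `Measurements` import from the previous code block:
+```julia
+# standard error over the 8 CV folds, for each of the two measures:
+for (measure, μ, folds) in zip([l1, l2], e.measurement, e.per_fold)
+    println(measure, ": ", μ ± std(folds)/sqrt(length(folds) - 1))
+end
+```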
+ +## Comparison with other models on the test set + +Although we cannot assign them statistical significance, here are comparisons, on the untouched test set, of the eror of our self-iterating neural network regressor with a couple of other models trained on the same data (using default hyperparameters): +```julia + +function performance(model) + mach = machine(model, X, y) |> fit! + yhat = predict(mach, Xtest) + l1(yhat, ytest) |> mean +end +performance(iterated_pipe) + +three_models = [(@load EvoTreeRegressor)(), # tree boosting model + (@load LinearRegressor pkg=MLJLinearModels)(), + iterated_pipe] + +errs = performance.(three_models) + +(models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty + + +``` +See also +[`NeuralNetworkRegressor`](@ref) +""" +MultitargetNeuralNetworkRegressor + const Regressor = Union{NeuralNetworkRegressor, MultitargetNeuralNetworkRegressor} From a19d93a28b0427d33de18fc13a045585a0db0d73 Mon Sep 17 00:00:00 2001 From: josephsdavid Date: Tue, 12 Jul 2022 16:42:47 -0500 Subject: [PATCH 10/24] git killing me --- nn.md | 245 ------------------------------------------ nnc.md | 128 ---------------------- nnclassif.norg | 148 ------------------------- nnm.md | 247 ------------------------------------------ nnregressor.norg | 273 ----------------------------------------------- 5 files changed, 1041 deletions(-) delete mode 100644 nn.md delete mode 100644 nnc.md delete mode 100644 nnclassif.norg delete mode 100644 nnm.md delete mode 100644 nnregressor.norg diff --git a/nn.md b/nn.md deleted file mode 100644 index 46641a33..00000000 --- a/nn.md +++ /dev/null @@ -1,245 +0,0 @@ -# NeuralNetworkRegressor - -`NeuralNetworkRegressor`: A neural network model for making deterministic -predictions of a `Continuous` target, given a table of `Continuous` features. - -# Training data - -In MLJ or MLJBase, bind an instance `model` to data with -mach = machine(model, X, y) -Where -- `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the scitype with `schema(X)` -- `y`: is the target, which can be any `AbstractVector` whose element - scitype is `Continuous`; check the scitype with `scitype(y)` - - -# Hyper-parameters - -- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. -- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. -- `loss=Flux.mse`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a regression task, the most natural loss functions are: - - `Flux.mse` - - `Flux.mae` - - `Flux.msle` - - `Flux.huber_loss` -- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. 
The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. -- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. -- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. -- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. -- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). - - -# Operations - -- `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same Scitype as `X` above. Predictions are - deterministic. - - -# Fitted parameters - -The fields of `fitted_params(mach)` are: -- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. - - -# Report - -The fields of `report(mach)` are: -- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. - -# Examples - -In this example we build a regression model using the Boston house price dataset -```julia - - using MLJ - using MLJFlux - using Flux - using Plots - -``` -First, we load in the data, with target `:MEDV`. We load in all features except `:CHAS`: -```julia - - data = OpenML.load(531); # Loads from https://www.openml.org/d/531 - - y, X = unpack(data, ==(:MEDV), !=(:CHAS); rng=123); - - scitype(y) - schema(X) - -``` -Since MLJFlux models do not handle ordered factos, we can treat `:RAD` as `Continuous`: -```julia -X = coerce(X, :RAD=>Continuous) -``` -Lets also make a test set: -```julia - - (X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); - -``` -Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. -```julia -builder = MLJFlux.@builder begin - init=Flux.glorot_uniform(rng) - Chain(Dense(n_in, 64, relu, init=init), - Dense(64, 32, relu, init=init), - Dense(32, 1, init=init)) - end -``` -Finally, we can define the model! 
-```julia - - NeuralNetworkRegressor = @load NeuralNetworkRegressor - model = NeuralNetworkRegressor(builder=builder, - rng=123, - epochs=20) -``` -For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! -```julia -pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) -``` -If we fit with a high verbosity (>1), we will see the losses during training. We can also see the losses in the output of `report(mach)` - -```julia -mach = machine(pipe, X, y) - fit!(mach, verbosity=2) - - # first element initial loss, 2:end per epoch training losses - report(mach).transformed_target_model_deterministic.training_losses - -``` - -## Experimenting with learning rate - -We can visually compare how the learning rate affects the predictions: -```julia -plt = plot() - - rates = 10. .^ (-5:0) - - foreach(rates) do η - pipe.transformed_target_model_deterministic.model.optimiser.eta = η - fit!(mach, force=true, verbosity=0) - losses = - report(mach).transformed_target_model_deterministic.model.training_losses[3:end] - plot!(1:length(losses), losses, label=η) - end - plt #!md - - savefig(joinpath("assets", "learning_rate.png")) - - - pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 - -``` - -## Using Iteration Controls - -We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as updte the traces. -```julia - - # For initializing or clearing the traces: - - clear() = begin - global losses = [] - global training_losses = [] - global epochs = [] - return nothing - end - - # And to update the traces: - - update_loss(loss) = push!(losses, loss) - update_training_loss(report) = - push!(training_losses, - report.transformed_target_model_deterministic.model.training_losses[end]) - update_epochs(epoch) = push!(epochs, epoch) - -``` -For further reference of controls, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). 
To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: -```julia - - controls=[Step(1), - NumberSinceBest(6), - InvalidValue(), - TimeLimit(1/60), - WithLossDo(update_loss), - WithReportDo(update_training_loss), - WithIterationsDo(update_epochs)] - - - iterated_pipe = - IteratedModel(model=pipe, - controls=controls, - resampling=Holdout(fraction_train=0.8), - measure = l2) - -``` -Next, we can clear the traces, fit the model, and plot the traces: -```julia - - - clear() - mach = machine(iterated_pipe, X, y) - fit!(mach) - - plot(epochs, losses, - xlab = "epoch", - ylab = "mean sum of squares error", - label="out-of-sample", - legend = :topleft); - scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md - - savefig(joinpath("assets", "loss.png")) -``` - -### Brief note on iterated models - -Training an `IteratedModel` means holding out some data (80% in this case) so an out-of-sample loss can be tracked and used in the specified stopping criterion, `NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by `IteratedModel` (our pipeline model) is retrained on all data for the same number of iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned parameters. - -## Evaluating Iterated Models - -We can evaluate our model with the `evaluate!` function: -```julia - - e = evaluate!(mach, - resampling=CV(nfolds=8), - measures=[l1, l2]) - -#- - - using Measurements - l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) - @show l1_loss - -``` -We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). - -## Comparison with other models on the test set - -Although we cannot assign them statistical significance, here are comparisons, on the untouched test set, of the eror of our self-iterating neural network regressor with a couple of other models trained on the same data (using default hyperparameters): -```julia - - function performance(model) - mach = machine(model, X, y) |> fit! - yhat = predict(mach, Xtest) - l1(yhat, ytest) |> mean - end - performance(iterated_pipe) - - three_models = [(@load EvoTreeRegressor)(), # tree boosting model - (@load LinearRegressor pkg=MLJLinearModels)(), - iterated_pipe] - - errs = performance.(three_models) - - (models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty - - -``` diff --git a/nnc.md b/nnc.md deleted file mode 100644 index a14f2ecc..00000000 --- a/nnc.md +++ /dev/null @@ -1,128 +0,0 @@ -# NeuralNetworkClassifier - -`NeuralNetworkClassifier`: -- TODO - -# Training data - -In MLJ or MLJBase, bind an instance `model` to data with -mach = machine(model, X, y) -Where -- `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the scitype with `schema(X)` -- `y`: is the target, which can be any `AbstractVector` whose element - scitype is `Finite` with `n_out` classes; check the scitype with `scitype(y)` - - -# Hyper-parameters - -- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. -- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. 
For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. -- `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification task, the most natural loss functions are: - - `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. - - `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. - - `Flux.binarycrossentropy`: Typically used as loss in binary classification, with labels in a 1-hot encoded format. - - `Flux.logitbinarycrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `sigmoid` and then calculating binary crossentropy. - - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. - - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples. - - `Flux.binary_focal_loss`: Binary version of the above -- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. -- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. -- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. -- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. -- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). - - -# Operations - -- `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same Scitype as `X` above. Predictions are - probabilistic. -- `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions - returned above. 
- - -# Fitted parameters - -The fields of `fitted_params(mach)` are: -- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. - - -# Report - -The fields of `report(mach)` are: -- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. - -# Examples - -In this example we build a classification model using the Iris dataset. -```julia - - using MLJ - using Flux - import RDatasets - - using Random - Random.seed!(123) - - MLJ.color_off() - - using Plots - pyplot(size=(600, 300*(sqrt(5)-1))); - -``` -This is a very basic example, using a default builder and no standardization. -For a more advance illustration, see [`NeuralNetworkRegressor`](@ref) or [`ImageClassifier`](@ref). First, we can load the data: -```julia - - iris = RDatasets.dataset("datasets", "iris"); - y, X = unpack(iris, ==(:Species), colname -> true, rng=123); - NeuralNetworkClassifier = @load NeuralNetworkClassifier - clf = NeuralNetworkClassifier() - -``` -Next, we can train the model: -```julia -import Random.seed!; seed!(123) - mach = machine(clf, X, y) - fit!(mach) -``` -We can train the model in an incremental fashion with the `optimizer_changes_trigger_retraining` flag set to false (which is by default). Here, we change the number of iterations and the learning rate of the optimiser: -```julia -clf.optimiser.eta = clf.optimiser.eta * 2 - clf.epochs = clf.epochs + 5 - - # note that if the optimizer_changes_trigger_retraining flag was set to true - # the model would be completely retrained from scratch because the optimizer was - # updated - fit!(mach, verbosity=2); -``` -We can inspect the mean training loss using the `cross_entropy` function: -```julia - - training_loss = cross_entropy(predict(mach, X), y) |> mean - -``` -And we can access the Flux chain (model) using `fitted_params`: -```julia -training_loss = cross_entropy(predict(mach, X), y) |> mean -``` -Finally, we can see how the out-of-sample performance changes over time, using the `learning_curve` function -```julia -r = range(clf, :epochs, lower=1, upper=200, scale=:log10) - curve = learning_curve(clf, X, y, - range=r, - resampling=Holdout(fraction_train=0.7), - measure=cross_entropy) - using Plots - plot(curve.parameter_values, - curve.measurements, - xlab=curve.parameter_name, - xscale=curve.parameter_scale, - ylab = "Cross Entropy") - - savefig("iris_history.png") -``` diff --git a/nnclassif.norg b/nnclassif.norg deleted file mode 100644 index 25ba3847..00000000 --- a/nnclassif.norg +++ /dev/null @@ -1,148 +0,0 @@ -* NeuralNetworkClassifier - - `NeuralNetworkClassifier`: - - [ ] TODO - -* Training data - - In MLJ or MLJBase, bind an instance `model` to data with - - mach = machine(model, X, y) - - Where - - - `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the scitype with `schema(X)` - - - `y`: is the target, which can be any `AbstractVector` whose element - scitype is `Finite` with `n_out` classes; check the scitype with `scitype(y)` - - -* Hyper-parameters - - - `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. 
- - `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or {https://fluxml.ai/Flux.jl/stable/training/optimisers/}[the Flux optimiser documentation]. To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. - - `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in {https://fluxml.ai/Flux.jl/stable/models/losses/}[the Flux loss function documentation]. For a classification task, the most natural loss functions are: - -- `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. - -- `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. - -- `Flux.binarycrossentropy`: Typically used as loss in binary classification, with labels in a 1-hot encoded format. - -- `Flux.logitbinarycrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `sigmoid` and then calculating binary crossentropy. - -- `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. - -- `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples. - -- `Flux.binary_focal_loss`: Binary version of the above - - `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. - - `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. - - `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. - - `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. - - `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. - - `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. - - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. - - `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). - - -* Operations - - - `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same Scitype as `X` above. 
Predictions are - probabilistic. - - `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions - returned above. - - - -* Fitted parameters - - The fields of `fitted_params(mach)` are: - - - `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. - - -* Report - - The fields of `report(mach)` are: - - - `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch $n-1$. - -* Examples - - In this example we build a classification model using the Iris dataset. - - @code julia - - using MLJ - using Flux - import RDatasets - - using Random - Random.seed!(123) - - MLJ.color_off() - - using Plots - pyplot(size=(600, 300*(sqrt(5)-1))); - - @end - - This is a very basic example, using a default builder and no standardization. - For a more advance illustration, see [`NeuralNetworkRegressor`](@ref) or [`ImageClassifier`](@ref). First, we can load the data: - - @code julia - - iris = RDatasets.dataset("datasets", "iris"); - y, X = unpack(iris, ==(:Species), colname -> true, rng=123); - NeuralNetworkClassifier = @load NeuralNetworkClassifier - clf = NeuralNetworkClassifier() - - @end - - Next, we can train the model: - @code julia - import Random.seed!; seed!(123) - mach = machine(clf, X, y) - fit!(mach) - @end - - We can train the model in an incremental fashion with the `optimizer_changes_trigger_retraining` flag set to false (which is by default). Here, we change the number of iterations and the learning rate of the optimiser: - - @code julia - clf.optimiser.eta = clf.optimiser.eta * 2 - clf.epochs = clf.epochs + 5 - - # note that if the optimizer_changes_trigger_retraining flag was set to true - # the model would be completely retrained from scratch because the optimizer was - # updated - fit!(mach, verbosity=2); - @end - - We can inspect the mean training loss using the `cross_entropy` function: - - @code julia - - training_loss = cross_entropy(predict(mach, X), y) |> mean - - @end - - And we can access the Flux chain (model) using `fitted_params`: - - @code julia - training_loss = cross_entropy(predict(mach, X), y) |> mean - @end - - Finally, we can see how the out-of-sample performance changes over time, using the `learning_curve` function - - @code julia - r = range(clf, :epochs, lower=1, upper=200, scale=:log10) - curve = learning_curve(clf, X, y, - range=r, - resampling=Holdout(fraction_train=0.7), - measure=cross_entropy) - using Plots - plot(curve.parameter_values, - curve.measurements, - xlab=curve.parameter_name, - xscale=curve.parameter_scale, - ylab = "Cross Entropy") - - savefig("iris_history.png") - @end - diff --git a/nnm.md b/nnm.md deleted file mode 100644 index 5c0234dc..00000000 --- a/nnm.md +++ /dev/null @@ -1,247 +0,0 @@ -# MultitargetNeuralNetworkRegressor - -`MultitargetNeuralNetworkRegressor`: A neural network model for making deterministic -predictions of a `Continuous` multi-target, presented as a table, given a table of `Continuous` features. 
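As an illustrative sketch (all names here are placeholders), a two-target problem presents the target as a table with one `Continuous` column per target, which can be checked with `schema`:
```julia
using MLJ
X = (x1 = rand(100), x2 = rand(100), x3 = rand(100))   # any Tables.jl-compatible table of features
y = (target1 = rand(100), target2 = rand(100))          # two Continuous targets, also as a table
schema(y)
```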
- -# Training data - -In MLJ or MLJBase, bind an instance `model` to data with -mach = machine(model, X, y) -Where -- `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the scitype with `schema(X)` -- `y`: is the target, which can be any table of output targets whose element - scitype is `Continuous`; check the scitype with `schema(y)` - - -# Hyper-parameters - -- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. -- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. -- `loss=Flux.mse`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a regression task, the most natural loss functions are: - - `Flux.mse` - - `Flux.mae` - - `Flux.msle` - - `Flux.huber_loss` -- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. -- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. -- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. -- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. -- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). - - -# Operations - -- `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same Scitype as `X` above. Predictions are - deterministic. 
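For example (a sketch, assuming `mach` is a machine fitted to multi-target data and `Xnew` is a compatible table of new features):
```julia
yhat = predict(mach, Xnew)   # a table of deterministic predictions, one column per target
schema(yhat)                 # columns should mirror those of the training target `y`
```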
- - -# Fitted parameters - -The fields of `fitted_params(mach)` are: -- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. - - -# Report - -The fields of `report(mach)` are: -- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. - -# Examples - -In this example we build a regression model using the Boston house price dataset -```julia - - using MLJ - using MLJFlux - using Flux - using Plots - using MLJBase: augment_X - -``` -First, we generate some data: -```julia - - X = augment_X(randn(10000, 8), true); - θ = randn((9,2)); - y = X * θ; - X = MLJ.table(X) - y = MLJ.table(y) - - - - - schema(y) - schema(X) - -``` -Lets also make a test set: -```julia - - (X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); - -``` -Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. -```julia -builder = MLJFlux.@builder begin - init=Flux.glorot_uniform(rng) - Chain(Dense(n_in, 64, relu, init=init), - Dense(64, 32, relu, init=init), - Dense(32, 1, init=init)) - end -``` -Finally, we can define the model! -```julia - - MultitargetNeuralNetworkRegressor = @load MultitargetNeuralNetworkRegressor - model = MultitargetNeuralNetworkRegressor(builder=builder, - rng=123, - epochs=20) -``` -For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! -```julia -pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) -``` -If we fit with a high verbosity (>1), we will see the losses during training. We can also see the losses in the output of `report(mach)` - -```julia -mach = machine(pipe, X, y) - fit!(mach, verbosity=2) - - # first element initial loss, 2:end per epoch training losses - report(mach).transformed_target_model_deterministic.training_losses - -``` - -## Experimenting with learning rate - -We can visually compare how the learning rate affects the predictions: -```julia -plt = plot() - - rates = 10. .^ (-5:0) - - foreach(rates) do η - pipe.transformed_target_model_deterministic.model.optimiser.eta = η - fit!(mach, force=true, verbosity=0) - losses = - report(mach).transformed_target_model_deterministic.model.training_losses[3:end] - plot!(1:length(losses), losses, label=η) - end - plt #!md - - savefig(joinpath("assets", "learning_rate.png")) - - - pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 - -``` - -## Using Iteration Controls - -We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as updte the traces. 
-```julia - - # For initializing or clearing the traces: - - clear() = begin - global losses = [] - global training_losses = [] - global epochs = [] - return nothing - end - - # And to update the traces: - - update_loss(loss) = push!(losses, loss) - update_training_loss(report) = - push!(training_losses, - report.transformed_target_model_deterministic.model.training_losses[end]) - update_epochs(epoch) = push!(epochs, epoch) - -``` -For further reference of controls, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: -```julia - - controls=[Step(1), - NumberSinceBest(6), - InvalidValue(), - TimeLimit(1/60), - WithLossDo(update_loss), - WithReportDo(update_training_loss), - WithIterationsDo(update_epochs)] - - - iterated_pipe = - IteratedModel(model=pipe, - controls=controls, - resampling=Holdout(fraction_train=0.8), - measure = l2) - -``` -Next, we can clear the traces, fit the model, and plot the traces: -```julia - - - clear() - mach = machine(iterated_pipe, X, y) - fit!(mach) - - plot(epochs, losses, - xlab = "epoch", - ylab = "mean sum of squares error", - label="out-of-sample", - legend = :topleft); - scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md - - savefig(joinpath("assets", "loss.png")) -``` - -### Brief note on iterated models - -Training an `IteratedModel` means holding out some data (80% in this case) so an out-of-sample loss can be tracked and used in the specified stopping criterion, `NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by `IteratedModel` (our pipeline model) is retrained on all data for the same number of iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned parameters. - -## Evaluating Iterated Models - -We can evaluate our model with the `evaluate!` function: -```julia - - e = evaluate!(mach, - resampling=CV(nfolds=8), - measures=[l1, l2]) - -#- - - using Measurements - l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) - @show l1_loss - -``` -We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). - -## Comparison with other models on the test set - -Although we cannot assign them statistical significance, here are comparisons, on the untouched test set, of the eror of our self-iterating neural network regressor with a couple of other models trained on the same data (using default hyperparameters): -```julia - - function performance(model) - mach = machine(model, X, y) |> fit! - yhat = predict(mach, Xtest) - l1(yhat, ytest) |> mean - end - performance(iterated_pipe) - - three_models = [(@load EvoTreeRegressor)(), # tree boosting model - (@load LinearRegressor pkg=MLJLinearModels)(), - iterated_pipe] - - errs = performance.(three_models) - - (models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty - - -``` diff --git a/nnregressor.norg b/nnregressor.norg deleted file mode 100644 index cdeb2277..00000000 --- a/nnregressor.norg +++ /dev/null @@ -1,273 +0,0 @@ -* MultitargetNeuralNetworkRegressor - - `MultitargetNeuralNetworkRegressor`: A neural network model for making deterministic - predictions of a `Continuous` multi-target, presented as a table, given a table of `Continuous` features. 
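  As a quick sketch of confirming what data the model expects (assuming MLJFlux is installed and visible to MLJ's model registry):

  @code julia
  using MLJ
  MultitargetNeuralNetworkRegressor = @load MultitargetNeuralNetworkRegressor pkg=MLJFlux
  model = MultitargetNeuralNetworkRegressor()
  input_scitype(model)    # expected scitype of `X`
  target_scitype(model)   # expected scitype of `y`
  @end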
- -* Training data - - In MLJ or MLJBase, bind an instance `model` to data with - - mach = machine(model, X, y) - - Where - - - `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the scitype with `schema(X)` - - - `y`: is the target, which can be any table of output targets whose element - scitype is `Continuous`; check the scitype with `schema(y)` - - -* Hyper-parameters - - - `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. - - `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or {https://fluxml.ai/Flux.jl/stable/training/optimisers/}[the Flux optimiser documentation]. To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. - - `loss=Flux.mse`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in {https://fluxml.ai/Flux.jl/stable/models/losses/}[the Flux loss function documentation]. For a regression task, the most natural loss functions are: - -- `Flux.mse` - -- `Flux.mae` - -- `Flux.msle` - -- `Flux.huber_loss` - - `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. - - `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. - - `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. - - `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. - - `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. - - `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. - - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. - - `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). - - -* Operations - - - `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same Scitype as `X` above. Predictions are - deterministic. 
- - -* Fitted parameters - - The fields of `fitted_params(mach)` are: - - - `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. - - -* Report - - The fields of `report(mach)` are: - - - `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch $n-1$. - -* Examples - -In this example we build a regression model using the Boston house price dataset - - @code julia - - using MLJ - using MLJFlux - using Flux - using Plots - using MLJBase: augment_X - - @end - - First, we generate some data: - - @code julia - - X = augment_X(randn(10000, 8), true); - θ = randn((9,2)); - y = X * θ; - X = MLJ.table(X) - y = MLJ.table(y) - - - - - schema(y) - schema(X) - - @end - - Lets also make a test set: - - @code julia - - (X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); - - @end - - Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. - - @code julia - builder = MLJFlux.@builder begin - init=Flux.glorot_uniform(rng) - Chain(Dense(n_in, 64, relu, init=init), - Dense(64, 32, relu, init=init), - Dense(32, 1, init=init)) - end - @end - - Finally, we can define the model! - - @code julia - - MultitargetNeuralNetworkRegressor = @load MultitargetNeuralNetworkRegressor - model = MultitargetNeuralNetworkRegressor(builder=builder, - rng=123, - epochs=20) - @end - - For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have {https://www.informit.com/articles/article.aspx?p=3131594&seqNum=2}[saturated neurons] and not train well. Therefore, standardization is key! - - @code julia - pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) - @end - - If we fit with a high verbosity ($>1$), we will see the losses during training. We can also see the losses in the output of `report(mach)` - - - @code julia - mach = machine(pipe, X, y) - fit!(mach, verbosity=2) - - # first element initial loss, 2:end per epoch training losses - report(mach).transformed_target_model_deterministic.training_losses - - @end - -** Experimenting with learning rate - - We can visually compare how the learning rate affects the predictions: - - @code julia - plt = plot() - - rates = 10. .^ (-5:0) - - foreach(rates) do η - pipe.transformed_target_model_deterministic.model.optimiser.eta = η - fit!(mach, force=true, verbosity=0) - losses = - report(mach).transformed_target_model_deterministic.model.training_losses[3:end] - plot!(1:length(losses), losses, label=η) - end - plt #!md - - savefig(joinpath("assets", "learning_rate.png")) - - - pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 - - @end - -** Using Iteration Controls - We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. 
First we can define some methods to initialize or clear the traces as well as updte the traces. - - @code julia - - # For initializing or clearing the traces: - - clear() = begin - global losses = [] - global training_losses = [] - global epochs = [] - return nothing - end - - # And to update the traces: - - update_loss(loss) = push!(losses, loss) - update_training_loss(report) = - push!(training_losses, - report.transformed_target_model_deterministic.model.training_losses[end]) - update_epochs(epoch) = push!(epochs, epoch) - - @end - - For further reference of controls, see {https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/#Controls-provided}[the documentation]. To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: - - @code julia - - controls=[Step(1), - NumberSinceBest(6), - InvalidValue(), - TimeLimit(1/60), - WithLossDo(update_loss), - WithReportDo(update_training_loss), - WithIterationsDo(update_epochs)] - - - iterated_pipe = - IteratedModel(model=pipe, - controls=controls, - resampling=Holdout(fraction_train=0.8), - measure = l2) - - @end - - Next, we can clear the traces, fit the model, and plot the traces: - - @code julia - - - clear() - mach = machine(iterated_pipe, X, y) - fit!(mach) - - plot(epochs, losses, - xlab = "epoch", - ylab = "mean sum of squares error", - label="out-of-sample", - legend = :topleft); - scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md - - savefig(joinpath("assets", "loss.png")) - @end - -*** Brief note on iterated models - Training an `IteratedModel` means holding out some data (80% in this case) so an out-of-sample loss can be tracked and used in the specified stopping criterion, `NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by `IteratedModel` (our pipeline model) is retrained on all data for the same number of iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned parameters. - -** Evaluating Iterated Models - We can evaluate our model with the `evaluate!` function: - - @code julia - - e = evaluate!(mach, - resampling=CV(nfolds=8), - measures=[l1, l2]) - -#- - - using Measurements - l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) - @show l1_loss - - @end - -We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). - -** Comparison with other models on the test set - - Although we cannot assign them statistical significance, here are comparisons, on the untouched test set, of the eror of our self-iterating neural network regressor with a couple of other models trained on the same data (using default hyperparameters): - - @code julia - - function performance(model) - mach = machine(model, X, y) |> fit! 
- yhat = predict(mach, Xtest) - l1(yhat, ytest) |> mean - end - performance(iterated_pipe) - - three_models = [(@load EvoTreeRegressor)(), # tree boosting model - (@load LinearRegressor pkg=MLJLinearModels)(), - iterated_pipe] - - errs = performance.(three_models) - - (models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty - - - @end - From 8aaa88f756d541dd77d77f5d1ce5a1adb61e3050 Mon Sep 17 00:00:00 2001 From: josephsdavid Date: Tue, 12 Jul 2022 16:44:17 -0500 Subject: [PATCH 11/24] update to fix CI --- src/MLJFlux.jl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/MLJFlux.jl b/src/MLJFlux.jl index d2e63add..981bc4d4 100644 --- a/src/MLJFlux.jl +++ b/src/MLJFlux.jl @@ -14,6 +14,8 @@ using ColorTypes using ComputationalResources using Random +const MMI=MLJModelInterface + include("penalizers.jl") include("core.jl") include("builders.jl") @@ -24,7 +26,7 @@ include("image.jl") include("mlj_model_interface.jl") ### Package specific model traits: -MLJModelInterface.metadata_pkg.((NeuralNetworkRegressor, +MMI.metadata_pkg.((NeuralNetworkRegressor, MultitargetNeuralNetworkRegressor, NeuralNetworkClassifier, ImageClassifier), From 1550b8fbda27a557f094a220b7e2abafe204b066 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 14 Jul 2022 00:52:41 +0000 Subject: [PATCH 12/24] add MLJFlux.make_images oops --- src/MLJFlux.jl | 4 ++-- src/utilities.jl | 44 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 2 deletions(-) create mode 100644 src/utilities.jl diff --git a/src/MLJFlux.jl b/src/MLJFlux.jl index d3a88064..b0d779a4 100644 --- a/src/MLJFlux.jl +++ b/src/MLJFlux.jl @@ -1,7 +1,5 @@ module MLJFlux -export CUDALibs, CPU1 - import Flux using MLJModelInterface using MLJModelInterface.ScientificTypesBase @@ -15,6 +13,7 @@ using ComputationalResources using Random import Metalhead +include("utilities.jl") include("penalizers.jl") include("core.jl") include("builders.jl") @@ -38,5 +37,6 @@ MLJModelInterface.metadata_pkg.((NeuralNetworkRegressor, export NeuralNetworkRegressor, MultitargetNeuralNetworkRegressor export NeuralNetworkClassifier, ImageClassifier +export CUDALibs, CPU1 end #module diff --git a/src/utilities.jl b/src/utilities.jl new file mode 100644 index 00000000..88573d82 --- /dev/null +++ b/src/utilities.jl @@ -0,0 +1,44 @@ +# # IMAGE COERCION + +# Taken from ScientificTypes.jl to avoid as dependency. + +_4Dcollection = AbstractArray{<:Real, 4} + +function coerce(y::_4Dcollection, T2::Type{GrayImage}) + size(y, 3) == 1 || error("Multiple color channels encountered. "* + "Perhaps you want to use `coerce(image_collection, ColorImage)`.") + y = dropdims(y, dims=3) + return [ColorTypes.Gray.(y[:,:,idx]) for idx=1:size(y,3)] +end + +function coerce(y::_4Dcollection, T2::Type{ColorImage}) + return [broadcast(ColorTypes.RGB, y[:,:,1, idx], y[:,:,2,idx], y[:,:,3, idx]) for idx=1:size(y,4)] +end + + +# # SYNTHETIC IMAGES + +""" + make_images(rng; image_size=(6, 6), n_classes=33, n_images=50, color=false, noise=0.05) + +Return synthetic data of the form `(images, labels)` suitable for use +with MLJ's `ImageClassifier` model. All `images` are distortions of +`n_classes` fixed images. Two images with the same label correspond to +the same undistorted image. + +""" +function make_images(rng; image_size=(6, 6), n_classes=33, n_images=50, color=false, noise=0.05) + n_channels = color ? 
3 : 1 + image_bag = map(1:n_classes) do _ + rand(rng, Float32, image_size..., n_channels) + end + labels = rand(rng, 1:3, n_images) + images = map(labels) do j + image_bag[j] + noise*rand(rng, Float32, image_size..., n_channels) + end + T = color ? ColorImage : GrayImage + X = coerce(cat(images...; dims=4), T) + y = categorical(labels) + return X, y +end + From c29b733c3b1757d72cf0c20bc344e6d8e9b59b15 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 14 Jul 2022 00:54:44 +0000 Subject: [PATCH 13/24] image testing improvements --- src/metalhead.jl | 7 ++++--- src/types.jl | 3 ++- test/image.jl | 48 ++++++++++++++++++++++-------------------------- 3 files changed, 28 insertions(+), 30 deletions(-) diff --git a/src/metalhead.jl b/src/metalhead.jl index f42bb514..48d2eaa0 100644 --- a/src/metalhead.jl +++ b/src/metalhead.jl @@ -115,21 +115,22 @@ MLJFlux.build( # See above "TODO" list. function VGGHack( depth::Integer=16; - imsize=nothing, + imsize=(242,242), inchannels=3, nclasses=1000, batchnorm=false, pretrain=false, ) - # Note `imsize` is ignored, as here: + # Adapted from # https://github.com/FluxML/Metalhead.jl/blob/9edff63222720ff84671b8087dd71eb370a6c35a/src/convnets/vgg.jl#L165 + # But we do not ignore `imsize`. @assert( depth in keys(Metalhead.vgg_config), "depth must be from one in $(sort(collect(keys(Metalhead.vgg_config))))" ) - model = Metalhead.VGG((224, 224); + model = Metalhead.VGG(imsize; config = Metalhead.vgg_conv_config[Metalhead.vgg_config[depth]], inchannels, batchnorm, diff --git a/src/types.jl b/src/types.jl index 7d3166a0..df160c56 100644 --- a/src/types.jl +++ b/src/types.jl @@ -50,7 +50,8 @@ doc_classifier(model_name) = doc_regressor(model_name)*""" for Model in [:NeuralNetworkClassifier, :ImageClassifier] - default_builder_ex = Model == :ImageClassifier ? :(image_builder(VGGHack)) : Short() + default_builder_ex = + Model == :ImageClassifier ? :(image_builder(VGGHack)) : Short() ex = quote mutable struct $Model{B,F,O,L} <: MLJFluxProbabilistic diff --git a/test/image.jl b/test/image.jl index 48fb4fd3..fd038472 100644 --- a/test/image.jl +++ b/test/image.jl @@ -1,21 +1,3 @@ -# # HELPERS - -function make_images(rng; n_classes=33, n_images=50, color=false, noise=0.05) - n_channels = color ? 3 : 1 - image_bag = map(1:n_classes) do _ - rand(stable_rng, Float32, 6, 6, n_channels) - end - labels = rand(stable_rng, 1:3, n_images) - images = map(labels) do j - image_bag[j] + noise*rand(stable_rng, Float32, 6, 6, n_channels) - end - T = color ? 
ColorImage : GrayImage - X = coerce(cat(images...; dims=4), T) - y = coerce(labels, Multiclass) - return X, y -end - - # # BASIC IMAGE TESTS GREY Random.seed!(123) @@ -36,7 +18,7 @@ function MLJFlux.build(model::MyNeuralNetwork, rng, ip, op, n_channels) end builder = MyNeuralNetwork((2,2), (2,2)) -images, labels = make_images(stable_rng) +images, labels = MLJFlux.make_images(stable_rng) losses = [] @testset_accelerated "ImageClassifier basic tests" accel begin @@ -85,10 +67,12 @@ reference = losses[1] @test all(x->abs(x - reference)/reference < 5e-4, losses[2:end]) -## BASIC IMAGE TESTS COLOR +# # BASIC IMAGE TESTS COLOR + +# In this case we use the default ResNet builder builder = MyNeuralNetwork((2,2), (2,2)) -images, labels = make_images(stable_rng, color=true) +images, labels = MLJFlux.make_images(stable_rng, color=true) losses = [] @testset_accelerated "ColorImages" accel begin @@ -100,20 +84,18 @@ losses = [] epochs=10, acceleration=accel, rng=stable_rng) - # tests update logic, etc (see test_utililites.jl): @test basictest(MLJFlux.ImageClassifier, images, labels, model.builder, model.optimiser, 0.95, accel) - @time fitresult, cache, _report = MLJBase.fit(model, 0, images, labels) + @time fitresult, cache, _report = MLJBase.fit(model, 0, images, labels); pred = MLJBase.predict(model, fitresult, images[1:6]) first_last_training_loss = _report[1][[1, end]] push!(losses, first_last_training_loss[2]) -# @show first_last_training_loss # try with batch_size > 1: - model = MLJFlux.ImageClassifier(builder=builder, - epochs=10, + model = MLJFlux.ImageClassifier(epochs=10, + builder=builder, batch_size=2, acceleration=accel, rng=stable_rng) @@ -129,4 +111,18 @@ reference = losses[1] @info "Losses for each computational resource: $losses" @test all(x->abs(x - reference)/reference < 1e-5, losses[2:end]) + +# # SMOKE TEST FOR DEFAULT BUILDER + +images, labels = MLJFlux.make_images(stable_rng, image_size=(32, 32), n_images=12, noise=0.2, color=true); + +@testset_accelerated "ImageClassifier basic tests" accel begin + model = MLJFlux.ImageClassifier(epochs=10, + batch_size=4, + acceleration=accel, + rng=stable_rng) + fitresult, _, _ = MLJBase.fit(model, 0, images, labels); + predict(model, fitresult, images) +end + true From 677e1f0f32d7d0ee255b551d4a6f1ad13347690d Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 14 Jul 2022 01:16:43 +0000 Subject: [PATCH 14/24] add catches in fit for builders or built chains incompatible with data --- src/mlj_model_interface.jl | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/src/mlj_model_interface.jl b/src/mlj_model_interface.jl index bfac2987..1f2e09d4 100644 --- a/src/mlj_model_interface.jl +++ b/src/mlj_model_interface.jl @@ -40,6 +40,9 @@ end # # FIT AND UPDATE +const ERR_BUILDER = + "Builder does not appear to build an architecture compatible with supplied data. " + true_rng(model) = model.rng isa Integer ? 
MersenneTwister(model.rng) : model.rng function MLJModelInterface.fit(model::MLJFluxModel, @@ -51,10 +54,24 @@ function MLJModelInterface.fit(model::MLJFluxModel, rng = true_rng(model) shape = MLJFlux.shape(model, X, y) - chain = build(model, rng, shape) |> move + + chain = try + build(model, rng, shape) |> move + catch ex + @error ERR_BUILDER + end + penalty = Penalty(model) data = move.(collate(model, X, y)) + x = data |> first |> first + try + chain(x) + catch ex + @error ERR_BUILDER + throw(ex) + end + optimiser = deepcopy(model.optimiser) chain, history = fit!(model.loss, From 23adbeb71e23e96774c8fed5507611a22a3c6663 Mon Sep 17 00:00:00 2001 From: "Anthony D. Blaom" Date: Fri, 15 Jul 2022 09:35:15 +1200 Subject: [PATCH 15/24] update examples with readme files and delete old file --- examples/boston/README.md | 12 +- examples/boston/boston.jl | 250 -------------------------------------- examples/iris/README.md | 10 ++ examples/mnist/README.md | 10 ++ 4 files changed, 29 insertions(+), 253 deletions(-) delete mode 100644 examples/boston/boston.jl create mode 100644 examples/iris/README.md create mode 100644 examples/mnist/README.md diff --git a/examples/boston/README.md b/examples/boston/README.md index 84a3ec6f..194b1d98 100644 --- a/examples/boston/README.md +++ b/examples/boston/README.md @@ -1,4 +1,10 @@ -The files notebook.* in this directory cannot be executed without the -accomanying env/ directory, which contains the package environment -used. +# Contents +- `notebook.ipynb`: Juptyer notebook +- `notebook.jl`: executable Julia script annotated with comments + +# Important + +Scripts or notebooks in this folder cannot be reliably exectued without the accompanying +Manifest.toml and Project.toml files. If the Manifest.toml does not specify a +`julia_version` in the first four lines, use Julia 1.6.x, where x is any integer. diff --git a/examples/boston/boston.jl b/examples/boston/boston.jl deleted file mode 100644 index b990ba6a..00000000 --- a/examples/boston/boston.jl +++ /dev/null @@ -1,250 +0,0 @@ -# # Using training an MLJFlux regression model on the Boston house -# # price dataset - -using Pkg -Pkg.activate(@__DIR__) -Pkg.instantiate() - -# **Julia version** is assumed to be 1.6.* - -using MLJ -using MLJFlux -using Flux - -MLJ.color_off() - -using Plots - -# This tutorial uses some MLJ's `IteratedModel` wrapper wrappers, to -# transform the MLJFLux `NeuralNetworkRegressor` into a model that -# **automatically selects the number of epochs** required to optimize -# an out-of-sample loss. - -# We also show how to include the model in a **pipeline** to carry out -# standardization of the features and target. - - -# ## Loading data - -data = OpenML.load(531); # Loads from https://www.openml.org/d/531 - -# The target `y` is `:MEDV` and everything else except `:CHAS` goes -# into the features `X`: - -y, X = unpack(data, ==(:MEDV), !=(:CHAS)); - -# (The Charles River dummy variable, `:CHAS`, is not deemed to be -# relevant.) 
- -# Inspect the scientific types: - -scitype(y) - -#- - -schema(X) - -# We'll regard `:RAD` (index of accessibility to radial highways) as -# `Continuous` as Flux models don't handle ordered factors: - -X = coerce(X, :RAD => Continuous); - -# Let's split off a test set for final testing: - -X, Xtest = partition(X, 0.7); -y, ytest = partition(y, 0.7); - -# ## Defining a builder - -# In the macro call below, `n_in` is expected to represent the number -# of inputs features and `rng` a RNG (builders are generic, ie can be -# applied to data with any number of input features): - -builder = MLJFlux.@builder begin - init=Flux.glorot_uniform(rng) - Chain(Dense(n_in, 64, relu, init=init), - Dense(64, 32, relu, init=init), - Dense(32, 1, init=init)) -end - - -# ## Defining a MLJFlux model: - -NeuralNetworkRegressor = @load NeuralNetworkRegressor - model = NeuralNetworkRegressor(builder=builder, - rng=123, - epochs=20) - - -# ## Standardization - -# The following wraps our regressor in feature and target standardizations: - -pipe = @pipeline Standardizer model target=Standardizer - -# Notice that our original neural network model is now a -# hyper-parameter of the composite `pipe`, with the automatically -# generated name, `:neural_network_regressor`. - - -# ## Choosing an initial learning rate - -# Let's see how the training losses look for the default optimiser. For -# MLJFlux models, `fit!` will print these losses if we bump the -# verbosity level (default is always 1): - -mach = machine(pipe, X, y) -fit!(mach, verbosity=2) - -# They are also extractable from the training report (which includes -# the pre-train loss): - -report(mach).neural_network_regressor.training_losses - -# Next, let's visually compare a few learning rates: - -plt = plot() -rates = [5e-4, 0.001, 0.005, 0.01] - -# By default, changing only the optimiser will not trigger a -# cold-restart when we `fit!` (to allow for adaptive learning rate -# control). So we call `fit!` with the `force=true` option. We'll skip -# the first few losses to get a better vertical scale in our plot. - -foreach(rates) do η - pipe.neural_network_regressor.optimiser.eta = η - fit!(mach, force=true) - losses = report(mach).neural_network_regressor.training_losses[3:end] - plot!(:length(losses), losses, label=η) -end -plt #!md - -#- - -savefig("learning_rate.png") - -# ![](learing_rate.png) #md - -# We'll use a relatively conservative rate here: - -pipe.neural_network_regressor.optimiser.eta = 0.001 - - -# ## Wrapping in iteration control - -# We want a model that trains until an out-of-sample loss satisfies -# the `NumberSinceBest(4)` stopping criterion. We'll add some fallback -# stopping criterion `InvalidValue` and `TimeLimit(1/60)`, and -# controls to print traces of the losses. - -# For intializing or clearing the traces: - -clear() = begin - global losses = [] - global training_losses = [] - global epochs = [] - return nothing -end - -# And to update the traces: - -update_loss(loss) = push!(losses, loss) -update_training_loss(report) = - push!(training_losses, report.neural_network_regressor.training_losses[end]) -update_epochs(epoch) = push!(epochs, epoch) - -# The controls to apply: - -controls=[Step(1), - NumberSinceBest(4), - InvalidValue(), - TimeLimit(1/60), - WithLossDo(update_loss), - WithReportDo(update_training_loss), - WithIterationsDo(update_epochs)] - -# Next we create a "self-iterating" version of the pipeline. 
Note -# that the iteration parameter is a nested hyperparameter: - -iterated_pipe = - IteratedModel(model=pipe, - controls=controls, - resampling=Holdout(fraction_train=0.8), - iteration_parameter=:(neural_network_regressor.epochs), - measure = l2) - -# Training the wrapped model on all the train/validation data: - -clear() -mach = machine(iterated_pipe, X, y) -fit!(mach) - -# And plotting the traces: - -plot(epochs, losses, - xlab = "epoch", - ylab = "mean sum of sqaures error", - label="out-of-sample", - legend = :topleft); -scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md - -#- - -savefig("loss.png") - -# ![](losses.png) #md - -# **How `IteratedModel` works.** Training an `IteratedModel` means -# holding out some data (80% in this case) so an out-of-sample loss -# can be tracked and used in the specified stopping criterion, -# NumberSinceBest(4)`. However, once the stop is triggered, the model -# wrapped by `IteratedModel` (our pipeline model) is retrained on all -# data for the same number of iterations. Calling `predict(mach, -# Xnew)` on new data uses the updated learned parameters. - -# In other words, `iterated_model` is a "self-iterating" version of -# the original model, where `epochs` has been transformed from -# hyper-parameter to *learned* parameter. - - -# ## An evaluation the self-iterating model - -# Here's an estimate of performance of our "self-iterating" -# model: - -e = evaluate!(mach, - resampling=CV(nfolds=5), - measures=[l2, l1]) - -using Measurements -err = e.measurement[1] ± std(e.per_fold[1])/sqrt(4) -@show err - -# which we can see has substantial uncertainty. - - -# ## Comparing the model with other models - -# Although we cannot assign them statistical significance, here are -# comparisons of our fully automated model with a couple other models -# (trained using default hyperparameters): - -function performance(model) - mach = machine(model, X, y) |> fit! - yhat = predict(mach, Xtest) - l2(yhat, ytest) |> mean -end -performance(iterated_pipe) - -three_models = [(@load EvoTreeRegressor)(), # tree boosting model - (@load LinearRegressor pkg=MLJLinearModels)(), - tuned_iterated_pipe] - -errs = performance.(three_models) - -(models=typeof.(three_models), mean_square_errors=errs) |> pretty - -# So, apparently better than linear, but no match for the -# tree-booster. And our cv-estimate of performance on the -# train/validate dataset is wildly optimistic. - diff --git a/examples/iris/README.md b/examples/iris/README.md new file mode 100644 index 00000000..c2819b58 --- /dev/null +++ b/examples/iris/README.md @@ -0,0 +1,10 @@ +# Contents + +- `iris.ipynb`: Juptyer notebook +- `iris.jl`: executable Julia script annotated with comments + +# Important + +Scripts or notebooks in this folder cannot be reliably exectued without the accompanying +Manifest.toml and Project.toml files. If the Manifest.toml does not specify a +`julia_version` in the first four lines, use Julia 1.6.x, where x is any integer. diff --git a/examples/mnist/README.md b/examples/mnist/README.md new file mode 100644 index 00000000..d2bb2cea --- /dev/null +++ b/examples/mnist/README.md @@ -0,0 +1,10 @@ +# Contents + +- `mnist.ipynb`: Juptyer notebook +- `mnist.jl`: executable Julia script annotated with comments + +# Important + +Scripts or notebooks in this folder cannot be reliably exectued without the accompanying +Manifest.toml and Project.toml files. If the Manifest.toml does not specify a +`julia_version` in the first four lines, use Julia 1.6.x, where x is any integer. 
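For instance, a minimal sketch of running one of these examples against its pinned environment (assuming the current working directory is the example's folder, and using `notebook.jl` as a placeholder for that example's script name):
```julia
using Pkg
Pkg.activate(".")        # activate the Project.toml/Manifest.toml shipped with the example
Pkg.instantiate()        # install the exact pinned package versions
include("notebook.jl")   # run the example script
```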
From f86654a534febc3d02c1e7cb864cb8c4deb2d0b0 Mon Sep 17 00:00:00 2001 From: "Anthony D. Blaom" Date: Mon, 18 Jul 2022 08:50:47 +1200 Subject: [PATCH 16/24] fix confidence interval in Boston example --- examples/boston/notebook.ipynb | 1104 +++++++++------------ examples/boston/notebook.jl | 2 +- examples/boston/notebook.unexecuted.ipynb | 8 +- 3 files changed, 487 insertions(+), 627 deletions(-) diff --git a/examples/boston/notebook.ipynb b/examples/boston/notebook.ipynb index d653cbc3..6fa0831d 100644 --- a/examples/boston/notebook.ipynb +++ b/examples/boston/notebook.ipynb @@ -2,237 +2,225 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, "source": [ "# Building an MLJFlux regression model for the Boston house\n", "# price dataset" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, "outputs": [ { - "name": "stderr", + "name": "stdout", "output_type": "stream", "text": [ - "\u001b[32m\u001b[1m Activating\u001b[22m\u001b[39m environment at `~/GoogleDrive/Julia/MLJ/MLJFlux/examples/boston/Project.toml`\n" + " Activating project at `~/GoogleDrive/Julia/MLJ/MLJFlux/examples/boston`\n", + "┌ Warning: The active manifest file is an older format with no julia version entry. Dependencies may have been resolved with a different julia version.\n", + "└ @ ~/GoogleDrive/Julia/MLJ/MLJFlux/examples/boston/Manifest.toml:0\n" ] } ], + "cell_type": "code", "source": [ "using Pkg\n", "Pkg.activate(@__DIR__)\n", "Pkg.instantiate()" - ] + ], + "metadata": {}, + "execution_count": 1 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "**Julia version** is assumed to be 1.6.*" - ] + ], + "metadata": {} }, { + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ Info: Precompiling MLJ [add582a8-e3ab-11e8-2d5e-e98b27df1bc7]\n", + "[ Info: Precompiling Plots [91a5bcdd-55d7-5caf-9e0b-520d859cae80]\n" + ] + } + ], "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], "source": [ "using MLJ\n", "using MLJFlux\n", "using Flux\n", "using Plots" - ] + ], + "metadata": {}, + "execution_count": 2 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "This tutorial uses MLJ's `IteratedModel` wrapper to transform the\n", "MLJFlux `NeuralNetworkRegressor` into a model that **automatically\n", "selects the number of epochs** required to optimize an out-of-sample\n", "loss." - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "We also show how to include the model in a **pipeline** to carry out\n", "standardization of the features and target." - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## Loading data" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, "outputs": [], + "cell_type": "code", "source": [ "data = OpenML.load(531); # Loads from https://www.openml.org/d/531" - ] + ], + "metadata": {}, + "execution_count": 3 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "The target `y` is `:MEDV` and everything else except `:CHAS` goes\n", "into the features `X`:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, "outputs": [], + "cell_type": "code", "source": [ "y, X = unpack(data, ==(:MEDV), !=(:CHAS); rng=123);" - ] + ], + "metadata": {}, + "execution_count": 4 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "We specified the seed `rng` to shuffle the observations. 
The Charles\n", "River dummy variable `:CHAS` is dropped, as not deemed to be\n", "relevant." - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "Inspecting the scientific types:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { - "text/plain": [ - "AbstractVector{Continuous} (alias for AbstractArray{Continuous, 1})" - ] + "text/plain": "AbstractVector{Continuous} (alias for AbstractArray{ScientificTypesBase.Continuous, 1})" }, - "execution_count": 5, "metadata": {}, - "output_type": "execute_result" + "execution_count": 5 } ], + "cell_type": "code", "source": [ "scitype(y)" - ] + ], + "metadata": {}, + "execution_count": 5 }, { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { - "text/plain": [ - "┌─────────┬───────────────┬──────────────────────────────────┐\n", - "│\u001b[22m names \u001b[0m│\u001b[22m scitypes \u001b[0m│\u001b[22m types \u001b[0m│\n", - "├─────────┼───────────────┼──────────────────────────────────┤\n", - "│ CRIM │ Continuous │ Float64 │\n", - "│ ZN │ Continuous │ Float64 │\n", - "│ INDUS │ Continuous │ Float64 │\n", - "│ NOX │ Continuous │ Float64 │\n", - "│ RM │ Continuous │ Float64 │\n", - "│ AGE │ Continuous │ Float64 │\n", - "│ DIS │ Continuous │ Float64 │\n", - "│ RAD │ Multiclass{9} │ CategoricalValue{String, UInt32} │\n", - "│ TAX │ Continuous │ Float64 │\n", - "│ PTRATIO │ Continuous │ Float64 │\n", - "│ B │ Continuous │ Float64 │\n", - "│ LSTAT │ Continuous │ Float64 │\n", - "└─────────┴───────────────┴──────────────────────────────────┘\n" - ] + "text/plain": "┌─────────┬───────────────┬──────────────────────────────────┐\n│\u001b[22m names \u001b[0m│\u001b[22m scitypes \u001b[0m│\u001b[22m types \u001b[0m│\n├─────────┼───────────────┼──────────────────────────────────┤\n│ CRIM │ Continuous │ Float64 │\n│ ZN │ Continuous │ Float64 │\n│ INDUS │ Continuous │ Float64 │\n│ NOX │ Continuous │ Float64 │\n│ RM │ Continuous │ Float64 │\n│ AGE │ Continuous │ Float64 │\n│ DIS │ Continuous │ Float64 │\n│ RAD │ Multiclass{9} │ CategoricalValue{String, UInt32} │\n│ TAX │ Continuous │ Float64 │\n│ PTRATIO │ Continuous │ Float64 │\n│ B │ Continuous │ Float64 │\n│ LSTAT │ Continuous │ Float64 │\n└─────────┴───────────────┴──────────────────────────────────┘\n" }, - "execution_count": 6, "metadata": {}, - "output_type": "execute_result" + "execution_count": 6 } ], + "cell_type": "code", "source": [ "schema(X)" - ] + ], + "metadata": {}, + "execution_count": 6 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "We'll regard `:RAD` (index of accessibility to radial highways) as\n", "`Continuous` as MLJFlux models don't handle ordered factors:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, "outputs": [], + "cell_type": "code", "source": [ "X = coerce(X, :RAD => Continuous);" - ] + ], + "metadata": {}, + "execution_count": 7 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "Let's split off a test set for final testing:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, "outputs": [], + "cell_type": "code", "source": [ "(X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true);" - ] + ], + "metadata": {}, + "execution_count": 8 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## Defining a builder" - ] + ], + "metadata": {} }, { 
"cell_type": "markdown", - "metadata": {}, "source": [ "In the macro call below, `n_in` is expected to represent the number\n", "of inputs features and `rng` a RNG (builders are generic, ie can be\n", "applied to data with any number of input features):" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { - "text/plain": [ - "GenericBuilder{#1#2}\n" - ] + "text/plain": "GenericBuilder{#1#2}\n" }, - "execution_count": 9, "metadata": {}, - "output_type": "execute_result" + "execution_count": 9 } ], + "cell_type": "code", "source": [ "builder = MLJFlux.@builder begin\n", " init=Flux.glorot_uniform(rng)\n", @@ -240,19 +228,18 @@ " Dense(64, 32, relu, init=init),\n", " Dense(32, 1, init=init))\n", "end" - ] + ], + "metadata": {}, + "execution_count": 9 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## Defining a MLJFlux model:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, "outputs": [ { "name": "stdout", @@ -263,108 +250,82 @@ ] }, { + "output_type": "execute_result", "data": { - "text/plain": [ - "NeuralNetworkRegressor(\n", - " builder = GenericBuilder(\n", - " apply = Main.##283.var\"#1#2\"()),\n", - " optimiser = ADAM(0.001, (0.9, 0.999), IdDict{Any, Any}()),\n", - " loss = Flux.Losses.mse,\n", - " epochs = 20,\n", - " batch_size = 1,\n", - " lambda = 0.0,\n", - " alpha = 0.0,\n", - " rng = 123,\n", - " optimiser_changes_trigger_retraining = false,\n", - " acceleration = CPU1{Nothing}(nothing))" - ] + "text/plain": "NeuralNetworkRegressor(\n builder = GenericBuilder(\n apply = Main.##291.var\"#1#2\"()),\n optimiser = Flux.Optimise.ADAM(0.001, (0.9, 0.999), IdDict{Any, Any}()),\n loss = Flux.Losses.mse,\n epochs = 20,\n batch_size = 1,\n lambda = 0.0,\n alpha = 0.0,\n rng = 123,\n optimiser_changes_trigger_retraining = false,\n acceleration = ComputationalResources.CPU1{Nothing}(nothing))" }, - "execution_count": 10, "metadata": {}, - "output_type": "execute_result" + "execution_count": 10 } ], + "cell_type": "code", "source": [ "NeuralNetworkRegressor = @load NeuralNetworkRegressor\n", " model = NeuralNetworkRegressor(builder=builder,\n", " rng=123,\n", " epochs=20)" - ] + ], + "metadata": {}, + "execution_count": 10 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## Standardization" - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "The following wraps our regressor in feature and target standardizations:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { - "text/plain": [ - "DeterministicPipeline(\n", - " standardizer = Standardizer(\n", - " features = Symbol[],\n", - " ignore = false,\n", - " ordered_factor = false,\n", - " count = false),\n", - " transformed_target_model_deterministic = TransformedTargetModelDeterministic(\n", - " model = NeuralNetworkRegressor{GenericBuilder{#1#2},…},\n", - " target = Standardizer,\n", - " inverse = nothing,\n", - " cache = true),\n", - " cache = true)" - ] + "text/plain": "DeterministicPipeline(\n standardizer = Standardizer(\n features = Symbol[],\n ignore = false,\n ordered_factor = false,\n count = false),\n transformed_target_model_deterministic = TransformedTargetModelDeterministic(\n model = NeuralNetworkRegressor{GenericBuilder{#1#2},…},\n target = MLJModels.Standardizer,\n inverse = nothing,\n cache = true),\n cache = 
true)" }, - "execution_count": 11, "metadata": {}, - "output_type": "execute_result" + "execution_count": 11 } ], + "cell_type": "code", "source": [ "pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer)" - ] + ], + "metadata": {}, + "execution_count": 11 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "Notice that our original neural network model is now a\n", "hyper-parameter of the composite `pipe`, with the automatically\n", "generated name, `:neural_network_regressor`." - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## Choosing a learning rate" - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "Let's see how the training losses look for the default optimiser. For\n", "MLJFlux models, `fit!` will print these losses if we bump the\n", "verbosity level (default is always 1):" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, "outputs": [ { "name": "stdout", @@ -411,110 +372,76 @@ ] }, { + "output_type": "execute_result", "data": { - "text/plain": [ - "Machine{DeterministicPipeline{NamedTuple{,…},…},…} trained 1 time; caches data\n", - " model: MLJBase.DeterministicPipeline{NamedTuple{(:standardizer, :transformed_target_model_deterministic), Tuple{Unsupervised, Deterministic}}, MLJModelInterface.predict}\n", - " args: \n", - " 1:\tSource @185 ⏎ `Table{AbstractVector{Continuous}}`\n", - " 2:\tSource @132 ⏎ `AbstractVector{Continuous}`\n" - ] + "text/plain": "Machine{DeterministicPipeline{NamedTuple{,…},…},…} trained 1 time; caches data\n model: MLJBase.DeterministicPipeline{NamedTuple{(:standardizer, :transformed_target_model_deterministic), Tuple{MLJModelInterface.Unsupervised, MLJModelInterface.Deterministic}}, MLJModelInterface.predict}\n args: \n 1:\tSource @658 ⏎ `ScientificTypesBase.Table{AbstractVector{ScientificTypesBase.Continuous}}`\n 2:\tSource @854 ⏎ `AbstractVector{ScientificTypesBase.Continuous}`\n" }, - "execution_count": 12, "metadata": {}, - "output_type": "execute_result" + "execution_count": 12 } ], + "cell_type": "code", "source": [ "mach = machine(pipe, X, y)\n", "fit!(mach, verbosity=2)" - ] + ], + "metadata": {}, + "execution_count": 12 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "They are also extractable from the training report (which includes\n", "the pre-train loss):" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { - "text/plain": [ - "21-element Vector{Float64}:\n", - " 1.2838145640873329\n", - " 0.3686760957021546\n", - " 0.2215979652158559\n", - " 0.17754900774597876\n", - " 0.15194120480021098\n", - " 0.13221864530853797\n", - " 0.11697435053172738\n", - " 0.10636408931390769\n", - " 0.095208490765527\n", - " 0.08948774179997945\n", - " 0.08276797005304384\n", - " 0.07997044943380421\n", - " 0.07726719934689869\n", - " 0.07190359246973332\n", - " 0.07050712214306228\n", - " 0.0669674161856798\n", - " 0.06422548257753624\n", - " 0.06024360408282619\n", - " 0.05974192388977129\n", - " 0.05666044439895874\n", - " 0.055767894928594526" - ] + "text/plain": "21-element Vector{Float64}:\n 1.2838145640873329\n 0.3686760957021546\n 0.2215979652158559\n 0.17754900774597876\n 0.15194120480021098\n 0.13221864530853797\n 0.11697435053172738\n 0.10636408931390769\n 0.095208490765527\n 0.08948774179997945\n 0.08276797005304384\n 0.07997044943380421\n 0.07726719934689869\n 0.07190359246973332\n 
0.07050712214306228\n 0.0669674161856798\n 0.06422548257753624\n 0.06024360408282619\n 0.05974192388977129\n 0.05666044439895874\n 0.055767894928594526" }, - "execution_count": 13, "metadata": {}, - "output_type": "execute_result" + "execution_count": 13 } ], + "cell_type": "code", "source": [ "report(mach).transformed_target_model_deterministic.model.training_losses" - ] + ], + "metadata": {}, + "execution_count": 13 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "Next, let's visually compare a few learning rates:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { - "text/plain": [ - "5-element Vector{Float64}:\n", - " 5.0e-5\n", - " 0.0001\n", - " 0.005\n", - " 0.001\n", - " 0.05" - ] + "text/plain": "5-element Vector{Float64}:\n 5.0e-5\n 0.0001\n 0.005\n 0.001\n 0.05" }, - "execution_count": 14, "metadata": {}, - "output_type": "execute_result" + "execution_count": 14 } ], + "cell_type": "code", "source": [ "plt = plot()\n", "rates = [5e-5, 1e-4, 0.005, 0.001, 0.05]" - ] + ], + "metadata": {}, + "execution_count": 14 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "By default, changing only the optimiser will not trigger a\n", "cold-restart when we `fit!` (to allow for adaptive learning rate\n", @@ -522,323 +449,320 @@ "option. (Alternatively, one can change the hyper-parameter\n", "`pipe.neural_network_regressor.optimiser_changes_trigger_retraining`\n", "to `true`.)" - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "We'll skip the first few losses to get a better vertical scale in\n", "our plot." - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { + "text/plain": "Plot{Plots.GRBackend() n=5}", "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQCAIAAAD9V4nPAAAABmJLR0QA/wD/AP+gvaeTAAAgAElEQVR4nOzdd3wUZf4H8O/MbO8tyaYXEkqCBKQaiCKodFQwBwqCDcvd2eVE8XeidwIWzkMEBPEECyrYEQsWyoGeSBMIVUJJCGmbTbKbbdmd+f0xMcQQIECSze5+3i9evGaHZyffh5D98MzM8wwjCAIBAABEKjbYBQAAAAQTghAAACIaghAAACIaghAAACIaghAAACIaghAAACIaghAAACIaghAAACIaghAAACIaghAAACJaEIKwsrLy6NGj7f91LxrP88EuoU2Ea78CgUCwS2gT6FdoQb9CSBCC8Ouvv37qqafa/+teNI/HE5bfe5fLFZYrzbpcrmCX0CbQr9CCfoUQnBoFAICIhiAEAICIhiAEAICIhiAEAICIhiAEAICIhiAEAICIhiAEAICIhiAEAICIJgl2AcEh8P7Kkp0Bv7sljZ01FRIJx7IX858GQfD7fbUX8cZLx/N+v895jgY+n08qlTIM024ltQ+v1yuXy4NdRetro34F/B4+4G31w7acz+eTyWRBLKCNoF9tRB+VldjlhtY9ZsQFob1094n9HxYe/FSljZPK9S15CytRcZzk4gKDYSQSmfoi3njpGFYilWnO0aDO52OEMPxB9ft8LKFfLcVJFC38QWgjPHml4fgfF/SrjUikqtY/ZqsfsWNyO0tOHl5zPP8Dn6cqseuNgyd8qjGktfC9LpdLLpdzHNemFbY/p9OpVqvDb0TocDi0Wm2wq2h96FdoOW+/SkpKHnzwwZBb5jAQCATrw3D69Ol9+/ZtiyOHeRAG/J5TBd+e2L/aVrzNmjr0siv/Hp2USxRuH/0AEHJOnDixY8eO2bNnB7uQ0PDaa6/t3r0bQXgBBIG3FW87sX910aHPjTE9k7rd1H/UEk6iDHZdAACnmUymvLy8YFcRGr755pu2O3i4BWGN7eCJ/R8e37daqYlJ6nbTsNu2yFWWYBcFAAAd14UF4cmTJ2UyWVRU1Nka7Nmzp6ampnfv3gqF4pJruwAeZ0nR4TXH9632um1JXcddlfexxtjSS4AAABDJWhqEzz///EsvvWSz2aZOnfrmm2+e2SAQCNx000179uxJSEg4evTo+vXr09LaPIoCfu+pgnWnLwHmPoVLgAAAcEFaGoTDhw8fP378smXLSktLm22wZs2a/Pz8X3/9Va1WP/jgg88888yKFStar84/OPMSYL+Rr7XFPbUAABD2WhqE2dnZRHSOW+0//PDDvLw8tVpNRFOnTs3NzV2+fHmr35pfYztUdOjzE/tXcxJFUre8YbdtlqvOep4WAADgvFrtZpnCwsIrr7xS3E5JSXG5XDabzWJp/kaVioqKb7/9VtyWy+WDBg0698F9nqri39ae2L/aVVMYlz6q36hlhqgs8Y94nm+lHpwVz/M8z4fffLvw7lewq2h96FdoOW+/wrLXbUoQhIv4S2vJomCtFoRut7th3R1xISi3+6wLmB06dGjOnDnidnR09OWXX36OI1cUbti9/tHo5GvTej1gjO3HMCwRuVyu1qr8vFwuVxDnkLYdt9vNMEz4BaHb7Q6/bxahX6HmvP3yer0dcDb9l19+uWnTpoaXzz777JkLqv32228vvfRSeXn5sGHDpk2b1sLPELvd/vzzzze8HDFixFVXXXVBtfl8vov45FepVOfNwlYLwtjY2MrKSnG7oqKCYZiYmJizNc7JyXn33XdbeGRlxjWJGb9ykna9DbUxlmXDcmUZIgrLlWUEQdBozrW2XIhCv0LLefulVCo74E/fhg0b9uzZc+ONN4ovz6zQ6XTm5ubecccdo0aNevzxxx0Ox6OPPtqSI1dXV8+fP3/+/PniS4PBcKG1yeXyNvqn0mpB2K9fv02bNj3yyCNEtGnTpssvv7y1FmYNYgQCAESaXr163X333Wf703fffTc1NfW5554jIoVCcccddzz00EPiOOG///3vxo0bNRrNxIkTrVbrme+VyWTnOHIQtfSJCr/88svzzz//v//9b8+ePc8///yGDRuI6NSpUzqdrqioiIjuvPPOTZs2Pffccx988MGMGTPERAQAgNCyadOme++9d/bs2SUlJWf+6S+//JKbmytu5+bmFhUVnTp1iojmzJnz4IMPRkVF2Wy2fv36Nfter9f70EMPTZ8+fd26dW3ahQvV0hGh1+u12+39+/cnIrvdLl7/U6vVf/3rX8WFZa1W6+bNm1999dX9+/e/8sor48ePb7uiAQDC0r/28F8VtdNNNAzRuhFNIyAzM9NkMpnN5vXr12dlZe3YsSM5Oblxg9LS0s6dO4vbCoVCo9GUlJTIZLI5c+YUFBSIN0i6XK5FixY9++yzjd+oUCimTZvWvXv3kpKSSZMmPfroozNmzGjL/l2AlgbhoEGDzry3U6fTNV4xNjMzc9GiRa1WGgBAhBmRyPQwtdPtCM1eoLztttvEjWnTpo0ZM2bRokWN73AhIpVK5fF4xG1BELxer1qt3rt3byAQuPnmm8X9J0+evOyyy+x2+wMPPCDuWbFihdVqXbBggfiyd+/eEyZMePzxxzvIVdJwW2sUACB0dTMw3S74JpK20q1btzPPcCYkJBw/flzcPnnyZCAQiIuLq6qqMplMq1atamgmlUolEknDqcEmgZeZmelwOGprazvIfVIX89R1AAAIS+I9H0RUUVHxySef9OvXj4gEQXjjjTcqKiqIKC8vb82aNeL2m2++OWzYML1e36NHD57nt2zZYjQajUajXq/3er0KheKG3zEMc+rUqUAgIB5t6dKlmZmZHSQFCSNCAABokJubq1QqzWbz7t27x44de8899xBRIBC46667evToYbFYBgwYkJeXl52dnZ6efuTIkS+//JKI1Gr1+++/P2XKlHnz5mk0mr179/7jH/+YPHly4yO//fbbL730UpcuXU6ePMmy7HvvvRecHjYHQQgAAPUOHTp04MABh8ORnp4eHR0t7pRIJKWlpSaTSXy5cOHCxx57rLS0tGfPng0PGsrNzT106NDhw4dra2s7d+6s1+ubHPlvf/vbzTffXFhYaDabO3XqJJF0oPTpQKUAAEBwSaXSyy677Mz9DaEoSk1NTU1NPfO9mZmZ5zh4YmJiYmLipRfZ6nCNEAAAIhqCEAAAIhqCEAAATlu/fn1eXt7YsWMbT4dorKio6L777hs2bNisWbMaP1zhgw8+GDt2bF5enrj0mKiiouLhhx++7rrrHn/88ZqaGnFncXHxihUrHn744SaTFIMFQQgAAPX27t17/fXXjxo16q677rr//vvXrFnTpAHP88OGDeM47oknntiyZUvDlPnPP//8gQceuOuuu0aPHj127Ni9e/eK+8eNG2ez2WbOnPnbb79NmTJF3Pn1119//PHHhw4d+uyzz9qta+eAm2UAAKDeokWLbr31VnF9mWPHjs2fP3/MmDGNG3z77bc1NTULFixgGCY1NbVr165z5841m83z58+fOXPm2LFjiWjr1q2LFi1atGjR9u3bf/3112+//V
Yul2dnZ1ut1oKCgrS0tDvuuOOOO+548803X3/99aB0swmMCAEAoN727dtzcnLE7YEDB27fvr3ZBuJKMcnJyWazWRz87dix48w3bt++vXfv3uITag0GQ7du3Xbs2NFufWk5jAgBADqKmnXvefb82E5fjGGiH3mlyb7G8wVNJlNVVZXH42mYLEhEJSUlRqOx4aXZbC4pKfF4POIqaw1vFNdmKy0tPbNxG/XmUiAIAQA6CnWfoYpufYJYgEajabj/xe12y2QycTzXuEFxcXHDS5fLpdVq5XK5TCZr/EbxqUQajaZhhe6Gxm3ehwuHIAQA6Cg4UzRnij5/uzaTmJh49OhRcbugoCAhIaHJetlJSUlbtmwRt71eb3FxcVJSEsMwCQkJR48ezcrKIqKjR4+KE+cbH00QhGPHjiUlJbVfZ1oM1wgBAKDehAkT3n77bY/HIy60PXHiRHH/8uXLDx8+TEQ33njjtm3b8vPziWjlypWpqandu3cX37hs2TJBEDwez9tvvz1hwgQiGjFiRHFx8ebNm4noiy++YBim4aG+HQpGhAAAUO+WW2755JNPunTpolarNRrNG2+8Ie7/+9///uKLL2ZkZMTExMydO/fKK69MT08/fvx4w1zDxx57bPjw4ZmZmS6Xq1evXrfccgsRqdXqV1999frrr8/IyDhy5Mh//vMfcYnRb7/9dsKECT6fz+v1mkymkSNHvvPOO8HqMiEIAQCggUwm++yzz44dO+b1ejt37txwXvTgwYMymUzcvv/++ydPnnzixInOnTsrlUpxp8lk+vnnnw8dOiSXy1NSUhoOOHny5LFjxx49ejQ9PV2tVos7Bw8efOTIkYY2Uqm0Hbp2DghCAAD4g8ZJJmoIPJH43MEmbRiG6dKly5lH0+l02dnZjfdIpdIz3x5EuEYIAAARDUEIAAARDUEIAACnffnllyNHjhwyZMjy5cubbXD06NEpU6YMGjToscceczqdDfvffPPNIUOGjBw58quvvhL3BAKBPzVytgMGHa4RAgBAvV27dt18883Lli0zm8233nqrTqcbN25c4waBQGDYsGE33HDDAw88MGvWrL/85S8rVqwgoo8++uipp556++23KysrJ06cuHHjxp49e/I8v3r16hUrVoiXGDMyMoLTq/NBEAIAQL3FixdPnTo1Ly+PiGbOnLlgwYImQfj111/7fL4XXniBiF577bX09PSXXnopKipqwYIFTz755JAhQ4jov//97+LFi5csWSK+5cYbb+yYC8o0wKlRAACot3PnzgEDBojbAwYM2LlzZ5MGu3btamiQkJAQFRUlTq5vvL/JG++9997Jkye//vrrfr+/zTtwUTAiBADoKN7a+8Hmwp/b52sxDLNk+LwmO8vKyhomNphMpurqarfb3XjuRGlpqcFgaHhpMplKS0s9Hk91dXXjN4qLa7Ms+8QTT/Tr16+ysvKll17atGnT22+/3ba9uigIQgCAjmJY6tX943oHsQCtVutyucTt2tpamUzW+NETYoOioqKGl7W1tTqdTi6Xy+Xyxm/U6/VExHHc7NmzxZ25ubmdO3d++eWXLRZLe/TkQiAIAQA6ihh1dIw6mItuJycnFxQUiNtHjhxJTk5usuh2cnLyxo0bxW23211cXCy2SUxMPHLkiLjuqPjGJkdOTExkWdZut3fAIMQ1QgAAqHfzzTevWLHC5XLxPL9kyZKbb75Z3L948eIDBw4Q0bhx43bu3CleAlyxYkXnzp0zMzPFNy5ZsoTneZfLtWLFCvGNhYWFVVVVRMTz/AsvvBAbG5uWlha0vp0dRoQAAFBvwoQJa9euTUtLUyqVycnJjzzyiLh/zpw5JpOpa9euFotl/vz5Q4cOjY+Pt9vtH374odjg0UcfHTt2bKdOndxu99ChQ8WnT2zduvW2226LjY2tqqoym82rV6/mOC5ofTs7BCEAANSTSCQrV64sLS31+XziMwVFJ06caNi+6667Jk6cWFJSkpKSIj5Ngoj0ev3GjRsLCwtlMllMTIy4c/z48aNHjy4qKtJqtdHRwTzle24IQgAA+IOGJDsbjUaTnp5+5v7G2SmSy+WdOnVqtcraBq4RAgBAREMQAgBAREMQAgDAaR999NHgwYNzcnIWLlzYbIODBw+OHz++T58+f/nLX6qrqxv2v/rqqzk5OYMHD/74448bdr7zzjsPP/zwn/70p/3797d56RcLQQgAAPV++eWXO++8c/r06fPmzXvxxRfff//9Jg38fv+IESMuu+yyFStWlJaW3nvvveL+995778UXX5w3b97f/va3O++8c9u2beL+999/X6fTffXVVxUVFe3akwuBm2UAAKDea6+9dvvtt48aNYqInnjiiYULF06cOLFxg7Vr1zIMM2vWLCJ65ZVXUlNTS0tLY2JiFi5c+MQTT1xxxRVEdPvtty9evPiNN94goi+++IKIFi1a1P59aTmMCAEAoN7u3bv79esnbvfr12/37t1NGuzZs6ehQVxcXExMzL59+5rsb/aNHRlGhAAAHcWxtaUVu6rP365VMNTnyc5N9pWXlzesqW00Gmtqaposul1WVtbsots1NTWN31haWtrG1bcmBCEAQEcRf5XZOsAYxAJ0Ol3D2tlOp1MulzdZdFun0xUWFja8dDgcBoNBbNb4jY3DsuNDEAIAdBRSjUSqCWYBKSkphw8fFrcPHz6ckpLSZNHt1NTU77//Xtx2uVzFxcVim6SkpMOHD4uLbotvbN/CLwmuEQIAQL1JkyYtX77c4XAEAoFFixZNmjRJ3P/yyy+LD+C98cYb9+zZ8/PPPxPR66+/npWV1bVrV/GNixYtCgQCDodj+fLlDW8MCQhCAACod9NNNw0aNCglJSU+Pl4qlT788MPi/oanT5hMpiVLlowYMSI9Pf3f//730qVLxQaPPPKIRCKJj49PSUnJzc296aabxP05OTkmkykQCFx//fUmk6nhGU8dCk6NAgBAPY7jli1bNm/evLq6usYPDjx06FDD9qRJk/Ly8srLy2NjY1m2fjSl0WjEyYJSqVR8Kq/oxx9/bLfiLxqCEAAA/qBxkjVLJpPFx8efub8DPnS3JXBqFAAAIhqCEAAAIhqCEAAATnvnnXf69euXnZ394osvNttgz549o0aN6tq16+23315ZWSnuXLx48bWN1NXVtWPJlwrXCAEAoN5PP/30wAMPfPTRR2azedy4cVar9dZbb23coK6ubuTIkffff//ChQtnzpx59913f/jhh0R06NAhq9U6depUsRnHcUGo/mIhCAEAoN6SJUvuvPPOq6++mohmzJixePHiJkG4Zs0apVL5t7/9jYj+9a9/JSYmnjp1KjY2lojS0tKuueaaoJR9iXBqFAAA6u3du7d3797idp8+ffbu3dukQX5+fp8+fcTtmJiY2NjYhgcNrl69+sorr5wyZcqOHTvareBWgREhAEBHsXfz7KJDn7fP12IYdtjtTSf5NV5022AwOByOMxfdbjy5wmg0lpWVEdHQoUOvvvpqi8Wybt26gQMH/vzzzz169Gj7TrQOBCEAQEeR0fue1MuCuTiZw
WBwOp3itsPhUCgUTRbdNhgMx48fb3jpcDiMRiMRjR49WtyTk5Nz8ODBt95666WXXmqvqi8VghAAoKOQK81ypTmIBaSmph48eFDcPnToUGpqapNFt9PS0tatWyduO53OkydPpqWlNTlIVFRUQ5qGBFwjBACAerfeeuubb75pt9t9Pt+CBQsa7pR57rnnfv31VyIaN27cvn37Nm3aREQLFy7s1atXRkYGEW3YsIHneSLasWPHypUrQ+uuGYwIAQCg3rhx4zZu3Jiamsqy7JAhQx566CFx/6pVq7KysrKzs/V6/fLly2+66SaWZY1G4+rVq8UGM2fO3L59u1KplEgk06dPb1h0OyQgCAEAoB7DMK+88soLL7zA87xKpWrYLw4HRePHj7/xxhsbP5KeiLZs2eLz+bxer1arbdeKWwOCEAAA/qDJDTJnYln2zGfQy2QymUzWZkW1IVwjBACAiIYgBACAiHYBp0Z379796aefqtXqyZMnx8TEnNmgoKDgk08+cTqd/fv3Hz58eOsVCQAA0FZaOiLcvHlzbm4uz/P79+/v06dPw4rjDXbs2JGdnS0uOnD//ffPmDGjtUsFAABofS0dEc6dO/eJJ54Q423kyJFvvPHG9OnTGzf46KOPRo0a9fzzzxNRly5d7r777rlz57Z6uQAAAK2rpSPCH374YcSIEeL28OHDf/jhhyYNMjIyCgoKxGdQHTx4UJxiCQAA0MG1aERYVVXldrujo6PFl1artbi4uEmbqVOn7ty5MyUlxWQy8TzfsAZPs/bt2/fYY4+J20aj8dFHH73wytuPx+MRBCG0Hq/VEh6Ph+O4JusnhQGPxyOVSoNdRetDv0LLeftlMpnsdrvJZGq3klqFIAhB+dBwuVxXXXWVx+O50DfKZDKWPc+Qr0VBKGaAuHwOEQUCAYmk6Ru/+OKLzz///LXXXouJiXnhhRceffTR999//xyVNXz79Xp9B88Y7nfBLqSViZ0KvyAMy28WoV+h5rz9Sk5O/vnnn0PrSe5E5HQ6NRpNUL60wWC4iM+rlrylRUGo1Wo1Gs2pU6fi4+OJqLi4OC4urkmbhQsX3nfffWPGjCGiRYsWxcTEzJ8/v9mbS4koPT39ySefbMmX7gikUqlUKg2/n1WxX+EXhGK/gl1F60O/QktL+iU+tyG0KJXKUFw75txaeo1w5MiRH3/8MREJgvDpp5+OHDmSiPx+/9atW71eLxHp9fqTJ0+KjYuKijiOC7+/LAAACD8tvWt05syZV199dWlpaXFxcXV1tbgkud1u79+//+HDh9PT0x977LFhw4bZbLaoqKhVq1Y9/vjjjdepAwAA6JhaGoQ9evTYu3fvunXrVCrVqFGjxJAzGAzff/+9eL60b9++Bw8e3LBhQ21t7W233Zadnd2GVQMAALSSC1hZJjY2durUqY33SKXSIUOGNLyMiorKy8trtdIAAADaHtYaBQCAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiIYgBACAiNbSIOR5/sUXXxw0aNCYMWM2b97cbJuKiopHHnkkJydn+PDhn3zySesVCQAA0FYkLWw3f/78FStWLFu2bN++faNGjcrPz09ISGjcwOVyDR48+IorrpgzZ47X662rq2utEk/WClKWiVa21vEAAABOa2kQLliw4N///veAAQMGDBjw5Zdf/uc///n73//euMHSpUv1ev3rr7/e6iW+ks+7/LQgh2v1IwMAALTo1Gh1dfXRo0f79+8vvuzfv/+uXbuatPnpp5+GDh369NNP5+Xlvfjii16vt7VKnN6De+8If7JWaK0DAgAANGjRiLCsrIyIjEaj+NJkMpWWljZpc+LEiXXr1j399NP33nvvP/7xj127dr377rtnO+DGjRt79eolblut1lWrVp3jq8uJJiZLZm/3z+3lb0m1rc7tdvt8Po4LtyFpbW0tz/MMwwS7kFbmdDqDXUKbQL9CC/rVQahUqvN+ercoCHU6HRG5XC6ZTEZEtbW1er2+SRuNRnP11Vc/9NBDRJSQkJCZmbl06VK1Wt3sAXv27PnMM8+I2zKZTKvVnruAp/pS5od1M/so4lRB+NTmOE4ul4dfEDIMo1arwy8Iiei8/6JCFPoVWtCvUNGiIIyKilKpVEeOHOnduzcR/fbbb8nJyU3apKSksCzb0J7neYfDcbYg1Ov14qFayKqkW9PZf+3hX+ofbmkEAADB1aJrhCzLTpw4ceHChURUVlb24Ycf3nLLLURkt9tnz57tdruJaMqUKV9//bXdbiei9957LyMjw2q1tmKhM7K55Yf4MncrHhIAAKDF8wjFy36dOnXq1q3bpEmTcnNzichmsz311FMul4uIcnNzb7nllq5du1522WUvv/zyO++807qFxqpoYid23p5A6x4WAAAiXEunT8TFxe3YsaOwsFCj0TTcNZOens7zfEObOXPmzJgxw+l0xsfHt36lRI9nsz0/9j/Wg4tStMXhAQAgEl3YEmuJiYkNKdgsvV7fRilIRIlq5k9p7L/3YlAIAACtJsTWGn0im31tP1/hCXYdAAAQLkIsCJM0zPhU9pV8DAoBAKB1hFgQEtHMnuzi/by91RauAQCAiBZ6QZisYcYksfMxKAQAgNYQekFIRDN7sq/m81W+YNcBAAChLySDsJOOGZXEvprPn78pAADAOYVkEBLR//ViF+wLOFrtoYcAABChQjUI03XMtfHsq/swKAQAgEsSqkFIRDN7si/vwaAQAAAuSQgHYTcDMzSeXbwfg0IAALh4IRyERPR/vdh5ewJODAoBAOBihXYQZhqYq6zskgMYFAIAwEUK7SAkoqcvZ+ftCbj8wa4DAABCU8gHYZaRGRDNLsWgEAAALkrIByER/b0X+8LugBuDQgAAuHDhEIQ9zUzfKPaNQxgUAgDABQuHICSip3uxc3bxHizEDQAAFyhMgvByC9PLTG9iUAgAABcoTIKQiJ7pzc3ZxfsQhQAAcCHCJwh7W5gsIy3HoBAAAC5E+AQhET19OfccBoUAAHAhwioIB0QznfX09mEkIQAAtFRYBSERPXM5N3sX70cUAgBAy4RbEObEMMkaeuc3JCEAALRIuAUhEf39cu6fGBQCAEDLhGEQDo5l4lX0fgGSEAAAzi8Mg5CI
/n459+xOPiAEuw4AAOjwwjMIh8YxViWtwqAQAADOJzyDkIie6sX9YyfPY1AIAADnFLZBeF08Y5LTh0cxKAQAgHMJ2yAkoid7cs9iUAgAAOcUzkE4MpFRS+iTYxgUAgDAWYVzEBLRU73YWTswKAQAgLMK8yAck8QqJbTmBAaFAADQvDAPQiJ6sif7zA6MCQEAoHnhH4TXJ7MsQ2tPIAoBAKAZ4R+EDNET2ezTO7DODAAANCP8g5CIxqWyfp6+LkQUAgBAUxERhAzRzJ7sszsDwS4EAAA6nIgIQiK6KZV11tG6kxgUAgDAH0RKELIMPdGTnbUdg0IAAPiDSAlCImdqJLgAACAASURBVJqQxtp99H0xBoUAAHBaBAUhx9CTPdmnMSgEAIBGIigIieiWTmyFhzaewqAQAADqRVYQcgzNyMbtowAAcFpkBSER3ZrBFtbSphIMCgEAgCgCg5Bj6PEe7D8xKAQAACKKwCAkoikZ7G81tBmDQgAAiMwglLI0I5v9x06sPgoAABEZhER0W2fW6ac7NgXq8KRCAIDIFqFBKGPph5ESZx2N/MZfUxfsagAAIHgiNAiJSM7R+0O4dB0zdK2/1B3sagAAIEgiNwiJiGNo8UDuplQ253P/oWpcMQQAiEQRHYSix7PZv1/OXvmF/8dSZCEAQMRBEBIRTc1g3x4sGf+dfy0e3gsAEGEQhPWujWc+v04y7b/+pQdwIykAQARBEJ7WN4rZPEYybw8/awfWnQEAiBQIwj9I0zKbRku+OCHcuSngx8gQACACXFgQ1tbW8nyY50OMkjaOlpS4hfHfB1z+YFcDAABtrKVBePLkyUGDBiUmJkZFRb3++utna1ZXVzdw4MDOnTu3UnnBoZbQZ9dKohU05Et/uSfY1QAAQFtqaRA++uijmZmZFRUV69evf+SRR44ePdpssxdeeEEul1dVVbVehSR43XxtTSsesCUkLC3N5YYnMDmf+4842vmLAwBA+2lREDocjk8++WT69Oksy/bo0eO666575513zmx24MCB1atXz5gxo3VLdO3YUL5oRvtnIUM063JuRjY74nvprsp2/uIAANBOWhSEhYWFgiCkp6eLL7t161ZQUNCkDc/z06ZNe/XVV+Vy+XkPWFdXZ/9ddXX1uRurrxihyOxXvugJ3hWEodmdXdh5ffwjvuG/KcIUQwCAMCRpSaOqqiqVSsUwjPhSq9VWVjYdIs2bN69nz56DBg3auHHjeQ/4/fffp6WliduJiYlbtmw5d3v2yvGsx1OyYLrm9lmMUt2SmlvR1Qb3yhzllI2Kf/asy0sKn5kV4q1PDd/WsOF0OoNdQptAv0IL+tVBqFQqjuPO3aZFQRgVFeVwOHieZ1mWiOx2e0xMTOMG5eXlc+fOXbZs2Xfffbd7926fz/fdd9/l5OSoVKpmDzh8+PB33323Zb2opx1/b/Xnyzwr50bdN4eRKy/ovZeI47irzfLvTOzIr5kTXmbW5ef5Ow0VDMOo1erwC0Ii0mq1wS6hTaBfoQX9ChUtOjWamJio0Wh27dolvtyxY0dmZmbjBn6/f+jQoe+9997SpUvXrFnj8XiWLl163nOeF0o/5k5ZQnr5azMFbxCeFpFpYH4aK/nsuPDgTwEeZ0kBAMJFi4JQoVBMnTr1ySefPHHixHvvvbd169bJkycTUX5+/pAhQ4goNjZ21e9mzZql0+lWrVoVGxvbysUyjGH8X6RxqRVLnhJ8QZjWEKuiTaMlB6qEvO8DnvA5RQoAENFaOn1izpw5aWlp11xzzaJFiz799FOLxUJEDMNIpdImLY1G45VXXtnKZTZgGONNf5XEJFUse0ao87XVVzk7rZTWDJPIOBrxtb86CF8fAABaGSMI7X2ab+XKlWvXrr3Qa4R/wPOV777E11ab73yakcpar7TmuVwuuVze+HKrQDRja2BtofDVcC5RHarX2JxOZ1heI3Q4HOF3DYPQr1CDfoWQ0FxrlGVNkx5jVVrbf/4h+Ova/+szRM/346Z1YXM+D+yuxAVDAIAQFppBSEQsa5r8N0ahrHxrjhAIzpKgD3Zn/z2AHf61/78lyEIAgFAVskFI9VkoBPjKt54nPjj3roxPZd+9WnLT9/5VBWG+FjkAQLgK5SAkYjiJ+Y6nhDqv7e3nKUiPxbg6lvl+pORvW/mF+5CFAAChJ7SDkMQsvP0pvram8v2Xqd1v/BF1NzIbRnGv7uMf/CmAk6QAAKEl5IOQiBipzHLXMwF7mf39fwcrC1O0zOYxkl/KhZu+C+yrQhoCAISMcAhCImJkcsu0Z/22YvuqV4KVhWY5fT9S0ieKueZL/9h1gS2liEMAgBAQJkFIRIxMbr7rmbrio1WfLglWDUoJPZHNHpsovSmVueu/gT6f+t86zONsKQBARxY+QUhErEJlue8539F9VZ8uDWIZMpamZLD54yVPX84u3s93Xe2fv5fHkmwAAB1TWAUhEbEKteW+2b4je6o/XxbkShgak8T+NFby5pXcd8V86vt1s3YEqrAqGwBABxNuQUhErFJjufc5z/5tNd9cwipurWeQlVlzneSbEZKCGur0Qd2DPwWKXThbCgDQUYRhEBIRq9ZF/eV5985NNd++F+xa6vUwMW8N5rbfICGi7h/5p2wIHMDNpQAAHUB4BiERsRp91F+fd21b7/jug2DXclqKlpl/BXcoT5qmoyu/8I9Z5/9fGeIQACCYwjYIiYjVGKL+PKf2f187fvgw2LX8gUVBsy7njk6UXhPHTvghMGiNf80JPOsXACA4wjkIiYjTm6P++kLtlrXOzV8Eu5am1BJ6sDt7dILk8Wz22R18r4/9bx3m/VimDQCgfYV5EBIRZ4iK+uvzzvUf1m5ZG+xamiHeXPrLDZJXc7jVR/mM1f75e3lXcB6nAQAQicI/CImIM0Zb7ptd8+17tT99Fexazkq8ufSjodz2CqHTB3WzdgRs3mDXBAAQASIiCIlIYomL+svzNd+869r2Q7BrOZfLLcxbg7nNYyR2L3VdXffgT4ETTlw9BABoQ5EShEQkiYqP+vPc6jVvuHZsCHYt59FJx8y/gvt1nETO0eWf+KdsCPyvTMDtNAAAbSGCgpCIJNEJlnufq/5kiXvnpmDXcn5xKuaFftyRCdIsI3P35kD8yrq7Nwe+OCG4cQURAKD1RFYQEpE0NsVyzz+rPl7syf852LW0iF5Gj2ezu8dJfhor6WNhlhwIxK6sG7POv/QAf8oV7OIAAEKfJNgFBIE0oZN52jO2ZU/L03toh/5JGt8p2BW1SIqWubsrc3dX1u6l74r5NceFGb/UpWmZ0UnMn9LYTAMT7AIBAEJSJAYhEcmSOltn/qf2f99UvD6LM0brrslTZA0IdlEtZZRTXiqbl0oBgfupVFh9lB/+VUDK0ugkZkwSOziWkUTcOB8A4OJFaBASESNXaq66QT1otHvHhqrP/8N89bb2qhtVvYcQGzIxwjE0yMoMsnLzr6B8u/DFCWHWjsDBauHqWHZ0EnNjCquVBrtEAIAOL3KDUMRwElXfa1R9hnr2/Vzz7fs136zU5I5V54xkpLJgl3Z
hsoxMlpF5PJs94RS+LhJWH+Uf+CnQN4oZncjelMrEq3HiFACgeZEehPUYRpE1QJE1wFuQ7/h+leP7VeqckZrBN7IKdbAru2BJmvpLiS4/fV/Mf3FCuPzTgEXO5KUxY5LY3hYkIgDAHyAI/0CeliVPe6buZIFjw0clz96mvmKEdshNxITk35JKQmOS2DFJtGgg91Op8EUhP2l9wMvTdfHM6CRmeELInAEGAGhTIfkR39ak8WmmSdP9tlPOjZ+WzL5Llp0rGXITZ4kNdl0XqeFS4ty+tLtS+Py48I+d/B2bAtdYpdcl8jlWtosew0QAiFwIwrOSmGMN4+7TXXezff0nFf9+SNGtj3boBKk1Kdh1XZIeJqaHiXmqF1vsEj467PmumJ7ZGXD5hSti2JxoZmAM0yeKUXDBrhIAoB0hCM+D1RiUQyfoh+Z5fvmuYvET0oR03bUTZSndgl3XpYpTMbd3Cvy1B8swzCkXbavgt5QKM37hd9mErgZmYAzT28JcFcskazBYBIAwhyBskfq5FgNHuXdurHz3JVZj0F2Tp8jsT0w45ESsqv5qIhG5/LSjQtheIXxxQnj054CUpUEx7MAYZpCV6WVm2HDoLgDAHyAILwAjkZ6ea/H1yuq1K7RXj1f1vprY8DmZqJKIFxSZB7sTEVfgEDaXCNsrhLd/4w9VCz1MzCArMzCGGRTDGuXBrhUAoDUgCC/cH+da1Hz9jubKG0Jx6mFLpGmZNC0zJYOIqKaOtpYJm0v5V/L5W9YHktRMb0t9LmYZMVQEgFCFILx44lwLX+Fh56ZPHf+Yqs4ZpbnyBlalCXZdbUUnpWvimWviOSLy83SwWthSKmwuEWbv4r0BoU8UI55E7RvFyMNnhAwA4Q9BeKlkiRmmSdPrTh1z/LC65Lk7VH2vUWUPlKVkhsflw7ORsPVr2dzdlYiowCH8WCr8WCqsPMIXOIRMA5NtZnqamGwz08PEYKU3AOjIEIStQxqbYpo0PVBZVrt1nf3DhXyNXdF9gPKyK+QZPcPylGkT4hnUyelERI462lMp/Fop7LIJKw7z+XbBqmJ6mplsE5Ntomwz7kQFgI4FQdiaOFO0bvhk3fDJgcoyz4Ftzi1rK996Xtapu6pnrqL7AFYZtmdNG9NKKSeGyYk5nXbFLmF7hbC9Qnj9oLDPTjav0N3IZBmZTAPT28L0tjBK/DMEgODBJ1Cb4EzR6pyR6pyRvMvhyf/Znf9z1cevSazJqp65yuxBnMES7ALbVZyKiUtixvy+FEGVj/ZW1kfj27/xB6qEZA2TZWQyjdTbwvSLYmOUQS0XACIMgrBtsSqtqu81qr7XCHU+76Ed7vytjnn3s2qtsueViqz+ssSMYBcYBAZZ/QwN8WUdT4eqhe0Vwr4qYekB/s5NAfECZMN4sauB4XAyFQDaDIKwnTBSmTjpgm76q/fYfk/+z5VvzSU+oMjqr+w+QJ7eI5wmI14Q6e/33TTsOeYQdlUKv9ro0+PCMzv4Mo/Q3chkm5huBqabkemqpyRcZQSA1oMgbHcsK0/Lkqdl6cfcUVdy3JP/c/VX7/jLChWZfZVZAxSZfRmZItglBlmKlknRMjck17+s9tHuSmGvXdhnF74o5A9UUZVP6KJnuhqYTAPTRU/djEy6jpHhcRoAcFEQhMEktSZLrcnaoX8Sb66p/eU7+/svyzpdFlE315yXXka5VibXenoU6A3QbzXCvioh3y58UEAFDv5gtRCtYNJ0lGlgOim5zCgh28xERfr/KACgRRCEHQJurrkgcq7+bGpeav0eP08naoWCGsq3C7vK2VWFgd2VgoSlTAOTZWTStEymkbKMTKo2rGd3AsBFQRB2LKdvrvF5PQe2u/f+VLNupcRkVWT2kaVmyVK6sQpVsGvsiCSsOJeRrolnHI46rVZBRCecwsFq2l8l7K8SvikS9lUJngB10TOZRqarnulqoEwDk6plJDinChDZEIQdFCOTK3vkKHvkEM97j+zxHt7l+O59X+FvEkusPDVLlpYlT8viDFHBLrNDS9IwSRq6Nv70INDupQPVwv4q4UCVsOygsL+KimqFeBXTSUdpWqaTjknTUicdk6ZjdFgNByBiIAg7PJaVZ2TLM7KJiPhAXVmRryDfs/+X6s/fID4gTcyQp2XJUrNkyV0YDt/N8zDK6Ypo5oro09FYx1NhrVBQQwUOocAh/FJOBQ7+cLXA1Q8xmTTd6Y1kDSZyAIQhfHSGFJYT769R54wkokC1zXd0n7dgb/Unr9WVFUnj0uRpWfK0TFmny1iFOti1hgbp7+dUif4QcXZvfTQW1ND2CmH1Ub6ghopdQpzqD9GYpmW66BkNho8AoSwEgvC/hT9tPPHTvb2mWlTmYNfSsXB6s7JnrrJnLhHxHlfdiYPegnzHxs98K+ZKTNGytO7y1Ex5eg/OGB3sSkOPUU695Uxvyx/S0e2nIw6hoEY44qCCGmFTiXCkho45BYuC6aSlNB2Tpq0/y5qsYWJxMRcgRIRAEPaNvfw3+7Hbv3xgXOfRk7LGy7jwX8P6IrAKlbxzL3nnXkQkBPx1xUd9Bfnu/J+rPl3KcBJZWpZ4ZVGWkB7ej8VoU0oJdTcy3f/48EVeoKJaocBBR2qEAoew5gQdqeFPOIUqHyWomUR1/aXKRDWTqGaSNJSkweM4ADqWEAhChUR+e4+bR6dfu3TX25M+v/eu7MnD0oYEu6gOjeEkssQMWWKG5qobiMhvO+UryPce3Vf705eBqgppUhdxRr8QlUyEM6iXimXqo25w7B8C0sdTUa1QXEun3PXzOr47KRQ4hN9qBG+AxFOssUomTl1/ojVWRalaRhUCP5EA4SZkfuyiVJaZOQ/vLN2zYNvrXxV8/0Cfu9MMyed/GxBJzLESc6yq7zUkXlYsyPceza9e8x9faaE7JkkalyqNTZbGpUrjUlmNIdjFhg/ZWa4+EpHbX5+OBQ5BfDSHeA2ysFbQSEkMxThV/WVIMSxTsKocQJsJmSAU9Yq5bNnI+d8eXf/YD3/Pie83reeterku2EWFEk5vVva6UtnrSiJy2m3S6jL/qWN1p46583+uO1lALCcmYn00xiQzMnmwSw5DSknzGckLdMolHHdSYa1QVEsnnMJPZVRUyxfWCtU+SlDJ49T+eDVjVVKcirGqKE7FxKooVskY8V0CuAQhFoRExDLMsLQhOQn93tz93pQ1f7m1+5/GdRnFMpgUfeGkcllyV3lKt4YdvMtZV3K8rvCw7/jB2v+tqzv5G6tQSxMzpNZkqTVJmpghjU4kFn/VbYVlKF7NxKvpzEGkJ0D7S50OVn2yVih108laYXclFbv4EhcVuwRPgGJVTJyKrOLvSiZeTTFKJl5FMUomGo+1Ajin0AtCkVameaDPtBs6j1iwbdnnv319f++7+sb2CnZRIY9VacTLh/Wv+YDfXuYvOe4r/M2d/7Pj+1V+W4nEbG0UjZ05nSmoJUcKBUfpWkGrZc7MSCLyBsjmFU65qNhV//uPpVTs4sXtMjfpZPWnWM/8PUmNtXUg0oVqEIqSdAkvDpn148mt87YuStUnPdj3HqsaUw
VaD8uJ1xcVWQPEHYLXXVdW5C85XldywvnjV3Un5guBOklMsiwxXWpNlliTZQmd8PSM9ifnKE7FxKmod3Mx6eOpzC0U1VKpWzhZS6VuYY9d+PakUOyiErdQ4SGznKKVTJyKopVMjJJiVUy0gmJVTIySopVYuxzCX2gHoSgnvl9va8+PDq659+tHR6RdM+WyCUoJfnbbBCNXivejNuwJVFXUnTpWV3zUW7DXuWWtv6yQM0RJrUmSqHhJdIIkOkEanchq9EGsGWQsJaiZhObOuIrsXip2CXYvnXLX3+a6s4LsvuYHlEZ5/bVJ8XejnGJVmJEDoS0EgrD6SG3VQWf81RaJ8qyPrpVzslsyx1+XMnjJrrduXfPnadmTr0u7mjnLjz20Is5g4QwWRbc+9a/5QF3ZSX9Zob+syFeQX/u/r/1lJ4kESVSCJCZRGp1QH5BR8YwEk+k6CqOcjHLxh6X5AWW5WzjlplI3lbmFYhcVu4QdNipx8WUeKq4VfDzFKJlYFUUrGauSrEqKVjKxKrIomChF/e8AHVkIBKHSIivb5t8+53DC1ZbYQSZWetYLGhaVeWbOw/tth17ZtvTLI9/d32daujH1bI2hTbCc1JoktSY13se7nH7bqYCtpK7kuGvnRnGDVWok1mSJJVZitkqtSRJrssRkxWT/DkjGNtzCQ80mpSdApW6h2HU6L/dXCd8XU7mHr/BQmVuo9pGYiNFKilEyjbbJImdUPJMqI9z4CkEUAkEo00szJsS7y7zHvyornnM48dqomP5Ghj3rJ2Y3c+eF17347dH109fP6mPt+efL7zAqcGoumFiVRqbKoMQMJeXW7+IDfntZwFbirzhVV3Lcc3BnwFYSqLFJzFaJNVlijhU3ZPFpjBy3PHZ0Co6SNUyyhs526pUanX21++pv59lnpy0ldMrNFzlk1f66UjfpZfWnXhvOvhplp8/EGuVkVZ7j5x7g4l1AEAqCsHXr1tLS0pycHIulmUfFOp3Obdu2uVyuXr16xcbGtl6RRETKaHnXqYnOE+5ja0tPbrQlj4i29NCf7edOnGIxMKH/u/kf3r72/j91uz6v6/VSNgRSP1L8fhuOuCaciPfU+stO+suK/KWFnkM7/Vu+8JcVsWqdJCpBGp0giY6XWGI5c6zEHMtIscxeiDnH2VeHw6HVar0BqvAI5R4qcddvlLuF3ZVCuYcqPHyZh0rdQh1PFgVjkZNFQdFKxiwni4IxK8gip2glY1GQRUFmOSM/6yUUgOa1NBsEQZgwYcKePXsyMzPvvPPOzz77LCcnp3GDH3/8ceTIkT169NDpdJs2bZo3b960adNavVxNkrL7fSlVh5zHvigtWl+RMspqyDjrImEamfqeXlOHpQ1ZsP31dQXr7+8zrbc1uxWL4QXB5raV1JaV1JaVOstLastKa8ulnGRq94ldzOmt+IUiBKtQy5I6y5I6n94lCAF7eV1Zob/8pL/8pOfgTr+tJFBZwio1nDlWYrGKY0dO/F2PNdlDmJw7zwlYIvIEqMIjVHiozFO/YfMI+6uowkNlbt7mpQqPYPOQnKMoBROlJDEpxXSMVpJFQRY5Y1bU78TYEhowgiC0pN2GDRsmTZq0f/9+nU738ssvf/bZZxs2bGjc4NSpU0QkDgTXrFkzceLE6upqiaSZoF25cuXatWvffffdSypcoIrd1ce/LJUbZSmjYzQJ5zmB9t/C/y3a8Z9OxtQ/X357nMZ6QV+q2llTE3BUeCpLastKnGUltaWlteWlteXlLpterrVqomPU0VZ1dIw6yqqOLqkte2vvqh5RmXdlT47XtvKwuHU5nU61Ws2E4GW5houOftspv63EX3FKPLPK6cyc2SroLUprosQcy5mt0uiEsDm5Ko6cgl1F62v1ftXUUZlbqPBQhYdsXqHCQ+UeodxNYlKKO20esigaBpSMWVF/FVNMTbOCzHKKUlzSkj34foWQlgbhQw895HK5li5dSkQlJSWxsbE2m81kan4y9YEDB7p3715TU6NSNfMomtYJQiIiEgJC6Vb7iXXluhRV8sgYZdS5zpj5+cCnh75csff9a1MG35U9WSVt+vlYF6grd9tOOUsrXDabx17sKCl2lpxylla4K7UytUVpjtXExGmscVqrWWk0K03JugRFc/M0PH7vx4e++GDfp7mJA27vcYtZabz0nraF0A3CZgn+ukB1RcBW4iw6Kqmtqr89p7SQkUjEUKy/9GiJ5czWULwxJyw/gChI/QoIZPNQhVeweerPxFZ4qMIj2Lxk8wg2L4nx6ayjRtHIWH7Py4aXZjlFKRlDcx88+H6FkJYGYV5eXo8ePf7v//5PfCmXy7dt23bZZZc12/jOO+90OByrVq1q9k9Xrlz5+uuv33vvveJLtVo9YsSIC6/8NN7Hn9pSeWqT3dRdm3CtRaY71/neCpft9d1v7yrLv6XbuIAQKHNVlNaWl9SWlbnKa+vcVnVUtCoqRh0Vo4qKUUfHaqKjVVEqQaFUKDnuwq48VHtrVu776Jtj62/MGJXXZeyZuRt0YRaEDf7wgyoIgWpboLLEbyvhK0v8Faf8tlMBW6ngdXEmq3h+lTNGc6YYiTGGNUazKk1Qaz+XsPwAoo7drzqebF6q9NYPLsW8rPTWDy5tXrJ5mXK34AqQWU5mOWOWC2ZFfV6qBW+cXm6Sk1nOGGWCWcGY5MSF/k9bR/5+NYtl2fN+yrX0GqHP55NKT0/8kkqlXq+32Zbz58/ftGnTli1bznG04uLihpi0WCxDhlzqY5UsOVpjL3XJpqpd845YeuusVxk4RfOzLLSc5pFe9+2vPPT5kW+0Mk20ytLJmhKtiopRWYyK5p+94HK5WIa90CBUkPyOzFtGpVz71r4Pbl3754mdbxyROrRD3bDj9XolEkn4BaHP5/vDP06lluK1XHwGR9TwL1jwunl7WaCyhLeX+cqL+UO7AlXlfGUZEbHGKM4YzYq/DNHiNqMM/vOqmvYrXHTwfhlZMiqp0zn/H1vHU6WPsXnJ5qFKH5V7mEofU+jk9zkCdh9VeplKL1X6yO5jtBLBomCMMsEkF4wyMsvJKCOTXDDJySQjo0wQo1QradH4JCg6+PfrTAqF4ryfci0dEd5zzz1arfall14iotraWo1GU1RUFB8f36TZkiVL5s6du2HDhuTksz4jqRVPjZ7JW1VX+G25bU9N/GBzXK75HJMOW87lcsnl8gsNwsaOVRe+uXvlftvhW7vnjep0HdsxsiciRoQXSKjzBWps4rwOv62k4UqkUOcTL0CKEx/Fc62czsTpTO12ijXk/ifeQhHVL5uXKj2COMq0eYVKD9m8gjjErPQIFWKUegV3gMxyMskZs4JMcsYsJ5OczArGIieTnAxyxiirvxG32bOy7d+vUNfSAcrAgQPnz58vbq9fv75Tp07ifTE8zzMMI36YLl++fPbs2T/88MM5UrCtyQ3S9Ly4+KvMx78q296CSYftI0Wf+Ezu43vLD7y2c/lHB7+4u+eUnPi+wS0JmsVIZWfO66AzAtJVuClgKwlU23i3s0lAsjoTpzeF4jVIaAfiGdTflyg86
78QH0+VXrJ5hPq89JLNQzavUFBDlV6q8vF2L9l9ZPcK1T4yyusnXBpkZJQzRjkZZafD0iCr3yOmJhZYb1ZLR4Rut7t79+5Dhw4dMGDAs88++8QTT9xzzz1ENHr06J49e/7zn//cvHnzVVdddcMNN2Rk1H+XH3vssWanG7bpiLAxxwn38bUl3mr/uScdnteljwgb+/Hk1kU73oxWWe7pNbWLKZizLDAivHS82xmoLPNXlgYqS/2VJfXb9lLB75eYojlDFGeI4oxREmO0uMEZoi56EmRY/k+c0K9LIxDZvWT3ClU+EtcrsHvJ7qUqccNHVV7B7jvdRiWpT8Qm8WmQkUFOeikZ5IxBRnoZGWSMPmJuAmrpiFCpVP7444+LFi3atm3b/Pnzr7/+enH/HXfcER0dTUQWi2X27NmN38IG+8F12iRl9/tSxUmHJ9dXJJ9z0mG7yYnv1z+u95dHvntywz+7R3W7u+eUDj7LAs6BVWrYeI00Pq3J/oYRZKDaFqip9B7b76/4ga+x+e3lDMdxOhOrM0ssseLJ1fobWY3RxGIqOFwYhsgkJ5OcabTjXGrqGqWmV7D7qMpLdp9wqJqqfFTlE6p9VOWjah9VFuBfPgAAIABJREFUeYWauvoxpV5GBhnpZYxBRkqSRGsCehnTODgNMjLIGb00VEecLR0RtqJ2GxGeJlDF7upja0sVphZNOmyidUeEDcRZFu/v++TKxCvu6HGLqd1nWWBEGBS8yxmosfE1leJZ1kCNLVBdGbCV+O1lrEJ5+uqj3tzkRGsH79dFQ786LIHqY/L3dBSqvFTm8LoZebVPqM9Ln1Dloypv/YZK0jg4SS9jdFIyyEkvY/RS0slIJyWdjNH/PujUdYzs7EA3MbYhhizZenN3XelW+75lx3Wp6pRRMQpLkJfpUkjkt2SOH93puvf2fTx17V9Hd7pucvc8tbSZmZcQTliVhlVpyJrc9DJkwM9XV/qrygP20kBVRcBe7i3YF6gqC1RVCD4PZ4ohjbHOFC0OIlmdkdObOa2R05vxAEhoI8wf1sYTd5DD4ddqzzqWcNT9Pqz0UnUdVfuEmt/zsqiWqn1U46OaOl7csPuEGh/JOdLLSCdldI1OyeqkpPs9R3UyMsrq/1Rspm7t4IqMICQiIoZjrFeYonobijfafp1/xJytj+5t0KWogrvUkk6uvafX1Bs6j3x776pJn9/7p27X53UZK+XwiKKIw3ASzhTNmaKJspr8keDz+u2lzlOF8jp3oNrmrywJHNvHV1cGHPZAdQUxbEMostqGgDSxOhOnNbJqXVC6A5FJKyWtlEk8fQ3q/B+vtf76vKypq0/H3/NSKHQK1T6qqaMqH1/tE3NUGJ3Evp7byufnIigIRZyMTbw2yppjOrXFduTjYl+135SlNXfXGTqrW2WuxcWJUUc91v8v47uOWb77vUlr7utQsywg6BiZXBqTJFEZVc2dahPqfLzLEaip5MWzrDWV3qP5gWobX1Mp3tfKKjWc3szqTKxKw+nM4nnX+pOuuDAJwaaWkFrCxJ0+FxaEz72IC0KRVM0lXReddF20115nP+Ao+any0LtFujSVKUtr6q6TaYPz15KqT3om9/E95fuX7Fy+5rdv7uk5tXVXCYewxEhlnN7M6c1EGWf+qVDn4x1VgeqKgLMqUFXBO6vqio96Du4IVNt4h52vrWHVOlat43QmVmNgNXpOa2C1Rk6jZzV6cSee9QFhL0KDsIHcKLVeYbJeYfK7A1WHnJX5jmNflKqscnGYqIwOwtNCL4vqtuC6uRtP/PivrYtj1FH39rqts6lT+5cB4YGRyn4/49ocng847LyzOlBTyTurA7XVfE1lXVkR76ziHdUBRyXvrCZOwmmNrNbAafSs1shpDaxaz2oNnNbEavRiZLZvnwBaWaQHYQOJkrNk6y3Zet4v1BTUVuY79iw6KlFypiydqpNUntGuicgQMzhp4KCEAWt++/rxDc/2jsm+OWtcJ0NKe9YAEYFlxdHkmTNAGvAeF19TGXBW87X1eekvPxk4spd3VgWc1bzDzntc9SNIrZHVGlhNfWSyah2n1rEaPavWsarQvn8SwhuCsClWwhg6awydNWk3xDpPuivzHUVrbUdryoxdNZZsvbGrhmmvdXMlLHdj51HD04au2v/pzI3P8QI/IK7PgPjel8f0aPapFwBtgVWoWIVKEp1wtgZCwM87q5sMK/1lRYHaGl785azmvS5OrWfVWlb8XWPg1HpWo2NVOj8n9VliWI2BU+vC5plZEFoiYx7hpXG5XIKTqT7gqtzncJ5w69JUlp56U3etRNGudxkUO0u2l/z6Y9HWHaV7MoypOQn9cuL7pegTL/qAmEcYWkK6X0LAz9c6+Npq8feAs6r+pbPGV1PJemoDzmq+toYEnlVpxUEkp9Gzar14CZNV6ziNoX5bpWVkQbhmcaFC+vt1DmHZL4wIW0RhlqmvVMZdaa6rDdj3OyrzHQUfn1LFyi3ZenMPndzQHrMd4jTWuHTrmPRhHr83v+LAlqKt0394WsJKeluzcxL69rX2wqQL6LAYTsLpjJyumVUjGn+wCnU+vraGr60OOKr43weU/tJCvrYm4KzindW8y8G7HCQIrEpb/0utZVVaVqlh1NrTOxv+SBH8xaSg40MQXhipmovuY4juY+Dr+KpDtZX7HIUvH5GqOEtPvSlLe6Fr1lwchUTe25rd25r9QJ9pR6tP/HTylw8PrPnnln91NWfkxPfLTRwQo45qhzIAWh0jlXEGC2ewnPv/dOKMkT/8qnXwLkddVQXvcvC1NeJOweXkfZ4/pqPmjARt2FBjJknEQhBeJFbKmrK0pixtp/Gx1UdclXtr9r95guEYU6ZW30mtS1VJNe3xd5uqT0rVJ92SOb7G59hRsnvbqV0r930o5+RXxPcdmNCvR3RWh3oCIkCraDRj5Hz4QENM/v7Lybsc/vKT/PHTCcq7nbzbyUjlrFLNKjWsUs0otayqYVvDKtWsUiv+KSO26cDPcIYLhU/JS8WwjCFDbchQp90YW3vSYz/gKP3Zfvj9kzKdRJem1qWp9GlqubHNT1rqZNrBSQMHJw3khT8fth/ZXvLrm7tXHqk61jO6e05Cvyvi+1qUprauAaDDYTlWY2A1zT9zuwnB6+bdTt5dy7udvMsp/L4dqKr4//bOPDau6vz75+7brJ7xeI8Tx84GNBATCPsW07c0gVAadgWIBLSAkJCA0iKqRtBKICgUaIuoaAlEQKsWUkIIET+2AGEpJouTpjFJvC/jmbFnu/ty3j/O+GawHSdOHI+X81E0Ove5Z+zn5nrme59znvMcq6fVUbLorGtHckgMaicpeAYl00MKHosg9WCY5EWCF5GI4p25Ji1YCMcTqYKXKnL5nFrCSDZnk/uzbe9GoQ19cyTfHNFXI3oqhJNaOYEkiPlFtfOLam9cdE1ST+2M7vm88+sXd7xS5ilBs4mnhBfimjUYzHAITqA4gQoc68yCo2QdNQORcA6+QhRuqlkjk7JM3dEVqCnoFMHyJC8SvEQKIslLBC+SopfkRYITSUEieJHkJZIX
c8rKi4QgERT+ip4I8P/yyYIPsWipPgDASFvpFjnZLPd+0a+nTO8sMTBP8s2RvLOEk7oYI8D5UZhoOXZT7L9fdn3z5Fd/TOnps8vrl5Wfucg3TwJHTCVwIJRNebhdMVUb2kOMhm3qto7aBCCCvD/IB2k844KZ1uTqpx+B4dmVUFcdVXY0BWqyoyk5BdUUqMlWvDtnQWeVrKPJUFMASeYkkxdz4SYvEpxAsHxOO1mB4ASSFwhBQm2C40kBD9uODSyEEwHro9FqfQCAmbEy7Wq6RTn0Vo8a06VyHgWL/rkSxZ+sYqc0SZ1RctoZJaf9fMltvXLfl13fbG358PHoszRJAwBMx9QsfchbSIKQmBFkUmQEihiqcAzF8FQuox0C2K8lk1rSy3qDvD8shor4QFgoKhKCIaEoJBSFhGBIKOIoXLgLM7PIRZxjeQs0DaSIjiYfFk5dg7rqKFmnPwp1Feqao6tQzTq6mjvUZIITSA5ppEjwEsnxBDoUPAQnoMOcxHI8wQoEL5C8RHACQc/E5HMshBMN46VRlg0AJbbuZNqUdIvS/Wli/6sdQoRDw6eBWg8tnaxwqlSKrJp3xap5VwykkxRHEQTBkAxPj//CrAEtOaClYkp8QEvFlURnpmd3338Tan9CHUio/TRJh8VQkA8UC6GgECgWioJCoFgIBflAWAzh7agwGIDSghgWeMe8WamjKVBXoaHlAk1dhYYKdc1Rs1BTLTkFddXRFKgph+VTzUJDg45DIlFkXe2UCJYnWZ7gRVKQDEjIXn9u7pPjCWRH/adsWVoshIWE4khUxQYAAG0o92jJ5mzff5IH/tFNi1RgngdFiicp14YhaYk9iQvqg3wgyAdqAtUjnpVNJa4k+rVkXE0MqMmYkmjuPxRTEwNaMq4kLMdyY8cQiib5gIf1+Divl/X4WI+H9ZwM8cZgpgckLwL+eJ4moW0dFkVDQwlEUNccQ4e64qiynU0b8a7c3KeuQUOFmupoMjQ0aFs5gWSRQEokLxAMT3A8KUgEw6G8XILjCYZzx3gJliu4jmIhnCwQFOGpFDyVArgUQAfKnVqqRU40pVv+3UPxlLdalMp5TwUvlfNMgTbHGF8kRpT8YvURKuPotpFQ+93wsV8d2JXuyhjZtJHNGNmMnskYWQdCH+fxsh4v6/WyHrfNQqbYFxq0S0g7SWISbIONwUx6CIomxjj3eRjHcdAoLhJRTXE0BRo6NDRHlaGpO3LKSvRAXYOm5mgq1BRo6tDQHTULTR1aZk4vUVYRJxAMR3A8yUsEyxEMR4oeguGY0llDNrU+cabDV+r0gyAJzyzBM0uouAgACJQ+Pduuyt1q54fZbJdKUoQ4KIpSuSBEWIKcblmgHMWWe0rLPaWj9NFtwxXFjCGnjUzGyGb0bJ8c+y5zCLUzRjZtZLJGVqAFL+vxDsaUbiMihmuCs2f5KoZPfGIwmLFBkqMnEB0FCB1VhoYGTX1waBeJaBaaBjQ0R8lCI05w/LiPBWEhnPQQQCzhxBIOgNxaKCNtKb2a0qsP/C/b8X5MSxh8iPVUCWIpJ5bw3mphYtbyFxyOYjmhaPj6yBGfWLOGjEQRqSMKLlN6en//gb81vR5T4tX+qrpgTV2wZm5wztzAbJHB1Z8xmImFIEjRAwpRqWBGfGNOM1gfzfpyM4sAAGhDNWZkO9Vsh5psTmRfV0ma8FQKYikvlnKeSkEs4Qqx5/PkwsNKHlYqAyUjnjUdqzPT3Zw4sL//4P+1fvLdwCGJEecX1c4rqp0fmjvbP2v02BSDwUxpsBBOeQiKEEs5sZSLnHk4ZMx2qNlOtX9vpvODmJ4yhTCHRNFTJUgVPMXiCbPvwZA0Klb3w5pLAQAOdNrTXQcGDh0YaPnX/ncO9LcAAGqL5tQFa2qDc2qDNbN8FVN30jFjZGVTyRrZrCFnTcVyrIgYrvSW+7jptqUABnOMYCGchrA+d4UGAABYqi13aXK3Jndrfd8klajOBRmpnGeLSV+5zYc5voihx7a6aZpDEuRsf9Vsf9Xy2RchS1xJHEi2HOhv+bTjy7/tfj2u9s9GQ6lFNXMDc+YGZwsF2iHSgTBrZtHAr2woWVPOGnLu1ZCzpiwbeRZTzhqyh5UkRvKykoeRJFakSToqxzoz3SQgK31lld7ySm9Fpa+80ltW6S2f3utYZFNpS3W0pTtpkioRi8NiqFgI4V1cZiBYCKc/tED5ayV/bW51PLSh0qfL3VqyNR37NqUlDL3fBATgixiuiOWLWD7EcEUsH2L5IoZkpmrcM76ExVBYDC0rPxMdKqZ6MNl6cKCluf/g5oPvt6U6isUwChbrgnNqgzUB3qeYKuosm4oDHZBXfwdCKJvK4I9S7GFns4M1fVRTsxwLAGBBO62kHdLJGnLGkOVBYVNNzcNKXtbjYSWJET2sx8NKHkb0sFK5p1RiRQ/SPFaSGAn1PNI1JvVUZ7qnI9PVmene1r69M9PTmekWaK7SW1HlK6/w5qSx0ls+RReuqJbWlupoSba1pjoOpdraUh1pPVPtr6r2V9mOHZVjcTURVxIe1lMshsJCqEQqDouhiBiKiOGQUBSRinEViOkKFsIZB0ERUhkvlfHifNrdmNdSbSNlGWlTSxhawkwdVLSEocZ0giT4EDv4j2F9DOunxQhHzuzBVZERTiteeFrxQnRoQzs3lNrf8vd9G1tS7Skt7abbiIxIESQAgKUYjuIAAARBuJHW8LMAAFeueJoXGB4AQBNUhA35JZ8nF8lJHgbJ27hFbAHOHyj2n1q8IN8YV/s7011IFP+vdVtnprsr0+PnfDlR9JUjaazwlE62QEqz9LZ0R2uyvSXV3ppqb011DGipal/l7MCs2f6qn5SsmO2vKvVEiGHz5/3qQExNxJV+JI3/6enok2MxNRFTEjzNFQuhiBQuFsNIKYvFUFgoKpGK+QINCWDGBSyEGAAAoAWKFiixdOiTvqXaWsJA/7KdmpZIawlDHzApjswXSNTggsz0W8hxLFAEhaYYG2ZffPJ+S0F2Bg8LRWGh6PSS01yLA2FMiSFp7Ex374w2dWZ6onIsJAQrvGWV3vIqb0Wlr6xUKhEZQWLEiRlcNWyjLd3ZmmpHyteSbE+o/bN8ldX+qjmB6hW1P6wJVJdKJcdSbr5ICBYJwfkjbdaS1FNxpb9PiceUeFzp3xHdHVMSMSXRp8QogoqI4WIpXCyEisVwRAqLgPfJvtF/l+3YiqUex/VSBJWf2CwxolsZgyUZLi9kzx8D4Clusj2vTBKwEGJGgxao3DL/PKAD9aSp95tawtD6jfQhpe+bpJYwLMXmgiwfYviiQY0Ms3yIpbgZHT5OM0iCKJEiJVKkvnSxa7Sh3Zvt68rkRla/6m6MKjHFVBVTkU3Fw0oiLUiMKAxKo4eVREaUGEGkBdImw74QsouMeCzyaTpWe7qzdXCcsyXZFlPild7yan9VTaD6/9VcVhOoLveUjntCU4DzBzh/bXDO8FMZIxtXElElFlf6Y0piT2xfNBMDR/v9FEmJ9PEs1LGh7Y69AwBkU4EQorZhG7pt5DvmtjVLNx0TtUmCzP9P9nO
+iBSOiOESKRIRwxEpXCIWl0iRKToMPlawEGLGDEESfBHLF7HuvCPCsaDej8JHU0vo6RZFSxhqwqBY0hVFPsQIIZYPsawfP5lOHyiCqvCWVXjLzgJLhp/NGFkkioqlyqaimGrWyMqmKptKTEkklVTTwL6sKSumIpuqK59IDofIZ9aQDyXbonJfmadktn/WnMCs5bMvrAlUV3jLClsSAVVpmJNXULAgEfyx40DHnagGACS1VJ8S71PiUblvb/x/H7XH++R4VO7jaC4iFpdIrkAWe4FUQ1UXCcGpmzg9HCyEmHGDpAkhwgmR0cZXM61qrDGlJQwjZbJ+ZsjgqhDh8NKO6QcSiSOdPZJgZA1ZNhXFyukikk+JkW497foqXyXe5OsEIQky/6Z4WU+Vr2J4t6Se6pNzAhmVY/v7D/Sko3EtkdLTRUJRiVRcKhUjgSwWw2VSJCIVT8VMYyyEmJPOiOOrjgWNlDkokGa8I60lDKVPJyli+OwjX8TimgAzDVQDodBezHTQaPC8ormuBT24mI4VVxKuQB4YaNne+Z+o3BdVYgQgImK41BOJiMVBPuDnfH7O6+d8Ad7n43x+zjcJk2+xEGIKA0nnBC/fCB1opCwtYahxQ0sYye9k7csBLW5ACPkilhYpWqQYkaIFCrVpkT5sFCk8GYnBTAwMSZd5Sso8JQCcMuRU1pD7lHiv3Ncnxwa0VHu6M62nB/RUWs8k9XRaz5AE4ed8PtYb5AM+zouUEjUCnN/P+/ys18/5JjKvBwshZhJBkAQXZLggM2T20VJtfcA0ZdtSbUuxLcWyFBul55iKbSk5u2NCWqRInmA9DJLGPMmkhlhmZoIrBnOyQaH8kfZfAwColpbW00ktndLTKSOd0jJpI92SbE/p6aSeTuvplJ5O6xmapH2cL8j7fZw3wPkGJdM3NzBnyCKfEwcLIWYKgFZ3HLUbtKGl2MlYmiP4Qcm0LcVWY8agWFrIYio2xZGMSNMixXhpRqIYiaIlmvXStEQxEs1IFOOlccEdDGbcEWheoPkSKTJ6N8VUk3oqNaiLKT2T0tMHBlpsx8FCiMEcEYIiGC/NA8brPfp0vaXalmqbWduSLVO2Tdkys3Y6oZhZy0KHGds2HUaiGImmJYr10jQSSIlmvBQj0YyHZiSKlnBwicGMPyIjiIwwMfXusRBiZigoyuRHWjftAm1oyraZtZAumrJlybbSq5kHrEG7bcoWI1I5jfTk1JHiSZIhSZogWZKkCYolCYqgWJJw29zgKxZRDKbQYCHEYI4IQRGsj2Z9o35MIDBly5QHI8uMZWYtS3Ec03IsaBsORK82tHUH2t9v6w50IJJDih98JQhaIAFB0DwJ3LZAGoaZ8muAAGjAlhYoQACapwABKJ4iSEBx6BVLLAYzNrAQYjAnBgFQIAjAcdbgsDUHQmhrDnSgpTkAQksdfAVu27Ypy1IdAICWMAEAlmoDCCxt8NUBtm5DB9iaDR3wPYnlBuPRwUiUZAiSJkmWIGmSZAgUHOcyiQSSGjzEazoxMwQshBhMgaF4EgBw1MSc46hUguQwP/rMj0odAzqW45gwl4LbOZiUqzpoAhXacFAjyVxDoKjDkknRAnk4EZen8FpPzBQFCyEGM21BCyuPO/cVZeEiUbRUx22bWUuL6Yclc9DuqiMSy1wZaCLPATSWi5pk7glA1/W0oLtrQNH48OG2a6cI93LQ5Cse/sWMF1gIMRjMyKAsXMZ7rN8SOckcFEiAykBDYKl2rgcaxUVNB1iKAwCwDWiYlq07ObsND7cdONxuqbZjQcdwLM0mSIJiSYojSYagOAoN/NICRTIEyZC0QKIRYJqnCJqgOJLiKJImKJ5EiUu0QKGcpvH438JMYbAQYjCY8SG33HPURNzhnEhxajTMa2uOYzm27ti6Ay1oabZjQsd0LNV2TGgpjtZvQhPahmPrtmNBW3NQEpOl2o7pOBb8fv5RXo7SyO1cjlJ+vhLNU4D8Xhs68PguCjPxYCHEYDBTFRT/nXjdAxSz5vKPvtfOz1dy20DrN3M9v5+vNKSNAlZaoEiWpFiC4imKJymGJFmSFiiKJUiOzHXIGUmKJdGoL55znUiwEGIwmJnO4Yhw/MhkMh7RYxuOpdiO4dgGtHUbBaMoWrV1x0patuFYKurg2Jpj645tOI7hWKpNsiTFDJdPkhh1LBetwBn1YsnhEoumY0mGJJlcgjHNU4AiaD6XbDy9x5CxEGIwGMxJ4QQDVqSItu5YmuMgddRslPc72rs0x92kd0RQUDsENMjsmI5j5pKKLdUGDrS0XMoxGkOmOBIQgBFpggJowpVkSbQaBy2EpQUKpUGhlTkAgOHK7Z5ycTOhDlvIoRY36wqtBRrlAo8DLIQYDAYzGaFYkmJJ5og7ORYAW3cy6QxPCiiPCWUtDQpnbsErtIGlOI5lOSYEAKDp2PwfgjQ135KfIZWzOEMt7qh1eLGv9toRtk48EbAQYjAYDOaYoDiS4kneO+k2FDxBcOUIDAaDwcxosBBiMBgMZkaDhRCDwWAwMxoshEcnFotls9lCezH+RKNRRVEK7cX409PTo2laob0Yf7q6ugzDKLQX409nZ6dlWYX2Yvxpb293HOfo/aYara2thXZh/MFCeHQefPDBTZs2FdqL8efuu+/++OOPC+3F+HPLLbc0NjYW2ovx5yc/+cl3331XaC/Gn4aGht7e3kJ7Mf6cffbZ0+9B07btM844o9BejD9YCDEYDAYzo8FCiMFgMJgZDRZCDAaDwcxoiNGL8ZwMvv7661tvvVXX9Qn+vcdNOp3mOI7jjnP/8UlLKpUSBIFlp9va2GQyKUkSwzCFdmScGRgY8Pl8FDWe9TAnA4lEIhgMkuR0eyiPx+OhUGj0sp9TkVgsVlxcXGgvxsA777yzcOHC0fsUQAgBAB0dHaZpTvzvxWAwGMyMorKy8qiP+4URQgwGg8FgJgnTbTgCg8FgMJgxgYUQg8FgMDMaLIQYDAaDmdFgIcRgMBjMjAbvR3iYVCq1efPmXbt2iaK4cuXKJUuWDO+zYcMGt2zS7NmzL7/88on18XjYu3fv559/7h7+9Kc/LSoqGtJHUZS//OUvbW1ty5YtW7169ZTI+d66dWtbW5t76PP5rr/++vwOmqa98sor7uGSJUvOPPPMifNvLOi6vmvXrr1794bD4ZUrV7p227ZfeeWVpqamBQsWrF27lqZH+MB+9tlnGzduDAaDa9euLSsrm0Cvj45pmk1NTbt37/Z6vddccw0y2rb9ySeffPbZZ9lsdunSpddcc83wtROffvrpvn373MM77rhj4pw+BmRZ3rFjx/79+2tqai655BJkVFX11VdfdfvU19fX19cPf+/mzZs/+OCD8vLy22+/3e/3T5DHx4aqqjt37ty3b19FRcUPf/hDZGxpaXn//ffzu1111VUlJSX5li1btnR0dKC21+u94YYbJsbh8QIL4WHWrVvX3Nx8wQUXpNPpiy+++KWXXlq9evWQPg8++OAVV1wRDocBADzPF8LNMfPxxx//8Y9/vPLKK9HhiAtXVqxYwfP8ypUrf/Ob3/zvf/
/79a9/PbE+Hg/RaPTQoUOo/e6771ZXVw8RQlmW77zzzl/84hfocPbs2RPs4bHz1FNP/e1vfxNFMRgM5gvhXXfdtWPHjrVr17766qvbtm3bsGHDkDe+8847t9xyyyOPPLJ///6zzjqrqakpEAhMrO+j8eKLLz711FPBYNC2bVcIt23bdu+9965ataqsrOyRRx55++238/UD8dprr/33v/8955xzAACT8LHs/vvv37Ztm2VZy5Ytc4Uwk8n8/Oc/f+CBB9DhnDlzhr/xT3/60+OPP/7AAw98+umnGzZsaGxsnFQLQ9etW7dx40aKompra10hzGaz7getra3tzTffvPrqq4e88bnnnqNpetGiRQCAYDA4kT6PDxAziKqqbvt3v/vdpZdeOrxPWVnZnj17JtCpceD555+/4YYbRumwffv2cDisaRqEsLGxMRAIyLI8Ud6NA7ZtV1VVvfXWW0Ps8XicIIiCuDRWbNuGEL7wwgsXXXSRa+zu7uY4rrOzE0KYSCR4nj948OCQN5577rkvvPACal966aV/+MMfJsjjYwNd1xtvvLF48WLXqGma4ziovXfvXoIgUqnUkDf+7Gc/e+yxxybMz7GCrutXv/rVmjVrXGM0GqVpepR3WZY1a9asLVu2oJ9QV1e3cePGk+3qmEDX9fjjj1955ZUjdrj//vuvueaa4fYf/ehHGzZsOLnOnUzwHOFh8iM8TdM8Hs+I3V5//fWnn35627ZtE+XXONDS0vLEE0+8/PLLAwMDw89+8sknF154ISqds2TJEpqmd+/ePeE+Hj/vvfeerus//vGPRzz73HPPPf/885P8ikasq7J9+/a6urqKigoAQFFR0RlnnPHZZ5/ldzBN84svvli+fDk6bGho+OTdlB7EAAAHeElEQVSTTybA22NnxOviOM4N8jRNYxhmxLJNjY2NTzzxxN///vdJWIXqSHVwIITPPvvs888/39TUNPxsa2trV1fXZZddhn7CZZddNiXul4tlWRs2bFi7du2IZz/88MMnn3zy3//+91TcfAoL4QgcPHjw2WefdYc48jn77LMNw2hvb1+9evU999wz8b4dB8FgcMGCBel0+o033pg/f/7wrXx6enryayZFIpHu7u6J9fGEeOmll9asWTO8phpJkpdffnlvb+/u3bvPP//8P//5zwVx77jp7e3Nvy8lJSVD7ktvby+EMBKJuB16enom1MUTw7Ks++6777777hsuhJWVlaWlpclk8sknn1yyZEk6nS6Ih2OCJMmGhoZoNLpr167zzjvvhRdeGNKhp6cnEAi4f6jDb+gkZ9OmTRRFuUOm+cybN8/j8cTj8Yceeuiyyy6bchtM4jnCoUSj0RUrVjz88MPnn3/+8LNvvfUWatxzzz0LFiy49957582bN7EOjpkbb7zxxhtvRO01a9Y89thj69evz+/AMEz+xmmmaU6hAqSJRGLz5s3ffvvt8FPBYPC9995D7auuuuq66667/fbbR8w3mZzQNG3btns4/L6gr1T3S2dq3TjHcW677TaWZdetWzf87MMPP4wajz766Nlnn/3iiy/ef//9E+vgmAmHw1u2bEHtlStX3nTTTbfffnv+FCDDMPkKYZrm1Kpg/Ne//vXWW28dcVLzmWeeQY1HHnlkwYIFb7755rXXXjux3p0QOCL8Hn19fZdccsmaNWuO+qmbO3duaWlpS0vLxDg2Xpx77rnuvLdLeXl5V1cXatu2HY1Gy8vLJ9y142T9+vX19fVoln4Uzj33XFmW+/r6JsarcSH/vgAAurq6htyX4uJihmHcPl1dXZMta/RIQAjvuOOOrq6ut99+e3QxoChq2bJlw/9oJznnnXdeNpsd8vdWXl6eTqczmQw6nEL3CwDQ29u7devWW2+9dfRukiSdfvrpU+6LEQvhYeLx+PLly6+//vpf/vKX+fY9e/YcPHgQAKDrOhwszdrY2BiNRo9a1HwyoGkaajiOs3nz5lNPPRUdbt++PRaLAQBWrFixbds29KHdunVrIBBYvHhxobwdK+vXrx8yafHll1+iHc9VVXWNmzZtCoVCpaWlE+3fCXDxxRfHYrEdO3YAAJqbm5ubmxsaGgAAnZ2djY2NAACKoq644op//vOfAADTNDdu3OjmBk9mIIT33HPPvn373n77bUEQXHtvb++XX36J2u69UxTlgw8+OOWUUwrg6BhxP2gAgE2bNoXDYfT3duDAgb179wIAqqqqFi9ejO5XJpPZunXrlLhfiJdffvm8886rra11La2trTt37gQA2LZtGAYy9vX1ffXVV1Pifn2PgqbqTC7uvPNOhmHqB3Hzpq6++uoHHngAQvj+++/PnTv32muvXbVqldfrffzxxwvq77Fy4YUXNjQ03HzzzQsXLly0aFFPTw+yV1RU/OMf/0Dtu+66q66u7rbbbisuLn7ttdcK5+zY+OKLLyRJSqfT+cba2tr169dDCH//+9//4Ac/uPnmmxsaGvx+/7/+9a8CuXl0Pvroo/r6+lmzZnm93vr6+oceegjZn3766bKysrVr11ZVVf32t79Fxmeeeeass85C7W+//TYcDt94443nnHPOBRdcgJ7VJg/ffPNNfX19TU2NIAj19fV33303hPDdd98FACxYsMD9rH333XcQwpdffrmurg69saysbMWKFTfddFNlZWVDQwNKaZ48vP766/X19aWlpaFQqL6+/plnnoEQPvnkk4sXL7755puXL1/u9/vdNOb77rtv9erVqL1ly5ZQKHTLLbeceuqp1157bcEu4Ahs2rSpvr6+oqIiEAjU19c/+uij7qkFCxa8+uqr+Z3XrVu3fPlyCGEsFotEIqtWrbruuuvC4fCaNWvcrOCpAt594jCtra2JRMI95DgOBU8HDhzgOK6qqsq27aampv379/M8v2TJkqqqqsI5Owbi8fjXX3+dSqWqqqqWLVvmTpLt3LmzurraXfTzxRdftLa2Ll26NP+hb5ITjUaTyeT8+fPzjbt3766oqAiFQrquNzY2trW1BQKBpUuXotWfk5NkMolGHRDBYLCmpga1m5qa9uzZs3DhwtNPPx1ZotFoIpFwR4NjsdhHH30UCAQuueSSybYLYyaTaW5udg99Pl9dXd2QiwUALFq0SBCEeDze09Nz2mmnAQDa29t37NihaVpdXd2IpS0KS19fn7t+HABQUlJSWVmp6/o333zT3t4eDAaXLl0aCoXQ2Y6ODsMw5s6d6x5+/vnnZWVlF1544WRbIplIJFpbW93DcDhcXV0NALAsa9euXaeeemr+OHZ3d3cmk0Gfvubm5r1791qWdcoppxx1nmISgoUQg8FgMDMaPEeIwWAwmBkNFkIMBoPBzGiwEGIwGAxmRoOFEIPBYDAzGiyEGAwGg5nRYCHEYDAYzIwGCyEGg8FgZjRYCDEYDAYzo8FCiMFgMJgZDRZCDAaDwcxosBBiMBgMZkbz/wFxdbxW1s736QAAAABJRU5ErkJggg==", - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", - " \n", + " \n", " 
\n", " \n", "\n", - "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n" + "\n" ], - "text/html": [ + "image/svg+xml": [ "\n", "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n" - ], - "text/plain": [ - "Plot{Plots.GRBackend() n=5}" + "\n" ] }, - "execution_count": 15, "metadata": {}, - "output_type": "execute_result" + "execution_count": 15 } ], + "cell_type": "code", "source": [ "foreach(rates) do η\n", " pipe.transformed_target_model_deterministic.model.optimiser.eta = η\n", @@ -848,84 +772,80 @@ " plot!(1:length(losses), losses, label=η)\n", "end\n", "plt" - ] + ], + "metadata": {}, + "execution_count": 15 }, { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, "outputs": [], + "cell_type": "code", "source": [ "savefig(joinpath(\"assets\", \"learning_rate.png\"))" - ] + ], + "metadata": {}, + "execution_count": 16 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "We'll go with the second most conservative rate for now:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { - "text/plain": [ - "0.0001" - ] + "text/plain": "0.0001" }, - "execution_count": 17, "metadata": {}, - "output_type": "execute_result" + "execution_count": 17 } ], + "cell_type": "code", "source": [ "pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001" - ] + ], + "metadata": {}, + "execution_count": 17 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## Wrapping in iteration control" - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "We want a model that trains until an out-of-sample loss satisfies\n", "the `NumberSinceBest(6)` stopping criterion. We'll add some fallback\n", "stopping criterion `InvalidValue` and `TimeLimit(1/60)`, and\n", "controls to print traces of the losses." 
- ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "For initializing or clearing the traces:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { - "text/plain": [ - "clear (generic function with 1 method)" - ] + "text/plain": "clear (generic function with 1 method)" }, - "execution_count": 18, "metadata": {}, - "output_type": "execute_result" + "execution_count": 18 } ], + "cell_type": "code", "source": [ "clear() = begin\n", " global losses = []\n", @@ -933,71 +853,60 @@ " global epochs = []\n", " return nothing\n", "end" - ] + ], + "metadata": {}, + "execution_count": 18 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "And to update the traces:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { - "text/plain": [ - "update_epochs (generic function with 1 method)" - ] + "text/plain": "update_epochs (generic function with 1 method)" }, - "execution_count": 19, "metadata": {}, - "output_type": "execute_result" + "execution_count": 19 } ], + "cell_type": "code", "source": [ "update_loss(loss) = push!(losses, loss)\n", "update_training_loss(report) =\n", " push!(training_losses,\n", " report.transformed_target_model_deterministic.model.training_losses[end])\n", "update_epochs(epoch) = push!(epochs, epoch)" - ] + ], + "metadata": {}, + "execution_count": 19 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "The controls to apply (see\n", "[here](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/#Controls-provided)\n", "for the complete list):" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { - "text/plain": [ - "7-element Vector{Any}:\n", - " Step(1)\n", - " NumberSinceBest(6)\n", - " InvalidValue()\n", - " TimeLimit(Dates.Millisecond(60000))\n", - " WithLossDo{typeof(Main.##283.update_loss)}(Main.##283.update_loss, false, nothing)\n", - " WithReportDo{typeof(Main.##283.update_training_loss)}(Main.##283.update_training_loss, false, nothing)\n", - " WithIterationsDo{typeof(Main.##283.update_epochs)}(Main.##283.update_epochs, false, nothing)" - ] + "text/plain": "7-element Vector{Any}:\n IterationControl.Step(1)\n EarlyStopping.NumberSinceBest(6)\n EarlyStopping.InvalidValue()\n EarlyStopping.TimeLimit(Dates.Millisecond(60000))\n IterationControl.WithLossDo{typeof(Main.##291.update_loss)}(Main.##291.update_loss, false, nothing)\n MLJIteration.WithReportDo{typeof(Main.##291.update_training_loss)}(Main.##291.update_training_loss, false, nothing)\n MLJIteration.WithIterationsDo{typeof(Main.##291.update_epochs)}(Main.##291.update_epochs, false, nothing)" }, - "execution_count": 20, "metadata": {}, - "output_type": "execute_result" + "execution_count": 20 } ], + "cell_type": "code", "source": [ "controls=[Step(1),\n", " NumberSinceBest(6),\n", @@ -1006,68 +915,48 @@ " WithLossDo(update_loss),\n", " WithReportDo(update_training_loss),\n", " WithIterationsDo(update_epochs)]" - ] + ], + "metadata": {}, + "execution_count": 20 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "Next we create a \"self-iterating\" version of the pipeline. 
Note\n", "that the iteration parameter is a nested hyperparameter:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { - "text/plain": [ - "DeterministicIteratedModel(\n", - " model = DeterministicPipeline(\n", - " standardizer = Standardizer,\n", - " transformed_target_model_deterministic = TransformedTargetModelDeterministic{NeuralNetworkRegressor{GenericBuilder{#1#2},…}},\n", - " cache = true),\n", - " controls = Any[Step(1), NumberSinceBest(6), InvalidValue(), TimeLimit(Dates.Millisecond(60000)), WithLossDo{typeof(Main.##283.update_loss)}(Main.##283.update_loss, false, nothing), WithReportDo{typeof(Main.##283.update_training_loss)}(Main.##283.update_training_loss, false, nothing), WithIterationsDo{typeof(Main.##283.update_epochs)}(Main.##283.update_epochs, false, nothing)],\n", - " resampling = Holdout(\n", - " fraction_train = 0.8,\n", - " shuffle = false,\n", - " rng = Random._GLOBAL_RNG()),\n", - " measure = LPLoss(p = 2),\n", - " weights = nothing,\n", - " class_weights = nothing,\n", - " operation = MLJModelInterface.predict,\n", - " retrain = false,\n", - " check_measure = true,\n", - " iteration_parameter = nothing,\n", - " cache = true)" - ] + "text/plain": "DeterministicIteratedModel(\n model = DeterministicPipeline(\n standardizer = Standardizer,\n transformed_target_model_deterministic = TransformedTargetModelDeterministic{NeuralNetworkRegressor{GenericBuilder{#1#2},…}},\n cache = true),\n controls = Any[IterationControl.Step(1), EarlyStopping.NumberSinceBest(6), EarlyStopping.InvalidValue(), EarlyStopping.TimeLimit(Dates.Millisecond(60000)), IterationControl.WithLossDo{typeof(Main.##291.update_loss)}(Main.##291.update_loss, false, nothing), MLJIteration.WithReportDo{typeof(Main.##291.update_training_loss)}(Main.##291.update_training_loss, false, nothing), MLJIteration.WithIterationsDo{typeof(Main.##291.update_epochs)}(Main.##291.update_epochs, false, nothing)],\n resampling = Holdout(\n fraction_train = 0.8,\n shuffle = false,\n rng = Random._GLOBAL_RNG()),\n measure = LPLoss(p = 2),\n weights = nothing,\n class_weights = nothing,\n operation = MLJModelInterface.predict,\n retrain = false,\n check_measure = true,\n iteration_parameter = nothing,\n cache = true)" }, - "execution_count": 21, "metadata": {}, - "output_type": "execute_result" + "execution_count": 21 } ], + "cell_type": "code", "source": [ "iterated_pipe =\n", " IteratedModel(model=pipe,\n", " controls=controls,\n", " resampling=Holdout(fraction_train=0.8),\n", " measure = l2)" - ] + ], + "metadata": {}, + "execution_count": 21 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "Training the wrapped model on all the train/validation data:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, "outputs": [ { "name": "stdout", @@ -1076,383 +965,376 @@ "[ Info: Training Machine{DeterministicIteratedModel{DeterministicPipeline{NamedTuple{,…},…}},…}.\n", "[ Info: final loss: 18.029846309038444\n", "[ Info: final training loss: 0.15870439154333713\n", - "[ Info: Stop triggered by NumberSinceBest(6) stopping criterion. \n", + "[ Info: Stop triggered by EarlyStopping.NumberSinceBest(6) stopping criterion. \n", "[ Info: Total of 13 iterations. 
\n" ] }, { + "output_type": "execute_result", "data": { - "text/plain": [ - "Machine{DeterministicIteratedModel{DeterministicPipeline{NamedTuple{,…},…}},…} trained 1 time; does not cache data\n", - " model: MLJIteration.DeterministicIteratedModel{MLJBase.DeterministicPipeline{NamedTuple{(:standardizer, :transformed_target_model_deterministic), Tuple{Unsupervised, Deterministic}}, MLJModelInterface.predict}}\n", - " args: \n", - " 1:\tSource @189 ⏎ `Table{AbstractVector{Continuous}}`\n", - " 2:\tSource @474 ⏎ `AbstractVector{Continuous}`\n" - ] + "text/plain": "Machine{DeterministicIteratedModel{DeterministicPipeline{NamedTuple{,…},…}},…} trained 1 time; does not cache data\n model: MLJIteration.DeterministicIteratedModel{MLJBase.DeterministicPipeline{NamedTuple{(:standardizer, :transformed_target_model_deterministic), Tuple{MLJModelInterface.Unsupervised, MLJModelInterface.Deterministic}}, MLJModelInterface.predict}}\n args: \n 1:\tSource @757 ⏎ `ScientificTypesBase.Table{AbstractVector{ScientificTypesBase.Continuous}}`\n 2:\tSource @577 ⏎ `AbstractVector{ScientificTypesBase.Continuous}`\n" }, - "execution_count": 22, "metadata": {}, - "output_type": "execute_result" + "execution_count": 22 } ], + "cell_type": "code", "source": [ "clear()\n", "mach = machine(iterated_pipe, X, y)\n", "fit!(mach)" - ] + ], + "metadata": {}, + "execution_count": 22 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "And plotting the traces:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, "outputs": [ { + "output_type": "execute_result", "data": { + "text/plain": "Plot{Plots.GRBackend() n=2}", "image/png": "iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQCAIAAAD9V4nPAAAABmJLR0QA/wD/AP+gvaeTAAAgAElEQVR4nOzdZ1wUV9sH4DNb6GVXUBAQpEm1ICpoIiIiYO+KGkuwRtGoMbb4PHaNHTX2XmIsRFGjETuaaOwVbKBgQ3qvW+b9MI/7EhwU0Z1t/+tDfrOzszM362bvPXPucw5F0zQBAADQVTxVBwAAAKBKSIQAAKDTkAgBAECnIRECAIBOQyIEAACdhkQIAAA6DYkQAAB0GhIhAADoNCRCAADQaUiEAACg0zQ1ERYXFz98+FDVUXBBLperOgTNI5PJVB2C5pHL5ZhwsQbwYasBzt603NzcpKSkjx6mqYkwISFh8ODBqo5C6WiaLikpUXUUmqe4uFjVIWie8vJyfKfXAD5sNcDZm3bhwoXJkyd/9DBNTYQAAABfBBIhAADoNCRCAADQaUiEAACg05AIAQBAp2l5IpTL5ds3bmzfqFErZ+dJERGZmZmqjggAANSLQNUBKNfsH37I2rp1f0GBKSF/pKR0vnjx3L17RkZGqo4LAADUhTa3CMvKyk78+uuagoJahAgJ6SGT9Xz16sC+faqOCwAA1Ig2J8KkpCQviqr4F/qWlT26dk1lAQEAgPrR5luj9evXf/LvKaPiBQKhWHzz5k1VhfSpaJouLS01NDRUdSDccXZ2FolEqo4CQLlevHiRkZGh6ihUpri4uAZdVHw+v3HjxhRFffF4tDkRGhkZNQsNnfv77zNKSgSEXKGolXp6ZN++P0+fVnVon4CmaWX8w6unt2/fRkREzJ07V9WBAChXv379cnNzjY2NVR2IatTsay0+Pv7u3bsNGjT44vFocyIkhCzfunVF/fpf7d79tkDSqLGXh4EgIiKid+/eqo4L2C1cuLCoqEjVUQAonUwm27VrV/PmzVUdiCbx9PRU0nS42txHSAjR09ObNm/e1eRk/4MvBmw+qbO/vwAAoCpanggVPEVUQi6WmAEAgMp0JRF6iUl8jqqDAAAA9aM7iZCKz0GLEAAAKtOVROhqRr0qouVIhQAA8M6GDRtsbW21vGpUQcAjTqZUkVTVcQAAfAn37t27d++eTCZzdXX18/Pj8/mqjkjz3LlzZ/r06XFxcbrSIiSEeImpIoluNQnPnj27cuXKK1eucHnRK1eunD9/nssrAuiUf/75p4mnZ+PGjb+PiJg2atRXX33lYm9/6NAhjsNYvXr1BwY7ZWdnb9y4kct4amD79u19+vRp1KiRDiVCTzFVqLEtwocPH3777bef9JIdO3ZMnDixvLxcSSFVJTY2NiYmhuOLAuiIc+fOBQYEeDx58pCQLIkktazsFSE9U1P79O69ZcuWGp924sSJn/qLOSoq6sOJcP369TWOhxtPnjzx9vYmutNHSAjxEpFCiaqDYFNSUvLRY/Ly8i5fvvxJ5/n777+HDBkyderUli1bVjqyvLz8/U9wQUEB62nz8/MV26yf+7KyMonkQ+9sdf5AAPiosrKyiG++GSST/SaTub/baUvIcppeStPfR0a+fv26Zme+cePG+1O+lZWVFRcXK7Yr/ap+9uxZnTp1Ku4pLCxUbLu4uNy5c6fiszKZ7P2vgo9+e9TAtGnTqHd69OiRl5dX1ZHZ2dmmpqZEtxKhWO36CE+fPu3i4uLh4WFvb3/w4EFmZ+vWrRU5b82aNRMmTCCE9O7d+/nz587Ozs7OzoqPpsKBAwfs7e09PDxcXV3PnDlDCJkwYcK+ffsWLVrk7OyckJCgOLKoqKhLly7169dv1qyZg4PD27dvmYPt7Ox8fX3r1aunuMHy/fffjxs3ztfX183NrXHjxk+ePOnYsaObm1u9evXu3btHCMnLy6tVq9bcuXO9vLxsbW
0nT55M05XvPO/du9fJyalRo0aKwACgxk6dOpWalrZYLn//qQmE1CHk119/rcFpFy1adOPGjVGjRjk7O69atYr5qpk2bZqTk9PYsWNfv37t4+Pj7u7u5OTUrl075kuDEGJnZ5eWlkYI6du37/z58xs3buzq6tq0adPU1FRCyNOnT728vJgj3dzc1qxZ4+bmZmdn17VrVybzlZWVDRkyxN7e3svLa/bs2XXr1q3Ze/K+n3/+mX7n8OHD5ubmVR1paWnJ/NDXlWIZQoiLGVX63uw86SXkXjYXHYe19ElTy3/NrZeVldW3b9/9+/eHhITcuHEjMDCwWbNmjo6O+fn5Uun/MnZpaSmT9qKjo4cMGfL48eP3z/zs2bNhw4ZduHDB19f35MmTffr0SUxMjIqKysnJ8fHxYfKowqFDh6RS6evXrymKysnJYabzHj169MqVKymKunv3bvv27UNDQ42NjYuLi8+fP3/58uXatWv37NkzMDAwNja2YcOGc+bMmTt3bnR0NE3TOTk5ubm5iYmJ2dnZ/v7+rVq16tmzp+JaN2/enDZt2qVLlxwcHG7evNmlS5eHDx9+4EMJAB92586dRnp6tUpL33+KR0jr8vI7t27V4LTTp08/ceLEjz/+2LVrV0JIYmLi8+fPRSLR69ev5XJ5UVHRsWPH7OzsaJqePHny3Llz161bRwjJysqSy+WEkMLCwiNHjly8eNHMzGzgwIGrV69etGiRVCrNzs5mzp+Xl3f16tVHjx5JpVI/P7/Dhw/37dt3/fr1SUlJycnJBgYGw4cPz8lRwUBvNzc35me9DiVCAY8YvffnPs6jF99TyuR1lTS1oJpa/qus6+LFi05OTiEhIYSQZs2atW7dOjY2dvTo0R89lUwmU2REDw+PU6dOBQQE+Pr6EkLCwsLq169/6dKl7t27K45/9uxZaWkpIcTe3t7Gxubu3bvr16/v0qVLvXr1mANsbW137Njx/Pnz8vJymUz29OnTJk2aEELCw8OZWx+BgYFlZWUNGzYkhAQFBSkar4SQiRMnEkJq1ao1ZMiQY8eOVUyEBw4caNas2dOnT58+fUoIMTc3v3XrVtu2bWvy9gEAIcXFxcbv3XdRMKHp11V0cHwqHo83adIkZsPU1PTFixcHDhxIT09PTU1NSkp6//iIiAjmN25ISAhricC4ceMEAoFAIGjTps2jR48IISdPnhwxYgTzWzwyMrJmbdnP9O2337Zu3XrkyJE6lAgJIcbv/bmtranTHVTzJmRnZ1tYWCgeWlpaZmVlVTrm/ZuNhJC8vDxF4cxff/2VlZVV6TyZmZkVj//vf//LpKLly5e3a9duw4YNv/766/Tp05s2bfr7778bGxv7+/u3bdu2devWPB5v9+7dis5CMzMzZkMoFCq29fT0KnYVKC5taWl57d9rPaalpb18+VKRNQMCAhQnAYAaqFev3m6apglhXbjhiVDo4eT0RS5kbm6up6fHbJ8+fXrYsGFTpkzx9fU1NDSs1PPHqOr74f0D9PX1mQPy8/MVO1V1o6hhw4arVq365ptvdCsRmqjTn+vs7Pzw4UOpVCoQCAgh9+7d69SpEyHE3NxccZcgMTGR2RAKhYr7pbVq1bp69WrF80RHRzPbUqk0ISHBxcWl4oX27NlT8WHXrl27du1aUlISGhq6b9++r776qqio6JdffiGE5OXlfeoNivj4eGYG/QcPHjg7O1d8ys3NLT8/X/1LqAE0RYcOHcaPG3eckM7vPfWYkIsy2eROnWp2ZqFQWNXCDkeOHBk9enRkZCQhZO3atTU7//vc3d1v3LjRo0cPQsj169e/1Gk/1ZAhQ4YMGaJOmUH5TISqjqCCgIAAa2vrMWPGDB48OCYmpqioiLmf2bZt28WLF5ubm9+/f//EiRMdO3YkhDg5OWVkZKxbt6527do9evRgcieje/fuP/3009SpU7t06bJ9+3ZbW9uAgICqLhodHZ2VldW4ceOCgoKXL196eHjY2dnl5OTs2rXL0dFx+fLlQuGnvUc//fTTjBkznj9//uuvv1Yqvx49evT69etnzJjRuXPnwsLCU6dO/fTTT2Kx+NPeJgB4x9HRcdTIkRFbtx6XSCou4JRMSC89vdZ+fqGhoTU7s5eX144dO8rLy729vfX19Ss+1aBBg127dgUEBLx69eqXX375UsujTpo0KSgoyMDAQCQS7d27V7WrrupWIjQWqtEKtzwe7+zZs8uWLVuxYoWrq+uVK1cMDAwIIdOnT+fz+VFRUf7+/rt27crNzSWEiMXi2NjYkydPvnjxolu3bhXPY2Bg8M8//zDnadiw4cqVK3k8HiGkU6dONjY2lS7aoEGDbdu2HT9+XCQSrVmzhumxO3r06Pr16ymKioyMDAwMZPoOw8LCLC0tmVc1bdpUsWq8jY3NyJEjFSdcuHBhVFQUj8c7ceKEm5sbIaRly5ZMkbRYLL59+/aqVauWLl1qYmLSqlUrLIMF8JlWrlqVnZXVMjq6E5//lVSqR8gNHu8QRTVt0uRgTEyN08mCBQt27Nhx+/ZtCwsLHx+f77//XvHUd999V1xcvGTJEmdn5z179sTFxTH7x48fb2JiQgjp06ePokDU09OzT58+hBALCwvFF0VkZKSiD6Vt27bMJDje3t6XLl36/fffy8vLlyxZMnjw4JpF/mXQmun69evNmjX71Ff16tX74MGDyohHBzE3UWUy2Rc854IFC2bMmPH558nPz//8k+iakpISiUSi6ig0T80+bM2bN7927VqNLxobGzuwf39vFxdPJ6funTvv3btXKpXW+GwqIZVKnz59StN0eXn50KFDhw0b9tGXeHh4JCQkfNJVDh8+3L17948epqwWYUpKyq1bt3JychwdHdu0acO0UQgh9+7du3v3rrGxcWBgYK1atd5/YVxcnGJ8pZWVFVOp+KWotPENAPBlhISEMAXnmksqlfbs2TM/P18ikXz11VfMkAxVUVYi7Nu3r4ODg7m5+dKlS62srGJjY/X19X/88cfDhw/7+/vn5uYOHz48NjaWqbOoqFevXm5ubkZGRoSQNm3afNlECF+QSCSiq67kBgD4AH19fWYMnzpQViJUlDUWFxfb29v/9ddf7dq1Gzt27OLFi5nW4fjx4xctWsQ6UeyOHTtcXV2VFBgAAEBFSp9irbS0VCqV1q5dmxBSv359xT1Sa2vrqqaYu379+qlTp5jJewAAAJRKiVWjCxYsOHfu3MOHD5cuXdqoUaOKT2VkZKxdu5b1prCFhcXevXtLSkquXbu2fPnyigWKlWRnZy9cuFDxcNKkSR9dkQu38tSfXC7//El4JRLJF5/JV+spKmVUHYiGqdmHDe9zzUil0k96t6saHFmJEhNh165dfX194+Li5s6d26FDBzs7O2Z/YWFhjx49evfuXWkYACMhIYHJZ+fOnevYsWO3bt2srKxYzy+XyxVz2ZHq/cH48Kk/uVxezc/uB8hkss8/ia6RyWTMhP2qDkTD4MPGpU99t+VyeXW+9pWYCBs2bNiwYcOwsLC7d+/u3r17+vTphJDi4uKuXbu6ublFRUWxvkrRq
gsKCjIzM3v48GFVidDS0nLZsmWfFBKPx7ty5QrSodp68OCBo6MjM57yc0gkks8/iQ5iZoNUdRQapmYfNoqizp49m5ycrISItFZ+fr6+vv4nvdtCobA6v+2U/qGnaTojI4OZSq68vLxPnz61a9fetGlTxeDy8/MFAgFTKarw/PnzrKwse3v7LxjM5MmTly9f/ue9Fxb6pK6RZvzylclkH73lq03eXz0RQPvMmjVr586dt2q0WIQWqNnXWuvWra2trZURj1IS4ZMnT0aPHh0QECAUCs+dO5eXl9e/f39CyJQpU86ePfvNN9+MGTOGEFK3bt3Zs2cTQvr37+/j4zN//vxTp06tX7/e19e3tLR0586dw4cPd/pCc8gy/Pz8Dhw4sOCOvFBCL2quAdmFpuni4mJMyAKgZTp16tSppvOCaoGCggJmRVw1oZRE6OjoOHHixNu3b5eXl3/77be9e/dmGrNdu3b19PRUHKaYtWvChAnM4PoWLVowy3wYGhpu3749ODhYGeF5isiOp8o4MQAAaB6lJEKhUNilS5cuXbpU2h8UFBQUFPT+8e3bt2c2RCLRkCFDlBFSRV5iKj6HZYlnAADQQUofR6iGnM2o1GK6SKrqOAAAQA3oYiLkU8TVjHqci9pRAADQyURImLujSIQAAKCzidBTTMXnIBECAICuJkIvMUnIUXUQAACgBnQ1EYrQIgQAAEJ0NhE6mVFpJSgcBQAAXU2EfIq4mlOPUC8DAKDzdDQRkv8Nq0ciBADQdbqbCD1FVAJahAAAOk93E6GXmKBFCAAAupwIqXiMoAAA0Hm6mwidTKl0FI4CAOg83U2EPIo0MKceopsQAEC36W4iJCgcBQAAHU+EnmIqAYkQAEC36XQi9BKhcBQAQNfpdiIUU/G5qg4CAABUSqcToaMplVlKF0hUHQcAAKiOTidCpnAUM44CAOgynU6EBBOtAQDoPJZEmJGR4ezsfPr0ae6j4R5GUAAA6DiWRCgUCp89e2ZsbMx9NNzDjKMAADqOJRGKRCJ/f/+zZ89yHw33MOMoAICOE7DunTNnzpAhQyQSSZcuXSwsLCo+5eTkxElgHKlvQmWV0QUSYipUdSgAAMC5R48esSfCwYMHp6WlzZs3b968eZWeommtupGoKBxtXptSdSwAAMAduVzev3//a9eusSfCpUuXlpSUcByTqniJqPgcJEIAAN3y559/Xr9+/d69e+yJcNCgQRwHpEIoHAUA0EEHDx7s06ePqakpeyJkyOXyV69evXr1qm7duvXq1RMIPnSw5vISU+seylUdBQAAfAGJiYnJycnM9t27d6XSKledffHihZ+fH6mqWIYQsmrVqvnz52dmZjIPzczMJk6c+J///IfP53/JkNWAp5gkoHAUAEArXLhwYf/+/cx2RkZGnTp1qjqypKREX1+fVJUIo6KiJk6cGBAQ0Ldv37p162ZmZh49enTu3LnFxcVLlixRRugqhMJRAACtMXz48OHDhzPbMTExO3furOpIa2vrrKwswjqOUC6XL1q0aMSIEXFxcWPHju3Zs+fIkSP/+OOPWbNmrV69uqCgQEnRqwqPIm5Yqh4AQMe0aNHi0qVLhDURpqenp6enf/fdd5X2f/fdd2VlZU+ePOEiQG6hXgYAQNdERET89ddfixYtYkmEBgYGhBCmwVgRs8fIyIiD+DiGRAgAoGusrKwuXrz4/Plz9inWfH19J02a9OLFC8XOzMzMyMhIOzs7Nzc3DuPkCNagAADQQd7e3ps2bWIvllm7dm1wcLCLi4u/v7+NjU1GRsY///wjkUgOHz7M42nhyk1eYoIZRwEAdBN7VvPz87t9+/bw4cNzcnIuXryYmprar1+/mzdvdurUieP4uFHflMopo/PKVR0HAABwjqVFWFRUNG/evPDw8HXr1nEfkEpQhLiZU49yab86mGgNAEC3sLQICwsLFy9eXFZWxn00KuQlpuLRTQgAoHtYEmHt2rVtbGwSExO5j0aFPMVUAgpHAQB0D8utUR6Pt3z58hkzZri7u/v6+tbgpGVlZUeOHImPj5fL5Y0aNerZs6diYrYnT57s3buXEDJgwIAGDRq8/9rCwsKtW7e+ffu2TZs2YWFhNbh6zXiJqfNvMOMoAIDOYS+W2bZtW25ubvPmze3s7Jr9W3VOmpaWduDAAT09PVNT0zlz5gwYMIDZ//jx4xYtWkgkEqlU2qJFi0ePHlV6oVwuDwoKOnfunJWV1ahRo7jspPQSoXAUAEAXsQ+fcHBwEIlENT6pvb19dHQ0s92lSxdvb++SkhJDQ8OoqKiBAwcuWLCAEJKXlxcVFbVhw4aKLzx16lRGRsbly5cFAoG3t3dERMSoUaO4mebbwZTKLafzyom5HgdXAwAAdcGSCOVy+fTp0y0sLMzNzT//AteuXXNycmJmq7lw4cLPP//M7A8JCfnxxx8rHXzhwoWgoCBmvafAwMC0tLTExERuhvBThLiLqIe5tD8KRwEAdAlLInz79q2zs/OZM2fatWv3Oaf29/dPTk6mKCo2NpaiKEJIamqqYkUMKyurN2/eVHpJamqqjY3N/yITCCwsLN68eVNVIkxLS1NMMU4IiYqK+swVE93NeHfSZU3M1Khkhqbp0tJS7Vv6StlKS0uFQiwm8mlKS0sFAoG2LjuqPPiw1QBnb5pEIqHpj3+ls3zoxWLxFwkxNjY2PT1948aNffv2vXPnjoGBgUAgkMlkzLNSqfT9qwgEArn8/ytWJBLJByIxMDCo2Gepr6//mbPeeInpJ4VEKFSjFiFN00KhEP+bfSq8aTUgk8mQCGsAH7Ya4OxN4/P5TDPsw1g+9IaGhuHh4Vu2bPnMFqG5ubm5ufmyZcu2bdt2/fr11q1b29jYKFqBb968UTT+FGxsbJ4/f85sl5SU5Obmvn9MxfOPHj36cyKspKEFfT5eplbNL5qm+Xy+WoWkEfCm1QD/HVUHomHwptUAZ29aNVtH7L/+WrVqNXPmTH9//86dO1da3nfkyJEfPWlpaSnTKUgISUpKys/Pt7OzI4R07tw5Ojq6b9++hJDo6OjOnTszx1y+fNnT01MkEnXu3LlLly75+flmZmYxMTFubm5OTk7V+TO+CMw4CgCgg9gT4ezZs7OysrKysq5evVrpqeokwl27dq1bt65x48ZlZWWnTp2aNm2ao6MjIWT8+PH+/v7du3enKOr27dsrV65kjg8JCYmJiQkODvbz8wsMDGzdunWLFi1iYmK2bt36eX/dp7E3ofLL6dxyIkLhKACAzmBPhI8fP67YV/ephg0b5uPj8/jxY0NDwwULFjg7OzP7ra2t7927d+rUKULIzp07zczMmP1nzpzx8PBgtvfv33/hwoWXL19Onz6dy+YgqVA42hKFowAAOoM9EX7mwAk+n9+8efPmzZu//5SZmVnv3r0r7fT391dsUxTVtm3bz7n652AmWkMiBADQHVVWiEkkklOnTsXHx0ul0hkzZhBCHj16ZGRkZG9vz2F4XMNS9QAAuoY9Eb558yYsLOz+/fv6+vqWlpZMIty8efOVK1cuX77MbYSc8hJRp19hxlEA
AB3CXlo6YsSIkpKS69evnzx5UrGzX79+V69ezcvL4yo2FfASk/hcVQcBAAAcYl+PMDY2NioqqlmzZhWHIrq4uMjl8pcvX3IYHtfqmVAF5XQulqoHANAZLIkwPz9fJpO9X7EplUoJIeXl2pwlFIWjqg4EAAA4wr4wr7Gx8fsjCE+fPi0QCFxcXDgJTGVQLwMAoFNYimWEQuHAgQOnTZtmY2Ojp6dHCJHL5SdOnJg0aVLv3r0Vg/+0FRIhAIBOYa8aXb58+ZMnT0JDQw0MDKRSqUgkKigoaNKkyZo1aziOj3teYioWhaMAADqDPRGamJicPXv22LFjzEq5JiYmgYGB4eHhTANRu3mKSAIKRwEAdEaVA+p5PF63bt26devGZTTqoJ4JVSihc8qIWF/VoQAAgPJ91gJ+WgmFowAAOgWJkIWXCPUyAAC6AomQhZeYikeLEABANyARsmDWoFB1FAAAwAUkQhZYqh4AQHdUKxHev39/586dd+/eVXY0asLOmCqS0tllqo4DAACUjz0RhoSEzJw5k9k+evSoj4/P0KFDmzZtun37dg5jUxmKEA8UjgIA6AaWRCiRSM6fP9+uXTvm4axZs/z8/JKSkn744Yfp06dLJBJuI1QNTLQGAKAjWBJhVlaWVCp1cHAghLx+/frOnTs//PCDk5PTlClT0tLSUlJSOA9SBTxFVAJahAAAOoAlERoZGRFCCgsLCSHHjh3j8XiBgYGEEGNjY0KIdi/Mq4AWIQCAjmBJhGZmZs7OzitXrnz48OGmTZv8/f1r1apFCHn27BkhxMrKiusYVcFLTJAIAQB0Aftco8uWLQsPD9+xY4eBgcEff/zB7Dx8+LCNjY2dnR2H4amMnTFVIiXZZaQWZhwFANBq7Imwe/fuSUlJDx488Pb2trW1ZXY2atRo8+bNHMamYh4iKiGH/tqaUnUgAACgFDk5Ofv27aty9QlbW1tFCmR07dpV+VGpEWaiNSRCAACtVFhY6Ofn5+PjU+WA+gcPHowePbp169atWrVi9mzZsuXgwYNcRah6mGgNAECL7d2719LSct++feyJ8OzZs82bNz916pRQKHzx4gWzs7i4eOrUqRwGqWIoHAUA0GJnzpzp2LEjRVHst0YjIyM7dOiwb9++y5cvf/PNN8zOkJCQ77//PjU1tW7duhyGqjJeIhSOAgBomD///DMuLo7Zfvr0aWlpaVVHvnnzJiQkhLAWy2RmZj569GjXrl16enoU9f89ZEy9qO4kQltjqkxOMkuJpYGqQwEAgOoxNDQUi8XMtrGxcUlJSVVH8ng8mUxGWBOhXC4nhAgElZ9KT08nhOjr69B4AmbG0daolwEA0BCBgYHMJDCEkJiYmJ07d1Z1pI2Nzdu3bwnrgPratWvb2dlFR0cTQiq2CLdu3SoWi93c3L5s0OrME0vVAwBoqQ4dOhw7dkwul7O0CCmKmjFjRmRkZH5+vqOjo1QqPXfu3P79+7ds2bJgwYL3W4pazEuMGUcBALRT3759o6KiOnbsyJ7Vvvvuu6Kiojlz5jAzjrZr105PT2/KlClTpkzhNk4V8xJTx17IVR0FAAB8eYaGhn///fexY8dYEiFN07m5uZGRkSNHjrx8+XJqamqtWrVatmxZp04d7gNVLcw4CgCgxYyMjPr168eSCN++fWtjY3Py5MnQ0NCwsDDuI1MfNkaUBIWjAABajaVYxtzcnM/n61R16Ad4YGFCAACtxr4eYY8ePfbs2cN9NGoIE60BAGg39mKZLl26/PDDD69everSpUulrsE+ffpwEpi68MIICgAArcaeCKdMmZKZmRkbGxsbG1vpKZrWrazgJaaOpKBwFABAa7EnwqtXrzITzwCzGJOqowAAAGVhT4QODg4cx6G26hoRKQpHAQC0l1KmiSkvL4+Ojj537lx2dra3t/f48eMtLS0JISdOnLh48WLFIxcuXMjj/atgZ+7cucXFxcx2o0aNBgwYoIwIPwlTOBqAGUcBALRRlYnwt99+279/f1JSkoV9l1cAACAASURBVCItMZKSkj560tevX2/atKl///42Njbbt28PDAy8deuWnp6ehYWFk5MTc8wff/yRmppaKQsSQlavXj1hwgSmQkdNhvAzCxMiEQIAaCX2RLh8+fLJkye3bdu2vLzc3Nzc2dn54sWLRUVF4eHh1Tmpo6PjhQsXmO2QkBAzM7P4+HgfHx8/Pz8/Pz9m/4YNG0aMGMH68n79+rm6un7yn6I0WKEXAECLsa9Qv3z58gkTJpw7d65Vq1YdO3Y8ePBgYmLi119//X4D7qNSU1NlMpm1tXXFnbdu3Xr06FFVaXXRokWRkZG7du1Sk4IdJEIAAC3G0iLMy8tLTU0dPHgwIYSiKGZ5X1NT01WrVnl5eS1cuJDp8KsOqVQ6bNiwMWPGVFrLd9u2bb169VKsnVjRgAEDvL29S0tLf/7556NHjzKrQbF69epVUFCQ4uGhQ4eUtDKGg5CKzxEy849zjKbpkpISXRuy8vmKiooqriAG1VFaWioQCHRqeZkvAh+2GuDsTSstLa3O92eVC/MyU6zVqVPn1atXzP569erJZLKUlJRqJkKZTDZkyBB9ff1ly5ZVimzv3r2///4766tWr17NbPTp08fe3v7Ro0fu7u6sR9aqVWvGjBmKh2ZmZjVosFaHiwmR0ZISgUltzgtHaZrm8XjGxsZcX1jD0TRtYmKi6ig0jOAdVQeiYfBhqwHO3jQDA4PqZFyWD71YLLawsEhMTPT09GzYsOHWrVvfvHljY2PDTLpma2tbncvL5fIRI0akpaUdO3ZMT0+v4lOHDh0SiURt2rT58Bnq1q1rbm6elpZWVSI0MjIKDg6uTjCfj5lorU1d/O4DANA27E2o0NDQ3377jRDSu3dvMzMzZ2dnR0fH0aNH9+vXr1JvHyuapiMjI588eRITE2NoaFjp2W3btg0bNqxi6y02NvbGjRuEkOzs7KKiImZndHR0aWlpw4YNa/aHfVmYaA0AQFux3wb59ddfmQ19ff2///57586dz58/9/HxGTZsWHVOeu/evfXr15uYmNjb2zN79u7dy6zolJycHBcXt3379orHr1692sfHp1mzZrdu3erVq1eDBg1KS0vfvn27Y8eOWrVq1fyP+3IwvwwAgLb6eH+AtbX11KlTP+mk3t7e2dnZFfcobgfb2NhkZmaam5tXfHbfvn1Mz0RwcHBycnJSUpKBgYGrq6v6LAXlKaYOJWPGUQAALaSUjnE+n89aEUoI0dPTq9RlSAgxNTVVbIvF4mbNmikjqs/hJaYe4NYoAIA2Yk+Efn5+mZmZrE9VZ2YZ7WNtSAghGaWE+8JRAABQqirXI6w4bC4jI+Ovv/7KyMioZh+hVvIQUfE5dCAKRwEAtAt7Ipw5c2alPVKpdOjQofn5+coPSU0x88sgEQIAaJnqjkAXCAQzZ87csmVLWlqaUgNSW54iKgGFowAAWucTpmIxNjaWy+Vv3rxRXjTqDDOOAgBopeomwtzc3J9++kkgEKjVuhBcQiIEANBK1aoaLSkpefv2LUVRc+bM0dlZ9awMCSEkvYT
UqTxVDgAAaLBqVY0SQqytrYODg729vTmJSk15iqj4XLqOIeplAAC0R3WrRoG8uzvaFoWjAABaRCnrFmkrZg0KVUcBAABfEnuL8Mcff/zokMFBgwZ9/fXXSghJfXmJqYPPMOMoAIBWYU+EFy9efPjwYUFBAUVRtWrVys3NlclkRkZGFddgateuHVdBqgssxgQAoH3Yb43OnDnT3Nx83759paWlmZmZpaWlx48ft7GxmTFjRtI7ffv25ThWlatjSHgUSStRdRwAAPDlsLQI5XL5mDFjVqxY0adPn/8dJBB07NjR3Nw8JCSkX79+OjuCghDiKabic2grFI4CAGgLlhZhenr6q1evGjVqVGl/o0aNiouLnzx5wklgagoTrQEAaBmWRGhqaioUCo8fP15p/4kTJwghFhYWXMSlrjC/DACAlmG5NWpsbDxkyJCpU6e+fPmyR48edevWTU9PP3ny5MqVKzt27Ojg4MB9lOrDS0ztR+EoAIAWYa8a/eWXX4RC4dq1a6Oiopg9PB6vX79+GzZs4DA2deSNpeoBALQLeyLU19dft27dnDlzHjx48Pr1a2tra09PTxsbG46DU0OWBkRAkbcl/1uzHgAANB17ImTUrl27bdu2nIWiKZj5ZaxROAoAoBXYxxFGR0cfOXKE2c7JyRkwYICrq2t4eHhWVhaHsakp1MsAAGiHK1eu9O7dmz0Rfv/99xkZGcz2tGnTDh061Lx580uXLg0dOpS7ANUV5pcBANACr169CgsLCw4OZkmE+fn5b968ad68OSFEIpHs379/6tSpe/fu/f33348fP56dnc15tOrFS0zFYyghAICG27p1a/v27UePHs2SCIuKiggh5ubmhJCrV6/m5eV169aNENK0aVOaplNSUjiOVd3g1igAgBa4fft2y5YtCWuxTO3atYVC4f379+vXr3/gwAFLS8vGjRsTQpi2oIGBAcexqhsUjgIAqK01a9bs3LmT2c7NzXV0dKzqyLdv34rFYsKaCAUCQa9evYYPHx4SEhIdHT1y5Eg+n08IuXXrllAotLe3V07wmoRpFKJwFABA3fTs2bNVq1bM9oULF+Li4qo60tTUtKSkhFRVNbpx48aePXs+ffp06NCh8+bNY3ZGR0e3adPG2Nj4S4eteXB3FABAPdna2vq+4+zszDTkWDk4ODx79oxUNY7QzMxs/fr1lXZu27btC8aq0bzE1J0sJEIAAA3Wr1+/iIiIn376ib1FCB+GNSgAADRdcHBw586dPTw8kAhrArdGAQA0HUVR69evv3fvHhJhTVgaECGPpBarOg4AAPg8VlZWSIQ1hPllAAC0w/8nwuTk5NjYWBWGollwdxQAQDv8fyI8f/78pEmTmG03N7dr166pKCTN4ClGvQwAgDb4/0QoEolyc3NpmiaE5OXlSaVS1UWlAdAiBADQDv8/jtDX1zczM7NTp04NGzYsLCxcv3790aNH33/Bzz//zGF46guJEABAO/x/IrS3t//tt98WL168Y8eOkpKSI0eOCAQsw+2RCBkW+kSfT94U0zZGmGgNAECD/SvV9ezZs2fPnoQQa2vrQ4cOKaZrA1ZeYio+h9gYqToOAAD4DOzDJzZv3uzm5sZxKBoHd0cBALQA+1yjXbp0IYSkpaU9ePDg1atXdevW9fT0tLOz4zY2decpom5hxlEAAA3H3iKUSqWRkZF2dnbBwcFDhw4NDQ11cHAYOHAgs2bvRyUlJY0ZM8bX17dJkybjx49XLGr/66+/tq8gMzPz/ddevXq1Xbt27u7u3333XWFhYY3/MA6gRQgAoAXYW4Q//fTT+vXrhw4dGh4ebm1tnZmZefTo0fXr1/N4vN27d3/0pAkJCXXr1t24caNQKJw8efLQoUOZAtTnz58bGhqOHz+eOczU1LTSCwsKCjp27Lho0aL27dt///33P/zww8aNGz/vD1QibzEVn0PThKBaBgBAc7EkQolEsmHDhv/85z+zZ89W7Gzbtq2Hh8eYMWOioqIsLCw+fNIuXbowN1cJIbNnzw4LC1M8Va9eveDg4KpeuG/fvgYNGowcOZIQsmTJEl9f32XLlr2fL9WEWJ8Y8smbItrWGKkQAEBTsdwazcjIyM/P79WrV6X9vXv3lslkz58//6QLXL9+3cvLS/EwNja2devW/fv3v3Tp0vsHx8fHN2vWjNl2d3fn8XjMqolqy0tMxeeqOggAAPgMLC1CMzMzHo+XlJTUsGHDivuTkpIIISKRqPpnf/DgwZw5c44fP848bNWqlZeXl7W19d9//x0SEnLy5Mk2bdpUPD4jI8PR0VHxUCQSpaenV3Xy5OTkigffvHlTKBRWP7YvwsVYcOutpKWZsmbhoWm6pKRELpcr6fzaSs17l9VTaWmpQCBgHT0MH4APWw1w9qaVlJQw06V9GMuH3sTEpG3btpGRkRYWFq1bt2Z23r17NyIiwtPT08XFpZoRPHnyJDQ0dO3atYrxiEFBQcxGy5YtU1JStm3bVikRikSiivU4+fn5YrG4qvPb2toeOnRI8VAsFlMU17cofazkNzJoU1NDJZ2fpmk+n29sbKyk82sxtb2jrraEQiESYc3gw1YD3LxphoaG1ckL7B/6DRs2tG3bNiAgwNra2sbGJiMj49WrV2Kx+OTJk9W8fFJSUvv27efPnz9gwADWA2rXrp2amlppp6Oj49mzZ5nt1NTUoqIiBweHqi4hFAqdnJyqGY+SeImonU/QXAMA0GDswydcXFwePHiwYsUKf39/c3Pzxo0bz58//+HDh82bN6/OSVNSUoKCgr777rvu3bvn5OTk5OQwjdO4uDiJREIIefjw4ebNm9u3b88cP2PGjKdPnxJCmL7DW7duEUJWrFgRFhZWu3btL/J3KomXmErIrUbDGwAA1FWVt0HMzc0nTpw4ceLEGpz05MmTBQUFS5YsWbJkCbPnxYsXJiYmK1asCA0NNTQ0JISMHTt2xIgRzLN79+4NCwtzdXW1tbVds2ZN+/bt+Xx+vXr1oqOja3B1Lon1iZGAvC6i7VA4CgCgmZTSHzBq1KhRo0a9v//IkSNSqbS4uNjMzKzi/uTkZMX2t99+O3jw4KKiokrHqC1PERWfQ+zYevEKCwt3bNz45NYtp4YNh44e/Ul1RgAAwA2uO8YFAsFHMxyfz9eULEje3R0NtavcIkxLS+vYvHlEWlr/8vIHQmHbNWuOXLlib2+vkiABAKAq7H2EUH1VTbQ2a9y4Ba9fjy0vb0nICIlkbWrqtOHDuQ8PAAA+DInwc1WVCO/euBFcYfxfK5p++vAhh3EBAEC1IBF+Li8xlZDDUjhqamqaX+FhKSEGhsoabggAADWGRPi5RHrEREi9KqqcCvuNHDnTzExGCCGEJmSWiUnPwYO5Dw8AAD6MvVhGLpcfOXIkJibm7du3leb3On36NCeBaRJPMUnIIfX+XTg6bOzYNykpzXbscOfxHstkoX36fD9jhooCBACAKrEnwsmTJ69cubJ+/frOzs7cz1umcZhuwvcLR/+zZMnU+fNfvnxpZ2enr6+vktgAAODDWBJheXn52rVrp02btnDhQm
TB6vASUf+ks08vo6en5+zszHE8AABQfSx9hPn5+eXl5X369EEWrCYvMRWfi3nWAAA0EksitLS09PT0vHfvHvfRaKiqCkcBAED9sfcRbtu2bdCgQcbGxu3bt8fEYB9lrkdMhdSrIroeZhwFANA07ImwQYMGXl5effv2ff+p6ixyqIO8xCT+vcJRAABQf+yJsG/fvpcuXfrmm29cXV1R7lgdTOFo2HuFowAAoOZYEmF2dvaZM2e2bdv27bffch+QhvISU5fT0FYGANA8LMUyfD6foqhGjRpxH43m8hRRCSgcBQDQQCyJ0NzcPDg4+MSJE9xHo7lQOAoAoKGqnFlmxIgRmZmZoaGhVlZWFZ/y9fXlJDANY65HzPSol4W0vQm6CQEANAl7Ihw8eHBaWtrq1atXr15d6SlUjVbFS0Tic4i9iarjAACAT8GeCA8cOFBeXs5xKJqOmV+mQz20CAEANAl7IgwICOA4Di3gicJRAACNsmrVqrlz52I9wi+mqqXqAQBADd28eXPOnDmXL19mbxGGhIRkZ2ezPnXjxg1lBqbBvMTUw1yaJgT3RgEA1N+OHTv69u3r5ubGnggdHBwqTjGal5d369at8vLy0NBQriLUPGZCYq5HvSikHVA4CgCgIiUlJaWlpcx2YWHhBwo8nz592qlTJ1JVH+HmzZsr7SkoKOjbt2+DBg2+UKjaiZlx1AGFowAAKrJkyZJVq1Yx2xKJpEWLFlUdmZOTY2JiQlgH1LMyNTVdvHjxkiVLqrplCgTdhAAAqjZr1qzsd3bv3m1mZlbVkZaWlnl5eaT6iZB5jUQiSU5O/vxAtRUmWgMA0BQeHh53794l1U+EEolkxYoVFEU5OTkpMzDNhhYhAICmiIiIOHToUFxcXLWqRqVS6YsXL3JycsaOHYt1ej/AE4WjAAAawtPTc+PGjZGRkdWqGuXz+cHBwWFhYcHBwVxFqJHMhESsR6UU0PVNkQoBANRdeHh4eHh4datGoZq8xCQ+l9Q3VXUcAABQPdXqI5TL5W/evMF029XhKaYS0E0IAKA52BPh3LlzN2zYwGw/evTIycnJ1ta2Xr16N2/e5DA2jYR6GQAAzcKSCOVy+bJly+rUqcM8/OGHHyQSybp161xdXYcNG4Z24Yd5iZAIAQA0CUsfYU5OTkFBgaenJyGkoKDgzJkzUVFR3333Xfv27V1dXd+8eWNra8t5nBqDmXFUThMeymUAADQBS4tQJpMRQgQCASHk3LlziilGHRwcCCGpqancRqhhTITEwoBKKUSjEABAM7Akwtq1a5uZmZ08eZIQsmvXLhcXF2YQ/evXrwkhGEf4UZ4ikpCr6iAAAKB6WBIhRVHff//9hAkT6tevf+jQobFjxzL74+LiTExMmHYhfADqZQAANAj7OMI5c+a4u7vfuHGjWbNm/fv3Z3ampqZOmjRJKBRyGJ5G8hJTF1KRCAEANAN7IqQoasCAAQMGDKi4c9q0aZyEpPG8xNTaBLmqowAAgGr5hNUnoJo8RdSjXFqONiEAgCZAIvzyUDgKAKBBlJII8/Ly+vXrZ2lpaWBg0KhRo9OnTzP7Dx065Ovra2RkJBKJhg4dWlhY+P5rfX19nd8ZN26cMsLjALNUPQAAqD+lJMLS0lJ/f/8HDx4UFxdHRkb27NkzNzeXEFJeXr5s2bKcnJzHjx8/fvz4v//97/uvTUlJ2bx58+nTp0+fPj1z5kxlhMcBzC8DAKAplJIIraysJk6caG1tzePxhg8fLpFIEhMTCSHh4eFt27bV19e3srIKDw9nlgZ+X7169ZycnJycnKysrJQRHgcwggIAQFMovY/wzJkzJiYmHh4eFXfSNH38+PGvvvqK9SVBQUH16tXr1atXUlLSB84sl8tzKviSQX82TzGVkItECACgAdiHT5SWlq5fv/7w4cOpqaly+b9GAnw4OVXy8uXLiIiINWvWGBsbV9y/ePHily9fHj58+P2X7N6928fHp6ysbP78+R06dLh3756BgQHryZ8/f85MeaMITE9Pr/qxKZWDkHqYI8wvKPzMGUdpmi4pKcFE55+qqKiIojDZ66cpLS0VCATM3IpQffiw1QBnb1ppaWl1vj/ZP/RDhw7dv39/QEBAYGAgj1fDVmNqamq7du1++OEHxZB8xtq1azdt2hQXF1cpOzI6dOjAbKxfv14sFt+7d69Fixas53d2dr5+/XrNYlM2E0JqG0ozibGTyWf9Y9M0zePxWN8o+ACapk1MTFQdhYYRvKPqQDQMPmw1wNmbZmBgUJ2My/KhLy8vP3z48MKFC6dPn17jy6enp7dv337o0KETJ06suH/btm1Lliw5f/58vXr1PnwGmqZpmtbcn1peYhKfQzuZamr8AAA6gqW1l5ubW15eHhYWVuOT5uXltWvXzt3dPTQ09ObNmzdv3szLyyOE7N27d9y4cUuWLMnJybl582Z8fDxz/Jw5c/bs2UMISUhIiImJefPmTVJS0siRI21tbRs1alTjMFTLS0xhBAUAgPpjaRHWrl3b2dk5ISHBx8enZifNzMy0trbOy8tTzMo2b948f3//lJSUVq1abdmyhdlpb2+/detWQkhRUVFJSQkhRCaTRUVFJSYmGhgY+Pv7nzx5Ul9fv2YxqJyniDr3Bn17AADqjiURUhS1devWUaNG2dratmnTpgY3J52dnRWD6CuaPn066+3WJUuWMBsNGza8cOHCp15OPXmJqTWqm3FUKpVu37Dh/OHDJmZmvUeNCvmM9j0AgHZj7xifMmXK27dv27Zta2hoWLdu3YpPfVLVqC7zFFOPcmkZTfic9xLSNN2jTZsW9+7NKSwsIOTnCxfuTZgwedYsruMAANAE7IkwICCgSZMmHIeiZYwFpI4hlVxAO5txnQljY2Prx8f/590Mdr/l5vr98svoH35AbRsAwPvYE+HSpUs5jkMreYlIfI4KEuGdy5cD8/IUD/mE+BGSkJBQ1UAUAABdhtUnlMhLTMXnquC6Ns7Or/5dZPSKz7exsVFBKAAAaq/KwbMlJSUXL1589uxZfn5+xf1Tp05VflRawlNMnXmtgsLRjp06hdWq1TE11ZUQQsgRHk/i4GBnZ8d9JAAA6o89Ed6/f79Dhw6vX79+/ykkwurzElOrHqigcNTS0nLLiROjBw0qy8gop2m3Zs127dzJfRgAABqBPRGOGjXKwsLi9OnTixcvtrOzGz9+/B9//DFnzpzNmzdzHJ9G8xRRj/NUUzjapEmTs/fvl5WVCQQCPp/P9eUBADQHSyKUSqU3btw4cuSIh4cHRVEymaxOnToRERHm5uaDBw9+9eoVZiOsJiMBsTKknhfQLpzXyzA0dzoCAADOsBTLZGZmSiQSZ2dnQoiJiUneu/rD0NDQtLS0R48ecRqghmNmHFV1FAAAUCWWRGhpaSkQCNLT0wkh9erVU6zwkJKSQgjBfbZP4imiElRROAoAANXEkggFAkGLFi2Yqc769Olz586d/v37L1u2rHfv3g4ODi4uLlzHqMmwVD0AgJpjH0e4dOlSZtkHR0fHDRs2/P3331OmTNHT0ztw4IBQKOQ2Qs2GR
AgAoObYy15atWql2B42bNiwYcMkEglSYA14iKineXReOTHXU3UoAADA5iMzy0ilUqZYBlmwZowEZJgb75sLUjmahQAAaqnKRLhz586GDRsaGRl5eXkxe2bPnj1v3jyuAtMey/34hRIy+5ZM1YEAAAAL9kS4bt26b7/91t3dffTo0Yqd7u7uq1atkkqlXMWmJQQ8Eh0s+DWRPvhcZcsTAgBAVVgSoVwunzNnzvTp0w8ePNirVy/Ffj8/v6ysLNZ51+DDLPTJ78H8yMsyFM4AAKiV27dvsyTCtLS09PT08PDwSvvr1KlDCGHGF8KnamJBLffj9zwjyy1XdSgAAECITCbr3r17eHg4SyI0NDQkhFRadIIQkpiYSAixsLDgID6t9I0LL9SOCj8nlaFZCACgasePH4+Pj2dvEYpEoiZNmqxcuVImk1HU/ybJlEqlc+fOdXJycnR05DZUrbLCj18mI3NROAMAoGq///577969jYyM2McRLlu2LCwszN/f39vbu7i4eN68eYcOHbp79250dLQiNUINMIUzzWOkDWvJeztiVWQAgC/s/v37ijmxr1+/LpFIqjryxYsXLVu2JFUNqG/Xrt3Zs2enTp26c+dOmqb/+9//enp6xsTEdO3aVRlx6xSmcCbspNRDRHmJtfNXxbVr13atXJmXnd0yNHR4ZKSeHmYTAACO3Lt378iRI8z269evTUxMqjqyrKyMGSJf5YJKAQEBV65cyc3NzcrKMjU1ZSpl4IvwsaCW+fF7npFd7SYQaV2O2L1ly56pU/+bnW1ByLG//grbufPUzZtYugsAuDFw4MCBAwcy2zExMTurXpbc2to6KyuLfHRmGZFI5OzsjCz4xQ1y4YXYUoMvyLRsxhmappfNnHk4O/srQtwJ+bG4uE1i4m979qg6LgCAyvz8/OLi4sgHWoTJycl//PFHamqqTPavyo6ff/5Z6dHphpX+/OA/pXNvy2Y31Z6VrVJSUhoQYlRhT1Bx8eELFwYNHaqqkAAAWEVERCxdunTWrFnsiXDHjh0jR46USCSmpqaVbmohEX4pAh7ZHyRocUTqLdaewpk6deq8pf/VyH1NUTbOzqqKBwCgKrVr1758+fIvv/zCPrPM+PHjO3XqlJaWlp+fn/1v3MeqxawMSUx7/lgtmnHGyMjIvmnTne9+PL0hZGXt2r2++Ua1UQEAsGrQoMHq1atZEmFmZmZBQcGMGTPQNcgBReFMnrbMOLN+//6/unVrVqdOmzp1+ri4LN2/H2NPAUCdsdwatbS0tLa2TktL4z4a3TTIhXc1nR50QRbTns/T/PEUZmZmm6OjZTJZWVmZkZHRx18AAKBSLC1CHo+3dOnSGTNmJCUlcR+Qblrpz8+X0HNva8+MM3w+H1kQADQCe7FM//79z5496+7u7uTkZGpqWvGpGzducBKYbhHyyP4gQfMYaUOxvJe2FM4AAGgE9kQ4bty4HTt2uLi4uLq64nc9N6wMyZEQfthJqbv2zjgDAKCGWBJhUVHRpk2bfvrpp/nz53MfkC5TFM5c6yYw17oZZwAA1BPLXbji4mKZTNazZ0/uo4FBLrxgW2pwnLbNOAMAoLZYEmHt2rU9PT3v3r3LfTRACIny5+eV0/Nuy1UdCACATmDvI9y1a9fgwYP19fVDQkIsLS05jknHKQpnvMWkZ310Fn6yJ0+e3L5928PDo1GjRqqOBQA0AHsiZKaVUUzgXRFN456d0jEzznSIlbqL+PXRWVhtMplsaNeu+dev+xYXHzE0zHVyOnjunLGxsarjAgC1xp4IZ86cWVRUxHEoUFFTS2qZH7/XGfn59hS+yKspasECj7i4GcxHt6hoT37+T2PGRFW9CAsAAKkqEUZGRnIcB7xvkAvvn3R6xD+Co6FEC2ac4cDJ6Oj9FX7ADSwvX3PhgurCAQDNgLHbam2lHy+vnJ5/B4Uz1SKTySr+sqMIkcvx1gHARygrEcrl8pcvXyYnJ1dazpAQkpKSkpyc/IHXZmdnP3z4UCKRKCk2DSLkkV1fybY8kh9Kxhf6xwV27PirgYHi4XGBoHHz5iqMBwA0QpUL836O8+fPDxo0iBCir69P0/TevXv9/f0JISUlJd27d3/8+DEhxM3NLSYmxtDQsNJrf/755yVLltjb2+fk5Bw7dgyFf3UM6HeFM5SnCHdIP2Tq/Pk9Ll68nZjYIicn3tz8mpXVsS1bVB0UAKg7pbQILSwsTpw48erVq6SkpEGDBg0fPpzZv3HjxsLCwqdPnyYmJhYXF2/YsKHSC5OSkhYsWHD9+vU7tGfLMgAAIABJREFUd+6MHj16woQJyghP4zS1pJa24Pc8rT1LNSmJvr7+iatXvzl8uGj58g7791+Mj69Vq5aqgwIAdaeURNioUSNFSy44OPjly5fM9r59+4YNGyYUCgUCwbBhw/bt21fphfv372/Xrp2zszMhZOTIkXFxcampqcqIUOMMduUF2WDGmWoJCAiIGD48JCSEz+erOhYA0ABKL5bZuXNn586dme3k5GQmyRFCnJ2dU1JSKh2ckpLi4uLCbFtYWJibm7948aKqM5eVld2sQOsHOK5qyc8toxegcAYA4ItSSh+hwubNm0+fPn3t2jXmYVFRkcG7WgYjI6P8/PxKxxcVFVWcyMbIyKigoKCqk6elpY0YMULx8OzZs0Kh8IuFrh5omi4pKVHk+O3+VOBpPQ/jsjAb7Vm5UBmKioooCv2pn6a0tFQgEAgEyv1O0D74sNUAZ29aaWlpddpISvzQ79mzZ86cOefPn7eysmL2WFlZ5eTkMNtZWVnW1taVXlLxAEJITk7O+8co2NvbX79+/UtHrV5omubxeIq5UUxMyMF2dOdTJK6zAQpnPoCmaRMTE+4veujgwTMHDxqZmvaMiPjq6685DuAzCd5RdSAaRiUfNk3H2ZtmYGBQnYyrrFuj0dHRU6ZMiY2NdXV1Vez08fH5559/mO1//vmnadOmlV7VpEkTxQF3794VCoVOTk5KilBD+dWhlvuhcEYdDena9dLIkRHR0V23b1/SrduqhQtVHREAVItSEuG5c+f69+/fu3fvv//+e9OmTZs2bWIGBUZGRq5evfrIkSNHjx5dtWqVYv6axo0bX758mRDSu3fvtLS0OXPmXL58edy4ccOHD8eywO8b7Mpra0MNiZNpeaeoRvnrr7+oy5ej8vKaE9KGkN+zs/esXJmXl6fquADg45TVIoyIiKhYzMIMq2/Tps22bds2bdq0cePGrVu3BgYGMge3bt1aLBYTQgwNDc+dO5eYmDhz5sz27dsvxG/qKqxuyc8poxdgqSa1cePvv0Mq3NUXEPK1XH7//n0VhgQA1aSU/oCgoKCgoCDWp7p37969e/dKO3/55RfFtpub2+7du5URlTYR8siBdoIWR6RNLKjO9ugsVL06dnZv9fVJaaliT5pQWKdOHRWGBADVhLlGNZWVIdkfxP/2ovRhLm6Rql5IaOhekUgx1ucSIa8tLBRjgQBAnaFCTIP516GW+fF7nJZd7SYwx7KFKmVpabnu8OEBgwcb5+eXEWLq6LjnwAEeDz80ATQA
EqFmG+LKu5JGD4mTHW7Pxx1S1Wrh7//XkyfZ2dn6+vpYDRhAg+AXq8Zb04qfjcIZtVGrVi2VZEG5XH7r1q3Tp0+np6dzf3UAjYZEqPGYwplNj+R/vEBnoY5KTk5u7eGxLjT0fJ8+3b29V8yZo+qIADQJbo1qA2tDcqAdv8sp6cXOAg/MOKN7RvboserJk2aEEELkhPRavbp527atAwJUHBaAhkCLUEswhTM9z8jysZ6xjiksLJSlpjZ795BHyJjs7D/fW9oFAKqCRKg9hrjyAqypwRcw44xuKSsrq3Rjx4CQ0uJi1UQDoIGQCLXKL6342WX0QizVpEssLCwKTU2TK+z51dw8sEcPVcUDoHGQCLUKUziz8aH8+Es0C3XImn37etWtu9jAYAdF9bWwoMLCunbrpuqgADQGimW0DQpndFBTX9/zjx6dOHEiMzV1SuvWzZo1+/hrvqgXL14kJCQ4ODh4eHhwfGmAz4dEqIX861Dzm/F7npFd7SYw07a1ioGdmZlZeHg499elaXpM//5J5875lZdv09Mrdnbed/o01ucDzYJbo9pplDsvwJoagsIZULKNUVHiEydOZWTMy8s7kJEx4NataaNGqToogE+DRKi1VrfkZ5TSgX9I9yXJy2Sqjga01PHffhtfUKB4OKC8/NrFiyqMB6AGkAi1lj6fxHUW/MeHfyiZtt0rGfWX7H422ofwhZWXl1ea752Wo2gZNEZmZuaKFSuQCLUZnyLBttSBdvw7PQVOplTnU7JmMdJNj+TFUlVHBtqiTceOew0NFQ8vUJSrt7cK4wGovoKCghYtWty5cwfFMjrBzpia2pia3Ih3/g296ZF8xnVJL0feGE9e41ooK4XP8sOsWd3PnElITPTPyXliYnK6Tp0jO3dyGYBEInn8+LG+vr6TkxOfz+fy0qDp9uzZY2tru2vXLiRCHcI0EINt+a+LeHsS6W6nZJYGZKQ7b6ALzxgfBKgRfX39E1evnj9//sHt281dXGZ17CgUclepfDY2dsqwYY3Ky8so6pmJyaZDhxo1bszZ1UHTnTt3LiwsjGD4hG6yNaamNqZ+bMQ7V6GB+J0Hr4kFGojwySiKCgoKCgoK4vi6aWlp04YMOZWWZkEIISQxPb1/165/P32qp4dVqnXaoUOHYmNjme3k5GSKqvJrLTU1NSQkhCAR6jLeuwbim2Le7qd0jzMyC30y0p03wJlngtGHoPbOnzvXLz/f4t1DF0J8S0ru37/v6+uryrBA1WxtbRWfAT6fn5KSUtWRfD5fLpcTJEIghNgY/auBOP26pLcjb7QHzwcNRFBjBXl5ZpJ/LbZiJpXm5+erKh5QE35+fn5+fsx2TEzMzqo7rW1sbFJTUwmGT4AC712J6YNeQidTqteZ/5WYFmJdJ1BLLb/+OkYkUgwJKiPkvEDg4+OjyphAo3Tq1OnIkSMymQyJECqra0SmNuYl9hX83Jx/5jXtsE8y6i/ZzUyMQQT14u3t3XzgwF4WFocI2UdImKXlmNmzRSIRN1enaXrrunVfN2jQ1sOjf/v2T58+5ea68AX16dNHKBQGBwfj1iiwU/Qgvi3h73wi73tWZsAng115I915Yn1VBwdACCFkTlTUtQEDzh49KjQw2NCnj5ubG2eXXjJr1vOoqJMFBSaEXH/9OrxNmz9u3qxbty5nAcDn09fXv3Tp0p9//olECB9hbUimNuYpehDr75OE2vFGuvOCbdGDCKrXokWLFi1acHxRmqb3bdp0o6CAGbfYnJDJb99u/+WXGQsWcBwJfCZ9ff3u3bsjEUK1KBqIaSX8HU/k3/0t0+ORwa68Ee68Wmgggo7Jzs62IqTi6P2GNH3q7l2VBQSfB32E8GmsDMnUxrynfQW7AvnPCmiXA5K+Z2VnXqMHEXSIhYVFGiEVy8ju8HhuTZuqLCD4PEiEUEO+ltTGr/lJfYXBttTEf2Qe0dLFd+VZZaoOC4ATg8eNG2lmlk0IISSOolZaW0dERnJ29YcPH34TFtbcwaFjs2axf/7J2XW1FW6NwmcR65OR7ryR7rybmfSmR3LXA5JgG95Id14726qncwDQfBNmzNhra9t72bL8vLyGPj6/r15dp04dbi6dmJg4KDBwfXp6c0JevngxauDAovXre/brx83VtRISIXwZTANxQTP+rqfycVdkPELCnXmuZsTZjHIyoyzQjwjahaKogUOHDhw6tKCgwNTUlMtLr5o1a2l6enNCCCH1CNmXkxM6fToS4edAIoQvydKATGrIm9SQd/EtfeKl/FAyeVYgT8qnaUKczSgnU8rJ9N2GGbE3pgS4Nw/wiR7dv9+kwkMzQqRFRSqLRisgEYJSBFhTAdb/X1WXU0aeFdDPCuhn+eRmJn3wufxZPnlTTNsYUU5mxMn0f6nRyZRyMaPMMWcyQNVc3NwS7t//6t3DYkL4RkZcBkDTdHJyMp/Pt7e35/K6yoNECFwQ6xNffcrX8l/9huVyklxAPysgz/LppAL6n/T/NR8NBSzNRztjdDoCEEJI5OzZwy5c2J2Z6UpILiEjzc3HzJjB2dX/uXIlMry/fWmplCZvzczm7D7o26SxkEcoiog09icsEiGojB6PNDCnGpgTQv6V49JKyLMCOimfflZA4lLpbU/kz/JJVhntWDE1vtswxEcYdIyXl9eaEycmjB2b/uaNoYnJ2Jkz+33zzeefVkaTrFKSVUb/779lJLOUZJTQWWX/1969R0Vxng0Af2Z2d/YCrMBycXeRS1TKCgLGghi8VAkmaQqRcALe0hg1OX5qGpIajzmNp0kxsX5fT5qGEJueY2IT7UnUgEZTiaQnxgulIjWxNOqi3OWOe2fvM98fA5OVhXgJe4F9fn94Zud9Z/aZdXce3ndm3hcGrDBgYQas0HdTI9xWeF7XpQQAAHVfz5L8fNv/Njj5IpoBnQ0AQMgDCR8AIERA8AkgCWD7eEQ8YH+tUyiCdlAiyhkiAACQ8EHIAwAIpQgCQEACO/tNEB8oHgBAGEUAAMUDdtrUYAHEBBHjO4UqnkWQ34kWQ7SYmB91S3a0OOG6fqhz9bqB+fIG02SAZgMTJuRSIzGd7WWVEkG+Ch0hr8jIyPj8/Pk7rGynv89kAxam3wJ9rgnPAgNW6LcwGitEiEAmImTC4X+FECkmVGEgE4JMSMpEUP+P2ianQTm850SAQsa0Mqlh/vz53NtZnGB2AADo7YyTAScNejsAgNkBFicAgM7GmMwOgVBgsAMADDrA6gQA0NoYBsBOAzvQv8kBNicAgMZGA4DVCYMOAACDHd7N5uUoxrOHCBMhmhhEPEgOI5LDbvn2MwA3TEyTYShHnmiH6wa6Sc8Y7cJwkUMqACkFIQIIowgpBcMvCakAQoUwhSKkAggRgJQCqQAvTKKJx+q8JZP1WUZv0pkcECECmZCQDf8bKQKFhJgdxia879ffNrfUGW+GWW95WDjMZtNoNK5rRDwQsS054Vj7IwwGOiTkXu6Uo2n64sWL3Re7bzDpSqXy9hvcGUyEaAIjAGKCiJggWDT11s5VrdEuCNLbQW8Dgx20NkZnA70N9Hamc5C5YgO
dDbQ2Wm8Hgw30dtDbGL0dQtk0OZwypQIiVMguf58y2ZwaIhhaH4IzGE8ERjvY6XvfnG2pjMVkIkQ0wzZ6uO5BBkBrG9pIM5w4dDagGYDhphIAGOzgoG+J0ORgbDSASzuJa0hZaRh0MABgc4LJAQAgIG/JcBEiiBDBtCBijgxkIpJt0kWIiHEcJT9z3rySKVO29PezvzcnQLVI9D/eGlKnq6uraOnSmf39sVbr/4lEC4qLd5aVjcueMRGiSUjCY0KCXFPjHfWiaKxgsDNs+tTbwWBnNNahNNliHEqZWhuts4FhqA5jskOoEKSCW1qc7C0DIQLgkyDigZhH8EiQCgAApALgkSDmESIe8Elg8+gUCkgCJHxCSH5/gWRyY3u92HygsYKTAb0NbDSYHAx73r/bCj8sWACCH/GgDnvtaiw0TQn4Tvb/l7seBsNXtgAgVDj0/WP/owFAKiB4BABAbBCwTxBxEUr4pJAEABDzh9pV3KU1IQkS/i1Xy7wvKSkpc9Wq4v3719286QDYI5MV/epXU6dO9c67byoqel2tXkTTAAAGw7r9+z9bujS/oODH7xkTIUJDwoQjOnNunz7ZFoDOdkv6ZNsEejs4aTA7GY2NcdLQaAcYbhMMOmgrDQ4a2GskWhswzFBTgLtAEkoBQUAQn6DI70987J/2wXxCQA7dksCdedkbE34Mh4MkSSDJe2w6sVmKvT70A2mMrcDeB8HGHCoEHgFTKBCQEMwn2PM+W4FNGAkhrhVI9wpcpvEJ7z9Q71u/+9OfzhUVVR08yBcIfrdy5dy5c73zvg6Ho6excSgLAgDAJq1279/+hokQIR8jiXtJn3eCbeUYHYydHuoKYwC0VgAAg51xMEP3DnB9cVxv2z1zOBiSZEjyHvciICGMuiWNhQjIsfIcmtCys7Ozs7NvX29cuc8jTwHYrOMzurEHv5Jms1mtVkdFRXGTVXZ0dPT09LjWuf/++4lbex2+/fZbh8PBLoeFhd13332eixAhv8U2/sa43cAj7R+Lxc7nE3w+7/ZVEfI6oVAokcsbenpShtfsDwlZMh7NQfBcInzyyScPHjxIEMQrr7zyyiuvsCs//fTTjz76iF3u7e21WCwj8iIA5OTkyOVyoVAIALm5ubt27fJQhAghhCaQdz75ZGVOTr5GE2M2/yM8nJo/f/XateOyZ08lwpdffvndd999+umnXVc+//zzzz//PLtcXFysVCqJ0S5CV1RUzJw500OBIYQQmogSExPPqtVVVVU9nZ0lGRmZmZnjtWdPJcJZs2b9QOnAwMDRo0fPj/FAqFqt1mq1KpUqODjYM9EhhBCaeMRiccE4dYe68s1l6/3796enp6emproXicXi119/3WKxNDc379mzZ8WKFWPtRKfT/fnPf+Zerl+/niQn21wGVVVV+/bt+/jjj30dyASTnJz87bffSqVSXwcykWzfvl2lUm3YsMHXgUwkdXV1v/nNb06ePOnrQCaY7OzsysrK+Ph4T78RTd/RXdC+SYQffPDBpk2bRi1Sq9VisRgAKisr16xZk5ubK5PJRq1psVjq6uq4l6tWreLzJ9vtaIODgwaDwTpOd0YFDq1Wa7Va8XO7K0aj0WQy4Yd2V/AXem/0er3FYvHC52a325kfGg5hiA8yR11dnVqtLioqGrWUzYIAUFBQIJFIGhoaFi9ePGrN6OjovXv3eipK/yAUCnk8nsS7c6xMAgRBiMVi/NzuCp/PpygKP7S7IhKJSJLED+1uee0XKhQKR70TZQQf9CW+//77RUVFoaGh3Bqapt0bsJ2dnRqNhnv0AiGEEPIET7UIjxw5Ultb29DQ0NvbazQaCwsLMzIyAMBsNn/88cdHjx51rZyXlzdnzpydO3d+9dVX+/fvv//++61W63vvvVdYWJiYmDjq/oOCgqxWa25urofi9xNarbanp2fSH+a4I0myoKCAx8NH4u5Cc3NzTU3NiN8m+mFGo7GtrQ1/oXfLaDSuXbuWfUzOozQaTXp6+m2reSoRSiSSsLAw7vEJ7oB7e3t37NixcOFC18rr16+Pjo4GgOTk5JSUlMuXL4tEop07dxYWFo61f5VK9eGHH/b393sofoQQQpPAvHnzbluHuJMLiQghhNBkNdmeN0AIIYTuCiZChBBCAQ0TIUIIoYCGiRAhhFBAm2xDsUxcjY2Nf/3rX/v7+x999NG8vDz3CidPnvzmm2+4ly+++OLkG0nnbh05ckStVrPLFEWVlJS419Hr9b///e81Gs3mzZtTUlLcKwSar7/++l//+pfrmpKSEoqiXNeUl5ebTCZ2OS4urri42Hvx+Zn6+vpTp07l5ua6Dgmp1Wp3796t0+m2bNky1rjK77333vnz5x9++OEnnnjCW8H6C71eX1lZabPZnnnmGW6lWq3et2+fRqP5xS9+8eijj7pvVVVVdenSJe6lV09xDPID7JRVUVFR06dPJwhi4cKF7nXmzp3L5/PDh+l0Ou/H6W/i4uIoimI/ELlc7l5hYGBAIpGEhoayH+yhQ4e8H6S/efbZZ7lvEUVRBEGYzeYRdXg8XnBwMFtnwYIFPonTH0gkEnZcko0bN3Ir+/r6xGIxO1sqSZKVlZXuG2ZkZAgEglmzZpEkWVxc7MWQfY/9e5QkSYqiuJUHDhxwPcX97Gc/c98wPT1dIBBwX073r6XnYCL0C01NTWq1ml3+8MMPAaCtrW1Enblz5wbyKWlUcXFxRUVFP1Dh6aefZsdeYBgmOzs7JibGW6FNDHFxcWlpae7reTwe/tHAMMzXX39tt9uDgoJcE+GaNWuCg4PZQSyzsrJiY2NHbHXhwgUAqK2tZRjmL3/5C0EQAwMD3gzbt65evdrS0rJ9+3bXRNjU1HTt2jV2mR0as7Ozc8SG6enpixcv9lqcrvAaoV9ISEjgpmBke1rMZrN7td7e3tLS0mPHjnk1OP/W3NxcWlr61VdfjVr6xRdfZGdns/1+zz33XEdHh3ej82utra2tra0vv/zyqKXV1dW7d+9uaGjwclR+ZdGiRe69c9XV1dz6zZs3t7e3j6hQVlYWEhLCPsf9zDPPkCT5/vvveydgf5CYmBgXFzdiZUJCwvTp09ll9hQ3ODjovm13d3dpaennn3/u6SBHwETod5588km5XO4+thyPx+vq6iovL1++fHl0dLTRaPRJeH6Fz+er1eqysrKcnJwZM2a4j1ir0+m4qV7S0tIA4Pr1614O0m+99NJLEolk1Ot/AoGgoqJi9+7ds2fPHmt8/IBlMBi4E31qairDMCNyYVtbm+tYyiKRqLGx0ash+rennnpKoVBweZHD4/E6OzvLy8vz8/PlcvmomdJDMBH6l8cff/zatWujTm925swZvV7f3d3d1tam1Wqfeuop74fnb7777jutVtvb23vp0qXm5uZf//rXIyowDMPNUikQCAAAZ8zhHD9+PD8/f9QinU7X19d38+bNd95559ChQ7W1tV6OzZ8xDMMNY8t2NlgsFtcKTqfTdcYDkiTtdrs3I/Rnjz32WEtLS3V1tXtRTU0Ne4praWm5efMmN0KnF2Ai9COrVq06duzYqV
OnRr25kbuvT6lUpqamut5eFbC4zyQlJSU2Ntb9fB0SEtLZ2ckuX7lyBQDGGsY90Bw+fNhsNr/xxhujlnIf7ObNm/l8/hdffOHF0PxdUFDQjRs32GX2SzWicaNUKg0GA/fSYrHcd9993ozQb61cufLvf//76dOnR73VlvvWTZs2LTk52fUmeU/DROgv1q9ff+jQoZMnTz7wwAPcSqPR2Nra6l75+vXrOEGVK4vF0t3dzfaCOhyOxsZGtps0MzPz3LlzbJ29e/eGh4fjMyes0tLS+Pj4hIQEbk1PT4/7KPb19fUOh2P27Nnejc6vZWRknD17ll3+4IMPIiIi2F6H9vZ2vV4PAKtXr9ZoNGyyrK6uttvtq1ev9mHAfmLdunWHDx/+8ssvXUfBHvUUR9N0c3OzQqHwXnA+uUUHjfDRRx8BQHh4uGrYmTNnGIYpKSnh8XhsnfDw8AceeOCRRx6RyWQ8Hu8///mPT0P2PbPZHBISsmjRooceekgqlQqFwu7uboZhzpw5AwAXL15kGObatWs8Hk+lUi1btowgiD/84Q++jtov6HQ6kiTffPNN15UxMTFz585lGKa8vFwulz/44IOLFy/m8/nx8fE+CtP3nnjiCZVKRZJkWFiYSqU6fvw4wzBXr14lSTI5OTk3N5cgiD/+8Y9sZaFQ+Mtf/pJdjomJmTJlSkFBgUgkyszM9NkB+MKpU6dUKpVMJiMIQqVS5eXlMQyzb98+AJDJZNwp7p///CfDMM899xyfz2c3DAsLy87O5k5xly9f9lrMvFdffdV7WReNwWq1ms3mmTNnKoYtW7YsMjJSIpFIJJKf//znAOB0Ojs6OoxGY1ZWVlVVFXa2kCRpMBhu3LhhtVqXLFlSXV0tk8kAQCgU9vT0FBcXi8Xi8PDw5cuX19fXm0ymnTt3bty40ddR+4XGxkaTybRr1y7XS1k0TS9atGjOnDkKhaK7u7urq4vP569du7aiosKHofrW1atXKYqaPXt2YmKiQqF48MEHFQqFTCZ77LHHLly4MDg4uGvXLu6ZcZ1Ol5+fP2PGDADYuHFjc3Nzc3NzYWHhJ598cieTpE8aRqOxqalp5syZqampCoUiKSlp6dKlVqvVYrG4nuIefvhhmUwmkUiCgoIeeeQRALDb7R0dHSaTKSsr68SJE9xtbl6A0zAhhBAKaHiNECGEUEDDRIgQQiigYSJECCEU0DARIoQQCmiYCBFCCAU0TIQIIYQCGiZChAJXbW0tO+0XQoEMEyFCgevTTz998cUXfR0FQj6GiRAhhFBAw0SIkB9hGKa3t1ej0YxVwW639/T0jDWtz8DAgPvA2RyTydTd3e10Okct7e/vx9mCUGDCRIiQvygrK4uJiYmOjg4PD09LS+PmzQCAhQsXPvvss2+88UZERMTUqVOjo6PLy8tdtz1+/PhPfvKTiIiIyMjI6dOnHz582LW0trZ2wYIFUqlULpdLJJLly5e7ltbU1CQlJUVGRgYHB69YscJsNnv0MBHyN5gIEfILpaWlL7zwwvr16+vr62tqapRK5UMPPXT9+nW2VK/XV1RUHDp06OjRo//+97/z8vK2bNly8OBBtvT06dPLly+fNm3a6dOnz507l5SUVFRUdOLECba0vr5+yZIlRqOxsrLyv//972effeY6wY3JZFq3bt327dsvXLjw2muvHTx4sKyszMvHjpCPeW2eC4TQWPr6+kQi0datW7k1JpNJLpe/8MIL7MvU1FSBQNDa2sq+pGk6NTU1LS2Nfbls2bKoqCiTycS+tFqtsbGx8+bN40plMplGo3F/361btwJAVVUVtyYnJycrK2u8jw8hv4YtQoR87/Tp0xaLRaFQfDmspqYmPj6+oaGBq5ORkREbG8suEwRRWFjY0NDAXtX75ptv8vLyJBIJW0pR1OOPP15XVzc4OGi320+dOlVYWBgaGjrqWwsEgpycHO7lrFmz2tvbPXWcCPklnK0bId/r6ekBgNdee42d65yjUqm45alTp7oWyeVyp9PZ1dUVFRXV29srl8tdSxUKBU3TWq1WKBTabLaYmJix3loqlfL5358H2Po/8nAQmlgwESLke1OmTAGAqqqqrKysseqMuB20t7eXJMmoqCiKogQCwYjSvr4+giCkUilFUXw+n020CKFRYdcoQr63YMECHo/H3fwyqgsXLrhmu6qqqhkzZohEIpIkMzMzq6qqHA4HW0TT9PHjx1NSUoKDgymKmj9//rFjxywWi2ePAaEJCxMhQr4XGxu7adOmt99+u7S0tLW11Ww2X7ly5a233jpw4IBrtTVr1jQ3N2s0mt/+9rdnz54tKSlh17/00kstLS0bNmxob2+/cePGpk2bLl++vG3bNrb01Vdf7ejoKCgouHTp0uDgoFqt3r17t7ePECF/5uu7dRBCDMMwdrt9x44dwcHB3G8zISHhyJEjbGlqauqKFSs2bNjAXkTk8Xjbtm2jaZrbfM+ePWz/KgAEBwe/+eabrjuvqKhwvUz405/+lF2/detWmUzmWnPr1q2RkZEePlaE/AvBMIyXUy9CaCw2m+3y5cs2m02pVLqAE6o4AAAAh0lEQVQ+7ZeWlpaSknLgwIHu7u62trb4+PioqKgR21qt1u+++46m6VmzZonF4hGlNE1fuXLFaDQqlUqlUunxI0Fo4sBEiNAEwCVCXweC0CSE1wgRQggFNHx8AqEJYMeOHeHh4b6OAqHJCbtGEUIIBTTsGkUIIRTQMBEihBAKaJgIEUIIBbT/B3OUZFgm7VJLAAAAAElFTkSuQmCC", - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", - "\n", - "\n", - "\n" + "\n", + "\n" ], - 
"text/html": [ + "image/svg+xml": [ "\n", "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", - "\n", - "\n", - "\n" - ], - "text/plain": [ - "Plot{Plots.GRBackend() n=2}" + "\n", + "\n" ] }, - "execution_count": 23, "metadata": {}, - "output_type": "execute_result" + "execution_count": 23 } ], + "cell_type": "code", "source": [ "plot(epochs, losses,\n", " xlab = \"epoch\",\n", @@ -1460,20 +1342,21 @@ " label=\"out-of-sample\",\n", " legend = :topleft);\n", "scatter!(twinx(), epochs, training_losses, label=\"training\", color=:red)" - ] + ], + "metadata": {}, + "execution_count": 23 }, { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, "outputs": [], + "cell_type": "code", "source": [ "savefig(joinpath(\"assets\", \"loss.png\"))" - ] + ], + "metadata": {}, + "execution_count": 24 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "**How `IteratedModel` works.** Training an `IteratedModel` means\n", "holding out some data (80% in this case) so an out-of-sample loss\n", @@ -1482,139 +1365,113 @@ "wrapped by `IteratedModel` (our pipeline model) is retrained on all\n", "data for the same number of iterations. Calling `predict(mach,\n", "Xnew)` on new data uses the updated learned parameters." - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "In other words, `iterated_model` is a \"self-iterating\" version of\n", "the original model, where `epochs` has been transformed from\n", "hyper-parameter to *learned* parameter." 
- ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## An evaluation of the self-iterating model" - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "Here's an estimate of performance of our \"self-iterating\"\n", "model:" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\r", - "Evaluating over 8 folds: 25%[======> ] ETA: 0:00:06\u001b[K\r", - "Evaluating over 8 folds: 38%[=========> ] ETA: 0:00:05\u001b[K\r", - "Evaluating over 8 folds: 50%[============> ] ETA: 0:00:04\u001b[K\r", - "Evaluating over 8 folds: 62%[===============> ] ETA: 0:00:03\u001b[K\r", - "Evaluating over 8 folds: 75%[==================> ] ETA: 0:00:02\u001b[K\r", - "Evaluating over 8 folds: 88%[=====================> ] ETA: 0:00:01\u001b[K\r", - "Evaluating over 8 folds: 100%[=========================] Time: 0:00:12\u001b[K\n" + "\rEvaluating over 8 folds: 25%[======> ] ETA: 0:00:05\u001b[K\rEvaluating over 8 folds: 38%[=========> ] ETA: 0:00:05\u001b[K\rEvaluating over 8 folds: 50%[============> ] ETA: 0:00:04\u001b[K\rEvaluating over 8 folds: 62%[===============> ] ETA: 0:00:03\u001b[K\rEvaluating over 8 folds: 75%[==================> ] ETA: 0:00:02\u001b[K\rEvaluating over 8 folds: 88%[=====================> ] ETA: 0:00:01\u001b[K\rEvaluating over 8 folds: 100%[=========================] Time: 0:00:12\u001b[K\n" ] }, { + "output_type": "execute_result", "data": { - "text/plain": [ - "PerformanceEvaluation object with these fields:\n", - " measure, measurement, operation, per_fold,\n", - " per_observation, fitted_params_per_fold,\n", - " report_per_fold, train_test_pairs\n", - "Extract:\n", - "┌───────────────┬─────────────┬───────────┬─────────────────────────────────────────────────\n", - "│\u001b[22m measure \u001b[0m│\u001b[22m measurement \u001b[0m│\u001b[22m operation \u001b[0m│\u001b[22m per_fold \u001b[0m ⋯\n", - "├───────────────┼─────────────┼───────────┼─────────────────────────────────────────────────\n", - "│ LPLoss(p = 1) │ 2.64 │ predict │ [2.38, 2.8, 2.96, 2.89, 1.81, 2.43, 2.52, 3.29 ⋯\n", - "│ LPLoss(p = 2) │ 17.8 │ predict │ [19.8, 24.4, 26.6, 14.4, 6.1, 10.6, 16.9, 23.7 ⋯\n", - "└───────────────┴─────────────┴───────────┴─────────────────────────────────────────────────\n", - "\u001b[36m 1 column omitted\u001b[0m\n" - ] + "text/plain": "PerformanceEvaluation object with these fields:\n measure, measurement, operation, per_fold,\n per_observation, fitted_params_per_fold,\n report_per_fold, train_test_pairs\nExtract:\n┌───────────────┬─────────────┬───────────┬─────────────────────────────────────────────────\n│\u001b[22m measure \u001b[0m│\u001b[22m measurement \u001b[0m│\u001b[22m operation \u001b[0m│\u001b[22m per_fold \u001b[0m ⋯\n├───────────────┼─────────────┼───────────┼─────────────────────────────────────────────────\n│ LPLoss(p = 1) │ 2.64 │ predict │ [2.38, 2.8, 2.96, 2.89, 1.81, 2.43, 2.52, 3.29 ⋯\n│ LPLoss(p = 2) │ 17.8 │ predict │ [19.8, 24.4, 26.6, 14.4, 6.1, 10.6, 16.9, 23.7 ⋯\n└───────────────┴─────────────┴───────────┴─────────────────────────────────────────────────\n\u001b[36m 1 column omitted\u001b[0m\n" }, - "execution_count": 25, "metadata": {}, - "output_type": "execute_result" + "execution_count": 25 } ], + "cell_type": "code", "source": [ "e = evaluate!(mach,\n", " resampling=CV(nfolds=8),\n", " measures=[l1, l2])" - ] + ], + "metadata": {}, + "execution_count": 25 }, { - "cell_type": 
"code", - "execution_count": 26, - "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "l1_loss = 2.64 ± 0.17\n" + "l1_loss = 2.64 ± 0.33\n" ] }, { + "output_type": "execute_result", "data": { - "text/latex": [ - "$2.64 \\pm 0.17$" - ], - "text/plain": [ - "2.64 ± 0.17" - ] + "text/plain": "2.64 ± 0.33", + "text/latex": "$2.64 \\pm 0.33$" }, - "execution_count": 26, "metadata": {}, - "output_type": "execute_result" + "execution_count": 26 } ], + "cell_type": "code", "source": [ "using Measurements\n", - "l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7)\n", + "l1_loss = e.measurement[1] ± 1.96*std(e.per_fold[1])/sqrt(7)\n", "@show l1_loss" - ] + ], + "metadata": {}, + "execution_count": 26 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing))." - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## Comparison with other models on the test set" - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "Although we cannot assign them statistical significance, here are\n", "comparisons, on the untouched test set, of the eror of our\n", "self-iterating neural network regressor with a couple of other\n", "models trained on the same data (using default hyperparameters):" - ] + ], + "metadata": {} }, { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, "outputs": [ { "name": "stdout", @@ -1623,7 +1480,7 @@ "[ Info: Training Machine{DeterministicIteratedModel{DeterministicPipeline{NamedTuple{,…},…}},…}.\n", "[ Info: final loss: 18.029846309038444\n", "[ Info: final training loss: 0.15870439154333713\n", - "[ Info: Stop triggered by NumberSinceBest(6) stopping criterion. \n", + "[ Info: Stop triggered by EarlyStopping.NumberSinceBest(6) stopping criterion. \n", "[ Info: Total of 13 iterations. \n", "[ Info: For silent loading, specify `verbosity=0`. \n", "import EvoTrees ✔\n", @@ -1634,7 +1491,7 @@ "[ Info: Training Machine{DeterministicIteratedModel{DeterministicPipeline{NamedTuple{,…},…}},…}.\n", "[ Info: final loss: 18.029846309038444\n", "[ Info: final training loss: 0.15870439154333713\n", - "[ Info: Stop triggered by NumberSinceBest(6) stopping criterion. \n", + "[ Info: Stop triggered by EarlyStopping.NumberSinceBest(6) stopping criterion. \n", "[ Info: Total of 13 iterations. 
\n", "┌────────────────────────────┬────────────────────┐\n", "│ models │ mean_square_errors │\n", @@ -1648,6 +1505,7 @@ ] } ], + "cell_type": "code", "source": [ "function performance(model)\n", " mach = machine(model, X, y) |> fit!\n", @@ -1663,31 +1521,33 @@ "errs = performance.(three_models)\n", "\n", "(models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty" - ] + ], + "metadata": {}, + "execution_count": 27 }, { "cell_type": "markdown", - "metadata": {}, "source": [ "---\n", "\n", "*This notebook was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*" - ] + ], + "metadata": {} } ], + "nbformat_minor": 3, "metadata": { - "kernelspec": { - "display_name": "Julia 1.6.5", - "language": "julia", - "name": "julia-1.6" - }, "language_info": { "file_extension": ".jl", "mimetype": "application/julia", "name": "julia", - "version": "1.6.5" + "version": "1.7.3" + }, + "kernelspec": { + "name": "julia-1.7", + "display_name": "Julia 1.7.3", + "language": "julia" } }, - "nbformat": 4, - "nbformat_minor": 3 + "nbformat": 4 } diff --git a/examples/boston/notebook.jl b/examples/boston/notebook.jl index da38986a..feafee14 100644 --- a/examples/boston/notebook.jl +++ b/examples/boston/notebook.jl @@ -225,7 +225,7 @@ e = evaluate!(mach, #- using Measurements -l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) +l1_loss = e.measurement[1] ± 1.96*std(e.per_fold[1])/sqrt(7) @show l1_loss # We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). diff --git a/examples/boston/notebook.unexecuted.ipynb b/examples/boston/notebook.unexecuted.ipynb index 4895e2dc..6e5c2f7e 100644 --- a/examples/boston/notebook.unexecuted.ipynb +++ b/examples/boston/notebook.unexecuted.ipynb @@ -562,7 +562,7 @@ "cell_type": "code", "source": [ "using Measurements\n", - "l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7)\n", + "l1_loss = e.measurement[1] ± 1.96*std(e.per_fold[1])/sqrt(7)\n", "@show l1_loss" ], "metadata": {}, @@ -630,11 +630,11 @@ "file_extension": ".jl", "mimetype": "application/julia", "name": "julia", - "version": "1.6.5" + "version": "1.7.3" }, "kernelspec": { - "name": "julia-1.6", - "display_name": "Julia 1.6.5", + "name": "julia-1.7", + "display_name": "Julia 1.7.3", "language": "julia" } }, From ccbed5eabeef26b5fc8b5d84a522d0b12ac2564a Mon Sep 17 00:00:00 2001 From: josephsdavid Date: Mon, 18 Jul 2022 16:17:12 -0500 Subject: [PATCH 17/24] code review --- src/types.jl | 556 ++++++++++++++------------------------------------- 1 file changed, 148 insertions(+), 408 deletions(-) diff --git a/src/types.jl b/src/types.jl index 16c3c295..1f454a63 100644 --- a/src/types.jl +++ b/src/types.jl @@ -22,7 +22,7 @@ for Model in [:NeuralNetworkClassifier, :ImageClassifier] function $Model(; builder::B = Short() , finaliser::F = Flux.softmax - , optimiser::O = Flux.Optimise.ADAM() + , optimiser::O = Flux.Optimise.Adam() , loss::L = Flux.crossentropy , epochs = 10 , batch_size = 1 @@ -60,19 +60,22 @@ end """ $(MMI.doc_header(NeuralNetworkClassifier)) -`NeuralNetworkClassifier`: a neural network model for making probabilistic predictions -of a Multiclass or OrderedFactor target, given a table of Continuous features. ) - TODO: +`NeuralNetworkClassifier` is for training a data-dependent Flux.jl neural network +for making probabilistic predictions of a `Multiclass` or `OrderedFactor` target, +given a table of `Continuous` features. 
Users provide a recipe for constructing + the network, based on properties of the data that is encountered, by specifying + an appropriate `builder`. See MLJFlux documentation for more on builders. # Training data In MLJ or MLJBase, bind an instance `model` to data with + mach = machine(model, X, y) Where - `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the scitype with `schema(X)` + are of scitype `Continuous`; check the column scitypes with `schema(X)`. - `y`: is the target, which can be any `AbstractVector` whose element scitype is `Multiclass` or `OrderedFactor` with `n_out` classes; check the scitype with `scitype(y)` @@ -80,8 +83,11 @@ Where # Hyper-parameters -- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. -- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural + network. Possible `builders` include: `MLJFlux.Linear`, `MLJFlux.Short`, + and `MLJFlux.MLP`. See MLJFlux documentation for examples of + user-defined builders. +- `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. - `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification task, the most natural loss functions are: - `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. - `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. @@ -90,21 +96,28 @@ Where - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples. - `Flux.binary_focal_loss`: Binary version of the above + Currently MLJ measures are not supported as loss functions here. - `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. 
In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. +- `batch_size::int=1`: the batch size to be used for training. the batch size represents + the number of samples per update of the networks weights. typcally, batch size should be + somewhere between 8 and 512. smaller batch sizes lead to noisier training loss curves, + while larger batch sizes lead towards smoother training loss curves. + In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), + and stick with it, and only tune the learning rate. In most examples, batch size is set + in powers of twos, but this is fairly arbitrary. - `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. - `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. - `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. -- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit!`, otherwise it will not. +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`. For training on GPU, use `CUDALibs()`. - `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a classification task, `softmax` is used for multiclass, single label regression, `sigmoid` is used for either binary classification or multi label classification (when there are multiple possible labels for a given sample). # Operations - `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same Scitype as `X` above. Predictions are - probabilistic. + features `Xnew` having the same scitype as `X` above. Predictions are + probabilistic but uncalibrated. - `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions returned above. @@ -113,14 +126,17 @@ Where The fields of `fitted_params(mach)` are: -- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. +- `chain`: The trained "chain" (Flux.jl model), namely the series of layers, + functions, and activations which make up the neural network. This includes + the final layer specified by `finaliser` (eg, `softmax`). # Report The fields of `report(mach)` are: -- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. +- `training_losses`: A vector of training losses (penalised if `lambda != 0`) in + historical order, of length `epochs + 1`. The first element is the pre-training loss. 
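Before the worked example below, here is a minimal sketch of a user-defined builder, as mentioned under the `builder` hyper-parameter above. This is an illustration only: the single hidden layer of width 64 is an arbitrary choice, and `n_in`, `n_out` and `rng` stand for the arguments (number of input features, number of classes, and the model's random number generator) that MLJFlux passes to every builder when a machine is fit.

```julia
using MLJ, Flux
import MLJFlux

# `n_in`, `n_out` and `rng` are injected by MLJFlux at `fit!` time.
builder = MLJFlux.@builder begin
    init = Flux.glorot_uniform(rng)
    Chain(Dense(n_in, 64, relu, init=init),  # 64 hidden units: arbitrary illustration
          Dense(64, n_out, init=init))       # no softmax here; `finaliser` appends it
end

NeuralNetworkClassifier = @load NeuralNetworkClassifier
clf = NeuralNetworkClassifier(builder=builder)
```

Leaving the final `softmax` out of the chain is deliberate: all MLJFlux classifiers apply the `finaliser` (by default `Flux.softmax`) on top of whatever the builder returns.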
# Examples @@ -133,16 +149,12 @@ import RDatasets using Random Random.seed!(123) -MLJ.color_off() - -using Plots -pyplot(size=(600, 300*(sqrt(5)-1))); ``` This is a very basic example, using a default builder and no standardization. -For a more advance illustration, see [`NeuralNetworkRegressor`](@ref) or [`ImageClassifier`](@ref). First, we can load the data: +For a more advanced illustration, see [`NeuralNetworkRegressor`](@ref) or [`ImageClassifier`](@ref). First, we can load the data: ```julia iris = RDatasets.dataset("datasets", "iris"); -y, X = unpack(iris, ==(:Species), colname -> true, rng=123); +y, X = unpack(iris, ==(:Species), rng=123); NeuralNetworkClassifier = @load NeuralNetworkClassifier clf = NeuralNetworkClassifier() ``` @@ -157,7 +169,7 @@ We can train the model in an incremental fashion with the `optimizer_changes_tri clf.optimiser.eta = clf.optimiser.eta * 2 clf.epochs = clf.epochs + 5 -# note that if the optimizer_changes_trigger_retraining flag was set to true +# note that if the `optimizer_changes_trigger_retraining` flag was set to true # the model would be completely retrained from scratch because the optimizer was # updated fit!(mach, verbosity=2); @@ -186,7 +198,6 @@ plot(curve.parameter_values, xscale=curve.parameter_scale, ylab = "Cross Entropy") -savefig("iris_history.png") ``` See also [`ImageClassifier`](@ref) @@ -196,25 +207,34 @@ NeuralNetworkClassifier """ $(MMI.doc_header(ImageClassifier)) -`ImageClassifier`: A neural network model for making probabilistic -"predictions of a `GrayImage` target, given a table of `Continuous` features. +`ImageClassifier` classifies images using a neural network adapted to the type + of images provided (color or greyscale). Predictions are probabistic. Users + provide a recipe for constructing the network, based on properties of the image + encountered, by specifying an appropriate `builder`. See MLJFlux documentation + for more on builders. # Training data In MLJ or MLJBase, bind an instance `model` to data with -mach = machine(model, X, y) + + mach = machine(model, X, y) + Where -- `X`: is any `AbstractVector` of input features (eg, a `DataFrame`) whose items - are of scitype `GrayImage`; check the scitype with `scitype(X)` +- `X`: is any `AbstractVector` of images with `ColorImage` or `GrayImage` + scitype; check the scitype with `scitype(X)` and refer to ScientificTypes.jl + documentation on coercing typical image formats into an appropriate type. - `y`: is the target, which can be any `AbstractVector` whose element - scitype is `Multiclass` or `OrderedFactor` with `n_out` classes; - check the scitype with `scitype(y)` + scitype is `Multiclass`; check the scitype with `scitype(y)`. # Hyper-parameters -- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. -- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `builder`: An MLJFlux builder that constructs the neural network. 
+ The fallback builds a depth-16 VGG architecture adapted to the image + size and number of target classes, with no batch normalisation; see the + Metalhead.jl documentation for details. See the example below for a + user-specified builder. +- `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. - `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification task, the most natural loss functions are: - `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. - `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. @@ -223,21 +243,26 @@ Where - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples. - `Flux.binary_focal_loss`: Binary version of the above + Currently MLJ measures are not supported as loss functions here. - `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size represents the number of samples per update of the networks weights. Typcally, batch size should be somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, while larger batch sizes lead towards smoother training loss curves. In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and only tune the learning rate. In most literature, batch size is set in powers of twos, but this is fairly arbitrary. +- `batch_size::Int=1`: The batch size to be used for training. The batch size + represents the number of samples per update of the networks weights. Batch + sizes between 8 and 512 are typical. Increasing batch size can speed up + training, especially on a GPU (`acceleration=CUDALibs()`). - `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. - `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. - `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit`, otherwise it will not. -- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. 
For a regression task, reasonable alternatives include `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit!`, otherwise it will not. +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`. For training on GPU, use `CUDALibs()`. +- `finaliser=Flux.softmax`: The final activation function of the neural network, + needed to convert outputs to probabilities (builders do not provide this). # Operations - `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same Scitype as `X` above. Predictions are - probabilistic. + features `Xnew` having the same scitype as `X` above. Predictions are + probabilistic but uncalibrated. - `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions returned above. @@ -245,13 +270,17 @@ Where # Fitted parameters The fields of `fitted_params(mach)` are: -- `chain`: The trained "chain", or series of layers, functions, and activations which make up the neural network. + +- `chain`: The trained "chain" (Flux.jl model), namely the series of layers, + functions, and activations which make up the neural network. This includes + the final layer specified by `finaliser` (eg, `softmax`). # Report The fields of `report(mach)` are: -- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. +- `training_losses`: A vector of training losses (penalised if `lambda != 0`) in + historical order, of length `epochs + 1`. The first element is the pre-training loss. # Examples @@ -262,31 +291,20 @@ using Flux import MLJFlux import MLJIteration # for `skip` -MLJ.color_off() - -using Plots -pyplot(size=(600, 300*(sqrt(5)-1))); ``` First we want to download the MNIST dataset, and unpack into images and labels ```julia import MLDatasets: MNIST -ENV["DATADEPS_ALWAYS_ACCEPT"] = true images, labels = MNIST.traindata(); ``` -In MLJ, integers cannot be used for encoding categorical data, so we must coerce them into the `Multiclass` [scientific type](https://juliaai.github.io/ScientificTypes.jl/dev/). For more in this, see [Working with Categorical Data](https://alan-turing-institute.github.io/MLJ.jl/dev/working_with_categorical_data/): +In MLJ, integers cannot be used for encoding categorical data, so we must coerce them into the `Multiclass` scitype: ```julia labels = coerce(labels, Multiclass); images = coerce(images, GrayImage); -# Checking scientific types: - -@assert scitype(images) <: AbstractVector{<:Image} -@assert scitype(labels) <: AbstractVector{<:Finite} - images[1] ``` -For general instructions on coercing image data, see [type coercion for image data](https://alan-turing-institute.github.io/ScientificTypes.jl/dev/%23Type-coercion-for-image-data-1) We start by defining a suitable `builder` object. This is a recipe for building the neural network. 
Our builder will work for images of any (constant) size, whether they be color or black and white (ie, @@ -323,7 +341,7 @@ function MLJFlux.build(b::MyConvBuilder, rng, n_in, n_out, n_channels) return Chain(front, Dense(d, n_out, init=init)) end ``` -It is important to note that in our `build` function, there is no final softmax. This is applie by default in all MLJFlux classifiers, using the `finaliser` hyperparameter of the classifier. Now that we have our builder defined, we can define the actual moel. If you have a GPU, you can substitute in `acceleration=CudaLibs()` below. Note that in the case of convolutions, this will **greatly** increase the speed of training. +It is important to note that in our `build` function, there is no final `softmax`. This is applied by default in all MLJFlux classifiers (override this using the `finaliser` hyperparameter). Now that we have our builder defined, we can define the actual model. If you have a GPU, you can substitute in `acceleration=CUDALibs()` below to greatly speed up training. ```julia ImageClassifier = @load ImageClassifier clf = ImageClassifier(builder=MyConvBuilder(3, 16, 32, 32), @@ -349,114 +367,20 @@ We can tack on 20 more epochs by modifying the `epochs` field, and iteratively f clf.epochs = clf.epochs + 20 fit!(mach, rows=1:500); ``` -We can also make predictions and calculate an out-of-sample loss estimate, in two ways! +We can also make predictions and calculate an out-of-sample loss estimate: ```julia predicted_labels = predict(mach, rows=501:1000); cross_entropy(predicted_labels, labels[501:1000]) |> mean -# alternative one liner! +``` +The preceding `fit!`/`predict`/evaluate workflow can be alternatively executed as folllows: + +```julia evaluate!(mach, resampling=Holdout(fraction_train=0.5), measure=cross_entropy, rows=1:1000, verbosity=0) ``` - -## Wrapping in iteration controls - -Any iterative MLJFlux model can be wrapped in **iteration controls**, as we demonstrate next. For more on MLJ's `IteratedModel` wrapper, see the [MLJ documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/). -The "self-iterating" classifier (`iterated_clf` below) is for iterating the image classifier defined above until a stopping criterion is hit. We use the following stopping criterion: -- `Patience(3)`: 3 consecutive increases in the loss -- `InvalidValue()`: an out-of-sample loss or a training loss that is `NaN` or `±Inf` -- `TimeLimit(t=5/60)`: training time has exceeded 5 minutes. -We can specify how often these checks (and other controls) are applied using the `Step` control. Additionally, we can define controls to -- save a snapshot of the machine every N control cycles (`save_control`) -- record traces of the out-of-sample loss and training losses for plotting (`WithLossDo`) -- record mean value traces of each Flux parameter for plotting (`Callback`) -And other controls. For a full list, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). 
-First, we define some helper functions and some empty vectors to store traces: -```julia -make2d(x::AbstractArray) = reshape(x, :, size(x)[end]) -make1d(x::AbstractArray) = reshape(x, length(x)); - -# to extract the flux parameters from a machine -parameters(mach) = make1d.(Flux.params(fitted_params(mach))); - -# trace storage -losses = [] -training_losses = [] -parameter_means = Float32[]; -epochs = [] - -# to update traces -update_loss(loss) = push!(losses, loss) -update_training_loss(losses) = push!(training_losses, losses[end]) -update_means(mach) = append!(parameter_means, mean.(parameters(mach))); -update_epochs(epoch) = push!(epochs, epoch) -``` -Next, we can define our controls! We store them in a simple vector: -```julia -save_control = - MLJIteration.skip(Save(joinpath(DIR, "mnist.jlso")), predicate=3) - -controls=[Step(2), - Patience(3), - InvalidValue(), - TimeLimit(5/60), - save_control, - WithLossDo(), - WithLossDo(update_loss), - WithTrainingLossesDo(update_training_loss), - Callback(update_means), - WithIterationsDo(update_epochs) -``` -Once the controls are defined, we can instantiate and fit our "self-iterating" classifier: -```julia -iterated_clf = IteratedModel(model=clf, - controls=controls, - resampling=Holdout(fraction_train=0.7), - measure=log_loss) - -mach = machine(iterated_clf, images, labels); -fit!(mach, rows=1:500); -``` -Next we can compare the training and out-of-sample losses, as well as view the evolution of the weights: -```julia -plot(epochs, losses, - xlab = "epoch", - ylab = "root squared error", - label="out-of-sample") -plot!(epochs, training_losses, label="training") - -savefig(joinpath(DIR, "loss.png")) - -n_epochs = length(losses) -n_parameters = div(length(parameter_means), n_epochs) -parameter_means2 = reshape(copy(parameter_means), n_parameters, n_epochs)' -plot(epochs, parameter_means2, - title="Flux parameter mean weights", - xlab = "epoch") -# **Note.** The the higher the number, the deeper the chain parameter. -savefig(joinpath(DIR, "weights.png")) -``` -Since we saved our model every few epochs, we can retrieve the snapshots so we can make predictions! -```julia -mach2 = machine(joinpath(DIR, "mnist3.jlso")) -predict_mode(mach2, images[501:503]) -``` - -## Resuming training - -If we change `iterated_clf.controls` or `clf.epochs`, we can resume training from where it left off. This is very useful for long-running training sessions, where you may be interrupted by for example a bad connection or computer hibernation. -```julia -iterated_clf.controls[2] = Patience(4) -fit!(mach, rows=1:500) - -plot(epochs, losses, - xlab = "epoch", - ylab = "root squared error", - label="out-of-sample") -plot!(epochs, training_losses, label="training") -``` See also [`NeuralNetworkClassifier`](@ref) """ @@ -479,7 +403,7 @@ for Model in [:NeuralNetworkRegressor, :MultitargetNeuralNetworkRegressor] end function $Model(; builder::B = Linear() - , optimiser::O = Flux.Optimise.ADAM() + , optimiser::O = Flux.Optimise.Adam() , loss::L = Flux.mse , epochs = 10 , batch_size = 1 @@ -516,28 +440,32 @@ end """ $(MMI.doc_header(NeuralNetworkRegressor)) -`NeuralNetworkRegressor`: A neural network model for making deterministic -predictions of a `Continuous` target, given a table of `Continuous` features. +`NeuralNetworkRegressor` is for training a data-dependent Flux.jl neural +network to predict a `Continuous` target, given a table of +`Continuous` features. 
Users provide a recipe for constructing the +network, based on properties of the data that is encountered, by specifying +an appropriate `builder`. See MLJFlux documentation for more on builders. # Training data In MLJ or MLJBase, bind an instance `model` to data with + mach = machine(model, X, y) Where - `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the scitype with `schema(X)` + are of scitype `Continuous`; check the column scitypes with `schema(X)`. - `y`: is the target, which can be any `AbstractVector` whose element scitype is `Continuous`; check the scitype with `scitype(y)` # Hyper-parameters -- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. - Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder - using the `@builder` macro, see examples for further information. -- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the updating +- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs + a neural network. Possible `builders` include: `MLJFlux.Linear`, `MLJFlux.Short`, + and `MLJFlux.MLP`. See below for an example of a user-specified builder. +- `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to @@ -550,34 +478,27 @@ Where - `Flux.mae` - `Flux.msle` - `Flux.huber_loss` + Currently MLJ measures are not supported as loss functions here. - `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size represents - the number of samples per update of the networks weights. Typcally, batch size should be - somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, - while larger batch sizes lead towards smoother training loss curves. - In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), - and stick with it, and only tune the learning rate. In most examples, batch size is set - in powers of twos, but this is fairly arbitrary. +- `batch_size::Int=1`: The batch size to be used for training. The batch size + represents the number of samples per update of the networks weights. Batch + sizes between 8 and 512 are typical. Increasing batch size can speed up + training, especially on a GPU (`acceleration=CUDALibs()`). - `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. - `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. - `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a - machine if the associated optimiser has changed. If true, the associated machine will - retrain from scratch on `fit`, otherwise it will not. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. 
If true, the associated machine will retrain from scratch on `fit!`, otherwise it will not. - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. - For training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. - Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include - `Flux.sigmoid` and the identity function (otherwise known as "linear activation"). +For training on GPU, use `CudaLibs()`. For training on GPU, use `CUDALibs()`. # Operations - `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same Scitype as `X` above. Predictions are + features `Xnew` having the same scitype as `X` above. Predictions are deterministic. @@ -585,18 +506,18 @@ Where The fields of `fitted_params(mach)` are: -- `chain`: The trained "chain", or series of layers, functions, and activations which - make up the neural network. +- `chain`: The trained "chain" (Flux.jl model), namely the series of layers, + functions, and activations which make up the neural network. This includes + the final layer specified by `finaliser` (eg, `softmax`). # Report The fields of `report(mach)` are: -- `training_losses`: The history of training losses, a vector containing the history of all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. - all the losses during training. The first element of the vector is the initial penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. - penalized loss. After the first element, the nth element corresponds to the loss of epoch n-1. - epoch n-1. +- `training_losses`: A vector of training losses (penalised if `lambda != 0`) in + historical order, of length `epochs + 1`. The first element is the pre-training loss. + # Examples In this example we build a regression model using the Boston house price dataset @@ -604,7 +525,6 @@ In this example we build a regression model using the Boston house price dataset using MLJ using MLJFlux using Flux - using Plots ``` First, we load in the data, with target `:MEDV`. We load in all features except `:CHAS`: ```julia @@ -641,10 +561,9 @@ NeuralNetworkRegressor = @load NeuralNetworkRegressor rng=123, epochs=20) ``` -For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! -not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! -magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! -neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! 
+We will arrange for standardization of the target by wrapping our model
+in `TransformedTargetModel`, and standardization of the features by
+inserting the wrapped model in a pipeline:
```julia
pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer)
```
@@ -663,7 +582,7 @@ report(mach).transformed_target_model_deterministic.training_losses
We can visually compare how the learning rate affects the predictions:
```julia
-plt = plot()
+using Plots

rates = 10. .^ (-5:0)

@@ -674,114 +593,21 @@ foreach(rates) do η
    report(mach).transformed_target_model_deterministic.model.training_losses[3:end]
  plot!(1:length(losses), losses, label=η)
end
-plt #!md

-savefig(joinpath("assets", "learning_rate.png"))

pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001
-```

-## Using Iteration Controls
+# CV estimate, based on `(X, y)`:
+evaluate!(mach, resampling=CV(nfolds=5), measure=l2)

-We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as updte the traces.
-trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as update the traces.
-`NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as update the traces.
-```julia
-# For initializing or clearing the traces:
-
-clear() = begin
-    global losses = []
-    global training_losses = []
-    global epochs = []
-    return nothing
-end
-
- # And to update the traces:
-
-update_loss(loss) = push!(losses, loss)
-update_training_loss(report) =
-    push!(training_losses,
-          report.transformed_target_model_deterministic.model.training_losses[end])
-update_epochs(epoch) = push!(epochs, epoch)
+# loss for `(Xtest, ytest)`:
+fit!(mach) # train on `(X, y)`
+yhat = predict(mach, Xtest)
+l2(yhat, ytest) |> mean
```
-For further reference of controls, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided).
To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: -```julia -controls=[Step(1), - NumberSinceBest(6), - InvalidValue(), - TimeLimit(1/60), - WithLossDo(update_loss), - WithReportDo(update_training_loss), -WithIterationsDo(update_epochs)] - - -iterated_pipe = - IteratedModel(model=pipe, - controls=controls, - resampling=Holdout(fraction_train=0.8), - measure = l2) -``` -Next, we can clear the traces, fit the model, and plot the traces: -```julia -clear() -mach = machine(iterated_pipe, X, y) -fit!(mach) - -plot(epochs, losses, - xlab = "epoch", - ylab = "mean sum of squares error", - label="out-of-sample", - legend = :topleft); -scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md - -savefig(joinpath("assets", "loss.png")) -``` - -### Brief note on iterated models - -Training an `IteratedModel` means holding out some data (80% in this case) so an -out-of-sample loss can be tracked and used in the specified stopping criterion, -`NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by -`IteratedModel` (our pipeline model) is retrained on all data for the same number of -iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned -parameters. - -## Evaluating Iterated Models - -We can evaluate our model with the `evaluate!` function: -```julia -e = evaluate!(mach, - resampling=CV(nfolds=8), - measures=[l1, l2]) - -using Measurements -l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) -@show l1_loss -``` -We take this estimate of the uncertainty of the generalization error with a [grain of -salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). -## Comparison with other models on the test set - -Although we cannot assign them statistical significance, here are comparisons, on the -untouched test set, of the eror of our self-iterating neural network regressor with a -couple of other models trained on the same data (using default hyperparameters): -```julia -function performance(model) - mach = machine(model, X, y) |> fit! - yhat = predict(mach, Xtest) - l1(yhat, ytest) |> mean -end -performance(iterated_pipe) - -three_models = [(@load EvoTreeRegressor)(), # tree boosting model - (@load LinearRegressor pkg=MLJLinearModels)(), - iterated_pipe] - -errs = performance.(three_models) - -(models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty -``` +For impementing stopping criterion and other iteration controls, refer to examples linked +from the MLJFlux documentation See also [`MultitargetNeuralNetworkRegressor`](@ref) @@ -791,19 +617,22 @@ NeuralNetworkRegressor """ $(MMI.doc_header(MultitargetNeuralNetworkRegressor)) -`MultitargetNeuralNetworkRegressor`: A neural network model for making deterministic -predictions of a `Continuous` multi-target, presented as a table, given a table of -`Continuous` features. +`MultitargetNeuralNetworkRegressor` is for training a data-dependent Flux.jl + neural network to predict a multivalued `Continuous` target, represented as a table, + given a table of `Continuous` features. Users provide a recipe for constructing the + network, based on properties of the data that is encountered, by specifying an +appropriate `builder`. See MLJFlux documentation for more on builders. 
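As a quick sketch only (assuming the `MLJFlux.@builder` macro illustrated in the Examples
section further below), a builder for a network with two hidden layers could be specified as

```julia
using MLJFlux, Flux

# `n_in` (number of input features) and `n_out` (number of target columns) are
# supplied by MLJFlux at training time, based on the data presented:
builder = MLJFlux.@builder begin
    Chain(Dense(n_in, 64, relu),
          Dense(64, 32, relu),
          Dense(32, n_out))
end
```

where the hidden-layer widths (64 and 32) are arbitrary choices.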
# Training data In MLJ or MLJBase, bind an instance `model` to data with + mach = machine(model, X, y) Where - `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the scitype with `schema(X)` + are of scitype `Continuous`; check the column scitypes with `schema(X)`. - `y`: is the target, which can be any table of output targets whose element scitype is `Continuous`; check the scitype with `schema(y)` @@ -813,7 +642,7 @@ Where - `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct your own builder using the `@builder` macro, see examples for further information. -- `optimiser::Flux.ADAM()`: A `Flux.Optimise` optimiser. The optimiser performs the +- `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a @@ -828,34 +657,27 @@ Where - `Flux.mae` - `Flux.msle` - `Flux.huber_loss` + Currently MLJ measures are not supported as loss functions here. - `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size represents - the number of samples per update of the networks weights. Typcally, batch size should be - somewhere between 8 and 512. Smaller batch sizes lead to noisier training loss curves, - while larger batch sizes lead towards smoother training loss curves. In general, it is a - good idea to pick one fairly large batch size (e.g. 32, 64, 128), and stick with it, and - only tune the learning rate. In most literature, batch size is set in powers of twos, - but this is fairly arbitrary. +- `batch_size::Int=1`: The batch size to be used for training. The batch size + represents the number of samples per update of the networks weights. Batch + sizes between 8 and 512 are typical. Increasing batch size can speed up + training, especially on a GPU (`acceleration=CUDALibs()`). - `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. - `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. - `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting - a machine if the associated optimiser has changed. If true, the associated machine will - retrain from scratch on `fit`, otherwise it will not. +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit!`, otherwise it will not. - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. - For Training on GPU, use `CudaLibs()`, otherwise defaults to `CPU`()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. -Defaults to `Flux.softmax`. For a regression task, reasonable alternatives include -`Flux.sigmoid` and the identity function (otherwise known as "linear activation"). +For Training on GPU, use `CudaLibs()`. 
For training on GPU, use `CUDALibs()`. # Operations - `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same Scitype as `X` above. Predictions are + features `Xnew` having the same scitype as `X` above. Predictions are deterministic. @@ -863,18 +685,17 @@ Defaults to `Flux.softmax`. For a regression task, reasonable alternatives inclu The fields of `fitted_params(mach)` are: -- `chain`: The trained "chain", or series of layers, functions, and activations which - make up the neural network. +- `chain`: The trained "chain" (Flux.jl model), namely the series of layers, + functions, and activations which make up the neural network. This includes + the final layer specified by `finaliser` (eg, `softmax`). # Report The fields of `report(mach)` are: -- `training_losses`: The history of training losses, a vector containing the history of - all the losses during training. The first element of the vector is the initial - penalized loss. After the first element, the nth element corresponds to the loss of - epoch n-1. +- `training_losses`: A vector of training losses (penalised if `lambda != 0`) in + historical order, of length `epochs + 1`. The first element is the pre-training loss. # Examples @@ -883,7 +704,6 @@ In this example we build a regression model using a toy dataset. using MLJ using MLJFlux using Flux -using Plots using MLJBase: augment_X ``` First, we generate some data: @@ -913,11 +733,11 @@ end Finally, we can define the model! ```julia MultitargetNeuralNetworkRegressor = @load MultitargetNeuralNetworkRegressor - model = MultitargetNeuralNetworkRegressor(builder=builder, - rng=123, - epochs=20) +model = MultitargetNeuralNetworkRegressor(builder=builder, rng=123, epochs=20) ``` -For our neural network, since different features likely have different scales, if we do not standardize the network may be implicitly biased towards features with higher magnitudes, or may have [saturated neurons](https://www.informit.com/articles/article.aspx%3fp=3131594&seqNum=2) and not train well. Therefore, standardization is key! +We will arrange for standardizaion of the the target by wrapping our model + in `TransformedTargetModel`, and standardization of the features by +inserting the wrapped model in a pipeline: ```julia pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) ``` @@ -936,7 +756,7 @@ report(mach).transformed_target_model_deterministic.training_losses We can visually compare how the learning rate affects the predictions: ```julia -plt = plot() +using Plots rates = 10. .^ (-5:0) @@ -947,108 +767,28 @@ foreach(rates) do η report(mach).transformed_target_model_deterministic.model.training_losses[3:end] plot!(1:length(losses), losses, label=η) end -plt #!md -savefig(joinpath("assets", "learning_rate.png")) pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 ``` -## Using Iteration Controls - -We can also wrap the model with MLJ Iteration controls. Suppose we want a model that trains until the out of sample loss does not improve for 6 epochs. We can use the `NumberSinceBest(6)` stopping criterion. We can also add some extra stopping criterion, `InvalidValue` and `Timelimit(1/60)`, as well as some controls to print traces of the losses. First we can define some methods to initialize or clear the traces as well as updte the traces. 
+With the learning rate fixed, we can now compute a CV estimate of the performance (using +all data bound to `mach`) and compare this with performance on the test set: ```julia -# For initializing or clearing the traces: +# custom MLJ loss: +multi_loss(yhat, y) = l2(MLJ.matrix(yhat), MLJ.matrix(y)) |> mean -clear() = begin - global losses = [] - global training_losses = [] - global epochs = [] - return nothing -end - -# And to update the traces: +# CV estimate, based on `(X, y)`: +evaluate!(mach, resampling=CV(nfolds=5), measure=multi_loss) -update_loss(loss) = push!(losses, loss) -update_training_loss(report) = - push!(training_losses, - report.transformed_target_model_deterministic.model.training_losses[end]) -update_epochs(epoch) = push!(epochs, epoch) -``` -For further reference of controls, see [the documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/%23Controls-provided). To apply the controls, we simply stack them in a vector and then make an `IteratedModel`: -```julia -controls=[Step(1), - NumberSinceBest(6), - InvalidValue(), - TimeLimit(1/60), - WithLossDo(update_loss), - WithReportDo(update_training_loss), -WithIterationsDo(update_epochs)] - -iterated_pipe = - IteratedModel(model=pipe, - controls=controls, - resampling=Holdout(fraction_train=0.8), - measure = l2) -``` -Next, we can clear the traces, fit the model, and plot the traces: -```julia -clear() -mach = machine(iterated_pipe, X, y) +# loss for `(Xtest, test)`: fit!(mach) - -plot(epochs, losses, - xlab = "epoch", - ylab = "mean sum of squares error", - label="out-of-sample", - legend = :topleft); -scatter!(twinx(), epochs, training_losses, label="training", color=:red) #!md - -savefig(joinpath("assets", "loss.png")) +yhat = predict(mach, Xtest) +multi_loss(yhat, y) ``` -### Brief note on iterated models - -Training an `IteratedModel` means holding out some data (80% in this case) so an out-of-sample loss can be tracked and used in the specified stopping criterion, `NumberSinceBest(4)`. However, once the stop is triggered, the model wrapped by `IteratedModel` (our pipeline model) is retrained on all data for the same number of iterations. Calling `predict(mach, Xnew)` on new data uses the updated learned parameters. - -## Evaluating Iterated Models - -We can evaluate our model with the `evaluate!` function: -```julia -e = evaluate!(mach, - resampling=CV(nfolds=8), - measures=[l1, l2]) - -using Measurements -l1_loss = e.measurement[1] ± std(e.per_fold[1])/sqrt(7) -@show l1_loss -``` -We take this estimate of the uncertainty of the generalization error with a [grain of salt](https://direct.mit.edu/neco/article-abstract/10/7/1895/6224/Approximate-Statistical-Tests-for-Comparing)). - -## Comparison with other models on the test set - -Although we cannot assign them statistical significance, here are comparisons, on the untouched test set, of the eror of our self-iterating neural network regressor with a couple of other models trained on the same data (using default hyperparameters): -```julia - -function performance(model) - mach = machine(model, X, y) |> fit! 
- yhat = predict(mach, Xtest) - l1(yhat, ytest) |> mean -end -performance(iterated_pipe) - -three_models = [(@load EvoTreeRegressor)(), # tree boosting model - (@load LinearRegressor pkg=MLJLinearModels)(), - iterated_pipe] - -errs = performance.(three_models) - -(models=MLJ.name.(three_models), mean_square_errors=errs) |> pretty - - -``` See also [`NeuralNetworkRegressor`](@ref) """ From 8ce55331f4315167db5e2010c72d9b1882622d5b Mon Sep 17 00:00:00 2001 From: "Anthony D. Blaom" Date: Mon, 22 Aug 2022 13:23:35 +1200 Subject: [PATCH 18/24] tweak doc strings --- src/classifier.jl | 3 +- src/types.jl | 736 ++++++++++++++++++++++++++++------------------ 2 files changed, 456 insertions(+), 283 deletions(-) diff --git a/src/classifier.jl b/src/classifier.jl index 2825dff7..40bcf5bd 100644 --- a/src/classifier.jl +++ b/src/classifier.jl @@ -14,8 +14,7 @@ MLJFlux.build(model::NeuralNetworkClassifier, rng, shape) = # returns the model `fitresult` (see "Adding Models for General Use" # section of the MLJ manual) which must always have the form `(chain, -# metadata)`, where `metadata` is anything extra neede by `predict` may -# require: +# metadata)`, where `metadata` is anything extra needed by `predict`: MLJFlux.fitresult(model::NeuralNetworkClassifier, chain, y) = (chain, MLJModelInterface.classes(y[1])) diff --git a/src/types.jl b/src/types.jl index 1f454a63..7c383c5c 100644 --- a/src/types.jl +++ b/src/types.jl @@ -72,54 +72,86 @@ In MLJ or MLJBase, bind an instance `model` to data with mach = machine(model, X, y) -Where +Here: -- `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the column scitypes with `schema(X)`. -- `y`: is the target, which can be any `AbstractVector` whose element - scitype is `Multiclass` or `OrderedFactor` with `n_out` classes; - check the scitype with `scitype(y)` +- `X` is any table of input features (eg, a `DataFrame`) whose columns are of scitype + `Continuous`; check column scitypes with `schema(X)`. + +- `y` is the target, which can be any `AbstractVector` whose element scitype is `Multiclass` + or `OrderedFactor`; check the scitype with `scitype(y)` + +Train the machine with `fit!(mach, rows=...)`. # Hyper-parameters -- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural - network. Possible `builders` include: `MLJFlux.Linear`, `MLJFlux.Short`, - and `MLJFlux.MLP`. See MLJFlux documentation for examples of - user-defined builders. -- `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. -- `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification task, the most natural loss functions are: - - `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. - - `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. 
- - `Flux.binarycrossentropy`: Typically used as loss in binary classification, with labels in a 1-hot encoded format. - - `Flux.logitbinarycrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `sigmoid` and then calculating binary crossentropy. - - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. - - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples. - - `Flux.binary_focal_loss`: Binary version of the above - Currently MLJ measures are not supported as loss functions here. -- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. -- `batch_size::int=1`: the batch size to be used for training. the batch size represents - the number of samples per update of the networks weights. typcally, batch size should be - somewhere between 8 and 512. smaller batch sizes lead to noisier training loss curves, - while larger batch sizes lead towards smoother training loss curves. - In general, it is a good idea to pick one fairly large batch size (e.g. 32, 64, 128), - and stick with it, and only tune the learning rate. In most examples, batch size is set - in powers of twos, but this is fairly arbitrary. -- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. -- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. -- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit!`, otherwise it will not. -- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`. For training on GPU, use `CUDALibs()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults to `Flux.softmax`. For a classification task, `softmax` is used for multiclass, single label regression, `sigmoid` is used for either binary classification or multi label classification (when there are multiple possible labels for a given sample). +- `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible + `builders` include: `MLJFlux.Linear`, `MLJFlux.Short`, and `MLJFlux.MLP`. See + MLJFlux.jl documentation for examples of user-defined builders. + +- `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the + updating of the weights of the network. For further reference, see [the Flux optimiser + documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a + learning rate (the update rate of the optimizer), a good rule of thumb is to start out + at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. + +- `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a + function which can be called in the form `loss(yhat, y)`. Possible loss functions are + listed in [the Flux loss function + documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). 
For a classification
+  task, the most natural loss functions are:
+
+  - `Flux.crossentropy`: Standard multiclass classification loss, also known as the log
+    loss.
+
+  - `Flux.logitcrossentropy`: Mathematically equal to crossentropy, but numerically more
+    stable than finalising the outputs with `softmax` and then calculating
+    crossentropy. You will need to specify `finaliser=identity` to remove MLJFlux's
+    default softmax finaliser, and understand that the output of `predict` is then
+    unnormalized (no longer probabilistic).
+
+  - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives.
+
+  - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than
+    easier examples.
+
+  Currently MLJ measures are not supported as values of `loss`.
+
+- `epochs::Int=10`: The duration of training, in epochs. Typically, one epoch represents
+  one pass through the complete training dataset.
+
+- `batch_size::Int=1`: The batch size to be used for training, representing the number of
+  samples per update of the network weights. Typically, batch size is between 8 and
+  512. Increasing batch size may accelerate training if `acceleration=CUDALibs()` and a
+  GPU is available.
+
+- `lambda::Float64=0`: The strength of the weight regularization penalty. Can be any value
+  in the range `[0, ∞)`.
+
+- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0
+  represents L2 regularization, and a value of 1 represents L1 regularization.
+
+- `rng::Union{AbstractRNG, Int64}`: The random number generator or seed used during
+  training.
+
+- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when re-fitting
+  a machine if the associated optimiser has changed. If `true`, the associated machine
+  will retrain from scratch on `fit!` call, otherwise it will not.
+
+- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For
+  training on GPU, use `CUDALibs()`.
+
+- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults
+  to `Flux.softmax`.


# Operations

-- `predict(mach, Xnew)`: return predictions of the target given new
-  features `Xnew` having the same scitype as `X` above. Predictions are
-  probabilistic but uncalibrated.
-- `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions
-  returned above.
+- `predict(mach, Xnew)`: return predictions of the target given new features `Xnew`, which
+  should have the same scitype as `X` above. Predictions are probabilistic but uncalibrated.
+
+- `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions returned
+  above.


# Fitted parameters

The fields of `fitted_params(mach)` are:

- `chain`: The trained "chain" (Flux.jl model), namely the series of layers,
-  functions, and activations which make up the neural network. This includes
+  functions, and activations which make up the neural network. This includes
  the final layer specified by `finaliser` (eg, `softmax`).


# Report

The fields of `report(mach)` are:

- `training_losses`: A vector of training losses (penalised if `lambda != 0`) in
  historical order, of length `epochs + 1`. The first element is the pre-training loss.

# Examples

-In this example we build a classification model using the Iris dataset.
+In this example we build a classification model using the Iris dataset. This is a very
+basic example, using a default builder and no standardization. For a more advanced
+illustration, see [`NeuralNetworkRegressor`](@ref) or [`ImageClassifier`](@ref), and
+examples in the MLJFlux.jl documentation.
+ ```julia using MLJ using Flux import RDatasets +``` -using Random -Random.seed!(123) +First, we can load the data: -``` -This is a very basic example, using a default builder and no standardization. -For a more advanced illustration, see [`NeuralNetworkRegressor`](@ref) or [`ImageClassifier`](@ref). First, we can load the data: ```julia iris = RDatasets.dataset("datasets", "iris"); -y, X = unpack(iris, ==(:Species), rng=123); -NeuralNetworkClassifier = @load NeuralNetworkClassifier +y, X = unpack(iris, ==(:Species), rng=123); # a vector and a table +NeuralNetworkClassifier = @load NeuralNetworkClassifier pkg=MLJFlux clf = NeuralNetworkClassifier() ``` + Next, we can train the model: + ```julia -import Random.seed!; seed!(123) mach = machine(clf, X, y) fit!(mach) ``` -We can train the model in an incremental fashion with the `optimizer_changes_trigger_retraining` flag set to false (which is by default). Here, we change the number of iterations and the learning rate of the optimiser: + +We can train the model in an incremental fashion, altering the learning rate as we go, +provided `optimizer_changes_trigger_retraining` is `false` (the default). Here, we also +change the number of (total) iterations: + ```julia clf.optimiser.eta = clf.optimiser.eta * 2 clf.epochs = clf.epochs + 5 -# note that if the `optimizer_changes_trigger_retraining` flag was set to true -# the model would be completely retrained from scratch because the optimizer was -# updated -fit!(mach, verbosity=2); +fit!(mach, verbosity=2) # trains 5 more epochs ``` + We can inspect the mean training loss using the `cross_entropy` function: -```julia +```julia training_loss = cross_entropy(predict(mach, X), y) |> mean - ``` + And we can access the Flux chain (model) using `fitted_params`: + ```julia chain = fitted_params(mach).chain ``` -Finally, we can see how the out-of-sample performance changes over time, using the `learning_curve` function + +Finally, we can see how the out-of-sample performance changes over time, using MLJ's +`learning_curve` function: + ```julia r = range(clf, :epochs, lower=1, upper=200, scale=:log10) curve = learning_curve(clf, X, y, @@ -199,19 +239,19 @@ plot(curve.parameter_values, ylab = "Cross Entropy") ``` -See also -[`ImageClassifier`](@ref) + +See also [`ImageClassifier`](@ref). + """ NeuralNetworkClassifier """ $(MMI.doc_header(ImageClassifier)) -`ImageClassifier` classifies images using a neural network adapted to the type - of images provided (color or greyscale). Predictions are probabistic. Users - provide a recipe for constructing the network, based on properties of the image - encountered, by specifying an appropriate `builder`. See MLJFlux documentation - for more on builders. +`ImageClassifier` classifies images using a neural network adapted to the type of images +provided (color or gray scale). Predictions are probabilistic. Users provide a recipe for +constructing the network, based on properties of the image encountered, by specifying an +appropriate `builder`. See MLJFlux documentation for more on builders. # Training data @@ -219,50 +259,87 @@ In MLJ or MLJBase, bind an instance `model` to data with mach = machine(model, X, y) -Where -- `X`: is any `AbstractVector` of images with `ColorImage` or `GrayImage` - scitype; check the scitype with `scitype(X)` and refer to ScientificTypes.jl - documentation on coercing typical image formats into an appropriate type. 
-- `y`: is the target, which can be any `AbstractVector` whose element +Here: + +- `X` is any `AbstractVector` of images with `ColorImage` or `GrayImage` scitype; check + the scitype with `scitype(X)` and refer to ScientificTypes.jl documentation on coercing + typical image formats into an appropriate type. + +- `y` is the target, which can be any `AbstractVector` whose element scitype is `Multiclass`; check the scitype with `scitype(y)`. +Train the machine with `fit!(mach, rows=...)`. + # Hyper-parameters -- `builder`: An MLJFlux builder that constructs the neural network. - The fallback builds a depth-16 VGG architecture adapted to the image - size and number of target classes, with no batch normalisation; see the - Metalhead.jl documentation for details. See the example below for a - user-specified builder. -- `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see either the examples or [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. -- `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a function which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification task, the most natural loss functions are: - - `Flux.crossentropy`: Typically used as loss in multiclass classification, with labels in a 1-hot encoded format. - - `Flux.logitcrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `softmax` and then calculating crossentropy. - - `Flux.binarycrossentropy`: Typically used as loss in binary classification, with labels in a 1-hot encoded format. - - `Flux.logitbinarycrossentopy`: Mathematically equal to crossentropy, but computationally more numerically stable than finalising the outputs with `sigmoid` and then calculating binary crossentropy. - - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives. - - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than easier examples. - - `Flux.binary_focal_loss`: Binary version of the above - Currently MLJ measures are not supported as loss functions here. -- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size - represents the number of samples per update of the networks weights. Batch - sizes between 8 and 512 are typical. Increasing batch size can speed up - training, especially on a GPU (`acceleration=CUDALibs()`). -- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value in the range `[0, ∞)`. -- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. -- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. 
If true, the associated machine will retrain from scratch on `fit!`, otherwise it will not.
-- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`. For training on GPU, use `CUDALibs()`.
-- `finaliser=Flux.softmax`: The final activation function of the neural network,
-  needed to convert outputs to probabilities (builders do not provide this).
+- `builder`: An MLJFlux builder that constructs the neural network. The fallback builds a
+  depth-16 VGG architecture adapted to the image size and number of target classes, with
+  no batch normalization; see the Metalhead.jl documentation for details. See the example
+  below for a user-specified builder. A convenience macro `@builder` is also available.
+
+- `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the
+  updating of the weights of the network. For further reference, see [the Flux optimiser
+  documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a
+  learning rate (the update rate of the optimizer), a good rule of thumb is to start out
+  at `10e-3`, and tune using powers of 10 between `1` and `1e-7`.
+
+- `loss=Flux.crossentropy`: The loss function which the network will optimize. Should be a
+  function which can be called in the form `loss(yhat, y)`. Possible loss functions are
+  listed in [the Flux loss function
+  documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a classification
+  task, the most natural loss functions are:
+
+  - `Flux.crossentropy`: Standard multiclass classification loss, also known as the log
+    loss.
+
+  - `Flux.logitcrossentropy`: Mathematically equal to crossentropy, but numerically more
+    stable than finalising the outputs with `softmax` and then calculating
+    crossentropy. You will need to specify `finaliser=identity` to remove MLJFlux's
+    default softmax finaliser, and understand that the output of `predict` is then
+    unnormalized (no longer probabilistic).
+
+  - `Flux.tversky_loss`: Used with imbalanced data to give more weight to false negatives.
+
+  - `Flux.focal_loss`: Used with highly imbalanced data. Weights harder examples more than
+    easier examples.
+
+  Currently MLJ measures are not supported as values of `loss`.
+
+- `epochs::Int=10`: The duration of training, in epochs. Typically, one epoch represents
+  one pass through the complete training dataset.
+
+- `batch_size::Int=1`: The batch size to be used for training, representing the number of
+  samples per update of the network weights. Typically, batch size is between 8 and
+  512. Increasing batch size may accelerate training if `acceleration=CUDALibs()` and a
+  GPU is available.
+
+- `lambda::Float64=0`: The strength of the weight regularization penalty. Can be any value
+  in the range `[0, ∞)`.
+
+- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0
+  represents L2 regularization, and a value of 1 represents L1 regularization.
+
+- `rng::Union{AbstractRNG, Int64}`: The random number generator or seed used during
+  training.
+
+- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when re-fitting
+  a machine if the associated optimiser has changed. If `true`, the associated machine
+  will retrain from scratch on `fit!` call, otherwise it will not.
+
+- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For
+  training on GPU, use `CUDALibs()`.
+ +- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults + to `Flux.softmax`. # Operations -- `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same scitype as `X` above. Predictions are - probabilistic but uncalibrated. +- `predict(mach, Xnew)`: return predictions of the target given new features `Xnew`, which + should have the same scitype as `X` above. Predictions are probabilistic but + uncalibrated. + - `predict_mode(mach, Xnew)`: Return the modes of the probabilistic predictions returned above. @@ -279,39 +356,50 @@ The fields of `fitted_params(mach)` are: # Report The fields of `report(mach)` are: + - `training_losses`: A vector of training losses (penalised if `lambda != 0`) in historical order, of length `epochs + 1`. The first element is the pre-training loss. # Examples -In this example we use MLJ to classify the MNIST image dataset +In this example we use MLJFlux and a custom builder to classify the MNIST image dataset. + ```julia using MLJ using Flux import MLJFlux -import MLJIteration # for `skip` - +import MLJIteration # for `skip` control ``` -First we want to download the MNIST dataset, and unpack into images and labels + +First we want to download the MNIST dataset, and unpack into images and labels: + ```julia import MLDatasets: MNIST - -images, labels = MNIST.traindata(); +data = MNIST(split=:train) +images, labels = data.features, data.targets ``` -In MLJ, integers cannot be used for encoding categorical data, so we must coerce them into the `Multiclass` scitype: + +In MLJ, integers cannot be used for encoding categorical data, so we must coerce them into +the `Multiclass` scitype: + ```julia labels = coerce(labels, Multiclass); -images = coerce(images, GrayImage); +``` + +Above `images` is a single array but MLJFlux requires the images to be a vector of +individual image arrays: +``` + images = coerce(images, GrayImage); images[1] ``` -We start by defining a suitable `builder` object. This is a recipe -for building the neural network. Our builder will work for images of -any (constant) size, whether they be color or black and white (ie, -single or multi-channel). The architecture always consists of six -alternating convolution and max-pool layers, and a final dense -layer; the filter size and the number of channels after each -convolution layer is customisable. + +We start by defining a suitable `builder` object. This is a recipe for building the neural +network. Our builder will work for images of any (constant) size, whether they be color or +black and white (ie, single or multi-channel). The architecture always consists of six +alternating convolution and max-pool layers, and a final dense layer; the filter size and +the number of channels after each convolution layer is customizable. + ```julia import MLJFlux @@ -341,38 +429,53 @@ function MLJFlux.build(b::MyConvBuilder, rng, n_in, n_out, n_channels) return Chain(front, Dense(d, n_out, init=init)) end ``` -It is important to note that in our `build` function, there is no final `softmax`. This is applied by default in all MLJFlux classifiers (override this using the `finaliser` hyperparameter). Now that we have our builder defined, we can define the actual model. If you have a GPU, you can substitute in `acceleration=CUDALibs()` below to greatly speed up training. + +It is important to note that in our `build` function, there is no final `softmax`. 
This is +applied by default in all MLJFlux classifiers (override this using the `finaliser` +hyperparameter). + +Now that our builder is defined, we can instantiate the actual MLJFlux model. If you have +a GPU, you can substitute in `acceleration=CUDALibs()` below to speed up training. + ```julia -ImageClassifier = @load ImageClassifier +ImageClassifier = @load ImageClassifier pkg=MLJFlux clf = ImageClassifier(builder=MyConvBuilder(3, 16, 32, 32), batch_size=50, epochs=10, rng=123) ``` -You can add flux options such as `optimiser` and `loss` in the snippet above. Currently, `loss` must be a flux-compatible loss, and not an MLJ measure. -Next, we can bind the model with the data in a machine, and fit the first 500 or so images: + +You can add Flux options such as `optimiser` and `loss` in the snippet above. Currently, +`loss` must be a flux-compatible loss, and not an MLJ measure. + +Next, we can bind the model with the data in a machine, and train using the first 500 +images: + ```julia mach = machine(clf, images, labels); - fit!(mach, rows=1:500, verbosity=2); - report(mach) - chain = fitted_params(mach) - Flux.params(chain)[2] ``` -We can tack on 20 more epochs by modifying the `epochs` field, and iteratively fit some more: + +We can tack on 20 more epochs by modifying the `epochs` field, and iteratively fit some +more: + ```julia clf.epochs = clf.epochs + 20 -fit!(mach, rows=1:500); +fit!(mach, rows=1:500, verbosity=2); ``` -We can also make predictions and calculate an out-of-sample loss estimate: + +We can also make predictions and calculate an out-of-sample loss estimate, using any MLJ +measure (loss/score): + ```julia predicted_labels = predict(mach, rows=501:1000); cross_entropy(predicted_labels, labels[501:1000]) |> mean ``` -The preceding `fit!`/`predict`/evaluate workflow can be alternatively executed as folllows: + +The preceding `fit!`/`predict`/evaluate workflow can be alternatively executed as follows: ```julia evaluate!(mach, @@ -381,8 +484,9 @@ evaluate!(mach, rows=1:1000, verbosity=0) ``` -See also -[`NeuralNetworkClassifier`](@ref) + +See also [`NeuralNetworkClassifier`](@ref). + """ ImageClassifier @@ -440,11 +544,11 @@ end """ $(MMI.doc_header(NeuralNetworkRegressor)) -`NeuralNetworkRegressor` is for training a data-dependent Flux.jl neural -network to predict a `Continuous` target, given a table of -`Continuous` features. Users provide a recipe for constructing the -network, based on properties of the data that is encountered, by specifying -an appropriate `builder`. See MLJFlux documentation for more on builders. +`NeuralNetworkRegressor` is for training a data-dependent Flux.jl neural network to +predict a `Continuous` target, given a table of `Continuous` features. Users provide a +recipe for constructing the network, based on properties of the data that is encountered, +by specifying an appropriate `builder`. See MLJFlux documentation for more on builders. + # Training data @@ -452,139 +556,184 @@ In MLJ or MLJBase, bind an instance `model` to data with mach = machine(model, X, y) -Where +Here: -- `X`: is any table of input features (eg, a `DataFrame`) whose columns +- `X` is any table of input features (eg, a `DataFrame`) whose columns are of scitype `Continuous`; check the column scitypes with `schema(X)`. -- `y`: is the target, which can be any `AbstractVector` whose element +- `y` is the target, which can be any `AbstractVector` whose element scitype is `Continuous`; check the scitype with `scitype(y)` +Train the machine with `fit!(mach, rows=...)`. 
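For example, a minimal sketch of this workflow, using synthetic data in place of a real
dataset (see the Examples section below for a complete worked example):

```julia
using MLJ
NeuralNetworkRegressor = @load NeuralNetworkRegressor pkg=MLJFlux

X, y = make_regression(100, 5)      # a table `X` and a `Continuous` vector `y`
model = NeuralNetworkRegressor(epochs=20)
mach = machine(model, X, y)
fit!(mach, rows=1:80)               # train on the first 80 rows only
yhat = predict(mach, rows=81:100)   # predictions for the remaining rows
```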
+ # Hyper-parameters -- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs - a neural network. Possible `builders` include: `MLJFlux.Linear`, `MLJFlux.Short`, - and `MLJFlux.MLP`. See below for an example of a user-specified builder. -- `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the updating - of the weights of the network. For further reference, see either the examples or - [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). - To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to - start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. +- `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural + network. Possible `builders` include: `MLJFlux.Linear`, `MLJFlux.Short`, and + `MLJFlux.MLP`. See MLJFlux documentation for more on builders, and the example below + for using the `@builder` convenience macro. + +- `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the + updating of the weights of the network. For further reference, see [the Flux optimiser + documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a + learning rate (the update rate of the optimizer), a good rule of thumb is to start out + at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. + - `loss=Flux.mse`: The loss function which the network will optimize. Should be a function - which can be called in the form `loss(yhat, y)`. - Possible loss functions are listed in [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). - For a regression task, the most natural loss functions are: - - `Flux.mse` - - `Flux.mae` - - `Flux.msle` - - `Flux.huber_loss` - Currently MLJ measures are not supported as loss functions here. -- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents - one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. The batch size - represents the number of samples per update of the networks weights. Batch - sizes between 8 and 512 are typical. Increasing batch size can speed up - training, especially on a GPU (`acceleration=CUDALibs()`). -- `lambda::Float64=0`: The stregth of the regularization used during training. Can be any value + which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in + [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). + For a regression task, natural loss functions are: + + - `Flux.mse` + + - `Flux.mae` + + - `Flux.msle` + + - `Flux.huber_loss` + + Currently MLJ measures are not supported as loss functions here. + +- `epochs::Int=10`: The duration of training, in epochs. Typically, one epoch represents + one pass through the complete the training dataset. + +- `batch_size::int=1`: the batch size to be used for training, representing the number of + samples per update of the network weights. Typically, batch size is between 8 and + 512. Increasing batch size may accelerate training if `acceleration=CUDALibs()` and a + GPU is available. + +- `lambda::Float64=0`: The strength of the weight regularization penalty. Can be any value in the range `[0, ∞)`. -- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. - A value of 0 represents L2 regularization, and a value of 1 represents L1 regularization. 
-- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit!`, otherwise it will not. -- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. -For training on GPU, use `CudaLibs()`. For training on GPU, use `CUDALibs()`. + +- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 + represents L2 regularization, and a value of 1 represents L1 regularization. + +- `rng::Union{AbstractRNG, Int64}`: The random number generator or seed used during + training. + +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when re-fitting + a machine if the associated optimiser has changed. If `true`, the associated machine + will retrain from scratch on `fit!` call, otherwise it will not. + +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For + Training on GPU, use `CudaLibs()`. + +- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults + to `Flux.softmax`. # Operations -- `predict(mach, Xnew)`: return predictions of the target given new - features `Xnew` having the same scitype as `X` above. Predictions are - deterministic. +- `predict(mach, Xnew)`: return predictions of the target given new features `Xnew`, which + should have the same scitype as `X` above. # Fitted parameters The fields of `fitted_params(mach)` are: -- `chain`: The trained "chain" (Flux.jl model), namely the series of layers, - functions, and activations which make up the neural network. This includes - the final layer specified by `finaliser` (eg, `softmax`). +- `chain`: The trained "chain" (Flux.jl model), namely the series of layers, functions, + and activations which make up the neural network. This includes the final layer + specified by `finaliser` (eg, `softmax`). # Report The fields of `report(mach)` are: -- `training_losses`: A vector of training losses (penalised if `lambda != 0`) in +- `training_losses`: A vector of training losses (penalized if `lambda != 0`) in historical order, of length `epochs + 1`. The first element is the pre-training loss. # Examples -In this example we build a regression model using the Boston house price dataset +In this example we build a regression model for the Boston house price dataset. + ```julia - using MLJ - using MLJFlux - using Flux +using MLJ +import MLJFlux +using Flux ``` -First, we load in the data, with target `:MEDV`. We load in all features except `:CHAS`: + +First, we load in the data: The `:MEDV` column becomes the target vector `y`, and all +remaining columns go into a table `X`, with the exception of `:CHAS`: + ```julia data = OpenML.load(531); # Loads from https://www.openml.org/d/531 - y, X = unpack(data, ==(:MEDV), !=(:CHAS); rng=123); scitype(y) schema(X) ``` -Since MLJFlux models do not handle ordered factors, we can treat `:RAD` as `Continuous`: + +Since MLJFlux models do not handle ordered factors, we'll treat `:RAD` as `Continuous`: + ```julia X = coerce(X, :RAD=>Continuous) ``` -Lets also make a test set: + +Lets also split off a test set: + ```julia (X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); ``` -Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. 
`init` is the function used to generate the random initial weights of the network. -expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. -random initial weights of the network. + +Next, we can define a `builder`, making use of a convenience macro to do so. In the +following `@builder` call, `n_in` is a proxy for the number input features (which will be +known at `fit!` time) and `rng` is a proxy for a RNG (which will be passed from the `rng` +field of `model` defined below). We also have the parameter `n_out` which is the number of +output features. As we are doing single target regression, the value passed will always be +`1`, but the builder we define will also work for [`MultitargetNeuralRegressor`](@ref). + ```julia builder = MLJFlux.@builder begin - init=Flux.glorot_uniform(rng) - Chain(Dense(n_in, 64, relu, init=init), + init=Flux.glorot_uniform(rng) + Chain( + Dense(n_in, 64, relu, init=init), Dense(64, 32, relu, init=init), - Dense(32, 1, init=init)) + Dense(32, n_out, init=init), + ) end ``` -Finally, we can define the model! + +Instantiating a model: + ```julia -NeuralNetworkRegressor = @load NeuralNetworkRegressor - model = NeuralNetworkRegressor(builder=builder, - rng=123, - epochs=20) +NeuralNetworkRegressor = @load NeuralNetworkRegressor pkg=MLJFlux +model = NeuralNetworkRegressor( + builder=builder, + rng=123, + epochs=20 +) ``` -We will arrange for standardizaion of the the target by wrapping our model - in `TransformedTargetModel`, and standardization of the features by -inserting the wrapped model in a pipeline: + +We arrange for standardization of the the target by wrapping our model in +`TransformedTargetModel`, and standardization of the features by inserting the wrapped +model in a pipeline: + ```julia pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) ``` -If we fit with a high verbosity (>1), we will see the losses during training. We can also see the losses in the output of `report(mach)` -also see the losses in the output of `report(mach)` + +If we fit with a high verbosity (>1), we will see the losses during training. We can also +see the losses in the output of `report(mach)`. + ```julia mach = machine(pipe, X, y) fit!(mach, verbosity=2) # first element initial loss, 2:end per epoch training losses -report(mach).transformed_target_model_deterministic.training_losses - +report(mach).transformed_target_model_deterministic.model.training_losses ``` ## Experimenting with learning rate We can visually compare how the learning rate affects the predictions: + ```julia using Plots -rates = 10. .^ (-5:0) +rates = rates = [5e-5, 1e-4, 0.005, 0.001, 0.05] +plt=plot() foreach(rates) do η pipe.transformed_target_model_deterministic.model.optimiser.eta = η @@ -594,9 +743,15 @@ foreach(rates) do η plot!(1:length(losses), losses, label=η) end +plt pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 +``` + +With the learning rate fixed, we compute a CV estimate of the performance (using +all data bound to `mach`) and compare this with performance on the test set: +```julia # CV estimate, based on `(X, y)`: evaluate!(mach, resampling=CV(nfolds=5), measure=l2) @@ -606,8 +761,11 @@ yhat = predict(mach, Xtest) l2(yhat, ytest) |> mean ``` -For impementing stopping criterion and other iteration controls, refer to examples linked -from the MLJFlux documentation +These losses, for the pipeline model, refer to the target on the original, unstandardized, +scale. 
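One way to add early stopping to the workflow above is to wrap the atomic MLJFlux model (rather than the whole pipeline) using MLJ's iteration controls. The following is a sketch only, assuming `IteratedModel` and the standard `Step`, `Patience` and `NumberLimit` controls re-exported by MLJ:

```julia
# wrap the MLJFlux model so training proceeds one epoch at a time and stops
# when the out-of-sample loss fails to improve four times in a row:
iterated_model = IteratedModel(
    model=model,
    resampling=Holdout(fraction_train=0.8),
    measure=l2,
    controls=[Step(1), Patience(4), NumberLimit(100)],
)

# substitute the wrapped model into the same pipeline as before:
iterated_pipe = Standardizer |> TransformedTargetModel(iterated_model, target=Standardizer)
mach2 = machine(iterated_pipe, X, y)
fit!(mach2, verbosity=2)
```
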
+ +For implementing stopping criterion and other iteration controls, refer to examples linked +from the MLJFlux documentation. See also [`MultitargetNeuralNetworkRegressor`](@ref) @@ -617,11 +775,11 @@ NeuralNetworkRegressor """ $(MMI.doc_header(MultitargetNeuralNetworkRegressor)) -`MultitargetNeuralNetworkRegressor` is for training a data-dependent Flux.jl - neural network to predict a multivalued `Continuous` target, represented as a table, - given a table of `Continuous` features. Users provide a recipe for constructing the - network, based on properties of the data that is encountered, by specifying an -appropriate `builder`. See MLJFlux documentation for more on builders. +`MultitargetNeuralNetworkRegressor` is for training a data-dependent Flux.jl neural +network to predict a multi-valued `Continuous` target, represented as a table, given a +table of `Continuous` features. Users provide a recipe for constructing the network, based +on properties of the data that is encountered, by specifying an appropriate `builder`. See +MLJFlux documentation for more on builders. # Training data @@ -629,50 +787,70 @@ In MLJ or MLJBase, bind an instance `model` to data with mach = machine(model, X, y) -Where +Here: -- `X`: is any table of input features (eg, a `DataFrame`) whose columns - are of scitype `Continuous`; check the column scitypes with `schema(X)`. -- `y`: is the target, which can be any table of output targets whose element - scitype is `Continuous`; check the scitype with `schema(y)` +- `X` is any table of input features (eg, a `DataFrame`) whose columns are of scitype + `Continuous`; check column scitypes with `schema(X)`. + +- `y` is the target, which can be any table of output targets whose element scitype is + `Continuous`; check column scitypes with `schema(y)`. # Hyper-parameters - `builder=MLJFlux.Linear(σ=Flux.relu)`: An MLJFlux builder that constructs a neural - network. Possible `builders` include: `Linear`, `Short`, and `MLP`. You can construct - your own builder using the `@builder` macro, see examples for further information. + network. Possible `builders` include: `Linear`, `Short`, and `MLP`. See MLJFlux + documentation for more on builders, and the example below for using the `@builder` + convenience macro. + - `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the - updating of the weights of the network. For further reference, see either the examples - or [the Flux optimiser + updating of the weights of the network. For further reference, see [the Flux optimiser documentation](https://fluxml.ai/Flux.jl/stable/training/optimisers/). To choose a learning rate (the update rate of the optimizer), a good rule of thumb is to start out at `10e-3`, and tune using powers of 10 between `1` and `1e-7`. -- `loss=Flux.mse`: The loss function which the network will optimize. Should be a - function which can be called in the form `loss(yhat, y)`. Possible loss functions are - listed in [the Flux loss function - documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). For a regression task, - the most natural loss functions are: - - `Flux.mse` - - `Flux.mae` - - `Flux.msle` - - `Flux.huber_loss` - Currently MLJ measures are not supported as loss functions here. -- `epochs::Int=10`: The number of epochs to train for. Typically, one epoch represents - one pass through the entirety of the training dataset. -- `batch_size::Int=1`: The batch size to be used for training. 
The batch size - represents the number of samples per update of the networks weights. Batch - sizes between 8 and 512 are typical. Increasing batch size can speed up - training, especially on a GPU (`acceleration=CUDALibs()`). -- `lambda::Float64=0`: The stregth of the regularization used during training. Can be - any value in the range `[0, ∞)`. -- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of - 0 represents L2 regularization, and a value of 1 represents L1 regularization. -- `rng::Union{AbstractRNG, Int64}`: The random number generator/seed used during + +- `loss=Flux.mse`: The loss function which the network will optimize. Should be a function + which can be called in the form `loss(yhat, y)`. Possible loss functions are listed in + [the Flux loss function documentation](https://fluxml.ai/Flux.jl/stable/models/losses/). + For a regression task, natural loss functions are: + + - `Flux.mse` + + - `Flux.mae` + + - `Flux.msle` + + - `Flux.huber_loss` + + Currently MLJ measures are not supported as loss functions here. + +- `epochs::Int=10`: The duration of training, in epochs. Typically, one epoch represents + one pass through the complete the training dataset. + +- `batch_size::int=1`: the batch size to be used for training, representing the number of + samples per update of the network weights. Typically, batch size is between 8 and + 512. Increassing batch size may accelerate training if `acceleration=CUDALibs()` and a + GPU is available. + +- `lambda::Float64=0`: The strength of the weight regularization penalty. Can be any value + in the range `[0, ∞)`. + +- `alpha::Float64=0`: The L2/L1 mix of regularization, in the range `[0, 1]`. A value of 0 + represents L2 regularization, and a value of 1 represents L1 regularization. + +- `rng::Union{AbstractRNG, Int64}`: The random number generator or seed used during training. -- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when fitting a machine if the associated optimiser has changed. If true, the associated machine will retrain from scratch on `fit!`, otherwise it will not. -- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. -For Training on GPU, use `CudaLibs()`. For training on GPU, use `CUDALibs()`. + +- `optimizer_changes_trigger_retraining::Bool=false`: Defines what happens when re-fitting + a machine if the associated optimiser has changed. If `true`, the associated machine + will retrain from scratch on `fit!` call, otherwise it will not. + +- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For + Training on GPU, use `CudaLibs()`. + +- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults + to `Flux.softmax`. + # Operations @@ -699,83 +877,79 @@ The fields of `report(mach)` are: # Examples -In this example we build a regression model using a toy dataset. 
+In this example we apply a multi-target regression model to synthetic data: + ```julia using MLJ -using MLJFlux +import MLJFlux using Flux -using MLJBase: augment_X ``` -First, we generate some data: -```julia -X = augment_X(randn(10000, 8), true); -θ = randn((9,2)); -y = X * θ; -X = MLJ.table(X) -y = MLJ.table(y) +First, we generate some synthetic data (needs MLJBase 0.20.16 or higher): + +```julia +X, y = make_regression(100, 9; n_targets = 2) # both tables schema(y) schema(X) ``` -Lets also make a test set: + +Splitting off a test set: + ```julia (X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); ``` -Next, we can define a `builder`. In the following macro call, `n_in` is the number of expected input features, and rng is a RNG. `init` is the function used to generate the random initial weights of the network. + +Next, we can define a `builder`, making use of a convenience macro to do so. In the +following `@builder` call, `n_in` is a proxy for the number input features and `n_out` the +number of target variables (both known at `fit!` time), while `rng` is a proxy for a RNG +(which will be passed from the `rng` field of `model` defined below). + ```julia builder = MLJFlux.@builder begin - init=Flux.glorot_uniform(rng) - Chain(Dense(n_in, 64, relu, init=init), + init=Flux.glorot_uniform(rng) + Chain( + Dense(n_in, 64, relu, init=init), Dense(64, 32, relu, init=init), - Dense(32, 1, init=init)) + Dense(32, n_out, init=init), + ) end ``` -Finally, we can define the model! + +Instantiating the regression model: + ```julia MultitargetNeuralNetworkRegressor = @load MultitargetNeuralNetworkRegressor model = MultitargetNeuralNetworkRegressor(builder=builder, rng=123, epochs=20) ``` -We will arrange for standardizaion of the the target by wrapping our model - in `TransformedTargetModel`, and standardization of the features by -inserting the wrapped model in a pipeline: + +We will arrange for standardization of the the target by wrapping our model in + `TransformedTargetModel`, and standardization of the features by inserting the wrapped + model in a pipeline: + ```julia pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer) ``` -If we fit with a high verbosity (>1), we will see the losses during training. We can also see the losses in the output of `report(mach)` + +If we fit with a high verbosity (>1), we will see the losses during training. We can also +see the losses in the output of `report(mach)` ```julia mach = machine(pipe, X, y) fit!(mach, verbosity=2) # first element initial loss, 2:end per epoch training losses -report(mach).transformed_target_model_deterministic.training_losses - +report(mach).transformed_target_model_deterministic.model.training_losses ``` -## Experimenting with learning rate - -We can visually compare how the learning rate affects the predictions: -```julia -using Plots - -rates = 10. .^ (-5:0) - -foreach(rates) do η - pipe.transformed_target_model_deterministic.model.optimiser.eta = η - fit!(mach, force=true, verbosity=0) - losses = - report(mach).transformed_target_model_deterministic.model.training_losses[3:end] - plot!(1:length(losses), losses, label=η) -end - - +For experimenting with learning rate, see the [`NeuralNetworkRegressor`](@ref) example. 
+``` pipe.transformed_target_model_deterministic.model.optimiser.eta = 0.0001 - ``` With the learning rate fixed, we can now compute a CV estimate of the performance (using all data bound to `mach`) and compare this with performance on the test set: + ```julia # custom MLJ loss: multi_loss(yhat, y) = l2(MLJ.matrix(yhat), MLJ.matrix(y)) |> mean @@ -784,9 +958,9 @@ multi_loss(yhat, y) = l2(MLJ.matrix(yhat), MLJ.matrix(y)) |> mean evaluate!(mach, resampling=CV(nfolds=5), measure=multi_loss) # loss for `(Xtest, test)`: -fit!(mach) +fit!(mach) # trains on all data `(X, y)` yhat = predict(mach, Xtest) -multi_loss(yhat, y) +multi_loss(yhat, ytest) ``` See also From 499310ad1539681e6c9f99d6b422fbbee3903f99 Mon Sep 17 00:00:00 2001 From: "Anthony D. Blaom" Date: Mon, 22 Aug 2022 13:50:44 +1200 Subject: [PATCH 19/24] move docstrings to after metadata_pkg to fix headers + minor tweaks --- src/MLJFlux.jl | 11 ---- src/types.jl | 158 +++++++++++++++++++++++++++---------------------- 2 files changed, 86 insertions(+), 83 deletions(-) diff --git a/src/MLJFlux.jl b/src/MLJFlux.jl index 981bc4d4..0c1b84f1 100644 --- a/src/MLJFlux.jl +++ b/src/MLJFlux.jl @@ -25,17 +25,6 @@ include("classifier.jl") include("image.jl") include("mlj_model_interface.jl") -### Package specific model traits: -MMI.metadata_pkg.((NeuralNetworkRegressor, - MultitargetNeuralNetworkRegressor, - NeuralNetworkClassifier, - ImageClassifier), - name="MLJFlux", - uuid="094fc8d1-fd35-5302-93ea-dabda2abf845", - url="https://github.com/alan-turing-institute/MLJFlux.jl", - julia=true, - license="MIT") - export NeuralNetworkRegressor, MultitargetNeuralNetworkRegressor export NeuralNetworkClassifier, ImageClassifier diff --git a/src/types.jl b/src/types.jl index 7c383c5c..315ad10d 100644 --- a/src/types.jl +++ b/src/types.jl @@ -57,6 +57,80 @@ for Model in [:NeuralNetworkClassifier, :ImageClassifier] end + +for Model in [:NeuralNetworkRegressor, :MultitargetNeuralNetworkRegressor] + + ex = quote + mutable struct $Model{B,O,L} <: MLJFluxDeterministic + builder::B + optimiser::O # mutable struct from Flux/src/optimise/optimisers.jl + loss::L # can be called as in `loss(yhat, y)` + epochs::Int # number of epochs + batch_size::Int # size of a batch + lambda::Float64 # regularization strength + alpha::Float64 # regularizaton mix (0 for all l2, 1 for all l1) + rng::Union{AbstractRNG,Integer} + optimiser_changes_trigger_retraining::Bool + acceleration::AbstractResource # eg, `CPU1()` or `CUDALibs()` + end + + function $Model(; builder::B = Linear() + , optimiser::O = Flux.Optimise.Adam() + , loss::L = Flux.mse + , epochs = 10 + , batch_size = 1 + , lambda = 0 + , alpha = 0 + , rng = Random.GLOBAL_RNG + , optimiser_changes_trigger_retraining=false + , acceleration = CPU1() + ) where {B,O,L} + + model = $Model{B,O,L}(builder + , optimiser + , loss + , epochs + , batch_size + , lambda + , alpha + , rng + , optimiser_changes_trigger_retraining + , acceleration) + + message = clean!(model) + isempty(message) || @warn message + + return model + end + + end + eval(ex) + +end + + + +const Regressor = + Union{NeuralNetworkRegressor, MultitargetNeuralNetworkRegressor} + + +MMI.metadata_pkg.( + ( + NeuralNetworkRegressor, + MultitargetNeuralNetworkRegressor, + NeuralNetworkClassifier, + ImageClassifier, + ), + name="MLJFlux", + uuid="094fc8d1-fd35-5302-93ea-dabda2abf845", + url="https://github.com/alan-turing-institute/MLJFlux.jl", + julia=true, + license="MIT", +) + + +# # DOCSTRINGS + """ $(MMI.doc_header(NeuralNetworkClassifier)) @@ -87,7 +161,8 @@ Train the 
machine with `fit!(mach, rows=...)`. - `builder=MLJFlux.Short()`: An MLJFlux builder that constructs a neural network. Possible `builders` include: `MLJFlux.Linear`, `MLJFlux.Short`, and `MLJFlux.MLP`. See - MLJFlux.jl documentation for examples of user-defined builders. + MLJFlux.jl documentation for examples of user-defined builders. See also `finaliser` + below. - `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see [the Flux optimiser @@ -141,8 +216,8 @@ Train the machine with `fit!(mach, rows=...)`. - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults - to `Flux.softmax`. +- `finaliser=Flux.softmax`: The final activation function of the neural network (applied + after the network defined by `builder`). Defaults to `Flux.softmax`. # Operations @@ -276,7 +351,8 @@ Train the machine with `fit!(mach, rows=...)`. - `builder`: An MLJFlux builder that constructs the neural network. The fallback builds a depth-16 VGG architecture adapted to the image size and number of target classes, with no batch normalization; see the Metalhead.jl documentation for details. See the example - below for a user-specified builder. A convenience macro `@builder` is also available. + below for a user-specified builder. A convenience macro `@builder` is also + available. See also `finaliser` below. - `optimiser::Flux.Adam()`: A `Flux.Optimise` optimiser. The optimiser performs the updating of the weights of the network. For further reference, see [the Flux optimiser @@ -330,8 +406,8 @@ Train the machine with `fit!(mach, rows=...)`. - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults - to `Flux.softmax`. +- `finaliser=Flux.softmax`: The final activation function of the neural network (applied + after the network defined by `builder`). Defaults to `Flux.softmax`. # Operations @@ -390,7 +466,7 @@ Above `images` is a single array but MLJFlux requires the images to be a vector individual image arrays: ``` - images = coerce(images, GrayImage); +images = coerce(images, GrayImage); images[1] ``` @@ -490,57 +566,6 @@ See also [`NeuralNetworkClassifier`](@ref). 
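As an aside on the example above, the probabilistic predictions can be reduced to point predictions and scored with any deterministic MLJ measure; a minimal sketch using `predict_mode` and `accuracy`:

```julia
# point predictions (class labels) for the holdout images:
yhat = predict_mode(mach, rows=501:1000)

# proportion of correctly classified images:
accuracy(yhat, labels[501:1000])
```
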
""" ImageClassifier -for Model in [:NeuralNetworkRegressor, :MultitargetNeuralNetworkRegressor] - - ex = quote - mutable struct $Model{B,O,L} <: MLJFluxDeterministic - builder::B - optimiser::O # mutable struct from Flux/src/optimise/optimisers.jl - loss::L # can be called as in `loss(yhat, y)` - epochs::Int # number of epochs - batch_size::Int # size of a batch - lambda::Float64 # regularization strength - alpha::Float64 # regularizaton mix (0 for all l2, 1 for all l1) - rng::Union{AbstractRNG,Integer} - optimiser_changes_trigger_retraining::Bool - acceleration::AbstractResource # eg, `CPU1()` or `CUDALibs()` - end - - function $Model(; builder::B = Linear() - , optimiser::O = Flux.Optimise.Adam() - , loss::L = Flux.mse - , epochs = 10 - , batch_size = 1 - , lambda = 0 - , alpha = 0 - , rng = Random.GLOBAL_RNG - , optimiser_changes_trigger_retraining=false - , acceleration = CPU1() - ) where {B,O,L} - - model = $Model{B,O,L}(builder - , optimiser - , loss - , epochs - , batch_size - , lambda - , alpha - , rng - , optimiser_changes_trigger_retraining - , acceleration) - - message = clean!(model) - isempty(message) || @warn message - - return model - end - - end - eval(ex) - -end - - """ $(MMI.doc_header(NeuralNetworkRegressor)) @@ -618,9 +643,6 @@ Train the machine with `fit!(mach, rows=...)`. - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults - to `Flux.softmax`. - # Operations @@ -633,8 +655,7 @@ Train the machine with `fit!(mach, rows=...)`. The fields of `fitted_params(mach)` are: - `chain`: The trained "chain" (Flux.jl model), namely the series of layers, functions, - and activations which make up the neural network. This includes the final layer - specified by `finaliser` (eg, `softmax`). + and activations which make up the neural network. # Report @@ -671,7 +692,7 @@ Since MLJFlux models do not handle ordered factors, we'll treat `:RAD` as `Conti X = coerce(X, :RAD=>Continuous) ``` -Lets also split off a test set: +Splitting off a test set: ```julia (X, Xtest), (y, ytest) = partition((X, y), 0.7, multi=true); @@ -848,9 +869,6 @@ Here: - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For Training on GPU, use `CudaLibs()`. -- `finaliser=Flux.softmax`: The final activation function of the neural network. Defaults - to `Flux.softmax`. - # Operations @@ -864,8 +882,7 @@ Here: The fields of `fitted_params(mach)` are: - `chain`: The trained "chain" (Flux.jl model), namely the series of layers, - functions, and activations which make up the neural network. This includes - the final layer specified by `finaliser` (eg, `softmax`). + functions, and activations which make up the neural network. # Report @@ -967,6 +984,3 @@ See also [`NeuralNetworkRegressor`](@ref) """ MultitargetNeuralNetworkRegressor - -const Regressor = - Union{NeuralNetworkRegressor, MultitargetNeuralNetworkRegressor} From 14b39b1e91cad83e2e4b1d58a08785cea35e1da1 Mon Sep 17 00:00:00 2001 From: "Anthony D. Blaom" Date: Mon, 22 Aug 2022 14:00:31 +1200 Subject: [PATCH 20/24] more doc tweaks --- src/types.jl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/types.jl b/src/types.jl index 315ad10d..3df93bc7 100644 --- a/src/types.jl +++ b/src/types.jl @@ -214,7 +214,7 @@ Train the machine with `fit!(mach, rows=...)`. will retrain from scratch on `fit!` call, otherwise it will not. 
- `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For - Training on GPU, use `CudaLibs()`. + Training on GPU, use `CUDALibs()`. - `finaliser=Flux.softmax`: The final activation function of the neural network (applied after the network defined by `builder`). Defaults to `Flux.softmax`. @@ -404,7 +404,7 @@ Train the machine with `fit!(mach, rows=...)`. will retrain from scratch on `fit!` call, otherwise it will not. - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For - Training on GPU, use `CudaLibs()`. + Training on GPU, use `CUDALibs()`. - `finaliser=Flux.softmax`: The final activation function of the neural network (applied after the network defined by `builder`). Defaults to `Flux.softmax`. @@ -641,7 +641,7 @@ Train the machine with `fit!(mach, rows=...)`. will retrain from scratch on `fit!` call, otherwise it will not. - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For - Training on GPU, use `CudaLibs()`. + Training on GPU, use `CUDALibs()`. # Operations @@ -655,7 +655,7 @@ Train the machine with `fit!(mach, rows=...)`. The fields of `fitted_params(mach)` are: - `chain`: The trained "chain" (Flux.jl model), namely the series of layers, functions, - and activations which make up the neural network. + and activations which make up the neural network. # Report @@ -867,7 +867,7 @@ Here: will retrain from scratch on `fit!` call, otherwise it will not. - `acceleration::AbstractResource=CPU1()`: Defines on what hardware training is done. For - Training on GPU, use `CudaLibs()`. + Training on GPU, use `CUDALibs()`. # Operations @@ -882,7 +882,7 @@ Here: The fields of `fitted_params(mach)` are: - `chain`: The trained "chain" (Flux.jl model), namely the series of layers, - functions, and activations which make up the neural network. + functions, and activations which make up the neural network. # Report From a21a6999413c2d077957daafaadce5a419552dcb Mon Sep 17 00:00:00 2001 From: "Anthony D. 
Blaom" Date: Mon, 22 Aug 2022 15:40:46 +1200 Subject: [PATCH 21/24] update MNIST example to julia 1.7 and resolve #210 --- examples/mnist/Manifest.toml | 1697 ++++++++++------- examples/mnist/Project.toml | 2 - examples/mnist/README.md | 7 +- examples/mnist/generate.jl | 4 + examples/mnist/loss.png | Bin 23719 -> 22134 bytes examples/mnist/notebook.ipynb | 2143 ++++++++++++++++++++++ examples/mnist/{mnist.jl => notebook.jl} | 13 +- examples/mnist/notebook.unexecuted.ipynb | 691 +++++++ examples/mnist/weights.png | Bin 35266 -> 35107 bytes 9 files changed, 3872 insertions(+), 685 deletions(-) create mode 100644 examples/mnist/generate.jl create mode 100644 examples/mnist/notebook.ipynb rename examples/mnist/{mnist.jl => notebook.jl} (96%) create mode 100644 examples/mnist/notebook.unexecuted.ipynb diff --git a/examples/mnist/Manifest.toml b/examples/mnist/Manifest.toml index 5d454b54..c943af16 100644 --- a/examples/mnist/Manifest.toml +++ b/examples/mnist/Manifest.toml @@ -1,1392 +1,1749 @@ # This file is machine-generated - editing it directly is not advised -[[AbstractFFTs]] -deps = ["LinearAlgebra"] -git-tree-sha1 = "485ee0867925449198280d4af84bdb46a2a404d0" +julia_version = "1.7.3" +manifest_format = "2.0" + +[[deps.ARFFFiles]] +deps = ["CategoricalArrays", "Dates", "Parsers", "Tables"] +git-tree-sha1 = "e8c8e0a2be6eb4f56b1672e46004463033daa409" +uuid = "da404889-ca92-49ff-9e8b-0aa6b4d38dc8" +version = "1.4.1" + +[[deps.AbstractFFTs]] +deps = ["ChainRulesCore", "LinearAlgebra"] +git-tree-sha1 = "69f7020bd72f069c219b5e8c236c1fa90d2cb409" uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" -version = "1.0.1" +version = "1.2.1" -[[AbstractTrees]] -git-tree-sha1 = "03e0550477d86222521d254b741d470ba17ea0b5" -uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" -version = "0.3.4" +[[deps.Accessors]] +deps = ["Compat", "CompositionsBase", "ConstructionBase", "Dates", "InverseFunctions", "LinearAlgebra", "MacroTools", "Requires", "Test"] +git-tree-sha1 = "8557017cfc7b58baea05a43ed35538857e6d35b4" +uuid = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" +version = "0.1.19" -[[Adapt]] +[[deps.Adapt]] deps = ["LinearAlgebra"] -git-tree-sha1 = "84918055d15b3114ede17ac6a7182f68870c16f7" +git-tree-sha1 = "195c5505521008abea5aee4f96930717958eac6f" uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -version = "3.3.1" +version = "3.4.0" -[[ArgTools]] +[[deps.ArgCheck]] +git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4" +uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197" +version = "2.3.0" + +[[deps.ArgTools]] uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" -[[Artifacts]] +[[deps.ArrayInterface]] +deps = ["ArrayInterfaceCore", "Compat", "IfElse", "LinearAlgebra", "Static"] +git-tree-sha1 = "0582b5976fc76523f77056e888e454f0f7732596" +uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" +version = "6.0.22" + +[[deps.ArrayInterfaceCore]] +deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] +git-tree-sha1 = "40debc9f72d0511e12d817c7ca06a721b6423ba3" +uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" +version = "0.1.17" + +[[deps.Artifacts]] uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" -[[BFloat16s]] -deps = ["LinearAlgebra", "Test"] -git-tree-sha1 = "4af69e205efc343068dc8722b8dfec1ade89254a" +[[deps.BFloat16s]] +deps = ["LinearAlgebra", "Printf", "Random", "Test"] +git-tree-sha1 = "a598ecb0d717092b5539dbbe890c98bac842b072" uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b" -version = "0.1.0" +version = "0.2.0" -[[BSON]] -git-tree-sha1 = "92b8a8479128367aaab2620b8e73dff632f5ae69" -uuid = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0" -version = "0.3.3" 
+[[deps.BangBang]] +deps = ["Compat", "ConstructionBase", "Future", "InitialValues", "LinearAlgebra", "Requires", "Setfield", "Tables", "ZygoteRules"] +git-tree-sha1 = "b15a6bc52594f5e4a3b825858d1089618871bf9d" +uuid = "198e06fe-97b7-11e9-32a5-e1d131e6ad66" +version = "0.3.36" -[[Base64]] +[[deps.Base64]] uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" -[[BinDeps]] -deps = ["Libdl", "Pkg", "SHA", "URIParser", "Unicode"] -git-tree-sha1 = "1289b57e8cf019aede076edab0587eb9644175bd" -uuid = "9e28174c-4ba2-5203-b857-d8d62c4213ee" -version = "1.0.2" +[[deps.Baselet]] +git-tree-sha1 = "aebf55e6d7795e02ca500a689d326ac979aaf89e" +uuid = "9718e550-a3fa-408a-8086-8db961cd8217" +version = "0.1.1" -[[BinaryProvider]] +[[deps.BinaryProvider]] deps = ["Libdl", "Logging", "SHA"] git-tree-sha1 = "ecdec412a9abc8db54c0efc5548c64dfce072058" uuid = "b99e7846-7c00-51b0-8f62-c81ae34c0232" version = "0.5.10" -[[Blosc]] -deps = ["Blosc_jll"] -git-tree-sha1 = "84cf7d0f8fd46ca6f1b3e0305b4b4a37afe50fd6" -uuid = "a74b3585-a348-5f62-a45c-50e91977d574" -version = "0.7.0" - -[[Blosc_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Lz4_jll", "Pkg", "Zlib_jll", "Zstd_jll"] -git-tree-sha1 = "e747dac84f39c62aff6956651ec359686490134e" -uuid = "0b7ba130-8d10-5ba8-a3d6-c5182647fed9" -version = "1.21.0+0" - -[[BufferedStreams]] -deps = ["Compat", "Test"] -git-tree-sha1 = "5d55b9486590fdda5905c275bb21ce1f0754020f" +[[deps.BufferedStreams]] +git-tree-sha1 = "bb065b14d7f941b8617bc323063dbe79f55d16ea" uuid = "e1450e63-4bb3-523b-b2a4-4ffa8c0fd77d" -version = "1.0.0" +version = "1.1.0" -[[Bzip2_jll]] +[[deps.Bzip2_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "c3598e525718abcc440f69cc6d5f60dda0a1b61e" +git-tree-sha1 = "19a35467a82e236ff51bc17a3a44b69ef35185a2" uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0" -version = "1.0.6+5" +version = "1.0.8+0" -[[CEnum]] -git-tree-sha1 = "215a9aa4a1f23fbd05b92769fdd62559488d70e9" +[[deps.CEnum]] +git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90" uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82" -version = "0.4.1" +version = "0.4.2" -[[CUDA]] -deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CompilerSupportLibraries_jll", "DataStructures", "ExprTools", "GPUArrays", "GPUCompiler", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "TimerOutputs"] -git-tree-sha1 = "82b2811f5888465d96b38c7bb12d8fb9c25838e1" +[[deps.CSV]] +deps = ["CodecZlib", "Dates", "FilePathsBase", "InlineStrings", "Mmap", "Parsers", "PooledArrays", "SentinelArrays", "Tables", "Unicode", "WeakRefStrings"] +git-tree-sha1 = "873fb188a4b9d76549b81465b1f75c82aaf59238" +uuid = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b" +version = "0.10.4" + +[[deps.CUDA]] +deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "TimerOutputs"] +git-tree-sha1 = "49549e2c28ffb9cc77b3689dc10e46e6271e9452" uuid = "052768ef-5323-5732-b1bb-66c8b64840ba" -version = "3.3.1" +version = "3.12.0" -[[Cairo_jll]] +[[deps.Cairo_jll]] deps = ["Artifacts", "Bzip2_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] -git-tree-sha1 = 
"e2f47f6d8337369411569fd45ae5753ca10394c6" +git-tree-sha1 = "4b859a208b2397a7a623a03449e4636bdb17bcf2" uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a" -version = "1.16.0+6" +version = "1.16.1+1" -[[CategoricalArrays]] -deps = ["DataAPI", "Future", "JSON", "Missings", "Printf", "RecipesBase", "Statistics", "StructTypes", "Unicode"] -git-tree-sha1 = "1562002780515d2573a4fb0c3715e4e57481075e" +[[deps.Calculus]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "f641eb0a4f00c343bbc32346e1217b86f3ce9dad" +uuid = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9" +version = "0.5.1" + +[[deps.CategoricalArrays]] +deps = ["DataAPI", "Future", "Missings", "Printf", "Requires", "Statistics", "Unicode"] +git-tree-sha1 = "5f5a975d996026a8dd877c35fe26a7b8179c02ba" uuid = "324d7699-5711-5eae-9e2f-1d82baa6b597" -version = "0.10.0" +version = "0.10.6" -[[ChainRules]] -deps = ["ChainRulesCore", "Compat", "LinearAlgebra", "Random", "Statistics"] -git-tree-sha1 = "e7e3200bd24b77bcc849e6616f7c2f0d45d70f5b" +[[deps.CategoricalDistributions]] +deps = ["CategoricalArrays", "Distributions", "Missings", "OrderedCollections", "Random", "ScientificTypes", "UnicodePlots"] +git-tree-sha1 = "036d44b2d1ed2a8a3409790103e277ab5cbe30df" +uuid = "af321ab8-2d2e-40a6-b165-3d674595d28e" +version = "0.1.8" + +[[deps.ChainRules]] +deps = ["Adapt", "ChainRulesCore", "Compat", "Distributed", "GPUArraysCore", "IrrationalConstants", "LinearAlgebra", "Random", "RealDot", "SparseArrays", "Statistics", "StructArrays"] +git-tree-sha1 = "650415ad4c2a007b17f577cb081d9376cc908b6f" uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2" -version = "0.8.17" +version = "1.44.2" -[[ChainRulesCore]] +[[deps.ChainRulesCore]] deps = ["Compat", "LinearAlgebra", "SparseArrays"] -git-tree-sha1 = "be770c08881f7bb928dfd86d1ba83798f76cf62a" +git-tree-sha1 = "80ca332f6dcb2508adba68f22f551adb2d00a624" uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -version = "0.10.9" +version = "1.15.3" + +[[deps.ChangesOfVariables]] +deps = ["ChainRulesCore", "LinearAlgebra", "Test"] +git-tree-sha1 = "38f7a08f19d8810338d4f5085211c7dfa5d5bdd8" +uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" +version = "0.1.4" -[[CodecZlib]] +[[deps.CodecZlib]] deps = ["TranscodingStreams", "Zlib_jll"] git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da" uuid = "944b1d66-785c-5afd-91f1-9de20f533193" version = "0.7.0" -[[ColorSchemes]] -deps = ["ColorTypes", "Colors", "FixedPointNumbers", "Random", "StaticArrays"] -git-tree-sha1 = "c8fd01e4b736013bc61b704871d20503b33ea402" +[[deps.ColorSchemes]] +deps = ["ColorTypes", "ColorVectorSpace", "Colors", "FixedPointNumbers", "Random"] +git-tree-sha1 = "1fd869cc3875b57347f7027521f561cf46d1fcd8" uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4" -version = "3.12.1" +version = "3.19.0" -[[ColorTypes]] +[[deps.ColorTypes]] deps = ["FixedPointNumbers", "Random"] -git-tree-sha1 = "32a2b8af383f11cbb65803883837a149d10dfe8a" +git-tree-sha1 = "eb7f0f8307f71fac7c606984ea5fb2817275d6e4" uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f" -version = "0.10.12" +version = "0.11.4" -[[Colors]] +[[deps.ColorVectorSpace]] +deps = ["ColorTypes", "FixedPointNumbers", "LinearAlgebra", "SpecialFunctions", "Statistics", "TensorCore"] +git-tree-sha1 = "d08c20eef1f2cbc6e60fd3612ac4340b89fea322" +uuid = "c3611d14-8923-5661-9e6a-0046d554d3a4" +version = "0.9.9" + +[[deps.Colors]] deps = ["ColorTypes", "FixedPointNumbers", "Reexport"] git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40" uuid = "5ae59095-9a9b-59fe-a467-6f913c188581" version = "0.12.8" -[[CommonSubexpressions]] 
+[[deps.CommonSubexpressions]] deps = ["MacroTools", "Test"] git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7" uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" version = "0.3.0" -[[Compat]] +[[deps.Compat]] deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"] -git-tree-sha1 = "dc7dedc2c2aa9faf59a55c622760a25cbefbe941" +git-tree-sha1 = "78bee250c6826e1cf805a88b7f1e86025275d208" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "3.31.0" +version = "3.46.0" -[[CompilerSupportLibraries_jll]] +[[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" -[[ComputationalResources]] +[[deps.CompositionsBase]] +git-tree-sha1 = "455419f7e328a1a2493cabc6428d79e951349769" +uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b" +version = "0.1.1" + +[[deps.ComputationalResources]] git-tree-sha1 = "52cb3ec90e8a8bea0e62e275ba577ad0f74821f7" uuid = "ed09eef8-17a6-5b46-8889-db040fac31e3" version = "0.3.2" -[[Conda]] -deps = ["JSON", "VersionParsing"] -git-tree-sha1 = "299304989a5e6473d985212c28928899c74e9421" -uuid = "8f4d0f93-b110-5947-807f-2305c1781a2d" -version = "1.5.2" +[[deps.ConstructionBase]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "59d00b3139a9de4eb961057eabb65ac6522be954" +uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9" +version = "1.4.0" + +[[deps.ContextVariablesX]] +deps = ["Compat", "Logging", "UUIDs"] +git-tree-sha1 = "8ccaa8c655bc1b83d2da4d569c9b28254ababd6e" +uuid = "6add18c4-b38d-439d-96f6-d6bc489c04c5" +version = "0.1.2" -[[Contour]] -deps = ["StaticArrays"] -git-tree-sha1 = "9f02045d934dc030edad45944ea80dbd1f0ebea7" +[[deps.Contour]] +git-tree-sha1 = "d05d9e7b7aedff4e5b51a029dced05cfb6125781" uuid = "d38c429a-6771-53c6-b99e-75d170b6e991" -version = "0.5.7" +version = "0.6.2" -[[Crayons]] -git-tree-sha1 = "3f71217b538d7aaee0b69ab47d9b7724ca8afa0d" +[[deps.Crayons]] +git-tree-sha1 = "249fe38abf76d48563e2f4556bebd215aa317e15" uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f" -version = "4.0.4" +version = "4.1.1" -[[DataAPI]] -git-tree-sha1 = "ee400abb2298bd13bfc3df1c412ed228061a2385" +[[deps.DataAPI]] +git-tree-sha1 = "fb5f5316dd3fd4c5e7c30a24d50643b73e37cd40" uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a" -version = "1.7.0" +version = "1.10.0" -[[DataDeps]] +[[deps.DataDeps]] deps = ["BinaryProvider", "HTTP", "Libdl", "Reexport", "SHA", "p7zip_jll"] -git-tree-sha1 = "4f0e41ff461d42cfc62ff0de4f1cd44c6e6b3771" +git-tree-sha1 = "e299d8267135ef2f9c941a764006697082c1e7e8" uuid = "124859b0-ceae-595e-8997-d05f6a7a8dfe" -version = "0.7.7" +version = "0.7.8" -[[DataStructures]] +[[deps.DataFrames]] +deps = ["Compat", "DataAPI", "Future", "InvertedIndices", "IteratorInterfaceExtensions", "LinearAlgebra", "Markdown", "Missings", "PooledArrays", "PrettyTables", "Printf", "REPL", "Reexport", "SortingAlgorithms", "Statistics", "TableTraits", "Tables", "Unicode"] +git-tree-sha1 = "daa21eb85147f72e41f6352a57fccea377e310a9" +uuid = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" +version = "1.3.4" + +[[deps.DataStructures]] deps = ["Compat", "InteractiveUtils", "OrderedCollections"] -git-tree-sha1 = "4437b64df1e0adccc3e5d1adbc3ac741095e4677" +git-tree-sha1 = "d1fff3a548102f48987a52a2e0d114fa97d730f0" uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" -version = "0.18.9" +version = "0.18.13" -[[DataValueInterfaces]] 
+[[deps.DataValueInterfaces]] git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464" version = "1.0.0" -[[Dates]] +[[deps.Dates]] deps = ["Printf"] uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" -[[DelimitedFiles]] +[[deps.DefineSingletons]] +git-tree-sha1 = "0fba8b706d0178b4dc7fd44a96a92382c9065c2c" +uuid = "244e2a9f-e319-4986-a169-4d1fe445cd52" +version = "0.1.2" + +[[deps.DelimitedFiles]] deps = ["Mmap"] uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" -[[DiffResults]] +[[deps.DensityInterface]] +deps = ["InverseFunctions", "Test"] +git-tree-sha1 = "80c3e8639e3353e5d2912fb3a1916b8455e2494b" +uuid = "b429d917-457f-4dbc-8f4c-0cc954292b1d" +version = "0.4.0" + +[[deps.DiffResults]] deps = ["StaticArrays"] git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805" uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5" version = "1.0.3" -[[DiffRules]] -deps = ["NaNMath", "Random", "SpecialFunctions"] -git-tree-sha1 = "214c3fcac57755cfda163d91c58893a8723f93e9" +[[deps.DiffRules]] +deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] +git-tree-sha1 = "28d605d9a0ac17118fe2c5e9ce0fbb76c3ceb120" uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" -version = "1.0.2" +version = "1.11.0" -[[Distances]] -deps = ["LinearAlgebra", "Statistics", "StatsAPI"] -git-tree-sha1 = "abe4ad222b26af3337262b8afb28fab8d215e9f8" +[[deps.Distances]] +deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"] +git-tree-sha1 = "3258d0659f812acde79e8a74b11f17ac06d0ca04" uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" -version = "0.10.3" +version = "0.10.7" -[[Distributed]] +[[deps.Distributed]] deps = ["Random", "Serialization", "Sockets"] uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" -[[Distributions]] -deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase", "StatsFuns"] -git-tree-sha1 = "2733323e5c02a9d7f48e7a3c4bc98d764fb704da" +[[deps.Distributions]] +deps = ["ChainRulesCore", "DensityInterface", "FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase", "StatsFuns", "Test"] +git-tree-sha1 = "6180800cebb409d7eeef8b2a9a562107b9705be5" uuid = "31c24e10-a181-5473-b8eb-7969acd0382f" -version = "0.25.6" +version = "0.25.67" -[[DocStringExtensions]] +[[deps.DocStringExtensions]] deps = ["LibGit2"] -git-tree-sha1 = "a32185f5428d3986f47c2ab78b1f216d5e6cc96f" +git-tree-sha1 = "5158c2b41018c5f7eb1470d558127ac274eca0c9" uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" -version = "0.8.5" +version = "0.9.1" -[[Downloads]] -deps = ["ArgTools", "LibCURL", "NetworkOptions"] +[[deps.Downloads]] +deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" -[[EarCut_jll]] +[[deps.DualNumbers]] +deps = ["Calculus", "NaNMath", "SpecialFunctions"] +git-tree-sha1 = "5837a837389fccf076445fce071c8ddaea35a566" +uuid = "fa6b7ba4-c1ee-5f82-b5fc-ecf0adba8f74" +version = "0.6.8" + +[[deps.EarCut_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "92d8f9f208637e8d2d28c664051a00569c01493d" +git-tree-sha1 = "3f3a2501fa7236e9b911e0f7a588c657e822bb6d" uuid = "5ae413db-bbd1-5e63-b57d-d24a61df00f5" -version = "2.1.5+1" +version = "2.2.3+0" -[[EarlyStopping]] +[[deps.EarlyStopping]] deps = ["Dates", "Statistics"] -git-tree-sha1 = "9427bc7a6c186d892f71b1c36ee7619e440c9e06" +git-tree-sha1 = "98fdf08b707aaf69f524a6cd0a67858cefe0cfb6" uuid = 
"792122b4-ca99-40de-a6bc-6742525f08b6" -version = "0.1.8" +version = "0.3.0" -[[Expat_jll]] +[[deps.Expat_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "b3bfd02e98aedfa5cf885665493c5598c350cd2f" +git-tree-sha1 = "bad72f730e9e91c08d9427d5e8db95478a3c323d" uuid = "2e619515-83b5-522b-bb60-26c02a35a201" -version = "2.2.10+0" +version = "2.4.8+0" -[[ExprTools]] -git-tree-sha1 = "10407a39b87f29d47ebaca8edbc75d7c302ff93e" +[[deps.ExprTools]] +git-tree-sha1 = "56559bbef6ca5ea0c0818fa5c90320398a6fbf8d" uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04" -version = "0.1.3" +version = "0.1.8" -[[EzXML]] -deps = ["Printf", "XML2_jll"] -git-tree-sha1 = "0fa3b52a04a4e210aeb1626def9c90df3ae65268" -uuid = "8f5d6c58-4d21-5cfd-889c-e3ad7ee6a615" -version = "1.1.0" +[[deps.Extents]] +git-tree-sha1 = "5e1e4c53fa39afe63a7d356e30452249365fba99" +uuid = "411431e0-e8b7-467b-b5e0-f676ba4f2910" +version = "0.1.1" -[[FFMPEG]] +[[deps.FFMPEG]] deps = ["FFMPEG_jll"] git-tree-sha1 = "b57e3acbe22f8484b4b5ff66a7499717fe1a9cc8" uuid = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" version = "0.4.1" -[[FFMPEG_jll]] -deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "LibVPX_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "Pkg", "Zlib_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"] -git-tree-sha1 = "3cc57ad0a213808473eafef4845a74766242e05f" +[[deps.FFMPEG_jll]] +deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "Pkg", "Zlib_jll", "libaom_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"] +git-tree-sha1 = "ccd479984c7838684b3ac204b716c89955c76623" uuid = "b22a6f82-2f65-5046-a5b2-351ab43fb4e5" -version = "4.3.1+4" +version = "4.4.2+0" + +[[deps.FLoops]] +deps = ["BangBang", "Compat", "FLoopsBase", "InitialValues", "JuliaVariables", "MLStyle", "Serialization", "Setfield", "Transducers"] +git-tree-sha1 = "4391d3ed58db9dc5a9883b23a0578316b4798b1f" +uuid = "cc61a311-1640-44b5-9fba-1b764f453329" +version = "0.2.0" -[[FilePathsBase]] -deps = ["Dates", "Mmap", "Printf", "Test", "UUIDs"] -git-tree-sha1 = "0f5e8d0cb91a6386ba47bd1527b240bd5725fbae" +[[deps.FLoopsBase]] +deps = ["ContextVariablesX"] +git-tree-sha1 = "656f7a6859be8673bf1f35da5670246b923964f7" +uuid = "b9860ae5-e623-471e-878b-f6a53c775ea6" +version = "0.1.1" + +[[deps.FileIO]] +deps = ["Pkg", "Requires", "UUIDs"] +git-tree-sha1 = "94f5101b96d2d968ace56f7f2db19d0a5f592e28" +uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" +version = "1.15.0" + +[[deps.FilePathsBase]] +deps = ["Compat", "Dates", "Mmap", "Printf", "Test", "UUIDs"] +git-tree-sha1 = "129b104185df66e408edd6625d480b7f9e9823a0" uuid = "48062228-2e41-5def-b9a4-89aafe57970f" -version = "0.9.10" +version = "0.9.18" + +[[deps.FileWatching]] +uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" -[[FillArrays]] -deps = ["LinearAlgebra", "Random", "SparseArrays"] -git-tree-sha1 = "31939159aeb8ffad1d4d8ee44d07f8558273120a" +[[deps.FillArrays]] +deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"] +git-tree-sha1 = "246621d23d1f43e3b9c368bf3b72b2331a27c286" uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" -version = "0.11.7" +version = "0.13.2" -[[FixedPointNumbers]] +[[deps.FixedPointNumbers]] deps = ["Statistics"] git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc" uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93" version = "0.8.4" -[[Flux]] -deps = ["AbstractTrees", "Adapt", "CUDA", "CodecZlib", "Colors", 
"DelimitedFiles", "Functors", "Juno", "LinearAlgebra", "MacroTools", "NNlib", "NNlibCUDA", "Pkg", "Printf", "Random", "Reexport", "SHA", "Statistics", "StatsBase", "Test", "ZipFile", "Zygote"] -git-tree-sha1 = "0b3c6d0ce57d3b793eabd346ccc8f605035ef079" +[[deps.Flux]] +deps = ["Adapt", "ArrayInterface", "CUDA", "ChainRulesCore", "Functors", "LinearAlgebra", "MLUtils", "MacroTools", "NNlib", "NNlibCUDA", "Optimisers", "ProgressLogging", "Random", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase", "Test", "Zygote"] +git-tree-sha1 = "9b5419ad6f043ac2b52f1b7f9a8ecb8762231214" uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c" -version = "0.12.4" +version = "0.13.5" -[[Fontconfig_jll]] +[[deps.FoldsThreads]] +deps = ["Accessors", "FunctionWrappers", "InitialValues", "SplittablesBase", "Transducers"] +git-tree-sha1 = "eb8e1989b9028f7e0985b4268dabe94682249025" +uuid = "9c68100b-dfe1-47cf-94c8-95104e173443" +version = "0.1.1" + +[[deps.Fontconfig_jll]] deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"] -git-tree-sha1 = "35895cf184ceaab11fd778b4590144034a167a2f" +git-tree-sha1 = "21efd19106a55620a188615da6d3d06cd7f6ee03" uuid = "a3f928ae-7b40-5064-980b-68af3947d34b" -version = "2.13.1+14" +version = "2.13.93+0" -[[Formatting]] +[[deps.Formatting]] deps = ["Printf"] git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8" uuid = "59287772-0a20-5a39-b81b-1366585eb4c0" version = "0.4.2" -[[ForwardDiff]] -deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "NaNMath", "Printf", "Random", "SpecialFunctions", "StaticArrays"] -git-tree-sha1 = "e2af66012e08966366a43251e1fd421522908be6" +[[deps.ForwardDiff]] +deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"] +git-tree-sha1 = "187198a4ed8ccd7b5d99c41b69c679269ea2b2d4" uuid = "f6369f11-7733-5829-9624-2563aa707210" -version = "0.10.18" +version = "0.10.32" + +[[deps.FreeType]] +deps = ["CEnum", "FreeType2_jll"] +git-tree-sha1 = "cabd77ab6a6fdff49bfd24af2ebe76e6e018a2b4" +uuid = "b38be410-82b0-50bf-ab77-7b57e271db43" +version = "4.0.0" -[[FreeType2_jll]] +[[deps.FreeType2_jll]] deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] -git-tree-sha1 = "cbd58c9deb1d304f5a245a0b7eb841a2560cfec6" +git-tree-sha1 = "87eb71354d8ec1a96d4a7636bd57a7347dde3ef9" uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7" -version = "2.10.1+5" +version = "2.10.4+0" -[[FriBidi_jll]] +[[deps.FreeTypeAbstraction]] +deps = ["ColorVectorSpace", "Colors", "FreeType", "GeometryBasics"] +git-tree-sha1 = "38a92e40157100e796690421e34a11c107205c86" +uuid = "663a7486-cb36-511b-a19d-713bb74d65c9" +version = "0.10.0" + +[[deps.FriBidi_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "aa31987c2ba8704e23c6c8ba8a4f769d5d7e4f91" uuid = "559328eb-81f9-559d-9380-de523a88c83c" version = "1.0.10+0" -[[Functors]] -deps = ["MacroTools"] -git-tree-sha1 = "a7bb2af991c43dcf5c3455d276dd83976799634f" +[[deps.FunctionWrappers]] +git-tree-sha1 = "241552bc2209f0fa068b6415b1942cc0aa486bcc" +uuid = "069b7b12-0de2-55c6-9aab-29f3d0a68a2e" +version = "1.1.2" + +[[deps.Functors]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "a2657dd0f3e8a61dbe70fc7c122038bd33790af5" uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196" -version = "0.2.1" +version = "0.3.0" -[[Future]] +[[deps.Future]] deps = ["Random"] uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" 
-[[GLFW_jll]] +[[deps.GLFW_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pkg", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll"] -git-tree-sha1 = "dba1e8614e98949abfa60480b13653813d8f0157" +git-tree-sha1 = "d972031d28c8c8d9d7b41a536ad7bb0c2579caca" uuid = "0656b61e-2033-5cc2-a64a-77c0f6c09b89" -version = "3.3.5+0" +version = "3.3.8+0" -[[GPUArrays]] -deps = ["AbstractFFTs", "Adapt", "LinearAlgebra", "Printf", "Random", "Serialization", "Statistics"] -git-tree-sha1 = "ececbf05f8904c92814bdbd0aafd5540b0bf2e9a" +[[deps.GPUArrays]] +deps = ["Adapt", "GPUArraysCore", "LLVM", "LinearAlgebra", "Printf", "Random", "Reexport", "Serialization", "Statistics"] +git-tree-sha1 = "45d7deaf05cbb44116ba785d147c518ab46352d7" uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7" -version = "7.0.1" +version = "8.5.0" + +[[deps.GPUArraysCore]] +deps = ["Adapt"] +git-tree-sha1 = "6872f5ec8fd1a38880f027a26739d42dcda6691f" +uuid = "46192b85-c4d5-4398-a991-12ede77f4527" +version = "0.1.2" -[[GPUCompiler]] -deps = ["DataStructures", "ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "TimerOutputs", "UUIDs"] -git-tree-sha1 = "222c6cdb888ec24795936d6829aa978691def60e" +[[deps.GPUCompiler]] +deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "TimerOutputs", "UUIDs"] +git-tree-sha1 = "122d7bcc92abf94cf1a86281ad7a4d0e838ab9e0" uuid = "61eb1bfa-7361-4325-ad38-22787b887f55" -version = "0.12.3" +version = "0.16.3" -[[GR]] -deps = ["Base64", "DelimitedFiles", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Pkg", "Printf", "Random", "Serialization", "Sockets", "Test", "UUIDs"] -git-tree-sha1 = "b83e3125048a9c3158cbb7ca423790c7b1b57bea" +[[deps.GR]] +deps = ["Base64", "DelimitedFiles", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Pkg", "Printf", "Random", "RelocatableFolders", "Serialization", "Sockets", "Test", "UUIDs"] +git-tree-sha1 = "cf0a9940f250dc3cb6cc6c6821b4bf8a4286cf9c" uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" -version = "0.57.5" +version = "0.66.2" -[[GR_jll]] +[[deps.GR_jll]] deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Pkg", "Qt5Base_jll", "Zlib_jll", "libpng_jll"] -git-tree-sha1 = "e14907859a1d3aee73a019e7b3c98e9e7b8b5b3e" +git-tree-sha1 = "2d908286d120c584abbe7621756c341707096ba4" uuid = "d2c73de3-f751-5644-a686-071e5b155ba9" -version = "0.57.3+0" +version = "0.66.2+0" -[[GZip]] +[[deps.GZip]] deps = ["Libdl"] git-tree-sha1 = "039be665faf0b8ae36e089cd694233f5dee3f7d6" uuid = "92fee26a-97fe-5a0c-ad85-20a5f3185b63" version = "0.5.1" -[[GeometryBasics]] -deps = ["EarCut_jll", "IterTools", "LinearAlgebra", "StaticArrays", "StructArrays", "Tables"] -git-tree-sha1 = "15ff9a14b9e1218958d3530cc288cf31465d9ae2" +[[deps.GeoInterface]] +deps = ["Extents"] +git-tree-sha1 = "fb28b5dc239d0174d7297310ef7b84a11804dfab" +uuid = "cf35fbd7-0cd7-5166-be24-54bfbe79505f" +version = "1.0.1" + +[[deps.GeometryBasics]] +deps = ["EarCut_jll", "GeoInterface", "IterTools", "LinearAlgebra", "StaticArrays", "StructArrays", "Tables"] +git-tree-sha1 = "a7a97895780dab1085a97769316aa348830dc991" uuid = "5c1252a2-5f33-56bf-86c9-59e7332b4326" -version = "0.3.13" +version = "0.4.3" -[[Gettext_jll]] +[[deps.Gettext_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"] git-tree-sha1 = "9b02998aba7bf074d14de89f9d37ca24a1a0b046" uuid = "78b55507-aeef-58d4-861c-77aaff3498b1" 
version = "0.21.0+0" -[[Glib_jll]] +[[deps.Glib_jll]] deps = ["Artifacts", "Gettext_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE_jll", "Pkg", "Zlib_jll"] -git-tree-sha1 = "47ce50b742921377301e15005c96e979574e130b" +git-tree-sha1 = "a32d672ac2c967f3deb8a81d828afc739c838a06" uuid = "7746bdde-850d-59dc-9ae8-88ece973131d" -version = "2.68.1+0" +version = "2.68.3+2" + +[[deps.Glob]] +git-tree-sha1 = "4df9f7e06108728ebf00a0a11edee4b29a482bb2" +uuid = "c27321d9-0574-5035-807b-f59d2c89b15c" +version = "1.3.0" + +[[deps.Graphics]] +deps = ["Colors", "LinearAlgebra", "NaNMath"] +git-tree-sha1 = "d61890399bc535850c4bf08e4e0d3a7ad0f21cbd" +uuid = "a2bd30eb-e257-5431-a919-1863eab51364" +version = "1.1.2" -[[Grisu]] +[[deps.Graphite2_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "344bf40dcab1073aca04aa0df4fb092f920e4011" +uuid = "3b182d85-2403-5c21-9c21-1e1f0cc25472" +version = "1.3.14+0" + +[[deps.Grisu]] git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2" uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe" version = "1.0.2" -[[HDF5]] -deps = ["Blosc", "Compat", "HDF5_jll", "Libdl", "Mmap", "Random", "Requires"] -git-tree-sha1 = "1d18a48a037b14052ca462ea9d05dee3ac607d23" +[[deps.HDF5]] +deps = ["Compat", "HDF5_jll", "Libdl", "Mmap", "Random", "Requires"] +git-tree-sha1 = "9ffc57b9bb643bf3fce34f3daf9ff506ed2d8b7a" uuid = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" -version = "0.15.5" +version = "0.16.10" -[[HDF5_jll]] +[[deps.HDF5_jll]] deps = ["Artifacts", "JLLWrappers", "LibCURL_jll", "Libdl", "OpenSSL_jll", "Pkg", "Zlib_jll"] -git-tree-sha1 = "fd83fa0bde42e01952757f01149dd968c06c4dba" +git-tree-sha1 = "c003b31e2e818bc512b0ff99d7dce03b0c1359f5" uuid = "0234f1f7-429e-5d53-9886-15a909be8d59" -version = "1.12.0+1" +version = "1.12.2+1" -[[HTTP]] -deps = ["Base64", "Dates", "IniFile", "MbedTLS", "NetworkOptions", "Sockets", "URIs"] -git-tree-sha1 = "86ed84701fbfd1142c9786f8e53c595ff5a4def9" +[[deps.HTTP]] +deps = ["Base64", "Dates", "IniFile", "Logging", "MbedTLS", "NetworkOptions", "Sockets", "URIs"] +git-tree-sha1 = "0fa77022fe4b511826b39c894c90daf5fce3334a" uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3" -version = "0.9.10" +version = "0.9.17" -[[IOCapture]] -deps = ["Logging", "Random"] -git-tree-sha1 = "f7be53659ab06ddc986428d3a9dcc95f6fa6705a" -uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89" -version = "0.2.2" +[[deps.HarfBuzz_jll]] +deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "Graphite2_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg"] +git-tree-sha1 = "129acf094d168394e80ee1dc4bc06ec835e510a3" +uuid = "2e76f6c2-a576-52d4-95c1-20adfe4de566" +version = "2.8.1+1" + +[[deps.HypergeometricFunctions]] +deps = ["DualNumbers", "LinearAlgebra", "OpenLibm_jll", "SpecialFunctions", "Test"] +git-tree-sha1 = "709d864e3ed6e3545230601f94e11ebc65994641" +uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a" +version = "0.3.11" -[[IRTools]] +[[deps.IRTools]] deps = ["InteractiveUtils", "MacroTools", "Test"] -git-tree-sha1 = "95215cd0076a150ef46ff7928892bc341864c73c" +git-tree-sha1 = "af14a478780ca78d5eb9908b263023096c2b9d64" uuid = "7869d1d1-7146-5819-86e3-90919afe41df" -version = "0.4.3" +version = "0.4.6" -[[IniFile]] -deps = ["Test"] -git-tree-sha1 = "098e4d2c533924c921f9f9847274f2ad89e018b8" +[[deps.IfElse]] +git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1" +uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173" +version = "0.1.1" + +[[deps.ImageBase]] +deps = ["ImageCore", "Reexport"] +git-tree-sha1 = 
"b51bb8cae22c66d0f6357e3bcb6363145ef20835" +uuid = "c817782e-172a-44cc-b673-b171935fbb9e" +version = "0.1.5" + +[[deps.ImageCore]] +deps = ["AbstractFFTs", "ColorVectorSpace", "Colors", "FixedPointNumbers", "Graphics", "MappedArrays", "MosaicViews", "OffsetArrays", "PaddedViews", "Reexport"] +git-tree-sha1 = "acf614720ef026d38400b3817614c45882d75500" +uuid = "a09fc81d-aa75-5fe9-8630-4744c3626534" +version = "0.9.4" + +[[deps.ImageShow]] +deps = ["Base64", "FileIO", "ImageBase", "ImageCore", "OffsetArrays", "StackViews"] +git-tree-sha1 = "b563cf9ae75a635592fc73d3eb78b86220e55bd8" +uuid = "4e3cecfd-b093-5904-9786-8bbb286a6a31" +version = "0.3.6" + +[[deps.IniFile]] +git-tree-sha1 = "f550e6e32074c939295eb5ea6de31849ac2c9625" uuid = "83e8ac13-25f8-5344-8a64-a9f2b223428f" -version = "0.5.0" +version = "0.5.1" + +[[deps.InitialValues]] +git-tree-sha1 = "4da0f88e9a39111c2fa3add390ab15f3a44f3ca3" +uuid = "22cec73e-a1b8-11e9-2c92-598750a2cf9c" +version = "0.3.1" -[[InteractiveUtils]] +[[deps.InlineStrings]] +deps = ["Parsers"] +git-tree-sha1 = "d19f9edd8c34760dca2de2b503f969d8700ed288" +uuid = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48" +version = "1.1.4" + +[[deps.InteractiveUtils]] deps = ["Markdown"] uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" -[[InvertedIndices]] +[[deps.InternedStrings]] +deps = ["Random", "Test"] +git-tree-sha1 = "eb05b5625bc5d821b8075a77e4c421933e20c76b" +uuid = "7d512f48-7fb1-5a58-b986-67e6dc259f01" +version = "0.7.0" + +[[deps.InverseFunctions]] deps = ["Test"] -git-tree-sha1 = "15732c475062348b0165684ffe28e85ea8396afc" +git-tree-sha1 = "b3364212fb5d870f724876ffcd34dd8ec6d98918" +uuid = "3587e190-3f89-42d0-90ee-14403ec27112" +version = "0.1.7" + +[[deps.InvertedIndices]] +git-tree-sha1 = "bee5f1ef5bf65df56bdd2e40447590b272a5471f" uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f" -version = "1.0.0" +version = "1.1.0" + +[[deps.IrrationalConstants]] +git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151" +uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" +version = "0.1.1" -[[IterTools]] -git-tree-sha1 = "05110a2ab1fc5f932622ffea2a003221f4782c18" +[[deps.IterTools]] +git-tree-sha1 = "fa6287a4469f5e048d763df38279ee729fbd44e5" uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e" -version = "1.3.0" +version = "1.4.0" -[[IterationControl]] +[[deps.IterationControl]] deps = ["EarlyStopping", "InteractiveUtils"] -git-tree-sha1 = "f61d5d4d0e433b3fab03ca5a1bfa2d7dcbb8094c" +git-tree-sha1 = "d7df9a6fdd82a8cfdfe93a94fcce35515be634da" uuid = "b3c1a2ee-3fec-4384-bf48-272ea71de57c" -version = "0.4.0" +version = "0.5.3" -[[IteratorInterfaceExtensions]] +[[deps.IteratorInterfaceExtensions]] git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856" uuid = "82899510-4779-5014-852e-03e436cf321d" version = "1.0.0" -[[JLLWrappers]] +[[deps.JLD2]] +deps = ["FileIO", "MacroTools", "Mmap", "OrderedCollections", "Pkg", "Printf", "Reexport", "TranscodingStreams", "UUIDs"] +git-tree-sha1 = "81b9477b49402b47fbe7f7ae0b252077f53e4a08" +uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819" +version = "0.4.22" + +[[deps.JLLWrappers]] deps = ["Preferences"] -git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e" +git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1" uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" -version = "1.3.0" - -[[JLSO]] -deps = ["BSON", "CodecZlib", "FilePathsBase", "Memento", "Pkg", "Serialization"] -git-tree-sha1 = "e00feb9d56e9e8518e0d60eef4d1040b282771e2" -uuid = "9da8a3cd-07a3-59c0-a743-3fdc52c30d11" -version = "2.6.0" +version = "1.4.1" -[[JSON]] +[[deps.JSON]] deps = ["Dates", "Mmap", 
"Parsers", "Unicode"] -git-tree-sha1 = "81690084b6198a2e1da36fcfda16eeca9f9f24e4" +git-tree-sha1 = "3c837543ddb02250ef42f4738347454f95079d4e" uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" -version = "0.21.1" +version = "0.21.3" -[[JpegTurbo_jll]] +[[deps.JSON3]] +deps = ["Dates", "Mmap", "Parsers", "StructTypes", "UUIDs"] +git-tree-sha1 = "fd6f0cae36f42525567108a42c1c674af2ac620d" +uuid = "0f8b85d8-7281-11e9-16c2-39a750bddbf1" +version = "1.9.5" + +[[deps.JpegTurbo_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "d735490ac75c5cb9f1b00d8b5509c11984dc6943" +git-tree-sha1 = "b53380851c6e6664204efb2e62cd24fa5c47e4ba" uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8" -version = "2.1.0+0" +version = "2.1.2+0" -[[Juno]] -deps = ["Base64", "Logging", "Media", "Profile"] -git-tree-sha1 = "07cb43290a840908a771552911a6274bc6c072c7" -uuid = "e5e0dc1b-0480-54bc-9374-aad01c23163d" -version = "0.8.4" +[[deps.JuliaVariables]] +deps = ["MLStyle", "NameResolution"] +git-tree-sha1 = "49fb3cb53362ddadb4415e9b73926d6b40709e70" +uuid = "b14d175d-62b4-44ba-8fb7-3064adc8c3ec" +version = "0.2.4" -[[LAME_jll]] +[[deps.LAME_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "f6250b16881adf048549549fba48b1161acdac8c" uuid = "c1c5ebd0-6772-5130-a774-d5fcae4a789d" version = "3.100.1+0" -[[LLVM]] -deps = ["CEnum", "Libdl", "Printf", "Unicode"] -git-tree-sha1 = "f57ac3fd2045b50d3db081663837ac5b4096947e" +[[deps.LERC_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "bf36f528eec6634efc60d7ec062008f171071434" +uuid = "88015f11-f218-50d7-93a8-a6af411a945d" +version = "3.0.0+1" + +[[deps.LLVM]] +deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"] +git-tree-sha1 = "e7e9184b0bf0158ac4e4aa9daf00041b5909bf1a" uuid = "929cbde3-209d-540e-8aea-75f648917ca0" -version = "3.9.0" +version = "4.14.0" + +[[deps.LLVMExtra_jll]] +deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg", "TOML"] +git-tree-sha1 = "771bfe376249626d3ca12bcd58ba243d3f961576" +uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab" +version = "0.0.16+0" -[[LZO_jll]] +[[deps.LZO_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "e5b909bcf985c5e2605737d2ce278ed791b89be6" uuid = "dd4b983a-f0e5-5f8d-a1b7-129d4a5fb1ac" version = "2.10.1+0" -[[LaTeXStrings]] -git-tree-sha1 = "c7f1c695e06c01b95a67f0cd1d34994f3e7db104" +[[deps.LaTeXStrings]] +git-tree-sha1 = "f2355693d6778a178ade15952b7ac47a4ff97996" uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" -version = "1.2.1" +version = "1.3.0" -[[Latexify]] +[[deps.Latexify]] deps = ["Formatting", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "Printf", "Requires"] -git-tree-sha1 = "a4b12a1bd2ebade87891ab7e36fdbce582301a92" +git-tree-sha1 = "1a43be956d433b5d0321197150c2f94e16c0aaa0" uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" -version = "0.15.6" +version = "0.15.16" -[[LatinHypercubeSampling]] +[[deps.LatinHypercubeSampling]] deps = ["Random", "StableRNGs", "StatsBase", "Test"] git-tree-sha1 = "42938ab65e9ed3c3029a8d2c58382ca75bdab243" uuid = "a5e1c1ea-c99a-51d3-a14d-a9a37257b02d" version = "1.8.0" -[[LazyArtifacts]] +[[deps.LazyArtifacts]] deps = ["Artifacts", "Pkg"] uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" -[[LearnBase]] -git-tree-sha1 = "a0d90569edd490b82fdc4dc078ea54a5a800d30a" -uuid = "7f8f8fb0-2700-5f03-b4bd-41f8cfc144b6" -version = "0.4.1" +[[deps.LazyModules]] +git-tree-sha1 = "a560dd966b386ac9ae60bdd3a3d3a326062d3c3e" +uuid = "8cdb02fc-e678-4876-92c5-9defec4f444e" +version = "0.3.1" 
-[[LibCURL]] +[[deps.LibCURL]] deps = ["LibCURL_jll", "MozillaCACerts_jll"] uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" -[[LibCURL_jll]] +[[deps.LibCURL_jll]] deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" -[[LibGit2]] +[[deps.LibGit2]] deps = ["Base64", "NetworkOptions", "Printf", "SHA"] uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" -[[LibSSH2_jll]] +[[deps.LibSSH2_jll]] deps = ["Artifacts", "Libdl", "MbedTLS_jll"] uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" -[[LibVPX_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "12ee7e23fa4d18361e7c2cde8f8337d4c3101bc7" -uuid = "dd192d2f-8180-539f-9fb4-cc70b1dcf69a" -version = "1.10.0+0" - -[[Libdl]] +[[deps.Libdl]] uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" -[[Libffi_jll]] +[[deps.Libffi_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "761a393aeccd6aa92ec3515e428c26bf99575b3b" +git-tree-sha1 = "0b4a5d71f3e5200a7dff793393e09dfc2d874290" uuid = "e9f186c6-92d2-5b65-8a66-fee21dc1b490" -version = "3.2.2+0" +version = "3.2.2+1" -[[Libgcrypt_jll]] +[[deps.Libgcrypt_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgpg_error_jll", "Pkg"] git-tree-sha1 = "64613c82a59c120435c067c2b809fc61cf5166ae" uuid = "d4300ac3-e22c-5743-9152-c294e39db1e4" version = "1.8.7+0" -[[Libglvnd_jll]] +[[deps.Libglvnd_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll", "Xorg_libXext_jll"] git-tree-sha1 = "7739f837d6447403596a75d19ed01fd08d6f56bf" uuid = "7e76a0d4-f3c7-5321-8279-8d96eeed0f29" version = "1.3.0+3" -[[Libgpg_error_jll]] +[[deps.Libgpg_error_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "c333716e46366857753e273ce6a69ee0945a6db9" uuid = "7add5ba3-2f88-524e-9cd5-f83b8a55f7b8" version = "1.42.0+0" -[[Libiconv_jll]] +[[deps.Libiconv_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "42b62845d70a619f063a7da093d995ec8e15e778" uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531" version = "1.16.1+1" -[[Libmount_jll]] +[[deps.Libmount_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "9c30530bf0effd46e15e0fdcf2b8636e78cbbd73" uuid = "4b2f31a3-9ecc-558c-b454-b3730dcb73e9" version = "2.35.0+0" -[[Libtiff_jll]] -deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Pkg", "Zlib_jll", "Zstd_jll"] -git-tree-sha1 = "340e257aada13f95f98ee352d316c3bed37c8ab9" +[[deps.Libtiff_jll]] +deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "LERC_jll", "Libdl", "Pkg", "Zlib_jll", "Zstd_jll"] +git-tree-sha1 = "3eb79b0ca5764d4799c06699573fd8f533259713" uuid = "89763e89-9b03-5906-acba-b20f662cd828" -version = "4.3.0+0" +version = "4.4.0+0" -[[Libuuid_jll]] +[[deps.Libuuid_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "7f3efec06033682db852f8b3bc3c1d2b0a0ab066" uuid = "38a345b3-de98-5d2b-a5d3-14cd9215e700" version = "2.36.0+0" -[[LinearAlgebra]] -deps = ["Libdl"] +[[deps.LinearAlgebra]] +deps = ["Libdl", "libblastrampoline_jll"] uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -[[Literate]] -deps = ["Base64", "IOCapture", "JSON", "REPL"] -git-tree-sha1 = "501a1a74a0c825037860d36d87d703e987d39dbc" -uuid = "98b081ad-f1c9-55d3-8b20-4c87d4299306" -version = "2.8.1" - -[[LogExpFunctions]] -deps = ["DocStringExtensions", "LinearAlgebra"] -git-tree-sha1 = "1ba664552f1ef15325e68dc4c05c3ef8c2d5d885" +[[deps.LogExpFunctions]] +deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", 
"IrrationalConstants", "LinearAlgebra"] +git-tree-sha1 = "361c2b088575b07946508f135ac556751240091c" uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.2.4" +version = "0.3.17" -[[Logging]] +[[deps.Logging]] uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" -[[LossFunctions]] -deps = ["InteractiveUtils", "LearnBase", "Markdown", "RecipesBase", "StatsBase"] -git-tree-sha1 = "0f057f6ea90a84e73a8ef6eebb4dc7b5c330020f" +[[deps.LossFunctions]] +deps = ["InteractiveUtils", "Markdown", "RecipesBase"] +git-tree-sha1 = "53cd63a12f06a43eef6f4aafb910ac755c122be7" uuid = "30fc2ffe-d236-52d8-8643-a9d8f7c094a7" -version = "0.7.2" +version = "0.8.0" -[[Lz4_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "5d494bc6e85c4c9b626ee0cab05daa4085486ab1" -uuid = "5ced341a-0733-55b8-9ab6-a4889d929147" -version = "1.9.3+0" - -[[MAT]] +[[deps.MAT]] deps = ["BufferedStreams", "CodecZlib", "HDF5", "SparseArrays"] -git-tree-sha1 = "5c62992f3d46b8dce69bdd234279bb5a369db7d5" +git-tree-sha1 = "971be550166fe3f604d28715302b58a3f7293160" uuid = "23992714-dd62-5051-b70f-ba57cb901cac" -version = "0.10.1" +version = "0.10.3" -[[MLDatasets]] -deps = ["BinDeps", "ColorTypes", "DataDeps", "DelimitedFiles", "FixedPointNumbers", "GZip", "MAT", "Requires"] -git-tree-sha1 = "a4f7bcbfa6c7e578607d3d1a657407557f54090f" +[[deps.MLDatasets]] +deps = ["CSV", "DataDeps", "DataFrames", "DelimitedFiles", "FileIO", "FixedPointNumbers", "GZip", "Glob", "HDF5", "ImageShow", "JLD2", "JSON3", "LazyModules", "MAT", "MLUtils", "NPZ", "Pickle", "Requires", "SparseArrays", "Tables"] +git-tree-sha1 = "6de5b8a75e30d28f0119b31db44a00b6fd905d85" uuid = "eb30cadb-4394-5ae3-aed4-317e484a6458" -version = "0.5.6" +version = "0.7.4" -[[MLJ]] -deps = ["CategoricalArrays", "ComputationalResources", "Distributed", "Distributions", "LinearAlgebra", "MLJBase", "MLJEnsembles", "MLJIteration", "MLJModels", "MLJOpenML", "MLJScientificTypes", "MLJSerialization", "MLJTuning", "Pkg", "ProgressMeter", "Random", "Statistics", "StatsBase", "Tables"] -git-tree-sha1 = "3e5176bbc839153ee781a4607b0d5995bba43013" +[[deps.MLJ]] +deps = ["CategoricalArrays", "ComputationalResources", "Distributed", "Distributions", "LinearAlgebra", "MLJBase", "MLJEnsembles", "MLJIteration", "MLJModels", "MLJTuning", "OpenML", "Pkg", "ProgressMeter", "Random", "ScientificTypes", "Statistics", "StatsBase", "Tables"] +git-tree-sha1 = "4199f3ff372222dbdc8602b70f8eefcd1aa06606" uuid = "add582a8-e3ab-11e8-2d5e-e98b27df1bc7" -version = "0.16.6" +version = "0.18.4" -[[MLJBase]] -deps = ["CategoricalArrays", "ComputationalResources", "Dates", "DelimitedFiles", "Distributed", "Distributions", "InteractiveUtils", "InvertedIndices", "LinearAlgebra", "LossFunctions", "MLJModelInterface", "MLJScientificTypes", "Missings", "OrderedCollections", "Parameters", "PrettyTables", "ProgressMeter", "Random", "StatisticalTraits", "Statistics", "StatsBase", "Tables"] -git-tree-sha1 = "c841d75dcd7dad3e3faee3a49efaf533a2c8d1df" +[[deps.MLJBase]] +deps = ["CategoricalArrays", "CategoricalDistributions", "ComputationalResources", "Dates", "DelimitedFiles", "Distributed", "Distributions", "InteractiveUtils", "InvertedIndices", "LinearAlgebra", "LossFunctions", "MLJModelInterface", "Missings", "OrderedCollections", "Parameters", "PrettyTables", "ProgressMeter", "Random", "ScientificTypes", "Serialization", "StatisticalTraits", "Statistics", "StatsBase", "Tables"] +git-tree-sha1 = "66ab866c0c214fb4bde679ae0af6255b90faa5f6" uuid = "a7f614a8-145f-11e9-1d2a-a57a1082229d" -version = "0.18.11" +version 
= "0.20.17" -[[MLJEnsembles]] -deps = ["CategoricalArrays", "ComputationalResources", "Distributed", "Distributions", "MLJBase", "MLJModelInterface", "ProgressMeter", "Random", "ScientificTypes", "StatsBase"] -git-tree-sha1 = "1e124bb0f98d24bea17eae0fddf2503ec46fef76" +[[deps.MLJEnsembles]] +deps = ["CategoricalArrays", "CategoricalDistributions", "ComputationalResources", "Distributed", "Distributions", "MLJBase", "MLJModelInterface", "ProgressMeter", "Random", "ScientificTypesBase", "StatsBase"] +git-tree-sha1 = "ed2f724be26d0023cade9d59b55da93f528c3f26" uuid = "50ed68f4-41fd-4504-931a-ed422449fee0" -version = "0.1.0" +version = "0.3.1" -[[MLJFlux]] +[[deps.MLJFlux]] deps = ["CategoricalArrays", "ColorTypes", "ComputationalResources", "Flux", "MLJModelInterface", "ProgressMeter", "Random", "Statistics", "Tables"] -git-tree-sha1 = "cf88a40f375bc6fd8b431ff25997a6071633ca2c" +git-tree-sha1 = "a47257705ebca405a25320b111345a978925bcd5" uuid = "094fc8d1-fd35-5302-93ea-dabda2abf845" -version = "0.2.0" +version = "0.2.7" -[[MLJIteration]] -deps = ["IterationControl", "MLJBase", "Random"] -git-tree-sha1 = "f927564f7e295b3205f37186191c82720a3d93a5" +[[deps.MLJIteration]] +deps = ["IterationControl", "MLJBase", "Random", "Serialization"] +git-tree-sha1 = "024d0bd22bf4a5b273f626e89d742a9db95285ef" uuid = "614be32b-d00c-4edb-bd02-1eb411ab5e55" -version = "0.3.1" +version = "0.5.0" -[[MLJModelInterface]] -deps = ["Random", "ScientificTypes", "StatisticalTraits"] -git-tree-sha1 = "cafa0e923ce1ae659a4b4cb8eb03c98b916f0d4d" +[[deps.MLJModelInterface]] +deps = ["Random", "ScientificTypesBase", "StatisticalTraits"] +git-tree-sha1 = "16fa7c2e14aa5b3854bc77ab5f1dbe2cdc488903" uuid = "e80e1ace-859a-464e-9ed9-23947d8ae3ea" -version = "1.1.0" +version = "1.6.0" -[[MLJModels]] -deps = ["CategoricalArrays", "Dates", "Distances", "Distributions", "InteractiveUtils", "LinearAlgebra", "MLJBase", "MLJModelInterface", "MLJScientificTypes", "OrderedCollections", "Parameters", "Pkg", "REPL", "Random", "Requires", "Statistics", "StatsBase", "Tables"] -git-tree-sha1 = "6a430717810ca3ef7ba182235f07c634ede4c412" +[[deps.MLJModels]] +deps = ["CategoricalArrays", "CategoricalDistributions", "Dates", "Distances", "Distributions", "InteractiveUtils", "LinearAlgebra", "MLJModelInterface", "Markdown", "OrderedCollections", "Parameters", "Pkg", "PrettyPrinting", "REPL", "Random", "ScientificTypes", "StatisticalTraits", "Statistics", "StatsBase", "Tables"] +git-tree-sha1 = "8291b42d6bf744dda0bfb16b6f0befbae232a1fa" uuid = "d491faf4-2d78-11e9-2867-c94bc002c0b7" -version = "0.14.7" - -[[MLJOpenML]] -deps = ["HTTP", "JSON"] -git-tree-sha1 = "2903e9ef92ac5f390ca2a420fb0dbe3361ab57d7" -uuid = "cbea4545-8c96-4583-ad3a-44078d60d369" -version = "1.0.0" +version = "0.15.9" -[[MLJScientificTypes]] -deps = ["CategoricalArrays", "ColorTypes", "Dates", "PersistenceDiagramsBase", "PrettyTables", "ScientificTypes", "StatisticalTraits", "Tables"] -git-tree-sha1 = "59ef6602733869cc695de7e2524f75359ba1930f" -uuid = "2e2323e0-db8b-457b-ae0d-bdfb3bc63afd" -version = "0.4.8" +[[deps.MLJTuning]] +deps = ["ComputationalResources", "Distributed", "Distributions", "LatinHypercubeSampling", "MLJBase", "ProgressMeter", "Random", "RecipesBase"] +git-tree-sha1 = "e1d0220d8bf5c17270cef41835ed57f88d63579d" +uuid = "03970b2e-30c4-11ea-3135-d1576263f10f" +version = "0.7.2" -[[MLJSerialization]] -deps = ["IterationControl", "JLSO", "MLJBase", "MLJModelInterface"] -git-tree-sha1 = "cd6285f95948fe1047b7d6fd346c172e247c1188" -uuid = 
"17bed46d-0ab5-4cd4-b792-a5c4b8547c6d" -version = "1.1.2" +[[deps.MLStyle]] +git-tree-sha1 = "c4f433356372cc8838da59e3608be4b0c4c2c280" +uuid = "d8e11817-5142-5d16-987a-aa16d5891078" +version = "0.4.13" -[[MLJTuning]] -deps = ["ComputationalResources", "Distributed", "Distributions", "LatinHypercubeSampling", "MLJBase", "MLJModelInterface", "ProgressMeter", "Random", "RecipesBase"] -git-tree-sha1 = "516187c8578e5a33897f0c8963ccc548e38daa8b" -uuid = "03970b2e-30c4-11ea-3135-d1576263f10f" -version = "0.6.8" +[[deps.MLUtils]] +deps = ["ChainRulesCore", "DelimitedFiles", "FLoops", "FoldsThreads", "Random", "ShowCases", "Statistics", "StatsBase", "Transducers"] +git-tree-sha1 = "7fd41b7edef1d58062a75c2f129e839a8d168fe9" +uuid = "f1d291b0-491e-4a28-83b9-f70985020b54" +version = "0.2.10" -[[MacroTools]] +[[deps.MacroTools]] deps = ["Markdown", "Random"] -git-tree-sha1 = "6a8a2a625ab0dea913aba95c11370589e0239ff0" +git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf" uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" -version = "0.5.6" +version = "0.5.9" + +[[deps.MappedArrays]] +git-tree-sha1 = "e8b359ef06ec72e8c030463fe02efe5527ee5142" +uuid = "dbb5928d-eab1-5f90-85c2-b9b0edb7c900" +version = "0.4.1" + +[[deps.MarchingCubes]] +deps = ["StaticArrays"] +git-tree-sha1 = "3bf4baa9df7d1367168ebf60ed02b0379ea91099" +uuid = "299715c1-40a9-479a-aaf9-4a633d36f717" +version = "0.1.3" -[[Markdown]] +[[deps.Markdown]] deps = ["Base64"] uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" -[[MbedTLS]] -deps = ["Dates", "MbedTLS_jll", "Random", "Sockets"] -git-tree-sha1 = "1c38e51c3d08ef2278062ebceade0e46cefc96fe" +[[deps.MbedTLS]] +deps = ["Dates", "MbedTLS_jll", "MozillaCACerts_jll", "Random", "Sockets"] +git-tree-sha1 = "d9ab10da9de748859a7780338e1d6566993d1f25" uuid = "739be429-bea8-5141-9913-cc70e7f3736d" -version = "1.0.3" +version = "1.1.3" -[[MbedTLS_jll]] +[[deps.MbedTLS_jll]] deps = ["Artifacts", "Libdl"] uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -[[Measures]] +[[deps.Measures]] git-tree-sha1 = "e498ddeee6f9fdb4551ce855a46f54dbd900245f" uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e" version = "0.3.1" -[[Media]] -deps = ["MacroTools", "Test"] -git-tree-sha1 = "75a54abd10709c01f1b86b84ec225d26e840ed58" -uuid = "e89f7d12-3494-54d1-8411-f7d8b9ae1f27" -version = "0.5.0" +[[deps.MicroCollections]] +deps = ["BangBang", "InitialValues", "Setfield"] +git-tree-sha1 = "6bb7786e4f24d44b4e29df03c69add1b63d88f01" +uuid = "128add7d-3638-4c79-886c-908ea0c25c34" +version = "0.1.2" -[[Memento]] -deps = ["Dates", "Distributed", "JSON", "Serialization", "Sockets", "Syslogs", "Test", "TimeZones", "UUIDs"] -git-tree-sha1 = "19650888f97362a2ae6c84f0f5f6cda84c30ac38" -uuid = "f28f55f0-a522-5efc-85c2-fe41dfb9b2d9" -version = "1.2.0" - -[[Missings]] +[[deps.Missings]] deps = ["DataAPI"] -git-tree-sha1 = "4ea90bd5d3985ae1f9a908bd4500ae88921c5ce7" +git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f" uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" -version = "1.0.0" +version = "1.0.2" -[[Mmap]] +[[deps.Mmap]] uuid = "a63ad114-7e13-5084-954f-fe012c677804" -[[Mocking]] -deps = ["ExprTools"] -git-tree-sha1 = "916b850daad0d46b8c71f65f719c49957e9513ed" -uuid = "78c3b35d-d492-501b-9361-3d52fe80e533" -version = "0.7.1" +[[deps.MosaicViews]] +deps = ["MappedArrays", "OffsetArrays", "PaddedViews", "StackViews"] +git-tree-sha1 = "b34e3bc3ca7c94914418637cb10cc4d1d80d877d" +uuid = "e94cdb99-869f-56ef-bcf0-1ae2bcbe0389" +version = "0.3.3" -[[MozillaCACerts_jll]] +[[deps.MozillaCACerts_jll]] uuid = "14a3606d-f60d-562e-9121-12d972cd8159" 
-[[NNlib]] -deps = ["Adapt", "ChainRulesCore", "Compat", "LinearAlgebra", "Pkg", "Requires", "Statistics"] -git-tree-sha1 = "7461639cef384a2ad058005b49e32b318d844343" +[[deps.NNlib]] +deps = ["Adapt", "ChainRulesCore", "LinearAlgebra", "Pkg", "Requires", "Statistics"] +git-tree-sha1 = "415108fd88d6f55cedf7ee940c7d4b01fad85421" uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" -version = "0.7.22" +version = "0.8.9" -[[NNlibCUDA]] -deps = ["CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics"] -git-tree-sha1 = "c2f2f152340022ce44c6ac97e05c62a6de59c373" +[[deps.NNlibCUDA]] +deps = ["Adapt", "CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics"] +git-tree-sha1 = "4429261364c5ea5b7308aecaa10e803ace101631" uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d" -version = "0.1.4" +version = "0.2.4" -[[NaNMath]] -git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb" +[[deps.NPZ]] +deps = ["Compat", "FileIO", "ZipFile"] +git-tree-sha1 = "45f77b87cb9ed5b519f31e1590258930f3b840ee" +uuid = "15e1cf62-19b3-5cfa-8e77-841668bca605" +version = "0.4.2" + +[[deps.NaNMath]] +deps = ["OpenLibm_jll"] +git-tree-sha1 = "a7c3d1da1189a1c2fe843a3bfa04d18d20eb3211" uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" -version = "0.3.5" +version = "1.0.1" -[[NetworkOptions]] +[[deps.NameResolution]] +deps = ["PrettyPrint"] +git-tree-sha1 = "1a0fa0e9613f46c9b8c11eee38ebb4f590013c5e" +uuid = "71a1bf82-56d0-4bbc-8a3c-48b961074391" +version = "0.1.5" + +[[deps.NetworkOptions]] uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" -[[Ogg_jll]] +[[deps.OffsetArrays]] +deps = ["Adapt"] +git-tree-sha1 = "1ea784113a6aa054c5ebd95945fa5e52c2f378e7" +uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" +version = "1.12.7" + +[[deps.Ogg_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "7937eda4681660b4d6aeeecc2f7e1c81c8ee4e2f" +git-tree-sha1 = "887579a3eb005446d514ab7aeac5d1d027658b8f" uuid = "e7412a2a-1a6e-54c0-be00-318e2571c051" -version = "1.3.5+0" +version = "1.3.5+1" + +[[deps.OpenBLAS_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] +uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" + +[[deps.OpenLibm_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "05823500-19ac-5b8b-9628-191a04bc5112" + +[[deps.OpenML]] +deps = ["ARFFFiles", "HTTP", "JSON", "Markdown", "Pkg"] +git-tree-sha1 = "06080992e86a93957bfe2e12d3181443cedf2400" +uuid = "8b6db2d4-7670-4922-a472-f9537c81ab66" +version = "0.2.0" -[[OpenSSL_jll]] +[[deps.OpenSSL_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "15003dcb7d8db3c6c857fda14891a539a8f2705a" +git-tree-sha1 = "e60321e3f2616584ff98f0a4f18d98ae6f89bbb3" uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" -version = "1.1.10+0" +version = "1.1.17+0" -[[OpenSpecFun_jll]] +[[deps.OpenSpecFun_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1" uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e" version = "0.5.5+0" -[[Opus_jll]] +[[deps.Optimisers]] +deps = ["ChainRulesCore", "Functors", "LinearAlgebra", "Random", "Statistics"] +git-tree-sha1 = "1ef34738708e3f31994b52693286dabcb3d29f6b" +uuid = "3bd65402-5787-11e9-1adc-39752487f4e2" +version = "0.2.9" + +[[deps.Opus_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "51a08fb14ec28da2ec7a927c4337e4332c2a4720" uuid = "91d4177d-7536-5919-b921-800302f37372" version = "1.3.2+0" -[[OrderedCollections]] +[[deps.OrderedCollections]] git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c" uuid = 
"bac558e1-5e72-5ebc-8fee-abe8a469f55d" version = "1.4.1" -[[PCRE_jll]] +[[deps.PCRE_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "b2a7af664e098055a7529ad1a900ded962bca488" uuid = "2f80f16e-611a-54ab-bc61-aa92de5b98fc" version = "8.44.0+0" -[[PDMats]] +[[deps.PDMats]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "4dd403333bcf0909341cfe57ec115152f937d7d8" +git-tree-sha1 = "cf494dca75a69712a72b80bc48f59dcf3dea63ec" uuid = "90014a1f-27ba-587c-ab20-58faa44d9150" -version = "0.11.1" +version = "0.11.16" -[[Parameters]] +[[deps.PaddedViews]] +deps = ["OffsetArrays"] +git-tree-sha1 = "03a7a85b76381a3d04c7a1656039197e70eda03d" +uuid = "5432bcbf-9aad-5242-b902-cca2824c8663" +version = "0.5.11" + +[[deps.Parameters]] deps = ["OrderedCollections", "UnPack"] -git-tree-sha1 = "2276ac65f1e236e0a6ea70baff3f62ad4c625345" +git-tree-sha1 = "34c0e9ad262e5f7fc75b10a9952ca7692cfc5fbe" uuid = "d96e819e-fc66-5662-9728-84c9c7592b0a" -version = "0.12.2" +version = "0.12.3" -[[Parsers]] +[[deps.Parsers]] deps = ["Dates"] -git-tree-sha1 = "c8abc88faa3f7a3950832ac5d6e690881590d6dc" +git-tree-sha1 = "0044b23da09b5608b4ecacb4e5e6c6332f833a7e" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "1.1.0" +version = "2.3.2" -[[PersistenceDiagramsBase]] -deps = ["Compat", "Tables"] -git-tree-sha1 = "ec6eecbfae1c740621b5d903a69ec10e30f3f4bc" -uuid = "b1ad91c1-539c-4ace-90bd-ea06abc420fa" -version = "0.1.1" +[[deps.Pickle]] +deps = ["DataStructures", "InternedStrings", "Serialization", "SparseArrays", "Strided", "StringEncodings", "ZipFile"] +git-tree-sha1 = "e6a34eb1dc0c498f0774bbfbbbeff2de101f4235" +uuid = "fbb45041-c46e-462f-888f-7c521cafbc2c" +version = "0.3.2" -[[Pixman_jll]] +[[deps.Pixman_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "b4f5d02549a10e20780a24fce72bea96b6329e29" uuid = "30392449-352a-5448-841d-b1acce4e97dc" version = "0.40.1+0" -[[Pkg]] +[[deps.Pkg]] deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" -[[PlotThemes]] -deps = ["PlotUtils", "Requires", "Statistics"] -git-tree-sha1 = "a3a964ce9dc7898193536002a6dd892b1b5a6f1d" +[[deps.PlotThemes]] +deps = ["PlotUtils", "Statistics"] +git-tree-sha1 = "8162b2f8547bc23876edd0c5181b27702ae58dce" uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a" -version = "2.0.1" +version = "3.0.0" -[[PlotUtils]] +[[deps.PlotUtils]] deps = ["ColorSchemes", "Colors", "Dates", "Printf", "Random", "Reexport", "Statistics"] -git-tree-sha1 = "ae9a295ac761f64d8c2ec7f9f24d21eb4ffba34d" +git-tree-sha1 = "9888e59493658e476d3073f1ce24348bdc086660" uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" -version = "1.0.10" +version = "1.3.0" -[[Plots]] -deps = ["Base64", "Contour", "Dates", "FFMPEG", "FixedPointNumbers", "GR", "GeometryBasics", "JSON", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs"] -git-tree-sha1 = "a680b659a1ba99d3663a40aa9acffd67768a410f" +[[deps.Plots]] +deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "GeometryBasics", "JSON", "LaTeXStrings", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", 
"Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs", "UnicodeFun", "Unzip"] +git-tree-sha1 = "a19652399f43938413340b2068e11e55caa46b65" uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -version = "1.16.6" +version = "1.31.7" + +[[deps.PooledArrays]] +deps = ["DataAPI", "Future"] +git-tree-sha1 = "a6062fe4063cdafe78f4a0a81cfffb89721b30e7" +uuid = "2dfb63ee-cc39-5dd5-95bd-886bf059d720" +version = "1.4.2" -[[Preferences]] +[[deps.Preferences]] deps = ["TOML"] -git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a" +git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d" uuid = "21216c6a-2e73-6563-6e65-726566657250" -version = "1.2.2" +version = "1.3.0" + +[[deps.PrettyPrint]] +git-tree-sha1 = "632eb4abab3449ab30c5e1afaa874f0b98b586e4" +uuid = "8162dcfd-2161-5ef2-ae6c-7681170c5f98" +version = "0.2.0" -[[PrettyTables]] +[[deps.PrettyPrinting]] +git-tree-sha1 = "4be53d093e9e37772cc89e1009e8f6ad10c4681b" +uuid = "54e16d92-306c-5ea0-a30b-337be88ac337" +version = "0.4.0" + +[[deps.PrettyTables]] deps = ["Crayons", "Formatting", "Markdown", "Reexport", "Tables"] -git-tree-sha1 = "0d1245a357cc61c8cd61934c07447aa569ff22e6" +git-tree-sha1 = "dfb54c4e414caa595a1f2ed759b160f5a3ddcba5" uuid = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" -version = "1.1.0" +version = "1.3.1" -[[Printf]] +[[deps.Printf]] deps = ["Unicode"] uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" -[[Profile]] -deps = ["Printf"] -uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79" +[[deps.ProgressLogging]] +deps = ["Logging", "SHA", "UUIDs"] +git-tree-sha1 = "80d919dee55b9c50e8d9e2da5eeafff3fe58b539" +uuid = "33c8b6b6-d38a-422a-b730-caa89a2f386c" +version = "0.1.4" -[[ProgressMeter]] +[[deps.ProgressMeter]] deps = ["Distributed", "Printf"] -git-tree-sha1 = "afadeba63d90ff223a6a48d2009434ecee2ec9e8" +git-tree-sha1 = "d7a7aef8f8f2d537104f170139553b14dfe39fe9" uuid = "92933f4c-e287-5a05-a399-4b506db050ca" -version = "1.7.1" - -[[PyCall]] -deps = ["Conda", "Dates", "Libdl", "LinearAlgebra", "MacroTools", "Serialization", "VersionParsing"] -git-tree-sha1 = "169bb8ea6b1b143c5cf57df6d34d022a7b60c6db" -uuid = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0" -version = "1.92.3" - -[[PyPlot]] -deps = ["Colors", "LaTeXStrings", "PyCall", "Sockets", "Test", "VersionParsing"] -git-tree-sha1 = "67dde2482fe1a72ef62ed93f8c239f947638e5a2" -uuid = "d330b81b-6aea-500a-939a-2ce795aea3ee" -version = "2.9.0" +version = "1.7.2" -[[Qt5Base_jll]] +[[deps.Qt5Base_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Fontconfig_jll", "Glib_jll", "JLLWrappers", "Libdl", "Libglvnd_jll", "OpenSSL_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libxcb_jll", "Xorg_xcb_util_image_jll", "Xorg_xcb_util_keysyms_jll", "Xorg_xcb_util_renderutil_jll", "Xorg_xcb_util_wm_jll", "Zlib_jll", "xkbcommon_jll"] -git-tree-sha1 = "ad368663a5e20dbb8d6dc2fddeefe4dae0781ae8" +git-tree-sha1 = "c6c0f690d0cc7caddb74cef7aa847b824a16b256" uuid = "ea2cea3b-5b76-57ae-a6ef-0a8af62496e1" -version = "5.15.3+0" +version = "5.15.3+1" -[[QuadGK]] +[[deps.QuadGK]] deps = ["DataStructures", "LinearAlgebra"] -git-tree-sha1 = "12fbe86da16df6679be7521dfb39fbc861e1dc7b" +git-tree-sha1 = "78aadffb3efd2155af139781b8a8df1ef279ea39" uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" -version = "2.4.1" +version = "2.4.2" -[[REPL]] +[[deps.REPL]] deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" -[[Random]] -deps = ["Serialization"] +[[deps.Random]] +deps = ["SHA", "Serialization"] uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -[[Random123]] -deps 
= ["Libdl", "Random", "RandomNumbers"] -git-tree-sha1 = "0e8b146557ad1c6deb1367655e052276690e71a3" +[[deps.Random123]] +deps = ["Random", "RandomNumbers"] +git-tree-sha1 = "7a1a306b72cfa60634f03a911405f4e64d1b718b" uuid = "74087812-796a-5b5d-8853-05524746bad3" -version = "1.4.2" +version = "1.6.0" -[[RandomNumbers]] +[[deps.RandomNumbers]] deps = ["Random", "Requires"] -git-tree-sha1 = "441e6fc35597524ada7f85e13df1f4e10137d16f" +git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111" uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143" -version = "1.4.0" +version = "1.5.3" -[[RecipesBase]] -git-tree-sha1 = "b3fb709f3c97bfc6e948be68beeecb55a0b340ae" +[[deps.RealDot]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9" +uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9" +version = "0.1.0" + +[[deps.RecipesBase]] +git-tree-sha1 = "6bf3f380ff52ce0832ddd3a2a7b9538ed1bcca7d" uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" -version = "1.1.1" +version = "1.2.1" -[[RecipesPipeline]] +[[deps.RecipesPipeline]] deps = ["Dates", "NaNMath", "PlotUtils", "RecipesBase"] -git-tree-sha1 = "9b8e57e3cca8828a1bc759840bfe48d64db9abfb" +git-tree-sha1 = "e7eac76a958f8664f2718508435d058168c7953d" uuid = "01d81517-befc-4cb6-b9ec-a95719d0359c" -version = "0.3.3" +version = "0.6.3" -[[Reexport]] -git-tree-sha1 = "5f6c21241f0f655da3952fd60aa18477cf96c220" +[[deps.Reexport]] +git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" uuid = "189a3867-3050-52da-a836-e630ba90ab69" -version = "1.1.0" +version = "1.2.2" + +[[deps.RelocatableFolders]] +deps = ["SHA", "Scratch"] +git-tree-sha1 = "22c5201127d7b243b9ee1de3b43c408879dff60f" +uuid = "05181044-ff0b-4ac5-8273-598c1e38db00" +version = "0.3.0" -[[Requires]] +[[deps.Requires]] deps = ["UUIDs"] -git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621" +git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7" uuid = "ae029012-a4dd-5104-9daa-d747884805df" -version = "1.1.3" +version = "1.3.0" -[[Rmath]] +[[deps.Rmath]] deps = ["Random", "Rmath_jll"] git-tree-sha1 = "bf3188feca147ce108c76ad82c2792c57abe7b1f" uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa" version = "0.7.0" -[[Rmath_jll]] +[[deps.Rmath_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "68db32dff12bb6127bac73c209881191bf0efbb7" uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f" version = "0.3.0+0" -[[SHA]] +[[deps.SHA]] uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" -[[ScientificTypes]] -git-tree-sha1 = "b4e89a674804025c4a5843e35e562910485690c2" +[[deps.ScientificTypes]] +deps = ["CategoricalArrays", "ColorTypes", "Dates", "Distributions", "PrettyTables", "Reexport", "ScientificTypesBase", "StatisticalTraits", "Tables"] +git-tree-sha1 = "ba70c9a6e4c81cc3634e3e80bb8163ab5ef57eb8" uuid = "321657f4-b219-11e9-178b-2701a2544e81" -version = "1.1.2" +version = "3.0.0" -[[Scratch]] +[[deps.ScientificTypesBase]] +git-tree-sha1 = "a8e18eb383b5ecf1b5e6fc237eb39255044fd92b" +uuid = "30f210dd-8aff-4c5f-94ba-8e64358c1161" +version = "3.0.0" + +[[deps.Scratch]] deps = ["Dates"] -git-tree-sha1 = "0b4b7f1393cff97c33891da2a0bf69c6ed241fda" +git-tree-sha1 = "f94f779c94e58bf9ea243e77a37e16d9de9126bd" uuid = "6c6a2e73-6563-6170-7368-637461726353" -version = "1.1.0" +version = "1.1.1" + +[[deps.SentinelArrays]] +deps = ["Dates", "Random"] +git-tree-sha1 = "db8481cf5d6278a121184809e9eb1628943c7704" +uuid = "91c51154-3ec4-41a3-a24f-3f23e20d615c" +version = "1.3.13" -[[Serialization]] +[[deps.Serialization]] uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" -[[SharedArrays]] 
+[[deps.Setfield]] +deps = ["ConstructionBase", "Future", "MacroTools", "Requires"] +git-tree-sha1 = "38d88503f695eb0301479bc9b0d4320b378bafe5" +uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46" +version = "0.8.2" + +[[deps.SharedArrays]] deps = ["Distributed", "Mmap", "Random", "Serialization"] uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" -[[Showoff]] +[[deps.ShowCases]] +git-tree-sha1 = "7f534ad62ab2bd48591bdeac81994ea8c445e4a5" +uuid = "605ecd9f-84a6-4c9e-81e2-4798472b76a3" +version = "0.1.0" + +[[deps.Showoff]] deps = ["Dates", "Grisu"] git-tree-sha1 = "91eddf657aca81df9ae6ceb20b959ae5653ad1de" uuid = "992d4aef-0814-514b-bc4d-f2e9a6c4116f" version = "1.0.3" -[[Sockets]] +[[deps.Sockets]] uuid = "6462fe0b-24de-5631-8697-dd941f90decc" -[[SortingAlgorithms]] +[[deps.SortingAlgorithms]] deps = ["DataStructures"] -git-tree-sha1 = "2ec1962eba973f383239da22e75218565c390a96" +git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508" uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" -version = "1.0.0" +version = "1.0.1" -[[SparseArrays]] +[[deps.SparseArrays]] deps = ["LinearAlgebra", "Random"] uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" -[[SpecialFunctions]] -deps = ["ChainRulesCore", "LogExpFunctions", "OpenSpecFun_jll"] -git-tree-sha1 = "a50550fa3164a8c46747e62063b4d774ac1bcf49" +[[deps.SpecialFunctions]] +deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] +git-tree-sha1 = "d75bda01f8c31ebb72df80a46c88b25d1c79c56d" uuid = "276daf66-3868-5448-9aa4-cd146d93841b" -version = "1.5.1" +version = "2.1.7" + +[[deps.SplittablesBase]] +deps = ["Setfield", "Test"] +git-tree-sha1 = "39c9f91521de844bad65049efd4f9223e7ed43f9" +uuid = "171d559e-b47b-412a-8079-5efa626c420e" +version = "0.1.14" -[[StableRNGs]] +[[deps.StableRNGs]] deps = ["Random", "Test"] git-tree-sha1 = "3be7d49667040add7ee151fefaf1f8c04c8c8276" uuid = "860ef19b-820b-49d6-a774-d7a799459cd3" version = "1.0.0" -[[StaticArrays]] -deps = ["LinearAlgebra", "Random", "Statistics"] -git-tree-sha1 = "745914ebcd610da69f3cb6bf76cb7bb83dcb8c9a" +[[deps.StackViews]] +deps = ["OffsetArrays"] +git-tree-sha1 = "46e589465204cd0c08b4bd97385e4fa79a0c770c" +uuid = "cae243ae-269e-4f55-b966-ac2d0dc13c15" +version = "0.1.1" + +[[deps.Static]] +deps = ["IfElse"] +git-tree-sha1 = "f94f9d627ba3f91e41a815b9f9f977d729e2e06f" +uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3" +version = "0.7.6" + +[[deps.StaticArrays]] +deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"] +git-tree-sha1 = "85bc4b051546db130aeb1e8a696f1da6d4497200" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.2.4" +version = "1.5.5" -[[StatisticalTraits]] -deps = ["ScientificTypes"] -git-tree-sha1 = "2d882a163c295d5d754e4102d92f4dda5a1f906b" -uuid = "64bff920-2084-43da-a3e6-9bb72801c0c9" +[[deps.StaticArraysCore]] +git-tree-sha1 = "5b413a57dd3cea38497d745ce088ac8592fbb5be" +uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" version = "1.1.0" -[[Statistics]] +[[deps.StatisticalTraits]] +deps = ["ScientificTypesBase"] +git-tree-sha1 = "30b9236691858e13f167ce829490a68e1a597782" +uuid = "64bff920-2084-43da-a3e6-9bb72801c0c9" +version = "3.2.0" + +[[deps.Statistics]] deps = ["LinearAlgebra", "SparseArrays"] uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" -[[StatsAPI]] -git-tree-sha1 = "1958272568dc176a1d881acb797beb909c785510" +[[deps.StatsAPI]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "f9af7f195fb13589dd2e2d57fdb401717d2eb1f6" uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0" -version = "1.0.0" +version = "1.5.0" -[[StatsBase]] -deps = 
["DataAPI", "DataStructures", "LinearAlgebra", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] -git-tree-sha1 = "2f6792d523d7448bbe2fec99eca9218f06cc746d" +[[deps.StatsBase]] +deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] +git-tree-sha1 = "d1bf48bfcc554a3761a133fe3a9bb01488e06916" uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" -version = "0.33.8" +version = "0.33.21" -[[StatsFuns]] -deps = ["LogExpFunctions", "Rmath", "SpecialFunctions"] -git-tree-sha1 = "30cd8c360c54081f806b1ee14d2eecbef3c04c49" +[[deps.StatsFuns]] +deps = ["ChainRulesCore", "HypergeometricFunctions", "InverseFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"] +git-tree-sha1 = "5783b877201a82fc0014cbf381e7e6eb130473a4" uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c" -version = "0.9.8" +version = "1.0.1" + +[[deps.Strided]] +deps = ["LinearAlgebra", "TupleTools"] +git-tree-sha1 = "a7a664c91104329c88222aa20264e1a05b6ad138" +uuid = "5e0ebb24-38b0-5f93-81fe-25c709ecae67" +version = "1.2.3" + +[[deps.StringEncodings]] +deps = ["Libiconv_jll"] +git-tree-sha1 = "50ccd5ddb00d19392577902f0079267a72c5ab04" +uuid = "69024149-9ee7-55f6-a4c4-859efe599b68" +version = "0.3.5" -[[StructArrays]] -deps = ["Adapt", "DataAPI", "StaticArrays", "Tables"] -git-tree-sha1 = "000e168f5cc9aded17b6999a560b7c11dda69095" +[[deps.StructArrays]] +deps = ["Adapt", "DataAPI", "StaticArraysCore", "Tables"] +git-tree-sha1 = "8c6ac65ec9ab781af05b08ff305ddc727c25f680" uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" -version = "0.6.0" +version = "0.6.12" -[[StructTypes]] +[[deps.StructTypes]] deps = ["Dates", "UUIDs"] -git-tree-sha1 = "e36adc471280e8b346ea24c5c87ba0571204be7a" +git-tree-sha1 = "79aa7175f0149ba2fe22b96a271f4024429de02d" uuid = "856f2bd8-1eba-4b0a-8007-ebc267875bd4" -version = "1.7.2" +version = "1.9.0" -[[SuiteSparse]] +[[deps.SuiteSparse]] deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" -[[Syslogs]] -deps = ["Printf", "Sockets"] -git-tree-sha1 = "46badfcc7c6e74535cc7d833a91f4ac4f805f86d" -uuid = "cea106d9-e007-5e6c-ad93-58fe2094e9c4" -version = "0.3.0" - -[[TOML]] +[[deps.TOML]] deps = ["Dates"] uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" -[[TableTraits]] +[[deps.TableTraits]] deps = ["IteratorInterfaceExtensions"] git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39" uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c" version = "1.0.1" -[[Tables]] -deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "TableTraits", "Test"] -git-tree-sha1 = "8ed4a3ea724dac32670b062be3ef1c1de6773ae8" +[[deps.Tables]] +deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "OrderedCollections", "TableTraits", "Test"] +git-tree-sha1 = "5ce79ce186cc678bbb5c5681ca3379d1ddae11a1" uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" -version = "1.4.4" +version = "1.7.0" -[[Tar]] +[[deps.Tar]] deps = ["ArgTools", "SHA"] uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" -[[Test]] +[[deps.TensorCore]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "1feb45f88d133a655e001435632f019a9a1bcdb6" +uuid = "62fd8b95-f654-4bbd-a8a5-9c27f68ccd50" +version = "0.1.1" + +[[deps.Test]] deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -[[TimeZones]] -deps = ["Dates", "EzXML", "LazyArtifacts", 
"Mocking", "Pkg", "Printf", "RecipesBase", "Serialization", "Unicode"] -git-tree-sha1 = "960099aed321e05ac649c90d583d59c9309faee1" -uuid = "f269a46b-ccf7-5d73-abea-4c690281aa53" -version = "1.5.5" - -[[TimerOutputs]] +[[deps.TimerOutputs]] deps = ["ExprTools", "Printf"] -git-tree-sha1 = "9f494bc54b4c31404a9eff449235836615929de1" +git-tree-sha1 = "9dfcb767e17b0849d6aaf85997c98a5aea292513" uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" -version = "0.5.10" +version = "0.5.21" -[[TranscodingStreams]] +[[deps.TranscodingStreams]] deps = ["Random", "Test"] -git-tree-sha1 = "7c53c35547de1c5b9d46a4797cf6d8253807108c" +git-tree-sha1 = "4ad90ab2bbfdddcae329cba59dab4a8cdfac3832" uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" -version = "0.9.5" +version = "0.9.7" -[[URIParser]] -deps = ["Unicode"] -git-tree-sha1 = "53a9f49546b8d2dd2e688d216421d050c9a31d0d" -uuid = "30578b45-9adc-5946-b283-645ec420af67" -version = "0.4.1" +[[deps.Transducers]] +deps = ["Adapt", "ArgCheck", "BangBang", "Baselet", "CompositionsBase", "DefineSingletons", "Distributed", "InitialValues", "Logging", "Markdown", "MicroCollections", "Requires", "Setfield", "SplittablesBase", "Tables"] +git-tree-sha1 = "c76399a3bbe6f5a88faa33c8f8a65aa631d95013" +uuid = "28d57a85-8fef-5791-bfe6-a80928e7c999" +version = "0.4.73" -[[URIs]] -git-tree-sha1 = "97bbe755a53fe859669cd907f2d96aee8d2c1355" -uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4" +[[deps.TupleTools]] +git-tree-sha1 = "3c712976c47707ff893cf6ba4354aa14db1d8938" +uuid = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6" version = "1.3.0" -[[UUIDs]] +[[deps.URIs]] +git-tree-sha1 = "e59ecc5a41b000fa94423a578d29290c7266fc10" +uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4" +version = "1.4.0" + +[[deps.UUIDs]] deps = ["Random", "SHA"] uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" -[[UnPack]] +[[deps.UnPack]] git-tree-sha1 = "387c1f73762231e86e0c9c5443ce3b4a0a9a0c2b" uuid = "3a884ed6-31ef-47d7-9d2a-63182c4928ed" version = "1.0.2" -[[Unicode]] +[[deps.Unicode]] uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" -[[VersionParsing]] -git-tree-sha1 = "80229be1f670524750d905f8fc8148e5a8c4537f" -uuid = "81def892-9a0e-5fdd-b105-ffc91e053289" -version = "1.2.0" +[[deps.UnicodeFun]] +deps = ["REPL"] +git-tree-sha1 = "53915e50200959667e78a92a418594b428dffddf" +uuid = "1cfade01-22cf-5700-b092-accc4b62d6e1" +version = "0.4.1" + +[[deps.UnicodePlots]] +deps = ["ColorSchemes", "ColorTypes", "Contour", "Crayons", "Dates", "FileIO", "FreeTypeAbstraction", "LazyModules", "LinearAlgebra", "MarchingCubes", "NaNMath", "Printf", "SparseArrays", "StaticArrays", "StatsBase", "Unitful"] +git-tree-sha1 = "5b931e95bf691e13ae25c1bdeda71b89169064ce" +uuid = "b8865327-cd53-5732-bb35-84acbb429228" +version = "3.0.5" + +[[deps.Unitful]] +deps = ["ConstructionBase", "Dates", "LinearAlgebra", "Random"] +git-tree-sha1 = "b649200e887a487468b71821e2644382699f1b0f" +uuid = "1986cc42-f94f-5a68-af5c-568840ba703d" +version = "1.11.0" + +[[deps.Unzip]] +git-tree-sha1 = "34db80951901073501137bdbc3d5a8e7bbd06670" +uuid = "41fe7b60-77ed-43a1-b4f0-825fd5a5650d" +version = "0.1.2" -[[Wayland_jll]] +[[deps.Wayland_jll]] deps = ["Artifacts", "Expat_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg", "XML2_jll"] git-tree-sha1 = "3e61f0b86f90dacb0bc0e73a0c5a83f6a8636e23" uuid = "a2964d1f-97da-50d4-b82a-358c7fce9d89" version = "1.19.0+0" -[[Wayland_protocols_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll"] -git-tree-sha1 = "2839f1c1296940218e35df0bbb220f2a79686670" +[[deps.Wayland_protocols_jll]] +deps = ["Artifacts", "JLLWrappers", 
"Libdl", "Pkg"] +git-tree-sha1 = "4528479aa01ee1b3b4cd0e6faef0e04cf16466da" uuid = "2381bf8a-dfd0-557d-9999-79630e7b1b91" -version = "1.18.0+4" +version = "1.25.0+0" + +[[deps.WeakRefStrings]] +deps = ["DataAPI", "InlineStrings", "Parsers"] +git-tree-sha1 = "b1be2855ed9ed8eac54e5caff2afcdb442d52c23" +uuid = "ea10d353-3f73-51f8-a26c-33c1cb351aa5" +version = "1.4.2" -[[XML2_jll]] +[[deps.XML2_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"] -git-tree-sha1 = "1acf5bdf07aa0907e0a37d3718bb88d4b687b74a" +git-tree-sha1 = "58443b63fb7e465a8a7210828c91c08b92132dff" uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" -version = "2.9.12+0" +version = "2.9.14+0" -[[XSLT_jll]] +[[deps.XSLT_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgcrypt_jll", "Libgpg_error_jll", "Libiconv_jll", "Pkg", "XML2_jll", "Zlib_jll"] git-tree-sha1 = "91844873c4085240b95e795f692c4cec4d805f8a" uuid = "aed1982a-8fda-507f-9586-7b0439959a61" version = "1.1.34+0" -[[Xorg_libX11_jll]] +[[deps.Xorg_libX11_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll", "Xorg_xtrans_jll"] git-tree-sha1 = "5be649d550f3f4b95308bf0183b82e2582876527" uuid = "4f6342f7-b3d2-589e-9d20-edeb45f2b2bc" version = "1.6.9+4" -[[Xorg_libXau_jll]] +[[deps.Xorg_libXau_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "4e490d5c960c314f33885790ed410ff3a94ce67e" uuid = "0c0b7dd1-d40b-584c-a123-a41640f87eec" version = "1.0.9+4" -[[Xorg_libXcursor_jll]] +[[deps.Xorg_libXcursor_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXfixes_jll", "Xorg_libXrender_jll"] git-tree-sha1 = "12e0eb3bc634fa2080c1c37fccf56f7c22989afd" uuid = "935fb764-8cf2-53bf-bb30-45bb1f8bf724" version = "1.2.0+4" -[[Xorg_libXdmcp_jll]] +[[deps.Xorg_libXdmcp_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "4fe47bd2247248125c428978740e18a681372dd4" uuid = "a3789734-cfe1-5b06-b2d0-1dd0d9d62d05" version = "1.1.3+4" -[[Xorg_libXext_jll]] +[[deps.Xorg_libXext_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] git-tree-sha1 = "b7c0aa8c376b31e4852b360222848637f481f8c3" uuid = "1082639a-0dae-5f34-9b06-72781eeb8cb3" version = "1.3.4+4" -[[Xorg_libXfixes_jll]] +[[deps.Xorg_libXfixes_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] git-tree-sha1 = "0e0dc7431e7a0587559f9294aeec269471c991a4" uuid = "d091e8ba-531a-589c-9de9-94069b037ed8" version = "5.0.3+4" -[[Xorg_libXi_jll]] +[[deps.Xorg_libXi_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXfixes_jll"] git-tree-sha1 = "89b52bc2160aadc84d707093930ef0bffa641246" uuid = "a51aa0fd-4e3c-5386-b890-e753decda492" version = "1.7.10+4" -[[Xorg_libXinerama_jll]] +[[deps.Xorg_libXinerama_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll"] git-tree-sha1 = "26be8b1c342929259317d8b9f7b53bf2bb73b123" uuid = "d1454406-59df-5ea1-beac-c340f2130bc3" version = "1.1.4+4" -[[Xorg_libXrandr_jll]] +[[deps.Xorg_libXrandr_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll"] git-tree-sha1 = "34cea83cb726fb58f325887bf0612c6b3fb17631" uuid = "ec84b674-ba8e-5d96-8ba1-2a689ba10484" version = "1.5.2+4" -[[Xorg_libXrender_jll]] +[[deps.Xorg_libXrender_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] git-tree-sha1 = "19560f30fd49f4d4efbe7002a1037f8c43d43b96" uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa" version = "0.9.10+4" -[[Xorg_libpthread_stubs_jll]] 
+[[deps.Xorg_libpthread_stubs_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "6783737e45d3c59a4a4c4091f5f88cdcf0908cbb" uuid = "14d82f49-176c-5ed1-bb49-ad3f5cbd8c74" version = "0.1.0+3" -[[Xorg_libxcb_jll]] +[[deps.Xorg_libxcb_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"] git-tree-sha1 = "daf17f441228e7a3833846cd048892861cff16d6" uuid = "c7cfdc94-dc32-55de-ac96-5a1b8d977c5b" version = "1.13.0+3" -[[Xorg_libxkbfile_jll]] +[[deps.Xorg_libxkbfile_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"] git-tree-sha1 = "926af861744212db0eb001d9e40b5d16292080b2" uuid = "cc61e674-0454-545c-8b26-ed2c68acab7a" version = "1.1.0+4" -[[Xorg_xcb_util_image_jll]] +[[deps.Xorg_xcb_util_image_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] git-tree-sha1 = "0fab0a40349ba1cba2c1da699243396ff8e94b97" uuid = "12413925-8142-5f55-bb0e-6d7ca50bb09b" version = "0.4.0+1" -[[Xorg_xcb_util_jll]] +[[deps.Xorg_xcb_util_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll"] git-tree-sha1 = "e7fd7b2881fa2eaa72717420894d3938177862d1" uuid = "2def613f-5ad1-5310-b15b-b15d46f528f5" version = "0.4.0+1" -[[Xorg_xcb_util_keysyms_jll]] +[[deps.Xorg_xcb_util_keysyms_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] git-tree-sha1 = "d1151e2c45a544f32441a567d1690e701ec89b00" uuid = "975044d2-76e6-5fbe-bf08-97ce7c6574c7" version = "0.4.0+1" -[[Xorg_xcb_util_renderutil_jll]] +[[deps.Xorg_xcb_util_renderutil_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] git-tree-sha1 = "dfd7a8f38d4613b6a575253b3174dd991ca6183e" uuid = "0d47668e-0667-5a69-a72c-f761630bfb7e" version = "0.3.9+1" -[[Xorg_xcb_util_wm_jll]] +[[deps.Xorg_xcb_util_wm_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"] git-tree-sha1 = "e78d10aab01a4a154142c5006ed44fd9e8e31b67" uuid = "c22f9ab0-d5fe-5066-847c-f4bb1cd4e361" version = "0.4.1+1" -[[Xorg_xkbcomp_jll]] +[[deps.Xorg_xkbcomp_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxkbfile_jll"] git-tree-sha1 = "4bcbf660f6c2e714f87e960a171b119d06ee163b" uuid = "35661453-b289-5fab-8a00-3d9160c6a3a4" version = "1.4.2+4" -[[Xorg_xkeyboard_config_jll]] +[[deps.Xorg_xkeyboard_config_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xkbcomp_jll"] git-tree-sha1 = "5c8424f8a67c3f2209646d4425f3d415fee5931d" uuid = "33bec58e-1273-512f-9401-5d533626f822" version = "2.27.0+4" -[[Xorg_xtrans_jll]] +[[deps.Xorg_xtrans_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "79c31e7844f6ecf779705fbc12146eb190b7d845" uuid = "c5fb5394-a638-5e4d-96e5-b29de1b5cf10" version = "1.4.0+3" -[[ZipFile]] +[[deps.ZipFile]] deps = ["Libdl", "Printf", "Zlib_jll"] -git-tree-sha1 = "c3a5637e27e914a7a445b8d0ad063d701931e9f7" +git-tree-sha1 = "3593e69e469d2111389a9bd06bac1f3d730ac6de" uuid = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea" -version = "0.9.3" +version = "0.9.4" -[[Zlib_jll]] +[[deps.Zlib_jll]] deps = ["Libdl"] uuid = "83775a58-1f1d-513f-b197-d71354ab007a" -[[Zstd_jll]] +[[deps.Zstd_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "cc4bf3fdde8b7e3e9fa0351bdeedba1cf3b7f6e6" +git-tree-sha1 = "e45044cd873ded54b6a5bac0eb5c971392cf1927" uuid = "3161d3a3-bdf6-5164-811a-617609db77b4" -version = "1.5.0+0" +version = "1.5.2+0" -[[Zygote]] -deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", 
"Distributed", "FillArrays", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics", "ZygoteRules"] -git-tree-sha1 = "531474afbc343c3c7cb9b71c2771813c6defd550" +[[deps.Zygote]] +deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "GPUArrays", "GPUArraysCore", "IRTools", "InteractiveUtils", "LinearAlgebra", "LogExpFunctions", "MacroTools", "NaNMath", "Random", "Requires", "SparseArrays", "SpecialFunctions", "Statistics", "ZygoteRules"] +git-tree-sha1 = "8ac61a92a33b3fd2a4cbf92951817831e313a004" uuid = "e88e6eb3-aa80-5325-afca-941959d7151f" -version = "0.6.14" +version = "0.6.44" -[[ZygoteRules]] +[[deps.ZygoteRules]] deps = ["MacroTools"] -git-tree-sha1 = "9e7a1e8ca60b742e508a315c17eef5211e7fbfd7" +git-tree-sha1 = "8c1a8e4dfacb1fd631745552c8db35d0deb09ea0" uuid = "700de1a5-db45-46bc-99cf-38207098b444" -version = "0.2.1" +version = "0.2.2" -[[libass_jll]] -deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] -git-tree-sha1 = "acc685bcf777b2202a904cdcb49ad34c2fa1880c" +[[deps.libaom_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "3a2ea60308f0996d26f1e5354e10c24e9ef905d4" +uuid = "a4ae2306-e953-59d6-aa16-d00cac43593b" +version = "3.4.0+0" + +[[deps.libass_jll]] +deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "HarfBuzz_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] +git-tree-sha1 = "5982a94fcba20f02f42ace44b9894ee2b140fe47" uuid = "0ac62f75-1d6f-5e53-bd7c-93b484bb37c0" -version = "0.14.0+4" +version = "0.15.1+0" + +[[deps.libblastrampoline_jll]] +deps = ["Artifacts", "Libdl", "OpenBLAS_jll"] +uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" -[[libfdk_aac_jll]] +[[deps.libfdk_aac_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "7a5780a0d9c6864184b3a2eeeb833a0c871f00ab" +git-tree-sha1 = "daacc84a041563f965be61859a36e17c4e4fcd55" uuid = "f638f0a6-7fb0-5443-88ba-1cc74229b280" -version = "0.1.6+4" +version = "2.0.2+0" -[[libpng_jll]] +[[deps.libpng_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] git-tree-sha1 = "94d180a6d2b5e55e447e2d27a29ed04fe79eb30c" uuid = "b53b4c65-9356-5827-b1ea-8c7a1a84506f" version = "1.6.38+0" -[[libvorbis_jll]] +[[deps.libvorbis_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Ogg_jll", "Pkg"] -git-tree-sha1 = "c45f4e40e7aafe9d086379e5578947ec8b95a8fb" +git-tree-sha1 = "b910cb81ef3fe6e78bf6acee440bda86fd6ae00c" uuid = "f27f6e37-5d2b-51aa-960f-b287f2bc3b7a" -version = "1.3.7+0" +version = "1.3.7+1" -[[nghttp2_jll]] +[[deps.nghttp2_jll]] deps = ["Artifacts", "Libdl"] uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" -[[p7zip_jll]] +[[deps.p7zip_jll]] deps = ["Artifacts", "Libdl"] uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" -[[x264_jll]] +[[deps.x264_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "d713c1ce4deac133e3334ee12f4adff07f81778f" +git-tree-sha1 = "4fea590b89e6ec504593146bf8b988b2c00922b2" uuid = "1270edf5-f2f9-52d2-97e9-ab00b5d0237a" -version = "2020.7.14+2" +version = "2021.5.5+0" -[[x265_jll]] +[[deps.x265_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "487da2f8f2f0c8ee0e83f39d13037d6bbf0a45ab" +git-tree-sha1 = "ee567a171cce03570d77ad3a43e90218e38937a9" uuid = "dfaa095f-4041-5dcd-9319-2fabd8486b76" -version = "3.0.0+3" +version = "3.5.0+0" -[[xkbcommon_jll]] +[[deps.xkbcommon_jll]] deps = ["Artifacts", 
"JLLWrappers", "Libdl", "Pkg", "Wayland_jll", "Wayland_protocols_jll", "Xorg_libxcb_jll", "Xorg_xkeyboard_config_jll"] -git-tree-sha1 = "ece2350174195bb31de1a63bea3a41ae1aa593b6" +git-tree-sha1 = "9ebfc140cc56e8c2156a15ceac2f0302e327ac0a" uuid = "d8fb68d0-12a3-5cfd-a85a-d49703b185fd" -version = "0.9.1+5" +version = "1.4.1+0" diff --git a/examples/mnist/Project.toml b/examples/mnist/Project.toml index 20a04031..49a95dda 100644 --- a/examples/mnist/Project.toml +++ b/examples/mnist/Project.toml @@ -1,9 +1,7 @@ [deps] Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" -Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306" MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458" MLJ = "add582a8-e3ab-11e8-2d5e-e98b27df1bc7" MLJFlux = "094fc8d1-fd35-5302-93ea-dabda2abf845" MLJIteration = "614be32b-d00c-4edb-bd02-1eb411ab5e55" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -PyPlot = "d330b81b-6aea-500a-939a-2ce795aea3ee" diff --git a/examples/mnist/README.md b/examples/mnist/README.md index d2bb2cea..af688717 100644 --- a/examples/mnist/README.md +++ b/examples/mnist/README.md @@ -1,10 +1,9 @@ # Contents -- `mnist.ipynb`: Juptyer notebook -- `mnist.jl`: executable Julia script annotated with comments +- `notebook.ipynb`: Juptyer notebook +- `notebook.jl`: executable Julia script annotated with comments # Important Scripts or notebooks in this folder cannot be reliably exectued without the accompanying -Manifest.toml and Project.toml files. If the Manifest.toml does not specify a -`julia_version` in the first four lines, use Julia 1.6.x, where x is any integer. +Manifest.toml and Project.toml files. diff --git a/examples/mnist/generate.jl b/examples/mnist/generate.jl new file mode 100644 index 00000000..6a36a764 --- /dev/null +++ b/examples/mnist/generate.jl @@ -0,0 +1,4 @@ +# Execute this julia file to generate the notebooks from ../notebook.jl + +joinpath(@__DIR__, "..", "generate.jl") |> include +generate(@__DIR__, execute=true, pluto=false) diff --git a/examples/mnist/loss.png b/examples/mnist/loss.png index d862a4b3205dc4e68567d689b36c2ada761df52f..9fbf1e0c62543e3c90f06f969733f6bbd5bc55ec 100644 GIT binary patch literal 22134 zcmb5Wby$?&_XYZzAc%ydC`d?$bc2p`4jm%hozkInNq2X5Hxkm_Ez;dx_wf1N-@o^H zE~4PfyziW|&))m2z4kie2bUB=L3)D(K@f_FFux20JywFCM^=cB!6R9-OoHIYb1g9; ze(3(;PfBBUI0U_iMEF0;*(dDGJE&sXE+ZW5_wBe?Qu-hvQc%2)&hd>~b6HQUcHoZ9 zR5nx|$;>h|jBOq}a-b>C7HjAxAuQ(ZLVm^~efShzZeU*b$9&@SB4NI7jr*HEnw|CA z&SMpC&C8FdhlgCpiTiNu0532Eb}E$(@B@Ou&>utYU%veR{I~p^xDR{{3}AD5z0*`y z*g$-RB&0^cegv~jATi$;s4*JP+i$v-+`#s5T;<{Dftw# zb??v;5fQOW@RCak`K$SQ38F;xG3XR0)b$LaaAXD{BhH!0Cz`=@zIS|$%0&`?p;Ii4DsHaIj=wQ4c> zP3sZ7OS!38B$fBMn`vy;b7K@4*Dc9A)(|Dd=6nZDQI=1?#lv&h7$Qpb_>?&^I?83c zB`0RCR$PSMBe1gaez*fR;uvt5keS-+JBiomNQ)}Gj|h0ggNf~FE3AhsD{wx-?QOV z8nNuB$5@2||8V~CTE}LcZ`@qm*H>@xXbZ*-J$eMSsErL0lX_m{<+$AP(?1(|_qxxR z&dL)_SvYXr$ruwo(dz!#a<9&7%_$b95AMA*J_}wB_iUK^xv(fzu|_ zzW>rIZx_`d6LauRQHIn;9D>jUZ|Pw$Utcoo)=gTTm~%-hlC#pwOiwfz^_b)nGKYpn zf9eT+H%8@ZbB_)kOioG&B9xJlnJD^J?{@FX+q^lRuUKs|-MVh)40|Q*sbGpPqyIeM zhK1B9|MJts?N>_N_2+aXYUb?AOxl7fXK9oIeq1j)U&OzL{&q)3M4W64x9KqQTt+8w z+UD8qFDNT33kWnMy%dv_yt~>izP$M*m7_KfSEsG@r7>%>G1iH-2-#hhaKUS%;P*r$ zK0B~_&(KJ}?Gatsa^8kCm$DiZr0X4j49w5Y3dr6{mQ` zOiVMH&R}bnlQSirRvi~^MX;btZ#a{-bTXLeBR+b!A6?(ONYm&j*%AW%?UIp|ovk#+ zK|>qLkxtj`jZ$W7x;g4Ko-C?tTBx@(A57wTLG;Dv)d$ZClWE5H@85IO{wo|ImrO7i z&y)Z3$-TPzyv}wTktkoWIEl~gZmmE5$>YaWX7egl*&SN39M*d2f`L3eYw2$X_TgdigaEoL;xx z$h>b%VIgrnka}#Wkdfnk6y1X%kJaemCfLC+oT%5fS0(>A5{w+^fUr z@mu3oaK7FyncJa^pSZiH$7r_76qwn=!$V9|w90(Z72IiPXsFhDo%jCc`2Oy^d9pzH 
diff --git a/examples/mnist/notebook.ipynb b/examples/mnist/notebook.ipynb new file mode 100644 index 00000000..00cf8e74 --- /dev/null +++ b/examples/mnist/notebook.ipynb @@ -0,0 +1,2143 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using MLJ to classifiy the MNIST image dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m\u001b[1m Activating\u001b[22m\u001b[39m project at `~/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist`\n" + ] + } + ], + "source": [ + "using Pkg\n", + "const DIR = @__DIR__\n", + "Pkg.activate(DIR)\n", + "Pkg.instantiate()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Julia version** is assumed to be ^1.7" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "┌ Info: Precompiling MLJ [add582a8-e3ab-11e8-2d5e-e98b27df1bc7]\n", + "└ @ Base loading.jl:1423\n" + ] + } + ], + "source": [ + "using MLJ\n", + "using Flux\n", + "import MLJFlux\n", + "import MLJIteration # for `skip`\n", + "\n", + "using Plots\n", + "gr(size=(600, 300*(sqrt(5)-1)));" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Basic training" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Downloading the MNIST image dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "┌ Info: Precompiling MLDatasets [eb30cadb-4394-5ae3-aed4-317e484a6458]\n", + "└ @ Base loading.jl:1423\n", + "┌ Warning: MNIST.traindata() is deprecated, use `MNIST(split=:train)[:]` instead.\n", + "└ @ MLDatasets /Users/anthony/.julia/packages/MLDatasets/eZ0Va/src/datasets/vision/mnist.jl:187\n" + ] + } + ], + "source": [ + "import MLDatasets: MNIST\n", + "\n", + "ENV[\"DATADEPS_ALWAYS_ACCEPT\"] = true\n", + "images, labels = MNIST.traindata();" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In MLJ, integers cannot be used for encoding categorical data, so we\n", + "must force the labels to have the `Multiclass` [scientific\n", + "type](https://juliaai.github.io/ScientificTypes.jl/dev/). For\n", + "more on this, see [Working with Categorical\n", + "Data](https://alan-turing-institute.github.io/MLJ.jl/dev/working_with_categorical_data/)." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "labels = coerce(labels, Multiclass);\n", + "images = coerce(images, GrayImage);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Checking scientific types:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "@assert scitype(images) <: AbstractVector{<:Image}\n", + "@assert scitype(labels) <: AbstractVector{<:Finite}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Looks good." 
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For general instructions on coercing image data, see [Type coercion\n",
+    "for image\n",
+    "data](https://alan-turing-institute.github.io/ScientificTypes.jl/dev/#Type-coercion-for-image-data-1)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "28×28 Array{Gray{N0f8},2} with eltype Gray{FixedPointNumbers.N0f8}:\n",
+       " Gray{N0f8}(0.0) Gray{N0f8}(0.0) … Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n",
+       " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n",
+       " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n",
+       " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n",
+       " Gray{N0f8}(0.0) 
Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) … Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) … Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " ⋮ ⋱ \n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) … Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) … Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", + " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "images[1]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We start by defining a suitable `Builder` object. This is a recipe\n", + "for building the neural network. Our builder will work for images of\n", + "any (constant) size, whether they be color or black and white (ie,\n", + "single or multi-channel). The architecture always consists of six\n", + "alternating convolution and max-pool layers, and a final dense\n", + "layer; the filter size and the number of channels after each\n", + "convolution layer is customisable." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "import MLJFlux\n", + "struct MyConvBuilder\n", + " filter_size::Int\n", + " channels1::Int\n", + " channels2::Int\n", + " channels3::Int\n", + "end\n", + "\n", + "make2d(x::AbstractArray) = reshape(x, :, size(x)[end])\n", + "\n", + "function MLJFlux.build(b::MyConvBuilder, rng, n_in, n_out, n_channels)\n", + " k, c1, c2, c3 = b.filter_size, b.channels1, b.channels2, b.channels3\n", + " mod(k, 2) == 1 || error(\"`filter_size` must be odd. \")\n", + " p = div(k - 1, 2) # padding to preserve image size\n", + " init = Flux.glorot_uniform(rng)\n", + " front = Chain(\n", + " Conv((k, k), n_channels => c1, pad=(p, p), relu, init=init),\n", + " MaxPool((2, 2)),\n", + " Conv((k, k), c1 => c2, pad=(p, p), relu, init=init),\n", + " MaxPool((2, 2)),\n", + " Conv((k, k), c2 => c3, pad=(p, p), relu, init=init),\n", + " MaxPool((2 ,2)),\n", + " make2d)\n", + " d = Flux.outputsize(front, (n_in..., n_channels, 1)) |> first\n", + " return Chain(front, Dense(d, n_out, init=init))\n", + "end" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Note.** There is no final `softmax` here, as this is applied by\n", + "default in all MLJFLux classifiers. 
Customisation of this behaviour\n", + "is controlled using using the `finaliser` hyperparameter of the\n", + "classifier." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We now define the MLJ model. If you have a GPU, substitute\n", + "`acceleration=CUDALibs()` below:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "┌ Info: For silent loading, specify `verbosity=0`. \n", + "└ @ Main /Users/anthony/.julia/packages/MLJModels/lDzCR/src/loading.jl:168\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "import MLJFlux ✔\n" + ] + }, + { + "data": { + "text/plain": [ + "ImageClassifier(\n", + " builder = MyConvBuilder(3, 16, 32, 32), \n", + " finaliser = NNlib.softmax, \n", + " optimiser = Adam(0.001, (0.9, 0.999), 1.0e-8, IdDict{Any, Any}()), \n", + " loss = Flux.Losses.crossentropy, \n", + " epochs = 10, \n", + " batch_size = 50, \n", + " lambda = 0.0, \n", + " alpha = 0.0, \n", + " rng = 123, \n", + " optimiser_changes_trigger_retraining = false, \n", + " acceleration = CPU1{Nothing}(nothing))" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ImageClassifier = @load ImageClassifier\n", + "clf = ImageClassifier(builder=MyConvBuilder(3, 16, 32, 32),\n", + " batch_size=50,\n", + " epochs=10,\n", + " rng=123)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can add Flux options `optimiser=...` and `loss=...` here. At\n", + "present, `loss` must be a Flux-compatible loss, not an MLJ\n", + "measure. To run on a GPU, set `acceleration=CUDALib()`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Binding the model with data in an MLJ machine:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "mach = machine(clf, images, labels);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Training for 10 epochs on the first 500 images:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "┌ Info: Training machine(ImageClassifier(builder = MyConvBuilder(3, 16, 32, 32), …), …).\n", + "└ @ MLJBase /Users/anthony/.julia/packages/MLJBase/Fl6Zc/src/machines.jl:498\n", + "┌ Info: Loss is 2.291\n", + "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/n3YAv/src/core.jl:127\n", + "┌ Info: Loss is 2.208\n", + "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/n3YAv/src/core.jl:127\n", + "┌ Info: Loss is 2.049\n", + "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/n3YAv/src/core.jl:127\n", + "┌ Info: Loss is 1.685\n", + "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/n3YAv/src/core.jl:127\n", + "┌ Info: Loss is 1.075\n", + "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/n3YAv/src/core.jl:127\n", + "┌ Info: Loss is 0.628\n", + "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/n3YAv/src/core.jl:127\n", + "┌ Info: Loss is 0.4639\n", + "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/n3YAv/src/core.jl:127\n", + "┌ Info: Loss is 0.361\n", + "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/n3YAv/src/core.jl:127\n", + "┌ Info: Loss is 0.2921\n", + "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/n3YAv/src/core.jl:127\n", + "┌ Info: Loss is 0.2478\n", + "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/n3YAv/src/core.jl:127\n" + ] + } + 
], + "source": [ + "fit!(mach, rows=1:500, verbosity=2);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Inspecting:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(training_losses = Float32[2.3242702, 2.2908378, 2.20822, 2.0489829, 1.6850392, 1.0751165, 0.6279615, 0.46388212, 0.36103815, 0.29207793, 0.2478443],)" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "report(mach)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(chain = Chain(Chain(Chain(Conv((3, 3), 1 => 16, relu, pad=1), MaxPool((2, 2)), Conv((3, 3), 16 => 32, relu, pad=1), MaxPool((2, 2)), Conv((3, 3), 32 => 32, relu, pad=1), MaxPool((2, 2)), make2d), Dense(288 => 10)), softmax),)" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "chain = fitted_params(mach)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "16-element Vector{Float32}:\n", + " 0.011803599\n", + " 0.05579675\n", + " 8.461591f-5\n", + " 0.013422165\n", + " -0.001925053\n", + " 0.011568692\n", + " -0.00051727734\n", + " -0.0003228416\n", + " 0.03614383\n", + " 0.06365696\n", + " -0.0005846103\n", + " -0.004092362\n", + " 0.0036211032\n", + " 0.0031117066\n", + " 0.02764553\n", + " 0.05152524" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "Flux.params(chain)[2]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Adding 20 more epochs:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "┌ Info: Updating machine(ImageClassifier(builder = MyConvBuilder(3, 16, 32, 32), …), …).\n", + "└ @ MLJBase /Users/anthony/.julia/packages/MLJBase/Fl6Zc/src/machines.jl:499\n", + "\u001b[33mOptimising neural net: 100%[=========================] Time: 0:00:08\u001b[39m\n" + ] + } + ], + "source": [ + "clf.epochs = clf.epochs + 20\n", + "fit!(mach, rows=1:500);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Computing an out-of-sample estimate of the loss:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "0.36284238f0" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "predicted_labels = predict(mach, rows=501:1000);\n", + "cross_entropy(predicted_labels, labels[501:1000]) |> mean" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Or, in one line:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "PerformanceEvaluation object with these fields:\n", + " measure, operation, measurement, per_fold,\n", + " per_observation, fitted_params_per_fold,\n", + " report_per_fold, train_test_rows\n", + "Extract:\n", + "┌────────────────────────────────┬───────────┬─────────────┬────────────────┐\n", + "│\u001b[22m measure \u001b[0m│\u001b[22m operation \u001b[0m│\u001b[22m measurement \u001b[0m│\u001b[22m per_fold \u001b[0m│\n", + "├────────────────────────────────┼───────────┼─────────────┼────────────────┤\n", + "│ LogLoss( │ predict 
│ 0.363 │ Float32[0.363] │\n", + "│ tol = 2.220446049250313e-16) │ │ │ │\n", + "└────────────────────────────────┴───────────┴─────────────┴────────────────┘\n" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "evaluate!(mach,\n", + " resampling=Holdout(fraction_train=0.5),\n", + " measure=cross_entropy,\n", + " rows=1:1000,\n", + " verbosity=0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Wrapping the MLJFlux model with iteration controls" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Any iterative MLJFlux model can be wrapped in *iteration controls*,\n", + "as we demonstrate next. For more on MLJ's `IteratedModel` wrapper,\n", + "see the [MLJ\n", + "documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The \"self-iterating\" classifier, called `iterated_clf` below, is for\n", + "iterating the image classifier defined above until one of the\n", + "following stopping criterion apply:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "- `Patience(3)`: 3 consecutive increases in the loss\n", + "- `InvalidValue()`: an out-of-sample loss, or a training loss, is `NaN`, `Inf`, or `-Inf`\n", + "- `TimeLimit(t=5/60)`: training time has exceeded 5 minutes" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "These checks (and other controls) will be applied every two epochs\n", + "(because of the `Step(2)` control). Additionally, training a\n", + "machine bound to `iterated_clf` will:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "- save a snapshot of the machine every three control cycles (every six epochs)\n", + "- record traces of the out-of-sample loss and training losses for plotting\n", + "- record mean value traces of each Flux parameter for plotting" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For a complete list of controls, see [this\n", + "table](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/#Controls-provided)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Wrapping the classifier" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Some helpers" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "make2d(x::AbstractArray) = reshape(x, :, size(x)[end])\n", + "make1d(x::AbstractArray) = reshape(x, length(x));" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To extract Flux params from an MLJFlux machine" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "parameters(mach) = make1d.(Flux.params(fitted_params(mach)));" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To store the traces:" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Any[]" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "losses = []\n", + "training_losses = []\n", + "parameter_means = Float32[];\n", + "epochs = []" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To update the traces:" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "update_epochs (generic function with 1 method)" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "update_loss(loss) = push!(losses, loss)\n", + "update_training_loss(losses) = push!(training_losses, losses[end])\n", + "update_means(mach) = append!(parameter_means, mean.(parameters(mach)));\n", + "update_epochs(epoch) = push!(epochs, epoch)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The controls to apply:" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "save_control =\n", + " MLJIteration.skip(Save(joinpath(DIR, \"mnist.jlso\")), predicate=3)\n", + "\n", + "controls=[Step(2),\n", + " Patience(3),\n", + " InvalidValue(),\n", + " TimeLimit(5/60),\n", + " save_control,\n", + " WithLossDo(),\n", + " WithLossDo(update_loss),\n", + " WithTrainingLossesDo(update_training_loss),\n", + " Callback(update_means),\n", + " WithIterationsDo(update_epochs)\n", + "];" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The \"self-iterating\" classifier:" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "ProbabilisticIteratedModel(\n", + " model = ImageClassifier(\n", + " builder = MyConvBuilder(3, 16, 32, 32), \n", + " finaliser = NNlib.softmax, \n", + " optimiser = Adam(0.001, (0.9, 0.999), 1.0e-8, IdDict{Any, Any}()), \n", + " loss = Flux.Losses.crossentropy, \n", + " epochs = 30, \n", + " batch_size = 50, \n", + " lambda = 0.0, \n", + " alpha = 0.0, \n", + " rng = 123, \n", + " optimiser_changes_trigger_retraining = false, \n", + " acceleration = CPU1{Nothing}(nothing)), \n", + " controls = Any[Step(2), Patience(3), InvalidValue(), TimeLimit(Dates.Millisecond(300000)), IterationControl.Skip{Save{typeof(Serialization.serialize)}, IterationControl.var\"#8#9\"{Int64}}(Save{typeof(Serialization.serialize)}(\"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist.jlso\", Serialization.serialize), IterationControl.var\"#8#9\"{Int64}(3)), 
WithLossDo{IterationControl.var\"#20#22\"}(IterationControl.var\"#20#22\"(), false, nothing), WithLossDo{typeof(update_loss)}(update_loss, false, nothing), WithTrainingLossesDo{typeof(update_training_loss)}(update_training_loss, false, nothing), Callback{typeof(update_means)}(update_means, false, nothing, false), WithIterationsDo{typeof(update_epochs)}(update_epochs, false, nothing)], \n", + " resampling = Holdout(\n", + " fraction_train = 0.7, \n", + " shuffle = false, \n", + " rng = Random._GLOBAL_RNG()), \n", + " measure = LogLoss(\n", + " tol = 2.220446049250313e-16), \n", + " weights = nothing, \n", + " class_weights = nothing, \n", + " operation = MLJModelInterface.predict, \n", + " retrain = false, \n", + " check_measure = true, \n", + " iteration_parameter = nothing, \n", + " cache = true)" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "iterated_clf = IteratedModel(model=clf,\n", + " controls=controls,\n", + " resampling=Holdout(fraction_train=0.7),\n", + " measure=log_loss)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Binding the wrapped model to data:" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "mach = machine(iterated_clf, images, labels);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "┌ Info: Training machine(ProbabilisticIteratedModel(model = ImageClassifier(builder = MyConvBuilder(3, 16, 32, 32), …), …), …).\n", + "└ @ MLJBase /Users/anthony/.julia/packages/MLJBase/Fl6Zc/src/machines.jl:498\n", + "┌ Info: No iteration parameter specified. Using `iteration_parameter=:(epochs)`. \n", + "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/J0pbp/src/core.jl:62\n", + "┌ Info: loss: 2.224743\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: loss: 1.968148\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: Saving \"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist1.jlso\". \n", + "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/J0pbp/src/controls.jl:203\n", + "┌ Info: loss: 1.2209107\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: loss: 0.5940933\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: loss: 0.46833506\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: Saving \"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist2.jlso\". \n", + "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/J0pbp/src/controls.jl:203\n", + "┌ Info: loss: 0.42414027\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: loss: 0.40840885\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: loss: 0.40475494\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: Saving \"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist3.jlso\". 
\n", + "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/J0pbp/src/controls.jl:203\n", + "┌ Info: loss: 0.40977737\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: loss: 0.42039925\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: loss: 0.4321642\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: final loss: 0.4321642\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/train.jl:44\n", + "┌ Info: final training loss: 0.043363843\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/train.jl:46\n", + "┌ Info: Stop triggered by Patience(3) stopping criterion. \n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/stopping_controls.jl:54\n", + "┌ Info: Total of 22 iterations. \n", + "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/J0pbp/src/core.jl:35\n" + ] + } + ], + "source": [ + "fit!(mach, rows=1:500);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Comparison of the training and out-of-sample losses:" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "plot(epochs, losses,\n", + " xlab = \"epoch\",\n", + " ylab = \"cross entropy\",\n", + " label=\"out-of-sample\")\n", + "plot!(epochs, training_losses, label=\"training\")\n", + "\n", + "savefig(joinpath(DIR, \"loss.png\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Evolution of weights" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "n_epochs = length(losses)\n", + "n_parameters = div(length(parameter_means), n_epochs)\n", + "parameter_means2 = reshape(copy(parameter_means), n_parameters, n_epochs)'\n", + "plot(epochs, parameter_means2,\n", + " title=\"Flux parameter mean weights\",\n", + " xlab = \"epoch\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Note.** The the higher the number, the deeper the chain parameter." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "savefig(joinpath(DIR, \"weights.png\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Retrieving a snapshot for a prediction:" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "3-element CategoricalArrays.CategoricalArray{Int64,1,UInt32}:\n", + " 7\n", + " 9\n", + " 5" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "mach2 = machine(joinpath(DIR, \"mnist3.jlso\"))\n", + "predict_mode(mach2, images[501:503])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Restarting training" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Mutating `iterated_clf.controls` or `clf.epochs` (which is otherwise\n", + "ignored) will allow you to restart training from where it left off." + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "┌ Info: Updating machine(ProbabilisticIteratedModel(model = ImageClassifier(builder = MyConvBuilder(3, 16, 32, 32), …), …), …).\n", + "└ @ MLJBase /Users/anthony/.julia/packages/MLJBase/Fl6Zc/src/machines.jl:499\n", + "┌ Info: loss: 0.444918\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: loss: 0.4575673\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: Saving \"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist1.jlso\". \n", + "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/J0pbp/src/controls.jl:203\n", + "┌ Info: loss: 0.46934554\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: loss: 0.48012888\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: loss: 0.49023148\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", + "┌ Info: final loss: 0.49023148\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/train.jl:44\n", + "┌ Info: final training loss: 0.010609009\n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/train.jl:46\n", + "┌ Info: Stop triggered by Patience(4) stopping criterion. \n", + "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/stopping_controls.jl:54\n", + "┌ Info: Total of 32 iterations. 
\n", + "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/J0pbp/src/core.jl:35\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "iterated_clf.controls[2] = Patience(4)\n", + "fit!(mach, rows=1:500)\n", + "\n", + "plot(epochs, losses,\n", + " xlab = \"epoch\",\n", + " ylab = \"cross entropy\",\n", + " label=\"out-of-sample\")\n", + "plot!(epochs, training_losses, label=\"training\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "\n", + "*This notebook was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Julia 1.7.3", + "language": "julia", + "name": "julia-1.7" + }, + "language_info": { + "file_extension": ".jl", + "mimetype": "application/julia", + "name": "julia", + "version": "1.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 3 +} diff --git a/examples/mnist/mnist.jl b/examples/mnist/notebook.jl similarity index 96% rename from examples/mnist/mnist.jl rename to examples/mnist/notebook.jl index 88e1b0b1..5c7f9311 100644 --- a/examples/mnist/mnist.jl +++ b/examples/mnist/notebook.jl @@ -6,17 +6,15 @@ const DIR = @__DIR__ Pkg.activate(DIR) Pkg.instantiate() -# **Julia version** is assumed to be ^1.6 +# **Julia version** is assumed to be ^1.7 using MLJ using Flux import MLJFlux import MLJIteration # for `skip` -MLJ.color_off() - using Plots -pyplot(size=(600, 300*(sqrt(5)-1))); +gr(size=(600, 300*(sqrt(5)-1))); # ## Basic training @@ -230,7 +228,7 @@ fit!(mach, rows=1:500); plot(epochs, losses, xlab = "epoch", - ylab = "root squared error", + ylab = "cross entropy", label="out-of-sample") plot!(epochs, training_losses, label="training") @@ -266,10 +264,7 @@ fit!(mach, rows=1:500) plot(epochs, losses, xlab = "epoch", - ylab = "root squared error", + ylab = "cross entropy", label="out-of-sample") plot!(epochs, training_losses, label="training") -using Literate #src -Literate.markdown(@__FILE__, @__DIR__, execute=false) #src -Literate.notebook(@__FILE__, @__DIR__, execute=false) #src diff --git a/examples/mnist/notebook.unexecuted.ipynb b/examples/mnist/notebook.unexecuted.ipynb new file mode 100644 index 00000000..e78eb059 --- /dev/null +++ b/examples/mnist/notebook.unexecuted.ipynb @@ -0,0 +1,691 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# Using MLJ to classifiy the MNIST image dataset" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using Pkg\n", + "const DIR = @__DIR__\n", + "Pkg.activate(DIR)\n", + "Pkg.instantiate()" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "**Julia version** is assumed to be ^1.7" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "using MLJ\n", + "using Flux\n", + "import MLJFlux\n", + "import MLJIteration # for `skip`\n", + "\n", + "using Plots\n", + "gr(size=(600, 300*(sqrt(5)-1)));" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": 
"markdown", + "source": [ + "## Basic training" + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "Downloading the MNIST image dataset:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "import MLDatasets: MNIST\n", + "\n", + "ENV[\"DATADEPS_ALWAYS_ACCEPT\"] = true\n", + "images, labels = MNIST.traindata();" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "In MLJ, integers cannot be used for encoding categorical data, so we\n", + "must force the labels to have the `Multiclass` [scientific\n", + "type](https://juliaai.github.io/ScientificTypes.jl/dev/). For\n", + "more on this, see [Working with Categorical\n", + "Data](https://alan-turing-institute.github.io/MLJ.jl/dev/working_with_categorical_data/)." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "labels = coerce(labels, Multiclass);\n", + "images = coerce(images, GrayImage);" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Checking scientific types:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "@assert scitype(images) <: AbstractVector{<:Image}\n", + "@assert scitype(labels) <: AbstractVector{<:Finite}" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Looks good." + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "For general instructions on coercing image data, see [Type coercion\n", + "for image\n", + "data](https://alan-turing-institute.github.io/ScientificTypes.jl/dev/#Type-coercion-for-image-data-1)" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "images[1]" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "We start by defining a suitable `Builder` object. This is a recipe\n", + "for building the neural network. Our builder will work for images of\n", + "any (constant) size, whether they be color or black and white (ie,\n", + "single or multi-channel). The architecture always consists of six\n", + "alternating convolution and max-pool layers, and a final dense\n", + "layer; the filter size and the number of channels after each\n", + "convolution layer is customisable." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "import MLJFlux\n", + "struct MyConvBuilder\n", + " filter_size::Int\n", + " channels1::Int\n", + " channels2::Int\n", + " channels3::Int\n", + "end\n", + "\n", + "make2d(x::AbstractArray) = reshape(x, :, size(x)[end])\n", + "\n", + "function MLJFlux.build(b::MyConvBuilder, rng, n_in, n_out, n_channels)\n", + " k, c1, c2, c3 = b.filter_size, b.channels1, b.channels2, b.channels3\n", + " mod(k, 2) == 1 || error(\"`filter_size` must be odd. 
\")\n", + " p = div(k - 1, 2) # padding to preserve image size\n", + " init = Flux.glorot_uniform(rng)\n", + " front = Chain(\n", + " Conv((k, k), n_channels => c1, pad=(p, p), relu, init=init),\n", + " MaxPool((2, 2)),\n", + " Conv((k, k), c1 => c2, pad=(p, p), relu, init=init),\n", + " MaxPool((2, 2)),\n", + " Conv((k, k), c2 => c3, pad=(p, p), relu, init=init),\n", + " MaxPool((2 ,2)),\n", + " make2d)\n", + " d = Flux.outputsize(front, (n_in..., n_channels, 1)) |> first\n", + " return Chain(front, Dense(d, n_out, init=init))\n", + "end" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "**Note.** There is no final `softmax` here, as this is applied by\n", + "default in all MLJFLux classifiers. Customisation of this behaviour\n", + "is controlled using using the `finaliser` hyperparameter of the\n", + "classifier." + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "We now define the MLJ model. If you have a GPU, substitute\n", + "`acceleration=CUDALibs()` below:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "ImageClassifier = @load ImageClassifier\n", + "clf = ImageClassifier(builder=MyConvBuilder(3, 16, 32, 32),\n", + " batch_size=50,\n", + " epochs=10,\n", + " rng=123)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "You can add Flux options `optimiser=...` and `loss=...` here. At\n", + "present, `loss` must be a Flux-compatible loss, not an MLJ\n", + "measure. To run on a GPU, set `acceleration=CUDALib()`." + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "Binding the model with data in an MLJ machine:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "mach = machine(clf, images, labels);" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Training for 10 epochs on the first 500 images:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "fit!(mach, rows=1:500, verbosity=2);" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Inspecting:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "report(mach)" + ], + "metadata": {}, + "execution_count": null + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "chain = fitted_params(mach)" + ], + "metadata": {}, + "execution_count": null + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "Flux.params(chain)[2]" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Adding 20 more epochs:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "clf.epochs = clf.epochs + 20\n", + "fit!(mach, rows=1:500);" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Computing an out-of-sample estimate of the loss:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "predicted_labels = predict(mach, rows=501:1000);\n", + "cross_entropy(predicted_labels, labels[501:1000]) |> mean" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Or, in one line:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "evaluate!(mach,\n", + " resampling=Holdout(fraction_train=0.5),\n", + " 
measure=cross_entropy,\n", + " rows=1:1000,\n", + " verbosity=0)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "## Wrapping the MLJFlux model with iteration controls" + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "Any iterative MLJFlux model can be wrapped in *iteration controls*,\n", + "as we demonstrate next. For more on MLJ's `IteratedModel` wrapper,\n", + "see the [MLJ\n", + "documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/)." + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "The \"self-iterating\" classifier, called `iterated_clf` below, is for\n", + "iterating the image classifier defined above until one of the\n", + "following stopping criterion apply:" + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "- `Patience(3)`: 3 consecutive increases in the loss\n", + "- `InvalidValue()`: an out-of-sample loss, or a training loss, is `NaN`, `Inf`, or `-Inf`\n", + "- `TimeLimit(t=5/60)`: training time has exceeded 5 minutes" + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "These checks (and other controls) will be applied every two epochs\n", + "(because of the `Step(2)` control). Additionally, training a\n", + "machine bound to `iterated_clf` will:" + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "- save a snapshot of the machine every three control cycles (every six epochs)\n", + "- record traces of the out-of-sample loss and training losses for plotting\n", + "- record mean value traces of each Flux parameter for plotting" + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "For a complete list of controls, see [this\n", + "table](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/#Controls-provided)." 
+ ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "### Wrapping the classifier" + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "Some helpers" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "make2d(x::AbstractArray) = reshape(x, :, size(x)[end])\n", + "make1d(x::AbstractArray) = reshape(x, length(x));" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "To extract Flux params from an MLJFlux machine" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "parameters(mach) = make1d.(Flux.params(fitted_params(mach)));" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "To store the traces:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "losses = []\n", + "training_losses = []\n", + "parameter_means = Float32[];\n", + "epochs = []" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "To update the traces:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "update_loss(loss) = push!(losses, loss)\n", + "update_training_loss(losses) = push!(training_losses, losses[end])\n", + "update_means(mach) = append!(parameter_means, mean.(parameters(mach)));\n", + "update_epochs(epoch) = push!(epochs, epoch)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "The controls to apply:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "save_control =\n", + " MLJIteration.skip(Save(joinpath(DIR, \"mnist.jlso\")), predicate=3)\n", + "\n", + "controls=[Step(2),\n", + " Patience(3),\n", + " InvalidValue(),\n", + " TimeLimit(5/60),\n", + " save_control,\n", + " WithLossDo(),\n", + " WithLossDo(update_loss),\n", + " WithTrainingLossesDo(update_training_loss),\n", + " Callback(update_means),\n", + " WithIterationsDo(update_epochs)\n", + "];" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "The \"self-iterating\" classifier:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "iterated_clf = IteratedModel(model=clf,\n", + " controls=controls,\n", + " resampling=Holdout(fraction_train=0.7),\n", + " measure=log_loss)" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Binding the wrapped model to data:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "mach = machine(iterated_clf, images, labels);" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Training" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "fit!(mach, rows=1:500);" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Comparison of the training and out-of-sample losses:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "plot(epochs, losses,\n", + " xlab = \"epoch\",\n", + " ylab = \"cross entropy\",\n", + " label=\"out-of-sample\")\n", + "plot!(epochs, training_losses, label=\"training\")\n", + "\n", + "savefig(joinpath(DIR, \"loss.png\"))" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Evolution of 
weights" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "n_epochs = length(losses)\n", + "n_parameters = div(length(parameter_means), n_epochs)\n", + "parameter_means2 = reshape(copy(parameter_means), n_parameters, n_epochs)'\n", + "plot(epochs, parameter_means2,\n", + " title=\"Flux parameter mean weights\",\n", + " xlab = \"epoch\")" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "**Note.** The the higher the number, the deeper the chain parameter." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "savefig(joinpath(DIR, \"weights.png\"))" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Retrieving a snapshot for a prediction:" + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "mach2 = machine(joinpath(DIR, \"mnist3.jlso\"))\n", + "predict_mode(mach2, images[501:503])" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "### Restarting training" + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "Mutating `iterated_clf.controls` or `clf.epochs` (which is otherwise\n", + "ignored) will allow you to restart training from where it left off." + ], + "metadata": {} + }, + { + "outputs": [], + "cell_type": "code", + "source": [ + "iterated_clf.controls[2] = Patience(4)\n", + "fit!(mach, rows=1:500)\n", + "\n", + "plot(epochs, losses,\n", + " xlab = \"epoch\",\n", + " ylab = \"cross entropy\",\n", + " label=\"out-of-sample\")\n", + "plot!(epochs, training_losses, label=\"training\")" + ], + "metadata": {}, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "---\n", + "\n", + "*This notebook was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*" + ], + "metadata": {} + } + ], + "nbformat_minor": 3, + "metadata": { + "language_info": { + "file_extension": ".jl", + "mimetype": "application/julia", + "name": "julia", + "version": "1.7.3" + }, + "kernelspec": { + "name": "julia-1.7", + "display_name": "Julia 1.7.3", + "language": "julia" + } + }, + "nbformat": 4 +} diff --git a/examples/mnist/weights.png b/examples/mnist/weights.png index b533c7e0d3b8ba314919f31162c6d88d64e17cf2..7db2a7eceb945a3fd7c132974472f24e52b8e926 100644 GIT binary patch literal 35107 zcmagFbySq?8!e0?FcK2d(jnc5G}1_kh;&GYNOubZNJ~o!N{2{^w4$`6ARyh{k|KQ9 zyuWq6zs_1`bS-3@dE&mW*n405iO^72#KWeRa0X8>*oKQ{ZE&ZG_P1b3($*rZG>{;4JH|Z_4M2GkN zTm8I=G^_q=MZa;nb`-b#5IhRpGQghVL!bgejm;X*?9b3-sN=eN&=LAAUgDq1hG zv9U{Xje^*N>Fk?2V(y%*7rt$1#73Li>bpPdci4A-+6cpm1tFusM%Bs8!ooou`RWy7 zb@}YNp8 zxivmyRsHXH(oyGw;r`JP^*y_!@Ng_WdV2c#@k(>5&ZEt-3fb^mo_?LTXeBcf63lA# zzkU0Lg-^3(^CCmFpxSyMf%~2&i;NN{Y2EX&{(ZO7QPlcpih0Jao~3XZ+@cgzVow%z zyc#Kf!r3eNXHXV1Wd39}aG<9rGCG<{$bOu9cCw~MFsgmH_z6o?`@y7RE3D7?*;)Jd z??t0_Rc+mgEOE3_{f8Gp*U)Kv)^f#KGDEQN3knK;+kdj^OUI)T*Dcm+D0}~NJ(fzu zOC?{}vGuA*klLKAdk*=YWBMp{J)OEiD}o5I{1UDRp&mcyV^9S?o5L zBi-*S<$vxbEqzU9tt}MC*x1;@w9#YxyBVMN!sWlea99go@}}lygHQGowa;}@LMxW_ z!=)Fiz4pzJgDO1DZgK8+)iYlah~bKU-Z&(YB_LN4LW8?QJCJ3-2F?MBaC<8tlIQ$M@f-_wnrVim{p;k3$3 zM5_b5YCm09`5QYr$H8$eTUlSHprv&br0k5jv#P7~cx0hH%E-7k;x>zZt=)1Hj-%tQNx)gCxX;E& zab#cwEKY3U)!FJ>?pCJYkPv)Y$*m@w2st7Gf{$=@aK%;X_V!!TGX@8vRJ62^s0-r- zad89dKMRk1{8d$>4}0}K*{AjXjH416Zt`|r_3`ub3%L5{x$180v(B9A=v1lOaMA&Ng(rHA?Eyivm(n?f|Zq3LgJr8yLt4<&U6%k&+&vcwF&7=;TK_-s=G3WgdA`>^ALH!^$EUr8kMHSSWId35;zj!a?i8s 
z)z#I6PoJXJ*Ud|HYf(TiU^$YP_-At#^ow2*zWdOX#KoVi4y#Q-?kL*uoNYY4dmoEl z!C-5oQ~vigj=sye)7mH8@L4%$XAVG2U_X5d8xRm6DJ@;6H}`iR671J~zcR~CYIC}M z(dW;98o0ub!E<*eOUMwbfq41#Ivqy#K|vyo=s#IJu1O9?p@R#SII1VwYj|1 z^ASNv)x?AXa8NlDJC&|GM4MiA+@>lWI7;+3nDfrFSaWFqp{ z17&y(?zmGe1Z~2pVKH99wBehUO+#Er_>}yG_CL!@D5r>qlo5>MSQwczcuC*Zb~s-6 zYd#44hU|a=$IaC{t@(DTjbv2#j%jdcsPjw^MDp<I18S9>T*-``LyH{|n${Wm2Lm!OFxeE0xcrWMU?J4H@I(}!ISS^jf#Hq+ol z)pmC%kaG3UYqNZ}vbfIObVsgZ(=<f}vcM8~@*Jb~G z8>J(0>XMoY4f`j?CgUe6v0Mxno{|?hWOflhPXz5h!MgRtvWQfC9^*E6BKn`%3=07} z#_n}$4(3r3K+5C|-68$0KI89_((vPw9dcL;gr?tn335oHXm-02 z3$Ekj@l%QIEv#!8>Y-p_MgU5_D-0E2R#-@b_XPuua#qZ%M4A=Ag8QV`DndhAw%Jr+ zR)nEGiK0G~q5yt?`F$H}X=(ZMZ3@K`>hRYA!K{*S$6O3jvCrL;JDyGigQ%L+cT8kbETc77aFc=UKr>h&DlvtQeSKC&*W; zSmnv~Bx|qIU7tReRaWl*A{7nf4j4$l-Q1EsY5uOn-)AeqAxLGG%#ltoEHW-GT&bXY{Hy$2tKZ)Y{Pcn&d}3qV zN<%A$_=cv0O5wMI=`hSsAGuuB3D~b=g`I}uE@ZgOytD_5`X}|1F3Z$#~(p zUngz9>>J6*!Gs)WvD=8fV`If$F(m#1w=WjeEy`>cBpOKq{qT+nexh`yGcimtze;8s zgWbRr$b*gb`Hr**>64IUWVOS5(`!$jBeT=ca}8+v=A~ChqR|f7BGT)nGFZ^n+4a#o zibhJKDAxqrMGvlQYh|j8* zz)k^%n=RnM7w9&|fH{FO`G5_sXf)K)!t=kscnxTxx_kkEw=8s|!vB zR0_$76_2YK)MpD|eOY1aB1*;Xq0;E4knE<=fU~kXq7x@fPD^_Scmr4nkOu4GO1r%z$bYXGxI&|KUjW^!J-}g+7s0QnH`gJJ2x)j5a>IX=tu~fy=|POtefTbnpob zgbbLE)APnjfD!og>?{*jOa0>EBtBpn6f6l96_uJf!$xO|l)E5zq*_+ean3QfpA99J z);&d+T#F|h)}ezBL}h9aalx#NpreO#Yp{axul?~szz~p*Q?4Oc;5y@x{>+83F=Ar8ZMsQ9BrCf9c3fYPSL+o(f7odTXkv1VU?C2&QlciO2j{lZP}i z1#?d-PLfSU*dSwBbK$%U!W<#pa$Vab1%qUFXNo&3tfML_{m(RxY9 zMTic#!9CuW7K1n~=i~DDLf)>q7ofcPlCCa~4TCITXn9^~oAC8d|76OT>5cE-*7f+3A$} zh2lV5JUp$VIyA`fg2xhfiJ>A?C0F!7rjXPPS6Ly>zcjR@t5w`X@PWl05$) z{tGhtqrET#YnTC|0YsV6ADgcxilkPWU=SE@( z8xs8Ha~e-D@I3-A$TSP^?0F{0;B+Dl}LNmy9e zg!J?|lYf4O7YqlY@0k^TOBXuJLEgjIz(k&}IKKJJHNw-E0vg5vg%tA6JQVWJChM4A z=i+mBB}Ilv1?s@B3$p3=o9H``Doe_m*vPVqN~8XKLScXgx{GW&HbM88+LrS9{Sit? zC(1D~TaUah4>zj9?2*d` zeZ_~%`gRjAht9MI1f*yD{ZZ9|A%cgN{wPISf^W_Ueb+FUXshgfbxvWpmr^kG(9$2sFqG2@k zw68l+)3Q15#d6;B=+d&YW1EOvd8oMw^gtE*O(8qo^*HWl!=Dxu z#j{oU|3rbSfWYYkH76p)(EABu2n^tB@cCWoz;f)53#)<0{oFNtY*QD>gBiKKhWNHk zQM5EGw^kGQN>r`Z6_qw>zy7Z|4-yC%c;2`-=H|;q1z9$U%GjqYB2*N4L&h;HXKR#Q z6HMCvDm^vRC)Z~AXdVbBIOWI`3*rFrfs9;TTbrvjAnc^fi{RJjIo`Hs_F$UD4KfqC9bg{ ze#CSi^0B&lCG@+J(C@D5Q+H4JlwQAxH;G}55MmcBzslVa1qcusfV7=e1+DOU%xF(` z`A|8CFh1>aCmV*K9gZ9_g&Pnhj0cnG@wu!La+yFMwW*!`dH`J^)VnHCN#}fA8TlZY z@MBLWoOnYy6fJ$QU?)u;5_; z1xk*`th;^+4az)J5KifTbBL}Gk`=jp;`;(q83BP2TppKe-~aqc+rq|{W+RWVHv;6p zbzBPIp#rTQurG9UbR9|b>dR|uAv!gdjo;#-%dj5zkxlTUb?+2x_}rcI^^rM@i6KrY zGDT+gZ0=4`OI=b%MhtLNgocK8EG>}%tBF$o9pEN>4jYjGzwPYR@nzKU+4w61Zcf3w zbX@ForU7P?Ytt`*WgaRGV2?Nd$HbJB1cW@mNlU82Ly}Tbpk!g$vljgzkkOfFW@vly z(a6d*#=R8SqcW$^oT>88n$pD@! zpp@-Xck8Hx1f_Lj?C#v=vKgTn+kzC3+a@{Ygh=lyjj0yZ{Yv zl6`(!O6z)Dr0BqiZoU?N-N1@Z&{TDO&BZDvOT>C1o3HbC%w5`u`{1(kb5fNSW}-U5(W@P&POpw08z1!`5Fnwo zwY9N0wD;OpLVawdrA$yK03@)Xf%gS9^$M_}{M@@3Dc=GXg+idZqGUPnpHWj&ivcPX zaQ1+Wu-(xkp9UI#LvHg=i$CsR{kiQ8CWe&+tmrSrOCeDV{7tXR#r`3e#~-IhoXQ6S zT@XWt=8kgJa^iI-tulLhBtyf(D1eTmTZU&lRVoI=7N{WraNfYm1sFLdj79>r9AEDm~z(q-a|2Fjw8>LpDf|~Znkw5~= z^y**!z68!Jz>N%ibkF>zau6Ny%dPywN~aLs{Br-w!V`NHXh8QK!_{JxvOqj zoz8_;Ht)OVkB;}2*{oHHZXg zOoN_D8ZUT?48$TwML$qQVjqO8CJT^lmpnhRPkr$w^E=0e7$3~mUJn^P1Ru5Cm2)pp z=1HWMqMgi@F7uHeybN+TqYXXxL=6sM6Z#xKG&(0UdA*%%@Sok;6gWDdveQzV9}EQQ z?#fq4Tm6q=1qk<`8UiyOSem!0LJ-g8Af)4f$E$z_=M9vW)>ge)ZgKvewKPiq_MYPj z4VcyiI(=+$Su!wOJ2#0|6Aw2qyp_ zK_D;fX5m{6kGOz52GW2D zs42it@|V>GPuf#6DjW?g7N$UUoT;-TdH|l3#+C&wMeK6JRzl!l^aN54^cma_bo;C5 z)$^CXrWkesXC+bTYhY0WTcn|>8MNhfeS5nKt(lx(Wo6%7&PidT2GmiqXGoaxhB1P> zDgY~8mDL#Kz=pHTZ=m9{dmPTcw5!*IVFdhpT3Xr#X-Q*TagH}|xb_2Gaq1Hb)_4w! 
z2FLJ>rBohfgopi8{wBaPh`$BOIyMOjI_OEjEmG%m=UPH6t~C*p;&6HpD17Cr7=-<@4GTGFm)(DLnt(dLaNYT@zS8C=D!Je{@y9hq`M3jR_O1p zzXswN*-F!1xr^`3^|dibZx)9L=tsjxJH|K@ld$e_@uYrqxdL@NuxhIf*L0)9O?I4=S{`_ByLkFx_hIp{upz_B=+X9r^D zm)Y4X>V@7xX3*f+b?*+;%NOZ&wpptKRdR|p z;!VSm+;!PGBfE{l)xj%qWm|EGmeNINtD;nWDeb%3z$z)968W%>ETtJ}Wp}+!Y((O$ zVRI~3zpIirHi;~DsB}RLQzV_Rx<05?MxSiJ;BaC)r}zI7xJPsX{(pEc59T6(V=aGV z7Kkpg=wE5VXULd-`asCy3P8%7YXH03wMLRyDP2hI7c^4~f}WDP{!g9X-G_Bm5mOc2 zl>1PZ7r2)f1T$rV1%3E}mEDkStbPrkXMAzXDJ=LGhRMLIjtC9?+|ptvK?y*~Tgg7h zB@9Lug1{v+i{pd|5%W-2B!zF{r7gS2@)rm%Y%|R)F5+mMoT)uMl#dT}=W-D_9{raW zxWJvT=D*4;E~qn{{ghpa3|4IhmEu3g_MXiRCTzV^g0{>TM+YeuYf|fW2(IM_%a41Bn)x zrjGSrfXb*l)aL(PSL+4B*J@Mkp}i+68&X(9@-Ul!ta+3UU^;7_dn&Uc9}*6@J}9je zSvszQ4VXM!_RI-cWTv>t1>T--&WYdDeF6n4-Gd=nQ?Ibb$t_>Lf+-OAd2p3Dx=T+yc+z?YyUU)^;oC!ao-jJ&_oVXXa`qfn@$bPLUWcgmDT*b>BK3g63o)T)e zmwDfm$9}_o5G0?m4&7mR_Oqm&w52&be-qgng1|IRWhX?ndechz!2UrdCTq&zeYpe| zp)qqS8W4nbs@72??_BN*)pgiYe{cU0Pa_VtGk-U|{Tcb~4UU0@DmMJ6q1`ph6KjDO zcES7eiECgW0M2L7hff~vj;Oe}e*K-tgnpcGg!|O#v40hbcrLBhK$!{mOe*rVd`7Uk zTS765SvasZI@w?%>yp5iVVbx3<)9HrtGkABt%7qJeZa923f!8Yr?s38GD1%=fRh=B zDRn^0IH$w4v{r47N4b6xg;$$+sd`kXp2p0ZtPwBFXZQ9y>Ud=06j5*r=K8~XtZ%|t z-xincEQ0%pT0UG_P-dnS{h?HN&Dsd40!$-1!F$58nzz3>);BgxfGZ6W2y9!S;o++z zJWHWtR-v87feJJdIywYEmEHO~l?fr5p`H0xuSybB(n=$;Qim~)^D~pWsWnO*pMI3i z{BtM4x6My4_p`)lo3E@?!!pKs$ETL)JKFV4xb=l?reJOut2^EYpD z!6}a8^K(f6h^kz+l>X{qJM4>sbgO62(5&lG@QZUDr3h(1p~Yd-R9PC^92SE2Jq?-; zs=&clA=FsukT+Rgh+sYfEyB$j zw5|7~CUZL20r!eMoK&7YNE3yk%6+9;*1w8M3xCL}kBrf3xlQi4jS#twj~DDB7m2Ox zOhaWdIVC0Ow{MxifeoldbzndiU~j1X3CyREK&XtOp0D8K1?sYFy+?6|iF*@nJ%XdB z1y@+fVN_a7obs5?P*k2G>rj+2Gjnm3l-T=k%wi*V<0mb4zrOds+~o?E&P3j!fu5t8 z-m3zpAkZ!$kl>Jz6X0rGdqSRJBesVAV@X?4}g{Zg$Ni*MaPC8($~HR#UY2bG<=1(`>!y5Z&Jkv&_u(yR`_d-N+aX ziQ*)-_@}Rxl~_Pa`MKk&-Q;TBT(1mPL0(|=o#*LmY-Nl!CuO>_W*g={{8Fjs#zJmr z$+XP1;u;r`>IY#fSmt$^-CxJ3rHr-9=$j1V6xsQL9{4Cj8Y)B&Syay%GzSr0!a?UbL7qK)Rk33iEV1$wooh|c6*iMfLzA|a}js`|* z6$;k63Oq*W-L_s1GGQ>vS6bKmkdBgh{-z;-LW=;=xB${I+P`X56uX;s!u(iMy}>53 zwnZC7y-w7EWdKeRJ0l$_>6YR+gO_Mu^;=(a3)mVbZ9N48d}JP(j2DyF*X>j7Ex*7C^4JgDoD33MTS} zF4iq6!H@K-V>yDG7`wk=f^>%2aEU!oG)$%FJ5Cai;Klenxwyn6a*#=3(_T;1r-O5I z&IkXes%SaQiI0{l5 zO&SpUm|!@V)^K?EbJGAUmcWXwRj&UIoRwGu2+B&aUKZ1|T5d&dIp~`n!hlc57kByZ z)cv>g6Ysj&vIU;7AudXw0vg0Be>jYT7xvVj;@brbu$XPEue-WFpEYn<;_TNRNq6R6 z0uE1haOMr_nF3>a3_Calh1C9VTQY&H;j#zQvDoUv?sNGTl;FuZOQ3xMH(VGnGMWHR zjCtp!HIT(Aw_TfnMT-QCJ*(YRl>VP+mJ!Ly7Vv@Y z*92~Tdx76XnlK_GB9MTW4<)*H9Qu^NvJTGLiK@mvdfGgWj}_m9%(gT0aN$p7cthnX zs5T7-=8Es`z@+<%N}J>}=Y3z()|beFnlf#yA8S>rStJXYcr{u{zwvS(VJ5zJvu%*U zm0CK=@z4b$N)X^8Njbvf1es)R;<0D$@RO57l*4hjZmR(A+}LX1BoG{MaBw6CG4Z5PIaO%__JiVMf&3@A6Lsg9NI`s6U8fDa)zW zA3%Z}>pIYdP%U#i!{UfZP+ZgM%XDs>2f9wY`DT|mL-w(# zD5G|n-yXIUhRn(KJeqT7q1W-H+&pzAJ;e&IDhVO2szXND{8dkeB)&pM3Rql<|Q2 zQIG&j&EG1oiN;x{!=})`V#I@*X;b#JCRHWt1Ys~h8%Zfgvf{UL$HDD8R}m4wC9Htb ztTaN&F}!Fmm5easnhr@WbqQtc!$%AF6aIm9F{E~zQm{`k!g9P7Oe_3NwtzR_hXE%W z)O|L2umwih^Oop8x65oHzurWBqjI~^ZZdE~GAQ=`*vpfBy%ovTwH^%au9Tqi7SysR z_oO39mEsSX`DVvuJ3m9mrMojBDL?%4G<3h@Mw%S3TL^8_?h=$DP0OjuB5a1u)js-ii z8ecTT8T_&z6bb&ISYphdiR!lQX;mPCro*~~v4;unP!7F=29TD8=E~?eVdjpKI?F=v zYW0TXRhl+L3g^$@KqEU=lndf;^^75TwJMOlM`<6>~%P^O;0difN z$>#*AU#Fa^HgA5K&)w7Bb_OW0rnEXh7wP6s7<&$dpsigZaIxmT8@EQ+(QTTCZtj-7 z5yjIGzlDEY-hUHD2=S@>b2wl#j(Bt=gK^>i6?d~*8K zOqtn_XPL@8qY;5mKhyA(*0GK?(8HJ2@1(YUi_fuomqYBkz@VogKN4-?ZjqmUGX)n| z6zjKw6sE80A}x518_FE+Dw;Wnzl(q0yfSXohFqoRi*j19zc~wW7n$TnnYpvlZG|cbY-}P(<=q^u>CS(l@my||^*$l?)Pq!sICJRnR*9grxGMAo zrJP~?QsZ-g(XcPxRWUYo26yWwd*iM_-2} zTjKlcqi#n>9-L0>>|EsOS|9=veney z!MLI{kLhI6Rk0`v7FQ8vR&U&&2O$~JtkKJM;Daj8-SJ7JcIk`zn3bs4?H1iq8+LFr 
zUQ^?!C<$NN&>we6**7L+lU1MgO?vs(wfE?2rc3-4?<>l9M0M~TV?%i20;#q-hwS3H zvxhv9Sdgs0)cV z7}8K|#GcgZqbIkvsjH|-{21b&M1u!;VxmR-^khXfymk#C=E7ml?X{^CpWNdeHiQ3f z*fa--FJ24mFLwDHJg^A*}+m=}Py0c81(S3>NX& z1WG!u0U`5AihM26r#Kf8;6188mUi!8*g7=9`d~Suh;oZ7eKqqg?c+*o9JDqsEn>2fn1lPV!Y^=&73f<>?OKCMlz!Q4XbDD^G3i(ggo$3I=QQp~WKZk8xb&Bd~41y4gbBPHUB%i+tzHBMV}y^O#FMkhY5=yO#Pligu4waJo>%aPi%ZM zEXC3lwqLTw)%do=oZ1whh_ZvRlE#taKR+ce^c>KDg{Q`>EkFxa3skTWuq^2xO6R$q zNXsW3s7;g(LAxl2v$3IY9XZx8O4Iqq^}DU(iONukj9VV7wT4XYabR|OysN&-@7i$RGx*o_J@nZ?9|_l9IkKf#(^=5YfUILW^iQB#K?0V%xm=w1D1 ze5}n=ckibquR^P4LI+)>mwnTyjEjqbM+t1_u|6WwQ)EIC|Y+V|gJEDkj%f!WK zsVDj^^DR~h%i0m47dINpu5IpVvuZ5eQY5RG$qiT8%X+gsxBAsm09yWRtd4nV<4Ba+ zvtzRuv@DeA2bqCknxr>xrElW*I_MXE?q$)gyxe^0=sTKvX_86;C3YS!l@LRMyBmi!H(T`$6$iGo-cNR&*zv2nilN?= zoKiLZWCG?o62=X+)ciY4fy=!!JhnluqSno^d38PZS7xGcXcrj12$u zqj3kK!g26P@JZW1-^X7cbmduh%ZtzcjFoBC>wd7%t9!9`COBR`Od)DWNxv(WJ7b5!|^ruwPupfCDlhg!{^Sk#HqeK?091V@v z7kiS_UU;-fofl=B)$^!(_x2CN1xe!%vLbqQKnOL!}f8&+s;OC!stW%N9;WVlCf zr%!K%hBue1`H=Fkr?9X*Vet$0CLPjHRaJ8AhW1QcNLlHyaDN|z_9&Cj)m&U`vVrp4 zj=twBivVv#6fUnj)Gl=vtP8Zgy4S+-D}833$N2X1`2x+Ezqek_H=sZN$|920Vs^Ob zGVDJWF3>!x&eq`l1^olwS~@(Flh&U-9}JPXo#SX+M8-Yo!x8p{U$WrY$Ic#T1ZzHG zwH;CDb6-$tJ$im2mMW$)z{>bgwlnYDRB{BT&cof3Y%iE>FV0y5Q+!o0AG<z zW7jjnRy`6w7^-Kyr7a2X$t0QHR633~XECRsGk!EP*_yfwR$B7j%$WCfPvaZ?KAD0l zJNO;(>x0GJkNUG8WcH&Yjs%}nvDD_J4VT#$8qhJn+KVJhdvyvc8}ilbG#s!CHZCV^ z%c-dAB+v0=z>JA2a6PT;A-F2Y#W2!2I-b1YzZxb@bKH1g>e_b9*fcF1t7@pY(Yn+p za5V1nptvYE3%A%c^tJ$XQ^XmGqg934glD4$MIx!>ttXrrb3f%%hg?3=9QrA7TN%bI zJj{=~&rOLL>sdt-&N0QbSZ2(9tWSS1q6urMVpS3QXuIrZFsbvfa|tzbvAMxX@Koo@jAmr0#xj7X(|FFvMb9GVo{cAdS z$$WoHGNr}MZ>lSatTD*!I8U@!V`fQ4Vw#)mtNX54`R*OR5=UHzi;SJzJ&JyP3NQS} zda%3&cZ9oIS&WQ%Q1x7LLaP4QJD@Z*ld+`vLf@DnTQ9u+3#+Kt-~#?(&f}sSJ-@mE zS1%=3zphnO{ZHcPpTrK(w{#H{xBb#u=+XO2ad;8)szo&63Hl8MRfWQ3Ls7b?g7Tuf zi4{TR;eK0Cu%hK0)BKN;Tyo+IA9N}rr5_h6ETQ-$(AA$fbvj z_RZc-e3ZUDW|8}-|LXXo51WK0)vwu_ez}>jwa=&i-m~g{n2D1e&sfg=bBmI6M%-v5ZMZxfxuF0~HH~Tz))=0|Fm_y$7x!OU+mp<@svi#ZS zG-GMt9O)_AV^}DbDex6g|3Du4KI}I6Ayg#b*9%X=CH!yAvpd)K0ayWd)cXHVYi}7= zWzen-gQT>8fTV$RgLJ1f(ny1pbT=p|At2od(zWRBl)Ct1zxMn6`uM>* z4vxjl+;d-Z&6VdFldE#ENwLD?`-ocOvOy~~=FXy!Rggy`t`*XvN7MOd9n%asMuQ1; z13BK4e$kCi$KR)M*v0BIo;Am2hl;FzRH&4Bgv5_aC;U{ERmME9f*!)HXldDk6OGMh zYCj>NQCxN%?VZvGUF^c>z%c;JiE8y8Slz_NXZD$0@mnbfq&Lag(YwM(W4=d|=R#dP zF}WYG73S+)msOD3_eUH`$*Qu7h~6y&g1N~A4%mH6v#K5u4H3G@E%IUDu0l8WsB?I{ zmHF3DDKt@4*`}A;7f98(6DgQ-YJbcZb`2 zWRE7novl6}w|D#UI1lM=9;vbvA2EtHX&$i`#g98PM6MEk+Q~{uDsspVY|(&)bVa^r zeLz+V^HXKS{Pg*V-lNWXUI9%Q7;xsskB3(XC zNbVEZpqO-ty>g9r=PJXpJ_Sz%5=1n5wx*3ox6=!{}Y1FlUqzb)^9(w6a z{M2;6;Zdqu7(ATd1L|LkSY^aip%3yh1SLc&&1?~yqb)@#@jpL(c6G|KlCTTPKo?{U z>__dUuM*4av5l!MmCH|pB#CNofxxevY}hp$L7#b_mFGPxOH zZ+@pObncqkJ?V0lxR4N(|(*I|Ma#1r{yQfm@=X021LGGb>LOg#n_ zJKLfG^HFN6)QK^DWiI&rRT}k)lKDA(Us16BcAz|@?B-e>8Wi^OBH=VA=c=sAB@;T1 zC;H>L0=V<6pJtGQg~&hMuX^~;Wv&JmZ5%RIJ#A&!^DaF1ITw`CSYq2<#*%Z^60iA! 
zT;xijVBqH5f$l$e-Sx$9;(qr<%eoTa1EY3$b@<)A%brS;+lpH9`U|Rrsgu6?+Ec=9U1?p5 zq5NuH*mUo~bQN3?d|p0jL(P>OI8}!cK6& z9}`%tvI-q zGrZCO1qntUfwrXOnasJqmgP)Px4b_W6BDZF4sm_qy}KM@dySW_B}?z6 z96F04oy;W}(uDVZ%Hosqrs=7gVPQi2I_NH_nYh34m*;P_z}$Y+cF5Hss!T>snUY~r4oyd(-OPSlt8II!LP!e8C z>z81LLzwV|{=v`i={EHxEN<=@?HEw=!(rMu2=D)g!POLpq>v_P_d^QwmXexjR~B1xT}d-2db$NNH2+QvI3 zIxqpR;lU@o;*@=we3OP3+ms6aCpeojv)9y~7Ig(dh40@$;Rz!gX2+}2wyz+D*6SWg zpGANqY1w~$Qf0tG+~9K((9pJ9MTeIpQ<1ULjwPRW$R zQS#Qj8QPX-N$4EFx(r;Ep_TCJ91!=tV&_=QHkw*q*REO%uYSK2n{A>JyEXL5V}9JN zUw2YZU1Vw;=W%`z<2aZv^7uYp!%aF@9W?y1qAAcDct4(HY=iC<Q(d>v%nh z<7sAtYlL@8L8@lS@vdyx8i#_>X^2jrqIPpO5r@w*cZkUZ()g=A)03Y%S9Tc!Y7Mw$}0okKoWd2(v%WLn#HY;@Fae3O`%*zI!8EO@_t9<+0S z>SGfC-Gt~bV$*z~Zqs;BqhirQlv{^H;XWkgC|bTXPdT+TxS18(;j{fxnfVcIXuaEc zeVfIMk!)h{~5PK`&ZIRB@g*( z4jF_1oau-2qD1m!&bJ3J(J80N`6vJW78UC~&*nEZef<%DoK?m^@45#mFaHJV5Yv@; zSZnTGT031i`R>=*(jqhR@a7!D<9l0Vzdm8zC-?@eNC^fX3Ov}ZXxXQ{v;2H*po_Se zqteVEeo^57Oc|h@`O{VcA0YVhCC8g4R~6Y?qOg-=Kdd5SXy#iCKh2ouy70=*x4i|* zYDRQ%QBg#L3g2xC=7uN~ zCT`!Z$Hp`Vkftp)Jz499q2hri?@-XDGXcOToEB{BHoIoad9v#6)KV1FAt%ess!k>D zej60z-Kc>Dx8p*qN%pH2twCuH+%=_fJs*Z>v{`RLSz;iC9Sp2?v7LPK3!%B{ZpY)y z_JMLGue$M_K@lqflHfn>4qoDz@443hUaV*&oVfJx^&|HXLPa-!0KvnYc0G=05jgE%NDy z)4I)|tf1UQ$yH`!bDmnOF@5r~&Gs?6e0C$_*8~kEU)>Lf-{{$^J03dHpToWz^q|7YPTO~Bzg^kMe_@GEq6ubX&2Fj}-6I-aUB^e}#?jCZ|hb=WFkM zKE?NiUQ?q| z%Gc1C;y*&3clXinbS`J`c2KprAHV48F+FeQ;TO0IZ&v5L_9`Hi4!aKgL7s@d?Dm{< z4}!BQ5i0nI4tJq7?Ay7>j2XU7EMKy3k$$PRM*Z5I;Tv!DvYlI6V0$MYEP^0u&q9m! zj$Aiyk|P-7eLaf?Pie^<(@WYIWd2o} z`-WF(Z4@s7tqIY(*&|?Gpt70kvI6dncgYm19xo=`YL) zS9j5Yqg1!3v6Mha|0 z>bdTdbbsjsVzT=#o>NbXC--^pdR5Cp2&AVI$yL$Ra!%c+=5E;?;>=b3_*KwzH4VzK z{@E}6SH^{R(_d`iz&E|SgaK^5Y$gg!448Rbk_h*?AtNluDabyZU1r4Bm}rnTiTf!JbB0KZmQ%Zn_fJ-PmY#R)sN2nRaZ#!F=PxSx_@0Y zd}20Z64GKO=rtocA2q|%(sY|<=DP26b*u8m-w}Mf5Bh~ENaZu}7x`^WS?`dlzO>#e zc3^g7TKs_99J???dG*Q>*fX8);4S@j=UUU^^&e21OOkqt57+2@&l234@?3bbjR~Zt ztL&_F=^M0r9dmjpWHL6ZJ8f4up-`3wVnNQcP7cI9sq0!IGTL54TUV|l6aPuiv^3Z{ zN=t2&6}G6fMmLQVv}g?@w~LWqgXxU@5sRPq>`0H^>@)B9JY4NFrF3&`KMJ%yY`DjI zC_W-U6?nYLm^yxaV5HH0p*`7$_ekyDYK*Ni^`)iiT(304XR0o(md;{fe|L8nDSs)w zKwxoEb1pqkrZ&a>JDZ@YR-b;xOdC}Al_LWD*JkqP65wX~F5Q+a9Nkzu`k3O|vtzX# zd3Ll(5A8~htSev^sDHgO8=5OrJ9cfvUXUoUaZlx=cBx;LkD3t3w1z_Xq!>TNM9KyK z&Qz;uc}i{aU3Dv8JV}-A_5~Z4Qcv{gsg!-!Y`smJw4F+y7%kZj&5@q_3%ATV;Gt{4-eOpV0 zI>GL!SkeBV>ZRLY`9#db{6q}8B46XJM@|G)w6N*uwNq;bm`3oE5Ra?2t&0||NvX=y zWj46&$7G*P^Yn)}124Q4Z~2%S#QT&ULm*|^MO%>cnmYI79#>{!y}Dgqe6#Hd6lt<5 z3kK_rZY}097F)I_<7tf~!gPVJweWGu9NS0*_j(0e>Ey4^!V6yI_-rBfYfiB+l4SQM zs;Ntc*dKa_eL{VM{3rI==I>a4Wfzy7FIOhDX)WvCCMa{;vraDJZ(hRT(**W2iHu!E zB{mu7?>}8IbItN#d@6-R=#*#85a-Jm9=*SfD=OBNdChOx^|*ywHY^fSVmtoWr3mw5 zGVXa>!XSUDQEVR5B*b3vyX4q+gkU#D^v<1bFjX~E$uChYr9L2|GbMFnn?7RG$qp~g zoX07X%2VDgPVap&(;&1i;D|~JcTm4{S&G{ZP9i~N0m2ck43GnqMZ?i$Xk(2tV*`!N z5+Z&&>|4Q=Rl@&v74Y4`KO8{*_PB>X^-If@)Uu72sI0}u8RPu3|76~6;48b+`DNyP znmEF*`}@n(3#Cg3OD>n_q6kwN=mbVe=#YC95S}n_agF`A>-+9x*wSrVSM)|-@Y{=G zN)vjUrx_Gvyy012&Wjh6F*RCl4>|Dz&+Z71(S%Xm!^nX+7c=ybg#;)1% zw@2ti?V_vd#v^^xn5pz1K!9p$q>z~Vv@Ay!VEZy8;jD`+qtvRAB9a!ZbS~I4*pZT& zWRlXW*nG=z7@r$uQ%Z8;Bt4JsV~Ptj&SQtf=u+Xa>M$XgB8&n&v=|=Njb@uobW^cxB$;vv#p$y*OyIhvdAI%xBvgA-Tn!obG)fryIE7es|t- zuw;xMJBFrD$O!R4ke|41mUPLmforxtDEi~uc&cAnSrlf#5)g!(jGUw~5Z)=g_s~f| z4({3+_OR8siHa%gJRMta44u*|&858v%IBKfa5?xPuG6@>50Kxabb=4Au?+RN@)Jrj zC!1VkOIFIo^?koHtya*@dw(rjdAo2`ds1bRFkvyx_rB+1*x1^Q6E^kh`?W#S?U|-q zY#ma@G~PcSk4izwZk#P6&(2=gR2;5Nt=RwbSMt_XU#BK@;^5QQ5iemrmj>HKX+kUd zM%z{g22#Gbhu0%QwMR`$4$T6X3axF*Wy^i%Z!xrT^QCfJlb%cY$L|nEvx(5%C7TuW zzBy|@5}iT$oNV3@QksI)Yo)#FCd2k!#5Hq=im5Zqv~KhR!jN621+qu`xH#9~Ae*-0 
zB-0hH=7V@`>heY;?3edv|BOC=aJPZ$w;#Vb1#O zoXOM0;jqi+rbEBqf2`1ZrC%Z?eSan|GTMjyMomevfthH|?6-#4#F#TzgQ2O=EGEa$ z;3ZmOJ+^l9mA}B%s@0aardj`0Jgb~yE4B@SE>|VQguu6LJFahX?(XxA^}mAy7LeOJkmDsn~DP?Tv8DdT2MfySQ59&I^2{*yZR1?-;$%nVD(M z)?NfdS~ia_ka#mvrU*+x)F<8j$(?xFYHPeRp<+}opyjXW9OKzBveeeXH~WFLk{Xw3 z6VLqV+L{-&)?PvFr~POxypG899dSC!pCU$L2kf|GmAPFM^-*QI%ZufX%avKJ=qszn zPki=5S}sKVVvMg+k1yB-UK|c89--8X-L)Txa_P8JY&7?>_v$by=DLcFAgAm<-g}%| z34Ct#V7nXCVWFFaq|{%Y$B`Jc+L(HN%FY9^6qmo$3*3LK!me{Qc0QkRPoH#N$Lq=; zulxG1GJBbymgcW9>(|u#g!TyO7XOJ|Tihb!zj>?_ukEh8{8v4=TaWV;NWQh?K)w_@ zk+NMN5V!X8qh)FER~66LD&^Qlo#On$4d3CE&QQDevi)LpRDazHR4dX{*^<$@)Omx_ zFgcRG#Vi|7S_S<`{hwNZ+9eI9U$yBxK@JMn4M98Ib~h}#K5_{VgT*~ayIq`t=@q7W zsqI4W&ZTjppi`&3r)T524v2%Wd~748PEDsQuT9Upi<_Dqi{F2J30X-JgydU!My|yZ zGE&{FB+yUr*B&W?XP{sq*7e%BmREJwc#*iiexE|Mp0Od2bLkcJ5q2~i;?<`m-dgSb z6Nis!0thF8H|JFa#l>jsTuLq-whR%j5ox(a-V|hveE)vz4>HD-=ZcJsaTtHIi@C9{ zmpQpDL>G%%h_r}5FCj$CZgQ@q9O&bIv6x^A601+)rXEtbki;tQM^d{rtm9a| zdof*IRl!Vvu;QmIMBic9A_x;9!{M{L;hhr|ffH1+UuojnE~BsJq^mXs8s=TrXKCTQ zOk$n~Y>%@XK~Mv>S55*e-xOxQ#@PwtFH!KKnS#8N!aMpHcbU)zZmO`S^un9$`*q-Q_hcvX`V9~R_z*~~o`nkCDbT^dV5SJ``8 zLMlr&_6;3GSBi@aCAbGe3uV4G=S?GUPN(H-^s_W0(wV8~IljR>dki(0((zm6scX*s zWK$0elXBr_(?l_XE>Wc)6mgc%(Hdr?cGPNB8rBHH$5(!DJLNRothXe%JM~ihJLDM_ zo$(S}c(|%tW^PwGG@4KQpvw3>a|Z#XD0aQ9pgGN%v84E?xZAsNNshFhxQC!+v)rl5 z*}c`l_c$ule!ThWE@b4>T?b`(f1H&$on$tcv(%c;oA7r7KWntepAzlTK%;CI-#!Y zQ|I~aq;FO7`~cnPP>0ASD>(YooYRt@wPtWvffB&2`6+aV*$TPx>4a3gz519`Q&VG4 zQOee=?8*Lyj#=sb`jBvq1|2{31pSAn7{pFTu%@+3EOHxA(Dmaxka!6h+UTQZ`I&*p znE6(ON*B6K*GI;d_ZnHi=pZ}X-7y9JCMWqqN6-4vsmPT*oos)7`6{PNN%IeFZQN_Q zL0|3)p>u9D}VY9y!2>O??ThvbWT6l#kQsPR$j>jm@u) zpkdE}DGQz0-Xynl%wI<(iL^EBEN8T`n3wF3!L>^$ueubqNwy~&_)7T6CSjt2w!fTE zn`e5y>uid=5Mp1cNM?L6Z%z868$MC%bANxKObhGYGj(+}wjr*T*42b?SQhLPMAZPT z{aA;X04mMGp8y0{#vgN>9)#Q1ih)^8z+KMNpzj-WSUs{r*>KF*;x}&D;_o@`l+}l& zCsY;jvv=8Ky_^xGS`r)j{0e!XAC9o%rx-W@vd>iKn~hqxbtGK}+XTiRzTV$8pwq%rcL9K@joZsVek{L-U^ z`AtEH#q58-+GgHOQHPW~y_4a&W^RTb2~C;XsVYC2<>hR-V`*HtgtybPqtlRU4mYU| z)6`^-#GMFJ4vp})e=IWcP>}N10LxCT~#BM((?YAV5hQxw{VXydX8g-YaE~ZpSV7Od& z&mBVbcX+_;wDjwrk~0%%Bb1>yePElZDaOP(3`8P>N_T)W9B+;iNeUerK#4drb>-7- z@Lw3yR8{UL+#{~@Q70qRP(OY;oV;$VCEJSc45Q#mA|p;|2kueR-bW_!3*^Et``mE3 zjp0gD{JXjOQXs>Wxrr-~68(R0tloH1Qg=i}$GIkXyHQnP8PJw02y5!}~PS%}Z`<6+pi^>m+eCG1ioJY4v;8d`^LG4(nT9cC^yP zv)rL(fAAqXb#{W1mua!x;{;r?H(kP%25HZvc=ki~O&<7Kjm6fHmt3z5;w%CN#2AWN z6V+k}D#3y2Z+g5@3@u!Q zxlNDr_KxT(pxl;-7Pl;@fM>cRX#J>S^`9gsnRl=Pbx5!JAUneSh5L*{-#zvOPBC=} zKFR$2O)Y|a(|mRxw7L(C$OX|FbEq;>HuUaua_@f2gp#&WwH z`)$+ZAxYC5P&zfV?D9<58m9&aPFv)h1#?!J7c|ed?+WS*QbDG-K56=HVXpLggE%bh z)Ad*mFZSTf$iIN!1z)OcSKJ3UvxXM_eO*AI;aHUT?Lc7i(vjPDRX2&{WqR1oWkxwb zA_~A+PeUwb7$~2cs^Q|Yyp8zFWM!hFqTJU}l@Uocp181s^Vx%p4^kJQp^UXWw>)@@ zc@*PB`YgAvzmNrcD5cXuF{(iq%i5CfV@>X{s_?~2wMiY-h+2o^?Hk&h>u#oQjist2 zb!6Yi#J;TlU+C)K&MD8eN}NwlBR3>D8GZ*xWcs|l&|P&3vf`Jc|GUWp-sB}as~onq zwY941N9;IQoSzhr*T8$!nIEigE4Rcc&*)lLkYI*kC5+wH`0^0JZ3)`Ox2d<3e)rr* zN4;}_O!wT%Ju+&b8>W714ahZA1C#WXCGTn!lJ#x!jBW9H7kO6$MVLT__t`|4E&Sz7 zyejmnVQ3d2^v-ne-n>3yg#W@WC-&{MeZGF;yn2{#oC(cwNdj5DKmP$-)BBAZ{NMV zXmadZ%pS`h)zw)&J88|8-wZ8eLH84JLQ1#1ug9L@G-s)q%p4_CsrI0Rb6CQH5-@6~3#W^8*Lk66nb z!dmw3H8e@<@zt|X(&u&iS3YiUL>Bxnf_#u+;R?@sPC;rwft2t0H=$W!u!HOg-mI`4 zL~?sj%exB3gDDZ|BqjL0^uUa@33S1smh5&*#utwZrKlteGX@B&s;L8@P9#~Uw;DZx zm6?d{-)ea@C!;o>`8-aJqdrFGA3Qz$?At7dXWMD_VzF1Qbb=S5LqP=HLQE4kje)!k z@ks~#)AlTcSzu8=B(TR8?O9wsvDwyhWW=^#6er6*6i1qNPI~F2w|#u52Sx#^eJGV` z+wEfuh|C_rVJZCqO=9ov#Rhyz^J&aZUUIFc)#tya0i`(Q%#vq8E^oE@?8ZY!k#Yu` z9MR&?{fN?IGGhA6C+=r1JNfS~;dA$buZK4zpZyHhtQ5e@_1KPmc6?n(gUPvqt&rm} 
zFLU2+5gSB&jFPu{qSnVK2n5O~JQ-|9veGxL_Z9@UKEHx4v5y%)i_GP<`~abDKZlTb za*$$YuZ3Hfa4UjN>Z{04Rp2b0;w?XuyK*w|%*1-J_jhSHj693pBT9Vc6F|;m{Jlf9 z>n5OXN4F|X$Zmb8Zz`&4NBp?Lk%R`4zyU;gAOm|aR~ZZlD^fmEiqhb({gNU#JcQO$ ztJ%~!hX=f5zV}P_UO3kGA~Vqt2+;aK>pCbiFJ9v`+LW0#j9`ypyG39yB(WL$0e|V4 zYX6&8mb;BV2?ahDud^(Yp@c|aZ;|667&OS9ppblY&pT~u2tjDEfY$&19yy$%AU#Lv z{+JHicP!o##j6qD$1@mY9xMw6___Q51W)kA-b{vS{pvI;t|&z#_98_a>X$7?<%ld} z+wk1l1&hV$@5Z#S1iG_^Ksyh4bjZ7|tfPsh90(!Z#Jj0P5>+fE<)T z6`NIB8d1CC5f&5_WEgjL}0u*d&A`AxrZR!9xztWr*JU>r}FaI2C z`ID61^`c4%&r}yZ_QOOBzi3v`uXH!OsGAA%hOp>yTDO5++P(+5%V0yD z&o+2*UOAAVM(r8ULhbg5o|DO=*jHh7Ob~oxJ#gu#8}6j&$Dncb z6nl3{Y%hCKYs+(QmDkyuYjJOsBa)+G^BfHbJ6<0xO7ZQj2aE>5M5}(m^XJc>a%sHI zvzuOm%VoPnv=q1duEYN(k+rqiu`L8gk%Ss`wp->Jm|rL^qcGrAw^x{g=j}Zbj`xUaR}MzU^%y+*0trKc4T#qSPbo&sT>5 z^!%~;d70NZD*#KwWU0v=4(hi4lmC2F3}DMy{dEu{+k?19-2wI=1q+Pnq|s<4ugec9 z+r%%i2Pbay3HRI-*WU~ot7&VeKrIQu?g*D6{CYFfJ_?RinspvY8LoA(-MF;Qql`|A z1BbU8wlVo&m=x)u&s2~4@c=!BO){@S>sxM&Q*3FF&VMUbiU99Gmijj!dGqvNL&kW; zh3gz6=7dK2b8Uo&YqVO-T4oK!e)anY$zTdA=j*UO;z*!*_xP?nO8%q1z5?}C*t)wn z$?!nAqiK1f=o;WQ!8CrNUhEdT#8c+6Q;G|g!l3wGdu_AsK!!YnbW1EQF7d9@X(N!e zlG16u*%&D00HRhf&MD&86U48InhHDwJ8qtzP!}eN3_DlVcdOKGm==FF-Y{ZO{P9_83d%vuQhv2Ji(mSBsZ$2LhqY z%{FIls`eui#J;uAwxfP1uByyxwo$gRb@qBvdYMzxhe|Tb_heyFhc<7P&L3BrjctUO zd?m)L`?9p-bLN)HV^YsmVFrv@0%9?q+e2Mvo?iKmT>JVA5tOx=1!@kCIBoZF>{;rM6t_e%^`8i6()e#wqjL4BDLo~X9_kuO| z4%qR4bSGTuw6^l-*&Rd$y6-UxS*hp8^Coq-NEI!|IVtZoW%5cmVi$&ycwB3WlL$AU zEgc6|JkRaHrcI^ik zW6vD1%QW#tn^{B}fFIZ`{XUSGG$S0-X8`Pk)K|D=w>LwA!6U6Ucn+QODVR(}ua?)- z&aLv>R6IUkV0>5(aVWO>C$oW6CeHb-`_7sgt^dNBx*V5`+l4spVMklhWJbmPP9^W+ z?u+?@Uy|n!;RM|o-G8~BE2kbO?#VX4Es)JWZ>8iUr@sb74EMWO;_OQleAAW0c^#YLzc18xiZCOjxrlHy?c^}%A}*te=qJ$5|V@BYxOi; z^|eZ#-0K}3-Xj+Pc`ID{szOu%O=g*Sy)5tUc==71Sie z$h>yBox^TZb`Q1rjo0k&)H4tn42;7%(=4q7e{{anAoALJhFR~-QDfraDu=NcV9z!b zsEkE}5=Yn&8?%_V-*mC$;L`4Kn7IC%$RNYvwYH*-F6}=JMO$vNduP?w%P&_voFU?= zxDc+hyYmMlZ|!=nwS5%tN0;U2b5BBTJP$%`w^IpEpSycccaJDIkn^R+2+thQ9v86* z@Xti&%Z5pu{w&`O9`BK!J`~V&v3*=Pi^PWZtO=(U(Es|Vq^!)gVu`Q*;qTF?&_0${ z{4=qq0~Edj-=sB1h3j;o@EO`t<8O`%Ta_kJ6%xMwc+&#+E*N^QC#3+Kclk&tqz2>$Pp+y8O>1J`?=M;BOWNf+PY9F3#w<%48UIm_Pu2XB0~=TvfBpE0VC_9 zrNxg0X0$9%NZHu+(14#xYYrC(1FrxQZ9(_wMgWwFy{vjcv2&D#g0Yq$!}568{;37*?& zQMsv^h$iU06oz1BCET&caKf%k(R239X9VB2oQ)L<0%+qTdSjLIBe-20shl>SX*yEx zTt#Nh8+!+0k_qUjlTTGpL_*-0%f%-+^yTYWaI#33(+elsTKlc=TPq?5yt zI$df7x7P+%0oF3_O6vK*^zl+4M)AzE!qD;?oVr$!DY4m^#(xZUElT)3N^={$Kc%F9 zN}K*ADx*!M4F)ph)iZy~!8+`=ICw_Q7gm*708dOMliccM6-Gn+o1C z04Qg5bw53IKc42?e{~gBsWpddaMQCg9%t~Xt`(57-ngBET{5qX<^yw8l+jLjj@o-O z*wj62H`#)+920H%SVpQSul_ZLC^S*?9Twp=)+11l2TnjmZWqPQ#d+IzJvGY+PB$++ zkl6hXEuRrhz1ewyJ$w(`b4}xb!?WLtMSd1)VDr{js(NpqY1(3II-=|x1b<2kY3b0@ zpmLf*bX*_(S0j zw`b{tvylB!X1V4gb>kW?M#8QRc#Z^&CXk_jBk6}&?D$Q6t%tPuir#u%ocHdTri$%| z#ybJVg*zL8ufH|oP5d;};F_saak^ll#p(sZllI92ME5C?&9&rbehG`v@6KgNcO#x? 
zxoN*AWzDSfA=|P&-Y_@RA0!99Z!UW{4J9et$MV6?3}b4V>L19_5QGl*>ps9bs_Uu2iXrO@(;fomw}qy{71GVgFvn78tnZr_U4nB zLlZhIjzB$%Lo#1ER?SLNGhy7vizyZLX!kCq)`bX3XBm#m+g3VZ3dlm;P=ex}4kACeXV9`5pB zr_Jre>WZQ&X33#!MPwm)MAvPBm89!SKkPkFi-D5!ZHwd}g3~|2_biUeWA@ZbF(g(Q zLhH@dSKJDouFHRdbdWu`qqt@TU#ozc{{W&WuxG0~g4iBnA2?JA$y8|3smImq7RD?N zUpF7%2r#AvMiLj&&^H|92YUU>d2gPhSEY=d%;bL*h-35?)H8SCaMXsd7LXc`ACrIJ zn2p6t>K8t9XG6G=ZB80h1IKJjf_~_5PnUV1^o}z2`I{~`N_g2HDo!0znBCF&!-xtR zxKnX$I|^A1=rae{^JNV&`K6R}OYCi7QLo(ew}#FG;!OOsjy$OHO=c=r8SwhQ6eof= zM)h1%G!;HMH#06?*y4F7SyGM_HC}&1G`DWZV5-5KaBjoh<($+)SwnRGAtGHmUFpXO zw%T{HD7$25SqU#uK9q=yCV^qHC(twji{2{0RieTT!9fdj+xR7w2#A}c<2(WKAq)_m z+vy(;C_y#`;2n7rl%R-dXmsqEAnJ7xMLz?{%e}rp$o&#)9{sPKS9=OZWEMk6)mhpJ za1H8{D(YS&6X-kS#aQS=#(uqlTdZSHCc9T`&A{T>;3@fb0oX~?0mWD(Z~;Ksv7+6G zz*QL_8;qfI1xOfYEB@FHH=79=3Mv7KQ5+M(=(nT(EruC?7(Bl%m46XoPTG|*AdA?z z{3W+Nd_-FI=366|9iRf!5h&gF;0$nQy9=vv+&mk0-j2JdYcr9-PQfwK8H-ik?dlHw z@c(iyW0+<}N813HBHtJ6>HpoqlsW5`{%UKR*VN?pyEgGP1BsD<6GtsB!-*M{jr?4a z$-G~B)5X29#2!KR9a&}CYIVEGw*^F3mmRkDm_?VQz+NbVRb#l7dnKev7G(mCy>aqG zRC*m=^cBqUIM)D_h-KdW0-7*aEScl^#Gq=X1+hw=!X}{jMQXRG+f?Z3#1Yp-N`Q+$vGGdW|yV7Pgp5sydHf9Oun%;Y~ptOiV zu$F5SGXXhK(o}(cLYV#M;K9h_!p*?_X(vOl>`2i9X%)Gg_u!I8+<%sXVKVxy+Nc}N z|~@4NOY@O5?(t?{dF6+jvr9oQf8Vto&=Z6;CY;5@@M=Z zer{a>)Fe!UVvwYz*44{#Ov;5u&iyE}U4wu14@u5s;Vs7Ao44_a9$RmAwn7HR;@a$~ z?=D|8Ks#P$Wr!e5H#qMF@sF*az#ugk2KpCoErF{ID726^clY*!)xhB$MwFk)?E0y` zMqbTHBZWyc8X7M~M)CIFW_%xhze}3~+nvYE!JtPjd7j+^{u7FA)f1K&Gpb|v_YznA z)p9?F<~t9BtT9hH=2L zbfv{}e<=J9X&$rFoQpm805YoypMhD2j%!uSIIg}ukLNdo8Bd$i^p5v^91$Nr@+|b7 zKO-pS%bR(}9d-Xr184L$xB>o|o(~)Y>W}8T@Q(ExLuMf>;)nk8!xIAe}htw1Zsd!oi^)ON)!B zW=?A)l)j7`!v6Jdesue1WqJ=#G-fG?#bfOAw)sX@|5ZJoCrgW@p!ac~VszifaL7Is zA;+39p%!gqD1bk%R?uy(<)W#EyYR>vna*}DNqevc!R(#%zZ-Z3K6@5=-gq*4?vGj{ zr%82NQ<0jEy?VaYuqy*=f};dd8!Wdy;TB6#75C>i7b-6u$!LX0F_#=K)@&hjHV;<6 zy89+uFZyt$#{iFaX4SIGn7c$TGL7XzetL&v+Tq5JM{i04*oCrIW!`}7ahXySzC6ku z{Y4daDZS}J1)YQcu@O4{>szAXEI=Fo$6l2K_k27kAdhsx>mqtR^Bt}pOFT#iMGX8$ z&?W$TB|V_}!3shY;H>|^sK*N8EX}p}NQn#QFBtGSm4}Rs3?SaPSEam>OwvL5Z`&F_ z1>iIZW?&PUvDZKCYP7BaxMbo3Q`mH(_bZRn3PJ`}y8+Q6h)`cYXiwM=<(`iHSLwE|4M-MgW6(^*Kvf4tI&^(mHNKz#bhP{g!J1 z=&71lX43^c>-H-K5_ue{2=%>K0dUk3O|^0=rkF1J=_3m(WsJ1oWqBkkh~EK_@h(6K z+6E|`9eXzg3~^+Bypc=cl(z;q3V6)niWxmm`460&Sl7!>g+-;xt@oSEamq3efQmp3 zVCe7AR4Ju0{?D+u&o4l43T>}_EcnI-wa_O(;;wsG*F=pW=TR1bBLP6A`T+WP=p|s$ zs|2{5(16z3MHdplK~8h6U%3Wlo2`CLg$0=z_{p{OBi8~>$73f4MhFvi6_5#x6YBE< zMek4~bOJl@K;z@%Z1Y1e{^WyZ2EbEdn4FYnW${0a9mh1aa^VE-zKL#+oBEG|FuwTz z9z*OifcyclJ4}E~G8b@9f~c-%RSKn~*3nbwLC%-bwdb-+6jbR)-bbDQ^C$So=c=*Z z`!t=U%>JUx%>Gh7jrS#7!a%o1>#Z7Sh2iOE#Qjsy(vl8CZr39CPb9RwoYgh$M~DO% zCZ6Jc1%>}+Bj$cb(YId!OL7ShnOZ>8NU2;i+?*qM0fquRwALLaRW4yvrKX`#30Q>4 zJ@;AR0Lr|)m>2?h+kX~V?5F$gI=fCgDITNrpKfPhY$O4qnNS~hMWR&xFeQwyfUX(p zvS2-1^HVt#5TKu}Q#@%tuFAn(E&B=q@I;Q@ND69FES1ln`vJff{Quf+AX(qNCiiOp zbJSIE$ok)3? zllLDlsNnxgq+v~y|6gBG7S7SKDE!ZJ~}fD(B;F3?xA*_n6dHkxTuKN|D=SH zJ%^2$d+o1Rv3>snwodRslhe}Sabp3vbnfNr9*5#sCFaDjA^Iqc+0A;hmF#aiS{z_c~8i5f- z-&+;1+y4#^hvTI4CSXLXupmF+_zkt=9t1pbl9rZxNl*U3M-v3P%@_&*tfov7baP32 z`wkXbyc@?kLM+JQI3yr1UWX-YJk? 
z@Uj@mBhPwBZ0*1n8Z2|;ibSo*`xe|zm z`@&?*__L=cUyb!yHN+b;0RXq0wWCms&dw&%YwPOj+Swdk0&r~y=c(ywf@Vo;YetYC zg{!KmF-kW5r27yT8@uAth<;hG!xY`O(f1TB`ol4TXC4!XYi5Ax8z4z3%lA|iceA~r z5y73SP>-YESC0$M&dzYEs;Z-tld7t{fQYytKOYV>r{fM(nA_M08vex<{n7pr8;FP; z$Rq&n=F~Jk*9xn-imVKf1kss;u`{ngsWZHAGe&v=SUp_b{po-^u$R%1v$!}pPXU8; z1+d)UN?UHff)ykH#hUcA8Gy6Ru+t7YCES;v9`L*KXRsUvS%;*creWb%6r_j~!MQV&uvvxgBZ zmOetgev)X#DO)-~xDVhoeR1ZuEe?FH0y6eZAx%Urpt$L7|y#@J?} zO)E3U%)J`Pq#aOXqbYYLFf^knr75=XGAX6UgWdh#OGlp@A~Q%wXJg-C3QzRJ=MTg; Qz=0oW2?g;|Q3JpK1H7JbJpcdz From 0d16f5a25d5e45c7329e775533eab8192320c5c7 Mon Sep 17 00:00:00 2001 From: "Anthony D. Blaom" Date: Mon, 22 Aug 2022 15:54:53 +1200 Subject: [PATCH 22/24] fix a broken link to close #204 --- examples/mnist/generate.jl | 2 +- examples/mnist/mnist.ipynb | 1873 ---------------------- examples/mnist/mnist.md | 339 ---- examples/mnist/notebook.ipynb | 260 +-- examples/mnist/notebook.jl | 4 +- examples/mnist/notebook.unexecuted.ipynb | 4 +- 6 files changed, 152 insertions(+), 2330 deletions(-) delete mode 100644 examples/mnist/mnist.ipynb delete mode 100644 examples/mnist/mnist.md diff --git a/examples/mnist/generate.jl b/examples/mnist/generate.jl index 6a36a764..fe74dba2 100644 --- a/examples/mnist/generate.jl +++ b/examples/mnist/generate.jl @@ -1,4 +1,4 @@ # Execute this julia file to generate the notebooks from ../notebook.jl joinpath(@__DIR__, "..", "generate.jl") |> include -generate(@__DIR__, execute=true, pluto=false) +generate(@__DIR__, execute=false, pluto=false) diff --git a/examples/mnist/mnist.ipynb b/examples/mnist/mnist.ipynb deleted file mode 100644 index c5f3a9b9..00000000 --- a/examples/mnist/mnist.ipynb +++ /dev/null @@ -1,1873 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Using MLJ to classifiy the MNIST image dataset" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[32m\u001b[1m Activating\u001b[22m\u001b[39m environment at `~/Dropbox/Julia7/MLJ/MLJFlux/examples/mnist/Project.toml`\n" - ] - } - ], - "source": [ - "using Pkg\n", - "const DIR = @__DIR__\n", - "Pkg.activate(DIR)\n", - "Pkg.instantiate()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Julia version** is assumed to be ^1.6" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "┌ Info: Precompiling MLJ [add582a8-e3ab-11e8-2d5e-e98b27df1bc7]\n", - "└ @ Base loading.jl:1317\n", - "┌ Info: Precompiling Plots [91a5bcdd-55d7-5caf-9e0b-520d859cae80]\n", - "└ @ Base loading.jl:1317\n" - ] - } - ], - "source": [ - "using MLJ\n", - "using Flux\n", - "import MLJFlux\n", - "import MLJIteration # for `skip`\n", - "\n", - "MLJ.color_off()\n", - "\n", - "using Plots\n", - "pyplot(size=(600, 300*(sqrt(5)-1)));" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Basic training" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Downloading the MNIST image dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "import MLDatasets: MNIST\n", - "\n", - "ENV[\"DATADEPS_ALWAYS_ACCEPT\"] = true\n", - "images, labels = MNIST.traindata();" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In MLJ, integers cannot be used for encoding categorical data, so we\n", - "must force the labels to 
have the `Multiclass` [scientific\n", - "type](https://juliaai.github.io/ScientificTypes.jl/dev/). For\n", - "more on this, see [Working with Categorical\n", - "Data](https://alan-turing-institute.github.io/MLJ.jl/dev/working_with_categorical_data/)." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "labels = coerce(labels, Multiclass);\n", - "images = coerce(images, GrayImage);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Checking scientific types:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "@assert scitype(images) <: AbstractVector{<:Image}\n", - "@assert scitype(labels) <: AbstractVector{<:Finite}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Looks good." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For general instructions on coercing image data, see [Type coercion\n", - "for image\n", - "data](https://alan-turing-institute.github.io/ScientificTypes.jl/dev/#Type-coercion-for-image-data-1)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", 
"\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "" - ], - "text/plain": [ - "28×28 Array{Gray{N0f8},2} with eltype Gray{FixedPointNumbers.N0f8}:\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) … Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) … Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) … Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " ⋮ ⋱ \n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) … Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) … Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)\n", - " Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0) Gray{N0f8}(0.0)" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "images[1]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We start by defining a suitable `Builder` object. This is a recipe\n", - "for building the neural network. Our builder will work for images of\n", - "any (constant) size, whether they be color or black and white (ie,\n", - "single or multi-channel). The architecture always consists of six\n", - "alternating convolution and max-pool layers, and a final dense\n", - "layer; the filter size and the number of channels after each\n", - "convolution layer is customisable." 
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "import MLJFlux\n", - "struct MyConvBuilder\n", - " filter_size::Int\n", - " channels1::Int\n", - " channels2::Int\n", - " channels3::Int\n", - "end\n", - "\n", - "make2d(x::AbstractArray) = reshape(x, :, size(x)[end])\n", - "\n", - "function MLJFlux.build(b::MyConvBuilder, rng, n_in, n_out, n_channels)\n", - " k, c1, c2, c3 = b.filter_size, b.channels1, b.channels2, b.channels3\n", - " mod(k, 2) == 1 || error(\"`filter_size` must be odd. \")\n", - " p = div(k - 1, 2) # padding to preserve image size\n", - " init = Flux.glorot_uniform(rng)\n", - " front = Chain(\n", - " Conv((k, k), n_channels => c1, pad=(p, p), relu, init=init),\n", - " MaxPool((2, 2)),\n", - " Conv((k, k), c1 => c2, pad=(p, p), relu, init=init),\n", - " MaxPool((2, 2)),\n", - " Conv((k, k), c2 => c3, pad=(p, p), relu, init=init),\n", - " MaxPool((2 ,2)),\n", - " make2d)\n", - " d = Flux.outputsize(front, (n_in..., n_channels, 1)) |> first\n", - " return Chain(front, Dense(d, n_out, init=init))\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Note.** There is no final `softmax` here, as this is applied by\n", - "default in all MLJFLux classifiers. Customisation of this behaviour\n", - "is controlled using using the `finaliser` hyperparameter of the\n", - "classifier." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now define the MLJ model. If you have a GPU, substitute\n", - "`acceleration=CUDALibs()` below:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "┌ Info: For silent loading, specify `verbosity=0`. \n", - "└ @ Main /Users/anthony/.julia/packages/MLJModels/w0uSt/src/loading.jl:168\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "import MLJFlux ✔\n" - ] - }, - { - "data": { - "text/plain": [ - "ImageClassifier(\n", - " builder = MyConvBuilder(3, 16, 32, 32),\n", - " finaliser = NNlib.softmax,\n", - " optimiser = ADAM(0.001, (0.9, 0.999), IdDict{Any, Any}()),\n", - " loss = Flux.Losses.crossentropy,\n", - " epochs = 10,\n", - " batch_size = 50,\n", - " lambda = 0.0,\n", - " alpha = 0.0,\n", - " rng = 123,\n", - " optimiser_changes_trigger_retraining = false,\n", - " acceleration = CPU1{Nothing}(nothing)) @050" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "ImageClassifier = @load ImageClassifier\n", - "clf = ImageClassifier(builder=MyConvBuilder(3, 16, 32, 32),\n", - " batch_size=50,\n", - " epochs=10,\n", - " rng=123)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can add Flux options `optimiser=...` and `loss=...` here. At\n", - "present, `loss` must be a Flux-compatible loss, not an MLJ\n", - "measure. To run on a GPU, set `acceleration=CUDALib()`." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Binding the model with data in an MLJ machine:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "mach = machine(clf, images, labels);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Training for 10 epochs on the first 500 images:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "┌ Info: Training Machine{ImageClassifier{MyConvBuilder,…},…} @745.\n", - "└ @ MLJBase /Users/anthony/.julia/packages/MLJBase/AkJde/src/machines.jl:354\n", - "┌ Info: Loss is 2.291\n", - "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/P7yEO/src/core.jl:141\n", - "┌ Info: Loss is 2.208\n", - "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/P7yEO/src/core.jl:141\n", - "┌ Info: Loss is 2.049\n", - "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/P7yEO/src/core.jl:141\n", - "┌ Info: Loss is 1.685\n", - "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/P7yEO/src/core.jl:141\n", - "┌ Info: Loss is 1.075\n", - "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/P7yEO/src/core.jl:141\n", - "┌ Info: Loss is 0.628\n", - "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/P7yEO/src/core.jl:141\n", - "┌ Info: Loss is 0.4638\n", - "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/P7yEO/src/core.jl:141\n", - "┌ Info: Loss is 0.3611\n", - "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/P7yEO/src/core.jl:141\n", - "┌ Info: Loss is 0.2921\n", - "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/P7yEO/src/core.jl:141\n", - "┌ Info: Loss is 0.2479\n", - "└ @ MLJFlux /Users/anthony/.julia/packages/MLJFlux/P7yEO/src/core.jl:141\n" - ] - } - ], - "source": [ - "fit!(mach, rows=1:500, verbosity=2);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Inspecting:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(training_losses = Float32[2.3242702, 2.2908378, 2.2082195, 2.0489826, 1.6850395, 1.075116, 0.6279608, 0.4638124, 0.36110216, 0.29205894, 0.24789181],)" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "report(mach)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(chain = Chain(Chain(Chain(Conv((3, 3), 1=>16, relu), MaxPool((2, 2)), Conv((3, 3), 16=>32, relu), MaxPool((2, 2)), Conv((3, 3), 32=>32, relu), MaxPool((2, 2)), make2d), Dense(288, 10)), softmax),)" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "chain = fitted_params(mach)" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "16-element Vector{Float32}:\n", - " 0.011789989\n", - " 0.055812832\n", - " 7.7648336f-5\n", - " 0.013380578\n", - " -0.0019681947\n", - " 0.01157092\n", - " -0.00053028984\n", - " -0.00027832793\n", - " 0.036143202\n", - " 0.06364283\n", - " -0.00057194557\n", - " -0.0040702056\n", - " 0.0035979813\n", - " 0.0031175867\n", - " 0.027636126\n", - " 0.051516373" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "Flux.params(chain)[2]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Adding 
20 more epochs:" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "┌ Info: Updating Machine{ImageClassifier{MyConvBuilder,…},…} @745.\n", - "└ @ MLJBase /Users/anthony/.julia/packages/MLJBase/AkJde/src/machines.jl:355\n", - "\u001b[33mOptimising neural net:100%[=========================] Time: 0:00:05\u001b[39m\n" - ] - } - ], - "source": [ - "clf.epochs = clf.epochs + 20\n", - "fit!(mach, rows=1:500);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Computing an out-of-sample estimate of the loss:" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0.36405802f0" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "predicted_labels = predict(mach, rows=501:1000);\n", - "cross_entropy(predicted_labels, labels[501:1000]) |> mean" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Or, in one line:" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "┌───────────────────────┬───────────────┬────────────────┐\n", - "│\u001b[22m _.measure \u001b[0m│\u001b[22m _.measurement \u001b[0m│\u001b[22m _.per_fold \u001b[0m│\n", - "├───────────────────────┼───────────────┼────────────────┤\n", - "│ LogLoss{Float64} @348 │ 0.364 │ Float32[0.364] │\n", - "└───────────────────────┴───────────────┴────────────────┘\n", - "_.per_observation = [[[5.73, 0.138, ..., 0.00555]]]\n", - "_.fitted_params_per_fold = [ … ]\n", - "_.report_per_fold = [ … ]\n", - "_.train_test_rows = [ … ]\n" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "evaluate!(mach,\n", - " resampling=Holdout(fraction_train=0.5),\n", - " measure=cross_entropy,\n", - " rows=1:1000,\n", - " verbosity=0)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Wrapping the MLJFlux model with iteration controls" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Any iterative MLJFlux model can be wrapped in *iteration controls*,\n", - "as we demonstrate next. For more on MLJ's `IteratedModel` wrapper,\n", - "see the [MLJ\n", - "documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The \"self-iterating\" classifier, called `iterated_clf` below, is for\n", - "iterating the image classifier defined above until one of the\n", - "following stopping criterion apply:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "- `Patience(3)`: 3 consecutive increases in the loss\n", - "- `InvalidValue()`: an out-of-sample loss, or a training loss, is `NaN`, `Inf`, or `-Inf`\n", - "- `TimeLimit(t=5/60)`: training time has exceeded 5 minutes" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "These checks (and other controls) will be applied every two epochs\n", - "(because of the `Step(2)` control). 
Additionally, training a\n", - "machine bound to `iterated_clf` will:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "- save a snapshot of the machine every three control cycles (every six epochs)\n", - "- record traces of the out-of-sample loss and training losses for plotting\n", - "- record mean value traces of each Flux parameter for plotting" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For a complete list of controls, see [this\n", - "table](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/#Controls-provided)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Wrapping the classifier" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Some helpers" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [], - "source": [ - "make2d(x::AbstractArray) = reshape(x, :, size(x)[end])\n", - "make1d(x::AbstractArray) = reshape(x, length(x));" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To extract Flux params from an MLJFlux machine" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [ - "parameters(mach) = make1d.(Flux.params(fitted_params(mach)));" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To store the traces:" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Any[]" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "losses = []\n", - "training_losses = []\n", - "parameter_means = Float32[];\n", - "epochs = []" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To update the traces:" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "update_epochs (generic function with 1 method)" - ] - }, - "execution_count": 20, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "update_loss(loss) = push!(losses, loss)\n", - "update_training_loss(losses) = push!(training_losses, losses[end])\n", - "update_means(mach) = append!(parameter_means, mean.(parameters(mach)));\n", - "update_epochs(epoch) = push!(epochs, epoch)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The controls to apply:" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [], - "source": [ - "save_control =\n", - " MLJIteration.skip(Save(joinpath(DIR, \"mnist.jlso\")), predicate=3)\n", - "\n", - "controls=[Step(2),\n", - " Patience(3),\n", - " InvalidValue(),\n", - " TimeLimit(5/60),\n", - " save_control,\n", - " WithLossDo(),\n", - " WithLossDo(update_loss),\n", - " WithTrainingLossesDo(update_training_loss),\n", - " Callback(update_means),\n", - " WithIterationsDo(update_epochs)\n", - "];" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The \"self-iterating\" classifier:" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "ProbabilisticIteratedModel(\n", - " model = ImageClassifier(\n", - " builder = MyConvBuilder(3, 16, 32, 32),\n", - " finaliser = NNlib.softmax,\n", - " optimiser = ADAM(0.001, (0.9, 0.999), IdDict{Any, Any}()),\n", - " loss = Flux.Losses.crossentropy,\n", - " epochs = 30,\n", - " 
batch_size = 50,\n", - " lambda = 0.0,\n", - " alpha = 0.0,\n", - " rng = 123,\n", - " optimiser_changes_trigger_retraining = false,\n", - " acceleration = CPU1{Nothing}(nothing)),\n", - " controls = Any[Step(2), Patience(3), InvalidValue(), TimeLimit(Dates.Millisecond(300000)), IterationControl.Skip{Save{Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}}, IterationControl.var\"#3#4\"{Int64}}(Save{Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}}(\"/Users/anthony/Dropbox/Julia7/MLJ/MLJFlux/examples/mnist/mnist.jlso\", Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}()), IterationControl.var\"#3#4\"{Int64}(3)), WithLossDo{IterationControl.var\"#16#18\"}(IterationControl.var\"#16#18\"(), false, nothing), WithLossDo{typeof(update_loss)}(update_loss, false, nothing), WithTrainingLossesDo{typeof(update_training_loss)}(update_training_loss, false, nothing), Callback{typeof(update_means)}(update_means, false, nothing, false), WithIterationsDo{typeof(update_epochs)}(update_epochs, false, nothing)],\n", - " resampling = Holdout(\n", - " fraction_train = 0.7,\n", - " shuffle = false,\n", - " rng = Random._GLOBAL_RNG()),\n", - " measure = LogLoss(\n", - " tol = 2.220446049250313e-16),\n", - " weights = nothing,\n", - " class_weights = nothing,\n", - " operation = MLJModelInterface.predict,\n", - " retrain = false,\n", - " check_measure = true,\n", - " iteration_parameter = nothing,\n", - " cache = true) @673" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "iterated_clf = IteratedModel(model=clf,\n", - " controls=controls,\n", - " resampling=Holdout(fraction_train=0.7),\n", - " measure=log_loss)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Binding the wrapped model to data:" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [], - "source": [ - "mach = machine(iterated_clf, images, labels);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Training" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "┌ Info: Training Machine{ProbabilisticIteratedModel{ImageClassifier{MyConvBuilder,…}},…} @194.\n", - "└ @ MLJBase /Users/anthony/.julia/packages/MLJBase/AkJde/src/machines.jl:354\n", - "┌ Info: loss: 2.2247427\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n", - "┌ Info: loss: 1.968148\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n", - "┌ Info: Saving \"/Users/anthony/Dropbox/Julia7/MLJ/MLJFlux/examples/mnist/mnist1.jlso\". \n", - "└ @ MLJSerialization /Users/anthony/.julia/packages/MLJSerialization/NEVjq/src/controls.jl:39\n", - "┌ Info: loss: 1.2209109\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n", - "┌ Info: loss: 0.59409326\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n", - "┌ Info: loss: 0.46833506\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n", - "┌ Info: Saving \"/Users/anthony/Dropbox/Julia7/MLJ/MLJFlux/examples/mnist/mnist2.jlso\". 
\n", - "└ @ MLJSerialization /Users/anthony/.julia/packages/MLJSerialization/NEVjq/src/controls.jl:39\n", - "┌ Info: loss: 0.4241403\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n", - "┌ Info: loss: 0.40840897\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n", - "┌ Info: loss: 0.4047549\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n", - "┌ Info: Saving \"/Users/anthony/Dropbox/Julia7/MLJ/MLJFlux/examples/mnist/mnist3.jlso\". \n", - "└ @ MLJSerialization /Users/anthony/.julia/packages/MLJSerialization/NEVjq/src/controls.jl:39\n", - "┌ Info: loss: 0.4097773\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n", - "┌ Info: loss: 0.4203993\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n", - "┌ Info: loss: 0.43216416\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n", - "┌ Info: final loss: 0.43216416\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/train.jl:29\n", - "┌ Info: final training loss: 0.04336384\n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/train.jl:31\n", - "┌ Info: Stop triggered by Patience(3) stopping criterion. \n", - "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/stopping_controls.jl:77\n", - "┌ Info: Total of 22 iterations. \n", - "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/Twn0E/src/core.jl:35\n" - ] - } - ], - "source": [ - "fit!(mach, rows=1:500);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Comparison of the training and out-of-sample losses:" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAmQAAAF+CAYAAAAstAbcAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOzdd3hUZd7G8e85k94hEEILobcQQhLp0kQRROmIgkpRcAUVBVFZe0FWQUHRtawFRFwlCNgQqSLSQXqVIrDUEBJSCElm5v0jrygSIANTUu7Pde21kCm5xbObm98853kMu91uR0REREQ8xvR0ABEREZHSToVMRERExMNUyEREREQ8TIVMRERExMNUyEREREQ8TIVMRERExMNUyEREREQ8rEQWsqysLDZs2EBWVpano4iIiIhcUYksZDt37iQhIYGdO3d6OkqpkpaW5ukIUgToOhBdA6JrwHElspCJZ1itVk9HkCJA14HoGhBdA45TIRMRERHxMBUyEREREQ9TIRMRERHxMC9PBxARESlpDh48SHJysqdjeExaWhqhoaGejuF25cqVIyoq6qpeq0ImIiLiRAcPHqR+/fraeqkUCggIYMeOHVdVylTIREREnCg5OZmsrCymT59O/fr1PR1H3GTHjh0MGDCA5ORkFTJnsdvtWJOP4lW+kqejiIhIMVW/fn3i4+M9HUOKCS3qL0D29jUcG3cvKTNeJ+/0SU/HERERkRJOhawAfnXjCet5P9nbV3Ps5cGkfv0fbJnpno4lIiIiJZQKWQGOnvPiUbrgN+Yjgm/oS+bybzn60iDSF83EnnPO0/FERESkhFEhK8DOVDtf7rMT+60Pa2P7E/n0xwQktCftu0849vIQMlfNx65jIURERMRJVMgK0KGyyZZeXtQLNbjxeyuPbgnBr/sDRD75AT41GnL6v29w/LV/cHbrSux2u6fjioiISDGnQnYJVYMMfuxiYVILk/d32oifncdGIgm/50kiHn0TS3BZTv3neU6+OYpz+7Z5Oq6IiIgUYypkl2EaBg/HWFjfw4sAL2gx18qLG6yYVWpT7oFXKHf/y9hzznHyzVEk/+d5co/97unIIiIiLpGamsqrr7561a/fu3cv8fHxNGnShI8//tiJya7egQMHKFeunKdjACpkhdKgjMHK27x4Is7kuQ02Wn9j5bcz4FcvgYhRb1H2rsfJPbqf4//6Byn/fYO8VG2VISIiJcu1FrKkpCRatGjBr7/+yqBBg5yYrGRQISskH4vBi4kWlt9q4VS2nbiv8nh3uxUMg4CE9kQ++QFhPYaRvWUVx14eQurXH2LL0lYZIiJSNP3www/Ex8cTGxtL27Zt2b59O0uXLiUxMfH8c7Zu3Up0dDQA999/P6mpqcTFxV3wnL/KyMhg8ODBxMTEEBMTw/PPPw/AtGnTeOONN5g5cyZxcXFs3779gtetWrWKhIQE4uLiiImJ4d///jcAM2bMoFmzZjRp0oS4uDi+//7786+Jjo7mmWeeoWXLlkRFRTF9+nQmT55M06ZNqVmzJkuXLgX+nIKNHj2aZs2a0bBhQxYvXlxg/rVr19KhQwcSExOJj49n1qxZV/VnezW0U7+DWlQw+bWnwehVNv7xi42vD9r5sI2FigHeBLXpRkDTjqQv+YqMJbPIXDmPkBtvJ6j1bRg+vp6OLiIiRcjRLDtHXXDcZcUAqBhgXPY5J06cYMCAASxZsoRGjRrx2Wef0bdvX6ZMmXLJ17z77rskJiaycePGSz7nxRdfJCcnh82bN3P27Flat25NgwYNuPvuu9m3bx8ZGRlMmDDhote98sorjBo1ijvvvBOA06dPA9CpUyfuuOMODMPgwIEDtGzZkt9//x1vb28Azp49y4oVK1i7di1t27ZlwoQJrFmzhi+//JKxY8eyYsUKAE6dOkWjRo2YMGECq1atonv37uzdu/eCDKmpqQwbNozvvvuOihUrkpycTEJCAq1atSIyMvKyf57OoEJ2FYK8Dd693sKt1QyGLLPSKCmP96630Ku6iekXSGjnuwhqdQtnfpxB2refkLFsLiGd7ybguhswTIun44uISBHw3g4bz2+wOf19n403eS7h8j9rVq9eTVxcHI0aNQKgf//+DB8+nKNHj17T9164cCGTJ0/GNE0CAwO5++67WbhwIX369Lns69q3b89LL73Eb7/9RocOHWjdujUA+/fvp3///hw+fBgvLy+Sk5P5/fffqVWrFgC33347APHx8Zw9e5a+ffsCkJCQwL59+86/v4+PD3fddRcAzZs3JzIykk2bNlGp0p9HJK5YsYJ9+/bRuXPn81+z2+3s2rVLhayouyXKZGtvg/uXW+m90MpdtWy81cpCqI+BJaQsZXqPIKhtD858N5XTn79O+pJZhHYdhF/DZhjG5f/2IiIiJduw+ia3VXP+yqGKAVd+jt1uL/DnUNWqVbH+ZZ/N7OzsS77H9u3bz0+0WrVqxdtvv13g+xb0fXr37s1vv/0GwKJFixg5ciS33XYbixYtYuzYscTExPDOO+/Qr18/JkyYQPfu3QEoW7bsBZn8/PwAsFgsF/0+Ly/vsn8Gf89lt9uJjY1l2bJll32dq6iQXaNyfgYzb7Aw/Tc7I36xsvRoHlPbWWhfKf9/ZN7lKxM+cCw5B3uR9s1HnPrPc/jUaEjorUPwrd7Aw+lFRMRTKgYYhSpPrtCiRQuGDBnCjh07qF+/Pv/973+pUqUK1atXZ//+/Zw6dYrw8HA+/fTT868JCQkhKyuLvLw8vLy8aNCgwUUfX95444188MEHtGzZkqysLKZPn86TTz550fdPSkq64Pe7du2ibt261KhRg6pVqzJ27Fgg/6PLP9awTZ8+/fxHmY7Kycnhs88+46677mLNmjUcO3aM2NhYTp788ya8li1bsmfPHhYvXkyHDh0A2LhxIw0aNMDHx+eqvq8jVMicwDAM7qpt0CbSYOBPVjp8Z+WRGDvjrjPx88pv4D5RdSn3wHjO7VxP2jcfcXLyo/g1aknoLQPxjozy8D+BiIiUJuXLl+fTTz+lf//+WK1WwsLC+PLLL6lcuTKjR48mMTGR6Oho2rRpc/41ZcuWpX///jRq1IjAwEDWrVt30fs+/fTTPPjgg+c/Cu3Tpw+9e/e+Yp633nqLJUuW4OPjg8ViYeLEiQBMnjyZHj16ULlyZVq0aEFU1NX9vAwPD+e3336jWbNmZGRkMGPGDAIDAy8oZGXKlOGbb77hscce45FHHiE3N5eoqCjmzJlzVd/TUYa9BG41v2HDBhISEli/fj3x8fFu/d42u51JW2w8udZG7VD4tJ0XTcr9bSxqs3F2w1LSvp+K9fRJApvdRMjNA7CEFY29UK5WSkoKZcuW9XQM8TBdB1LarwFP/gySix04cIDExESSk5Nd+n2u9d+7tr1wMtMweDQ2fzNZLwOazc3jlY1WrLY/e69hmgQkdiBy7AeEdh/K2S0rOPryYNK++QhbVoYH04uIiIgnqJC5SExZg9XdvRjVyOSpdTbafGtl75kLh5GGlw/BbbsT+dTHBLfvRc
bPczn60kDSl8zCnpvjoeQiIiIlR3R0tMunY86gQuZCvhaDV5paWNbVwrEsO41n5fHBTttFB5Kb/oGEdrmHyH9+REBcW9K++ZBjLw8hc80C7DbrJd5dRERESgoVMjdoFWmysacXd9Q0GPqzldt+tHI86+Kle5bQcMr0fZAKT7yPT7W6nJ4xkeOvDefsttUXlTgREREpOVTI3CTYx+CDNl7MvcnCmpN2YmblMedAwRsCekdUIXzQU5QfOQkzIJhTHzzLybce49z+7QU+X0RERIo3FTI3u62ayZZeXrSqYNBjgZXBP+VxJqfg6ZdvdD3Kj3iV8KEvYj+bwcnJj5L80QvkHj/k5tQiIiLiSipkHhDhbzD7RgsftbEwc3/+2rJlRwuelhmGgX+D64h47G3K9B9N7qHfOD5+GKe/mIw1tegvUhQREZErUyHzEMMwGFTXZHNPL6oGGbT71sqY1VbOWQuelhmmhcDrOhL5z/8Q2u0+zm7+hWMvDyHt24+1VYaIiDjsueeeIyfH8Tv6161bR//+/Qv13Li4OM6ePevw9yiNVMg8rHqIwZJbLIxvajJpq43r5uSx+dSlF/AbXj4Et+tB5FMfE9SuBxk/zeHYS4O0VYaIiDjk+eefL7CQXekMyMTERD777LNCfY+NGzfi7+9/VflKGx2dVARYTIMxjS10qmJy19I8rpuTx0uJJo82MrGYBR9CbvoHEnrLQIJa38qZ+Z+R9s2HZCybS0jnuwlIbI9hWtz8TyEiIo6wpp3CeibF6e9rCSmLJTT8ss+5//77gfzzG03TpFKlStSqVYvdu3dz6NAhtm3bxoABA9i5cyc5OTlERUXx0UcfERERwdKlSxk9ejTr1q07vwv+Aw88wHfffUdaWhpvvvkmXbp0AfI/DUpPTycoKIjo6GgGDRrE/PnzOXr0KEOGDOGpp54C8g8qHzRoEJmZmcTGxrJv3z6eeuopunbt6vQ/n6JKhawIaRxusLa7F0+vs/H4GhvfHLQzrZ2F6OCCSxn8sVXGQwS17cGZ76dyesYEMpYkEXLrYPzqX3fRafYiIlI0ZKz4nvT5hZs0OSK4U39CO9912ee8++67vPfee6xYsYKgoCAGDhzI8uXLWbZsGUFBQQBMmjSJcuXyj/QbP348L7zwAlOmTLnovU6dOkVCQgIvvPACP/zwAw8//PD5QvZ3qamprFixgpMnT1KrVi0GDRpE5cqVueuuu3jkkUcYMGAA69evp2nTptf4p1D8qJAVMb4Wg1ebWbglyuCepVZiZ+XxZksL99Q2LluuvCtUJXzQU5w7sIO0bz7i1PvP4FOzEaG3DsE3up4b/wlERKQwglp2wT+mudPf1xJydeeI9u3b93wZA/jss8/49NNPOXfuHGfPniUyMrLA1wUGBtKtWzcAWrRowd69ey/5Pf5Ye1a+fHlq1KjB/v37CQ4OZuvWrdx5550AJCQkEBsbe1X/DMWZClkR1baiyeZeBg+vtDLoJytzDxi8f72F8v6Xn3j5Rten/IhXyd6xljPffMTJSSPxb9yaMneOwvTV5/giIkWFJTT8ih8tutNfy9jy5cuZMmUKK1asoHz58nz99de88MILBb7Oz8/v/K8tFgtW66VPmPn7c/Py8rDb7RjG5YcOpYEW9RdhIT4GH7f14quOFpYfz99M9pvfC94e46/yt8pomr9Vxp2jObt9DWdcMBYXEZHiKzg4mLS0tAIfO336NCEhIZQtW5acnBzee+89l+UIDQ2lQYMGfP755wD8+uuvbNmyxWXfr6hSISsGelTP30y2aXmD2360ct+yPNIvsZnsXxmmhcCmHQnpeDsZS2eTe+ygG9KKiEhxMGrUKDp06EBcXBwnTpy44LHOnTtTq1Yt6tWrR6dOnYiLi3NplmnTpvHGG2+QkJDA22+/TePGjQkNDXXp9yxqDHsJPCRxw4YNJCQksH79euLj4z0dx2nsdjv/2WXnkZVWKvjDtHYWWkVeuVPbc3M49q9heJWtQLl/vOKysXBKSgply17d2gUpOXQdSGm/BkrqzyBXyszMJCAgAMMw2L59O+3atWPXrl2UKVPG09EK7Vr/vWtCVowYhsF99Uw29fIiMsCgzbdWxq61knOJzWTPv87bh7Ce/+Dc7o2c3fSzm9KKiIgUzi+//EJcXByxsbH069ePDz74oFiVMWfweCHLzs6me/fu1KlTh7i4OG6++WYOHDhQ4HP37NlDy5YtqVOnDk2bNmX79tJ52HbNEINlXS28lGjy2iYbzebmsS3l8qXMv0FT/GKakzb7fWzntGuyiIgUHTfddBObNm1i8+bNbN68+fxdm6WJxwsZwNChQ9m1axcbN26ka9euDB06tMDnDRs2jKFDh7J7927GjBnDkCFD3Jy06LCYBk/GWVjT3YscKyTMyeP1zVZsl/kEOqzH/VizzpD+4+duTCoiIiJX4vFtL/z8/C7YQK558+ZMmjTpouedOHGCDRs28OOPPwLQq1cvRowYwYEDB4iOji7wvUeMGEFoaCg9e/akV69eLsnvadVMWNAOXtrqy6jVPszel8PbidlUCSigmBk+eLe8jfQls8it2xSzXCWnZjl9+rRT30+KJ10HUtqvgUvduSilQ1paGikpf57AUNj1lB4vZH/35ptvcuutt1709UOHDlGpUiW8vPIjG4ZBVFQUBw8evGQhmzJlSqlZUPlOe+hdx8bAn+D6hUFMaWVhQK2L93Wxd72bY1t/wb5oBmXuf9npC/xL80Je+ZOuAynN10BpuztQLhQaGnpV13+R+MjyD+PGjWPPnj28/PLLBT5+UbkoeTeIXpMOlU029/LitmoGdy+10neRlVPZF/4Z5S/wv59zuzZwdvMvHkoqIiIif1VkCtmECRP46quvmDdvHgEBARc9XrVqVQ4fPnz+FHq73c6hQ4eIiopyd9QiLczX4NP2Xsy8wcLiI3ZikvKYd+jCzWT9GzbDr2Ez0ma/h+1ctoeSioiIyB+KRCF7/fXX+fzzz1mwYAFhYWEFPiciIoImTZowffp0AGbNmkV0dPQlP64s7XrXMNna24u4cIMuP1j5+eiFpSysx/1YM1JJX6AF/iIiIp7m8TVkhw8fZtSoUdSoUYP27dsD4Ovry+rVqwHo0qULL7zwAomJibz33nsMHDiQcePGERISwtSpUz0ZvcirGGDw3c0W4r/K47kNNhbd8mf/9ipXkeAb+pK+8AsCmt6Id0QVDyYVESl5duzY4ekI4kbX+u/b44WsSpUql10L9v3335//dd26dVm5cqU7YpUYpmHwTLyFXgutLD9mo/VfdvYPuaEvWesWkTrrHcq5YIG/iEhpVK5cOQICAhgwYICno4ibBQQEUK5cuat6rccLmbhe92iDRmXhhQ02fuzyZyEzfHwJ6zGMU/95nuzNv+DfuLUHU4qIlAxRUVHs2LGD5ORkT0fxmLS0tFJ5t2m5cuWuem27ClkpYBoGzzSx0GeRlZXHbbSo8Gcp82vYHL8G15E653186ydi+vh5MKmISMkQFRVVqm86K+3nmV6NIrGoX1yvZ3WDhmXg+Q0XLu43DIOwn
v/Amn6a9AX/9VA6ERGR0k2FrJQwDYOnm1iYf9jO6hMXljKvcpUI7tCH9MWzyD35Pw8lFBERKb1UyEqR3tUN6oddPCUDCO7YF0tIGVJn/Vsb7oqIiLiZClkpYjHzp2TzDtlZe/LCUmb6+BHW437O7VxH9hbdySoiIuJOKmSlTN8aBvXC8u+4/Du/Ri3wq38dqbPfxZajHfxFRETcRYWslLGYBk81sfDtQTvrT/7tnMs/FvifOU36wi89lFBERKT0USErhW6vYVA7BF741XrRY17lKxHcoRfpi2eSd/KIB9KJiIiUPipkpZDX/0/Jvv7dzq/JFy/gD76xH5bgMqTO1gJ/ERERd1AhK6XurGVQKwReLGBKlr/AfxjZ29eSvW2VB9KJiIiULipkpZSXafDPJhZmH7Cz+dTFUzC/Ri3xrZdA6lfvYs8554GEIiIipYcKWSnWv5ZB9eCC15KdX+Cfdoozi7TAX0RExJVUyEoxb9Pgn3EWZu23syXl4imZd0QVgtv3In3Rl+Qla4G/iIiIq6iQlXJ31zGIDoKXCpiSAQTfeAeWoDBSZ7/n5mQiIiKlhwpZKedtGoxtYmHmPjvbT188JTN9/QjtMYzsbas5u221BxKKiIiUfCpkwj21DaoGFXzHJYB/bCt868bnL/DPzXFzOhERkZJPhUzwsRg82djki712dhQwJTu/wD/1JOmLZnogoYiISMmmQiYADKprUjkQXt5Y8JTMu0JVgtv15MyiL8g7dczN6UREREo2FTIBwNdi8GScyed77exOLXh3/uCb7sASGELq7HfdnE5ERKRkUyGT84bUNakYcOk7Lk1ff0K7DyV76yrObl/j5nQiIiIllwqZnOdrMXiisclne+3sSSt4Subf+Hp868SR+tW/tcBfRETESVTI5AL31jWp4A8vX2JKlr/A/wGsKSdIX6wF/iIiIs6gQiYX8PMyeLyxyfTf7Ow9U/CUzDsyiqB2PTmzUAv8RUREnEGFTC4ytJ5JeT8Yd4kpGUBIpzuxBISQOud9NyYTEREpmVTI5CL+XgZjGptM22Nn/yWmZKavP6Hd7iN7ywrObl/r5oQiIiIliwqZFGhYfZOyfjDuEvuSAfg3aYNv7TjSvvo39jwt8BcREblaKmRSoAAvg8diTT7Zbef39IKnZIZhENbrH+SlHCd98Sw3JxQRESk5VMjkkv5R36SML7yy0XbJ53hHViOobXfSF/wXW+pJN6YTEREpOVTI5JICvQ1Gx5p8tNvGwYyCp2QAIZ36YwQEkfvjdDemExERKTlUyOSyHmhgEuIN4y8zJTP9Agi77V6sO9eSvWOdG9OJiIiUDCpkcllB3gajYk0+3GXj8GWmZP7x7TCrNcjfwV8L/EVERByiQiZXNKKBSZA3/GvTpadkhmHg03kgeaeOkb7kKzemExERKf5UyOSKgn0MHm1k8sEuG0cyLz0lMyOqENSmG+kLPifv9Ak3JhQRESneVMikUB5saOJvufyUDCDk5v4YfgGkaQd/ERGRQlMhk0IJ8TF4pJHJ+zttHM26zJTML5Cw2+7j7KblZO/a4MaEIiIixZcKmRTaQw1NfC3w2hWmZP4J7fGp2YjUWe9gz8t1UzoREZHiS4VMCi3M12BkjMm7O2wcv8yUzDAMyvQeTl7yEdKXznZjQhERkeJJhUwc8nCMibcJr22+/JTMu2I0Qdd3I/3HGeSd1g7+IiIil6NCJg4p42vwcIzJO9ttnDh76SkZQMjNAzB8/UmbqwX+IiIil6NCJg4bGWPiZcKEK0zJTP9AQrvdy9mNP2uBv4iIyGWokInDyvoZPNjQ5O3tNk5eYUoWkNABnxoxpM76txb4i4iIXIIKmVyVRxuZmAa8vuXyUzLDMAjr/QB5yf8jY9kcN6UTEREpXlTI5KqE+xmMaGAyZbuNU9mXn5L5VKpBUOtbOfPDZ+SlaoG/iIjI36mQyVV7tJGJzX7lKRlAyM13/f8C//+4IZmIiEjx4lAhy87OJjY2lvnz57sqjxQj5f0NhjcweWubjZQrTMnMgCBCbx3C2V9/InvPJjclFBERKR4cKmR+fn4cOXIEi8XiqjxSzIyONbHaYdLWK0/JAq67AZ/qDUid9TZ2a54b0omIiBQPDn9k2bNnT5KSklyRRYqhCH+Df9Q3mbzVRmrO5Z+bv8B/BHnHD5OxbK57AoqIiBQDXo6+oFWrVowdO5YjR47QuXNnIiIiMAzjguf07NnTaQGl6HssNn8LjPd+8+FfkZd/rk/lGgS17sqZH6YTEN8OS2i4e0KKiIgUYYbdbr/84p+/Mc3LD9UMw8BqtV5TqGu1YcMGEhISWL9+PfHx8R7NUlo8stLKR7us/H6HN2G+xmWfa8vK4Ni4e/GtE0f43U+4KaG4S0pKCmXLlvV0DPEgXQOia8BxDk/I9u/f74ocUsyNaWzy7nYrb22z8XT85dcY5i/wH8zpz18nu0Vn/Go3dlNKERGRosnhQlatWjVX5JBirmKAwd3Vc3lja/5ZlyE+l5+SBVzXkcyV80id9Q4VHnsbw+LwpSgiIlJiXNU+ZDk5OUybNo377ruPXr16cd999/Hpp5+Sk3OFVd1Soj1UN4esPHhr25XvuDRMk7Dew8k7fkgL/EVEpNRzuJCdOHGChIQEBg4cyMKFCzly5AgLFy7knnvuITExkRMnTrgipxQDFf3t3FfX5PUtNtJzrrw00adKLQJb3cKZHz7DmnbKDQlFRESKJocL2ejRozl16hQrVqxg//79rFy58vx/p6Sk8Nhjj7kipxQTjzc2yciFt7dfeUoGENrlbgxvb9K+1g7+IiJSejlcyL7//nv+9a9/0bx58wu+3qxZM8aNG8d3333ntHBS/FQJMhhS12TCZhsZuVeekpkBwYR2HUzW+iWc27vFDQlFRESKHocLWVZWFuHhBe8dFR4eTlZW1jWHkuLtiTiTM7nwTiGnZAFNb8SnWj1OJ2kHfxERKZ0cLmQJCQlMnjz5or3GrFYrkydPJiEhwWnhpHiKCjIYXCd/SpZZiCnZ+QX+x34n4+dv3JBQRESkaHF4r4Fx48Zx4403UqNGDbp3705kZCTHjx9nzpw5HD9+nAULFrgipxQzT8SZfLjLxr932Bgde+WzT32q1iaw5S2c+eFTAuLbYgnRhoIiIlJ6ODwhu/7661mxYgUJCQl8/vnnPPPMM3z++eckJCTwyy+/0Lp1a1fklGImOthgYB2D1zbbyMor3GEQoV3uwbB4kfb1hy5OJyIiUrQ4VMjOnTvHxIkT8fb25quvvuLEiRPk5uZy/PhxZs2apWOK5AJj4yykZMN7Owq3lswMDCa06yCy1i3i3N6tLk4nIiJSdDhUyHx9fXn66ac5ffq0U0M89NBDREdHYxgGW7de+gdxdHQ09erVIy4ujri4OL744gun5hDnqh5icHdtg1c32ThbyClZQLNOeEfVJTVpCnYPn4kqIiLiLg5/ZBkXF8f27dudGqJ3794sX768UMcyJSUlsXHjRjZu3Mjtt9/u1BzifGObWDiZDe/vLNyUzDBNyvQeTu6x38lYrgX+IiJSOji8
qH/y5MkMGDCAiIgIOnfujL+//zWHaNOmzTW/R0FGjBhBaGgoPXv2pFevXi75HvKngianZYA+UX688quFPhXS8Lvy+n4IKodXfAfSvp9KTvXGGEGhTs8qruPsCboUP7oGRNfAn8qWLdxNag4Xsg4dOpCTk0OfPn0ACAgIwDD+PEjaMAzS0tIcfdtC69+/PzabjWbNmvHKK69Qvnz5Sz53ypQpWtfmZgVdeC80s1NvZh5fnQhlRMPCNDKw9hzG8R1rMH6eRdn+o50dU1yssP8HJCWXrgHRNeAYhwvZ6NGe++G4bNkyoqKiyM3N5amnnuKee+7h+++/91geKZzaoQb9axqM32Tjvnomvhbjiq+xBIYQcutgUr+YTGCLzvjWaOiGpCIiIp7hUCHLyckhJiaGuLg4atas6apMlxQVFQWAt7c3I0eOpE6dOm7PIFfnn00sfLY3j7wLO4MAACAASURBVA932XigQeGmZIHNOpG5ch6pSW8TMeotDEvhXiciIlLcOLSo38fHh/79+3Po0CFX5bmkzMxMUlNTz//+888/p0mTJm7PIVenbphBvxoGr2y0cc5auDsuzy/wP7qfzBU6I1VEREouh++yrFevntML2fDhw6lSpQqHDx+mY8eO1KpV6/xjXbp0Yd26dRw/fpz27dsTGxtLo0aN+Omnn5g2bZpTc4hrPdXEwv8y4ZPdhbvjEsAnqi6BzW8m7bupWNNTr/wCERGRYsiw2+2FG1f8v3nz5jFy5EhmzJhRZM+t3LBhAwkJCaxfv16L+t0oJSXlios471icx4rjdvb09cKnEGvJAKwZaRwfdy9+MS0oe+ejzogqLlSY60BKNl0DomvAcQ4v6h8zZgzJyck0bdqUcuXKERERcdFdlps2bXJqSCk5nm5iISYpj6l77NxXr3CFzBIUSsgtA0md+RaBLW7Gt3oDF6cUERFxL4cLWUJCAomJia7IIqVAgzIGfWoYjPvVysA6Bt5m4UpZYIubyVz1A6mz3ibi0TcxTC3wFxGRksPhQvbJJ5+4IIaUJk81sRA7K49P99gZXLdwhcwwLYT1Gs7JSSPJXPE9Qa1vdXFKERER93F4Uf9f2e12jhw5Ql5enrPySCnQqKxBr+oGL/9qJddW+CWMvtH1CGjeiTPzPsV2LtuFCUVERNzrqgrZ/Pnzad68OX5+flStWpXNmzcDMHToUD777DOnBpSS6ZkmFvalw2e/OXRPCSE33oHtbAZZa350UTIRERH3c7iQff7553Tp0oVq1arx5ptv8tebNGvWrMnHH3/s1IBSMsWGG/SINnjpVyt5DkzJvMIj8W/SlvQls7BbrS5MKCIi4j4OF7IXX3yRkSNH8sUXX3Dvvfde8FjDhg3ZunWr08JJyfZ0Ewt7z8AMB6dkwe17Y005ztlNP7somYiIiHs5XMj27dtHly5dCnwsMDDQpQeLS8nSpJzBbdXyp2RWB6ZkPlVq4ls3nvTFM3FwGz0REZEiyeFCFhkZyc6dOwt8bPPmzVSrVu2aQ0np8UwTC3vOwH/3OTglu6EPuYf3cm73ry5KJiIi4j4OF7I777yT5557jkWLFp3/mmEYbN26lVdffZUBAwY4NaCUbAnlDbpGGby4wbEpmW/tOLyr1CJ9cZIL04mIiLiHw4Xsueeeo2XLltx4441ERkYC0LlzZxo3bkxiYiJPPPGE00NKyfZMvMmuNPjSgSmZYRgEd+jDuV0byDn8mwvTiYiIuJ7DG8P6+Pgwd+5clixZwoIFC0hOTqZs2bJ07NiRjh07uiKjlHDXlTfpXNXGi79aub2mgWkUbrNY/8atsYRHkr44ifC79RcBEREpvhwuZH9o37497du3d2YWKcWejTdpPtdK0j47fWsWcvd+i4Xgdj1Jnf0uebcMxCs80sUpRUREXOOaduoXcZZmESadqhi88KsVmwN3TgY0uwnTP4iMpV+5MJ2IiIhrqZBJkfFsvMm20/DV/sIXMtPHj6DrbyNz1XysGdpyRUREiicVMikyWlQwubGy41OywP8/aDxz+beuiiYiIuJSKmRSpDwTb7IlBeYeKHwhswSFEtC8Exk/f40tR4eOi4hI8aNCJkVK60iTDpXyp2SO7MIf3K4ntqx0stYscGE6ERER1yjUXZbLli1z6E3btGlzVWFEIH8tWdtvrXz9u51u0YW749IrPBL/uOtJX/IVgS26YFgsLk4pIiLiPIUqZO3atcMwjPMTC+Mv+0TZ7fYLfg9gtVqdGFFKmzYVTdpVtPH8Biu3VTMuur4uJbhDb05MfJCzm38hoIn+UiAiIsVHoQrZ2rVrz//6xIkTDB06lDZt2tC7d28qVKjA8ePHmTlzJj///DPvv/++y8JK6fFMvEmH76x8d9BO12qFK2Q+VWvjW6cJ6Ytn4h93faGLnIiIiKcVqpAlJCSc/3Xv3r3p168fr7322gXP6dGjB6NHj+b999+nc+fOzk0ppU67igbXRxo8v8HGLVEOTMlu6EPyv8dybs8m/OrEuTiliIiIczi8qH/+/PncdNNNBT7WqVMnFi5ceM2hRAzD4Nl4k3XJduYdcuDQ8TpN8K5ck/TFM12YTkRExLkcLmRBQUEsWrSowMcWLFhAUFDQNYcSAehQyaBVhfwpWWHvuMw/dLw353auJ+d/+1ycUERExDkcLmTDhw/n1VdfZdCgQcydO5eVK1cyd+5cBg4cyMSJExk+fLgrckop9MeUbM1JO/MPF35K5h/XBkuZCDI0JRMRkWLC4cPFn3rqKcLCwhg/fjxTp049f/dlxYoVmTRpEg8++KArckop1bGyQfOI/ClZpyqFW0tmWCwEt+9F6pz3CLllIF5lK7ghqYiIyNW7qo1hR4wYwcGDBzlw4AArVqzgwIEDHDp0SGVMnO6PKdmqE3YW/s+RQ8c7YfoFkrF0tgvTiYiIOMdV79RvmiZRUVE0a9aMqKgoTFOb/otrdKpi0LS8Y2vJTF8/Aq+/jcxV87BmnnFxQhERkWtzVS1q27Zt9OvXj5o1a+Lr68uGDRsA+Oc//8m8efOcGlDkjynZL8ftLD5S+ClZ0PW3YrfbyfxFh46LiEjR5nAhW7BgAU2aNOHAgQP069eP3Nzc8495e3vzzjvvODWgCEDnqgaJ5Qxe2GAr9GssQWEENruJjGVzseecc2E6ERGRa+NwIXvyySfp168fq1at4vnnn7/gsSZNmvDrr786LZzIHwzD4Jl4k2XH7Cw9UvhSFtyuJ7bMdDLXan88EREpuhwuZFu3buWuu+4CuOiOt7CwMJKTk52TTORvukYZxJeDZ9cXfi2ZV7lK+DduTfqSJOw2nbEqIiJFk8OFrGzZshw5cqTAx3bv3k3FihWvOZRIQQzD4OVEC8uO2Zm1v/BryYI79MaafJSzm1e4MJ2IiMjVc7iQde/enWeffZZdu3ad/5phGBw7dowJEybQq1cvpwYU+aubq5rcGmUwarWVrLzClTKfqDr41o4jffHMQk/WRERE3MnhQvbKK69Qvnx5YmNjadasGQCDBw+mbt26hIaG8txzzzk7o8gF3mhh4fhZGL/RgbVkHXqTe3A
3537b7MJkIiIiV8fhQhYaGsqKFSt49913qVOnDh07dqRu3bpMnDiR5cuX6yxLcbmaIQajG5m8utnGvjOFm3j51kvAu1J1MhYnuTidiIiI4xw6Oik7O5u+ffsyatQoBg0axKBBg1yVS+SynowzmbbHxiOrrMy96cqXcf6h431Imf4qOUf24VOphhtSioiIFI5DEzI/Pz9++uknbLbCf1Qk4gqB3gYTm1v4+nc7Pxwq3PXo3+SPQ8dnuTidiIiIYxz+yPKmm25iwYIFrsgi4pDe1Q3aVzR4eKWVHOuVP7o0LF4EtetJ1oal5J0+4YaEIiIihePQR5YAgwYN4v777ycjI4POnTsTERFx0X5k8fHxTgsocimGYfBWSwuNv8pj0lYbYxpbrviawOadODN/OhlLZxPWY5gbUoqIiFyZw4Wsa9euAEyZMoUpU6ZcUMbsdjuGYWC1agNOcY+GZQ1GNDR58VcbA2qZVAo0Lvt809efoNa3krF0NiGd7sQMCHZTUhERkUtzuJAtWbLEFTlErtpz8SYzfrMxZo2V6e2vfEkHXd+N9MVJZPzyHSE39nNDQhERkctzuJC1bdvWFTlErlqYr8H4phaGLLMyrJ6N6ytefmmkJTiMwKY3kfHTHILb9cTw9nFTUhERkYI5vKhfpCgaWMegaXmDB1dYsdquvMA/uH0vbJlpOnRcRESKhKsqZNOnT6d169ZEREQQEhJy0X9E3M00DN5qabIpBd7beeVtMLzKV8I/thUZS2bp0HEREfE4hwvZ9OnTuffee4mJiSE5OZm+ffvSq1cvfHx8iIiIYPTo0a7IKXJFTSNMhtQ1eGqdjeTsQkzJbuhD3sn/kb1lpRvSiYiIXJrDhWzixIk8/fTTvP322wA88MADfPzxx+zfv5/y5cvr6CTxqHHXWbDZ4am1V56S+UTVxbdWLGcW6dBxERHxLIcL2Z49e2jVqhUWiwWLxcKZM2cACA4O5vHHH+fNN990ekiRworwN3ghweT9nTY2JBdmStaX3IO7yNm31Q3pRERECnZVh4ufO3cOgMqVK7N9+/bzj1mtVk6dOuW8dCJX4YEGJg3LwIMrrFecfPnWS8C7YjTpi2a6KZ2IiMjFHN72IjExkc2bN9OpUyduu+02nn/+eWw2G97e3owfP55mzZq5IqdIoXmZ+Tv4t//OyvTf7NxV+9KbxRqGQdANfTg9/TVyjx7Au2K023KKiIj8weEJ2ZNPPklUVBQAL7zwAs2bN+eRRx7hH//4BxEREbz//vtODyniqHaVTPrWMHhstZUzOZefkgU0aYslrDzpi5PclE5ERORCDhey5s2bc/vttwMQFhbG3LlzycjIIDU1ldWrV1OjRg2nhxS5GhOaWUjPhRc2XH6B//lDx9cvIe/0STelExER+ZNTNob19fXV/mNS5FQNMvhnnMnkrTZ2nL78lCywxc0Yfv5k/DTbTelERET+5PAassGDB1/xOR999NFVhRFxtlGxJh/ttvHwSivzO1swjILXk5m+/gS16krGsrmE3HSHDh0XERG3criQrV279qKvpaSkcOzYMcLDw4mMjHRKMBFn8LUYTGpu4dYfrcw5YKdH9Usv8A9q0430JbN06LiIiLidw4Vsy5YtBX5969atDBgwgEmTJl1zKBFn6lrNpEtVG4+usnJzVQN/r4JLmSW4DIFNbyRj2VwdOi4iIm7ltMPFY2JiePzxxxk5cqSz3lLEaSa1sHAkC17ddPkF/kHte2HLSCVz3SI3JRMREXFiIYP8TWN/++03Z76liFPUDjV4tJHJ+E02DqRfeoG/d/nK+YeOL07SoeMiIuI2DheylJSUi/5z7NgxlixZwtixY4mJiXFFTpFr9s8mJuF+8OiqyxetoPa98w8d37rKTclERKS0c3gNWbly5Qq8U81ut1O1alXmzJnjlGAizhbkbfBaUwt3LrGy4LCNG6sU/PcR3+h6+NRsRPqimfg1annJOzNFREScxeFC9tFHH130A8rPz48qVarQrFkzvLwcfksRt+lX0+DdHQYPrbSyqaeBj6XgshV8Qx9Ovf8MOfu24VtTU18REXEth9vTwIEDnR7ioYce4uuvv+b3339ny5Ytl/zYc8+ePdxzzz0kJycTFhbGJ598QoMGDZyeR0ouw8g/57LJ7Dze2mZjVKylwOf51b8Or4rRpC+eqUImIiIu59RF/Verd+/eLF++nGrVql32ecOGDWPo0KHs3r2bMWPGMGTIEDcllJIkNtzggfomz2+wcTSr4AX+hmEQ3L432dtWk3v0gFvziYhI6WPY7fbLnynzN6ZpFnpNjWEY5OXlFfq9o6Oj+fbbbwuckJ04cYI6deqQnJyMl5cXdrudihUrsmrVKqKjoy947oYNG0hISKBFixaEhobSs2dPevXqVegccnVOnz5NmTJlPB2jUFJzoOn8QDpGWnnnuuwCn2O35pH91kjM6jH4drvfzQmLr+J0HYhr6BoQXQN/Klu2bKGe5/BHluPGjePtt9/GYrHQrVs3KlSowLFjx5g7dy52u53hw4e7ZB3ZoUOHqFSp0vn3NgyDqKgoDh48eFEh+8OUKVOIj493eha5tMJeeJ5WFnilmY2hP5s8FOdHywoFD4vT2/ci7duPCelxH15h5d0bshgrLteBuI6uAdE14BiHm9Pp06eJi4tjzpw5WCx/rr9544036NatG8nJybz22mtODfmHv0/mHBzuiVxgcB2D93YYjPjFytruBhbz4slvYIvOnJk/g4yf5hDW7T4PpBQRkdLA4TVkn3zyCcOHD7+gjAFYLBaGDx/O1KlTnRbur6pWrcrhw4fPfwRqt9s5dOgQUVFRLvl+UvJZTIMpLU1+PQX/2VXwDv6mXwBBrbuSuWIetqwMNycUEZHSwuFCdvbsWQ4cOFDgYwcOHCA7u+D1ONcqIiKCJk2aMH36dABmzZpFdHT0JT+uFCmM5hVMBtYx+OdaGynZBU9cg9p0w56XS8aK792cTkRESguHC1n37t15/PHHmTp1KmlpaQCkpaXxySef8OSTT9K9e3eHQwwfPpwqVapw+PBhOnbsSK1atc4/1qVLF9atWwfAe++9x3vvvUedOnUYP348H374ocPfS+Tvxl9nIdcGT68veEpmCSlLYNOOZCybjT0vx83pRESkNHD4Lsv09HQGDRrE7NmzAfD29iY3NxfIL2sff/wxISEhzk/qgD/usly/fr0W9btRSkpKsV3E+cYWK6NX29jQw4vG4RevJcs9fojj44dS5vaHCWx+swcSFh/F+ToQ59A1ILoGHOfwov7g4GCSkpLYuXMna9as4ejRo1SsWJHrrruO+vXruyKjiMuNaGjyn502RvxiZdmtlotuIPGuUBW/Ri1IX5xEQNObMMwisYWfiIiUEFe9P0W9evWoV6+eM7OIeIy3afBmSwsdv7cyY6+d/rUunpIFd+jDyUmPkL1tNf6NWnggpYiIlFQO/zV//fr1LFq06PzvU1NTue+++2jdujXPPfccNlvB63BEirobKpv0qm7w2Gor6TkXf5LvG10fnxoxpC+e6YF0IiJSkjlcyB555B
GWL19+/vcPP/wwX375JZGRkUyYMIGXX37ZqQFF3GliMwup5+ClXwv+i0XwDX3I2b+dc/u2uTmZiIiUZA4Xsu3bt9O0aVMgfwuMpKQkJk2aRFJSEv/617/49NNPnR5SxF2qBRs8EWfyxlYbu1IvnpL51b8Or8goTclERMSpHC5kWVlZBAQEAPDLL79w7tw5unXrBkBsbCyHDx92bkIRN3ss1qRKIIxcab3oNAjDNPMPHd+6itxjBz2UUEREShqHC1mNGjWYN28eAJ999hkJCQnnb209ceKEx7e8ELlW/l4GbzS38MNhO98cvHhKFpDQHjM0nPQlSR5IJyIiJZHDhezRRx/l1VdfpXz58kybNo2HH374/GNLly4lNjbWqQFFPOG2agadqhg8stJKdt7fpmRe3gS37UHWusVY0055KKGIiJQkDheywYMHs3TpUp544gkWLlzInXfeef6x8PDwCwqaSHFlGAaTW1g4lAkTNl+8wD+wZWcMbx/Sf5rjgXQiIlLSXNU+ZG3atKFNmzYXff2555671jwiRUbdMIORMSbjNtq4q7ZJteA/9yYz/QIJatWVjF++JeTGfpj+gR5MKiIixZ22Gxe5jKebmIT5wujV1oseC2rTDXtuLpkrdei4iIhcGxUykcsI9jF4tamFpP12Fv3vwo8uLaHhBFzXgfSf5ujQcRERuSYqZCJX0L+WQesKBg+tsJJru3CBf3D73tjSTpG1fqlnwomISImgQiZyBYZh8FYrCzvT4O1tF07JvCtUxS8m/9Bxu44NExGRq+RwIZs2bRqnThV8q39KSgrTpk275lAiRU1cuMGweibPrrdxPOtvU7Ib+pB3/CDZ29d4KJ2IiBR3DheyQYMGsXfv3gIf279/P4MGDbrmUCJF0YuJJl4mPLn2wgX+vtUb4FO9gY5TEhGRq+ZwIfv7UTJ/dfr0aYKDg68pkEhRFe5n8HKiyce77aw6fuHHk8E39CFn3zbO7d/uoXQiIlKcFWofsnnz5p0/Lglg4sSJVKhQ4YLnZGdns3jxYuLi4pybUKQIua+eyfs7bTy4wsbq7gamkb83mV+DZnhFVCV9cRK+Q57xcEoRESluClXIdu/ezTfffAPkL3D++eef8fX1veA5Pj4+xMTEMG7cOOenFCkiLKbBWy0ttP7Gyke77NxbL7+QGaZJcIfenP5iErnHD+FdoaqHk4qISHFSqEL28MMPnz8SqXr16syZM4fGjRu7NJhIUdUq0mRALRtPrrXSq7pBGd/8UhaQ2J6076eSsWQWZfqN9HBKEREpThxeQ7Z//36VMSn1Xm1mIdsKz67/cy2Z4eVDcNseZK5dpEPHRUTEIVe1D9nu3bsZPHgwtWvXJjw8nNq1azNkyBB2797t7HwiRVLFAINn403e2W5jS8qfN7oEtuyC4eVNxrK5HkwnIiLFjcOFbP369SQkJPD111/TunVrhg4dSuvWrfn6669JSEhgw4YNrsgpUuQ81NCkVgg8uMJ6/u5j0z+QwFZdyPjlO2zZmR5OKCIixUWh1pD91ZgxY2jcuDHz588nMDDw/NczMzPp1KkTY8aMYeHChU4NKVIU+VgM3mxpodM8K1/ss9OvZv5asuA23cn4aQ6ZK38guH0vD6cUEZHiwOEJ2apVq3j88ccvKGMAgYGBjBkzhlWrVjktnEhRd1MVk+7VDEavtpKRmz8ls4SVIyChAxlLZ2PPy/VwQhERKQ4cLmQ+Pj5kZhb8UUxmZibe3t7XHEqkOHm9uYVT2TBu458L/IM79MKalkzWhqWeCyYiIsWGw4WsY8eOjB07lp07d17w9Z07d/L0009z0003OS2cSHFQPcRgTGOTiZtt/JaWPyXzjqyGX8NmOnRcREQKxeFC9vrrr2O324mJiaFx48Z06tSJuLg4YmJisNlsTJw40RU5RYq0xxubVAyAkSv/POcy+IY+5B37newdaz2YTEREigOHC1nVqlXZsmULr7/+OnXr1sVms1G3bl3eeOMNNm/eTJUqVVyRU6RIC/AyeL25he8O2fnuYP5EzKd6Q3yi65O+OMnD6UREpKhz+C5LgKCgIB566CEeeughZ+cRKbZ6RBt0rGzw8EorN1Qy8PMyCO7Qh1MfvcC5Azvwja7v6YgiIlJEXVUhA1i2bBk///wzKSkphIeHc/3113P99dc7M5tIsWIYBm+2sBA7K4/Xt9gY28SCX0xzvMpXJmNxEr6Dn/Z0RBERKaIcLmSZmZn06NGDhQsX4uXlRXh4OKdOncJqtdKxY0dmz55NQECAK7KKFHn1yxg8FGPy8kYbd9U2qRr0/4eOf/kmuScO4x2hj/RFRORiDq8he/zxx1m9ejUzZszg7NmzHD16lLNnzzJjxgxWr17NE0884YqcIsXGs/Emwd7w2Or8Bf4BiTdgBoeRsWSWh5OJiEhR5XAhmzVrFuPHj6dfv35YLBYALBYLt99+O+PGjWPmzJlODylSnIT4GPyrqYUv9tlZesSG4e1DUJvuZK5diPVMiqfjiYhIEeRwIUtNTaVGjRoFPlazZk1SU1OvOZRIcXdXbYPmEQYPrbSSZ7MT1PIWDIsXGT9/7eloIiJSBDlcyOrXr8/UqVMLfGzq1Kk0aNDgmkOJFHemYTClpYWtKfDv7TbMgCACW3YhY/m32LKzPB1PRESKGIcX9T/zzDP06tWLAwcO0KdPHyIjIzl+/Dhffvkla9asYdYsrZMRAUgob3BfPZNn1tu4vaZJ2bb/f+j4qh8IbtfT0/FERKQIcXhC1r17d2bPns25c+cYPXo0/fv3Z9SoUZw7d47Zs2fTrVs3V+QUKZZevs7EMGDsWiteYeUJSGhPxtKvsFvzPB1NRESKEIcmZDk5OXzzzTfExcWxfv16MjMzSU1NJSwsjMDAQFdlFCm2yvkZvJhg8uAKG8Pq24jr0JustQvJ2rCUwOs6ejqeiIgUEQ5NyHx8fOjfvz+HDh0CIDAwkMqVK6uMiVzGsPomjcrCiF9sWCKr4degaf6h43a7p6OJiEgR4fBHlvXq1TtfyETkyrxMg7daWlhz0s7U3fb8Q8ePHtCh4yIicp7DheyVV17hpZdeYv369a7II1IitalockdNgyfWWjlbpSE+1eqRvkh79omISD6H77IcM2YMycnJNG3alHLlyhEREYFhGOcfNwyDTZs2OTWkSEnwWjMLdb/M4/kNdsZ16M2pj1/i3IGd+EbX83Q0ERHxMIcLWUJCAomJia7IIlKiVQ40eLqJyT/X2bi3e3PCy1cmY0kSvoOe8nQ0ERHxMIcL2SeffOKCGCKlw8hGJh/usvHgKvi6fS9SZ75F7sn/4V2+sqejiYiIBzm8hkxErp6vxWBySwtLjtr5oVwHzOAwUj5+mdzjulFGRKQ0UyETcbPOVU1ujTJ4dJ2FwMEvYs/L4cTEEWSu+kFbYYiIlFIqZCIe8EYLCyeyYUJydSJGTcE/vh2n/zuJlKnjsGVleDqeiIi4mQqZiAfUDDEY3cjktc029pz1pWy/Ryg7cCzZOzdw/LUHOLdvq6cjioiIG6mQiXjIk3EmFf2hYVIenb7P47/+r
fEb+TaWsHKcfGsMZ36Yjt1q9XRMERFxAxUyEQ8J9DZY18OLKS1Ncmxw7zIrFb8L597ar7A/8Q7OzJ/BybfHkJdy3NNRRUTExVTIRDwo3M/g/gYWlnT14n/9vZjYzCQlz0KbrH7cXmMcx46c4PD4Bzi9bpmno4qIiAupkIkUERUDDB6MsbD8Ni8O3uFFr7aNGJnwFvP84sicPo6kia8ze3cWZ/N0J6aISEnj8MawIuJ6VYMMHo218GhsGPtv+ier5v9A3Kr3OPyfbbSNfoza9Wpxe02TTlUMfC3Gld9QRESKNE3IRIq46qEmvft2odrjU6hR1peZv42mxpbZdJ+fS4XpeQxcmse8QzZybZqciYgUVypkIsWEd4WqVB09idA2tzFs/4fsPfcij9dIY+UJO11+sBI5PY/7luWx8H828lTORESKFRUykWLE8PIhrPtQyg17Cd/jv3HP/BFsjNnIrz29GFrPZNEROzd+b6XyjDweWG7lp6M2rCpnIiJFngqZSDHkVz+RCo//G+8qtTj1/tNEL3ufcfFW9t7uxZruFu6qZfLtQRvtvrVS9fM8Hl5hZcVxGzYdzSQiUiRpUb9IMWUJLkO5oS+QsWwOad98xLm9myl71xNcV6Eq15WHV5uZrDph54u9dmbut/HmNqgaCH1rmNxe0yCxnIFh6IYAEZGiQBMykWLMME2C2/Uk4pFJ2HPOXXBIuWkYtKxgMrmlhUN3mTgR/QAAHVhJREFUeLG0q4WuUSbT9thoOsdKrS/yeHKNlY2n7DrUXETEw1TIREoAnyq1iBg1hYD49n85pDz9/OMW06BtRZN3Wls40t+LBV0sdKhk8P5OG02+yqPezDyeWWdlW4qKmYiIJxSJQrZnzx5atmxJnTp1aNq0Kdu3by/wedHR0dSrV4+4uDji4uL44osv3JxUpOgyff0o029k/iHlu37l+GvDCzyk3Ms06FjZ5IM2Xhwb4MX3N1toEWHw5jYbMbPyiEnK5cUNVnanqpyJiLhLkVhDNmzYMIYOHcrAgQNJSkpiyJAhrFy5ssDnJiUlERMT4+aEIsVHQFwbfKrVI+XTf3HyrTEE3/R/7d17cBTXgS7w73T3vEcjaSSEJCQxyCAeMjEYLgLKYBvHviwVxwaFMhvsBeI4doU8IGv77jrs4nItzjrxYx3j3KyTWi8xLkK8QLw294YQJzbENthBic0rgHmYES8hjdBrNKOZ6bN/zExrRg+QAKn1+H5VXd3TfbrnNLRan06f7v5beO76KoSqdiprUQT+pljgb4oVhGMSO6olNh/X8cNPdfzzPh1TcoD7ShXcV6pgjIf9zYiI+oqQJnceqampQVlZGWpra6FpGqSUKCgowJ49e+Dz+dLK+nw+vP3221cMZFVVVZg2bRpmzZqFzMxMLFq0CJWVlX24FwQA9fX1yM7ONrsalCD1GKK7f43Irq1QisbBunAllKwRPVq3NQbsPK/h134Nvz2voTUmMDU7hoVFEdxbFMUoZ/enDR4HxGOAeAy083q9PSpneguZ3+9HYWEhNC1eFSEESkpKcPr06U6BDACWLl0KXddRUVGBH/zgBxgxovtfMOvXr8fNN9/cV1WnLvT0wKN+svAhhG+ahcBrP0T4lSeQveS7cE6Z26NVl48Alk8GmiMSb5+W2HxcYN0hFf+8H5g9UuC+UoHFpQoKnJ1bzngcEI8B4jHQOwOiD1nHW++7a7TbtWsXPvnkE1RVVSEnJwfLli3rj+oRDWq20hsx8rGXYZ9wMwL/+TQCv3wBejjU4/XdFoElNyjYdpeGmvs1/OI2FdlW4NG9Oka9HsVtb0fxfw/FUNPKPmdERFfL9Bay4uJiVFdXIxqNGpcs/X4/SkpKOpVNzrNYLFi1ahXKysr6u7pEg5LizIB32RMITvgtLm39CdqOH4D37/4B1uJxvdqOxyrwwDiBB8YpqA9LbDsV73P27Q90fOsDHbcVCIx12FCWG8Not8DoDKDEJZDn6PyHFxERtTM9kOXl5WHq1KnYuHEjli9fji1btsDn83W6XNnS0oJIJIKsrCwAwKZNmzB16lQTakw0OAkh4Jr5v2EdMwmB1/4VNf+2GplfWgH3rQshlN43lmfbBL42XuBr4xVcbJXYekrH26cl3q1RsfFzHcFoe1mbCpS4gNEZwhiPdguUuIHRboEiF2BVGdiIaPgyvVM/ABw5cgTLly9HXV0dPB4PNmzYgPLycgDAggUL8NRTT8Hr9aKyshKxWAxSSpSWluLFF1/ssp9ZslP/vn372IesHwUCAfYZGCRktA0Nb/8nmt/dCtuE6fAu/XuoGdenA24gEEB2djYCYeB0M/B5s8TnzdKYTo5rWtvXEQAKnECJW2B0IqSVGON4S1umlYFtsOC5gHgM9N6ACGTXGwOZOfgDOPiEDv8JgdefBQB4lz4K+8Tp17zNnh4HrVEJf4eQdrpZ4vPEtL8ZiKacnTwWYHRGSkhzpwe4fCeg8LLogMBzAfEY6D3TL1kSkXmSLykPvP4cav99Ddy3LkTm3SsgNGuff7dDEyjLAsqyug5RMV3ifCuMkGaEtSaJ3ed1bGwCGiPt5S1K/F2d6WGtvR9biRuwawxsRDQwMZARDXPtLyl/M/6S8s8+hffv/gGWkcXm1ksRGOUCRrkEZo3sukxDm8TnTcmw1h7cjjQAO8/oOBcEUi8B5Dk6Xg6Nj5PTXhtvPiAaCqK6RCgGtEaBUCx1kAilzEtfLtPLRuPPZKzIE1he1vcPpWAgI6LES8oXwjZ2MgK/+FfUPPctZC18BM6Z8wd0QMm0CnwhB/hCTtd1DMckzrQgLax93iRxugV463Mdp1uAcKy9vEtDWt+1QifgssTnuzSRMg24LAIuDXAanwGrwkBHBABSAqGoRGtKuOkq+HQKRB0CVHx5h6CUsr3W1GUp68Z62RlLALCrgEOLj41BAwqc/fOEMAYyIjIkX1LesO2nqN/8IkJ/3Yfs+74LxZlhdtWuik0VKPUApd289kmXEhdbO/Zji48/vhhvYQtGgZZoz07wqmgPZ6nBzQhtxjKRVs7Z4XNq4EvOd2rsI0eXJ6VERI//kRGKxcdhPR5Uwno82IRjHZYb0zKtbHoZaUynbfcy22vTMwBEr1jnVBalQxhKBCKHKtI+e22A3QnYVQGHJjqvo8a7Jzg6bCc+nZjfIXhZBsAfUwxkRJQm+ZJy24RpqN/8Ii78aCW8DzwOW+nQe4esIgRGOoGRToEZed2Xk1KiTQdaIvFw1hJNTksjsCU/t08nw5w0Pp8JJpfp7csj8csiPWFXOwY+kRLqug5yqeWsCqCIeHDsbtw+La5Ytrt1W6KAIyrTlpn9yy6VlBK6jN80EpNAVO9mLIGY3rGc7EXZjstlt8uTQao9FMm0oJMeeGRKKEoPQlfLlggmNhWwKSnTiTBkSwwODciyJcsoxvzUMjYF0MNB5HhcHYJVPBDZuwhEdjXeTWE4YyAjoi45p8yBdfT4Hr2kfKgTQhi/bNLvG7s+v0B0KY1wlgx8wZQg11XgM4JgolxDG3A2CLRE9JRl8bLRfr+XvuvWkcsHwN6O
xVWHo95ezrpeNAGoSmIsAE1pH2sCKeFGtAcjLT7OsCRDj0gPSSllOoan9GmRKNNxft+0DgUCEXi9A+JlQIMGAxkRdUvLzsOIlT9E085fonHH6wgf/TO8D/wfaN5uetnTVVGEgNsCuC2pc6/fL8i2WDzIRRKBRJfp45gO6EgZG8tkp7JXXFcHGpub4XC5u1hHXnHd9O+//FgRostwo4rLhx9ViPRyXa2vtAfG9rLiMtu8/HfycjNdCQMZEV2WUFV45i+FrWwKAq89gws//Cay7/sunFN79pJyMp9VFbBeVcPm1YWIQCDK1hGiXuJPDBH1iK20HCMf+wnsE6YhsOFpBDY936uXlBMRUfcYyIioxxSnG95l/4jsJavR+uf3UPPsSrT5j5ldLSKiQY+BjIh6JfmS8ry/Xw9hs6Pm31aj6Q9bIHXd7KoREQ1aDGREdFUsI4uRt+oFuOfeg4Y3f4baV/4JsaZ6s6tFRDQoMZAR0VUTmhVZ9zyE3If/BZEzJ3DhmUfQtuM1hI7+BTLWu4dCEhENZ7zLkoiumX3idIx8/Cdo/M3rCH76Pmr3/n8IuxP2CdNhL58B+6QZUF0es6tJRDRgMZAR0XWhZmQje/G3oM/7W2QEAwgd/AitB/eg/vVnAaHA6psAe/lMOMpnQMsfPaCe3E5EZDYGMiK6roQQsBaPg7V4HDzzlyLWUIfQ4Y/RemAvmn77Ohrf/g+oOflwTJoBe3kFbGMnQ2hWs6tNRGQqBjIi6lNqZg5cM+fDNXM+ZKQN4c8+RevBPWjd/yGad/83hM0B+/ibYS+vgH3S/4KakW12lYmI+h0DGRH1G2Gxwj5xOuwTp0NWrkTk3EmEDn6E0MG9qP/lCwAAa8n4RL+zClhGlfLSJhENCwxkRGQKIQSshaWwFpbCc+cSxJovIXToY4QO7kXTO/+Fxv/3C6hZubBPqoD9xgrYx94EYbWZXW0ioj7BQEZEA4LqzoJrxp1wzbgTMhpB+Pj+xI0Be9HywXYIiw22simw3zgTjkkzoGbmmF1lIqLrhoGMiAYcoVni/crG34zMhQ8jesGP0MG9aD24F5d+9RIuSR2W4nGwT5oBR3kFLEVjIRQ+VpGIBi8GMiIa0IQQsOSXwJJfgow7FkNvaYrftXlwL5rf+zWadrwOxeNtv2uzbCoUm93sahMR9QoDGRENKoorA87p8+CcPg8yFkX4xEGEDn2E0IE9aNnzG0CzwD5uSvzGgPIKaNl5ZleZiOiKGMiIaNASqgb7uJtgH3cTcM9DiNRUx8PZwY9waetPgf96GZbCMfFHapRXwFpSBqGoZlebiKgTBjIiGjIseUWw5BUh47ZF0IPNCB2pQujgHrS8vx1NO38JxZ0Je+LSpn38VCh2l9lVJiICwEBGREOU4nTDOXUunFPnQuoxtJ36q3FjQPCjnYCqwXbDZNjLK+Aor4CWW2B2lYloGGMgI6IhTygqbKXlsJWWI/PuryFaew6hQ/FHajT898/RsO2n0PJLYJ8UD2dW30QIlZc2iaj/MJAR0bCj5RbAPfceuOfeAz0URPhIFVoPfoTgRzvR/Ps3oDgzYJ84HbYJ02AtGgstrwhC5emSiPoOzzBENKwpdiccN90Cx023QOo62k4fRejQXoQO7EVw3x/ihVQLLAUlsBSWxodRpbCOKoXizDC38kQ0ZDCQERElCEWBzTcBNt8EZC5YBj3YjMi5k2irPo7I2ROInD2JYNW7QDQCAFCzRsAyqhSWwjGJcSm03EI+pJaIeo2BjIioG4rTDdsNk2G7YbIxT8ZiiF6sRuRMPKC1nTmOlr07oDfWAwCE1Q5Lgc8IaJZRpbAU+KDYnWbtBhENAgxkRES9IFQVlvzRsOSPBqbdbsyPNV2Kt6KdOYHI2RMInzyElj07AD0GAFBzC2AddUNaa5qanQchhFm7QkQDCAMZEdF1oGZkQU28fzNJRtsQOX/aaE2LnD2Bpve2QQabAQDC4YalcAyshWNgGXVDPKjlj4awWM3aDSIyCQMZEVEfEZoV1qKxsBaNNeZJKRG7VJvWmhb66z40//EtQEpAUaDlFRk3EFhHxS97qh6viXtCRH2NgYyIqB8JIaBlj4CWPQKO8gpjvh4OIXLuZFprWujgXshwKwBAcWel3UBgLSyFNrKYj+MgGiL4k0xENAAoNjtsvomw+SYa86SuIxY4j8iZE2hLtKa1/mU3mv+wJV6gq8dxFJZCcfFxHESDDQMZEdEAJRQFWm4htNxCOG66xZh/5cdx5Mb7pPFxHESDBgMZEdEgc3WP47BByyuCllMANScfWk4BtJz8+HR2HoRmMWt3iAgMZEREQ0JPHscRvXgG0brzaPvkfcTqLwC6nlhZgZqV2x7QjLAWHyvuTD6eg6iPMZAREQ1hXT2OA4i3qMUuXUS07hyidecRq42PI+dOIXRgD/SWRqOssNrTApqWm2hl8+ZD846EsNr6ea+Ihh4GMiKiYUioajxc5eR3uVxvbYkHtURgSw6hQx8hGrgAxKJGWSUzJ7GteGCL2jMQLrkBWk4BlIxs9l0j6gEGMiIi6kRxuGAtugEouqHTMqnHEGsIpIS1c4jVnUf04hmE/roPelM9LiYLW6zxlrRE+DNa2RKXRxWbo1/3i2igYiAjIqJeEYpqPEvNNvYLnZbXnT8Lj2yLXw6tPY9YING6duwviO75DRBpM8oq7qx4OMtNBDVvoh9bbgHUzBwIRe3PXSMyDQMZERFdV8Jqh8VbCEuBr9MyKSX0xkB6y1piaPtsP2INte2FVQ2ad2TKjQYj21vYvPkQDhdvNqAhg4GMiIj6jRACamYO1Mwc2ErLOy2XkTZEAxc6h7WTBxH80zvGmwuAePBTM3OgeLzGNlWPNz5k5kDJzIHqyYFis/fnLhJdFQYyIiIaMITFCsvIYlhGFndaJqWE3tJgXAaNNdQh1hiIjxvqEPEfQ6yhFrItnL5NuxOqJxHYMr1QPDlGaFMzvfFlHi/vFiVTMZAREdGgIISA6s6C6s4CfBO6LCOlhAwH4zcdNAagN9Qh1lgX/9xQh2jgAmInDyPWWJfWlw0AhNPdHtw6tbQlApwnG0Kz9sfu0jDDQEZEREOGEALC7oJid3XZypYkpYRsbUmEtURga6xLBLgAorVnET6+H7GGABCLpK2ruDITLW2J1rXMZFjLSQQ4L9SMbL74nXqFRwsREQ07QggIpxuK0x1/u0E3pJTQg03xoJa8RJq8TNpYh+iF0wgf/TNijQFAj6V+QSK4tV8WTWtpS0wr7iwIlXeSEgMZERFRt4QQUF0eqC4PLIVjui0ndR16sNG4NKp3aHlrO3Mc+uE/IdYUaH9lFQAIBUpGFlRPNhSnB4rLA8WV0WlaNaYzIOwuPmx3CGIgIyIiukZCUdr7t40q7bac1GPQmxs63ZCgN9VDb2mE3tyA6AU/9GAj9JYmyEi480YUBYojIx7QXB4ozsQ4OZ362ZUBNRHuhIV93wYyBjIiIqJ+IhTVuGGgJ2RbGLFEONNbGo2gpgcTn1saoQebEK2pNqb1YDMg9U7bElZbe8ubMyOlJS4Z7FKmk2U
cbrbG9RMGMiIiogFKWG3QrCOArBE9XkfqevyGBSO8JYKbMd0e7qIXzxpBTraFuqiAgOJwd90SlzYv/TIr9R4DGRER0RAiFAXCFW8BQ89zHGSkzWh5iyVb21Ja4YzWuNqz7eEu2JTeJy5JURGyOyFsDih2J0TqtM0BxeaEsCc/O6HYHBB2R3zaKOOI95ez2obFGxkYyIiIiAjCYjXuALX0cB2p65ChoNHyFku0vDXXXYRDFZDhVuihIGQoGJ9ubYGsvwg9HIQMtUIPt0KGgl1eYm2vmAJhsydCXDLYdRXeOn+Ol3Gkh74B+n5UBjIiIiK6KkJRjMeHILd9fjgQgMfbw35yUkJGwpDh1nhIC7XEw1sirOnJMJf8nAhzMtyKaN25xLzEuuEgEI1c9vuE1ZYS0NJb8RSbI6UFLx7mtJElsI2ZdC3/TD3CQEZERESmEUJAWO2A1Q5kZF/z9mQ0YoQ3o4Uu3NreKhcKQoaDKQEvMW6oQ7RDWdkWgrPiLgYyIiIiot4QmgWqZgFcnmveltRjQCx25YLXAQMZERERUReEogL91OeMDxchIiIiMtmACGTHjh3D7NmzUVZWhhkzZuDQoUPXVI6IiIhoMBkQgezhhx/GN77xDRw9ehSPP/44HnzwwWsqR0RERDSYCCmlNLMCNTU1KCsrQ21tLTRNg5QSBQUF2LNnD3w+X6/LAUBVVRWmTZuGWbNmITMzE4sWLUJlZWX/7tgwVF9fj+zsa79DhgY3HgfEY4B4DLTz9vDxH6Z36vf7/SgsLISmxasihEBJSQlOnz6dFrR6Wi7V+vXrcfPNN/f1LlDCli1b8NBDD5ldDTIZjwPiMUA8BnpvQFyy7PhKhO4a7XpajsyxdetWs6tAAwCPA+IxQDwGes/0FrLi4mJUV1cjGo0alyL9fj9KSkquqhwAtLa2AgAOHz7cL/tAcQ0NDaiqqjK7GmQyHgfEY4B4DKSbMGECnE7n5QvJAeDWW2+Vr776qpRSyjfeeENWVFRcU7mNGzdKABw4cODAgQMHDqYP+/btu2IWMr1TPwAcOXIEy5cvR11dHTweDzZs2IDy8nIAwIIFC/DUU09h+vTply2Xqra2Fjt27IDP54PD4ejv3SEiIiIy9KSFbEAEMiIiIqLhbEB06iciIiIazhjIiIiIiEzGQEZERERkMgYyIiIiIpMxkNE18/l8mDBhAqZMmYIpU6Zg8+bNZleJ+sF3vvMd+Hw+CCFw4MCBtGXHjh3D7NmzUVZWhhkzZuDQoUMm1ZL60uWOAZ4Xhr5QKIR7770XZWVlmDJlCubPn49Tp04Zy3ke6KVePzSMqIPRo0fL/fv3m10N6mfvvfee9Pv9Xf7/33777WnPDJw5c6YJNaS+drljgOeFoa+1tVVu375d6roupZTypZdeknfeeaexnOeB3mELGRFdlblz56KoqKjT/JqaGlRVVeH+++8HAFRWVuLkyZNpfznT0NDdMUDDg91ux4IFC4zXGs6cORMnTpwAwPPA1WAgo+ti6dKlmDx5Mr7+9a/j4sWLZleHTOT3+1FYWAhNi7+ZTQiBkpISnD592uSaUX/jeWF4+fGPf4y7774bAM8DV4OBjK7Zrl278Mknn6Cqqgo5OTlYtmyZ2VUikyX/Yk6SfP70sMPzwvDy9NNP49ixY1i3bp0xj+eB3jH95eI0+CVf8G6xWLBq1SqUlZWZXCMyU3FxMaqrqxGNRqFpGqSU8Pv9xnFCwwPPC8PHs88+i61bt+J3v/ud8Xogngd6jy1kdE1aWlpw6dIl4/OmTZswdepUE2tEZsvLy8PUqVOxceNGAMCWLVvg8/ng8/nMrRj1G54Xho/nn38emzZtws6dO5GVlWXM53mg9/guS7omJ06cQGVlJWKxGKSUKC0txYsvvsgfumFg5cqVePPNN3H+/Hnk5ubC7Xbjs88+AwAcOXIEy5cvR11dHTweDzZs2IDy8nKTa0zXW3fHAM8Lw0N1dTWKi4tRWlqKjIwMAIDNZsPevXsB8DzQWwxkRERERCbjJUsiIiIikzGQEREREZmMgYyIiIjIZAxkRERERCZjICMiIiIyGQMZERERkckYyIiIiIhMxkBGRNQHnnzySbjdbrOrQUSDBAMZERERkckYyIiIiIhMxkBGREPGhx9+iHnz5sHlciEzMxNf/epXUVNTAwA4deoUhBDYsGEDHnzwQWRmZsLr9eJ73/seotFo2nYOHDiA+fPnw+12w+Px4J577jHe05mk6zqef/55TJw4ETabDfn5+Vi8eDEaGhrSyn366ae45ZZb4HQ6ceONN2LHjh19+49ARIMSAxkRDQkffvghbrvtNmRmZmLz5s145ZVX8PHHH+PLX/5yWrknnngCuq7jV7/6FR577DG89NJLWLNmjbHc7/djzpw5uHDhAjZs2ICf//znOHr0KObMmYOLFy8a5b797W/j8ccfx5e+9CW89dZbePnll5GRkYHm5majTCQSwf3334/ly5dj27ZtyM3NRWVlJerq6vr+H4SIBhdJRDQEzJ07V86ePVvqum7MO3DggBRCyO3bt8uTJ09KAHLOnDlp661Zs0Y6nU4ZCASklFKuXr1aOp1OWVNTY5Q5deqUtFgscu3atVJKKY8cOSKFEPLpp5/utj5r166VAOT27duNeceOHZMA5GuvvXY9dpmIhhC2kBHRoBcMBvH+++9j8eLFiMViiEajiEajGD9+PAoKCvDxxx8bZRcuXJi27qJFixAMBrF//34AwO7duzFv3jyMGDHCKDN69GjMnj0bu3fvBgD8/ve/h5QSDz744GXrpSgKvvjFLxqfx44dC6vViurq6mveZyIaWhjIiGjQq6+vRywWw+rVq2GxWNKGs2fPwu/3G2Xz8vLS1k1+PnfunLGt/Pz8Tt+Rn5+PQCAAAKirq4OmaZ221ZHD4YDVak2bZ7FYEAqFer+TRDSkaWZXgIjoWmVlZUEIgSeeeAL33ntvp+W5ubnGdLKTf8fPBQUFAACv14sLFy502sb58+fh9XoBADk5OYhGo6ipqbliKCMi6gm2kBHRoOdyuTBr1iwcPnwY06dP7zT4fD6j7LZt29LW3bp1K5xOJyZPngwAuOWWW/DOO++kdbz3+/344IMPMGfOHADAvHnzIITAq6++2vc7R0TDAlvIiGhI+NGPfoR58+bhvvvuw5IlS5CdnY3q6mrs3LkTK1asMELZ8ePHsWLFCixZsgRVVVV45plnsGrVKmRnZwMAVq9ejVdffRV33XUXvv/97yMWi2Ht2rXwer1YuXIlAKCsrAyPPPII1qxZg0AggDvuuAPBYBDbt2/Hk08+iVGjRpn1z0BEgxQDGRENCbNnz8Yf//hHrF27FitWrEBbWxuKiopwxx13YOzYscazxtatW4d3330Xixcvhqqq+OY3v4l169YZ2ykuLsauXbvw6KOP4oEHHoCiKLj99tvx3HPPpXX0X79+PcaMGYOf/exneOGFF5CTk4Nbb70VGRkZ/b7vRDT4CSmlNLsSRER97dSpUxgzZgzeeOMNfOUrXzG7OkREadiHjIiIiMhkDGREREREJuMlSy
IiIiKTsYWMiIiIyGQMZEREREQmYyAjIiIiMhkDGREREZHJGMiIiIiITMZARkRERGQyBjIiIiIikzGQEREREZnsfwAjsDtNUhYNvQAAAABJRU5ErkJggg==", - "text/plain": [ - "PyPlot.Figure(PyObject
)" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "plot(epochs, losses,\n", - " xlab = \"epoch\",\n", - " ylab = \"root squared error\",\n", - " label=\"out-of-sample\")\n", - "plot!(epochs, training_losses, label=\"training\")\n", - "\n", - "savefig(joinpath(DIR, \"loss.png\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Evolution of weights" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAlgAAAFyCAYAAAApuaQRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOzdeVyU1f4H8M/zPLOyzOCwKSoOi4KKGi6VlVehTDMVEjSzMsrQn0a03G63ut2brSaW18qbecsutmmittq1EsUsyVIuboGKGyLurAKzPuf3xzMzMOzCsH/frxevYc5z5jln5FE/nHPmPBxjjIEQQgghhLgM39EdIIQQQgjpbihgEUIIIYS4GAUsQgghhBAXo4BFegyO4zBhwoSO7gYhpAEJCQngOA6nTp1q1Xn0ej30er1L+kRIS1HAIl3WqVOnwHFco1+k63PVf7qENISuMdIWZB3dAUJaKyQkBPfdd19Hd4MQ0kpLlizBM888g759+3Z0VwhpNQpYpMsLDQ3F4sWLO7obhJBW6tOnD/r06dPR3SDEJWiKkPRoEyZMaHAqsfa0gcFgwLBhwyCXy/Hrr7861a2qqsKQIUOgUCjw+++/N7tdg8GAp59+Gv3794dKpcKwYcPw4Ycf1qlfWlqKpUuXYvz48QgICIBCoUBAQADmzp2L48eP16m/ePFicByHjIwMrF27FqNGjYKbm5tjDVprzvef//wHw4YNg1qtRlBQEN5++20AAGMMb731FsLDw6FSqTBo0CB8/PHH9b5/k8mE5cuXY+TIkXB3d4enpyfGjRuHr7/+2qmeXq/H2rVrAQBBQUGOqd/aa+lOnjyJhx9+GIGBgVAqlejTpw8SEhJw+vTpOm3bX3/27FkkJCSgd+/e4HkeGRkZ9fbVzv4zMxqNeO655xAYGAi1Wo1Ro0Zh27ZtAIDy8nIkJyejb9++UKlUGDt2LPbu3Vvv+S5evIgnnngCoaGhUCqV8PHxQVxcHA4dOlSn7o4dO/DQQw8hLCwMHh4e8PDwwOjRo/Hvf/+73nPb3+OlS5fw0EMPwc/PD2q1GjfeeGOT79NuxYoV4DgOX375pVP5I488Ao7jcNtttzmV5+TkgOM4LFy40Km8vLwcL7zwAoYOHQq1Wg0vLy9MnjwZP//8c502G5qqs1gsWLJkCUJCQqBSqRAaGoolS5bgxIkT4DgOCQkJ9b6HiooKPPnkk+jbty+USiWGDx+OjRs3OtVpzjWWlZWF+Ph4x/Xl7++PsWPH4vXXX2/sj5D0cDSCRUgzqVQqrFu3DmPGjMGcOXOQnZ0NjUYDAHjiiSeQk5ODJUuWYMyYMc0+58yZM3HgwAHMnDkTZrMZGzZswLx583DhwgU8++yzjno5OTn4xz/+gaioKNx1111wd3dHbm4uPvvsM2zZsgVZWVkYMGBAnfMvW7YMO3bswPTp0zFx4kTIZLJWnW/FihXIyMhATEwMoqOjsWnTJjz22GNwc3PD/v37kZaWhqlTpyI6Ohrr16/H3LlzERQUhFtuucVxDqPRiMmTJyMjIwORkZGYN28ezGYztmzZgpiYGLzzzjtISkoCADz++ONITU3F/v378dhjj8HLywsAnBYw79mzB5MmTUJFRQWmTZuG0NBQnDp1Cp9++in++9//IjMzE8HBwU7v48qVKxg7dix0Oh3uvvtumEwmx8+yKXfffTcOHjyI6dOno6qqCp9++immTp2K3bt3Y8GCBTAYDIiPj8elS5fw+eefY9KkSTh58qTT+Y8fP+4IebfffjtiY2Nx8eJFbNq0Cd9//z3S09Nxww03OOovXboUeXl5uPHGG3HXXXehpKQEW7duxYIFC3DkyBG8+eabdfpZUlKCm2++GRqNBvfeey8uXrzo6M++ffsQERHR6PuMiooCIIW72NhYR7k9oO3evRsmkwkKhcKp3P46ACgqKsKf/vQnHD58GOPGjcOkSZNQWlqKr776ClFRUUhLS3M6d0MeeughfPzxxwgJCcEjjzwCo9GIFStWIDMzs8HXmM1m3H777SgqKsKMGTNQWVmJ9evXY9asWdi6dStuv/12AE1fY9nZ2bjpppsgCAJiYmIwYMAAlJSU4PDhw3j//ffxzDPPNNl/0kMxQrqokydPMgAsJCSEvfDCC3W+MjMzneoDYOPHj3cqGz9+PGvor8EDDzzAALCTJ086la9cuZIBYHPmzGGMMfbll18yACwqKopZrdZm9d3e7pAhQ1hZWZmj/Ny5c6xPnz5MJpOx48ePO8pLSkrYlStX6pxn+/btjOd59vDDDzuVv/DCCwwAc3d3ZwcOHKjzupaeT6fTOfUrPz+fKRQKptVq2aBBg9jFixcdx/bs2cMAsOnTpzud67nnnmMA2OLFi5koio7ysrIyNnr0aKZQKNjZs2cd5Q39HBhjzGQyMb1ezzw9PVl2drbTsV27djFBENjUqVOdygEwAOzBBx9kFoulzjkbYv+Z3Xzzzezq1auO8vXr1zMAzMvLi82cOZOZzWbHsaVLlzIAbPny5U7nuummm5hMJmM//PCDU/mRI0eYp6cnGzZsmFP5iRMn6vTHbDaziRMnMkEQ2OnTp+t9j4sWLXK6Jj/44AMGgC1YsKDJ9yuKIvP29nbqy/nz5xkAduuttzIAbOfOnY5jM2fOZADYhQsXHGVz5sxhANiHH37odO7z58+z/v37M19fX1ZVVeUor+9nvW3bNgaAjR49mlVWVjrKz507x3r37s0AsAceeMDp/AMGDGAAWExMDDMajXXONWnSJKf6jV1jTz75JAPAvvrqqzrHLl++XKeMEDsKWKTLsgeshr7++c9/OtV3VcBijLFp06YxAGzp0qXM29ub6XQ6dubMmWb33d7up59+WufYsmXLGAD28ssvN+tcw4YNY3q93qnMHoieeOKJZvepOedbvHhxnfrR0dEMAFu7dm2dY8HBwWzAgAGO51arlfXq1YuFhoY6hSu7r7/+mgFg77zzjqOssZ/D5s2bG/2zmj
FjBuN5npWWljrKADCFQsEuXbpU72saYv+ZZWRkOJVbLBYml8sZgDpBJz8/v04AyMrKYgDYvHnz6m3H/h/6wYMHm+zTpk2bGACWmprqVG4P1+Xl5U7lZrOZyWQyNnLkyCbPzRhjd911F+M4zhGc7WHyp59+YjKZjL3wwguMMSmM+fr6siFDhjhee+nSJSYIArv11lvrPffbb7/NALBvvvnGUVbfzzohIaHBgLNkyZJGA1Z9wXTAgAFMp9M5lTUnYNUOw4Q0haYISZc3adIkbN26tV3b/PDDDzF8+HD89a9/BQBs2rQJ/fr1u+bzjBs3rsGy7Oxsp/KMjAysWLECe/bsweXLl2GxWBzH7NM0tV1//fUNtt2S80VGRtYpsy9Kvu666+o9tmfPHsfzI0eOoLi4GAEBAXjxxRfr1L906RIAIDc3t8F+12RfC5ebm1vvBx3Onz8PURRx9OhRjB492lEeFBQEHx+fZrVRW+0/A0EQ4Ofnh4qKCgQGBjods//ZnD17tk6fz58/X2+f7e89NzfXMY1XXl6ON954A19++SWOHz+OiooKp9cUFhbWOc/AgQPh4eHhVCaTyeDv74+SkpLmvFVERUXhiy++QEZGBmbOnIkdO3ZAp9Ph5ptvxqhRo7Bjxw4sXrwYhw8fxqVLlzBr1izHa3///XdYrVYYDIZ63+exY8cc73Pq1KkN9mH//v0AgJtuuqnOsfrK7Ly8vBAUFFSnvF+/fo1OLdYWHx+PFStWIDY2FrNmzcLEiRNxyy231PlZE1IbBSxCWsDHxwfjxo3Dhg0bMGDAAEyfPr1F5/Hz86tT5u/vD0BaiG6XlpaGu+++Gx4eHpg0aRL0ej3c3NzAcRxSU1PrXcxd81y1tfR89a1Tsq/rauhYzeBWVFQEADh8+DAOHz5cbxsA6gSIhtjP9+mnnzZar/b5GvpzaY6G3qdWq623HJDWA9nZ+7xlyxZs2bKlwXbsfTaZTJgwYQKysrIQGRmJ+++/H97e3pDJZDh16hTWrl0Lo9FY5/X19cfeJ6vV2sg7rFZzHZY9YI0fPx48zyMqKgrLly9HVVUVduzY4VS/5vv85Zdf8MsvvzT5PhtSVlYGnufh7e1d51hjP8fG3r8oio22WdPYsWOxfft2LFmyBOvWrUNqaioAYNSoUVi2bJnTeyakJgpYpEfjeemDtBaLxfGfoV3NgFNbWloaNmzYAG9vb5w+fRovvPACXn311Wtu/+LFi+jfv79T2YULFwA4/wexePFiqFQq7Nu3DwMHDnSqv379+gbP39AnJFt6vtayh5O4uLg6n+Zqzfm++eabRkdBauvITWjtfa65mL8xX331FbKysvDwww/j/fffdzq2fv16xyfg2sLQoUPh6+uLHTt24Ny5czh69Kijz1FRUXj99dexe/duZGRkgOM4jB8/3vFa+/v885//jDfeeKPFfdBoNBBFEVeuXKkz6mj/u9LWxo8fj/Hjx6Oqqgp79uzBN998g3fffRd33nknDh48iJCQkHbpB+laaJsG0qP16tULgPMUDgCIouiYmqgtPz8f8+fPh5+fH7KzszF69Gi8/vrr2Llz5zW3v2vXrgbLak65HT9+HIMHD64ThgoLC+vdVqEprj5fcw0ePBgajQZ79+51GtVpjCAIAFDvqIv9k3bXMuXT0a61z/afR32jpPVdP65kD025ubmOUcLo6GgAwC233AKFQoH09HTs3LkTERERTgFozJgx4Diu1T+bESNGAJA+tVhbfWUt0dg1VpNarcaECRPw5ptv4rnnnkNVVZVjiw5CaqOARXo0+7oc+7C/3fLly3Hy5Mk69UVRxH333YeSkhKkpqaiX79++Oyzz6BWq3H//fejuLj4mtp/9dVXUV5e7nh+4cIFLF++HDKZDHPmzHGUDxgwAHl5eU6/sRsMBixcuNBpCq65XH2+5pLJZFi4cCFOnz6Np556qt6QdejQIVy8eNHxXKfTAQAKCgrq1I2JiUFgYCCWL1+On376qc5xs9lc735LHen666/HDTfcgHXr1uHzzz+vc1wURaewbt8uo/b72LlzZ50RrbZgnwJLSUmBn58fhg4dCgBwc3PD9ddfjw8++ABXrlypM1XWu3dvzJo1C7t378ayZcvAGKtz7j179qCysrLR9u+9914AwMsvvwyDweAoP3/+PN56661WvTe7xq6xXbt2oaysrE65/e+OWq12SR9I90NThKRHe/DBB5GSkoLFixcjOzsbISEh2Lt3Lw4dOoTx48fXGZV69dVXsWvXLiQnJ+OOO+4AIC0mfvvttzFv3jzMnz8faWlpzW4/ODgYERERiIuLc+yDdfHiRbz66qtOezc9+uijePTRRxEZGYn4+HhYLBb8+OOPYIxhxIgRDY62NcTV57sWL774IrKysvD2229jy5YtGD9+PHx9fXH27FkcPHgQ+/fvR2ZmpmN9WnR0NN544w0sWLAAM2fOhLu7OwIDAzFnzhwolUps3LgRd9xxB8aPH49bb73VsTA8Pz8fu3btgre3d7MXzbeXdevWISoqCrNnz8aKFSswatQoqFQq5OfnIzMzE5cuXXKEiWnTpkGv1yMlJQWHDh1CREQEjhw5gm+//RaxsbHYtGlTm/bVHpxqL2K3H7MHv/rWIr377rs4cuQInn76aXz88ccYO3YstFotzpw5g3379uHYsWM4d+4c3NzcGmz/tttuw7333otPP/0Uw4YNQ0xMDIxGIzZs2IAbbrgB33zzjWOqv6Uau8befPNN/Pjjj4iKikJwcDBUKhWysrKQnp6O0NBQ3HXXXa1qm3RjHfwpRkJazL5NQ+09bRqCerZpYEz62Pytt97K3NzcmEajYTExMezYsWN1PrqdmZnJZDIZi4iIcNq7xy4+Pp4BYO+//36TfbF/5L+yspI99dRTrG/fvkyhULChQ4eyDz74oE59URTZe++9x4YOHcpUKhXr3bs3mzdvHrtw4UK9W03Yt1XYsWNHve278nyNfcS9oW0wLBYLW716Nbv55puZRqNhSqWSBQYGssmTJ7NVq1Y57TPFGGMpKSls4MCBju0Qav8cCwoK2GOPPcYGDhzIlEol02g0bPDgwezhhx9m6enpTnUbug6a0tiWHgMGDHDajqI57RUVFbHnn3+eRUREMLVazTw8PNjAgQPZnDlz2ObNm53qnjhxgsXFxTFfX1/m5ubGxowZw9avX8927NjBADi2S2jOe2ysrw2x7ze1atUqp/Lt27czAIzjuHr3VWOMscrKSpaSksJGjRrF3N3dmVqtZkFBQSw2NpZ99NFHTvuGNXQtmc1m9vLLL7OgoCCmUChYcHAwe+211xx7rT322GPNfo8N/Rwbusa2bt3K5s6dy8LCwpinpyfz8PBgQ4YMYc8//zztg0UaxTFWz7gtIaRNTZgwATt37qx32oQQ0jwffPABEhMT8e6779a5RQ8hHY3WYBFCCOnUzp8/X+eXkbNnz+KVV16BIAjX9AlSQtoLrcEihBDSqb3++uvYsmULxo0bBz8/P+Tn5+Pbb79FeXk5Fi9eXGerE0I6AwpYhBBCOrXJkyfjjz/+w
JYtW1BcXAyVSoXhw4dj0aJFTp+2JaQzoTVYhBBCCCEuRmuwCCGEEEJcrEsFrMrKSmRlZTW5MR0hhBBCSEfqUgErNzcXo0aN6nSbBvYEjd2Xj/QcdB0QgK4DIqHroHFdKmCRjtPUPbpIz0DXAQHoOiASug4aRwGLEEIIIcTFKGARQgghhLgYBSxCCCGEEBejjUYJIYQQcs0KCgpw6tSpju5Gh/Dx8UFgYGCjdShgEUIIIeSa5OfnY+zYsT122yQ3Nzfk5OQ0GrIoYBFCCCHkmly+fBmVlZX45JNPMHjw4I7uTrvKycnBfffdh8uXL1PAIoQQQojrDR48GCNHjuzobnRKtMidEEIIIcTFKGARQgghhLgYBSxCCCGEEBejgEUIIYQQ4mIUsAghhBBCXIwCFiGEEEKIi1HAIoQQQghxMQpYhBBCCOkxTp06hQkTJkCr1WL06NFt1g4FLEIIIYT0GBqNBq+88go+++yzNm2HdnInhBBCSIMYYzhVDvzvCpO+LjPs2Wtp1mvPVTKca4PbFfZxA/q4cY3WWbZsGfLy8rB69WoAQElJCUJDQ3H06FHccsstyMjIcH3HaqCARQghhBAAgEVkOFJSHaayLjNkX2EoMUnH/dVApDeHaQM4/KcZ51udI+LFLNHl/XxhJI/Fo4RG6yQmJiIsLAwpKSnQarVYs2YNYmJioNPpXN6f+lDAIoQQQnogg4XhYLEUov53WQpVB4oYDFbpeJCnFKaeGs4j0ptDpA/nGDXKyhKaFbAWDOYxfYDrVyP1cWu6jpeXF+Li4pCamork5GSsWrUKaWlpLu9LQyhgEUIIId1cqUkaiZLClDQ6lVMCWBkgcEC4FzDSm8PsEClMXefNwUvZ+BRcc/Rx45oVhtpKcnIyYmNjERISAn9/f0RGRrZb2xSwCCGEkG7kfKUtSNVYM3WiXDqmEoDhOg43+/NIGiqNUA3TcVDLWh+mOqPw8HDo9XosXLgQKSkp7do2BSxCCCGkC2JMCk7/qxWmzldJx7UKKUDFDOAR6cMh0ptDuBcg47tnmGpIYmIikpKSEB8fDwAwGo0ICQmB0WhEaWkp+vXrh/vvvx9LlixxabsUsAghhJBOziJKU3o1w1T2FYZS2+LzPm5SmJoXxmOkLUzpPQGO61lhqj7p6elYtGgR5HI5AECpVKKgoKDN26WARQghhHQiVRZpsbkUpqoXnxtti89DNNJ6qb+OsC0+9+bg38SWBT1RYWEhoqOjodPpsHTp0nZvnwIWIYQQ0kGKjdJIlH1kKusyQ24pINoWnw/tJY1M3RsqhakR3hy0CgpTzREQEIDc3NwOa58CFiGEENLGGJM23PxfrTB16qp0XC0AI7w5jO/D4/FhHCK9gYheHFTddPF5T0ABixBCCHEhs8iQUwzsL2LYf4U5Hi8ZpOO9lNKoVFyQtPh8pDeHQVpA6GGLz7s7CliEEEJIC10xSOEpu0aQ+qMEMNs2Lw/yBEboOCwawmOEjsNIHw6BHrT4vCeggEUIIYQ0wSoyHCuD04jU/iKGsxXScbUADNNxuN6PQ2K4tFZquI6DhtZL9VgUsAghhJAaSk0MB5yCFHCoiKHK9im+vu7SqNTcgdKo1AhvDgM1NMXXVWzfvh3PPvssysvLwfM8YmJi8Morr7h8VJECFiGEkB5JZAynyp1HpbKvVC88l/PSp/hG6DjcE1IdprxVFKS6sl69emHdunUIDg6GwWDAbbfdhnXr1mHOnDkubYcCFiGEkG6vwsxwqJhh/5XqxecHihjKzdJxX5X0Kb64IB4jvDmM0Em7nisEClNd1bJly5CXl4fVq1cDAEpKShAaGoqjR49Cp9MBAFQqFa677jqcOHHC5e1TwCKEENJtMMZQUFF3rdSxUoBB2lsqTCuFqWmBtjDlzaG3mhaetwVr6RVYy4pcfl5Bo4Og9W60TmJiIsLCwpCSkgKtVos1a9YgJibGEa4A4Pz589i4cSO+++47l/eRAhYhhJAuyWhl+KPmdgi2MFVklI5rFdL03qR+PJ4ezuE6bw5DeqHb3ti4M7q6+zuUf/+py8/rOeleaO+4v9E6Xl5eiIuLQ2pqKpKTk7Fq1SqkpaU5jpeVlWHatGl4+umnMXLkSJf3kQIWIYSQTu9CJauzr1RuCWBh0vFQjTQq9XhE9RQfbYfQ8TxumgJ1xI0uP6+g0TVdCUBycjJiY2MREhICf39/REZGAgDKy8sxefJkTJ8+HU8++aTL+wdQwCKEENKJlJkYjpRK4elQUfX+UheqpOPuMmk7hJv9eTwyRApVw3QcPOQUpDojQevd5FReWwoPD4der8fChQuRkpICALh69SomT56MSZMm4e9//3ubtU0BixBCSLtiTNo/KreUIbeEIacEyC2Rvi+srK4X6CFN8SWGV3+CL0QD8DQqRa5BYmIikpKSEB8fDwB466238Ntvv6GiogJffPEFAGDmzJn429/+5tJ2KWARQghpE0YrQ15pzSAljUwdKWW4avv0noIHBmqBcC8OD4XxCPfiEK6Vbh3jSZt0EhdIT0/HokWLIJfLAQB/+9vfXB6m6kMBixBCSKsUGZgtRKFGkGI4UQ6ItjVSvZTAYC8OI3TA3cE8BntxCPfioPcEZLRBJ2kDhYWFiI6Ohk6nw9KlS9u9fQpYhBBCmiQyhtPl0mhUVqEc+SarI0zZb2LMAdB7SqNR0wKl0ajBXtJzHxUtOCftKyAgALm5uR3WPgUsQgghDpUWhqOl1Wui7F9HSgGD7VYxakGJMC8R4V4cogPsQYrDQC1tgUCIHQUsQgjpYRiTRp1yHAGqOlCdviptyAkA/mpp9OlGPx4Jg+AIUu6mYvh4N+9j8oT0VBSwCCGkm7KIDCfLawYpW5gqZSi2bcYpcECIRgpPs4Jti8y9gDAtB10D99wrcv3G3IR0OxSwCCGkC2NM2rn8eJk0tVczTB0rA8yiVM9DDoRrpTVRUwPtQUra9kBJ99sjxOVaFbCOHTuGBx54AJcvX4aXlxdSU1MxZMiQFtV78cUXsXjxYhw8eBARERGt6RYhhHQrjDGcq5RCVF6Z/ZHheBmQV8ZQYqqu29ddClLRATwWDYEjSAW40SJzQtpTqwLWggULMH/+fCQkJGDjxo2YN28eMjMzr7leVlYWfv31VwQGBramO4QQ0mVZRYb8q8Dxcoa8Uobj5bA9SkGq0lJdt587EKLhMMIbmBHEI1TDIUTDIVQDaGjvKEIalZmZiYULFwIAzGYzbrnlFrz99ttQKpUubYdjjLGmq9V18eJFDBo0CJcvX4ZMJgNjDH369MGvv/4KvV7f7HpGoxETJkzAZ599hqioKHz77bcNjmBlZWVh1KhRGDt2LLRaLWbMmIG4uLgWvXFybYqLi9GrV6+O7gbpYHQdtI7RCpyu4HGqgsPJqzxOVvCOx/wKDmYmhSOBYwh0Y9C7iwjyEBFkf/RgGOAuQi107Pug64Ds378f0dHR2LdvX5vcKLktVVZWQi6XQy6XQxRFxMfHY8KECUhOTm7W6+1ZZPv27RgxYoSjXKdz/uBHi0ewzpw5g4CAAMhk0ik4jkNg
YCDy8/OdAlZT9f7xj3/gvvvuQ1BQULPbXrlyZZf7gXYHtS8e0jPRddC4q2ZpxKn2NN7xMmmEyv4brVIAQjylkajpvtJaqFANh1CtdJNieSfffJOug55Nq9U2q96VqiJcqSp2efve6l7wVjd+DS5btgx5eXlYvXo1AKCkpAShoaE4evQo3NzcAAAmkwlVVVXgef6a+6DVahv9e9CqKcLa8/kNDYY1VC8zMxO///47Xn/99dZ0gxBC2lWRoW54sq+NOl9VXc9TDoRqpBB1fQjvmMYL1XAIcKd76pHu7+tj3yP14HqXnzdh2Gw8OPyeRuskJiYiLCwMKSkp0Gq1WLNmDWJiYqDT6XDq1CnExsYiLy8Pd955J+bPn+/yPrY4YPXv3x8FBQWwWCyOqb8zZ87UWUfVWL3169cjNzfXMXpVUFCASZMm4YMPPsAdd9zRundGCCEtxJgUlGqvhcorrbuo3EcF2xoo4La+1SEqRMPBl3YvJz3c9IGTcHO/611+Xm9101PUXl5eiIuLQ2pqKpKTk7Fq1SqkpaUBAPR6PbKzs3H16lXcd9992Lx5M2bPnu3SPrY4YPn5+SEyMhKffPIJEhISsGnTJuj1eqfpwabqPfPMM3jmmWccdfV6faNrsAghxFWsIsOZirojUPaRqZqLyvu6SyFquE5aVB7iKU3lhWgALS0qJ6RB3mpdk1N5bSk5ORmxsbEICQmBv78/IiMjnY57eHhg9uzZ+PTTTztPwAKA1atXIyEhAa+99ho0Gg3Wrl3rODZlyhS89NJLGD16dKP1CCGkrYlMCk2/X2LYe4nh90sMWVeYI0QJHDDAQwpRt/jzSBgI20gUh2AN3f6FkK4qPDwcer0eCxcuREpKCgDg+PHjCAwMhFwuh8lkwubNmzF8+HCXt92qgBUWFlbvtgwA8N133zWrXk2nTp1qTXcIIURahlDhHKb2XmYotU3rBXsCo305xOh5RPSSQtQAz86/qJwQ0jKJiYlISk3XK/8AACAASURBVEpCfHw8ACAjIwP//Oc/IQgCLBYLoqOj8fe//93l7dJO7oSQLu1CZXWIsj9etC007+sOjPbh8JfhPEb7cBjty8G7gdu/EEK6p/T0dCxatAhyuRwAMG/ePMybN6/N26WARQjpMoqN0qiUI0xdkkarAMBbCYzx5bAgnMdoXw6jfTgEuFOYIqSnKiwsRHR0NHQ6HZYuXdru7VPAIoR0SlfNDP+zBanfL0thKq9MOqaRA6N8ONwTIoWpMb4cBnjQJ/YIIdUCAgKQm5vbYe1TwCKEdDiDheFAkS1M2UaockoAkQFqAYj04XBnoDTNN8aXw0At7SFFCOncKGARQtqVWWT4oxg1wpSIg0WAWQTkPDBcx2Fcbx5PREhhakgvQEYL0AkhXQwFLEJImxEZw9FS50/0/e8Kg8EK8BwwxEtaNzUvTApTw3UclAKFKUJI10cBixDiEowxnCqH0yf69l1mKDdLxwdqpO0RZgbzGOPL4TpvDh5yClOEkO6JAhYhpEUKK2ptj3CJ4YpROhboIW2P8Nx1Upga6cOhl5LCFCGk8zAYDBg5ciTc3Nywd+9el5+fAhYhpFl+vSDi6zwFDldYsPcSQ2GlVO6nBsb4cHh0qBSmRvlw8HejMEUI6dz+9re/YezYsdi/f3+bnJ8CFiGkUafLGf68x4pNJxm0cgXG+AEPDOIxxrZxZz932h6BENL5LFu2DHl5eVi9ejUAoKSkBKGhoTh69CgOHz6MY8eO4cknn6SARQhpXwYLwxsHRLyWLcJLCXwSJWBSrxL4eHfcjVsJIV2LqcwMU5ml6YrXSKGRQaGRN1onMTERYWFhSElJgVarxZo1axATEwOlUonHH38cX3/9NY4dO+byvtlRwCKEOGGM4dt8hsczrci/CjwxjMffI3l4KjgUFXV07wghXcm53UU488NFl5+3/+1+GDDZv9E6Xl5eiIuLQ2pqKpKTk7Fq1SqkpaXhL3/5Cx555BH07duXAhYhpH0cK5WC1XdnGCb25bBlsoBwL5r+I4S0TJ+bdPCO0Lj8vApN8+JLcnIyYmNjERISAn9/f0RGRuLnn3/Gd999h5deegkGgwHFxcUYOnQoDh8+7NI+UsAihKDCzPBqtog3D4jo4wZsvk1ArJ6jtVWEkFZRaORNTuW1pfDwcOj1eixcuBApKSkAgAMHDjiOZ2Rk4KmnnmqTTxHyLj8jIaTLYIzh8+MiwtMsWH5QxLPX8fhjpgx3BfEUrggh3UJiYiIsFgvi4+PbtV0awSKkhzpUxPDobisyzjHEDuCw/EYBQRoKVYSQ7iU9PR2LFi2CXF53JG3ChAltMnoFUMAipMcpMTIszhKx8rCIEA2wdbKASf1pMJsQ0r0UFhYiOjoaOp0OS5cubff2KWAR0kOIjGHtUYZnfreiwgy8NobH4xE8FHTvP0JINxQQEIDc3NwOa58CFiE9wN5LIpJ2i9hzkeGeEA7LbhDQ152CFSGEtBUKWIR0Y5cNDM/9bsUHuQwROiBjqoDxfWg6kBBC2hoFLEK6IavIsDpXxPN7RYgMeGssj4VDeMh4GrUihJD2QAGLkG7m5/MiHt1tRfYVYF4Yh9fGCPBTU7AihJD2RAGLkG7iXCXD03us+CSPYYwvhz0xPK73o+lAQgjpCBSwCOniTFaGtw+LeDFLhEoAPhgn4MEwDjxtFEoIIXVkZGRgypQpGDRokKMsMzMTarXape1QwCKkC/uxQERyphVHS4FHhvB4cRSPXkoKVoQQ0pghQ4a02QajdhSwCOmCTpczPPmrFZtPMfypN4fPowUM96ZgRQjpXKquXoCh4oLLz6ty94faw7/ROsuWLUNeXh5Wr14NACgpKUFoaCj+/e9/u7w/9aGARUgXUmVhWHZAxJJsETol8FmUgNkhdFNmQkjndPLgx8j59U2Xn3fwjX/GkLFPNVonMTERYWFhSElJgVarxZo1axATEwOdTocjR45g5MiREAQBDz74IBYtWuTyPlLAIqQLYIzhm3yGxzOtKKgAnojg8XwkD08FBStCSOcVNOx+9Am+3eXnVbk3PnoFAF5eXoiLi0NqaiqSk5OxatUqpKWlISQkBAUFBdBqtSgoKMCUKVPg4+ODWbNmubSPFLAI6eSOljA8lmnF1gKG2/ty+O9kAWFeFKwIIZ2f2qPpqby2lJycjNjYWISEhMDf3x+RkZFOx/v164d77rkHu3btcnnAos9wE9JJXTUzPPubFRGbLMgtYfhiooCtd1C4IoSQ5goPD4der8fChQuRlJQEADh37hxEUQQAlJeX49tvv60TvFyBAhYhnQxjDOuPiwhPs2DFIRHPR/L4Y6YMsXqe1loRQsg1SkxMhMViQXx8PABg06ZNGDZsGEaMGIEbb7wREydOxIMPPujydmmKkJBO5GARw6O7rdh5juEuPYflNwrQe1KoIoSQlkpPT8eiRYsgl8sBAElJSY7RrLZEAYuQTqDEyPDCPhH/+kNEqAb4/g4Bt/ejAWZCCGmpwsJCREdHQ6fTYenSpe3ePgUsQjqQyBhSjzI885sVVVZgyRgej0XwUAg
0akUIIa0REBCA3NzcDmufAhYhHeT3SyKSfhHx2yWGe0M5pFwvIMCdghUhhHQHFLAIaWeXqhie+92KNUcYhumAnVMF/KkPTQcSQkh3QgGLkHZiERlW54h4fq/08eB3buKxYDAPGU+jVoQQ0t1QwCKkHew6JyJptxUHi4CHwzm8OlqAr5qCFSGEdFc0L0FIGyqsYLh3uwV/+tYKlcBhT6yAf4+TUbgihJAOdPDgQUyYMAGDBw9GWFgYNm/e7PI2WhWwjh07hptuugmDBg3C9ddfjz/++OOa691+++0YPnw4rrvuOowbNw7Z2dmt6RIhnQJjDCsOWhGWZsGPZxk+/JOAzBgBY3zpdxpCCOlIlZWViI2NxSuvvIKcnBwcPnwY48aNc3k7rfrXfsGCBZg/fz6OHj2Kp59+GvPmzbvmehs2bMCBAweQnZ2NP//5z3jooYda0yVCOoV/54p44lcRDwzkcXSWDA+G8eBpF3ZCCGk3y5Ytw4IFCxzPS0pK4OPjg5UrV2Ls2LG45ZZbAAAymQy+vr4ub7/Fa7AuXryIrKws/PDDDwCAuLg4JCUl4dSpU9Dr9c2u5+Xl5ahbWloKnm868yUlJUGr1WLGjBmIi4tr6Vsg16C4uLiju9Bl/HaFx6O/uGFesBkvDTZCrACKKjq6V65B1wEB6Dog0v/XzXGuyoBzBoPL2++jUqGPWtVoncTERISFhSElJQVarRZr1qxBTEwMzp8/D5VKhalTp6KgoADDhw/Hm2++ec0hq7S0FEVFRY7nOp3O6XiLA9aZM2cQEBAAmUw6BcdxCAwMRH5+vlPAak69uXPnYseOHQCArVu3Ntn2ypUrMXLkyJZ2nbRQ7YuH1HWukuHBPRbc4Mfh3QluUAjuHd0ll6PrgAB0HfR0Wq22WfVWnziJF/9w/WafLwwJx+Khgxut4+Xlhbi4OKSmpiI5ORmrVq1CWloaPvzwQ3z//ff49ddfERAQgOeffx6PPPIINmzYcE190Gq1jf49aNWnCGvfeJYx1qJ6H330EQBg7dq1+Mtf/oLvvvuuNd0ipEOYrAzx26zgOSDtNoF2YyeE9HgLgoMwPaCPy8/bR9X46JVdcnIyYmNjERISAn9/f0RGRmLAgAGIiopC3759AQD33nsvpkyZ4vI+tjhg9e/fHwUFBbBYLJDJZGCM4cyZMwgMDGxRPQB44IEH8H//93+4cuUKvL29W9o1QjrE45kifr/E8NNUAb3dKFwRQkgfddNTeW0pPDwcer0eCxcuREpKCgBg1qxZWLNmDcrKyqDRaLB161aMGDHC5W23eJG7n58fIiMj8cknnwAANm3aBL1e7zQ92FS9srIyFBYWOup+8cUX8Pb2pqFn0uV8eETEqhwR/7pZwI3+9ElBQgjpLBITE2GxWBAfHw8ACAwMxLPPPouxY8dixIgR2LZtG/71r3+5vN1WTRGuXr0aCQkJeO2116DRaLB27VrHsSlTpuCll17C6NGjG6xXWlqKuLg4VFVVged5+Pr64ttvv60zpUhIZ/bbRRELf7YiMZxDYjiFK0II6UzS09OxaNEiyOVyR9ncuXMxd+7cNm23VQErLCwMmZmZ9R6ruY6qoXr9+/fHb7/91pouENKhLlQyzNhmRaQPh3duEjq6O4QQQmwKCwsRHR0NnU6HpUuXtnv7dKscQlrILDLMSrfCIgKbbhOgpEXthBDSaQQEBCA31/WfYGwuCliEtNBf9ojYfYFhx1QBfd0pXBFCCKlGAYuQFvj4mIi3DolYeROPW3rTuitCCCHO6H8GQq5R1mWG+busSBjEYdEQ+itECCGkLvrfgZBrcNnAMONHCyJ6cVh1s0CfeCWEEFIvmiIkpJksIsPsdCsqLMBPEwWoZBSuCCGE1I9GsAhppmd/F5FxjmHDrQICPShcEUJIV/TRRx/huuuuc3z5+PhgxowZLm+HRrAIaYbPj4t444CIf97IIyqAfi8hhJCuqvYmo8OGDcO9997r8nYoYBHShANXGB76yYo5IRwei6BwRQghzSWWVEAsqXD5eXkvd/Be7o3WWbZsGfLy8rB69WoAQElJCUJDQ3H06FHHLfl+++03XLhwAdOnT3d5HylgEdKIIgPDXT9aMFADvP8nWtROCCHXwrDjEAxf/u7y86pix8DtrhsarZOYmIiwsDCkpKRAq9VizZo1iImJcbrf8Zo1a3D//fc73UbHVShgEdIAq8hw7w4rSkzAtikyuNGidkJID8QYAzMbwQyVEI1VYIZKGPOPNuu1qqgIKCKDXN6npkavAMDLywtxcXFITU1FcnIyVq1ahbS0NMfxyspKfP7559i9e7fL+wdQwCKkQf/YJ+KHswxbJwsI0lC4IoR0HYwxwGyCaKwEM1RBNFSAGasgGiprPFZKj4baz6uDlGisAjNWAqLodP6Si2XN6kdzpvLaUnJyMmJjYxESEgJ/f39ERkY6jm3cuBGDBw/GkCFD2qRtCliE1GPzSRGvZYtYej2Pif1o3RUhpO1VhyJ7uKkVdhwhqIHjtV5XOxQ54XhwKjfwSrXzo8oNnNZHelSppUelrVypdjx6Hz0BrJvcfn84LRQeHg69Xo+FCxciJSXF6diHH36IefPmtVnbFLAIqeWPYoYHdloxM4jDX4ZTuCKENJ9oMkCsKIdYUQqxoqzG9+UQq67WP5LkCEVVgGht+OQcXx1yVGrwSrfqUKTR1QhB7k71HAGpRpDi5MpWrSmVXSpv8WvbW2JiIpKSkhAfH+8oO378OPbt24dvvvmmzdqlgEVIDaUmhtgfLNB7AB+Op0XthPRUjDEp+FSUQawog7WiDGJlGcSrtseawcl2zFpRBphNdU/GC+DdNeDdPJ1Hgjx7OYJPdQhqaNTIDZyidaGop0pPT8eiRYucFrKHhISgvLxtQyIFLEJsRMZw3w4rLhmA32Nl8JDTP2SEdAdMtEKsvOoIS/UFJGudY2X1jiZxciV4d0/w7lrbowYy/37g3TTg3TUQ3KVH3t1TKvPQSOGIglG7KywsRHR0NHQ6HZYuXdru7VPAIsTm5SwRW/IZvp0kIFRL/xgS0hkxi7nWqFJ5dXCq/VVZDmtFKVhVBcBYnXNxKjdbGNKAd9NA1ssXfL9QR3CyByR7eOLdPcErVB3wrklLBAQEIDc3t8Pap4BFCICvT4tYnCXi5VE8pgTSuitC2gsTrVJYKiuGWFYEa3mx9H15MaxXSyBeLXUKUcxYVfckHCdNv7l5gvfQgnfzhMy/f3V4ctdAqHHMPl3HyVy/9xEhdhSwSI93pITh/h1WxA7g8FwkhStCWsu+fslaViQFpbJiWMvrCVDlRRDLSwHm/Gk3Tu0BwbMXeE8vCO4ayHT+TuGpZnDi3TXg1e7geKGD3i0h9aOARXq0cpO0U3uAO7B2ggCe1kkQ0iBmNkkBqeA0qs5aGw5Q5cV1F3vLFRA8e0nBSaODYkA4BE0v8J69IGh0tkfpOCdXdMwbJMSFKGCRHktk0nYMBRXSonaNgsIV6XmkKbpyaTSprMboki0sVQeoIrDKq47XGQGA48F7aiF4SgFJ5t8fytDh4DU6KUzVCF
CcihZ6k56FAhbpsV7PFvHFKYYvJwoI86J/+En3wRgDM1bWCEvFTqNN9uk5a1kxxKsldTaktE/R2QOSPCDYaYTpKhPQq78evLuGpuYIaQAFLNIjbT0j4vm9Iv4xkkeMntZdka5DNBpgLb0Ma+kV6bHkCkT7c/uoU1kxmNno/EKZHIJtZInX6KAIbPkUXWVREQTPXm34LglpO4wxPP300/juu+8gCAK8vb3x/vvvIzQ01KXtUMAiPc7xMoZ7tlsxpT+HF0ZSuCKdAxNFiFdLnIJTdZCqLmOGCqfXcWoPCFodBK0PZL59oQwZRlN0hDTi66+/xk8//YTs7GzI5XK88soreO6557BhwwaXtkMBi/QoFWZpUbuPCvgkiha1k/ZR76hTmS04lVSPPjltbMkL0oiT1huClw/k/pEQtD4QtN7gbWWCxhu8kvZlIp2XqfwiTFcvufy8Cg9fKDz9Gq2zbNky5OXlYfXq1QCAkpIShIaG4s0334TRaITBYIBMJkNZWRn69evn8j5SwCI9BmMM836y4kQZsCdGBi8lhSvSOi0edVK5Q/Dylkad/PtDOeg6KTzZygStN3gPL3A8jbCSru1C1noU/LTS5eft96ck9B+f3GidxMREhIWFISUlBVqtFmvWrEFMTAzuv/9+ZGdno3fv3vD09ETfvn2xc+dOl/eRAhbpMd48KOLzEwxptwoYqqNwRRonmgyOESaxZmBq7qiT1hvygdeBrxWcBK0PjTqRHsN/5Gz0GnSry8+r8PBtso6Xlxfi4uKQmpqK5ORkrFq1CmlpacjKykJubi7Onj0LjUaDZ555BklJSUhNTXVpHylgkR5h21kRf/1NxDMjeMQH06hAT8asVmnUqayozkhTzdEnVnXV6XXNG3XS0qfqCKlB4enX5FReW0pOTkZsbCxCQkLg7++PyMhIJCUlISoqCl5eXgCABx54AFOmTHF52xSwSLd3qpxhdroVtwVweGU0havuipmMjk/RWcuKpE0vy4pgLS2qsUVBkbQtQc370vG8bdTJxzbqNIJGnQjpJsLDw6HX67Fw4UKkpKQAAIKDg/H999/jiSeegFwuxzfffIOIiAiXt00Bi3RrlRZpUbtGAayLFiDwNDXYlTDGwKquVu/jZA9NZTVCky1A1V7nBEEubTug0UnbEugHOz0XbF+8pxeNOhHSjSUmJiIpKQnx8fEAgEceeQQ5OTkYNmwYFAoF+vTp41gI70oUsEi3xRjDgl1WHCkBMmNk0KkoXHUWTLRCvFpaHZRqhiZbmLKPRtW+5QqndJO2H7AFJHlAsC009XKEJkHjDc7Ng7YlIIQgPT0dixYtglwu3dxbqVTi/fffb/N2KWCRbuudwyI+yWP4LErACG/6j7Y9MLOp3tDkuE+dY+Sp7g1+eQ+tY3TJaT8nbc3RJh1N1RFCmqWwsBDR0dHQ6XRYunRpu7dPAYt0SzvPiXjyVxFPDuNxTyitu2otxhisRRdgPXUMlScttUJU9YhT7YXhEGS2ncOl0SVFYFitKbpejk0xOYH+OSKEuE5AQAByc3M7rH36F410O2euMszcZsX4PhyWXk/hqiWY1Qpz4QkYTxyG6eQfMJ48DLH0CgDpJr+cQuUUlOR99Lbvq6fpeI0OvJsn7eVECOmRKGCRbsVgYYjbZoVaBqyPFiCjRe3NIhoNMJ3OhenEYRhPHobpVA6YsQoQ5FAEDoT76FuhCBqCCqUndP2DwKvcOrrLhBDSqVHAIt0GYwyLfrHiQBHDL9Nk8FVTuGqItbxYClO2QGUuyANEEZybB5T6IfCcOBvK4KFQ9B/kdOPfqqIiCleEENIMFLBIt/Fejoj/HGVYO17AKF8KV3aMMVguFsB0UgpUphOHYblcCAAQdP5QBg+F+w2ToAweCpl/IE3pEUKIC1DAIt3CL+dFPJYp4tGhPOYO6tkBgVnMMBUcdwpUYkUpwPGQBwRBOXg0NEFDoQgeAplX07ebIIQQcu1aFbCOHTuGBx54AJcvX4aXlxdSU1MxZMiQZtczGAyYPXs2/vjjD7i5uaF379547733oNfrW9Mt0sMUVjDEb7PiRj8Ob97Y88KVWFUB06kcGE8cgvHEYZjzj4KZjeDkSij04XC/+U5puk8fDl7l3tHdJYSQHqFVAWvBggWYP38+EhISsHHjRsybNw+ZmZnXVG/+/Pm44447wHEcVq5cifnz5+OHH35oTbdID2KySuFK4IG0WwXIe8CidkvJJcf6KdOJwzCfOwkwBt5DC0XwUGimzIUyOALyfiG09QEhpE3l5OR0dBfaXXPfM8dYzZtyNd/FixcxaNAgXL58GTKZDIwx9OnTB7/++qvTCFRz6wHA3r17MXv2bOTl5dXbZlZWFkaNGoV9+/Zh5MiRLek2aaGioiLodLqO7kYdC3+24sMjIn6aJuAGv+43esVEEZbzpx2L0U0nDsNafBEAIPPtC0XQUGl0KngoZL5923zn8s56HZD2RdcByc/Px+DBg1FZWdnRXekQbm5uyMnJQWBgYIN1Wvzr7ZkzZxAQEACZTDoFx3EIDAxEfn6+U3Bqbj0AePvttzFt2rQm205KSoJWq8WMGTMQFxfX0rdArkFxcXFHd6GOj0/K8V6OCitGGjBQZkZRUUf3qPWYxQTx7AmIZ47Amn8EYsFRwFAJ8AL43nrwYaOlzTr7DQLnoQUg7UtlBIB2+Bl1xuuAtD+6DoiHhwe2bt0Ki8XS0V3pEN7e3vDw8EBRjf94av/S0ar5g9q/LTc0GNaceq+99hqOHTuG9957r8l2V65cSSNYHaAz/ca656KIp7OtWBDO47FRnh3dnRazVpTZFqP/AdPJwzDlHwOsZnBKNyiCBkMZFQdFcAQUgWGd5hYxnek6IB2HrgMydOhQug4a0eKA1b9/fxQUFMBisTim/s6cOVNnuKw59d544w1s3rwZ27Ztg5sb7bFDGnehUtpMdJQPh7du6jrTgowxWK+ct+2OLq2hslzIBwDwWm8ogyPgNXICFEFDIQ/Qg+OFDu4xIYSQlmpxwPLz80NkZCQ++eQTJCQkYNOmTdDr9XWm/Zqqt3z5cqxbtw7btm2Dl5dXa94L6QHMIsPMdCusIrDxNgFKofMuane+3YwUqMQyaThZ1nsAlCER8Jx4N5RBQyHo/Nt8/RQhhJD20+JF7gBw5MgRJCQk4MqVK9BoNFi7di2GDh0KAJgyZQpeeukljB49usF6BQUF6N+/P4KDg+HpKU3zKJVK7Nmzp972aJF7x+ksi1qTd1ux6g8RGVMF3Ny7c45eMdGKil+/R9l3ayFeLZVuNzNgEJRB0mJ0pX4IePeuOa3ZWa4D0rHoOiAAXQdNadUarLCwsHq3ZQCA7777rsl6/fr1a3DdFiG1fXRUxDuHRbx7M99pw5XxxGGUbH4X5oLjcBtzG9zH3gFF/4FOt5shhBDS/dEmOaRL2HeJYcHPVjw0iMP/De584cpachml36xB5b4dkPcfCN/H/wmlfnBHd4sQQkgHoYBFOr1LVQwztlkwTMfhXzcLnWqtErOYUJ7xBcp/WAdOoUSv2U/A7fqJdD8/Qgjp4
ShgkU7NIjLM3m5FlQXYdJsAlaxzhCvGGAx//IaSL96DtegiPMZNh2bSveDdPDq6a4QQQjoBClikU3vmNxE7zzGk3ymgv0fnCFfmC2dQ+uW/Ycj5HcpBkfB5eDHkvQd0dLcIIYR0IhSwSKe1Lk/EmwdFrBjLY3yfjp9yEw0VKPthHa7u/BKC1hveD/0DqmFjO9WUJSGEkM6BAhbplPZfYZj3kxX3hXJIHtqx4YqJIir3bkfpt2vAqiqhmTQHnhPiwCmUHdovQgghnRcFLNLpFBkY7vrRgnAvYPW4jl3Ubso/gpJNq2A6nQt15Hhopz8MWS/fDusPIYSQroECFulUrCLDPdutKDMB2++Uwa2DFrVby0tQuuU/qNzzA+R99PBNSoEydHiH9IUQQroyxhiYCDArc3yJNb6v/bz2MWaFc5nIIFpq1RcZmKXWcbHm+dBk2/0n+sF/TC+XvW8KWKRT+fs+EdsKGb6/Q4Des/3DFbNacHXXNyjb+jHA8/CKWwT3sVPACXRfQEJI58JYjZBgaeTRIjpChHM5q1EuOgKK6FTOapSL1YHEwmAymnGauyKdxxZmmC3YVIcYKdi4Eidwti+Ad3wvffG1v+c5cLLqMkHJO8p4XjoHJ5PqqX1cuyE0BSzSaWw8IWJJtohlN/C4rW/7r7syHMlCyeb3YLlYAPebpkAzZS4Ed02794MQ0nkxkUE0ixDNtiBi/94sSs8tNZ6baz2vGWpqhx5LdXBpKPTUDDf2563FCRx4WwCxP3IyWzixPwq8dEzGgZfzENRSOW8BVG6q6lBjCypNhh4BTRxv+Bh4dJkPFlHAIp3CwSKGhJ1W3B3M4c/D2jdcWS6fQ8lX78NwcDcUwRHw+/M7UPQLadc+EEKazz5y4xRs6gs09QSc6sd6XmsPQWYRVrMtBNU6BxOvsbM8IMh5KZzIOPAyvkZw4aqDi32ERS1UhxxZdbBwCj8yvjp8yOoGJL6Z9TiBa1VY6Q73ImSMwSxaUGWpgkJQQC1TuezcFLBIh7tUxTDtewtCNcCaP7XfonbRaEB5+uco374RgrsWurnPQB05vsv8dkRIZ2Ef1bGa7GHFOeRY6ymz16t+Tf2vrTdAWRhwjYM3joAj58HLbY81n8t4CEoecvcGjjue1zpW6xz245yMk4KVQP+euIJVtMJgNaLKXAWDxYgqi8H2WP1cKqsur7IYYahRXlWjfs1yqy01PzrqYcSHT3NZnylgkQ5lsjLM+NEKgxX4epIM7vK2/8eIMYaq7F0o/ep9AuvuTQAAIABJREFUWMtL4BkdB8/bZoNXuu43F0I6Us1pLKtZhGiqL9zUF3yqv28sLFkMFkC84Ci7pqkq24iOI6g4fS89ytQCeI2s3uDSVEiqE3DsIzc8BZ22xhiD0WpyhJdrDjxmA6qstvq2IGWwSuUm0dxk+wLHQyVTQS1T1XpUQi1ToZfKy1EuHVM61Q3tFeTSPw8KWKTDMMaw8GcrfrvEkDFVQGA77NRuKjwhbbtw/CBUEWPhG5sImU9Am7dLerY6U1r1BB+rqW7QkcoaGvFp6LXXGHg4QFBcW+AxWoxw17qDl3MNhiWhnvPwCl5aR0M6lMhEW6ixBx+DUyiqMhucygwWAypt5fbAY7AYcNVYATMz1whTRrBmDC2qBCVUjoCjhlomPVfLVfCW66CSKeEmV0ElqBzlKpkKakEJtVwNlb2+U1BSQ87LOtUMBAUs0mFWHBLx4VGGteMFjPVv23VXYkU5Sv/7ESp+2QKZbwB8FrwC1eDRbdom6dwYsy0Wri/s1DPqU7dMWqcj1gpBUj1WpwzXsHbHMSqjqA4uQo2QIih5yD1k1SGmVkASFPWHJaGespasw+kOa2+6AnsQcgpANUJOpWN6rMY0mbnKaWSoOkA512sKz/E1AozK6Xt3hRt83HSABejl4eUUlOx16gtAUjBSguc6/s4c7YECFukQ/z0j4qk9Ip4ezmPuoLb7y8ZEKyoyt6JsSyqY1Qrt9HnwGDcdnEzeZm2StiVaGawGK6xGEVaDCKvRCotBlMpqPjfanteuYxRhsT1eU+hxBBneeeTGVuY0yqOoNYpT53X11LGP8NB0VpdjES2OEOM8PeYcapxCUs0RI0vdESMpSBmbbFvgeFu4sQUZudoRcjwU7vBx00EtU8NNVms0yDF1pq47pSZXQcHLmwzeFLQbRwGLtLucYobZ6Vbc2Z/Da2PaLlwZjx9CyaZ3YS48Abfrb4d2agIEDf1j0BEYYxBNrOEw1EhgshhrHDNYIZobn4IQlNIIj6ASIKh4yFQCBCUPtacMglKATGU7ZqvHy5s3+tOZph7ItZNGgwz1rgNqaiF0zfBUX5lZtDTZvsAJUMurR3WkL2lUR6PwgJ+bj3RcUNnqqeusEZLCk7LGsc43LUaqUcAi7eqKgWHaDxYEegCfRgkQ2uA3dUvJJZR+vQZVWRmQB4bB74m3oBgQ5vJ2ehKrSYSx2ITKc0bgfJkj8Fgco0RW5+fGGmX20aJGchEncE5hSLCFIIVGBrVK6VQmU/EQlFJ4cgpMKh6CgqfRny7K/nF5o9UIk9UEo8UEg9Ueaur7lFjN5/ZgZHAKQTVHjoxWU5N94DnesT5IbRsNqrleSOPm61gvVN/6IZVQY0rMsW5Iei4XaNS8p6GARdqNWWSYuc2KUhPwY4wMngrX/kfIzCaUZ2xG+Y/rwCnd0OueJ+E25jZwfM+Y728NJjKYyi0wXDFVfxWZYLR9byqr+Rv6ZenBtjjaEXyU1SNDCo3cFpgaDkM1wxQvo59RZyMyEUZb0DFZjdL3tq/LxZehrFI6lRkttmDk+DLaXutcZrLUem5vQzRD/H/27jw+jurO9/6nll7ULXW3pNZmW7YwxrvxymKCzT4QCCRhmUwICU5gyCQkZJkb5rmZ3Ce5zCTP607ySiYTMpPJkzuBZ8jNhGAIBBKWYMAGTPCC8YLxvsjGltSSWksv6q6q8/zRrVa3WpJlW7Yk6/d+jaa2U6ePRLn7m1OnTw1zkqnBBkl7TQ8V3vK8nh9Pv56g/tuF3zRzG27pDRIjRgKWOCuUUjzwpsPrTYo/3WhwXmDk3sSUUiR3vEX0qZ9jtzdTesXHCPzFnegl/hF7jXOB3eOQbEsVhahkayZIOVZfF5OrzMRb6cZb4SY4ozS77iLuxKiorpDeohHmKAfLsbGVjeVY2I6DrazMPsfGKljPLHv3246NlV1P2+lceEllf3oGCjR5+wvCUy4Q9Qzrtlc+j+HGY3jwGG7cpju7ndnnNty4DRelLj+evGPu3nPMftuGO7vPUzC4eqINkhbjmwQscVb863sOP9vp8IsVBivrRu7NMd3USPSpn9Hz/iY8s5YQvu8hXDX1I1b/eKIcRaojXRCckq3p3Hq6q+8DUzc1vJVuPJVuQjOzAar3p9yN4Rn4v5HdlsITGr1bHX0BIxM4rLyQkR9ELGewcNK7ni2XDSuWY2Hn1vuCTt965pzcenaZOc/J1ZcLSMrJBqX+ZfvOt/POH27PzXAZmlEQUtxGXuAx3bgND6VuP5VGecE+T145t1m47TE8ueCU
6EpQU1mV2zecAdFCTDQSsMQZ96ejDl9Z7/DV+Tr3zB6ZcOUkYnS+8Cu61z6NUV5N5b3fxjvv0nP+Td5K2gPewstspwvmP3IHMr1QJWE35bMKQ5SrbHgDYy3HIhJvozkeoSnWQmtHK54WD5bqF2IcK7cvP5D0D0FWLthke2nyzisMPlZRELEce1hz7JwsUzcxND27NDB1A0M3sutmZrt3v2Zg9Ntn6iZu04uRd35+nX3nZX5Mzcy9hpmtr289e75m5taN3P7iOnv39bbJpZu4DQ+mfmYfTt7mtFHhky+MCDEUCVjijNodzYy7um6yxvcvOf1wpRyH+IY/0fH7/0ClEgQ+/GnKrrwVzTWyT0EfLcpR9ETTRbfwen+smJ0rq7v1XGAqn1uGt6IvQHnK3Rjuof/eSik6U100xSI0x1poirfQHIvQHG+hKdZCczxCa6K9qHfF1E3M3g/3vECQCQ35ISOvTPa4qZt4TE/edmFoGbDu7L6Bgk1m2yzczqujIBz1ew0jG07O9VAuhBgdErDEGdPek/nGYK0P/usaA/M0x+v0HHyf6JP/RvrwLkqWXEnwlnswQ1Uj1Nqzx0rYheOg8n562lN9D5PVwBN04al046v1UjEvUBCiXKVDP7exx+qhOR7J9T41xyJFISr/m1Uu3aTaF6baX0V9YDJLaxdS7a+ixhem2h+m2hcm0ZmgsrLyDP+FhBBi/JOAJc4Iy8nMddWShD9/1CR4Gt8YtLva6fj9fxB/+yVck6ZT9eUf4Dl//gi2duQpR9F5ME6iqacoRFmJvl4ow6PjDWcGk1cuCOQGlnsr3XgqXIN+u852bFoTbTTlwlNLX5CKR2iORYj2dBScU+Etp9ofpsYX5pJJS6nxV+UCVY2vipA3cMLBw0ntxDNACyGEkIAlzpC/fcvh5Q8UL37Y4ILgqYUrZaXpXvcMnS/8Ck03CN3xZfzLb0A7w+NLTkfsgwTNm6K0bI6S6rAyvVDlLryVbvyTvVReGCgYC2X6Bu6F6k7FONB1jJZsaGqKRwpCVEu8FVv1BbUS05sNTFXMrDifFVMuzfU61firqPKFccs8PEIIcdZIwBIj7uc7Hf5lh8O/fkjn6smnNu4q+f4mok/+DKvlKP4P3UTww59B95eNcEtHRk80TcvmKM2bosSPJTH9BlWLglQtDVFa7yt6uG3aThNJtLGvu4Wmpt4ep8IQFUvHc+UNTSdcUpnpafKHmR+eTZU/TI2vKhOq/GFKXX4ZSySEEGOIBCwxol475nD/GzZfnKvzhbkn39NkRY4R/d3PSW5fj/v8BVSv+u+4J00/Ay09PVbCJrK1g5ZNUTr2xdANjYr5ARpurCE0uwwbi81N22jcczTvtl1mIHlbIlrwbbigp4xqXyYoLapZQE02PFX7M7fvKr3lGGO4104IIUQxCVhixOzvVNz2ks0VdRr/vPzkeq5UqofOl35N1yurMUpDVNz9TUoWrRhTvTKO5dC+q5uWjVFad3SibEVwhp8LPjGFygsDGB6dXW17+f82/xdrDq2lo6cLt+HO3qYLMy1Yz0WTFmfDU6Y3qsoXpsT0jvavJoQQYoRJwBIjojOluPkFi3IPPH6NgeskvjHYs38H7b/+IVZbM2XX3EHZNX+J7hkboUMpRdehOC2borRs6cCK2fgneZn24RqqFofwhFw0xyP8Zt9TvLD/FQ51HqGypJwPT7+WvzjvSqaHpo2pkCiEEOLskIAlTpvtKO5cY3M0Dm991KTCO7xA4fQk6Xzul3Svewb31FnU3PNtXLVTz3BrhyfR3EPz5igtm6IkW1O4Qy5qL6mgakkI/yQvCSvJq4df54VNr7D5+FbchosV9ZfypaX3sLR2odzSE0KICU4Cljht/9cGhz8eUfzheoPZoeGFq+TuLbT/5p9xOtsJfvQ+SlfeMurfDkx1WUS2ZAardx9OYHh1wguDzPjEZILT/ShNsaVpOy+sf4XXDr9JwkqyqHo+37j0S1w59TL8Lt+otl8IIcTYIQFLnJZHdjv8YKvDPy/Xub7+xOOunGSMjmf+N7E3/4D7/AVU/c33MKsmnYWWDsxOObRt76R5U5T2XV1omkb5nDJm311F+ZwyDLfO4c4j/HbrU7x44FWa4xEml9Xxybm38hfnXUldac2otV0IIcTYJQFLnLI3jjt8fp3NvbM0Hph34nCVeG8D0cd/jJOIEbr9S/gvuxFNH7kHPw+XchTRPd20bIrSuq0Tu8ehrMHH+R+fRHhREJffpKOnk2cOPs8L+9ews3UPpW4/V0+9nOunX8288CwZVyWEEGJIErDEKTnUpfj4SzaXVmv89ENDP7LFiXcRferfiW/4E55ZSyj/xFcwK85uz49SitgHSZo3Rom8EyXVaVFS5WbyVWGql5bjrXSTttP8+YNNPL9pDeuPbkQph4snLeV/rniQ5ZMvwmOcG887FEIIceZJwBInrTutuOVFC78Jq68zcBuDh6vEtvW0//ZfUKkU5X/1NXyX/MVZ7f1JtqdoyQ5Wjx/vwVVqEF4conppiNL6EgDeb9vLCxteyU2tcEH5dL6weBXXNKyg3Bs6a20VQghx7pCAJU6KoxSffsVmfxesv8UkPMg3Bu3uKNHV/0bindfwzruE8ju+jBEKn5U2WgmbyLsdNG+K0rkvhu7SqFwQoOHmOkIzS9ENjeZ4hGfe+8OAUyucX95wVtophBDi3CUBS5yU/7HR4elDimf+wmB+RXG4UkqReGct0dX/Csqh4q4HKVl61RnvtXIsh/adXTRvitK2owvlKEIzS5l55xQq5gcwvQbxdIKXDr3KCwdkagUhhBBnlgQsMWz/Z6/D97Y4/NPFOh+ZVjw43e5opX31T0lufZOShZcTuv1+jLLyM9Ye5Sg6D2YmAY1s6cBK2Pgne2m4qYaqJSHcAReOctjStJ3n969hbeN6mVpBCCHEWXFaAWvPnj3cfffdRCIRQqEQjzzyCHPnzj2pcg888ADPPPMMhw4dYtu2bcyfP/90miTOkE1tOp9ba/OZCzT+24WF4UopRXzDy0R/9zM03aRi1d/jW7TijLUl3pSkZVOU5s1RetrSeMpd1F5WQdXSEP7azAzwhzuP8PyWV3hJplYQQggxCk4rYH3+85/nvvvuY9WqVTzxxBPcc889rF+//qTK3X777Tz44INcfvnlp9MUcQYd6VZ8en0JSyo1fr6i8BuDVnsL0cf/heTODfiWXkXw1i9g+AMj3oZUZ5qWdzIPV+4+kp0EdFGQ6qXlBM7zoekaHT2dPLV7TeHUCtNWcP15V8nUCkIIIc4qTSmlTuXE5uZmZs6cSSQSwTRNlFLU1dXx1ltv0dDQcNLlGhoaePbZZ4fswdq8eTNLly5l+fLlBINBbr31Vm677bZTab4YprgFH3nNR3NCsebaJNXezOWilMJ+5xVSL/4KzePFdePnMGctHdHXdlIOXbuSRLfHie3vAQ3KLvASXOCjdIYX3dRIOxabW7byytE32NT8Lg6KpVUXcuWky1hWvQi34RrRNk107e3tlJefudu+YnyQ60CAXAf9VVRUFGyfcg9WY2MjkyZ
NwjQzVWiaxtSpUzl8+HBBcBpuuZPx8MMPs2TJklNtuhgmpRR/87LNnm7FH66IMXtS5h+S1Xqc9t/8M6ndW/Bdej2hW/4a3Vc6Mq9pZyYBbc5OAuqkHALTfcy4fTKVFwZw+TMh/f22vbywr29qhZkV5/OFJZ+VqRXOgv5vImJikutAgFwHQzmtW4T9b7kM1hk23HJibHlos8NvDyievNZgQdBBOQ6x139Px7P/ge4PEv6b7+KdPTK9Vj0daY6+GqFlc5R0l0VJtYf6a6uoWhLCW5GZ4LM5HuGlHa8WTa1w/fSrmB6aNiLtEEIIMbEopehNJfoIDiU55YBVX1/PkSNHsCwrd+uvsbGRqVOnnlI5Mbb8dr/DdzY7/OMynY+fpxPZc4yWx75Lav8O/Jd/hOBHPofuPf1v4FlxmyNrWvhgXQTdpVO9LET1snL8k71omkY8neCF/a8UTa3w5WX3sqTmQplaQQhxzlNKYWd/rAHWLad32xlwf8G+3DFnGPX1q7dffd3xOO6jx3Cy+xzILHu3VXYblbefvPL9yg1az3Dr71/P8Op2sn/nHy+6kAcuOH/E/rudcsCqrq5m8eLFPPbYY6xatYrVq1fT0NBQdNtvuOXE2LE5orj7VZtPnq/x3y9UdK15guQfHsUIhan60j/hmXHhab+GnXI4tq6VxjXNKFsx+Yowk6+qwiwxcJTDO03biqZWePDSL3GFTK0gxISmsh+IacfBUiq3tBxFWjknWGaCRf55aScTIAqXefUWlB14OdRrFS4HDkIDh5q+IOSc8K9yZhiahqlpBUtD0zD1zFJTCpdhYGgaOpl9ukZ2qeXtp287t79fOQ1cup5XT7/6Bq1/eHUX1jHw/ksqR3Y82SkPcgfYtWsXq1atorW1lUAgwKOPPsq8efMAuPHGG3nooYdYtmzZkOXuv/9+nn76aY4fP044HKa0tJS9e/cO+Hq9g9w3bdokY7DOkGNxxUW/s5jk03h52RHij/+IdONuzEs+TPWt96G7vadVv2Mrmt9u5/ALTaS7LWqXV1B/XTXugCsztcL+wqkVrj/vKplaYQxpa2uTMRfjTG/vRzobCNLZD/vMeiYQpJ3sdsH64OWjXd24S0oGLd8bXDLHB3qdodrStz5YYDmTXJqGS9cxs0HCpelDLs288n1LDVPTs8u8+rLnFAWWfsHF1PQB9w8UeExdH3B/cX301TtkG/RcSDnRN6/l/WBopxWwzjYJWGdWwlJc+azNsa4068ufQr3ya8zKWso/+TViwdrT+oeklKL13U4O/fE4iZYUVUuCTL2hBleFyZtH3+apXX9gc9NWmVphjJsIb6hqiDDS28MxWCjp7dUYLMAUHz879Y80QwOXpuPSMyEiP3S49EygKFzPLgvWT1zezCs7UGDpCy4DhZyBQ09hSMoECldesBDDNxHeD06HzOQugMyHyl+vs7GO7uO1zh+j3jlI2dV3ELj+U2guN7G2tlOuO7q7m4PPHqf7SILy2aXM+sxUrMoUq/f+nqfX/ZHmeIT54dl867Kvs3LqcjyGewR/M3E22f0+2FOOUxQEcvuKtjPnpgYtn3+8MGSk+gWRVL+QkerXO9K/fG/Z3tsyI02HQQNGcTApDiMeXafUNIcMJbkwchIB5lTKm5pGtL1dPliFOAEJWAKAf9rcQ90bv+Z7bU/gqZ1K+dd+jLv+gtOqs6sxzsFnj9OxJ0bZtBIWfPE8jpUf4ye7f86atetA07i2YSUfn3kjMytGbmChyFDZgBOzbWKWRdy2iVn56xYx2yZu2cRsi5hl9+3PX7dt4r3LdBpH0wYNMiM9VsSlabh7g0A2ALj1whBQuN1X3pcXGHJ1FG2fKOwMFDL6elOGKmvmnTOS30wSQowPErAEL721k2VP/YgLUkcJXf9Jyq79BJp56hN0xpt7OPTH47S+24mvxsMFd09ma9lWfrb733mvdTe1/mo+e+Gd3DTjWoKekZ/1fbzID0CFIcfqF4QyxzJlTi4sDac3RgP8ponPMPCbBn7DxGca+A0Dv2kSdrvx+0rwGSakU5T5fLng4C7o+dBx9wsb/YOMe4CeEne/cOLOljc0TW4RCyHGLQlYE5hK9bD3qf9k5vonORqcTs0DP8Ezefop19cTTXP4xSaa3m7HE3BRc2uQtf51PLT3BdqTHSytXch3V36T5ZOXnZPTK1iOw/5YjJ2dXbzX2cX7XV20p9IjFoD8hpENPiZ+08gGIpOwx81UX0leGTMXjnKhaYgA5dH1YQcZGXMhhBDDIwFrgurZv52WX/0Iva2Zxxru4it/85d4vKd2OaRjFkfWtHBsXSu6W8dzpcYfSp/ltcNv4Dbc3DD9Gj4288M0BOtH+LcYHSnHYW93N+9lg9R7nZ2819nFrq5uUk7mJlnQ5WJOWSlhj+esBiAhhBBjgwSsCcbpSdD57C/pfv337C6bxTfn/j2//eR5lHpP/gPcTjl8sC7CkTUtKFsRW9TJE/4neb9jD1PVZL609F6un37VuJ23Kmnb7OrqzgWo3p6pPd3dWNmep0q3m3mBMj5UWclfn9fA3EAZcwMBar0eCUVCCDGBScCaQJK7t9D+m3/G6Wznj/Pv5SvcxJqPuKkvPbkg4NiKpj+30fhiM6mYReT8Jn5V9l+0qAiXhS7iBxf/T5bVLhw3ASNmWbzf1VXUI7W/O5YbtF3n9TI3UMa1NVU8cMH5zAmUMTdQRpXHM6ptF0IIMTZJwJoAnESMjmd+QWz9H3Gfv4Cnr/pHvrCzhv+80uDSGn3Y9ShHEXm3g0N/bCIZ6eHI5CP8ZupvscrS3DTjOj56wQ1jekLQjnQ62wvVmRemujgUj+fKTPWVMDcQ4JZJdcwpy4SoOYEyyt0ydYQQQojhk4B1jku8t4Ho4z/GScQI3f4l1tbfwBdfVPxfC3XuumB44UopRfe+JAfW7iZxNMWhioM8M+dZSieXcO+sT3HttBV4zLHTk9Pa05MLTzu7+nqljiaSQGbQ+HS/nzmBMj5RPzl3W292WSllrlP/9qQQQgjRSwLWOcqJdRH93b8T3/AnPLOWUP6Jr7Bbq+avnra4eZrGdy8aXrjqOhRn59MHSR20aSxt5MVZL9EwbzL/Y9ZXmReePWq3AZVSNOcFqfxeqeaeHiDzjKkZpX7mBspY1TAtG6TKmFVWRolx7n2LUQghBqOUAuWglINSdnbpZPdlt52+431lB9nn2HR2RCEVABQohcouQdH3kJi8Y6js//UvN9g5DFGur45csaLXGqhNqu8l+pWrrF1KIDxrxP7mErDOQYmtb9L+xE9QqRTln/w6vouvo7UHbv6dRUMZPHalccKJD7uOx9ny5C60vSZN3ibenP0GCy6ZxQ9m/g8qS87e1/SVUhxNJPvd1utkZ1cXbak0kJmMclb2dt6V54eZGwgwN1DGjFI/HglSQowLSimUY6GUhWNnl46NcuxsALCzAaB3PX/pFO7LBoD+6xTVkVl3HGvAOhiwvn51DPRag9Sn+oUZcPLa3y/sKJUXauzs3yfbrt7t3nb2C0ID1Z8LFmJQC6/8BwlYYmB2d5To6n8j8c
5reOddQvlfPoARrCRlK27/k01nGl6+yaTMPXi4am6KsOmpnfh3B+hwd7FrwU4WXzGHBwNfoCZcfcbarpTicDzBjlyQ6gtUXZYFgFfXs4PLA9xYV5sbaH6+34+pD38smRDjQd8Ht1UcBByr4MPbcSxwHBxloRwrU26ApXLS2dAyUJl0Nhz0lskulYVjpzOvk10mEzFcLgNlp/u95gDnO+m8189/vcIyStmj+vfWNANNN9A0PW89b6kZaLp+4vXebd3MbusFdemaO2+/jkbmOJqefe1MXYXbefv0vu3eYxSU0QpfFy2vfgNN0/q9RnafbgC9be2ro3+78l+zs6uLYDCEhgaaBplXy4zDQOv9wxYezy4L1zOL4ZXTstUPp1xvfQxcrvdY3uuOJAlY5wClFIl31hJd/a+gHCo+/XeULLkSTdNQSvHlNx3ebFKsucmgoWzgC2jHkd1sfXY3tXsmoRsuDi7axyU3LOTm6pVAZoLJkeQoxXudXaxtibAu0sralggfJDNjpEpNMzO4vKyMWydPyo2Rmub3ycNYJ5jM/2rPfEA7drpv3UkXbCu7b79yLBw7lf0gzz+377zecr09DU6/wFKw38n0IgxZpqhHpXeZCUeDlukXkjJBKvuaZ6nHIfPB70LXB1qaaLqJrpsFx2xb4XhKMoFBd6HpJi7Di2a40DUDzXChaQZ6wdJEN8zMMltP/vl6wWuZfa8/QEjpCzL9A1G/cHQS54mTp1xthGTi4UFJwBrn7I5W2p/4Kcltb1KyaAWh276IUVaeO/7wDoefv+/wv1caXF5b+CaSstO8uu8Ndv3pELMPzKGGOuKLOll+yyIqgpeOaDvTjsPm9mguTL0eaaU9ncbUNC6qKOeuafVcHq5kUSjIlJKScTPFw7lCOTZWOo6VjmGl49jZZf/17s52WrzugqCjbAvHSRUFmYECTUHQyQYhVVBPui8Y2WegV0PT0XU3umFmP9gNNM3M9gqYuQ9bvfeDPffhbOZ9WGfK95YxTE+/D24DXTezr2UWf8DnlcnU2fta+W3o7e0wB+gNMQepxygIKMWhJRuQ8oJOpq6T/7cmM/oLcWISsMYp5TjEN75M9Kl/RzNMKj77LXwLLy8o8+IRh6++5fD1BTqfm9UXrprjEZ55/3ka32zm0sOXstBehL7IZtkt8/AGR+bbgHHL4q22dtZle6jWt7YRt218hsHyygq+OnMGK8KVXFJRjs+Uy3A4lFI4dk9fCErFsaxM8LFSMWwrs7TS8X7rib71QYKTY/ec8PUzPRHuzE/vB7bR+8HtQjdceR/kvcczx1wuT6Z3Q88Gm+z+/Hr0bD1av3oy+8xcMCrYr5u59vSVG2C/bmZ7UaSnQghxdsgn2zijbIv4plfpWvM41vHD+JZdTfDjf4PhL3xo8q6o4i9ftrl+isY/XayjlOLd5h08+f6zRLfFuPbotczquRD/hR7m3nIe3orTm+epPZXijUhrrodqU3uUtFKUu1ysqKrkf86bw4pwJUvKQ7gmyHipVLKdVDKaCzdWOoadDTSZgJPfY9RshoicAAAgAElEQVS33lcukbeeOYZyTvi6hunFcPkwXX5Ml69g3euvwXCVYGb3DVaub9uH6fZjmD50w017e7v0XAghxDBIwBonnJ4ksbeep/uV1djRluwg9q/gmT6vqGx7j+KWFy0m+eCXK9M8t+9PPPn+HzAaXXzk+E1UdVcRmOPj/Jsm45/kPaX2fJBIsC7SyrqWVtZFImzr6EQBk7xeVlaF+fS0qaysCjM3UHbCbyyOd46dpqt9Hx0tO+iI7Mwtk7GmQc/RNGOAcJP5cblK8fqqC0OQ249plvStFwSkvHWzJDtYVQghxGiSgDXG2d0ddK97hti6Z3CScXxLr6Ls6ttx1TUMWN5yMj1Xnclj3D/jJT737MtURCu5PXIrlZEwZQ0+GlbVEpzuH3YblFLsjyd4uuMQayMR1rW0si8WA+CCUj8rwmG+PnMGK8JhzvP7zunxUz3xCB2RnURbdtAZ2Um05T262nbj2CkAfGWTCYTn0jDvrwiE5+DxhbMhqqQgEOmGPKtQCCHOZRKwxiirvZnuV1YTe+t5APyX3kDplbdiVgz+KBpHOdy7ZhNNkeeYabzD3j0N3N/2BQKNIXy1HhruqaV8btkJP9gdpdje0ZkLU+sirRxLJtGAC4NBbqyrYUU4zIqqSmq9p9YDNtadqFfKML0EKmdTXnMhDfP+imDVXILhObi9oVFuuRBCiLFAAtYYkz52kK41TxDf9Aq610fpVbdTuuIWjNLgoOd0pbr5476XeXTHH+juOc5C13w+2/33uN8vwVPuYtqdNVQtCaHpAwerlOOwqb2ddS2trI1EeCPSRjSdxpX9ht9nptWz2Ovh+oZphM7BZ/L1xCNEW96jI/IeHS3v0RHZWdQrFayalw1S8wiG51AaOk9uxQkhhBiUBKwxomf/Drpefpzkjj9jhKoIfvSv8V96PbqnZNBz9rUf5He7/8CLB14l5Vh0J1byufiXuLBRw/Tq1H+0mtrLKtDNwkHlMcvirdY21kZaWdcS4a22dhK2jT/7Db+v937Dr7Ii90iZtra2cR+uMr1SezMhKi9QJePNQLZXKjwn0ys1/5MEw3MIhufi9g4eboUQQoiBSMAaRUopku+9TdfLj5PavwOzdirld/4tviVXopnFDx3u6ulmc9NWNh7bwsbj7/JB93EqS8q5seE2dq29iFtbEvhMmHJNmElXhDG92XCUSvF6NkytjbSyuT2KpRQVbhcrwmH+Yd4cVlRVsjh07nzDr7hX6j06W3ejnMzjdXK9UvM/Kb1SQgghRpwErFGgbIv4O6/R/fJvSR87iLthDpX3fhvv3EvQ8gKO5VjsiOzKBKpjW3i/bS+Ocqgvm8Qlk5awrHoxNQemsfupVi604tRcVsEF11fTbKT5bfMHuW/5be/sBGBKSQkrwpV8tmEaK8KVzDkHvuE3/F6phTTMv1N6pYQQQpwVErDOIieVJP7WC3S9shq7vRnv3IsJ3f4l3NPn5R5rc6jjCBuPb2HDsS1sadpGwkoScJextPZCbppxHctqF1Ful9OyKcqxF1s51trE89Ueqq9xs08dY+3abRyIxQGYWVrKyqpKvjHrAlZWVTLNN76/4ZeMRwpCVFGvVGAKwfBcGhbcSTA8l2DVXEqDDdIrJYQQ4qyTgHUWOLEuul9/hu61z+AkuvAtvoLSa76De9J0oskOXj+0jo3H32XjsS00xyOYusmCqjncNe8OltUt5ILy6Wi2Ruv2To6vaePFw4fZEkryRn2aTefHsY0U2kFYFApyc10dK6squTxcSc04/YafY6f6vsHXspNoZAedLTsH7ZUKhecSCM+RXikhhBBjhgSsM8hqb6H71SeJrf8jSjn4L70Bz4qb2em0sfHYOja++zC72/YBcF5wKldMvYxltYtYWDOPEtOLUoqOg3H+sGY/a440sbkkztbyJNGFNjoajhXi2uppfH1OmMvClQRdxeO2xrp0TyfRlu1EmzM/0islhBDiXCAB6wxIHz9M15rfEt+4BrxeWldcz
XtTqtjcvot313ydHjtFuTfIstpF3DbrIyytXUiVrxIAWyk2Nrbyh207ea2llc2eGF2mg3uKxkXBEHeUT+H5g+VEusv58XI3987Sxs1tv2SshWjLtmyYyixjHQcB0A0PwfAcymsWSa+UEEKIcU8C1gjqObiTrj89zrFd63mvJsjOy2fyrmqjrfNl3LvcLKyey+cuvJNldYuYHpqGrulYjsOWaAePHNzNyweO82Z3O126g9vRWFRSyhfrGrh+Vh0XV1bw/+7U+Lu3HeaWw4sfN5kdGpvBSilFvOtILkT1Bqpk7DgApruMUNU86qZfR6h6PqHqCymrmIGuy+UohBDi3CCfaKdJKUV0x3refv0x3okdYnuFh8ZFHiDJDK/i+rqrWVa7kAXVc/EYbizHYXM0ylO79/Jqc4TXW1rptC3cjsb8Li+f1qu4dnot1100hVJ/Zt6pprji9pdsnj/i8LX5Ov/PxToeY2yEK+XYdEX309G8nfZsoOpo2U4q2Q6Ap6SSUPUCps29g1D1AkLV8/EHp6Fp58Z0EEIIIcRAJGCdAkc57InsZf07v2ND4wbed/dglWmEy0Msq7+YVZMWs7T2Qsq9IdKOw8b2dn605wCvtbTyeqSVbsuiBJ0L4yV8IhLgIlXGFXPrqL+6gpIqT8FrPd/ocPdrNgB/vMHghvrRCyaOnaKzdRfRbJjqaN5ONLIDO5351qIvMIVQ1XxmLL43F6a8/tpxcwtTCCGEGCkSsIapOdbChuPvsuHoRjYdfYdOJ4nHVswzA/z1eddyyfwP0xCsJ60UG9ra+bcDTbzasoM3I63EsrOkX+wO8LmOMHMOGMxJlVC3IET1LeUEz/cXPcamx1b897cdfrTd4cP1Gr9caVDjO3tBxUrH6WjZkbu9Fzm2hVh0b3bwuUZZxfmEqhZQN+MGyqsXEKyah6ek4qy1TwghhBjLJGANIp6O807TdjYez0zyebjzKBowPaFxVdRiWfUCll65CnPyDN5ua+exDyK89u4bvNnaRsK2KTNNLg9X8mDtdOYeNqnZbKH1KALn+6m5qZzKCwO5mdb729mu+OQai51R+NGlOg/M18/ohKCpZLRv4HnLNqLN2+hq2wcoNN1FoHIWZRVzOH/hXZkwFZ6L6fafsfYIIYQQ450ErCzLsdnVtjc3a/qOyC5sZVNTUsnCtJ+PHdKY2+kQWnodO6+4jldSDt/Z38z6De+TdBwCpsnKqjAPzZvDck+Qul0ObWs7SLam8FRA9RVV1FxUjrdy8Of5KaX4xS7FV960mVYGf/6YyaLKkQtWSimSsaZsmNqanR5hG/HOIwAYZgnBqnlU1a/ggqVfoLx6AWUVMzFMD21tbVRUSA+VEEIIMRwTOmB90HU8N2v65qatdKdi+F0+Ftcs4Iszb2P27gN43tzAO+Ve3p77CX5cVsPbHV30bHmPkMvFyqpKvrdgHldUhZlfUkb7tk6aX26nY28Tx9064YVBLvjEZALTi28B9teWVNz3us3qA4r7Zuv8aLmOzzz1cKWUItZxqF+Y2k5PvAUAlydEqHo+ky+4OfNNvqr5lJWfL/NLCSGEECNgQgWsrlQ37xzfxobsbb8Puo9jaDpzKmdyx6xbWFa3kNpOm9def4HXWt/n+xX1vHPJ3aTRqNBdXOHz8b+mTeWKqjALgkF0BZ37YzQ9H2XT1qPYPQ7BGX4u+OQUwhcGMDzDCyuvHXO46xWbmAWrrzW49byTG8juOBbdbftoz+uV6mjZQbon8wxCr7+WUPV8zltwVzZMLcAXmCKDz4UQQogzZEIFrH9662HWNq5nStkkLq5bzEV1i5lROZstHQnWvL+dn27fwBbDh+WbTmUpXFlby6drqrmiKsy8QCA3DirZmuLIC800b2ynpy2Nt9LN5KvCVC8rx1sx+C3A/tKO4qHNDt99x2FlncZ/XmlQXzp46LHScWIdh+iOHsws2/cTbdlOR8tOHDsJgD/YQKh6PjOXfTHzTb6qBXj9Vaf3hxNCCCHESZlQAeuehZ/i0ws+zb6kwWstEf7brggb29dgA+FUnMusOJ+aXMe1C5cxLxgq6OGxkjYtWzto2hClc18Mw6MTXhSk+qJyAued/EOU93cqPvWKzYYWxT8u0/m7hTq6lpntPNZxMBeiYh2HiEUP0d1xMHd7DzLjpUpD5xGsmkv9rI9mw9R8XJ7ASP25hBBCCHGKJlTA+v6+Vn627wAOUKPDpdEj/EPLfq6oKGfxFTfjnbmoICgpR9GxL0bzhnYiWztw0orQBaXMvHMKlQuCGJ6Tn5PKsdP81/bDPPz2Aabrh/jO5EYCjYdYsz0TpnrnlALw+KooDTbgD06letpK/MFplIYa8Aen4fFVyS0+IYQQYoyaUAHrmlCAWSVpFm9+kYb2D/AtWkHZp76Iu/6CgnKJlh6aN0YztwDb03ir3NRfU03VshDe8hPfAkz3dGV7oQ4R68jezoseoit6kETXUTw4/C2g6SbqeD1WcCrhSRczbe5fZkJUcBr+4DSZCkEIIYQYp04rYO3Zs4e7776bSCRCKBTikUceYe7cuSdVbrh1jIQVL/yCnn3b8F98HWVXPYRZNSl3zEraRLZ00Lyhnc4DcQxv5hZgzUXllDUU3gJUyiEZa+q7jRc9SKzjMN0dB4lFD+YeEwNgukvxBxtIeafyvLqJA75p3LmggY/Om05J2SR5/p4QQghxDjqtT/fPf/7z3HfffaxatYonnniCe+65h/Xr159UueHWMRJCH7sP3R/ACGTmc1KOomNvjKa322nd1oFjKUIzS5l1Vz3BOR6SiSPEou/TvKWvFyrTI9WYG1QOUFJahz84lUDFTOqmX5fthWrAH5qG4S7n+9sU//dGhyVVGv/naoPzA3JrTwghhDiXaUopdSonNjc3M3PmTCKRCKZpopSirq6Ot956i4aGhmGV8/l8w6qj1+bNm1m6dCnLly8nGAxy6623ctttt51023ta00TfjdG+o5mexBG0UBNmXQuUNdOTPEKiq5GeeBOQ+dNououS0il4y+opyf1MoaSsHm/pFAzTO+DrfJDQ+MIGL2+0GHx1Voq/m5vCNU6fcdze3k55efloN0OMMrkOBMh1IDLkOijUfzLuU+7BamxsZNKkSZhmpgpN05g6dSqHDx8uCEdDlfP7/cOqo7+HH36YJUuWnHSbtz33Sz7Y8xLJnkZs8xgqEIPsl+5csRClrkzPU8205QW9UCWldWjaySWj3x10uGetTYkJL99kcNWk4U/fMFbJTO4C5DoQGXIdCJDrYCindYuw/7fYBusMG6rccOsYCR1NB3G0DsLTFlEx7XbKKs7DH8oMKHd7QyPyGnFL8bdvOfxsp8PHpmn8YqVBpVduCQohhBATySkHrPr6eo4cOYJlWbnbe42NjUydOnXY5Xw+37DqGCkf+ux3zujUBltbMw9pPtAFP7tc577ZukylIIQQQkxApzwiqLq6msWLF/PYY48BsHr1ahoaGopu7Q1Vbrh1jJQzFXaUUvxku83FT1uYOmz8uMnn5xgSroQQQogJ6pQHuQPs2rWLVatW0draSiAQ4NFH
H2XevHkA3HjjjTz00EMsW7ZsyHJDHeuvd5D7pk2bTmkM1pnQklB89jWb5xoVD8zT+V8X63hP4yHNY1VbW5vcaxdyHQhArgORIdfB0E4rYJ1tpxuwEm2HcNJJXP5KXL5yNH14D2MezItHHO5+1cZW8MsrDG6aOk6/IjgM8g9JgFwHIkOuAwFyHZzIhJrl8ui6n9Ky9XfZLQ3TV54JWwP9+Aq3DbcvV0/KVvz9RocfbHX4i8kaj15pUOs793qthBBCCHFqJlTAqr/q69Qs/STpWGvxT3eEeNMu0vFWrHiU3jmweumuElz+SixPJVvi5bjtSh6fHOZDlZWY+8N05IUxsyR02r1jQgghhBi/JlTA8gRq8QRqT1hOORbpeHtRCNvUGGHD4VbqtFZuLt2Nu/ktDh+IoKyewgo0HZevHNNXibu0EjPbG+b2V2IO0FtmuErO0G8shBBCiNEwoQLWcGm6ibu0CndpFQDtPYovvW7zeFRxz0Uaf7fcwO/K3BJUSuGkYqRjbaTjmZ6wdLw1sx1rJR2LkO5uId70/uC9Y24/Ln9F0W3JoluWpdnesZOc9FQIIYQQZ5cErBN4/bjDp16x6UzB49cY3DG9MNxomobhKcXwlOKtOPH8XYP1juV+4q3EmnZixdpIxQbrHavI3Ir0lWO4/eiuEgyXF91Vgp5dGq6S3LaRt7/vWP62F00eOi2EEEKMGPlUHYTlKP7xHYd/eMfhQzUaj11lMLX09Aey9+8dG0pB71gskgtgfdttOOkk6ViEnnQSJ53AsZLY6QROKoGTTqKc9PDaZbj6BbK+8KWbJVhKp600lNvOBTl3tqyZF+TcvsLtbDkJcUIIISYK+cQbwMEuxadesflzs+I7S3S+uUjH0M/+twRPtndsII6dxrF6MuErlciELys58HY6iZ2K46STOFYCO51XLhklkWztK5fOlksnUPYwQ5zuyus58w7Yy6YZbnTDhWa4M6HPcKFlfwrX3QXLAcvqroHLmJnXQJPJYIUQQpwZErD6+c0+h8+/bhNyw9qbDS6rGd/jnfRsqMBTelr1DDXfiXKsbOhK5AWwRDaAxfttJwrK9W0nsRIdOHYalf3pW09l11N5+1On9ftkaP2CmXuIQJdd113opnvg8nrxeZpuoGkGaFrmm6WanhlDlw13Bfv0bODTjOx52XVNB733PD1zjOJ9vfX01dV7LLvUjIL13jbJmD4hxJk2+JSbg+w/yfID1z9Y3QPv1nR9RO+0SMDK6k4rvvymzSO7FX91vsa/fcgg5JHejeHQdDPX03a2KKVA2QMEslS/cJbGGWJf8f7supN3npVX1kljJTuHVZ9jp8BxUMoG5Zy1v80p6Re6cmFNz+7TMvuUUhzQdcj1/GWXmoaWt15Yd19Zrf95uXNyJQasO3+7oNdx0GPaoO0YtJ15b9Cq/zvwUPMxFxxT/Q4NfmyoOod/3sDHCtpfUGTgcwd9vUHaaNsWB3Wj3+ucuI5htWvYf8/i40P+dzql84c+XvRqp9i+outtiLInGxoGOnCmw8541XD9t6i7+DMjVp8ELGBji8Oda2w+iMMjVxh85gINuXU0tmV6d0wM3YRxMs2FUg4oB5UXupST3afsvPXecnbBeu6Y46AYYJ9yIFtPbl0p6FdPbzgd/HVUv/Zk9zk2iUScEq838/v0/WJ9W7k34uwHff523wnZ/z/AOf0/wPrVl/9axcfy6u8fNAaoR9EX83IK/t0PFhTzQtqwzht0o9/7zPDrHPL9qSjEMkjb848P3MbiQJypK5FIUFJSMuBrFFY1yO/QP+wOWZYCQ//tBzupf5nic074nj+M1ymuY4BzTuazZZCyRX+DE9Y9/HYMXnfxrlgsjt/vP4m2DPKaJ9PuQeserIbh1+Gvmz/seodjQgcsRyl+sNXh7zc4LKrUeO4GgwuCEqzEmdF3y260W3Lq5NEYAuQ6EBlyHQxtwgasY3HFZ161efmo4sGFOg8t1XEbEq6EEEIIcfomZMD6/SGHz621cenw0o0G10wex10KQgghhBhzJlTASliKb/zZ4afvOdw8VeM/rjAIe6XXSgghhBAja8IELKUU33jkEO0tMf5raYjbLwqiewYZnCeEEEIIcRomTMDSNI0vx/ZS9f778D50/ApwmxjhAHp1AL0qgFEdRM9uG1UBNI9rtJsthBBCiHFowgQsgFlfuRaVXIkd6cJp7sRp6cBu6cRp6cTacYSeV3dA2s6V14I+9HBZJnhVZUKXXhVArw6il/szkzYKIYQQQvQzoQIWgOZ1Y06phCmVRceUUqiOOHZzJnTlAlhzJ+n3j6LaY32FDR09XJYNXsFcr5deFUCvCqL7PWfxtxJCCCHEWDLhAtZQNE1DC/nRQ36YWVd0XKUsnNaubADrwGnpxG7pxNp3HHv9Lkj2PZNP83uyYWuAAFZZhmYaZ/NXE0IIIcRZJAHrJGhuE6OuHKOuvOiYUgrVncyFLqe5L4ClDu7Fae0CJzuLtKahV5YWBrCqAEZ1pvdLK/PKTPJCCCHEOCYBa4RomoZWVoJeVoI5vabouLJsnLbu4gDW2Ep68wFUd7KvsMeV6e3qHXyfve1oVAfQwwE0t/xnE0IIIcYy+aQ+SzTTwKgOYlQHGei7iU68JzvuqxO7uQMn0oXT3EH63UP0RDrB6ntYsBby5fV6BTOD7msCGNUh6f0SQgghxgAJWGOE7vOgT6uCaVVFx5Tj4LTH+gJYbw9Ycwfp7YdRnYm+wl5XJnTVBHPhy6jJhrDyUjRdwpcQQghxpknAGgc0XceoLMOoLIPZk4uOq0Qq0+vV3JEZgN8UzYz9+vOezNiv7NAvTL3vVmO2Ny0XxMIBNJcMvBdCCCFGggSsc4BW4sYcrPcrbeNEOvsCWFNvz1cjPS3b+249aqBXlmWCV1UAvSZUEMSEEEIIMXwSsM5xmssY/JuPvbce84KX3dyBdbAF+897CqadoNRLZ22or+dLxn0JIYQQg5KANYHl33p0zZlScEwphepK5kJX7OBx9K6UjPsSQgghhkEClhiQpmlogRL0QAnmjFpis6sorajIHVeJVG6wfX7vV2bcVzeo7MCv/uO+ssFLxn0JIYQ4l0nAEqdEK3FjTg3D1HDRMWVlx3015Q287x339Ur+uK/shKuDjPvSStxn+bcSQgghRoYELDHiNNPAqC3HqB1o3JfCae8u6PlymjuxDrbgvL0XlUj11VNWknnEUL/bjkZ1EC1QIuO+hBBCjFkSsMRZpena0OO+upNFtx2d5k7S7x1BdcT7Cnt7Z7sfYNxXRSmaoZ/l30wIIYToIwFLjBkFjxs6v7bouOpJ5wKX3RTNLJs7SG3cV/isR0NHD5f1fdsxvxesKoDmGWgufSGEEGLkSMAS44bmcWHWh6F+kHFfrd15vV7ZKSd2HcVetxNSVl89IX82cOUFr+y0E5pfppwQQghx+iRgiXOCZhoYNZnbhP37p5RSqI548Wz3H7ST3nKw4EHbms9deNsxG8RkygkhhBAnQwKWOOdpmoYW8qOH/DBzUtFxJ96TG2yfP+O9tfc4Tnt336OGXAZ6OJA
32D4gU04IIYQYkAQsMeHpPg96QzU0VBcdUykLJ9JVcNvRae4gve0QPS2dhY8aqigruu2ohXzopSVopV60Ug+aLoPvhRBiIpCAJcQQNLeJMakcY9LwHjU02JQTmcpA83kyA/lLvZnQVVaCXuZFK83u610vy677JJQJIcR4JAFLiFN0wkcNxZKozgROVxLVlcDpTqK6C9ftD9qwupOoriQq3jPAi5AZeF/mzfSElWWCWe96Jqhl17PhLBPKZKyYEEKMJglYQpwBmqahlZZAaQnDHZmlbCcTyrqSOF2JzJxg2UCmuvv22UfbsLqSqO4EKp4qrkjTMrcjSwfqHfOilxUHNa1EQpkQQoyk0wpYe/bs4e677yYSiRAKhXjkkUeYO3fuSZV74IEHeOaZZzh06BDbtm1j/vz5p9MkIcYtzdDRAj4I+IYfyiwbFevJha/83jGnK5HpGetOYh1pzQW1oluXALpW1FNWfBvTC+kerA4LzWOCx4XmMdHcLnAZMr2FEELkOa2A9fnPf5777ruPVatW8cQTT3DPPfewfv36kyp3++238+CDD3L55ZefTlOEmJA000AL+tCDvmGfoyx7gN6xZEEvmepKYB2O5PaRTOfO7xywIYC7N3CZmclcPa7sejaEeTL7i7Zz5fPOdeeV9bjAZUoPmxBiXNGUUurExYo1Nzczc+ZMIpEIpmmilKKuro633nqLhoaGky7X0NDAs88+O2QP1ubNm1m6dCnLly8nGAxy6623ctttt51K88VJam9vp7y8eKC3mCAsG2I9dDS3EizxZyZuTdmZZdqCnrz1/GNF6xak+x2zneG1wWWA2wR3dukywZPddpmFx9wDbLuMbHmzr7xLB9MAM7vUNZCeuBOS9wMBch30V1FRUbB9yj1YjY2NTJo0CdPMVKFpGlOnTuXw4cMFwWm45U7Gww8/zJIlS0616eIU9b94xAQU9I34daAsG5WyoCeNSlmoHguVSkOPhepJZ7Z70pDKbmeX9Fh96ykL1ZksPrc30A2XBphGZk4z00Azjcztz/7b+cv88sMsh0vPq9sctB5MfczeepX3AwFyHQxl0IC1YsUKdu7cOeCxd955B6DoH/5gnWHDLSeEmHi03kDi85yR+pXjFISxvrBmodKZAKYse/ClZaPS/ZbZ405P/vnWAOc7md6/4fbSDSTbuzbskGfqYOi5/Zl1HYy+Y33l+paYeuYh6eaJy9GVwHEn+/YZutzCFaKfQQPWunXrhjzR4/Fw5MgRLMvK3fprbGxk6tSpBeXq6+uHVU4IIc4ETdehxI1W4h61NihHDRjQipZDHR8i6OWWyRTKdsByMvt61+3MeqYep3D9FP8Hb7T/DkMvCHPFga13X+GxgnK9+3oDotF7TC/YN2BoLAiQ2fMKjuuF9feu69qY7SUU49sp3yKsrq5m8eLFPPbYY6xatYrVq1fT0NBQdNtvuOWEEOJcpelaZuC+e+zNjKMcpy902XZxOMsGMWU7YGfCXHe0k9ISX3E5e4Bg13turp7CJck0jjVIOdvJ9AIWrDun1yM4kPyAVxDQ+oe6AQJefgAsCIi9+4zC4/lhLz9sGv3K9O7XB3i9gcrKhMRjzikPcgfYtWsXq1atorW1lUAgwKOPPsq8efMAuPHGG3nooYdYtmzZkOXuv/9+nn76aY4fP044HKa0tJS9e/cO+Hq9g9w3bdokY7DOsra2NrnXLuQ6EMDoXwdKqVzQyoUuy86GwLyQN0RAywVCu+82rurX49cXLIdRf0HZsxwQITN+0OgfwrRMwBssvPULewVhUR+qbOYn0ZPEV1aaV17LHNPzw3Cv+KYAAAjwSURBVKHWV5euFZXND5Ho2tDnjbOextMKWGebBKzRM9pvqGJskOtAgFwHp0spBU5eSOwf0Ox+Yc1xigOlbReXtwrPzYQ8lbc+eNm+9by2OKrvdQYqb9mZW8y2A2cjSej5AU47cTArOpYdKzhIWc/ymbjm1Y9Yc8def7UQQghxDtM0ra+3h0zn03iUH7SVo6A3CDoqG9CyQaw3TDoO2KovpDkOylZ55TLHC86zess5faG033bReUXHBihr26iewnOd7uSI/n0kYAkhhBDitGi6Bnr2m6uM39A4kmRUnBBCCCHECJOAJYQQQggxwiRgCSGEEEKMMAlYQgghhBAjTAKWGJbVq1ePdhPEGCDXgQC5DkSGXAdDk4AlhuXJJ58c7SaIMUCuAwFyHYgMuQ6GNq6maUgkEgCDPoRanDkdHR1s3rx5tJshRplcBwLkOhAZch0Umz17Nj6fDxhnM7n/6le/4q677hrtZgghhBBCFMl/0sy4CliRSIQXXniBhoYGSkpKRrs5QgghhBA547YHSwghhBBiPJBB7kIIIYQQI0wClhBCCCHECJOAJYQQQggxwiRgCSGEEEKMMAlYYkgNDQ3Mnj2bRYsWsWjRIn7zm9+MdpPEWfDAAw/Q0NCApmls37694NiePXu47LLLmDlzJhdffDHvvffeKLVSnA1DXQvy/jAxJJNJPvaxjzFz5kwWLVrEDTfcwMGDB3PH5T1hEEqIIUybNk1t27ZttJshzrLXXntNNTY2Dvjf/6qrrlK//OUvlVJK/fa3v1WXXnrpKLRQnC1DXQvy/jAxJBIJ9dxzzynHcZRSSv3kJz9R1113Xe64vCcMTHqwhBBFVq5cyZQpU4r2Nzc3s3nz5tyEv7fddhsHDhwo+F+z4twy2LUgJg6v18uNN96IpmkAXHrppezfvx+Q94ShSMASJ/SpT32KBQsWcO+999LS0jLazRGjqLGxkUmTJmGamadsaZrG1KlTOXz48Ci3TIwWeX+YeP7lX/6Fm2++GZD3hKFIwBJDWrt2Le+++y6bN2+msrKSu+++e7SbJEZZ7/+K7aVkruIJS94fJp7vfe977Nmzh+9+97u5ffKeMLBx9bBncfZNnToVAJfLxVe/+lVmzpw5yi0So6m+vp4jR45gWRamaaKUorGxMXediIlF3h8mlh/84Ac8+eST/OlPf8o9DkbeEwYnPVhiULFYjGg0mtv+9a9/zeLFi0exRWK0VVdXs3jxYh577DEAVq9eTUNDAw0NDaPbMHHWyfvDxPLDH/6QX//617z00kuEQqHcfnlPGJw8i1AMav/+/dx2223Yto1SiunTp/PjH/9Y/uFMAPfffz9PP/00x48fJxwOU1payt69ewHYtWsXq1atorW1lUAgwKOPPsq8efNGucXiTBnsWpD3h4njyJEj1NfXM336dMrKygDweDz8+c9/BuQ9YTASsIQQQgghRpjcIhRCCCGEGGESsIQQQgghRpgELCGEEEKIESYBSwghhBBihEnAEkIIIYQYYRKwhBBCCCFGmAQsIYQQQogRJgFLCCFO0ne+8x1KS0tHuxlCiDFMApYQQgghxAiTgCWEEEIIMcIkYAkhxoX169dz9dVX4/f7CQaD3HnnnTQ3NwNw8OBBNE3j0Ucf5Z577iEYDFJRUcHXv/51LMsqqGf79u3ccMMNlJaWEggE+OhHP5p7zmIvx3H44Q9/yJw5c/B4PNTW1nLHHXfQ0dFRUG
7r1q1cfvnl+Hw+5s+fzwsvvHBm/whCiHFDApYQYsxbv349V155JcFgkN/85jf8/Oc/Z8OGDdxyyy0F5b75zW/iOA6PP/443/jGN/jJT37Ct771rdzxxsZGVqxYQVNTE48++ii/+MUv2L17NytWrKClpSVX7stf/jIPPvggH/nIR/j973/PT3/6U8rKyuju7s6VSafT3HXXXaxatYqnnnqKcDjMbbfdRmtr65n/gwghxj4lhBBj3MqVK9Vll12mHMfJ7du+fbvSNE0999xz6sCBAwpQK1asKDjvW9/6lvL5fKqtrU0ppdTXvvY15fP5VHNzc67MwYMHlcvlUt/+9reVUkrt2rVLaZqmvve97w3anm9/+9sKUM8991xu3549exSg/vM//3MkfmUhxDgnPVhCiDEtHo/zxhtvcMcdd2DbNpZlYVkWs2bNoq6ujg0bNuTKfvzjHy8499ZbbyUej7Nt2zYA1q1bx9VXX01VVVWuzLRp07jssstYt24dAGvWrEEpxT333DNku3Rd59prr81tz5gxA7fbzZEjR077dxZCjH8SsIQQY1p7ezu2bfO1r30Nl8tV8PPBBx/Q2NiYK1tdXV1wbu/2sWPHcnXV1tYWvUZtbS1tbW0AtLa2YppmUV39lZSU4Ha7C/a5XC6SyeTJ/5JCiHOOOdoNEEKIoYRCITRN45vf/CYf+9jHio6Hw+Hceu+g9/7bdXV1AFRUVNDU1FRUx/Hjx6moqACgsrISy7Jobm4+YcgSQojBSA+WEGJM8/v9LF++nJ07d7Js2bKin4aGhlzZp556quDcJ598Ep/Px4IFCwC4/PLLefnllwsGojc2NvLmm2+yYsUKAK6++mo0TeOXv/zlmf/lhBDnLOnBEkKMed///ve5+uqr+cQnPvH/t3OHKotFURiG39FsOBrVcEC8CIvIAa9A4RTDaaJFwaagxWAQi83gBZhNFhGzdyAYzEaDIE4TJo+M88P7XMBi7ZU+9l5s4jgmCAKu1yu73Y4kSd4h63w+kyQJcRxzOp2YzWb0ej2CIACg3++zXq+p1+sMh0Oezyfj8ZhsNku32wWgXC7TbrcZjUbcbjeiKOJ+v7PdbplMJuTz+W+NQdIPYsCS9N+rVCocj0fG4zFJkvB4PCgUCkRRRKlUev91NZ1O2e/3NJtN0uk0nU6H6XT6rlMsFjkcDgwGA1qtFqlUilqtxnw+/2PxfblcEoYhq9WKxWJBLpejWq2SyWT++dkl/Uy/Xq/X69tNSNLfuFwuhGHIZrOh0Wh8ux1JcgdLkiTp0wxYkiRJH+YToSRJ0od5gyVJkvRhBixJkqQP+w1A08aWqMuWugAAAABJRU5ErkJggg==" - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "n_epochs = length(losses)\n", - "n_parameters = div(length(parameter_means), n_epochs)\n", - "parameter_means2 = reshape(copy(parameter_means), n_parameters, n_epochs)'\n", - "plot(epochs, parameter_means2,\n", - " title=\"Flux parameter mean weights\",\n", - " xlab = \"epoch\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Note.** The the higher the number, the deeper the chain parameter." - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [], - "source": [ - "savefig(joinpath(DIR, \"weights.png\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Retrieving a snapshot for a prediction:" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "3-element CategoricalArrays.CategoricalArray{Int64,1,UInt32}:\n", - " 7\n", - " 9\n", - " 5" - ] - }, - "execution_count": 28, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "mach2 = machine(joinpath(DIR, \"mnist3.jlso\"))\n", - "predict_mode(mach2, images[501:503])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Restarting training" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Mutating `iterated_clf.controls` or `clf.epochs` (which is otherwise\n", - "ignored) will allow you to restart training from where it left off." 
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 29,
-    "metadata": {},
-    "outputs": [
-     {
-      "name": "stderr",
-      "output_type": "stream",
-      "text": [
-       "┌ Info: Updating Machine{ProbabilisticIteratedModel{ImageClassifier{MyConvBuilder,…}},…} @194.\n",
-       "└ @ MLJBase /Users/anthony/.julia/packages/MLJBase/AkJde/src/machines.jl:355\n",
-       "┌ Info: loss: 0.44491807\n",
-       "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n",
-       "┌ Info: loss: 0.45756835\n",
-       "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n",
-       "┌ Info: Saving \"/Users/anthony/Dropbox/Julia7/MLJ/MLJFlux/examples/mnist/mnist1.jlso\". \n",
-       "└ @ MLJSerialization /Users/anthony/.julia/packages/MLJSerialization/NEVjq/src/controls.jl:39\n",
-       "┌ Info: loss: 0.46934748\n",
-       "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n",
-       "┌ Info: loss: 0.48012182\n",
-       "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n",
-       "┌ Info: loss: 0.49021214\n",
-       "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/controls.jl:281\n",
-       "┌ Info: final loss: 0.49021214\n",
-       "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/train.jl:29\n",
-       "┌ Info: final training loss: 0.010609606\n",
-       "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/train.jl:31\n",
-       "┌ Info: Stop triggered by Patience(4) stopping criterion. \n",
-       "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/B3jW2/src/stopping_controls.jl:77\n",
-       "┌ Info: Total of 32 iterations. \n",
-       "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/Twn0E/src/core.jl:35\n"
-      ]
-     },
-     {
-      "data": {
"iVBORw0KGgoAAAANSUhEUgAAAlgAAAFyCAYAAAApuaQRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOzdd1hUV+I+8PfcKQwMTZogiogNFRCFiKAx0VgiJtGIEjeajWVX84umra4mbpJN3KymaKKuJtH9bhJb3FjS1Rg1GguW2EFFbFjWglQFhjIz9/cHgpQZLGHuMPB+nodHZu6dmderCa/3nHuukGVZBhERERHVGcneAYiIiIgaGocuWIWFhTh48CAKCwvtHYWIiIiogkMXrNTUVERFRSE1NdXeUeqFvLw8e0dosHhsbYPH1TZ4XG2Dx9U2GupxdeiCRVWZTCZ7R2iweGxtg8fVNnhcbYPH1TYa6nFlwSIiIiKqYyxYRERERHWMBYuIiIiojqntHYCIiMhRXbhwAZmZmfaO4dDy8vLg4eFh7xj3xMfHB0FBQbXuw4JFRER0Hy5cuIAOHTpwqaBGyMXFBSdOnKi1ZLFgERER3YfMzEwUFhZi+fLl6NChg73jkEJOnDiBUaNGITMzkwWrNuaiQkg6F3vHICIiB9WhQwd07drV3jGonmnUk9wL9m3GtXfHo+RCmr2jEBERUQPSqAuWU7vOkNy9kTF/Mgr2/mzvOERERNRANOqCdUnlg6/7vgd99CPIWfkhctYshGwstXcsIiIicnCNumB9d96M5/eq8H8dXoBn4oso2L0B1z9+FaYb2faORkRERA6sUResl8JUeL2LhKn7zPiP+wD4TnofxszLuDbnBRSnn7B3PCIiInJQjbpgAcCMKAl/jZDw8m4zPjO0R9PJC6D2aorr/5qKgt0b7B2PiIiIHFCjL1hCCLzXTcJLYRKe32XGF1eawHfie9B3H4Ccr+YhZ9V8yMYSe8ckIiJSRG5uLt5///37fv2ZM2fQtWtXdOnSBZ9//nkdJrt/6enp8PHxUfQzG33BAspK1kfdJfy/DhL+vMOEZWdVaDJ8EpqMeBkFezfh+oJpMOVl2TsmERGRzf3egrVmzRrExsbi0KFDGDNmTB0mcywsWLcIIbCgh4Rx7QXGbDdh5Wkz9N0fhd+Ls2HKuY5rcyah+Owxe8ckIiK6Jz/99BO6du2KiIgIPPTQQzh+/Di2bduG6Ojoin1SUlIQHBwMAHjuueeQm5uLyMjIKvtUlp+fj7FjxyIsLAxhYWF4++23AQBLly7FRx99hNWrVyMyMhLHjx+v8ro9e/YgKioKkZGRCAsLwyeffAIA+PLLLxETE4MuXbogMjIS69evr3hNcHAw3nzzTcTFxSEoKAjLly/HvHnz0K1bN7Ru3Rrbtm0DcPss1ZQpUxATE4NOnTrhl19+sZj/t99+Q58+fRAdHY2uXbti7dq193Vsa9PoV3KvTBICix5UodRswjPbTNBIwLCQ9vCb8i9kff5PXF84DZ5PPgd9j0EQQtg7LhEROYArhTKu2OB2hQEuQIBL7T+LMjIyMGrUKGzduhXh4eFYsWIFEhMTsWDBAquv+fTTTxEdHY3Dhw9b3ecf//gHSkpKcPToURgMBvTs2RMdO3bEH//4R5w9exb5+fmYPXt2jdfNmjULkydPxtNPPw0AyMnJAQAMGDAAf/jDHyCEQHp6OuLi4nD+/HloNBoAgMFgQFJSEn777Tc89NBDmD17Nvbt24dVq1Zh+vTpSEpKAgBkZWUhPDwcs2fPxp49ezBkyBCcOXOmSobc3FxMmDAB69atQ0BAADIzMxEVFYUePXrA39+/1uN5L1iwqpGEwH96qVBiNuEPv5SVrMHBTeA78V3kfbsYuWsWoORiGpoMmwSh0do7LhER1XOLTpjx9kFznb/v37tKeCtKVes+e/fuRWRkJMLDwwEAI0eOxMSJE3HlypXf9dmbN2/GvHnzIEkS9Ho9/vjHP2Lz5s0YPnx4ra/r3bs33nnnHZw+fRp9+vRBz549AQDnzp3DyJEjcenSJajVamRmZuL8+fNo06YNAOCpp54CAHTt2hUGgwGJiYkAgKioKJw9e7bi/bVaLZ555hkAQPfu3eHv748jR46gWbNmFfskJSXh7NmzGDhwYMVzsizj5MmTLFi2ppIElj5cVrKGbzHh235AfJAangnPQ9OiLXJWzUfplXR4j30Dak9fe8clIqJ6bEIHCU+0rPsZOQF3cRtdWZYtjri0aNECJpOp4nFRUZHV9zh+/HjFGacePXpg4cKFFt/X0ucMGzYMp0+fBgBs2bIFL7/8Mp544gls2bIF06dPR1hYGD7++GOMGDECs2fPxpAhQwAAXl5eVTLpdDoAgEqlqvHYaDTWegyq55JlGREREdi+fXutr/u9WLCsUEsCK/uoMHyzCUM3m/BDf6Bfcwn6bv2gCQhG1mf/QMbsF+A95m9wah1u77hERFRPBbiIuypDthAbG4tx48bhxIkT6NChA/773/+iefPmaNWqFc6dO4esrCx4e3tj2bJlFa9xd3dHYWEhjEYj1Go1OnbsWGO4sF+/fvj3v/+NuLg4FBYWYvny5XjttddqfP6aNWuqPD558iTat2+PkJAQtGjRAtOnTwdQNlRYPgds+fLlFUOH96qkpAQrVqzAM888g3379uHq1auIiIjA9evXK/aJi4vDqVOn8Msvv6BPnz4AgMOHD6Njx47QautuZIoFqxYaSeCrR1QYusmEJ342Yf2jQO9mErQt2sJv8nxkL5mF6wtfheeQ8dA/+ATnZRERUb3i6+uLZcuWYeTIkTCZTPD09MSqVasQGBiIKVOmIDo6GsHBwejVq1fFa7y8vDBy5EiEh4dDr9dj//79Nd73jTfewAsvvFAx9Dh8+HAMGzbsjnn+9a9/YevWrdBqtVCpVJgzZw4AYN68eXjyyScRGBiI2NhYBAUF3dfv19vbG6dPn0ZMTAzy8/Px5ZdfQq/XVylYTZo0wQ8//IC//vWveOWVV1BaWoqgoCB8++239/WZ1ghZluU6fUcFHTx4EFFRUThw4AC6du1qs88pMsp44mcTdl2T8dOjKjwYUHaqVzaZkPf9/yH/12/g8kBfNBn+AoTWyWY57iQ7OxteXl52+/yGjMfWNnhcbYPH1TaqH1elfgbR3UlPT0d0dDQyMzNt+jl3++fOZRrugk4t8G1/FWJ8BeI3mrDnWtlkRaFSwfPJCfB6ZhoMh3cgY/5kGLOv2TktERER2RsL1l1yUQt8P0CFSC+BR38yYf/121eEuET1hu9LH8JceBMZc15EUZr1S1uJiIio7gUHB9v87NW9YMG6B64agfWPqtDBU6D/BhMOZ90eXdU2bw2/yf+CJjAEmZ9Mx82ta+HAo69ERET0O7Bg3SM3rcCGR1Vo7SbQd50RKdm3S5RK7w6fCe/AtXcC8r77N7KXvQdzifVLX4mIiKhhYsG6D55OAhsHqtDCFXhkvREncm6XLKFSwfOJcfB6djqKUnbj+ty/wJh11Y5piYiISGksWPfJSy
ewKV4NP11ZyTqVV3U40KVLL/i9PBfmYgMy5ryAopMH7ZSUiIiIlMaC9Tv46AQ2x6vhoQX6rDPi7I2qJUvTrBWaTp4PTVB7ZH76Om5uWc15WURERI0AC9bv1NRF4JdBajirykrW+ZtVC5Tk4gaf8W/DrW8i8n74D7KXzIS52GCntERE1Ni89dZbKCkpuefX7d+/HyNHjryrfSMjI2Ew8GdbZSxYdSDgVslSibKSdSm/askSkgoeg0bDe8zrKDqxHxlzX4Hx+mU7pSUiosbk7bfftliw7nQPv+joaKxYseKuPuPw4cNwdna+r3wNFW+VU0eau5aVrF4/GtFnnRG/Pq5GgEvVW+c4d+4Jv6YtkPWfGbj24YvwemYanDs+YKfERESkBFNeFkw3suv8fVXuXlB5eNe6z3PPPQeg7P57kiShWbNmaNOmDdLS0nDx4kUcO3YMo0aNQmpqKkpKShAUFITPPvsMfn5+2LZtG6ZMmYL9+/dXrJL+/PPPY926dcjLy8P8+fMRHx8PoOyGyjdv3oSrqyuCg4MxZswYbNy4EVeuXMG4cePw+uuvAyi7cfSYMWNQUFCAiIgInD17Fq+//joee+yxOj8+9saCVYdauglsvVWyHllnxLbH1PBzrlqyNP4t4ffKPGSv+ABZ/34T7gP/CLd+I3gfQyKiBio/aT1ubry7M0H3wm3ASHgMfKbWfT799FMsWrQISUlJcHV1xejRo7Fz505s374drq6uAIC5c+fCx8cHAPDuu+9ixowZWLBgQY33ysrKQlRUFGbMmIGffvoJL730UkXBqi43NxdJSUm4fv062rRpgzFjxiAwMBDPPPMMXnnlFYwaNQoHDhxAt27dfudRqL9YsOpYiPutkvWDEX3XG/HLIDV8dFXLk+TiCu9xf8eNjStwY/0SlFw6Ba+np0DS2el260REZDOucfFwDute5++rcr+/+00mJiZWlCsAWLFiBZYtW4bi4mIYDAb4+/tbfJ1er8fgwYMBALGxsThz5ozVzyifu+Xr64uQkBCcO3cObm5uSElJwdNPPw0AiIqKQkRExH39HhwBC5YNtPUoGy586Ecj+q03Yku8Gl7VSpaQJHgMfAbaFm2QvewDXP/4Vfi9Mo9nsoiIGhiVh/cdh/KUVLlc7dy5EwsWLEBSUhJ8fX3x/fffY8aMGRZfp9PpKr5XqVQwmUxWP6P6vkajEbIsQwjRaH7OcZK7jXRoIrBlkBoX84EBG0zIK7G8PINzWCy8x76O0gtpKD59VOGURETU0Lm5uSEvL8/itpycHLi7u8PLywslJSVYtGiRzXJ4eHigY8eOWLlyJQDg0KFDSE5Ottnn2RsLlg2FewlsHqTGmZsyHt1gwk0rJcupXReo/VqgIGm9wgmJiKihmzx5Mvr06YPIyEhkZGRU2TZw4EC0adMGoaGhGDBgACIjI22aZenSpfjoo48QFRWFhQsXonPnzvDw8LDpZ9qLkB145cuDBw8iKioKBw4cQNeuXe0dx6r91814ZJ0JEV4CPw1UQa+peXr05rZvkPfDfxDw1jKo3Jrc1+dkZ2fDy+v+xuSpdjy2tsHjahs8rrZR/bg6ys+g+qSgoAAuLi4QQuD48eN4+OGHcfLkSTRpcn8/9+zhbv/ceQZLAdG+EjYOVOFwtozHN5pQaKzZafUP9AUkCQV7f7ZDQiIiItvbtWsXIiMjERERgREjRuDf//63Q5Wre6FIwSoqKsKQIUPQrl07REZG4tFHH0V6errFfU+dOoW4uDi0a9cO3bp1w/Hjx5WIaHPdm0pYP0CFvddlPPmzCUXVSpakd4NLZC8U7N4A2Wy2U0oiIiLb6d+/P44cOYKjR4/i6NGjFVclNkSKncEaP348Tp48icOHD+Oxxx7D+PHjLe43YcIEjB8/HmlpaZg6dSrGjRunVESbezBAwo8DVNh+VUbCZhOKTVVLlj4uHqasqyjmjaGJiIgcmiLLNOh0uiqLkXXv3h1z586tsV9GRgYOHjyIn38uGyZLSEjApEmTkJ6ejuDgYKvvP2nSJHh4eGDo0KFISEio8/x1qbMOWB6rwsgkZzy5oQSfdy+C5lbNld2bQjQNQs62b+HUNOSe3zsnJ6eO01I5Hlvb4HG1DR5X26h+XK1dmUeNQ15eHrKzb6/QX33eo13WwZo/fz4ef/zxGs9fvHgRzZo1g1pdFksIgaCgIFy4cKHWgrVgwQKHmmCY4AU4680YsknghcNafNlHBbVUNvE9/8HHkfv1J/CQzFB5+tzze3Niq+3w2NoGj6tt8LjaRuXj2lCvfqO74+HhUet/Z4pPcp85cyZOnTqFf/7znxa3V1+AzIEvcqxVfJCE1Y+o8E26jGe3mWAyl/0+XaL7QKi1KNiz0c4JiYiI6H4pWrBmz56Nr7/+Ghs2bICLS83bwrRo0QKXLl2quMO3LMu4ePEigoKClIypmMHBElb2UeGrszIm7iqb2C7p9HCJ6o2CPRsg17JKLhEREdVfihWsDz/8ECtXrsSmTZvg6elpcR8/Pz906dIFy5cvBwCsXbsWwcHBtQ4POrphIRI+iJGwONWMK4VlZ7H0cfEw5Wai6MRvdk5HRERE90OROViXLl3C5MmTERISgt69ewMAnJycsHfvXgBAfHw8ZsyYgejoaCxatAijR4/GzJkz4e7ujiVLligR0a5Gt5MwbZ8Za8+ZMamTCtoWbaFp0RYFSetscoNQIiKqOydOnLB3BFLQ3f55K1KwmjdvXutcqvXrb98ipn379ti9e7cSseqNJk4C/QIFVp+VMalT2XOuPQYh56t5MGZdhdrb8p3NiYjIfnx8fODi4oJRo0bZOwopzMXFBT4+tV+IZperCKmm4SESxv5qwuUCGc30As5dHkbut4tRsOcneAwabe94RERUTVBQEE6cOIHMzEx7R3FoeXl5DndFpo+Pzx3nh7Ng1RODWwqoJWDtOTNeCFNBctLBJfoRFOzZCPdHR0Go+EdFRFTfBAUFNdgLsZTSUO+dyXsR1hNNnAT6BwqsOnt7KNU1Lh7mmzkwJDeuIVMiIiJHx4JVjySGSNh5Tcb/CspKlqZZK2hbdURB0vo7vJKIiIjqExaseuSJlgLaW8OE5fRx8ShOO4TS6/+zYzIiIiK6FyxY9Yink0D/5lWHCV06PwjJxY1nsYiIiBwIC1Y9kxgiYdc1GZfyy0qW0DrB5YG+KNy3CbKxxM7piIiI6G6wYNUz5cOEa6oNE5oLbsBwZJcdkxEREdHdYsGqZzy0Ao+2qDpMqGnaAk5tOyN/1zo7JiMiIqK7xYJVDw1vJWF3hoyL+bdLlj42HiVnU1B69bwdkxEREdHdYMGqh55oKeCkqjpM6BwRB8nVg5PdiYiIHAALVj3krhV4tNrVhEKtgT5mAAr2bYa5pMiO6YiIiOhOWLDqqcQQCXsyZFyoMkw4EHJRAQyHttsxGREREd0JC1Y99XjQrWHCs7eHCdU+AXAKjeIwIRERUT3HglVPuWkFBjYXWHVOrvK8a9wglJxPRcmlM3ZKRkRERHfCglWPJYZI2JshI/3m7ZKl6xQDycObZ7GIiIjqM
RaseuyxIAFdtasJhUoFffdHUXjgF5iLCu2YjoiIiKxhwarH3LQC8dUWHQUAffdHIZcUo/DAVjslIyIiotqwYNVzw0Mk/Ha96jChuokvdB0fQMHu9ZBluZZXExERkT2wYNVz5cOEqytdTQgA+h6DUHrpDEovpNkpGREREVnDglXPuWoEBgXVHCbUhUZB1cSP9yckIiKqh1iwHEBiKwn7M2WcvVFpZXdJBX3sQBgO/QpzYb4d0xEREVF1LFgOYFCQgLOlYcKY/pBNRhTu32KnZERERGQJC5YD0N8aJlxdbdFRlYc3nMNjkZ+0jpPdiYiI6hEWLAeRGCLhQKaMMzeqLdkQFw/j1QsoOXfMTsmIiIioOhYsBxHfQsBFXXOY0KltJNQ+zVDAye5ERET1BguWg9BrBB4LElhVrWAJSYI+Lh6Fh3dCLrxhp3RERERUGQuWA0kMkXAoCzidV3WY0KVbPwCA8cgOe8QiIiKialiwHMjA8mHCc1XPYqlcPeAc2RPGA1sgm81WXk1ERERKYcFyIC5qgcctDBMCgGvcIMjZV1F8+ogdkhEREVFlLFgOJjFEwuEsIC236jChNqQThE8gCpLW2ykZERERlWPBcjADWwjoLQwTCiGgjnoEhqNJMN3ItlM6IiIiAliwHI6zWuDxlqLGcg0AoI7oCaFSo2Dvz3ZIRkREROVYsBxQYisJR7KBk9WGCYWzK5y79ELB7g2QzSY7pSMiIiIWLAf0aAsBV03NRUcBQB83CKbsayhKPWiHZERERASwYDkkZ7XAE0ECq87VLFjalu2hCWyNgiSu7E5ERGQvLFgOKjFEQnI2kFp9mFAI6OPiUXRsH4y51+2UjoiIqHFjwXJQA5oLuFkZJnSJ6g2hdULB7p/skIyIiIhYsByUTi3wREvLi45KOhe4RPVGwZ6fIJs42Z2IiEhpLFgOLDFEQkoOcDxHrrFNHxcPc14Wio7vs0MyIiKixo0Fy4H1DxRwtzJMqG3eBpqg9sjfxcnuRERESmPBcmDlw4TVV3Uv59pjEIpPHoAx66rCyYiIiBo3FiwHlxgi4VgOcCy75jChc5deEE4uKNi9wQ7JiIiIGi8WLAfXv/mtYUILZ7EkrQ76Bx5BwZ6NkI2ldkhHRETUOLFgOTgnlcCQ4LKrCeWaJ7HKJrvn58KQvFv5cERERI2UxYJVVFSEiIgIbNy4Uek8dB8SQyScyAVSb9T849QEBEMbEoaCpPV2SEZERNQ4WSxYOp0Oly9fhkqlUjoP3Yd+gQIeWuC7/6ktbneNi0fxqcMovXZR4WRERESNk9UhwqFDh2LNmjVKZqH7pFUJDGkp8O0lNWQL44TOnXtC0rtzsjsREZFCLJ/yANCjRw9Mnz4dly9fxsCBA+Hn5wchRJV9hg4davOAdHcSQyQsOaVCSg4Q7lV1m9Bo4dKtHwr3/gyPQaMhNFr7hCQiImokrBasMWPGAAB+/PFH/PjjjzW2CyFg4m1Y6o2+gQIeGhmrzpoR7lVzaFcfF4/8rWtReGQH9NGP2CEhERFR42G1YJ07d07JHPQ7aVUC8c2MWH1WYEaUVONso8Y3EE7tIlGwax0LFhERkY1ZLVgtW7ZUMgfVgSHNS7HyvAbJ2UCEd83t+rhByP7inyi9kg5NQLDS8YiIiBoNqwULAEpKSvDf//4XO3bsQHZ2Nry8vNCrVy889dRT0Go5j6e+6eVnQhMnYNVZMyK8aw4TOofHQnJrgvyk9WiS8LwdEhIRETUOVq8izMjIQFRUFEaPHo3Nmzfj8uXL2Lx5M5599llER0cjIyNDyZx0F7QS8GTL8kVHa15NKFRq6LsPQOFvm2EuLrJDQiIiosbBasGaMmUKsrKykJSUhHPnzmH37t0Vv2ZnZ+Ovf/2rkjnpLiWGSDh1AziSbXm7vvujkIsNMBzapmguIiKixsRqwVq/fj3ee+89dO/evcrzMTExmDlzJtatW2fzcHTv+gQKeDkBq8/WvDchAKi9/aELjUY+V3YnIiKyGasFq7CwEN7eFmZKA/D29kZhYaHNQtH900gCTwZbHyYEAH2PeJReSEPJxVMKpyMiImocrBasqKgozJs3r8ZaVyaTCfPmzUNUVJTNw9H9SQyRcPoGcDjL8nZdh25Qefrw/oREREQ2YvUqwpkzZ6Jfv34ICQnBkCFD4O/vj2vXruHbb7/FtWvXsGnTJiVz0j3o3UzA+9bVhF18al5NKFQq6Ls/ipu/rIHH4D9B0untkJKIiKjhsnoG68EHH0RSUhKioqKwcuVKvPnmm1i5ciWioqKwa9cu9OzZ854/7MUXX0RwcDCEEEhJSbG6X3BwMEJDQxEZGYnIyEh89dVX9/xZjZlGEhja6g7DhN0fhWwsQeH+rQqnIyIiavgsnsEqLi7GggUL0L9/f3z99dd19mHDhg3D1KlT76qcrVmzBmFhYXX22Y3N8FYS/p1qwqEsoKtPze0qTx/oOnVHQdI66HsMqrHyOxEREd0/iwXLyckJb7zxBh544IE6/bBevXrV6fuVmzRpEjw8PDB06FAkJCTY5DMcQU5OTsX3nXWAl1aPJccKEBxeYnF/OfxBlH75HjKT90HVvK1SMR1S5WNLdYfH1TZ4XG2Dx9U2Gspx9fLyqvLY6hysyMhIHD9+3Gal6E5GjhwJs9mMmJgYzJo1C76+vlb3XbBgAbp27apguvqr8h/wsBATfvifhLm99BbPUMnRD+HqT19AlbwDXhExSsZ0SNX/46G6weNqGzyutsHjahsN8bhanYM1b948fPTRR/j6669hMBiUzITt27fjyJEjOHjwILy9vfHss88q+vkNRWKIwLmbwIFMy/OwhCRBHxePwsPbYS68qXA6IiKihstqwerTpw/S09MxfPhwuLq6ws3NDe7u7hVfHh4eNgsVFBQEANBoNHj55ZexY8cOm31WQ/ZQgICPDlh91nLBAgB9TH/AZELBb1sUTEZERNSwWR0inDJlipI5KhQUFKC0tBSenp4AgJUrV6JLly52yeLo1JJAQrCEVWfNeLebZHGYUOXWBM4RPVCwax1cew3mZHciIqI6YLFglZSUICwsDJGRkWjdunWdfdjEiRPx3Xff4erVq+jbty9cXV1x+vRpAEB8fDxmzJgBLy8vJCQkwGQyQZZlhISEYOnSpXWWobFJDBFYlArsz5TxgK/l8qTvMQiZC6eh5EwynNpEKJyQiIio4bFYsLRaLUaOHImffvqpTgvWwoULsXDhQovb1q+/var4oUOH6uwzG7teAQJ+zsCqszIesHKdgFObCKh9A5GftJ4Fi4iIqA5YnYMVGhqKixcvKpmFbKB8mHB1LYuOCiGgj4uH4chOmG7mKpyQiIio4bFasGbNmoV33nkHBw4cUDIP2cDwEIHz+cBv161Pdnfp1g8QAoX7eAskIiKi38vqJPepU6ciMzMT3bp1g4+PD/z8/KpMgBZC4MiRI4qEpN+nl//tYcJufpb3Uend4RL5IPJ3r4dr7wQIyWr3JiIiojuwWrCioqIQHR2tZBayEZUkMKxV2dWEH8RYvpoQAPRxg1C4/xcUnzoMXXsu3EpE
RHS/rBasL774QsEYZGuJIQIfHwf2Zsjo3tRywdK26giVdwAMR3axYBEREf0OdzUOJMsyLl++DKPRaOs8ZCM9mwo0dQZWn7M+D0sIAefwWBhSdkM2mxVMR0RE1LDUWrA2btyI7t27Q6fToUWLFjh69CgAYPz48VixYoUiAalulA8Trj5rhtnK1YQA4BweB/ONbJRcSFMwHRERUcNitWCtXLkS8fHxaNmyJebPn1/lEv/WrVvj888/VyQg1Z3EEIGLBWXDhNZoW3WApPdAUXKSgsmIiIgaFqsF6x//+AdefvllfPXVV/jTn/5UZVunTp2QkpJi83BUt3o0FQhwKbua0BohqaDrFAND8m4FkxERETUsVgvW2bNnER8fb3GbXq9HXl6ezUKRbVQME567wzBhRCyMGRdReo0LzRIREd0PqwXL398fqampFrcdPXoULVu2tFkospM5ACMAACAASURBVJ3hrQT+VwDsqWWYUNeuK4TWCUUpexRMRkRE1HBYLVhPP/003nrrLWzZsqXiOSEEUlJS8P7772PUqFGKBKS61cP/LoYJtU5wCo2CgfOwiIiI7ovVgvXWW28hLi4O/fr1g7+/PwBg4MCB6Ny5M6Kjo/Hqq68qFpLqjiQEht/N1YRhcSg5nwpTXpaC6YiIiBoGqwuNarVafPfdd9i6dSs2bdqEzMxMeHl5oW/fvujbt6+SGamOJYYIzD8GJF2T0dPf8qKjuk7dACFgOLYXrnGW5+IRERGRZVYLVrnevXujd+/eSmQhhcQ2FQjUlw0T9vS3vI9K7w6nkDAUJe9mwSIiIrpHvKNvIySJsqsJ19zhakJdeByK0g7DXFSgYDoiIiLHx4LVSCWGCFwpBHZdrW0eVnfAVIqiEwcUTEZEROT4WLAaqe5+As31tV9NqPb2hyawNQwpXHSUiIjoXrBgNVLlVxOuOWeGyVzbvQljUXRsH2RjqYLpiIiIHBsLViM2PETgqgHYda32eVhyUQGKzyQrmIyIiMixVbmKcPv27ff04l69etVpGFJWjJ9Ai1vDhL0CLO+jadYKqiZ+MCQnQde+q7IBiYiIHFSVgvXwww9DCAH51pVlQtxeI0mW5SqPAcBkMikQkWxFEgLDQySsOG3GvFgJKqnmmlhCCDiHx8FwZCfkoc9DSDzpSUREdCdVCtZvv/1W8X1GRgbGjx+PXr16YdiwYWjatCmuXbuG1atXY8eOHVi8eLHiYanuJYYIfJgM7Lgq4+FmVhYdDY9F/vZvUXrpFLRB7RVOSERE5HiqFKyoqKiK74cNG4YRI0bggw8+qPKCJ598ElOmTMHixYsxcOBAZVKSzXTzFQhyLRsmfLiZ5X2cQsIgubjBkLybBYuIiOguWB3v2bhxI/r3729x24ABA7B582abhSLliFtXE65Nt341oVCpoOsUA0Myl2sgIiK6G1YLlqurK7Zs2WJx26ZNm+Dq6mqzUKSsxBCBDAOwvbZFR8PjYLx6HqXX/6dgMiIiIsdk9V6EEydOxJtvvolr165hyJAh8PPzQ0ZGBr755hssW7YMb7/9tpI5yYYe8BVoeWuYsLe1YcLQrhAaJxQl74amzzBlAxIRETkYqwXr9ddfh6enJ959910sWbKk4urCgIAAzJ07Fy+88IKSOcmGhBBIDJHwRZoZ/4qToLZwNaGk1cGpfRcYkpPgxoJFRERUq1qvuZ80aRIuXLiA9PR0JCUlIT09HRcvXmS5aoCGhwhcLwJ+vVL7MGFJ+gmYbuYomIyIiMjx3HFRI0mSEBQUhJiYGAQFBUHiOkgNUrSPQLArsLqWexPqOnUDIFCUske5YERERA6o1rZ07NgxjBgxAq1bt4aTkxMOHjwIAPjb3/6GDRs2KBKQlFE+TLg23QyjlasJVa6e0IZ0goEFi4iIqFZWC9amTZvQpUsXpKenY8SIESgtvX2zX41Gg48//liRgKScxBAJmUXA1st3uPnzyYMwFxsUTEZERORYrBas1157DSNGjMCePXtqXDHYpUsXHDp0yObhSFldfYBQT+A/J81W93EOiwWMpShKPaBgMiIiIsditWClpKTgmWeeAYAa9yD09PREZmambZOR4oQQGB8q4et0GdcNls9iqX0CoGnWCkXJSQqnIyIichxWC5aXlxcuX75scVtaWhoCAgJsFors549tJQgAS05ZP4ulC4uF4dg+yCajcsGIiIgciNWCNWTIEPz973/HyZMnK54TQuDq1auYPXs2EhISFAlIyvLWCQxrJbD4hBmybPkslnN4LGRDPorPJCucjoiIyDFYLVizZs2Cr68vIiIiEBMTAwAYO3Ys2rdvDw8PD7z11ltKZSSFTegg4dQNYJuVNbE0zdtA5emLIt6bkIiIyCKrBcvDwwNJSUn49NNP0a5dO/Tt2xft27fHnDlzsHPnTt6LsAF70F+gvQewONXyMKEQAs7hsTAk77Z6louIiKgxs3irnKKiIiQmJmLy5MkYM2YMxowZo3QusqPyye6v/WbGdYMMX+eat87Rhcchf8f3KL10GtoWbe2QkoiIqP6yeAZLp9Ph119/hdlsfaIzNWzPtiv7q7HUymR3p9ZhEC6uMHCYkIiIqAarQ4T9+/fHpk2blMxC9UjFZPdUy5PdhUoN547duFwDERGRBRaHCAFgzJgxeO6555Cfn4+BAwfCz8+vxnpYXbt2tXlAsp/xHSR8+aMJv16R8XAzy8OEhft/gTHzMtQ+zeyQkIiIqH6yWrAee+wxAMCCBQuwYMGCKuVKlmUIIWAymWyfkOymV6XJ7g83q3myUxcaBag1MCTvhltvLttBRERUzmrB2rp1q5I5qB6qPNk9s0iGj67qWSzJyRm69l1hSNnDgkVERFSJ1YL10EMPKZmD6qk/tisrWEvSzJgcoaqx3Tk8FjlfzYcpPxcqV087JCQiIqp/rE5yJwIAH51AQi2T3XWdugOQUXRsn/LhiIiI6qlaC9by5cvRs2dP+Pn5wd3dvcYXNQ7jQyWk5QHbr9YsWCo3T2hbdYSBVxMSERFVsFqwli9fjj/96U8ICwtDZmYmEhMTkZCQAK1WCz8/P0yZMkXJnGRHDwUItPMAFp2wvCaWc1gsik4ehLm4SOFkRERE9ZPVgjVnzhy88cYbWLhwIQDg+eefx+eff45z587B19eXt8ppRMonu689JyOzyMIwYXgsUFqC4pMH7JCOiIio/rFasE6dOoUePXpApVJBpVLhxo0bAAA3NzdMmzYN8+fPVywk2V/Fyu5pNc9iaXwDofZvyVXdiYiIbqn1Zs/FxcUAgMDAQBw/frxim8lkQlZWlu3TUb3hoxMYWstkd+fwWBQd2wuZa6MRERFZL1jR0dE4evQoAOCJJ57A22+/jQULFmDRokWYMmUKYmJiFAtJ9cOEUAknrUx2dw6Pg7nwJorPptghGRERUf1idR2s1157DefPnwcAzJgxA+fPn8crr7wCk8mEBx54AIsXL1YsJNUPDwUItHUHFp8w46GAqt1c06ItVB4+KEreDV3bznZKSEREVD9YPYPVvXt3PPXUUwAAT09PfPfdd8jPz0dubi727t2
LkJAQxUJS/SCEwPgOEtack5FVbbK7EAK68FgYkpMsDiESERE1Jve00KiTkxPXv2rknm17a7L7qZqT3Z3DY2HKyUDp/84qHYuIiKhesTpEOHbs2Du++LPPPqvTMFT/+TqXTXZfdMKMl8OkKjcBd2odDqHTw5CcBG3z1nZMSUREZF9WC9Zvv/1W47ns7GxcvXoV3t7e8Pf3t2kwqr/Gh0ros86EHVdl9Aq4XbCEWgNdp24oStkDj4HP2DEhERGRfVktWMnJyRafT0lJwahRozB37lybhaL67eEAgTbuwOJUM3pVm+zuHB6L7ANbYcy6CrU3SzgRETVO93yz57CwMEybNg0vv/yyLfKQAyhf2d3SZHddaDSg0sCQssdO6YiIiOzvngsWULYI6enTp+s6CzmQ0e0kmOWak90lnQt07SNRxJs/ExFRI2a1YGVnZ9f4unr1KrZu3Yrp06cjLCzsnj7oxRdfRHBwMIQQSEmxvhjlqVOnEBcXh3bt2qFbt25VVpCn+sPXWWBosOWV3XVhsSg+kwJTwQ07pSMiIrIvqwXLx8cHvr6+Vb4CAwPxyCOPICcnB5988sk9fdCwYcOwc+dOtGzZstb9JkyYgPHjxyMtLQ1Tp07FuHHj7ulzSDnjQyWk5gI7q63s7hzWHYCMomN77ROMiIjIzqxOcv/ss8+qXIIPADqdDs2bN0dMTAzUaqsvtahXr1533CcjIwMHDx7Ezz//DABISEjApEmTkJ6ejuDgYKuvmzRpEjw8PDB06FAkJCTcU66GJCcnR9HPi9ABIXo9/nW0FJ2ciqpskwLb4MaBX1HcJkrRTLai9LFtLHhcbYPH1TZ4XG2joRxXLy+vKo+ttqTRo0fbOksNFy9eRLNmzSrKmxACQUFBuHDhQq0Fa8GCBejatatCKeu36n/AtvZcJxPeOCDhk4ec4a27XchvdumFGz8th6erCyStTtFMtqL0sW0seFxtg8fVNnhcbaMhHtf7muRuS9XPmvG2K/Xbs7cmuy+rNtldFx4LubQYxWmH7JSMiIjIfqwWLEmSoFKp7urrXocLrWnRogUuXboEo9EIoKxcXbx4EUFBQXXy/lT3/JwFnrQw2V3j1xzqpkEwHN1tx3RERET2YbUZzZw5EwsXLoRKpcLgwYPRtGlTXL16Fd999x1kWcbEiRPrrFiV8/PzQ5cuXbB8+XKMHj0aa9euRXBwcK3Dg2R/40Ml9F1vwq5rMnr63z4D6Rwei4LdGyCbTBAqlR0TEhERKctqQ8rJyUFkZCS+/fZbqCr9cPzoo48wePBgZGZm4oMPPrjrD5o4cSK+++47XL16FX379oWrq2vFWlrx8fGYMWMGoqOjsWjRIowePRozZ86Eu7s7lixZ8jt+e6SE3s3KVnZfdMKMnv63T4rqwmJxc/NXKEk/DqfW4XZMSEREpCyrQ4RffPEFJk6cWKVcAYBKpcLEiRPvufgsXLiwYvjv6tWrVRYqXb9+PaKjowEA7du3x+7du5GWlob9+/ejU6dO9/Q5pDxJCPw5VMLqczKyK63srg1qB8ndC4ajXHSUiIgaF6sFy2AwID093eK29PR0FBUVWdxGjVP5yu7LTt+e7C4kCc5hsTCk7OHFCkRE1KhYLVhDhgzBtGnTsGTJEuTl5QEA8vLy8MUXX+C1117DkCFDFAtJ9Z+fs8CQlgKLT1Sd7O4cHgtT1hUYr6TbLRsREZHSrM7BWrhwIQoLCzF27FiMHTsWGo0GpaWlAMrK14IFCxQLSY5hQoeak92d2naG0LnAkJwETbNWdk5IRESkDKsFy83NDWvWrEFqair27duHK1euICAgAA888AA6dOigZEZyEL2bCbR2BxZXmuwu1BroOjwAQ/JuuA8YaeeEREREyrjjOguhoaEIDQ1VIgs5OEkI/Lm9hL8fNGNurAyvWyu7O4fHIXvpLBhzMqBu4mfnlERERLZndQ7WgQMHsGXLlorHubm5+POf/4yePXvirbfegtlstvZSasRGt5NgMgPLK01213WMBlRqFCVz0VEiImocrBasV155BTt37qx4/NJLL2HVqlXw9/fH7Nmz8c9//lORgORYmrqUrey+qNJkd0mnh1PbzjCwYBERUSNhtWAdP34c3bp1A1C2ZMOaNWswd+5crFmzBu+99x6WLVumWEhyLOM7SDieCyRdq3w1YRyKzxyFufCmHZMREREpw2rBKiwshIuLCwBg165dKC4uxuDBgwEAERERuHTpkjIJyeH0aSYQ4gYsTr09TOgc1h0wm2E4vs+OyYiIiJRhtWCFhIRgw4YNAIAVK1YgKioKXl5eAICMjAy4u7srk5AcjiQExodKWHVWRk5x2VkslYc3tC1DOQ+LiIgaBasF6y9/+Qvef/99+Pr6YunSpXjppZcqtm3btg0RERGKBCTHNLqdBKMZWHaq0mT38FgUndgPuaTYjsmIiIhsz2rBGjt2LLZt24ZXX30VmzdvxtNPP12xzdvbu0rhIqquqYvAkGCBxam3J7s7h8dBLilC0anDdk5HRERkW7Wug9WrVy/06tWrxvNvvfWWrfJQAzI+VEL/DSbszpAR11RA07QF1L6BMCQnwblTjL3jERER2YzVM1hEv9cjgbcmu5+oPEwYh6KUvZDNJjsmIyIisi0WLLIZSQj8OVTCV5UmuzuHx8Kcn4uS9FQ7pyMiIrIdFiyyqfLJ7stvTXbXtgyF5NYEhuQkOycjIiKyHRYssil/F4HBlSa7C0mCc1h3GJKTKia/ExERNTRWC9bSpUuRlZVlcVt2djaWLl1qs1DUsEwIlZCSA+zJKCtUuvA4mDKvwHj1vJ2TERER2YbVgjVmzBicOXPG4rZz585hzJgxNgtFDcsjgQKt3IBFtya769p1hnBy5r0JiYiowbJasGobvsnJyYGbm5tNAlHDU32yu1BroesQzXlYRETUYFVZB2vDhg0Vt8cBgDlz5qBp06ZVXlBUVIRffvkFkZGRyiSkBmFMOwlv7jdjxWkzJnVSwTk8DtnL3oMx9zrUnr72jkdERFSnqhSstLQ0/PDDDwAAIQR27NgBJyenKi/QarUICwvDzJkzlUtJDq/yZPeJHSXoOjwASCoUpeyBa8/H7R2PiIioTlUpWC+99FLFLXBatWqFb7/9Fp07d7ZLMGp4xodKGLDBhD0ZMmKbusKpbWcYknezYBERUYNjdQ7WuXPnWK6oTvUNFAh2BRanlk12dw6PRfGpIzAX5ts5GRERUd2qdR2stLQ0jB07Fm3btoW3tzfatm2LcePGIS0tTal81IBUTHY/IyO3WIYurDtgNqHoxG/2jkZERFSnrBasAwcOICoqCt9//z169uyJ8ePHo2fPnvj+++8RFRWFgwcPKpmTGoix7SWUmoEVp81Qe/pC06ItryYkIqIGR21tw9SpU9G5c2ds3LgRer2+4vmCggIMGDAAU6dOxebNmxUJSQ2Hv4vAEy0FFqWa8XxHCc7hcbi5ZRVkYwmEWmvveERERHXC6hmsPXv2YNq0aVXKFQDo9XpMnToVe/bssXk4apjGh0pIzgb2ZshwDo+FXGxAUdoRe8ciIi
KqM1YLllarRUFBgcVtBQUF0Gg0NgtFDVu/5rcnu6v9W0Lt0wxFHCYkIqIGxGrB6tu3L6ZPn47U1NQqz6empuKNN95A//79bR6OGqbyye7/PSPjRimgC4+FIWUPZLPZ3tGIiIjqhNWC9eGHH0KWZYSFhaFz584YMGAAIiMjERYWBrPZjDlz5iiZkxqYMe0llJiB5afMcA6Pg/lmDkrOp975hURERA7AasFq0aIFkpOT8eGHH6J9+/Ywm81o3749PvroIxw9ehTNmzdXMic1MAGVJrtrWraH5OrJmz8TEVGDYfUqQgBwdXXFiy++iBdffFGpPNSIjA+VMPAnE37LktA2LKZsHtYT4+wdi4iI6HertWABwPbt27Fjxw5kZ2fD29sbDz74IB588EElslED17+5QEtXYPEJMxaEx6Fwz0aUXrsITdMW9o5GRET0u1gtWAUFBXjyySexefNmqNVqeHt7IysrCyaTCX379sU333wDFxcXJbNSA1M+2X3mYTPmRHeG0OpgSE6CpulT9o5GRET0u1idgzVt2jTs3bsXX375JQwGA65cuQKDwYAvv/wSe/fuxauvvqpkTmqgxrSTUGwCvjyvga5DNIo4D4uIiBoAqwVr7dq1ePfddzFixAioVCoAgEqlwlNPPYWZM2di9erVioWkhquZXuDxIIFFJ8zQhcWi5HwqTHlZ9o5FRET0u1gtWLm5uQgJCbG4rXXr1sjNzbVZKGpcJnSQcDQbOO4XDUgSDCm8SwARETk2qwWrQ4cOWLJkicVtS5YsQceOHW0WihqXfoFlk90/TdfDqU0Eb/5MREQOz+ok9zfffBMJCQlIT0/H8OHD4e/vj2vXrmHVqlXYt28f1q5dq2ROasBUksCf2kuYdcSMdzt0R/GP/wdzUQEknf7OLyYiIqqHrJ7BGjJkCL755hsUFxdjypQpGDlyJCZPnozi4mJ88803GDx4sJI5qYEb275ssvs6fQxgMqLo+H57RyIiIrpvFs9glZSU4IcffkBkZCQOHDiAgoIC5ObmwtPTE3o9zypQ3Suf7D73og8GNm8DQ3ISXLo+ZO9YRERE98XiGSytVouRI0fi4sWLAAC9Xo/AwECWK7Kp8R0kHMkGskK6o+j4b5CNJfaOREREdF+sDhGGhoZWFCwiJfQPFAhyBZZrukMuLkTxqaP2jkRERHRfrBasWbNm4Z133sGBAweUzEONWPlk93nXgyC8/Hk1IREROSyrVxFOnToVmZmZ6NatG3x8fODn5wchRMV2IQSOHDmiSEhqPMa2l/D2QTNOtnwY7faugXN4HHQdou0di4iI6J5YLVhRUVGIjuYPNlJWoF7gsSCBqTeewob2Z5H1+T/g8/9mwakV110jIiLHYbVgffHFFwrGILptfKiEQRvVSH/sNbRc+yYyF70B3xc+gDbQ8p0FiIiI6hurc7CI7GVA87LJ7u8e00A/5u9Q+zZD5ifTUZpxyd7RiIiI7goLFtU7KkngzS4qfHdeRutvnbCm19sQendkfvwajDkZ9o5HRER0RyxYVC+NC5WQlqjGoBYCLx1xw4CmM3DDKCHj49dguskbjRMRUf3GgkX1Voi7wH8eUuNkohrRrX0wMOAfyMgtxIm501Fw86a94xEREVnFgkX1Xmt3gc8eUuPnUS2wstc/gNwMbH3vTXx8qABFRtne8YiIiGpgwSKH0cZD4L3H28B57DvoUJQOj7XvIHSlAQuOmVi0iIioXmHBIofTulMoAie8hV5FKfj08mz8JakUbVYZsfCYCcUmFi0iIrI/FixySLq2neEzejrCru1BmvZj9PYHXtxtRpuvjPjkOIsWERHZFwsWOSznsFg0eXoy1Id+xr/yPsPxBBUeChCYlGRG26+M+JRFi4iI7IQFixyaPvoReCY8j/xfv0HAvpVY3luNY8PUeNBf4PldZUVr0QkTSli0iIhIQSxY5PBcH3wC7oNG48aGZbj567cI9RRY0aesaPX0F/h/O81ou8qIxSfMLFpERKQIFixqENz6PgXXPsOQ982nKNi3CQDQoYnAl33USBmmRlxTged2mtBulRH/TjWj1MyiRUREtqNowTp16hTi4uLQrl07dOvWDcePH7e4X3BwMEJDQxEZGYnIyEh89dVXSsYkBySEgMfj46CPHYiclR/BcGRnxbaOTQRW9lEjOUGN7n4CE3aY0O4rI/6PRYuIiGxE0YI1YcIEjB8/HmlpaZg6dSrGjRtndd81a9bg8OHDOHz4MJ566ikFU5KjEkLAc/gkOEc+iKyl76Ho5MEq2zt5Cfz3ETWOJqjxgK/An3eY0H6VEZ+dZNEiIqK6JWRZVuQnS0ZGBtq1a4fMzEyo1WrIsoyAgADs2bMHwcHBVfYNDg7Gjz/+iLCwsFrf8+DBg4iKikJsbCw8PDwwdOhQJCQk2PB3Ub/l5OSgSZMm9o5hd7LJiOKv5sB8PhVOo16DqkU7i/sdz5PwwQktvv+fBsF6M/4SWozEICM0Fv7ZwWNrGzyutsHjahs8rrah9HE1moFCE1BoFDCYgEKTQKER8HWSEex6/5XIy8urymP17w16ty5evIhmzZpBrS77SCEEgoKCcOHChRoFCwBGjhwJs9mMmJgYzJo1C76+vlbfe8GCBejatautojuU6n/AjZV5/NvI/PR1lPx3NnwnvQ9tYEiNfXp6AT1bAcnZMt4+aMKLB5wx7xTwehcVRrURUEuiyv48trbB42obPK62weNqG+XH1WSWYTABBaVAobHsq8AoV/q+/Hm50vfl+8sovIvXlpotZ3g5TMJHsao6+z0pVrCAslJVmbWTZ9u3b0dQUBBKS0vx+uuv49lnn8X69euViEgNhKTVwefPb+P6wmnI/GQ6fF+aA41voMV9w70E1vRV40iWjBkHTRjzqwnvHALe6KLCSAtFi4iosZJlGcWmsrJScKvYFNwqO2Xflz8vV9vH0nNlj/NL9Cgyl6LACBSb7i6HWgB6DeCiLvvSqwEXtaj43kdX/rx0e5+K/cWt/au+tqlz3R4rxQpWixYtcOnSJRiNxoohwosXLyIoKKjGvuXPaTQavPzyy2jXzvIQD1FtJGc9fJ57B9fn/xWZH78G3xfnQN3E+pnQzt4Ca/upcfhW0Rp9q2j9rYsK3fQCTWS5xj8SiIjqI5P5VnkpvfVlBPJL5RqPqxYjuVpJqlmeCo3A3ax2o5HKiotec+tXNaBXi4rHfjpAr5GgVwOitBg+bs63CpCoVHrKC5CoUZI0DvAPX8UKlp+fH7p06YLly5dj9OjRWLt2LYKDg2sMDxYUFKC0tBSenp4AgJUrV6JLly5KxaQGRuXqCZ/nZ+L6vMnI/OQ1+L44GypXz1pfE+kt8HU/NQ5l3j6jBbjC28mICC+B8IovoFMTAVdN/f8PnYjqr1JzpeJTCuQbqz+2XI7KH9+08BrDXZwJ0qmql6DbBaiJFmiuLzsDVKMoaUSl72+9rvI+mnsrQNnZJfDycv0dR7B+UnSIcNGiRRg9ejRmzpwJd3d3LFmypGJbfHw8ZsyYAS8vLyQkJMBkMkGWZYSEhGDp0qVKxqQGRu3pC
9/n30XG/MnI/PRv8J34PiRn/R1f18VH4Jv+aqTflLE9/SbOleiRnCPjp0tmLDgOmGVAAAhxB8KbVC5eAm3cwaFFogaq2CTjZilws6Ss3NwsvfW4tNLjktuP86ttzy+Vb5Wksq8SK3OCKnPVAK7qsl/dNICrRsBVA3hogUA94KqRquzjqhGVvq/5WK8GVPx/lE0pdhWhLZRfRXjgwAFOcgeQnZ3NCZi1KLl8Ftf/NRWagJbwee6fkLS6u35t9WNrMMo4ngMk58g4miUjOUdGcraMa4ay7ToV0LFJ1eIV4VU2xs9hxtv4d9Y2eFyrMporFZwSK4XIyvb8SttvlMjINwqrk6TLOasAN21ZESr7EhXfu956XKMM1fLYWQ1IDfj/Gw3176uiZ7CI7EnbLAQ+E/6BzI9fQ9Zn78DnT3+HUGvu672c1QJRvkCUb9X/6V03lBWt28ULWH3OjEJj2XYfXfXSVTbMqOcwI1EFs1w2F8hiASoFbpbcfUG6WQoU3WG4TCtVKkPa24XI49YwmZtGgpsGkIwGNHV3vl2YtJYLFM9eE8CCRY2MU3AHeI97E5mL/47s5e/D64+vQkh1d1mur7NAn0CBPpUuWDTLMs7ewO3ilV37MGNEpWFGnsInR2CWyy6Frz5H6I7DaJW236i0vaAUqG1oRRKVzw5VLTg+boCbVrK6vXKBKv/Squ7uv7OGOleIbIMFixodXfuu8H72VWR9um3iIwAAGZJJREFU8U/krJqPJk+9bNNhO0kItPEA2ngIPNnq9vOVhxmTs8uK1+JUs8VhxvLS1cpNoIlT2b+s+a9kuh/G6leXVZogfbPiOblSUao5Z6j64wLjnT/XRX270LhXKjn+LkBbjahWgkStBclZxaF2qv9YsKhRco7ogSYj/oKcL2dDcnaFxxN/Uvx/2HczzFhWvKoOM5Zz1QCe2vIvAU+nsu+bOInbz1v53kPLs2P1WfkZocJaLqEvtLbWkFFGTqEOJcJosSzdabgMKJsAXTE5Wl1pTpAG8He2MqG6yiRsUTGR2k1b9jz/vlFjw4JFjZa+W1/IRQXI/foTSM6ucO//B3tHAlD7MOPFAhm5xUBuCZBbIpf9Wnz7+wv5wJFsc8U+N0utf45beUFzAppUKmie1b+/tU/l7901jecHpizLMMllqz9b+yoxAyWm26tLV19D6F6L0t1cYg9Uvcy+fL0gvRpwgoCXCxBU+eqyamWpfL5Q9avLXBr4hGoipbBgUaPm2mswzEUFuLF+CSRnPVwffMLekSyqPMx4L4xmGTdKygsZkFssW/w+59b3Z28AuSXmiufzaylokihbTVklALV066vy41u/Vn6skgC1EBWPzSZnOGuNt/er9F6VH6sqvab6e5otlh8ZJbUUotvFSL6Lfe7/z00lLCy2qLm9kGKgS/k2yWJRsva68kUYrZXc7Owb8PKq42WpieiesGBRo+fW7w8wF+Yjd+3HEE4u0Hfra+9IdUYtCXjpAK+KFSnuvaDlVRSy22fK8krKSoxJLrtxqtEMGGXcfiwDplu/1nwsVzwuLJYhqW+/tsRcdkan+nsZzTJMslzpMSrOKkmibNXo8i+tJKo8Lv9yVgPuUtkVY2XPiVtflV9b83WV97G8/dY2VfXFF8ue51whosaJBYsaPSEEPAb/GeaiAuT890NIOhc4R8TZO1a9oJYEvHWA930WtDspO9PiUqfvSURUH0j2DkBUHwgh/n979x4cVZWgAfw7995+d9J5Q0ISGg1JBGUSYBXQgAZl0XEUjIzs6Ags7mhpjSWWulvKFs7W4GqNOjs+dgdHF+PoOuhAdJz4GNDioUYHjQqsDAYwSweEPEnIo5Puvmf/6M4lHR5C0p1ON9+vquu+zr05fTikvpzTfS9Sf3w3bFMuRUvlv8P7zRexrhIREcUxBiyiEKGoSLvlAVgm/gAtz/8CvfW7Y10lIiKKUwxYRAMIzYT0f1wJU+75aF7zr+g7tD/WVSIiojjEgEU0iGK2IuOf/g1a+hg0/9dD8DcdinWViIgozjBgEZ2EYnMg4/bVUKx2NP3nvyCwbwfi+LnoREQ0whiwiE5BTUpBxp2PQnG60PvKozjy2B3oqnkHsq831lUjIqJRjgGL6DS01Exk3fsULLeuhJaRjbbXnsJ3v7gV7W+/hEBHa6yrR0REoxTvg0X0PYQQUN2TkDb1MvibDuHY1jfQuXkDjr3/GuxTL4dzzkKYc8+PdTWJiGgUYcAiOgtaZg5SK+6E6+pb0fXJu+jc9id0b98ES8EUOC9fCOukSyAUDgwTEZ3rGLCIhkCxO5FUfiOccxaiZ8dH6Ny8AS3P/wJaRg6ccxbAfvFVUCx8FhwR0bmKAYtoGISqwl46G/bS2eit343OzVU4WvVbtFdXwjFzPpyzr4eWmhXrahIR0QhjwCKKEIv7AliWXgB/WyM6t/0JXR+/g84tVbBNuQzOyxfC4r4g1lUkIqIRwoBFFGFaahZSrrsNyX9/M7r/uhGdW95A03+sgHl8MZyX3wDblEshVDXW1SQioihiwCKKEsVig7PsOjgu/SG8X/8VnZur0Fr5CNSUTDhnXwfHjKuh2J2xriYREUUBAxZRlAlFhe3CmbBdOBN9DfvQuaUK7dWV6Hj3FdgvmYek2QugZebEuppERBRBDFhEI8icez7Sbr4PrmuXofOjanR9VI2uD9+CdfIlcM5ZCEvBFAghYl1NIiIaJgYsohhQXelwXXMrkq+8Cd2ff4BjW95A87P/DNO484O3eZg6B0Izx7qaREQ0RAxYRDEkzBY4Zl4N+4z56N1Ti2Obq9D2P0+g/c//Deel18Jx6Q+hOlNiXU0iIjpLDFhEo4AQAtbiabAWT4Pv8AF0bn0Dxza9ho5N6+CYVg7nnAUwZbtjXU0iIjpDDFhEo4xpbD5Sf3w3kn+4FF0fvx28p9Yn78JSNBWOmVfDWlgCxZ4U62oSEdFpMGARjVKqIxnJVy1G0hUV6P5yKzq3vIHWF1cDQoE5vxCW4mmwFpXCPL4YQuV/ZSKi0YS/lYlGOaGZ4Jg+F47pc+FvPYLePbXw7qlF57Y3cey9VyAsdlgm/gDWolJYiqdBy8jhNxGJiGKMAYsojmhpY6DNvBqOmVdD6gH4PHvhDQWuo1VrAD0ANW0MrEVTYSmaCuvEEigOTicSEY00BiyiOCUUFebxRTCPL0LyvH+A7u1G776d6P3b5/Du+QJdNe8AQoEpbyKsxVNhLZoGs5vTiUREI4G/aYkShGK1wzb5EtgmXwIA8Lc1hqYTv0DXh3/Gsb+8CmGxwVIwBdbiabAUTYWWOY7TiUREUcCARZSgtNQsaDPmwzFjfnA6sWEfvHtq0bunFkffeA4I+KGmZgWnE4unwjqxlNOJREQRwoBFdA4QigpzfiHM+YXAVYuh9/aEphODn9/q+uRdQIjgdGLRVFiLpsLsvgBCM8W66kREcYkBi+gcpFhssE26GLZJFwMA/Eeb0LvnC3j/9jm6Pn4Hxzb+AcJshWXiD2ApLIW1eBq0rFxOJxIRnSEGLCKClpIJ7ZJ5cFwyD1LX4TvYP534Bdr/
9ALaq34LNSUTlqJSWIumwVJYAtXpinW1iYhGLQYsIgojFAXmvIkw500ErrwJeq83OJ0Yuh1E96d/AQCoGdkw5xbAlFsQWp7P5yYSEYUwYBHRaSkWK2yT/g62SX8HAAgcbUbv3h3o89Shr2EvvJvWQXq7AQBqSubxwJUXXCrJaZxaJKJzDgMWEZ0VNSUD9unlsE8vBwBIXUeg5TD6GvbC17AXfQ170bntTehdHQAAJSnVGOHqH/FS08YwdBFRQmPAIqJhEYoCLTMHWmYOUDobACClROBoE3yevUbw6vr0Lzi28Q/Bc+xOmHMLEEjPRffEi2DKK4CWng2hKLF8K0REEcOARUQRJ4QI3ocrNQu2KbOM/YH2FvQd3GeMdAW+/gStNX8OnmOxDxjlCi61rDwIVY3V2yAiGjIGLCIaMaorHTZXunF7iNbWVrgsGnwNe+Fr2Ic+Tx28//spOrdUAQCEyQJTzgTj81ym3AKYxo7n/bmIaNRjwCKimFIdyVBDNzftp/d0wXdwnzG92Lt3B7o+ehuQOqBqMGW7jweuHDe0zFwoThc/10VEowYDFhGNOorNAUvBFFgKphj79F4vfIf2G9OLfZ46dP11I6AHAADC6oCWlQtT1jhomeOgZeZCy8qFljkOisUaq7dCROcoBiwiiguKxQrLhEmwTJhk7JP+PvibDsHfdBC+xoPwNzbA39QA798+h97ZbpRTXRnQsvpDVyiAZeVCSxvLz3gRUVQwYBFR3BKaGaZsN0zZbtgGHdO7jwVDV1MD/E0H4W88iL763ejevgnS1xsspKjQMrKPB66sXJgygwGM9+8iouFgwCKihKTYk2BxF8PiLg7bL3UdgY4W+I0Rr2AI8+6sgb/1MKDrAILfajRGuzLHwZTVP+WYA8XqiMVbIqI4woBFROcUoSjBZy+mZAKFJWHHpN8Hf8vh4KhXKID5mg6id+9X0DvajHJKciq0zNxg6MocFwxiGeOgpmZCsQweSyOicxEDFhFRiNBMMI3Jg2lM3gnH9J6u0GjXQfhCI199njp0126G7O05fg2rA2pKxvGXKwNaSgbUlMzQdjqEzcnpR6IEx4BFRHQGFJsD5vxCmPMLw/ZLKaF3tMLf/B0CR5sQaG9G4Gjw5TtUD+/uz6B3tAJSGucIswWqa0DoCgWxgcFMcfC2E0TxjAGLiGgYhBBQXelQXemnLCMDfgQ62gYEsBYjiPmbD6F37w4E2luMW04AAFQT1JT08OA1aDRMSUqBUPgtSKLRiAGLiCjKhKpBS82Elpp5yjJS16F3HjVGv46PhDXBf7QZfQf2IHC0GfD7jp+kKFCT042px/7g5RcmeMfkQHEkQXG4oDiSICx2jogRjSAGLCKiUUAoCtTkNKjJacCgach+Ukro3ceM4BUexJrh++7/EGhvhuztQfPgk1UNiiMZiiMZamh5spfqcBnrwspQRjRUDFhERHFCCBF8tJAjGRh33inLtRz5Di6zBr2rA3pXO/SuY2HLQFcH9K4O+FsOh8p0QPZ5T7yQog4KX6F1pwuK/fjomOJwQXUmQ7EnQ9gcDGVEYMAiIko4wmSBlpoGnGZKcjDp64Pe1WGEr+OvdmM90NUBf1tjaPsYZG/3iRdS1GD4ciYHl1Y7hMUOxWKDsNggrPbQPltw38DtAWVhMjOoUVxjwCIiIgiT2fgw/ZmS/r7QqNjgYNZu7Nd7e6B3tcPfchiytwfS2w29tzt4a4sB36w8gaIEw5fldGHMFgxkVlt4WWv//lAZsxVCUSLQSkRnjgGLiIiGRGhm4xuUprM8V0oJ2ecNBa4eyN4e6N5uyN5uSG//ek8wjHmPH9N7eyDbW4LL/jLe7vBvYJ5QUQGhmSFM5uDIWGhdmMwQmgnCZDnpOgbt9/X50OVKCS9vtkBoZsAUOlczQwxc57Muz1kMWERENOKEEBAWG2CxYbgRREoJ+H2hMNYN3dszIJyFQllfL6SvD/D3QfpCr0Hrurcb8thRYz8Gl/H1ou10o24noyjhwUszA6oGoWqAqgZDmqoN2BdcClUFVFNwqZkglGBZDDomNBNgHFMh+s9RtbBjYeeoobKKCqhqsIyiBuuqBLehKJyiHSYGLCIiimtCCMBkhmoyA86UqP2c1tZWpLqSTxrOZN/pw9vAdQQCkAFfcOn3QeoBwO+DDARCQbEHMuAHAv7jS78fUvcDA5fGdfynn24dKqEEw5eqAEIFVOWEINa/fcK6EdxOcmzQstfnR5vNBggRCnZK6GcrwdHH/nOFAIQSPC90XBjl+ssOOGbs718XwfDYX27gcSGgpo056VMchmrEAlZdXR2WLFmC5uZmpKSk4MUXX8SkSZOGXI6IiGikif6RJthjXZUwUg8Ywa0/qAWDWzCknRDYAsGABj0A6HqwrB4ILfXgeqB/n36ScoP2BQadO6hcfxkEfND7Bl2rrw99qhI6VwekDin712XwGqFtKY+vG8fCzpGADBgPbT8bzjkLkbLw9oj9m4xYwLr99tvxs5/9DEuXLsUf//hHLF++HDU1NUMuR0REREHGiJLJHOuqnLXW1lakpaVF/LpSylD4CgC6DAtnMhTQoAeCQU3qUMyRfVC7kDIa44rhGhsbUVhYiObmZmiaBiklsrOz8cknn8Dtdp91uX61tbWYNm0aZs6cCZfLhRtuuAEVFRXRfjujVltbG1JTU2NdjYTEto0Otmt0sF2jg+0aHYnSroND4oiMYHk8HuTk5EDTgj9OCIH8/HwcOHAgLDidabnBnnnmGUydOjWabyFuROOvAApi20YH2zU62K7RwXaNjkRs1xG7McjgbyOcauDsTMsRERERjVYjErDy8vLQ0NAAv98PIBiaPB4P8vPzh1SOiIiIaDQbkYCVlZWF0tJSvPzyywCA9evXw+12nzDtd6bliIiIiEazEZsiXLNmDdasWYPCwkI8+uijeOGFF4xj11xzDT777LPvLUent379+lhXIWGxbaOD7RodbNfoYLtGR6K264gFrKKiItTU1OCbb77BZ599hsmTJxvH3n77bUyfPv17y9HpbdiwIdZVSFhs2+hgu0YH2zU62K7RkajtGtd3cu/p6QEA7N69O8Y1GR3a29tRW1sb62okJLZtdLBdo4PtGh1s1+hIpHYtLi6G3R68Ce2I3AcrWl555RXccsstsa4GERERET7//HPjtlFxHbCam5vx3nvvwe12w2aL7B1YiYiIiM5GwoxgEREREY1GI/YhdyIiIqJzBQMWERERUYQxYBERERFFGAMWERERUYQxYCUAt9uN4uJilJSUoKSkBOvWrYt1leLW3XffDbfbDSEEdu3aFXasrq4Os2bNQmFhIS6++GJ8/fXXMapl/Dldu7L/Do3X68WCBQtQWFiIkpISzJ8/H/X19cZx9teh+b52ZX8dunnz5mHKlCkoKSlBWVkZvvzyS+NYQvZXSXFv/PjxcufOnbGuRkLYsmWL9Hg8J23TK664Qq5du1ZKKeXrr78uZ8yYEYMaxqfTtSv779D09PTI6upqqeu6lFLKp59+Wl511VXGcfbXofm+dmV/Hbq2tjZjvaqqSpa
WlhrbidhfOYJFNMDs2bORm5t7wv7GxkbU1tYaN7atqKjAt99+G/aXLZ3aqdqVhs5qteKaa66BEAIAMGPGDOzfvx8A++twnK5daXhSUlKM9fb2dihKMIIkan9lwEoQN998My666CLcdtttaGpqinV1Eo7H40FOTg40Lfh0KSEE8vPzceDAgRjXLDGw/w7fU089hR/96EcA2F8jaWC79mN/Hbpbb70VeXl5WLlyJSorKwEkbn9lwEoAW7duxVdffYXa2lqkp6djyZIlsa5SQur/i7af5D16I4L9d/geeeQR1NXVYfXq1cY+9tfhO1m7sr8Oz0svvQSPx4Nf/vKXuP/++439idhf4/phzxSUn58PADCZTLjnnntQWFgY4xolnry8PDQ0NMDv90PTNEgp4fF4jLanoWP/HZ7HH38cGzZswKZNm4xHdLC/Dt/J2hVgf42UJUuW4I477kBLS0vC9leOYMW5rq4uHD161Nh+9dVXUVpaGsMaJaasrCyUlpbi5ZdfBgCsX78ebrcbbrc7thWLc+y/w/Pkk0/i1VdfxcaNG8M+38L+Ojynalf216Hr6OjAoUOHjO2qqiqkp6cjLS0tYfsrn0UY5/bv34+KigoEAgFIKXHeeefhN7/5Tdx3zFi566678Oabb+Lw4cPIyMiA0+nE3r17AQB79uzB0qVL0dLSguTkZFRWVmLy5MkxrnF8OFW7sv8OXUNDA/Ly8nDeeechKSkJAGCxWPDpp58CYH8dqtO1K/vr0Hk8HlRUVKCnpweKoiAzMxOPP/44SkpKACRmf2XAIiIiIoowThESERERRRgDFhEREVGEMWARERERRRgDFhEREVGEMWARERERRRgDFhEREVGEMWARERERRRgDFhHRMDz88MNwOp2xrgYRjTIMWEREREQRxoBFREREFGEMWEQUd2pqalBeXg6HwwGXy4Wf/OQnaGxsBADU19dDCIHKykosX74cLpcLaWlpuPfee+H3+8Ous2vXLsyfPx9OpxPJycm4/vrrjWdP9tN1HU8++SQuuOACWCwWjB07FosWLUJ7e3tYuR07duCyyy6D3W7HhRdeiPfeey+6jUBEoxoDFhHFlZqaGlx++eVwuVxYt24dnnvuOWzfvh3XXXddWLkHH3wQuq7jtddew/3334+nn34aK1euNI57PB6UlZXhyJEjqKysxPPPP49vvvkGZWVlaGpqMsr9/Oc/xwMPPIBrr70Wb731Fp599lkkJSWhs7PTKOPz+XDLLbdg6dKlqKqqQkZGBioqKtDS0hL9BiGi0UkSEcWR2bNny1mzZkld1419u3btkkIIWV1dLb/99lsJQJaVlYWdt3LlSmm322Vra6uUUsoVK1ZIu90uGxsbjTL19fXSZDLJVatWSSml3LNnjxRCyEceeeSU9Vm1apUEIKurq419dXV1EoD8/e9/H4m3TERxiCNYRBQ3uru78dFHH2HRokUIBALw+/3w+/0oKipCdnY2tm/fbpRduHBh2Lk33HADuru7sXPnTgDAtm3bUF5ejszMTKPM+PHjMWvWLGzbtg0A8MEHH0BKieXLl5+2Xoqi4MorrzS2CwoKYDab0dDQMOz3TETxiQGLiOJGW1sbAoEAVqxYAZPJFPY6dOgQPB6PUTYrKyvs3P7t7777zrjW2LFjT/gZY8eORWtrKwCgpaUFmqadcK3BbDYbzGZz2D6TyQSv13v2b5KIEoIW6woQEZ2plJQUCCHw4IMPYsGCBSccz8jIMNb7P/Q+eDs7OxsAkJaWhiNHjpxwjcOHDyMtLQ0AkJ6eDr/fj8bGxu8NWUREA3EEi4jihsPhwMyZM7F7925Mnz79hJfb7TbKVlVVhZ27YcMG2O12XHTRRQCAyy67DO+//37YB9E9Hg8+/vhjlJWVAQDKy8shhMDatWuj/+aIKKFwBIuI4sqvfvUrlJeX46abbsLixYuRmpqKhoYGbNy4EcuWLTNC1r59+7Bs2TIsXrwYtbW1eOyxx3DPPfcgNTUVALBixQqsXbsW8+bNw0MPPYRAIIBVq1YhLS0Nd911FwCgsLAQd9xxB1auXInW1lbMnTsX3d3dqK6uxsMPP4xx48bFqhmIaJRjwCKiuDJr1ix8+OGHWLVqFZYtW4a+vj7k5uZi7ty5KCgoMO51tXr1amzevBmLFi2Cqqq48847sXr1auM6eXl52Lp1K+677z789Kc/haIouOKKK/DEE0+EffD9mWeewYQJE/C73/0Ov/71r5Geno45c+YgKSlpxN87EcUPIaWUsa4EEVGk1NfXY8KECXj99ddx4403xro6RHSO4mewiIiIiCKMAYuIiIgowjhFSERERBRhHMEiIiIiijAGLCIiIqII+384brRYFfNaLwAAAABJRU5ErkJggg==" - }, - "execution_count": 29, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "iterated_clf.controls[2] = Patience(4)\n", - "fit!(mach, rows=1:500)\n", - "\n", - "plot(epochs, losses,\n", - " xlab = \"epoch\",\n", - " ylab = \"root squared error\",\n", - " label=\"out-of-sample\")\n", - "plot!(epochs, training_losses, label=\"training\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "\n", - "*This notebook was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Julia 1.6.0", - "language": "julia", - "name": "julia-1.6" - }, - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "1.6.0" - } - }, - "nbformat": 4, - "nbformat_minor": 3 -} diff --git a/examples/mnist/mnist.md b/examples/mnist/mnist.md deleted file mode 100644 index 36f7cd29..00000000 --- a/examples/mnist/mnist.md +++ /dev/null @@ -1,339 +0,0 @@ -```@meta -EditURL = "/../MLJFlux/examples/mnist/mnist.jl" -``` - -# Using MLJ to classifiy the MNIST image dataset - 
-```@example mnist -using Pkg -const DIR = @__DIR__ -Pkg.activate(DIR) -Pkg.instantiate() -``` - -**Julia version** is assumed to be ^1.6 - -```@example mnist -using MLJ -using Flux -import MLJFlux -import MLJIteration # for `skip` - -MLJ.color_off() - -using Plots -pyplot(size=(600, 300*(sqrt(5)-1))); -nothing #hide -``` - -## Basic training - -Downloading the MNIST image dataset: - -```@example mnist -import MLDatasets: MNIST - -ENV["DATADEPS_ALWAYS_ACCEPT"] = true -images, labels = MNIST.traindata(); -nothing #hide -``` - -In MLJ, integers cannot be used for encoding categorical data, so we -must force the labels to have the `Multiclass` [scientific -type](https://juliaai.github.io/ScientificTypes.jl/dev/). For -more on this, see [Working with Categorical -Data](https://alan-turing-institute.github.io/MLJ.jl/dev/working_with_categorical_data/). - -```@example mnist -labels = coerce(labels, Multiclass); -images = coerce(images, GrayImage); -nothing #hide -``` - -Checking scientific types: - -```@example mnist -@assert scitype(images) <: AbstractVector{<:Image} -@assert scitype(labels) <: AbstractVector{<:Finite} -``` - -Looks good. - -For general instructions on coercing image data, see [Type coercion -for image -data](https://alan-turing-institute.github.io/ScientificTypes.jl/dev/#Type-coercion-for-image-data-1) - -```@example mnist -images[1] -``` - -We start by defining a suitable `Builder` object. This is a recipe -for building the neural network. Our builder will work for images of -any (constant) size, whether they be color or black and white (ie, -single or multi-channel). The architecture always consists of six -alternating convolution and max-pool layers, and a final dense -layer; the filter size and the number of channels after each -convolution layer is customisable. - -```@example mnist -import MLJFlux -struct MyConvBuilder - filter_size::Int - channels1::Int - channels2::Int - channels3::Int -end - -make2d(x::AbstractArray) = reshape(x, :, size(x)[end]) - -function MLJFlux.build(b::MyConvBuilder, rng, n_in, n_out, n_channels) - k, c1, c2, c3 = b.filter_size, b.channels1, b.channels2, b.channels3 - mod(k, 2) == 1 || error("`filter_size` must be odd. ") - p = div(k - 1, 2) # padding to preserve image size - init = Flux.glorot_uniform(rng) - front = Chain( - Conv((k, k), n_channels => c1, pad=(p, p), relu, init=init), - MaxPool((2, 2)), - Conv((k, k), c1 => c2, pad=(p, p), relu, init=init), - MaxPool((2, 2)), - Conv((k, k), c2 => c3, pad=(p, p), relu, init=init), - MaxPool((2 ,2)), - make2d) - d = Flux.outputsize(front, (n_in..., n_channels, 1)) |> first - return Chain(front, Dense(d, n_out, init=init)) -end -``` - -**Note.** There is no final `softmax` here, as this is applied by -default in all MLJFLux classifiers. Customisation of this behaviour -is controlled using using the `finaliser` hyperparameter of the -classifier. - -We now define the MLJ model. If you have a GPU, substitute -`acceleration=CUDALibs()` below: - -```@example mnist -ImageClassifier = @load ImageClassifier -clf = ImageClassifier(builder=MyConvBuilder(3, 16, 32, 32), - batch_size=50, - epochs=10, - rng=123) -``` - -You can add Flux options `optimiser=...` and `loss=...` here. At -present, `loss` must be a Flux-compatible loss, not an MLJ -measure. To run on a GPU, set `acceleration=CUDALib()`. 
- -Binding the model with data in an MLJ machine: - -```@example mnist -mach = machine(clf, images, labels); -nothing #hide -``` - -Training for 10 epochs on the first 500 images: - -```@example mnist -fit!(mach, rows=1:500, verbosity=2); -nothing #hide -``` - -Inspecting: - -```@example mnist -report(mach) -``` - -```@example mnist -chain = fitted_params(mach) -``` - -```@example mnist -Flux.params(chain)[2] -``` - -Adding 20 more epochs: - -```@example mnist -clf.epochs = clf.epochs + 20 -fit!(mach, rows=1:500); -nothing #hide -``` - -Computing an out-of-sample estimate of the loss: - -```@example mnist -predicted_labels = predict(mach, rows=501:1000); -cross_entropy(predicted_labels, labels[501:1000]) |> mean -``` - -Or, in one line: - -```@example mnist -evaluate!(mach, - resampling=Holdout(fraction_train=0.5), - measure=cross_entropy, - rows=1:1000, - verbosity=0) -``` - -## Wrapping the MLJFlux model with iteration controls - -Any iterative MLJFlux model can be wrapped in *iteration controls*, -as we demonstrate next. For more on MLJ's `IteratedModel` wrapper, -see the [MLJ -documentation](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/). - -The "self-iterating" classifier, called `iterated_clf` below, is for -iterating the image classifier defined above until one of the -following stopping criterion apply: - -- `Patience(3)`: 3 consecutive increases in the loss -- `InvalidValue()`: an out-of-sample loss, or a training loss, is `NaN`, `Inf`, or `-Inf` -- `TimeLimit(t=5/60)`: training time has exceeded 5 minutes - -These checks (and other controls) will be applied every two epochs -(because of the `Step(2)` control). Additionally, training a -machine bound to `iterated_clf` will: - -- save a snapshot of the machine every three control cycles (every six epochs) -- record traces of the out-of-sample loss and training losses for plotting -- record mean value traces of each Flux parameter for plotting - -For a complete list of controls, see [this -table](https://alan-turing-institute.github.io/MLJ.jl/dev/controlling_iterative_models/#Controls-provided). 
- -### Wrapping the classifier - -Some helpers - -```@example mnist -make2d(x::AbstractArray) = reshape(x, :, size(x)[end]) -make1d(x::AbstractArray) = reshape(x, length(x)); -nothing #hide -``` - -To extract Flux params from an MLJFlux machine - -```@example mnist -parameters(mach) = make1d.(Flux.params(fitted_params(mach))); -nothing #hide -``` - -To store the traces: - -```@example mnist -losses = [] -training_losses = [] -parameter_means = Float32[]; -epochs = [] -``` - -To update the traces: - -```@example mnist -update_loss(loss) = push!(losses, loss) -update_training_loss(losses) = push!(training_losses, losses[end]) -update_means(mach) = append!(parameter_means, mean.(parameters(mach))); -update_epochs(epoch) = push!(epochs, epoch) -``` - -The controls to apply: - -```@example mnist -save_control = - MLJIteration.skip(Save(joinpath(DIR, "mnist.jlso")), predicate=3) - -controls=[Step(2), - Patience(3), - InvalidValue(), - TimeLimit(5/60), - save_control, - WithLossDo(), - WithLossDo(update_loss), - WithTrainingLossesDo(update_training_loss), - Callback(update_means), - WithIterationsDo(update_epochs) -]; -nothing #hide -``` - -The "self-iterating" classifier: - -```@example mnist -iterated_clf = IteratedModel(model=clf, - controls=controls, - resampling=Holdout(fraction_train=0.7), - measure=log_loss) -``` - -### Binding the wrapped model to data: - -```@example mnist -mach = machine(iterated_clf, images, labels); -nothing #hide -``` - -### Training - -```@example mnist -fit!(mach, rows=1:500); -nothing #hide -``` - -### Comparison of the training and out-of-sample losses: - -```@example mnist -plot(epochs, losses, - xlab = "epoch", - ylab = "root squared error", - label="out-of-sample") -plot!(epochs, training_losses, label="training") - -savefig(joinpath(DIR, "loss.png")) -``` - -### Evolution of weights - -```@example mnist -n_epochs = length(losses) -n_parameters = div(length(parameter_means), n_epochs) -parameter_means2 = reshape(copy(parameter_means), n_parameters, n_epochs)' -plot(epochs, parameter_means2, - title="Flux parameter mean weights", - xlab = "epoch") -``` - -**Note.** The the higher the number, the deeper the chain parameter. - -```@example mnist -savefig(joinpath(DIR, "weights.png")) -``` - -### Retrieving a snapshot for a prediction: - -```@example mnist -mach2 = machine(joinpath(DIR, "mnist3.jlso")) -predict_mode(mach2, images[501:503]) -``` - -### Restarting training - -Mutating `iterated_clf.controls` or `clf.epochs` (which is otherwise -ignored) will allow you to restart training from where it left off. 
- -```@example mnist -iterated_clf.controls[2] = Patience(4) -fit!(mach, rows=1:500) - -plot(epochs, losses, - xlab = "epoch", - ylab = "root squared error", - label="out-of-sample") -plot!(epochs, training_losses, label="training") -``` - ---- - -*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).* - diff --git a/examples/mnist/notebook.ipynb b/examples/mnist/notebook.ipynb index 00cf8e74..19efa715 100644 --- a/examples/mnist/notebook.ipynb +++ b/examples/mnist/notebook.ipynb @@ -38,16 +38,7 @@ "cell_type": "code", "execution_count": 2, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "┌ Info: Precompiling MLJ [add582a8-e3ab-11e8-2d5e-e98b27df1bc7]\n", - "└ @ Base loading.jl:1423\n" - ] - } - ], + "outputs": [], "source": [ "using MLJ\n", "using Flux\n", @@ -81,8 +72,6 @@ "name": "stderr", "output_type": "stream", "text": [ - "┌ Info: Precompiling MLDatasets [eb30cadb-4394-5ae3-aed4-317e484a6458]\n", - "└ @ Base loading.jl:1423\n", "┌ Warning: MNIST.traindata() is deprecated, use `MNIST(split=:train)[:]` instead.\n", "└ @ MLDatasets /Users/anthony/.julia/packages/MLDatasets/eZ0Va/src/datasets/vision/mnist.jl:187\n" ] @@ -146,7 +135,7 @@ "source": [ "For general instructions on coercing image data, see [Type coercion\n", "for image\n", - "data](https://alan-turing-institute.github.io/ScientificTypes.jl/dev/#Type-coercion-for-image-data-1)" + "data](https://juliaai.github.io/ScientificTypes.jl/dev/#Type-coercion-for-image-data)" ] }, { @@ -1275,7 +1264,7 @@ "text": [ "┌ Info: Updating machine(ImageClassifier(builder = MyConvBuilder(3, 16, 32, 32), …), …).\n", "└ @ MLJBase /Users/anthony/.julia/packages/MLJBase/Fl6Zc/src/machines.jl:499\n", - "\u001b[33mOptimising neural net: 100%[=========================] Time: 0:00:08\u001b[39m\n" + "\u001b[33mOptimising neural net: 100%[=========================] Time: 0:00:07\u001b[39m\n" ] } ], @@ -1528,7 +1517,7 @@ "outputs": [], "source": [ "save_control =\n", - " MLJIteration.skip(Save(joinpath(DIR, \"mnist.jlso\")), predicate=3)\n", + " MLJIteration.skip(Save(joinpath(DIR, \"mnist.jls\")), predicate=3)\n", "\n", "controls=[Step(2),\n", " Patience(3),\n", @@ -1571,7 +1560,7 @@ " rng = 123, \n", " optimiser_changes_trigger_retraining = false, \n", " acceleration = CPU1{Nothing}(nothing)), \n", - " controls = Any[Step(2), Patience(3), InvalidValue(), TimeLimit(Dates.Millisecond(300000)), IterationControl.Skip{Save{typeof(Serialization.serialize)}, IterationControl.var\"#8#9\"{Int64}}(Save{typeof(Serialization.serialize)}(\"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist.jlso\", Serialization.serialize), IterationControl.var\"#8#9\"{Int64}(3)), WithLossDo{IterationControl.var\"#20#22\"}(IterationControl.var\"#20#22\"(), false, nothing), WithLossDo{typeof(update_loss)}(update_loss, false, nothing), WithTrainingLossesDo{typeof(update_training_loss)}(update_training_loss, false, nothing), Callback{typeof(update_means)}(update_means, false, nothing, false), WithIterationsDo{typeof(update_epochs)}(update_epochs, false, nothing)], \n", + " controls = Any[Step(2), Patience(3), InvalidValue(), TimeLimit(Dates.Millisecond(300000)), IterationControl.Skip{Save{typeof(Serialization.serialize)}, IterationControl.var\"#8#9\"{Int64}}(Save{typeof(Serialization.serialize)}(\"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist.jls\", Serialization.serialize), IterationControl.var\"#8#9\"{Int64}(3)), 
WithLossDo{IterationControl.var\"#20#22\"}(IterationControl.var\"#20#22\"(), false, nothing), WithLossDo{typeof(update_loss)}(update_loss, false, nothing), WithTrainingLossesDo{typeof(update_training_loss)}(update_training_loss, false, nothing), Callback{typeof(update_means)}(update_means, false, nothing, false), WithIterationsDo{typeof(update_epochs)}(update_epochs, false, nothing)], \n", " resampling = Holdout(\n", " fraction_train = 0.7, \n", " shuffle = false, \n", @@ -1639,7 +1628,7 @@ "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", "┌ Info: loss: 1.968148\n", "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", - "┌ Info: Saving \"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist1.jlso\". \n", + "┌ Info: Saving \"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist1.jls\". \n", "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/J0pbp/src/controls.jl:203\n", "┌ Info: loss: 1.2209107\n", "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", @@ -1647,7 +1636,7 @@ "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", "┌ Info: loss: 0.46833506\n", "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", - "┌ Info: Saving \"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist2.jlso\". \n", + "┌ Info: Saving \"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist2.jls\". \n", "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/J0pbp/src/controls.jl:203\n", "┌ Info: loss: 0.42414027\n", "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", @@ -1655,7 +1644,7 @@ "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", "┌ Info: loss: 0.40475494\n", "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", - "┌ Info: Saving \"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist3.jlso\". \n", + "┌ Info: Saving \"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist3.jls\". 
\n", "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/J0pbp/src/controls.jl:203\n", "┌ Info: loss: 0.40977737\n", "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", @@ -1718,155 +1707,155 @@ "\n", "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n" + "\n" ] }, "execution_count": 26, @@ -1912,17 +1901,62 @@ "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "3-element CategoricalArrays.CategoricalArray{Int64,1,UInt32}:\n", - " 7\n", - " 9\n", - " 5" - ] - }, - "execution_count": 28, - "metadata": {}, - "output_type": "execute_result" + "ename": "LoadError", + "evalue": "UndefVarError: ##364 not defined", + "output_type": "error", + "traceback": [ + "UndefVarError: ##364 not defined", + "", + "Stacktrace:", + " [1] deserialize_module(s::Serialization.Serializer{IOStream})", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:984", + " [2] handle_deserialize(s::Serialization.Serializer{IOStream}, b::Int32)", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:883", + " [3] deserialize(s::Serialization.Serializer{IOStream})", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:801", + " [4] deserialize_datatype(s::Serialization.Serializer{IOStream}, full::Bool)", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:1331", + " [5] handle_deserialize(s::Serialization.Serializer{IOStream}, b::Int32)", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:854", + " [6] deserialize(s::Serialization.Serializer{IOStream})", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:801", + " [7] deserialize_datatype(s::Serialization.Serializer{IOStream}, full::Bool)", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:1356", + " [8] handle_deserialize(s::Serialization.Serializer{IOStream}, b::Int32)", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:854", + " [9] deserialize(s::Serialization.Serializer{IOStream})", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:801", + " [10] deserialize_datatype(s::Serialization.Serializer{IOStream}, full::Bool)", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:1356", + " [11] handle_deserialize(s::Serialization.Serializer{IOStream}, b::Int32)", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:854", + " 
[12] deserialize(s::Serialization.Serializer{IOStream})", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:801", + " [13] handle_deserialize(s::Serialization.Serializer{IOStream}, b::Int32)", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:869", + " [14] deserialize(s::Serialization.Serializer{IOStream})", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:801", + " [15] handle_deserialize(s::Serialization.Serializer{IOStream}, b::Int32)", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:907", + " [16] deserialize", + " @ /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:801 [inlined]", + " [17] deserialize(s::IOStream)", + " @ Serialization /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:788", + " [18] open(f::typeof(Serialization.deserialize), args::String; kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})", + " @ Base ./io.jl:330", + " [19] open", + " @ ./io.jl:328 [inlined]", + " [20] deserialize", + " @ /Applications/Julia-1.7.app/Contents/Resources/julia/share/julia/stdlib/v1.7/Serialization/src/Serialization.jl:798 [inlined]", + " [21] machine(file::String)", + " @ MLJBase ~/.julia/packages/MLJBase/Fl6Zc/src/machines.jl:406", + " [22] top-level scope", + " @ In[28]:1", + " [23] eval", + " @ ./boot.jl:373 [inlined]", + " [24] include_string(mapexpr::typeof(REPL.softscope), mod::Module, code::String, filename::String)", + " @ Base ./loading.jl:1196" + ] } ], "source": [ @@ -1960,7 +1994,7 @@ "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", "┌ Info: loss: 0.4575673\n", "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", - "┌ Info: Saving \"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist1.jlso\". \n", + "┌ Info: Saving \"/Users/anthony/GoogleDrive/Julia/MLJ/MLJFlux/examples/mnist/mnist1.jls\". 
\n", "└ @ MLJIteration /Users/anthony/.julia/packages/MLJIteration/J0pbp/src/controls.jl:203\n", "┌ Info: loss: 0.46934554\n", "└ @ IterationControl /Users/anthony/.julia/packages/IterationControl/wJWPx/src/controls.jl:278\n", @@ -1984,119 +2018,119 @@ "\n", "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", "\n", - " \n", + " \n", " \n", " \n", "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n" + "\n" ] }, "execution_count": 29, diff --git a/examples/mnist/notebook.jl b/examples/mnist/notebook.jl index 5c7f9311..b9a99c24 100644 --- a/examples/mnist/notebook.jl +++ b/examples/mnist/notebook.jl @@ -43,7 +43,7 @@ images = coerce(images, GrayImage); # For general instructions on coercing image data, see [Type coercion # for image -# data](https://alan-turing-institute.github.io/ScientificTypes.jl/dev/#Type-coercion-for-image-data-1) +# data](https://juliaai.github.io/ScientificTypes.jl/dev/#Type-coercion-for-image-data) images[1] @@ -194,7 +194,7 @@ update_epochs(epoch) = push!(epochs, epoch) # The controls to apply: save_control = - MLJIteration.skip(Save(joinpath(DIR, "mnist.jlso")), predicate=3) + MLJIteration.skip(Save(joinpath(DIR, "mnist.jls")), predicate=3) controls=[Step(2), Patience(3), diff --git a/examples/mnist/notebook.unexecuted.ipynb b/examples/mnist/notebook.unexecuted.ipynb index e78eb059..3f4a5883 100644 --- a/examples/mnist/notebook.unexecuted.ipynb +++ b/examples/mnist/notebook.unexecuted.ipynb @@ -117,7 +117,7 @@ "source": [ "For general instructions on coercing image data, see [Type coercion\n", "for image\n", - "data](https://alan-turing-institute.github.io/ScientificTypes.jl/dev/#Type-coercion-for-image-data-1)" + "data](https://juliaai.github.io/ScientificTypes.jl/dev/#Type-coercion-for-image-data)" ], "metadata": {} }, @@ -488,7 +488,7 @@ "cell_type": "code", "source": [ "save_control =\n", - " MLJIteration.skip(Save(joinpath(DIR, \"mnist.jlso\")), predicate=3)\n", + " MLJIteration.skip(Save(joinpath(DIR, \"mnist.jls\")), predicate=3)\n", "\n", "controls=[Step(2),\n", " Patience(3),\n", From 18df1b77b0b498b0c9e964874afdddea50b2efc4 Mon Sep 17 00:00:00 2001 From: "Anthony D. Blaom" Date: Mon, 22 Aug 2022 16:07:21 +1200 Subject: [PATCH 23/24] bump 0.2.8 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 6ce654f4..04ca2dcf 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "MLJFlux" uuid = "094fc8d1-fd35-5302-93ea-dabda2abf845" authors = ["Anthony D. Blaom ", "Ayush Shridhar "] -version = "0.2.7" +version = "0.2.8" [deps] CategoricalArrays = "324d7699-5711-5eae-9e2f-1d82baa6b597" From 61c3801bf04dba8b8a259c2d3d97e8008731240c Mon Sep 17 00:00:00 2001 From: "Anthony D. Blaom" Date: Mon, 22 Aug 2022 16:14:06 +1200 Subject: [PATCH 24/24] trivial commit to trigger CI --- src/MLJFlux.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/MLJFlux.jl b/src/MLJFlux.jl index f46f88ef..cb54f686 100644 --- a/src/MLJFlux.jl +++ b/src/MLJFlux.jl @@ -1,4 +1,4 @@ -module MLJFlux +module MLJFlux export CUDALibs, CPU1