diff --git a/Project.toml b/Project.toml index 072b398..cf97679 100644 --- a/Project.toml +++ b/Project.toml @@ -3,12 +3,16 @@ uuid = "02ac4b2c-022a-44aa-84a5-ea45a5754bcc" version = "0.2.2" [deps] +LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" ReachabilityBase = "379f33d0-9447-4353-bd03-d664070e549f" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" Requires = "ae029012-a4dd-5104-9daa-d747884805df" +Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" [compat] +LinearAlgebra = "<0.0.1, 1.6" ReachabilityBase = "0.1.1 - 0.2" Reexport = "0.2, 1" Requires = "0.5, 1" +Statistics = "<0.0.1, 1.6" julia = "1.6" diff --git a/docs/src/lib/Architecture.md b/docs/src/lib/Architecture.md index 7fe8c73..f37646e 100644 --- a/docs/src/lib/Architecture.md +++ b/docs/src/lib/Architecture.md @@ -53,10 +53,20 @@ dim_out(::AbstractLayerOp) ControllerFormats.Architecture.dim(::AbstractLayerOp) ``` +#### More specific layer interfaces + +```@docs +AbstractPoolingLayerOp +``` + #### Implementation ```@docs DenseLayerOp +ConvolutionalLayerOp +FlattenLayerOp +MaxPoolingLayerOp +MeanPoolingLayerOp ``` ### Activation functions diff --git a/src/Architecture/Architecture.jl b/src/Architecture/Architecture.jl index 7027862..5cf69d1 100644 --- a/src/Architecture/Architecture.jl +++ b/src/Architecture/Architecture.jl @@ -6,15 +6,22 @@ Module containing data structures to represent controllers. module Architecture using Requires +using LinearAlgebra: dot +using Statistics: mean +import Base: size export AbstractNeuralNetwork, FeedforwardNetwork, - AbstractLayerOp, DenseLayerOp, + AbstractLayerOp, DenseLayerOp, ConvolutionalLayerOp, FlattenLayerOp, + AbstractPoolingLayerOp, MaxPoolingLayerOp, MeanPoolingLayerOp, layers, dim_in, dim_out, ActivationFunction, Id, ReLU, Sigmoid, Tanh, LeakyReLU include("ActivationFunction.jl") include("LayerOps/AbstractLayerOp.jl") include("LayerOps/DenseLayerOp.jl") +include("LayerOps/ConvolutionalLayerOp.jl") +include("LayerOps/FlattenLayerOp.jl") +include("LayerOps/PoolingLayerOp.jl") include("NeuralNetworks/AbstractNeuralNetwork.jl") include("NeuralNetworks/FeedforwardNetwork.jl") diff --git a/src/Architecture/LayerOps/ConvolutionalLayerOp.jl b/src/Architecture/LayerOps/ConvolutionalLayerOp.jl new file mode 100644 index 0000000..fa2cb46 --- /dev/null +++ b/src/Architecture/LayerOps/ConvolutionalLayerOp.jl @@ -0,0 +1,154 @@ +""" + ConvolutionalLayerOp{F, M, B} <: AbstractLayerOp + +A convolutional layer operation is a series of filters, each of which computes a +small affine map followed by an activation function. + +### Fields + +- `weights` -- vector with one weight matrix for each filter +- `bias` -- vector with one bias value for each filter +- `activation` -- activation function + +### Notes + +Conversion from a `Flux.Conv` is supported. 
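+
+A small usage sketch (the filter, bias, and input values mirror the tests
+added in this patch):
+
+```julia
+L = ConvolutionalLayerOp([reshape([1 0; -1 2], (2, 2, 1))], [1], ReLU())
+T = reshape([0 4 2 1; -1 0 1 -2; 3 1 2 0; 0 1 4 1], (4, 4, 1))
+L(T)  # == reshape([2 7 0; 0 4 0; 6 9 1], (3, 3, 1))
+```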
+""" +struct ConvolutionalLayerOp{F,W,B} <: AbstractLayerOp + weights::W + bias::B + activation::F + + function ConvolutionalLayerOp(weights::W, bias::B, activation::F; + validate=Val(true)) where {F,W,B} + if validate isa Val{true} && !_isconsistent_ConvolutionalLayerOp(weights, bias) + throw(ArgumentError("inconsistent filter dimensions: weights " * + "($(length(weights))) and biases ($(length(bias)))")) + end + + return new{F,W,B}(weights, bias, activation) + end +end + +function _isconsistent_ConvolutionalLayerOp(weights, bias) + if length(weights) != length(bias) + return false + elseif length(bias) == 0 + return false + end + @inbounds begin + s = size(first(weights)) + if length(s) != 3 || s[1] == 0 || s[2] == 0 || s[3] == 0 + return false + end + for e in weights + if size(e) != s + return false + end + end + end + return true +end + +n_filters(L::ConvolutionalLayerOp) = length(L.bias) + +kernel(L::ConvolutionalLayerOp) = @inbounds size(first(L.weights)) + +# application to a tensor +function (L::ConvolutionalLayerOp)(T) + s = size(T) + if length(s) != 3 + throw(ArgumentError("a convolutional layer requires at least two dimensions, but got $s")) + end + p, q, r = kernel(L) + @inbounds begin + if p > s[1] || q > s[2] || r != s[3] + throw(ArgumentError("convolution with kernel size $(kernel(L)) " * + "does not apply to a tensor of dimension $s")) + end + d1 = s[1] - p + 1 + d2 = s[2] - q + 1 + end + t = n_filters(L) + s = (d1, d2, t) + O = similar(T, s) + @inbounds for f in 1:t + W = L.weights[f] + b = L.bias[f] + for k in 1:r + for j in 1:d2 + for i in 1:d1 + T′ = view(T, i:(i + p - 1), j:(j + q - 1), k) + O[i, j, f] = L.activation(dot(W, T′) + b) + end + end + end + end + return O +end + +function Base.:(==)(L1::ConvolutionalLayerOp, L2::ConvolutionalLayerOp) + return L1.weights == L2.weights && + L1.bias == L2.bias && + L1.activation == L2.activation +end + +function Base.:isapprox(L1::ConvolutionalLayerOp, L2::ConvolutionalLayerOp; atol::Real=0, + rtol=nothing) + if isnothing(rtol) + if iszero(atol) + N = @inbounds promote_type(eltype(first(L1.weights)), eltype(first(L2.weights)), + eltype(L1.bias), eltype(L2.bias)) + rtol = Base.rtoldefault(N) + else + rtol = zero(atol) + end + end + return isapprox(L1.weights, L2.weights; atol=atol, rtol=rtol) && + isapprox(L1.bias, L2.bias; atol=atol, rtol=rtol) && + L1.activation == L2.activation +end + +function Base.show(io::IO, L::ConvolutionalLayerOp) + str = "$(string(ConvolutionalLayerOp)) of $(n_filters(L)) filters with " * + "kernel size $(kernel(L)) and $(typeof(L.activation)) activation" + return print(io, str) +end + +size(::ConvolutionalLayerOp) = (3, 3) + +function load_Flux_convert_Conv_layer() + return quote + function Base.convert(::Type{ConvolutionalLayerOp}, layer::Flux.Conv) + if !all(isone, layer.stride) + throw(ArgumentError("stride $(layer.stride) != 1 is not supported")) # COV_EXCL_LINE + end + if !all(iszero, layer.pad) + throw(ArgumentError("pad $(layer.pad) != 0 is not supported")) # COV_EXCL_LINE + end + if !all(isone, layer.dilation) + throw(ArgumentError("dilation $(layer.dilation) != 1 is not supported")) # COV_EXCL_LINE + end + if !all(isone, layer.groups) + throw(ArgumentError("groups $(layer.groups) != 1 is not supported")) # COV_EXCL_LINE + end + act = get(activations_Flux, layer.σ, nothing) + if isnothing(act) + throw(ArgumentError("unsupported activation function $(layer.σ)")) # COV_EXCL_LINE + end + # Flux stores a 4D matrix instead of a vector of 3D matrices + weights = @inbounds [layer.weight[:, :, :, i] 
for i in 1:size(layer.weight, 4)] + return ConvolutionalLayerOp(weights, layer.bias, act) + end + + function Base.convert(::Type{Flux.Conv}, layer::ConvolutionalLayerOp) + act = get(activations_Flux, layer.activation, nothing) + if isnothing(act) + throw(ArgumentError("unsupported activation function $(layer.activation)")) # COV_EXCL_LINE + end + # Flux stores a 4D matrix instead of a vector of 3D matrices + weights = cat(layer.weights...; dims=4) + return Flux.Conv(weights, layer.bias, act) + end + end +end diff --git a/src/Architecture/LayerOps/DenseLayerOp.jl b/src/Architecture/LayerOps/DenseLayerOp.jl index cd2a535..a492883 100644 --- a/src/Architecture/LayerOps/DenseLayerOp.jl +++ b/src/Architecture/LayerOps/DenseLayerOp.jl @@ -73,6 +73,8 @@ dim_in(L::DenseLayerOp) = size(L.weights, 2) dim_out(L::DenseLayerOp) = length(L.bias) +size(::DenseLayerOp) = (1, 1) + function load_Flux_convert_Dense_layer() return quote function Base.convert(::Type{DenseLayerOp}, layer::Flux.Dense) diff --git a/src/Architecture/LayerOps/FlattenLayerOp.jl b/src/Architecture/LayerOps/FlattenLayerOp.jl new file mode 100644 index 0000000..8f4910c --- /dev/null +++ b/src/Architecture/LayerOps/FlattenLayerOp.jl @@ -0,0 +1,46 @@ +""" + FlattenLayerOp <: AbstractLayerOp + +A flattening layer operation converts a multidimensional tensor into a vector. + +### Notes + +The implementation uses row-major ordering for convenience with the +machine-learning literature. + +```@jldoctest +julia> T = reshape([1, 3, 2, 4, 5, 7, 6, 8], (2, 2, 2)) +2×2×2 Array{Int64, 3}: +[:, :, 1] = + 1 2 + 3 4 + +[:, :, 2] = + 5 6 + 7 8 + +julia> FlattenLayerOp()(T) +8-element Vector{Int64}: + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 +``` +""" +struct FlattenLayerOp <: AbstractLayerOp +end + +# application to a vector (swap to row-major convention) +function (L::FlattenLayerOp)(T) + s = size(T) + if length(s) == 1 + return vec(T) + end + return vec(permutedims(T, (2, 1, 3:length(s)...))) +end + +size(::FlattenLayerOp) = (nothing, 1) diff --git a/src/Architecture/LayerOps/PoolingLayerOp.jl b/src/Architecture/LayerOps/PoolingLayerOp.jl new file mode 100644 index 0000000..343e195 --- /dev/null +++ b/src/Architecture/LayerOps/PoolingLayerOp.jl @@ -0,0 +1,94 @@ +""" + AbstractPoolingLayerOp <: AbstractLayerOp + +Abstract type for pooling layer operations. + +### Notes + +Pooling is an operation on a three-dimensional tensor that iterates over the +first two dimensions in a window and aggregates the values, thus reducing the +output dimension. + +### Implementation + +The following (unexported) functions should be implemented: + +- `window(::AbstractPoolingLayerOp)` -- return the pair ``(p, q)`` representing the window size +- `aggregation(::AbstractPoolingLayerOp)` -- return the aggregation function (applied to a tensor) +""" +abstract type AbstractPoolingLayerOp <: AbstractLayerOp end + +for (type_name, normal_name, agg_function, agg_name) in + ((:MaxPoolingLayerOp, "max", maximum, "maximum"), + (:MeanPoolingLayerOp, "mean", mean, "Statistics.mean")) + @eval begin + @doc """ + $($type_name) <: AbstractPoolingLayerOp + + A $($normal_name)-pooling layer operation. The aggregation function is + `$($agg_name)`. 
+ + ### Fields + + - `p` -- horizontal window size + - `q` -- vertical window size + """ + struct $type_name <: AbstractPoolingLayerOp + p::Int + q::Int + + function $type_name(p::Int, q::Int; validate=Val(true)) + if validate isa Val{true} && (p <= 0 || q <= 0) + throw(ArgumentError("inconsistent window size ($p, $q)")) + end + return new(p, q) + end + end + + window(L::$type_name) = (L.p, L.q) + + aggregation(::$type_name) = $agg_function + + function Base.:(==)(L1::$type_name, L2::$type_name) + return window(L1) == window(L2) + end + + function Base.show(io::IO, L::$type_name) + str = "$(string($type_name)) for $($normal_name)-pooling of window " * + "size $(window(L))" + return print(io, str) + end + end +end + +# application to a tensor +function (L::AbstractPoolingLayerOp)(T) + s = size(T) + if length(s) != 3 + throw(ArgumentError("a pooling layer requires a three-dimensional input, but got $s")) + end + p, q = window(L) + @inbounds begin + if mod(s[1], p) != 0 || mod(s[2], q) != 0 + throw(ArgumentError("pooling with window size ($p, $q) does " * + "not apply to a tensor of dimension $s")) + end + d1 = div(s[1], p) + d2 = div(s[2], q) + d3 = s[3] + end + s = (d1, d2, d3) + O = similar(T, s) + aggregate = aggregation(L) + @inbounds for k in 1:d3 + for j in 1:d2 + for i in 1:d1 + cluster = view(T, ((i - 1) * p + 1):(i * p), ((j - 1) * q + 1):(j * q), k) + O[i, j, k] = aggregate(cluster) + end + end + end + return O +end + +size(::AbstractPoolingLayerOp) = (3, 3) diff --git a/src/Architecture/NeuralNetworks/FeedforwardNetwork.jl b/src/Architecture/NeuralNetworks/FeedforwardNetwork.jl index a9f95ba..3f61ab7 100644 --- a/src/Architecture/NeuralNetworks/FeedforwardNetwork.jl +++ b/src/Architecture/NeuralNetworks/FeedforwardNetwork.jl @@ -32,7 +32,9 @@ end function _first_inconsistent_layer(L) prev = nothing for (i, l) in enumerate(L) - if !isnothing(prev) && dim_in(l) != dim_out(prev) + if !isnothing(prev) && + ((!isnothing(dim_in(l)) && !isnothing(dim_out(prev)) && dim_in(l) != dim_out(prev)) || + !_iscompatible(size(prev), size(l))) return i end prev = l @@ -40,6 +42,10 @@ function _first_inconsistent_layer(L) return 0 end +_iscompatible(t1::Tuple, t2::Tuple) = _iscompatible(t1[2], t2[1]) +_iscompatible(i::Int, j::Int) = i == j +_iscompatible(i, ::Nothing) = true + layers(N::FeedforwardNetwork) = N.layers function load_Flux_convert_network() diff --git a/src/Architecture/init.jl b/src/Architecture/init.jl index a62c2b7..c013585 100644 --- a/src/Architecture/init.jl +++ b/src/Architecture/init.jl @@ -3,6 +3,7 @@ function __init__() @require Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" begin eval(load_Flux_activations()) eval(load_Flux_convert_Dense_layer()) + eval(load_Flux_convert_Conv_layer()) eval(load_Flux_convert_network()) end end diff --git a/test/Architecture/ConvolutionalLayerOp.jl b/test/Architecture/ConvolutionalLayerOp.jl new file mode 100644 index 0000000..48e9c90 --- /dev/null +++ b/test/Architecture/ConvolutionalLayerOp.jl @@ -0,0 +1,99 @@ +using ControllerFormats.Architecture: kernel, n_filters +using ReachabilityBase.Subtypes: subtypes + +# 4x4x1 input tensor +T441 = reshape([0 4 2 1; -1 0 1 -2; 3 1 2 0; 0 1 4 1], (4, 4, 1)) +O_Id = reshape([2 7 -2; -1 4 0; 6 9 1], (3, 3, 1)) +# 2x2x3 input tensor +T223 = reshape(1:12, (2, 2, 3)) + +W1 = reshape([1 0; -1 2], (2, 2, 1)) +b1 = 1 +W2 = W1 +b2 = 2 +# 2x2 kernel and 1 filter +Ws = [W1] +bs = [b1] + +# invalid weight/bias combination +@test_throws ArgumentError ConvolutionalLayerOp(Ws, [1, 0], Id()) +@test_throws ArgumentError 
ConvolutionalLayerOp([], [], Id()) +@test_throws ArgumentError ConvolutionalLayerOp([W1, hcat(1)], [1, 0], Id()) +@test_throws ArgumentError ConvolutionalLayerOp([[1 0; -1 2]], [1], Id()) + +# one filter +L = ConvolutionalLayerOp(Ws, bs, ReLU()) +# two filters +L2 = ConvolutionalLayerOp([W1, W2], [b1, b2], ReLU()) + +# printing +io = IOBuffer() +println(io, L) + +# output for tensors +@test L(T441) == reshape([2 7 0; 0 4 0; 6 9 1], (3, 3, 1)) +@test L2(T441) == cat([2 7 0; 0 4 0; 6 9 1], [3 8 0; 0 5 1; 7 10 2]; dims=(3)) +@test_throws ArgumentError L(T223) +@test_throws ArgumentError L(reshape(1:4.0, (2, 2))) + +# equality +@test L == ConvolutionalLayerOp(Ws, bs, ReLU()) +@test L != ConvolutionalLayerOp([W1 .+ 1], bs, ReLU()) && + L != ConvolutionalLayerOp(Ws, [b1 .+ 1], ReLU()) && + L != ConvolutionalLayerOp(Ws, bs, Id()) + +# approximate equality +@test L ≈ ConvolutionalLayerOp(Ws, bs, ReLU()) +@test L ≈ ConvolutionalLayerOp([W1 .+ 1e-10], bs, ReLU()) && + L ≈ ConvolutionalLayerOp(Ws, [b1 .+ 1e-10], ReLU()) && + !≈(L, ConvolutionalLayerOp([W1 .+ 1e-10], bs, ReLU()); rtol=1e-12) && + !≈(L, ConvolutionalLayerOp(Ws, [b1 .+ 1e-10], ReLU()); rtol=1e-12) && + ≈(L, ConvolutionalLayerOp([W1 .+ 1e-1], bs, ReLU()); atol=1) && + ≈(L, ConvolutionalLayerOp(Ws, [b1 .+ 1e-1], ReLU()); atol=1) && + !(L ≈ ConvolutionalLayerOp([W1 .+ 1], bs, ReLU())) && + !(L ≈ ConvolutionalLayerOp(Ws, [b1 .+ 1], ReLU())) && + !(L ≈ ConvolutionalLayerOp(Ws, bs, Id())) + +# size +@test size(L) == (3, 3) + +# kernel size and number of filters +@test kernel(L) == kernel(L2) == (2, 2, 1) +@test n_filters(L) == 1 && n_filters(L2) == 2 + +# test methods for all activations +function test_layer(L::ConvolutionalLayerOp{Id}) + @test L(T441) == O_Id +end + +function test_layer(L::ConvolutionalLayerOp{ReLU}) + @test L(T441) == reshape([2 7 0; 0 4 0; 6 9 1], (3, 3, 1)) +end + +function test_layer(L::ConvolutionalLayerOp{Sigmoid}) + @test L(float(T441)) ≈ Sigmoid().(O_Id) atol = 1e-3 +end + +function test_layer(L::ConvolutionalLayerOp{Tanh}) + @test L(float(T441)) ≈ Tanh().(O_Id) atol = 1e-3 +end + +function test_layer(L::ConvolutionalLayerOp{<:LeakyReLU}) + @test L(T441) == O_Id +end + +function test_layer(L::ConvolutionalLayerOp) + return error("untested activation function: ", typeof(L.activation)) +end + +# run test with all activations +for act in subtypes(ActivationFunction) + if act == TestActivation + continue + elseif act == LeakyReLU + act_inst = LeakyReLU(1) + else + act_inst = act() + end + test_layer(ConvolutionalLayerOp(Ws, bs, act_inst)) +end diff --git a/test/Architecture/DenseLayerOp.jl b/test/Architecture/DenseLayerOp.jl index 92e851f..92b9925 100644 --- a/test/Architecture/DenseLayerOp.jl +++ b/test/Architecture/DenseLayerOp.jl @@ -42,6 +42,7 @@ println(io, L) @test dim_out(L) == 3 @test dim(L) == (2, 3) @test length(L) == 3 +@test size(L) == (1, 1) # test methods for all activations function test_layer(L::DenseLayerOp{Id}) diff --git a/test/Architecture/FeedforwardNetwork.jl b/test/Architecture/FeedforwardNetwork.jl index 77828e9..357368a 100644 --- a/test/Architecture/FeedforwardNetwork.jl +++ b/test/Architecture/FeedforwardNetwork.jl @@ -48,3 +48,16 @@ println(io, N2) @test dim_in(N1) == 2 && dim_in(N2) == 2 @test dim_out(N1) == 3 && dim_out(N2) == 2 @test dim(N1) == (2, 3) && dim(N2) == (2, 2) + +# network with all layer types +L1 = ConvolutionalLayerOp([reshape([1 0; -1 2], (2, 2, 1))], [1], ReLU()) +L2 = MaxPoolingLayerOp(1, 1) +L3 = FlattenLayerOp() +W = zeros(2, 9); W[1, 1] = W[2, 2] = 1 +L4 = 
DenseLayerOp(W, [1.0 0], ReLU()) +N3 = FeedforwardNetwork([L1, L2, L3, L4]) +T441 = reshape([0 4 2 1; -1 0 1 -2; 3 1 2 0; 0 1 4 1], (4, 4, 1)) +@test N3(T441) == [3.0 2; 8 7] + +# incompatible dimensions +@test_throws ArgumentError FeedforwardNetwork([L1, L4]) diff --git a/test/Architecture/FlattenLayerOp.jl b/test/Architecture/FlattenLayerOp.jl new file mode 100644 index 0000000..b4a1f17 --- /dev/null +++ b/test/Architecture/FlattenLayerOp.jl @@ -0,0 +1,11 @@ +L = FlattenLayerOp() + +# output for tensor `T` +@test L([1 2; 3 4]) == [1, 2, 3, 4] +@test L([1]) == [1] + +# equality +@test L == FlattenLayerOp() + +# size +@test size(L) == (nothing, 1) diff --git a/test/Architecture/Flux.jl b/test/Architecture/Flux.jl index f4c2318..8f01d2e 100644 --- a/test/Architecture/Flux.jl +++ b/test/Architecture/Flux.jl @@ -1,5 +1,9 @@ import Flux +################ +# Dense layers # +################ + L1 = Flux.Dense(1, 2, Flux.relu) L1.weight .= 1, 2 L1.bias .= 3, 4 @@ -48,3 +52,19 @@ W = hcat([1 0.5; -0.5 0.5; -1 -0.5]) b = [1.0, 0, -2] L = DenseLayerOp(W, b, TestActivation()) @test_throws ArgumentError convert(Flux.Dense, L) + +######################## +# Convolutional layers # +######################## + +LC = Flux.Conv((2, 2), 1 => 1, Flux.relu) +LC.weight .= reshape([1 0; -1 2], (2, 2, 1, 1)) +LC.bias .= 1 + +# layer conversion +op = convert(ConvolutionalLayerOp, LC) +@test op.weights[1] == LC.weight[:, :, :] +@test op.bias == LC.bias +@test op.activation == ReLU() +L_back = convert(Flux.Conv, op) +@test compare_Flux_layer(LC, L_back) diff --git a/test/Architecture/PoolingLayerOp.jl b/test/Architecture/PoolingLayerOp.jl new file mode 100644 index 0000000..a48dffd --- /dev/null +++ b/test/Architecture/PoolingLayerOp.jl @@ -0,0 +1,44 @@ +using ControllerFormats.Architecture: window, aggregation +using Statistics: mean + +LTs = (MaxPoolingLayerOp, MeanPoolingLayerOp) +Ls = [LT(2, 3) for LT in LTs] +L1, L2 = Ls + +# printing +io = IOBuffer() +for L in Ls + println(io, L) +end + +# invalid inputs +for LT in LTs + @test_throws ArgumentError LT(0, 1) + @test_throws ArgumentError LT(1, 0) +end + +# window +@test window(L1) == (2, 3) + +# aggregation +@test aggregation(L1) == maximum +@test aggregation(L2) == mean + +# output for tensor `T` +T = reshape(1:72.0, (4, 6, 3)) +@test L1(T) == cat([10 22; 12 24], [34 46; 36 48], [58 70; 60 72]; dims=3) +@test L2(T) == cat([5.5 17.5; 7.5 19.5], [29.5 41.5; 31.5 43.5], [53.5 65.5; 55.5 67.5]; dims=3) +for L in Ls + @test_throws ArgumentError L(reshape(1:4.0, (2, 2))) + @test_throws ArgumentError L(reshape(1:8.0, (2, 2, 2))) +end + +# equality +for (L, LT) in zip(Ls, LTs) + @test L == LT(2, 3) + @test L != LT(2, 2) && L != LT(3, 3) +end +@test L1 != L2 + +# size +@test size(L1) == (3, 3) diff --git a/test/Project.toml b/test/Project.toml index 5a963fc..a349f54 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -4,6 +4,7 @@ Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" MAT = "23992714-dd62-5051-b70f-ba57cb901cac" ONNX = "d0dd6a25-fac6-55c0-abf7-829e0c774d20" ReachabilityBase = "379f33d0-9447-4353-bd03-d664070e549f" +Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" YAML = "ddb6d928-2868-570f-bddf-ab3f9cf99eb6" @@ -13,4 +14,5 @@ Flux = "0.13 - 0.14" MAT = "0.10" ONNX = "0.2" ReachabilityBase = "0.1.1 - 0.2" +Statistics = "<0.0.1, 1.6" YAML = "0.3 - 0.4" diff --git a/test/runtests.jl b/test/runtests.jl index 06fefff..61daa5c 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -16,6 +16,15 @@ struct 
TestActivation <: ActivationFunction end @testset "DenseLayerOp" begin include("Architecture/DenseLayerOp.jl") end + @testset "ConvolutionalLayerOp" begin + include("Architecture/ConvolutionalLayerOp.jl") + end + @testset "FlattenLayerOp" begin + include("Architecture/FlattenLayerOp.jl") + end + @testset "PoolingLayerOp" begin + include("Architecture/PoolingLayerOp.jl") + end @testset "AbstractNeuralNetwork" begin include("Architecture/AbstractNeuralNetwork.jl") end
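For reference, the new layer operations compose with the existing `FeedforwardNetwork`. A minimal end-to-end sketch (mirroring the mixed-layer network test above, but written with a plain vector bias for the dense layer):

```julia
using ControllerFormats

C = ConvolutionalLayerOp([reshape([1 0; -1 2], (2, 2, 1))], [1], ReLU())  # one 2×2×1 filter
P = MaxPoolingLayerOp(1, 1)    # (1, 1) window: pooling acts as the identity here
F = FlattenLayerOp()           # row-major flattening of the 3×3×1 tensor to a 9-vector
W = zeros(2, 9); W[1, 1] = W[2, 2] = 1
D = DenseLayerOp(W, [1.0, 0.0], ReLU())
N = FeedforwardNetwork([C, P, F, D])

T = reshape([0 4 2 1; -1 0 1 -2; 3 1 2 0; 0 1 4 1], (4, 4, 1))
N(T)  # expected: [3.0, 7.0] (conv+ReLU → pool → flatten → dense+ReLU)
```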