From a2a2292a823893fd4fac65c774b0ee786936380f Mon Sep 17 00:00:00 2001
From: Astitva Aggarwal
Date: Sat, 18 Nov 2023 03:11:09 +0530
Subject: [PATCH] THE PDE SOLVER WORKS COMPLETELY

---
 src/PDE_BPINN.jl        | 222 +++++++++++++++++-----------------------
 src/advancedHMC_MCMC.jl |  36 ++++---
 test/BPINN_PDE_tests.jl | 198 +++++++++--------------------------
 3 files changed, 163 insertions(+), 293 deletions(-)

diff --git a/src/PDE_BPINN.jl b/src/PDE_BPINN.jl
index e53947e6d7..32dd0a2545 100644
--- a/src/PDE_BPINN.jl
+++ b/src/PDE_BPINN.jl
@@ -19,8 +19,8 @@ mutable struct PDELogTargetDensity{
     Phi::PH
 
     function PDELogTargetDensity(dim, strategy, dataset,
-        priors, allstd, names, physdt, extraparams,
-        init_params::AbstractVector, full_loglikelihood, Phi)
+            priors, allstd, names, physdt, extraparams,
+            init_params::AbstractVector, full_loglikelihood, Phi)
         new{
             typeof(strategy),
             typeof(dataset),
@@ -41,8 +41,8 @@ mutable struct PDELogTargetDensity{
             Phi)
     end
     function PDELogTargetDensity(dim, strategy, dataset,
-        priors, allstd, names, physdt, extraparams,
-        init_params::NamedTuple, full_loglikelihood, Phi)
+            priors, allstd, names, physdt, extraparams,
+            init_params::NamedTuple, full_loglikelihood, Phi)
         new{
             typeof(strategy),
             typeof(dataset),
@@ -65,76 +65,57 @@ mutable struct PDELogTargetDensity{
 end
 
 function LogDensityProblems.logdensity(Tar::PDELogTargetDensity, θ)
-    # forward solving
-    # Tar.full_loglikelihood(vector_to_parameters(θ, Tar.init_params), Tar.allstd)
-    # println("1 : ",
-    #     length(Tar.full_loglikelihood(vector_to_parameters(θ,
-    #             Tar.init_params),
-    #         Tar.allstd).partials))
-    # println("2 : ", L2LossData(Tar, θ).value)
-    # println("2 : ", L2LossData(Tar, θ).partials)
-
-    # # println("3 : ", length(priorlogpdf(Tar, θ).partials))
-
-    # # println("sum : ",
-    # #     (Tar.full_loglikelihood(vcat(vector_to_parameters(θ[1:(end - 1)],
-    # #             Tar.init_params[1:(end - 1)]), θ[end]),
-    # #         Tar.allstd) +
-    # #      L2LossData(Tar, θ) + priorlogpdf(Tar, θ)).value)
-    # println(typeof(θ) <: AbstractVector)
-    # println(length(θ))
-
-    # println("1 : ",
-    #     length(Tar.full_loglikelihood(vcat(vector_to_parameters(θ[1:(end - Tar.extraparams)],
-    #             Tar.init_params[1:(end - Tar.extraparams)]), θ[(end - Tar.extraparams + 1):end]),
-    #         Tar.allstd).partials))
-    # println("2 : ", length(L2LossData(Tar, θ).partials))
-    # println("3 : ", length(priorlogpdf(Tar, θ).partials))
-
-    # println(length(initial_nnθ))
-    # println(length(pinnrep.flat_init_params))
-    # println(initial_nnθ)
-    # println(pinnrep.flat_init_params)
-    # println(typeof(θ) <: AbstractVector)
-    # println(length(θ))
-    # println(typeof(θ[1:(end - Tar.extraparams)]) <: AbstractVector)
-    # println(length(θ[1:(end - Tar.extraparams)]))
-    # println(length(vector_to_parameters(θ[1:(end - Tar.extraparams)],
-    #     Tar.init_params[1:(end - Tar.extraparams)])))
-
-    # Tar.full_loglikelihood(vcat(vector_to_parameters(θ[1:(end - Tar.extraparams)],
-    #         Tar.init_params), θ[(end - Tar.extraparams + 1):end]),
-    #     Tar.allstd)
-
-    # θ = reduce(vcat, θ)
-    # yuh = vcat(vector_to_parameters(θ[1:(end - Tar.extraparams)],
-    #         Tar.init_params),
-    #     adapt(typeof(vector_to_parameters(θ[1:(end - Tar.extraparams)],
-    #             Tar.init_params)), θ[(end - Tar.extraparams + 1):end]))
-
-    # yuh = ComponentArrays.ComponentArray(;
-    #     # u = vector_to_parameters(θ[1:(end - Tar.extraparams)], Tar.init_params),
-    #     depvar = vector_to_parameters(θ[1:(end - Tar.extraparams)], Tar.init_params),
-    #     p = θ[(end - Tar.extraparams + 1):end])
-
-    return Tar.full_loglikelihood(setLuxparameters(Tar, θ),
-        Tar.allstd) + priorlogpdf(Tar, θ)
-    # +L2LossData(Tar, θ)
+    # for parameter estimation it is necessary to use the multioutput case
+    return Tar.full_loglikelihood(setparameters(Tar, θ),
+        Tar.allstd) + priorlogpdf(Tar, θ) + L2LossData(Tar, θ)
     # + L2loss2(Tar, θ)
 end
 
-function setLuxparameters(Tar::PDELogTargetDensity, θ)
-    a = ComponentArrays.ComponentArray(NamedTuple{Tar.names}(i for i in [
-        vector_to_parameters(θ[1:(end - Tar.extraparams)],
-            Tar.init_params),
-    ]))
+function L2loss2(Tar::PDELogTargetDensity, θ)
+    return Tar.full_loglikelihood(setparameters(Tar, θ),
+        Tar.allstd)
+end
+function setparameters(Tar::PDELogTargetDensity, θ)
+    names = Tar.names
+    ps_new = θ[1:(end - Tar.extraparams)]
+    ps = Tar.init_params
+
+    if (ps[names[1]] isa ComponentArrays.ComponentVector)
+        # multioutput case for Lux chains: for each depvar, ps contains a Lux ComponentVector,
+        # which we use to map the current AHMC-sampled parameter vector onto the NNs
+        i = 0
+        Luxparams = []
+        for x in names
+            endind = length(ps[x])
+            push!(Luxparams, vector_to_parameters(ps_new[(i + 1):(i + endind)], ps[x]))
+            i += endind
+        end
+        Luxparams
+    else
+        # multioutput Flux
+        Luxparams = θ
+    end
+
+    if (Luxparams isa AbstractVector) && (Luxparams[1] isa ComponentArrays.ComponentVector)
+        # multioutput Lux
+        a = ComponentArrays.ComponentArray(NamedTuple{Tar.names}(i for i in Luxparams))
 
-    b = θ[(end - Tar.extraparams + 1):end]
+        if Tar.extraparams > 0
+            b = θ[(end - Tar.extraparams + 1):end]
 
-    ComponentArrays.ComponentArray(;
-        depvar = a,
-        p = b)
+            return ComponentArrays.ComponentArray(;
+                depvar = a,
+                p = b)
+        else
+            return ComponentArrays.ComponentArray(;
+                depvar = a)
+        end
+    else
+        # multioutput Flux case
+        return vector_to_parameters(Luxparams, ps)
+    end
 end
+
 LogDensityProblems.dimension(Tar::PDELogTargetDensity) = Tar.dim
 
 function LogDensityProblems.capabilities(::PDELogTargetDensity)
@@ -146,28 +127,24 @@ function L2loss2(Tar::PDELogTargetDensity, θ)
 end
 
 # L2 losses loglikelihood(needed mainly for ODE parameter estimation)
 function L2LossData(Tar::PDELogTargetDensity, θ)
-    return logpdf(MvNormal(Tar.Phi[1](Tar.dataset[end]',
-            vector_to_parameters(θ[1:(end - Tar.extraparams)],
-                Tar.init_params))[1,
-            :], ones(length(Tar.dataset[end])) .* Tar.allstd[3][1]),
-        zeros(length(Tar.dataset[end])))
-    # matrix(each row corresponds to vector u's rows)
-    # if Tar.dataset isa Vector{Nothing} || Tar.extraparams == 0
-    #     return 0
-    # else
-    #     nn = [phi(Tar.dataset[end]', θ[1:(length(θ) - Tar.extraparams)])
-    #           for phi in Tar.Phi]
-
-    #     L2logprob = 0
-    #     for i in 1:(length(Tar.dataset) - 1)
-    #         # for u[i] ith vector must be added to dataset,nn[1,:] is the dx in lotka_volterra
-    #         L2logprob += logpdf(MvNormal(nn[i][:],
-    #                 ones(length(Tar.dataset[end])) .* Tar.allstd[3]),
-    #             Tar.dataset[i])
-    #     end
-
-    #     return L2logprob
-    # end
-    return 0
+    if Tar.extraparams > 0
+        if Tar.init_params isa ComponentArrays.ComponentVector
+            return sum([logpdf(MvNormal(Tar.Phi[i](Tar.dataset[end]',
+                    vector_to_parameters(θ[1:(end - Tar.extraparams)],
+                        Tar.init_params)[Tar.names[i]])[1,
+                    :], ones(length(Tar.dataset[end])) .* Tar.allstd[3][i]),
+                Tar.dataset[i])
+                        for i in eachindex(Tar.Phi)])
+        else
+            # the Flux case needs subindexing by each chain's parameter ranges (stored in Tar.names)
+            return sum([logpdf(MvNormal(Tar.Phi[i](Tar.dataset[end]',
+                    vector_to_parameters(θ[1:(end - Tar.extraparams)],
+                        Tar.init_params)[Tar.names[2][i]])[1,
+                    :], ones(length(Tar.dataset[end])) .* Tar.allstd[3][i]),
+                Tar.dataset[i])
+                        for i in eachindex(Tar.Phi)])
+        end
+    else
+        return 0
+    end
 end
 
 # priors for NN parameters + ODE constants
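For reference, the parameter mapping that `setparameters` implements can be reproduced in isolation. Below is a minimal sketch, assuming two single-output Lux networks whose initial parameters are stored as ComponentVectors; the names `ps_u`, `ps_v`, and `θ_structured` are illustrative and not part of this patch:

using Lux, ComponentArrays, Random

rng = Random.default_rng()
ps_u = ComponentArray(Lux.initialparameters(rng, Lux.Dense(1, 4)))
ps_v = ComponentArray(Lux.initialparameters(rng, Lux.Dense(1, 4)))
nu, nv = length(ps_u), length(ps_v)

# flat AHMC-style sample: both NNs' weights plus one inverse-problem parameter
θ = rand(nu + nv + 1)

# rebuild each network's structured parameters from its slice of the flat vector
u_new = ComponentArray(θ[1:nu], getaxes(ps_u))
v_new = ComponentArray(θ[(nu + 1):(nu + nv)], getaxes(ps_v))

# the same layout setparameters returns: depvar-wise NN params plus inverse params
θ_structured = ComponentArray(; depvar = ComponentArray(; u = u_new, v = v_new),
    p = θ[end:end])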
@@ -230,16 +207,16 @@ end
 # priors: pdf for W,b + pdf for ODE params
 # lotka specific kwargs here
 function ahmc_bayesian_pinn_pde(pde_system, discretization;
-    strategy = GridTraining, dataset = [nothing],
-    init_params = nothing, draw_samples = 1000,
-    physdt = 1 / 20.0, bcstd = [0.01], l2std = [0.05],
-    phystd = [0.05], priorsNNw = (0.0, 2.0),
-    param = [], nchains = 1, Kernel = HMC,
-    Adaptorkwargs = (Adaptor = StanHMCAdaptor,
-        Metric = DiagEuclideanMetric, targetacceptancerate = 0.8),
-    Integratorkwargs = (Integrator = Leapfrog,),
-    MCMCkwargs = (n_leapfrog = 30,),
-    progress = false, verbose = false)
+        strategy = GridTraining, dataset = [nothing],
+        init_params = nothing, draw_samples = 1000,
+        physdt = 1 / 20.0, bcstd = [0.01], l2std = [0.05],
+        phystd = [0.05], priorsNNw = (0.0, 2.0),
+        param = [], nchains = 1, Kernel = HMC,
+        Adaptorkwargs = (Adaptor = StanHMCAdaptor,
+            Metric = DiagEuclideanMetric, targetacceptancerate = 0.8),
+        Integratorkwargs = (Integrator = Leapfrog,),
+        MCMCkwargs = (n_leapfrog = 30,),
+        progress = false, verbose = false)
     pinnrep = symbolic_discretize(pde_system, discretization, bayesian = true)
 
     # for physics loglikelihood
@@ -252,37 +229,30 @@ function ahmc_bayesian_pinn_pde(pde_system, discretization;
     # for new L2 loss
     # discretization.additional_loss =
 
-    # converting vector of parameters to ComponentArray for runtimegenerated functions
-    names = ntuple(i -> pinnrep.depvars[i], length(discretization.chain))
-
     if nchains > Threads.nthreads()
         throw(error("number of chains is greater than available threads"))
     elseif nchains < 1
         throw(error("number of chains must be at least 1"))
     end
 
+    # remove inverse-problem params, take only NN params; AHMC uses Float64
     initial_nnθ = pinnrep.flat_init_params[1:(end - length(param))]
-    if discretization.multioutput
-        if chain[1] isa Lux.AbstractExplicitLayer
-            # Lux chain(using component array later as vector_to_parameter need namedtuple,AHMC uses Float64)
-            initial_θ = collect(Float64, vcat(ComponentArrays.ComponentArray(initial_nnθ)))
-            # namedtuple form of Lux params required for RuntimeGeneratedFunctions
-            initial_nnθ, st = Lux.setup(Random.default_rng(), chain[1])
-        else
-            # remove inv params take only NN params
-            initial_θ = collect(Float64, initial_nnθ)
-        end
+    initial_θ = collect(Float64, initial_nnθ)
+    initial_nnθ = pinnrep.init_params
+
+    if (discretization.multioutput && chain[1] isa Lux.AbstractExplicitLayer)
+        # converting vector of parameters to ComponentArray for RuntimeGeneratedFunctions
+        names = ntuple(i -> pinnrep.depvars[i], length(chain))
     else
-        if chain isa Lux.AbstractExplicitLayer
-            # Lux chain(using component array later as vector_to_parameter need namedtuple,AHMC uses Float64)
-            initial_θ = collect(Float64, vcat(ComponentArrays.ComponentArray(initial_nnθ)))
-            # namedtuple form of Lux params required for RuntimeGeneratedFunctions
-            initial_nnθ, st = Lux.setup(Random.default_rng(), chain)
-        else
-            # remove inv params take only NN params
-            initial_nnθ = pinnrep.flat_init_params[1:(end - length(param))]
-            initial_θ = collect(Float64, initial_nnθ)
+        # this case is for Flux multioutput
+        i = 0
+        temp = []
+        for j in eachindex(initial_nnθ)
+            len = length(initial_nnθ[j])
+            push!(temp, (i + 1):(i + len))
+            i += len
         end
+        names = tuple(1, temp)
     end
 
     # ODE parameter estimation
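The Flux-multioutput branch above only records one index range per network, so a flat sample can later be subindexed per depvar. The same bookkeeping as a standalone sketch (the two random vectors stand in for destructured Flux chains and are assumptions of the example):

init_params = [rand(10), rand(14)]
lens = length.(init_params)
stops = cumsum(lens)
ranges = [(stops[j] - lens[j] + 1):stops[j] for j in eachindex(lens)]  # [1:10, 11:24]
names = tuple(1, ranges)  # mirrors `names = tuple(1, temp)` above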
@@ -314,10 +284,6 @@ function ahmc_bayesian_pinn_pde(pde_system, discretization;
         full_weighted_loglikelihood,
         Phi)
 
-    println(ℓπ.full_loglikelihood(setLuxparameters(ℓπ, initial_θ), ℓπ.allstd))
-
-    println(priorlogpdf(ℓπ, initial_θ))
-    println(L2LossData(ℓπ, initial_θ))
-
     Adaptor, Metric, targetacceptancerate = Adaptorkwargs[:Adaptor],
     Adaptorkwargs[:Metric], Adaptorkwargs[:targetacceptancerate]
 
@@ -364,10 +330,6 @@ function ahmc_bayesian_pinn_pde(pde_system, discretization;
     samples, stats = sample(hamiltonian, Kernel, initial_θ, draw_samples,
         adaptor; progress = progress, verbose = verbose)
 
-    println(ℓπ.full_loglikelihood(setLuxparameters(ℓπ, samples[end]),
-        ℓπ.allstd))
-    println(priorlogpdf(ℓπ, samples[end]))
-    println(L2LossData(ℓπ, samples[end]))
     # return a chain(basic chain),samples and stats
     matrix_samples = hcat(samples...)
     mcmc_chain = MCMCChains.Chains(matrix_samples')
diff --git a/src/advancedHMC_MCMC.jl b/src/advancedHMC_MCMC.jl
index 77a7f650c4..e2ab61a838 100644
--- a/src/advancedHMC_MCMC.jl
+++ b/src/advancedHMC_MCMC.jl
@@ -65,22 +65,23 @@ mutable struct LogTargetDensity{C, S, ST <: AbstractTrainingStrategy, I,
 end
 
 """
-Cool function needed for converting vector of sampled parameters into namedTuples in case of Lux chain output, derivatives
+Function for converting a vector of sampled parameters into a ComponentVector in the case of Lux chain output; during derivative computation
 the sampled parameters are of exotic type `Dual` due to ForwardDiff's autodiff tagging
 """
 function vector_to_parameters(ps_new::AbstractVector,
-    ps::Union{NamedTuple, <:AbstractVector})
-    if typeof(ps) <: AbstractVector
+        ps::Union{ComponentArrays.ComponentVector, AbstractVector})
+    if ps isa ComponentArrays.ComponentVector
+        @assert length(ps_new) == Lux.parameterlength(ps)
+        i = 1
+        function get_ps(x)
+            z = reshape(view(ps_new, i:(i + length(x) - 1)), size(x))
+            i += length(x)
+            return z
+        end
+        return Functors.fmap(get_ps, ps)
+    else
         return ps_new
     end
-    @assert length(ps_new) == Lux.parameterlength(ps)
-    i = 1
-    function get_ps(x)
-        z = reshape(view(ps_new, i:(i + length(x) - 1)), size(x))
-        i += length(x)
-        return z
-    end
-    return Functors.fmap(get_ps, ps)
 end
 
 function LogDensityProblems.logdensity(Tar::LogTargetDensity, θ)
@@ -559,9 +560,10 @@ function ahmc_bayesian_pinn_ode(prob::DiffEqBase.ODEProblem, chain;
         end
     end
 
-    println(physloglikelihood(ℓπ, initial_θ))
-    println(priorweights(ℓπ, initial_θ))
-    # println(L2LossData(ℓπ, initial_nnθ))
+    println("Current Physics Log-likelihood : ", physloglikelihood(ℓπ, initial_θ))
+    println("Current Prior Log-likelihood : ", priorweights(ℓπ, initial_θ))
+    println("Current MSE against dataset Log-likelihood : ", L2LossData(ℓπ, initial_θ))
+    println("Current custom loss Log-likelihood : ", L2loss2(ℓπ, initial_θ))
 
     Adaptor, Metric, targetacceptancerate = Adaptorkwargs[:Adaptor],
     Adaptorkwargs[:Metric], Adaptorkwargs[:targetacceptancerate]
 
@@ -609,6 +611,12 @@ function ahmc_bayesian_pinn_ode(prob::DiffEqBase.ODEProblem, chain;
     samples, stats = sample(hamiltonian, Kernel, initial_θ, draw_samples,
         adaptor; progress = progress, verbose = verbose)
 
+    println("Current Physics Log-likelihood : ", physloglikelihood(ℓπ, samples[end]))
+    println("Current Prior Log-likelihood : ", priorweights(ℓπ, samples[end]))
+    println("Current MSE against dataset Log-likelihood : ",
+        L2LossData(ℓπ, samples[end]))
+    println("Current custom loss Log-likelihood : ", L2loss2(ℓπ, samples[end]))
+
     # return a chain(basic chain),samples and stats
     matrix_samples = hcat(samples...)
     mcmc_chain = MCMCChains.Chains(matrix_samples')
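The `vector_to_parameters` change above keeps the fmap-based rebuild for structured parameters. A hedged standalone illustration of that mechanism over a plain NamedTuple (the `unflatten` name and the toy shapes are assumptions of the example, not part of the patch):

using Functors

function unflatten(flat::AbstractVector, ps)
    i = 1
    return Functors.fmap(ps) do x
        z = reshape(view(flat, i:(i + length(x) - 1)), size(x))
        i += length(x)
        return z
    end
end

ps = (layer_1 = (weight = zeros(3, 2), bias = zeros(3)),)
θ = collect(1.0:9.0)
ps_new = unflatten(θ, ps)  # ps_new.layer_1.weight holds 1.0:6.0 in column-major order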
diff --git a/test/BPINN_PDE_tests.jl b/test/BPINN_PDE_tests.jl
index ac9ddfc6b7..c36b72723d 100644
--- a/test/BPINN_PDE_tests.jl
+++ b/test/BPINN_PDE_tests.jl
@@ -129,6 +129,10 @@ mcmc_chain, samples, stats = ahmc_bayesian_pinn_pde(pde_system,
     priorsNNw = (0.0, 10.0),
     progress = true)
 
+# println(ℓπ.full_loglikelihood(setparameters(ℓπ, initial_θ), ℓπ.allstd))
+# println(priorlogpdf(ℓπ, initial_θ))
+# println(L2LossData(ℓπ, initial_θ))
+
 # discretization = NeuralPDE.PhysicsInformedNN(chainf,
 #     GridTraining([
 #         0.01,
@@ -190,11 +194,14 @@ bcs = [u(0) ~ 0.0]
 domains = [t ∈ Interval(0.0, 4.0)]
 
 chainf = Flux.Chain(Flux.Dense(1, 8, tanh), Flux.Dense(8, 1))
+typeof(chainf)
 initf, re = destructure(chainf)
 chainl = Lux.Chain(Lux.Dense(1, 8, tanh), Lux.Dense(8, 1))
 initl, st = Lux.setup(Random.default_rng(), chainl)
-initl
+Lux.initialparameters(Random.default_rng(), chainl)
+initl isa ComponentArrays.ComponentVector
+typeof(initl) <: NamedTuple
 
 @named pde_system = PDESystem(eqs, bcs, domains, [t], [u(t)], [p],
     defaults = Dict(p => 3))
@@ -203,14 +210,22 @@ function additional_loss(phi, θ, p)
     # return sum(sum(abs2, phi[i](time', θ[depvars[i]]) .- 1.0) / len for i in 1:1)
     return 2
 end
-discretization = NeuralPDE.PhysicsInformedNN([chainl],
-    GridTraining(0.01),
-    # QuadratureTraining(),
-    additional_loss = additional_loss,
-    param_estim = true)
-# discretization.multioutput
-pinnrep = NeuralPDE.discretize(pde_system, discretization)
+# collect(Float64, pinnrep.init_params)
+# typeof(pinnrep.init_params) <: AbstractVector
+# pinnrep.init_params[names[1]]
+# typeof(pinnrep.init_params[names[1]]) <: ComponentArrays.ComponentVector
+# pinnrep.init_params
+# pinnrep.flat_init_params
+
+# names = ntuple(i -> pinnrep.depvars[i], length(pinnrep.depvars))
 
 # res = Optimization.solve(pinnrep, BFGS(); callback = callback, maxiters = 5000)
 # p_ = res.u[end]
@@ -219,10 +234,10 @@
 # depvars, indvars, dict_indvars, dict_depvars, dict_depvar_input = NeuralPDE.get_vars(pde_system.indvars,
 #     pde_system.depvars)
 
-ntuple(i -> depvars[i], length(chainl))
+# ntuple(i -> depvars[i], length(chainl))
 
-[:u]
-length(chainl)
+# [:u]
+# length(chainl)
 
 ta = range(0.0, 4.0, length = 50)
 u = [linear_analytic(0.0, p, ti) for ti in ta]
 x̂ = collect(Float64, Array(u) + 0.2 .* Array(u) .* randn(size(u)))
 # x̂ = collect(Float64, Array(u) + 0.2 .* randn(size(u)))
 time = vec(collect(Float64, ta))
 dataset = [x̂, time]
-plot!(dataset[2], dataset[1])
+# plot!(dataset[2], dataset[1])
 # plotly()
 # physsol1 = [linear_analytic(prob.u0, p, time[i]) for i in eachindex(time)]
 
 callback = function (p, l)
     println("Current loss is: $l")
     return false
 end
-res = Optimization.solve(pinnrep, BFGS(); callback = callback, maxiters = 5000)
-p_ = res.u[(end - 2):end] # p_ = [9.93, 28.002, 2.667]
+# res = Optimization.solve(pinnrep, BFGS(); callback = callback, maxiters = 5000)
+# p_ = res.u[(end - 2):end] # p_ = [9.93, 28.002, 2.667]
 
+discretization = NeuralPDE.PhysicsInformedNN([chainf],
+    GridTraining(0.01),
+    # QuadratureTraining(),
+    additional_loss = additional_loss,
+    param_estim = true)
+
+discretization.multioutput
+discretization.flat_init_params
+pinnrep = NeuralPDE.symbolic_discretize(pde_system, discretization)
+prob = NeuralPDE.discretize(pde_system, discretization)
+res = Optimization.solve(prob,
+    BFGS();
+    maxiters = 5000,
+    callback = (p, l) -> println("Current loss is: $l"))
+
+pinnrep.init_params
+pinnrep.flat_init_params
 
 mcmc_chain, samples, stats = ahmc_bayesian_pinn_pde(pde_system,
     discretization;
     draw_samples = 1500, physdt = 1 / 20.0, bcstd = [1.0],
-    phystd = [0.005], l2std = [0.008], param = [Normal(9, 2)],
+    phystd = [0.005], l2std = [0.008, 0.004],
+    param = [Normal(9, 2)],
     priorsNNw = (0.0, 10.0),
     dataset = dataset,
     progress = true)
@@ -567,137 +600,4 @@ println(sym_prob1)
 # plot(sol_pestim4.ensemblesol[1], label = "estimated x2_1")
 # plot!(sol_pestim4.ensemblesol[2], label = "estimated y2_1")
-# plot!(sol_pestim4.ensemblesol[3], label = "estimated z2_1")
-
-# using NeuralPDE, Lux, ModelingToolkit, DataFrames, CSV, DataLoaders, Flux, IntervalSets,
-#     Optimization, OptimizationOptimJL
-
-# # Define the differential equation model
-# @parameters t, R, C, Cs
-
-# @variables T_in(..),
-#     T_ext(..),
-#     Q_heating(..),
-#     Q_cooling(..),
-#     Q_sun(..),
-#     Q_lights(..),
-#     Q_equipment(..)
-
-# # R, C, Cs = [1, 2, 3]
-
-# Dt = Differential(t)
-
-# # eqs = Dt(T_in(t)) ~ (-T_ext(t) + T_in(t)) / (R * C)
-# eqs = Dt(T_in(t)) ~ (T_ext(t) - T_in(t)) / (R * C) + Q_heating(t) / C - Q_cooling(t) / C +
-#     Q_sun(t) / Cs + (Q_lights(t) + Q_equipment(t)) / C
-
-# domains = [t ∈ Interval(0.0, 365.0 * 24.0 * 60.0)]
-# bcs = [Dt(T_in(0.0)) ~ 4.48]
-
-# dt = 10.0 # 10 minutes (600 seconds)
-
-# # Define the temporal space
-# tspan = (0.0, 365.0 * 24.0 * 60.0) # data for one year
-
-# # load sampled data from CSV
-# data = CSV.File("shoebox_free.csv") |> DataFrame
-
-# # Put the sampled data in dedicated variables
-# T_in_data = data."OSGB1000005735772_FLOOR_1:Zone Mean Air Temperature [C](TimeStep)"
-# T_ext_data = data."Environment:Site Outdoor Air Drybulb Temperature [C](TimeStep)"
-# Q_heating_data = data."OSGB1000005735772_FLOOR_1_HVAC:Zone Ideal Loads Zone Total Heating Rate [W](TimeStep)"
-# Q_cooling_data = data."OSGB1000005735772_FLOOR_1_HVAC:Zone Ideal Loads Zone Total Cooling Rate [W](TimeStep)"
-# Q_sun_data = data."Environment:Site Direct Solar Radiation Rate per Area [W/m2](TimeStep)"
-# Q_lights_data = data."OSGB1000005735772_FLOOR_1:Zone Lights Total Heating Rate [W](TimeStep)"
-# Q_equipment_data = data."OSGB1000005735772_FLOOR_1:Zone Electric Equipment Total Heating Rate [W](TimeStep)"
-
-# t_data = collect(Float64, 1:size(data, 1))
-
-# dataloader = DataLoader([
-#     vcat(T_in_data,
-#         T_ext_data,
-#         Q_heating_data,
-#         Q_cooling_data,
-#         Q_sun_data,
-#         Q_lights_data,
-#         Q_equipment_data,
-#         t_data),
-# ])
-
-# dataloader
-# # Define the NN
-# input_dim = 1
-# hidden_units = 8
-# len = length(t_data)
-
-# chain1 = Flux.Chain(Flux.Dense(input_dim, hidden_units, σ), Flux.Dense(hidden_units, 1)) |>
-#     Flux.f64
-# chain2 = Flux.Chain(Flux.Dense(input_dim, hidden_units, σ), Flux.Dense(hidden_units, 1)) |>
-#     Flux.f64
-# chain3 = Flux.Chain(Flux.Dense(input_dim, hidden_units, σ), Flux.Dense(hidden_units, 1)) |>
-#     Flux.f64
-# chain4 = Flux.Chain(Flux.Dense(input_dim, hidden_units, σ), Flux.Dense(hidden_units, 1)) |>
-#     Flux.f64
-# chain5 = Flux.Chain(Flux.Dense(input_dim, hidden_units, σ), Flux.Dense(hidden_units, 1)) |>
-#     Flux.f64
-# chain6 = Flux.Chain(Flux.Dense(input_dim, hidden_units, σ), Flux.Dense(hidden_units, 1)) |>
-#     Flux.f64
-# chain7 = Flux.Chain(Flux.Dense(input_dim, hidden_units, σ), Flux.Dense(hidden_units, 1)) |>
-#     Flux.f64
-
-# # Define dependent and independent variables
-# indvars = [t]
-# depvars = [:T_in, :T_ext, :Q_heating, :Q_cooling, :Q_sun, :Q_lights, :Q_equipment]
-# u_ = hcat(T_in_data,
-#     T_ext_data,
-#     Q_heating_data,
-#     Q_cooling_data,
-#     Q_sun_data,
-#     Q_lights_data,
-#     Q_equipment_data)
-
-# # Define the loss(additional loss will be using all data vectors)
-# init_params = [Flux.destructure(c)[1]
-#     for c in [chain1, chain2, chain3, chain4, chain5, chain6, chain7]]
-# acum = [0; accumulate(+, length.(init_params))]
-# sep = [(acum[i] + 1):acum[i + 1] for i in 1:(length(acum) - 1)]
-
-# function additional_loss(phi, θ, p)
-#     return sum(sum(abs2, phi[i](t_data[1:500]', θ[sep[i]]) .- u_[:, [i]]') / 500.0
-#         for i in 1:1:1)
-# end
-
-# @named pde_system = NeuralPDE.PDESystem(eqs,
-#     bcs,
-#     domains,
-#     [t],
-#     [T_in(t), T_ext(t), Q_heating(t), Q_cooling(t), Q_sun(t), Q_lights(t), Q_equipment(t)
-#     ],
-#     [R, C, Cs],
-#     defaults = Dict([R => 1.0, C => 1.0, Cs => 1.0]))#[R, C, Cs])
-
-# discretization = NeuralPDE.PhysicsInformedNN([
-#         chain1,
-#         chain2,
-#         chain3,
-#         chain4,
-#         chain5,
-#         chain6, chain7,
-#     ],
-#     NeuralPDE.GridTraining(dt), param_estim = true, additional_loss = additional_loss)
-# prob = NeuralPDE.discretize(pde_system, discretization)
-
-# # Parameter Optimization
-# res = Optimization.solve(prob,
-#     BFGS(),
-#     maxiters = 1000,
-#     callback = callback)
-
-# p_optimized = res.u[end]
-# # Plot of results
-# minimizer = res.u.depvar[1]
-# T_in_predict = minimizer(t_data)
-
-# using Plots
-# plot(t_data, T_in_data, label = "Observed Data")
-# plot!(t_data, T_in_predict, label = "Predicted Temperature", linestyle = :dash)
+# plot!(sol_pestim4.ensemblesol[3], label = "estimated z2_1")
\ No newline at end of file
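After sampling, the draws returned by `ahmc_bayesian_pinn_pde` can be summarised into point estimates. A hedged post-processing sketch, assuming the `samples` vector produced by the test above (1500 draws, with one inverse-problem parameter from `param = [Normal(9, 2)]`):

using Statistics

θ_mean = vec(mean(hcat(samples[(end - 499):end]...), dims = 2))  # average the last 500 draws
nn_est = θ_mean[1:(end - 1)]  # flat NN weights
p_est = θ_mean[end]           # point estimate of the inverse-problem parameter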