From a5a674aafa6c889843e6a1f43a41017884a71804 Mon Sep 17 00:00:00 2001
From: "Randall C. O'Reilly"
Date: Sun, 31 Mar 2024 23:59:40 -0700
Subject: [PATCH] NeuroMod.DAModGain = 0 by default, esp in vspatch; vspatch uses std init wts and learns on CaSpkD instead of GeIntNorm -- gets nice differentiated reps and works well. vspatch test case tests probabilistic reward learning; works well.
---
 PVLV.md                              | 22 ++++++++++------------
 axon/gtigen.go                       |  2 +-
 axon/layerparams.go                  |  4 ----
 axon/neuromod.go                     |  4 ++--
 axon/pvlv_prjns.go                   |  4 ++--
 axon/shaders/gpu_newstate_neuron.spv | Bin 35996 -> 35768 bytes
 examples/pcore_ds/params.go          |  9 +++++----
 examples/pvlv/params.go              |  6 +++---
 examples/vspatch/config.go           |  2 +-
 examples/vspatch/params.go           | 11 ++++++++++-
 examples/vspatch/vspatch_env.go      | 15 ++++++++++++++-
 11 files changed, 48 insertions(+), 31 deletions(-)

diff --git a/PVLV.md b/PVLV.md
index a2c210e95..df4483cbd 100644
--- a/PVLV.md
+++ b/PVLV.md
@@ -38,16 +38,16 @@ Note that we use anatomical labels for computationally-specified functions consi
 
 In contrast to the minus-plus phase-based timing of cortical learning, the RL-based learning in PVLV is generally organized on trial-wise boundaries, with some factors computed online within the trial. Here is a schematic, for an intermediate amount of positive CS learning and VSPatch prediction of a positive US outcome, with an "Eat" action that drives the US:
 
-| Trial Step:  | 0        | 1    | 2    | 3           |
-| ------------ | -------- | ---- | ---- | ----------- |
-| Event / Act  | CS       |      | Eat  | US          |
-| SC -> ACh    | ++       |      |      |             |
-| BLA          | ++       |      | Rp   | R           |
-| BLA dw       | tr=S*ACh |      |      | R(R-Rp)tr   |
-| OFC          | BLA->    | PT   | PT   | reset PT    |
-| VSPatch = VP |          |      | ++ Rp |            |
-| VP dw        |          |      |      | Sp Rp DA    |
-| DA           | ++ (BLA) |      |      | ++ (US-VPp) |
+| Trial Step:  | 0          | 1    | 2     | 3            |
+| ------------ | ---------- | ---- | ----- | ------------ |
+| Event / Act  | CS         |      | Eat   | +++ US       |
+| SC -> ACh    | +++        |      |       |              |
+| BLA          | ++         |      | Rp    | R            |
+| BLA dw       | tr=S ⋅ ACh |      |       | R(R-Rp)tr    |
+| OFC          | BLA->      | PT   | PT    | reset PT     |
+| VSPatch = VP |            |      | ++ Rp |              |
+| VP dw        |            |      |       | Sp ⋅ Rp ⋅ DA |
+| DA           | ++ (BLA)   |      |       | + (US-VPp)   |
 
 * Rp = receiving activity on previous trial
 * DA at US is computed at start of trial in PVLV.NewState, based on VS D1 - D2 on prev trial.
@@ -190,8 +190,6 @@ The learning rule here is a standard "3 factor" dopamine-modulated learning, ver
 
 where `DAlr` is the dopamine-signed learning rate factor for D1 vs. D2, which is a function of US for the current trial (applied at start of a trial) minus VSPatch _from the prior time step_. Thus the prediction error in VSPatch relative to US reward drives learning, such that it will always adjust to reduce error, consistent with standard Rescorla-Wagner / TD learning rules.
 
-Also, the learning factor for the `Rp` receiving activity on the prior time step is the `GeIntNorm` Max-normalized value, not raw activity, because VSPatch neurons can be relatively inactive at the start (this is done by setting `SpkPrv` to `GeIntNorm` for this layer type only).
-
 # Negative USs and Costs
 
 There are two qualitatively-different types of negative outcome values, which require distinct pathways within the model:
diff --git a/axon/gtigen.go b/axon/gtigen.go
index 319ea717a..f4b332a06 100644
--- a/axon/gtigen.go
+++ b/axon/gtigen.go
@@ -120,7 +120,7 @@ var _ = gti.AddType(&gti.Type{Name: "github.com/emer/axon/v2/axon.DAModTypes", I
 
 var _ = gti.AddType(&gti.Type{Name: "github.com/emer/axon/v2/axon.ValenceTypes", IDName: "valence-types", Doc: "ValenceTypes are types of valence coding: positive or negative."})
 
-var _ = gti.AddType(&gti.Type{Name: "github.com/emer/axon/v2/axon.NeuroModParams", IDName: "neuro-mod-params", Doc: "NeuroModParams specifies the effects of neuromodulators on neural\nactivity and learning rate. These can apply to any neuron type,\nand are applied in the core cycle update equations.", Fields: []gti.Field{{Name: "DAMod", Doc: "dopamine receptor-based effects of dopamine modulation on excitatory and inhibitory conductances: D1 is excitatory, D2 is inhibitory as a function of increasing dopamine"}, {Name: "Valence", Doc: "valence coding of this layer -- may affect specific layer types but does not directly affect neuromodulators currently"}, {Name: "DAModGain", Doc: "multiplicative factor on overall DA modulation specified by DAMod -- resulting overall gain factor is: 1 + DAModGain * DA, where DA is appropriate DA-driven factor"}, {Name: "DALRateSign", Doc: "modulate the sign of the learning rate factor according to the DA sign, taking into account the DAMod sign reversal for D2Mod, also using BurstGain and DipGain to modulate DA value -- otherwise, only the magnitude of the learning rate is modulated as a function of raw DA magnitude according to DALRateMod (without additional gain factors)"}, {Name: "DALRateMod", Doc: "if not using DALRateSign, this is the proportion of maximum learning rate that Abs(DA) magnitude can modulate -- e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100%"}, {Name: "AChLRateMod", Doc: "proportion of maximum learning rate that ACh can modulate -- e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100%"}, {Name: "AChDisInhib", Doc: "amount of extra Gi inhibition added in proportion to 1 - ACh level -- makes ACh disinhibitory"}, {Name: "BurstGain", Doc: "multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign!"}, {Name: "DipGain", Doc: "multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! should be small for acq, but roughly equal to burst for ext"}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}}})
+var _ = gti.AddType(&gti.Type{Name: "github.com/emer/axon/v2/axon.NeuroModParams", IDName: "neuro-mod-params", Doc: "NeuroModParams specifies the effects of neuromodulators on neural\nactivity and learning rate.
These can apply to any neuron type,\nand are applied in the core cycle update equations.", Fields: []gti.Field{{Name: "DAMod", Doc: "dopamine receptor-based effects of dopamine modulation on excitatory and inhibitory conductances: D1 is excitatory, D2 is inhibitory as a function of increasing dopamine"}, {Name: "Valence", Doc: "valence coding of this layer -- may affect specific layer types but does not directly affect neuromodulators currently"}, {Name: "DAModGain", Doc: "dopamine modulation of excitatory and inhibitory conductances (i.e., \"performance dopamine\" effect -- this does NOT affect learning dopamine modulation in terms of RLrate): g *= 1 + (DAModGain * DA)"}, {Name: "DALRateSign", Doc: "modulate the sign of the learning rate factor according to the DA sign, taking into account the DAMod sign reversal for D2Mod, also using BurstGain and DipGain to modulate DA value -- otherwise, only the magnitude of the learning rate is modulated as a function of raw DA magnitude according to DALRateMod (without additional gain factors)"}, {Name: "DALRateMod", Doc: "if not using DALRateSign, this is the proportion of maximum learning rate that Abs(DA) magnitude can modulate -- e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100%"}, {Name: "AChLRateMod", Doc: "proportion of maximum learning rate that ACh can modulate -- e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100%"}, {Name: "AChDisInhib", Doc: "amount of extra Gi inhibition added in proportion to 1 - ACh level -- makes ACh disinhibitory"}, {Name: "BurstGain", Doc: "multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign!"}, {Name: "DipGain", Doc: "multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! 
should be small for acq, but roughly equal to burst for ext"}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}}})
 
 var _ = gti.AddType(&gti.Type{Name: "github.com/emer/axon/v2/axon.NeuronFlags", IDName: "neuron-flags", Doc: "NeuronFlags are bit-flags encoding relevant binary state for neurons"})
 
diff --git a/axon/layerparams.go b/axon/layerparams.go
index 4796ffeda..bd2113495 100644
--- a/axon/layerparams.go
+++ b/axon/layerparams.go
@@ -949,10 +949,6 @@ func (ly *LayerParams) NewStateNeuron(ctx *Context, ni, di uint32, vals *LayerVa
 	SetNrnV(ctx, ni, di, SpkMax, 0)
 	SetNrnV(ctx, ni, di, SpkMaxCa, 0)
 
-	if ly.LayType == VSPatchLayer {
-		SetNrnV(ctx, ni, di, SpkPrv, NrnV(ctx, ni, di, GeIntNorm))
-	}
-
 	ly.Acts.DecayState(ctx, ni, di, ly.Acts.Decay.Act, ly.Acts.Decay.Glong, ly.Acts.Decay.AHP)
 	// Note: synapse-level Ca decay happens in DWt
 	ly.Acts.KNaNewState(ctx, ni, di)
diff --git a/axon/neuromod.go b/axon/neuromod.go
index 4c04433ff..e9085fbe0 100644
--- a/axon/neuromod.go
+++ b/axon/neuromod.go
@@ -60,7 +60,7 @@ type NeuroModParams struct {
 	// valence coding of this layer -- may affect specific layer types but does not directly affect neuromodulators currently
 	Valence ValenceTypes
 
-	// multiplicative factor on overall DA modulation specified by DAMod -- resulting overall gain factor is: 1 + DAModGain * DA, where DA is appropriate DA-driven factor
+	// dopamine modulation of excitatory and inhibitory conductances (i.e., "performance dopamine" effect -- this does NOT affect learning dopamine modulation in terms of RLrate): g *= 1 + (DAModGain * DA)
 	DAModGain float32
 
 	// modulate the sign of the learning rate factor according to the DA sign, taking into account the DAMod sign reversal for D2Mod, also using BurstGain and DipGain to modulate DA value -- otherwise, only the magnitude of the learning rate is modulated as a function of raw DA magnitude according to DALRateMod (without additional gain factors)
@@ -86,7 +86,7 @@ type NeuroModParams struct {
 
 func (nm *NeuroModParams) Defaults() {
 	// nm.DAMod is typically set by BuildConfig -- don't reset here
-	nm.DAModGain = 0.5
+	nm.DAModGain = 0
 	nm.DALRateMod = 0
 	nm.AChLRateMod = 0
 	nm.BurstGain = 1
diff --git a/axon/pvlv_prjns.go b/axon/pvlv_prjns.go
index 5d2bfe36e..bf1efe803 100644
--- a/axon/pvlv_prjns.go
+++ b/axon/pvlv_prjns.go
@@ -56,8 +56,8 @@ func (pj *PrjnParams) VSPatchDefaults() {
 	pj.SWts.Adapt.On.SetBool(false)
 	pj.SWts.Adapt.SigGain = 1
 	pj.SWts.Init.SPct = 0
-	pj.SWts.Init.Mean = 0.1
-	pj.SWts.Init.Var = 0.05
+	pj.SWts.Init.Mean = 0.5
+	pj.SWts.Init.Var = 0.25
 	pj.SWts.Init.Sym.SetBool(false)
 	pj.Learn.Trace.Tau = 1
 	pj.Learn.Trace.LearnThr = 0 // 0.3
diff --git a/axon/shaders/gpu_newstate_neuron.spv b/axon/shaders/gpu_newstate_neuron.spv
index 13fc4c85164e007ad82fb4821326aa0253b6ac39..ff1f94efebf8a124a48154c7176f2e33c6e9e5b8 100644
Binary files a/axon/shaders/gpu_newstate_neuron.spv and b/axon/shaders/gpu_newstate_neuron.spv differ
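As context for the DAModGain changes above: the new doc comment describes a purely "performance" effect of dopamine on conductances, g *= 1 + (DAModGain * DA), separate from the learning-rate modulation. The following is a minimal, hypothetical sketch of that computation (the daModGe function and its signature are illustrative only, not the actual axon implementation):

```go
package main

import "fmt"

// daModGe sketches the "performance dopamine" effect described by
// NeuroModParams.DAModGain: a conductance g is scaled by 1 + (gain * da),
// with the dopamine sign reversed for D2-receptor layers (D1 is excitatory,
// D2 inhibitory as dopamine increases).
func daModGe(g, da, gain float32, d2 bool) float32 {
	if d2 {
		da = -da // D2 receptors reverse the sign of the dopamine effect
	}
	return g * (1 + gain*da)
}

func main() {
	// With the new default DAModGain = 0, the factor is exactly 1, so
	// dopamine no longer alters performance, only learning-rate modulation.
	fmt.Printf("%.2f\n", daModGe(0.8, 0.5, 0.0, false)) // 0.80
	fmt.Printf("%.2f\n", daModGe(0.8, 0.5, 0.2, false)) // 0.88 (D1: boosted by a DA burst)
	fmt.Printf("%.2f\n", daModGe(0.8, 0.5, 0.2, true))  // 0.72 (D2: suppressed by a DA burst)
}
```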
zHmFm9^Yz2JSkKHk{jSwTy~#nJkLbeZ`Av+wB@+kSF+M6e$6$^e<71NXxmz*Db^myg zJ00-%Di7Ev8l16wQZnsFvVng}7Z~F~{-N%S7aiPrJ}nupFKW1cMlxK%&GoaAGgrp$ z7@w1jkGXzcGI7Ao^$U{0;rd10X>bMqk}fc~AxEy@=J938k;hi))ZaYrk&KW1zalyN zv$pL2Rmu3+|7*$rPU$iJ*QMiQ|6f|dKgR!t?D*LKmy`c1rN{WcA{`(5|LPL{G5)W~ zj?Z(Ox%+j=tTp1?)?mLO8602!U(Z>;zp1NV7juZ+_4qBx*ao$m*mu9J>q=etVKZ0u z;=Q`C59-NygYLhhEBdlWpRCJ$MkW8QF8ti*_araX$77_=@9VWcC2mri_aj{k3x!DIaIN_Twh z9>c#&#^xOVA$gmwnB#lW$$`x|{!=n|%<+Bc&VijVfRV%Te;~Q1E5`q!bmC)k{2xgM zkMVyj-SM##AM8=O9RI&0-=r(X|A}?fVZvjp4zy1;mrAe+}R$=l@@dG!f{7dG?imkb_x4M;aH?4G;jlCinY=sdfe zpS$xPAbCg1f1oh(V{`t4B!kEN2TON;?BoL*Q(wnFL^|h89N(eBh>y+j50eZY;~y^F z@v##hjI-DM_;=mJit&#S1|DM_Dc!NK6AO$v{9H4KM@i?524fD7mQG_1u^poej5$O$ zhhrtfA?71^HNz$DUyVt_W zlCeEUVkv{k^24Z1%-%}(rCysxrWUv_jw8qEr zPnWL0Ha>mu@%&8@Nd}KKIa|7Gg58|Yk&Mkb&Xs((;(cA0 zvGauS4ElyH=Qv*&zS#T>xIpp&^5GeP-8n9ljLmhqNHTfB?Xy}kJY1KHCF`%P3w`jR zF8EEkgx zM6&+cG0+De#(*DokKv(`r_vZ6CX6v)^BAs@3?9euaOoZccJq0JWNgl{M)I`c#d#bQ z#yn#4Jg$}OdBpB`Lz1z19)~591Kd7WOD3mluuii6+8WTO_^1Z>VK=`K$up@2>xEGR zY_7ou$>6aDkCg5jU^ky@BxAF_vC++MlXU&H{P4$zIieqS^V=->mc;K`VerFdexs7X zBfm#UH$Uv=GbS0EYdS6&jQ4Q+Of){`e4TXtwVdgL51fgG-JG{bo=u#u7Y1i+=KN^M z;F0rVq?H2H=;g1je=!f0>o*;QH@q3~$_+c}@CrJj6{BDqL ze%Q^YD;b;h8ynsHwo2Du%MX8i;732~=C@7qeB##=20v`(cavoB$ZxxJ^TTdFJ0xSX zKH2Ez_hjk%Yx&`i5B%td-TZErygTvRDGYwt%x{-u@W^jUy7^%@pJ~b1tj{#M`Q0L2 ze=R@!@qr)xu$$kkE?&seD+AjX8qPiH^05o_1E&lA0PP9 z54-v8le|6gdx|jlVKcv{N(PVoo+jP=u$#}*C1bPxj7B%VXG+&!%MX8i;732~yrUki zJMUNA{hoB2F2S(RQg`& zaqKS>#@MlW>@SxL9>;!%bdMdo$Nmb**sR~#=pN@QrR%SZ6F&Ik!#L@O-TYo9d0*o9 zYGLrhW`3`c3?BKtR=W9NH=ox@#%BHXjc$H#kgmU$AO85jkAB$A?~Rh5lK8zz82qrA z-#ya9KR)oIA9nNmwB%D6DudONm_`r{T*m<7at~+Nsck}&C$4&m{+@LG6~4Dgz#kv@(hs}&{l4U9Cw_k*41U-ROf z`TeDI{k8n?#|M7&!)|`xF7o2__*cTUL&HQvMb-*LP{e+nxcJtX^GB)eW8r}T*&;vjG@qr)xu$y1M229Nv>kZyk1&F4VL*sLGa=;n8@^vDl?eBehv?B;if5(7)_`r{T*v;=K$uCa)jur+#Z02{2WbnxESn1}6-F%Ld zjLrJ-jc$GqkRJKrj}QFlhu!>6ko=Ow??hqn!)AUbNd}MnPL^(d*v)69WNg+S*y!fB zN_ymnKR)oIA9jBhJ4G_Tclxu~snThD7Q=R$E-;?k$o?#Lx@7u!oM%XXsr0Yv@|eyP z#yGJVJ4-Tn9Ov26Jx=T%*Ey20SwFYYJ?M4-xbo$ z54-t1NHR9-S2nu&Jy?3=hd(~>qaSwjdx+#a62FHEgC92YdzfVK$nPrY=7-&U9*!LJ zM>M+mt&tx2;g1je=!f0>1|`2D@mnhle%Q=!NHTcjH!R)!u$$l2lCfD|*XZUqB0ciM zA0PP954-uTmwad9w?P>Eu$kW@C4)zP*GM-%?B=skGB)d*8r}RhOOO2U#|M7&!)|`p zN`7VHH!2K%*v#)ylEEXtG3n-q-F(I+W3xWd=;n8w^vDl?eBehv?B=&c@~aZR>xID& zoB2IjGI-?o80qGR-FzM^8JqRTHM;pdUV7w*KR)oIA9nYBg5>Xti}fcqx??>_dW?lX zKE$FQc8~Q2$*)dh?FwV8*gV!7C45RSlck#5h+`_+T;qYo)&>#ebbJ;$w6C*GmSE@!ufb@v##hEXIGM^tYz?ZxTj)Y>xkC z$>1^mTckTacH)D@_-~bdSBig^Fydo#{I}t!E5?7jbjQa|e6Se*9n#;H;=fZE@v%An zyCj3h`0tkP_}GaL7URE1`rA|d_X;CEHphRTWbhdO{n8yDJMqC{{0~TfM~eSJVZ_Jg z_#cuC9^-#ly5nOfK3I(Z5$W$t@$VK!d~A;YQOV#j{>P*{K6dZ_k4wg8{S%Gu^Wc-x zeIDE<0e^hpM?dWTH*%kn{4H^Lq%Qt_)u*M?_}qo others - "Layer.Learn.NeuroMod.BurstGain": "0.1", // 0.1 == 0.2 > 0.05 > 0.5 -- key lrate modulator - "Layer.Learn.RLRate.On": "true", // note: applied for tr update trials - "Layer.Learn.TrgAvgAct.On": "true", // true > false + "Layer.Inhib.Pool.Gi": "0.5", // 0.5 > others + "Layer.Learn.NeuroMod.BurstGain": "0.1", // 0.1 == 0.2 > 0.05 > 0.5 -- key lrate modulator + "Layer.Learn.NeuroMod.DAModGain": "0.2", // was 0.5 + "Layer.Learn.RLRate.On": "true", // note: applied for tr update trials + "Layer.Learn.TrgAvgAct.RescaleOn": "true", // true > false }, Hypers: params.Hypers{ "Layer.Learn.NeuroMod.BurstGain": {"Tweak": "-"}, diff --git a/examples/pvlv/params.go b/examples/pvlv/params.go index 954788b21..cce3f4b40 100644 --- a/examples/pvlv/params.go +++ b/examples/pvlv/params.go @@ -118,9 +118,9 @@ var ParamSets = netparams.Sets{ }}, {Sel: 
".VSPatchPrjn", Desc: "", Params: params.Params{ - "Prjn.PrjnScale.Abs": "6", - "Prjn.Learn.Trace.LearnThr": "0.1", - "Prjn.Learn.LRate.Base": "0.2", // 0.05 def -- todo: needs faster + "Prjn.PrjnScale.Abs": "3", + "Prjn.Learn.Trace.LearnThr": "0", + "Prjn.Learn.LRate.Base": "0.05", // 0.05 def -- todo: needs faster }}, {Sel: "#OFCposUSPTToOFCposUSPT", Desc: "", Params: params.Params{ diff --git a/examples/vspatch/config.go b/examples/vspatch/config.go index 558891968..be9e412c1 100644 --- a/examples/vspatch/config.go +++ b/examples/vspatch/config.go @@ -68,7 +68,7 @@ type RunConfig struct { NEpochs int `default:"30"` // total number of trials per epoch. Should be an even multiple of NData. - NTrials int `default:"32"` + NTrials int `default:"128"` } // LogConfig has config parameters related to logging data diff --git a/examples/vspatch/params.go b/examples/vspatch/params.go index b0737602f..ce1f01e10 100644 --- a/examples/vspatch/params.go +++ b/examples/vspatch/params.go @@ -17,18 +17,27 @@ var ParamSets = netparams.Sets{ Params: params.Params{ "Layer.Acts.Clamp.Ge": "1.0", // 1.5 is def, was 0.6 (too low) }}, + {Sel: "#State", Desc: "", + Params: params.Params{ + "Layer.Inhib.ActAvg.Nominal": "0.2", + }}, {Sel: ".VSPatchLayer", Desc: "", Params: params.Params{ + "Layer.Inhib.Pool.On": "false", + "Layer.Inhib.Pool.Gi": "0.2", "Layer.Learn.NeuroMod.DipGain": "1", // boa requires balanced.. "Layer.Learn.TrgAvgAct.GiBaseInit": "0", // 0.5 default; 0 better "Layer.Learn.RLRate.SigmoidMin": "0.05", // 0.05 def "Layer.Learn.NeuroMod.AChLRateMod": "0", + "Layer.Learn.NeuroMod.DAModGain": "0", // this is actual perf mod }}, {Sel: ".VSPatchPrjn", Desc: "", Params: params.Params{ - "Prjn.PrjnScale.Abs": "6", + "Prjn.PrjnScale.Abs": "2", "Prjn.Learn.Trace.LearnThr": "0", "Prjn.Learn.LRate.Base": "0.05", // 0.05 def + "Prjn.SWts.Init.Mean": "0.5", + "Prjn.SWts.Init.Var": "0.25", }}, }, } diff --git a/examples/vspatch/vspatch_env.go b/examples/vspatch/vspatch_env.go index 366de0367..a18b1bda6 100644 --- a/examples/vspatch/vspatch_env.go +++ b/examples/vspatch/vspatch_env.go @@ -30,6 +30,9 @@ type VSPatchEnv struct { // trial counter is for the step within condition Trial env.Ctr `view:"inline"` + // if true, reward value is a probability of getting a 1 reward + Probs bool + // number of conditions, each of which can have a different reward value NConds int @@ -82,6 +85,7 @@ func (ev *VSPatchEnv) Desc() string { } func (ev *VSPatchEnv) Defaults() { + ev.Probs = true ev.NConds = 4 ev.NTrials = 3 ev.NUnitsY = 5 @@ -176,7 +180,16 @@ func (ev *VSPatchEnv) Step() bool { ev.RenderState(ev.Sequence.Cur, ev.Trial.Cur) ev.Rew = 0 if ev.Trial.Cur == ev.NTrials-1 { - ev.Rew = ev.CondVals[ev.Sequence.Cur] + rv := ev.CondVals[ev.Sequence.Cur] + if ev.Probs { + if erand.BoolP32(rv, -1, &ev.Rand) { + ev.Rew = 1 + } else { + ev.Rew = 0.001 + } + } else { + ev.Rew = rv + } } ev.Sequence.Same() if ev.Trial.Incr() {