From abc9a0b68e04024b493f4603fa79b3c0685d448d Mon Sep 17 00:00:00 2001
From: Hari
Date: Fri, 29 Dec 2023 16:28:00 +0100
Subject: [PATCH 1/9] new bigger map added

---
 voxelgym2D/envs/maps/100x100x100_dense.npy | Bin 80128 -> 0 bytes
 voxelgym2D/envs/maps/200x200x200_dense.npy | Bin 320128 -> 0 bytes
 voxelgym2D/envs/maps/50x50x50_dense.npy    | Bin 20128 -> 0 bytes
 voxelgym2D/envs/maps/600x600.npy           | Bin 0 -> 2880128 bytes
 4 files changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 voxelgym2D/envs/maps/100x100x100_dense.npy
 delete mode 100644 voxelgym2D/envs/maps/200x200x200_dense.npy
 delete mode 100644 voxelgym2D/envs/maps/50x50x50_dense.npy
 create mode 100644 voxelgym2D/envs/maps/600x600.npy

diff --git a/voxelgym2D/envs/maps/100x100x100_dense.npy b/voxelgym2D/envs/maps/100x100x100_dense.npy
deleted file mode 100644
index 1962b83b87a20cc4a97115e8b001045ea6796900..0000000000000000000000000000000000000000
Binary files a/voxelgym2D/envs/maps/100x100x100_dense.npy and /dev/null differ
diff --git a/voxelgym2D/envs/maps/200x200x200_dense.npy b/voxelgym2D/envs/maps/200x200x200_dense.npy
deleted file mode 100644
index 6101fe7bc9ce72c50c7bf59106b8641a9276af67..0000000000000000000000000000000000000000
Binary files a/voxelgym2D/envs/maps/200x200x200_dense.npy and /dev/null differ
diff --git a/voxelgym2D/envs/maps/50x50x50_dense.npy b/voxelgym2D/envs/maps/50x50x50_dense.npy
deleted file mode 100644
index 50cdb81f170bb49120b641038b720142fe8cd973..0000000000000000000000000000000000000000
Binary files a/voxelgym2D/envs/maps/50x50x50_dense.npy and /dev/null differ
diff --git a/voxelgym2D/envs/maps/600x600.npy b/voxelgym2D/envs/maps/600x600.npy
new file mode 100644
index 0000000000000000000000000000000000000000..97f6eb225081e5ed9e39d0f993c89d562b4f9210
Binary files /dev/null and b/voxelgym2D/envs/maps/600x600.npy differ

Date: Fri, 29 Dec 2023 16:29:14 +0100
Subject: [PATCH 2/9] migrated to gymnasium, agent is no longer restricted to spawn at the center

---
 voxelgym2D/__init__.py          |   3 +-
 voxelgym2D/envs/base_env.py     | 637 ++++++++++++++++++++++----------
 voxelgym2D/envs/env_one_step.py |  99 +++--
 3 files changed, 514 insertions(+), 225
deletions(-) diff --git a/voxelgym2D/__init__.py b/voxelgym2D/__init__.py index 9722efe..a418bf5 100644 --- a/voxelgym2D/__init__.py +++ b/voxelgym2D/__init__.py @@ -1,6 +1,7 @@ -from gym.envs.registration import register +from gymnasium.envs.registration import register register( id="onestep-v0", entry_point="voxelgym2D.envs:VoxelGymOneStep", + nondeterministic=True, ) diff --git a/voxelgym2D/envs/base_env.py b/voxelgym2D/envs/base_env.py index 02be648..4109d07 100644 --- a/voxelgym2D/envs/base_env.py +++ b/voxelgym2D/envs/base_env.py @@ -1,82 +1,140 @@ """Base class for all environments""" -import logging import math import os +import sys from collections import OrderedDict from itertools import product -from typing import Dict, List, Optional, Tuple, Union +from logging import Formatter, Logger, LogRecord, StreamHandler +from typing import Any, Callable, Dict, List, Optional, Tuple, Union import cv2 -import gym +import gymnasium as gym import matplotlib.patches as mpatches import matplotlib.pyplot as plt import numpy as np -from gym.utils import seeding +from gymnasium.utils import seeding from pathfinding.core.diagonal_movement import DiagonalMovement from pathfinding.core.grid import Grid +from pathfinding.core.node import GridNode from pathfinding.finder.a_star import AStarFinder from skimage import draw -# based on https://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output -class CustomFormatter(logging.Formatter): - """Colors for different log levels.""" +class CustomFormatter(Formatter): + """Custom formatter with colors for different log levels.""" - grey = "\x1b[38;20m" - yellow = "\x1b[33;20m" - red = "\x1b[31;20m" - bold_red = "\x1b[31;1m" + _format = "%(asctime)s - %(levelname)s - %(message)s - %(filename)s:%(lineno)d" + # different color for different log level + # https://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output + white = "\x1b[37;1m" + green = "\x1b[32;1m" + yellow = "\x1b[33;1m" + purple = "\x1b[35;1m" + red = "\x1b[31;1m" reset = "\x1b[0m" - format_ = "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)" - - FORMATS = { - logging.DEBUG: grey + format_ + reset, - logging.INFO: grey + format_ + reset, - logging.WARNING: yellow + format_ + reset, - logging.ERROR: red + format_ + reset, - logging.CRITICAL: bold_red + format_ + reset, + COLORED_FORMATS = { + "DEBUG": white + _format + reset, + "INFO": green + _format + reset, + "WARNING": yellow + _format + reset, + "ERROR": purple + _format + reset, + "CRITICAL": red + _format + reset, } - def format(self, record): - log_fmt = self.FORMATS.get(record.levelno) - formatter_ = logging.Formatter(log_fmt, datefmt="%H:%M:%S") - return formatter_.format(record) + def format(self, record: LogRecord) -> str: + """ + Format the log record. + Parameters + ---------- + record : LogRecord + Log record + + Returns + ------- + str + Formatted log record + """ + log_fmt = self.COLORED_FORMATS.get(record.levelname) + formatter = Formatter(log_fmt, datefmt="%Y-%m-%d %H:%M:%S") + return formatter.format(record) -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) -formatter = CustomFormatter() -stream_handler = logging.StreamHandler() -stream_handler.setFormatter(formatter) -logger.addHandler(stream_handler) + +class CustomLogger: + """Logger class.""" + + def __init__(self, name: str, log_level: str = "ERROR"): + """ + Initialize the logger. + + Parameters + ---------- + name : str + Name of the logger. 
+ log_level : str, optional + Log level, by default "INFO" + """ + if log_level not in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]: + raise ValueError(f"Invalid log level: {log_level}") + self.log_level = log_level + self.name = name + self._logger: Logger + self._setup_logger() + + def _setup_logger(self): + """Setup the logger.""" + self._logger = Logger(self.name) + self._logger.setLevel(self.log_level) + formatter = CustomFormatter() + + # log to stdout + handler = StreamHandler(sys.stdout) + handler.setLevel(self.log_level) + handler.setFormatter(formatter) + self._logger.addHandler(handler) + + def get_logger(self) -> Logger: + """ + Get the logger + + Returns + ------- + logging.Logger + Logger + """ + return self._logger class BaseEnv(gym.Env): """Base class for all environments""" - metadata = {"render.modes": ["human"]} + metadata: Dict[str, Any] = {"render_modes": ["None"], "render_fps": 1} def __init__( self, - mapfile: str = "200x200x200_dense.npy", + render_mode: Optional[str] = None, + mapfile: str = "600x600.npy", view_size: int = 21, + image_size: int = 42, max_collisions: int = 0, max_steps: int = 60, show_path: bool = True, multi_output: bool = False, partial_reward: bool = True, - image_size: int = 42, - test_mode: bool = False, inference_mode: bool = False, + log_level: str = "ERROR", ): """ Parameters ---------- + render_mode : Optional[str], optional + render mode, by default None mapfile : str name of the map file in the maps folder view_size : int size of the view window for observation + image_size : int + size of the image to be returned as observation max_collisions : int maximum number of collisions allowed before episode ends max_steps : int @@ -87,26 +145,33 @@ def __init__( whether to add additional outputs in the observation partial_reward : bool whether to give rewards for each step - image_size : int - size of the image to be returned as observation - test_mode : bool - whether to run in test mode, for evaluation during training inference_mode : bool whether to run in inference mode + log_level : str, optional + log level, by default "ERROR". 
One of "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" """ + self.render_mode = "None" if render_mode is None else render_mode + if self.render_mode not in self.metadata["render_modes"]: + raise ValueError(f"Invalid render_mode: {self.render_mode}") - super().__init__() # current file path __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) # load map self.grid_map = np.load(os.path.join(__location__, "maps", mapfile)) + self.MAX_VAL_UINT = int(0.9 * np.iinfo(np.uint32).max) # 0.9 to avoid overflow + + # an arry to keep track of the start and target location + self.start_locations, self.target_locations = self._start_end_counts() self.world_size = self.grid_map.shape - if view_size < 10: - logger.warning("view_size should be at least 10, setting to 10") - self.view_size = 10 + # initialize the logger + self.logger = CustomLogger(self.__class__.__name__, log_level=log_level).get_logger() + + if view_size < 16: + self.logger.warning("view_size should be at least 16, setting to 16") + self.view_size = 16 elif view_size > np.amin(self.world_size): - logger.warning( + self.logger.warning( "view_size should be less than half of the world size, setting to %s", int((np.amin(self.world_size) - 2) / 2), ) @@ -116,42 +181,73 @@ def __init__( view_ratio = image_size / view_size # should be even if view_ratio % 2 != 0: - logger.warning("view_ratio should be even, setting to %s", (int(view_ratio) + 1)) + self.logger.warning("view_ratio should be even, setting to %s", (int(view_ratio) + 1)) self.image_size = int(view_ratio + 1) * view_size else: self.image_size = image_size # set the agent to middle of the world map self._new_world = np.ones((int(2 * self.view_size), int(2 * self.view_size))) + # place holder for target locations for the sliced view (_new_world) + self._new_target_locations = np.ones((int(2 * self.view_size), int(2 * self.view_size)), dtype=np.uint32) self._new_world_center: np.ndarray = np.array([self.world_size[1] / 2, self.world_size[0] / 2], dtype=np.int32) self._next_new_world_center: np.ndarray = np.array( [self.world_size[1] / 2, self.world_size[0] / 2], dtype=np.int32 ) + # placeholder for mapping function + self._mapping = lambda x, y: (x, y) + # create a list of locations where the agent can be placed + # 25 possible locations for agent + factor = 5 + self.possible_start_locations = np.array( + list( + product( + np.linspace( + start=2 * self.view_size / factor, + stop=2 * self.view_size, + num=factor, + endpoint=False, + dtype=int, + ), + np.linspace( + start=2 * self.view_size / factor, + stop=2 * self.view_size, + num=factor, + endpoint=False, + dtype=int, + ), + ) + ) + ) + self._agent_location: np.ndarray = np.array([self.view_size, self.view_size], dtype=np.int32) self._target_location: np.ndarray = np.array([self.view_size, self.view_size], dtype=np.int32) + self.ini_distance_to_target = 0.0 self.min_distance_to_target = 0.0 self.cost_astar = 0.0 + self.ini_cost_astar = 0.0 self.min_cost_astar = 0.0 self.astar_runs = 0 self._path: List = [] + self.ini_astarPath: List = [] self.astarPath: List = [] + self._astar_grid = Grid() self.action = [-1, -1] self.action_cost = 0.0 - self.obs_world = np.zeros((3, self.image_size, self.image_size), dtype=np.uint8) + self.obs_world = np.zeros((3, self.image_size, self.image_size)) + self.obs_world_astar = np.zeros((3, self.image_size, self.image_size), dtype=np.uint8) self.base_obs_world = np.zeros((3, self.image_size, self.image_size), dtype=np.float32) self.num_targets_reached = 0 self.current_step 
= 0 self.num_collisions = 0 - self.test_mode = test_mode self.inference_mode = inference_mode - # set back the agent to middle of the world map after these many targets - self.soft_reset_freq = 1500 - if self.test_mode: - self.soft_reset_freq = 50 + # set back the agent to a new location than the previous + # target location after these many episodes + self.soft_reset_freq = 10 - self.target_num = -1 + self.target_num = 0 self.max_collisions = max_collisions self.max_steps = max_steps self.show_path = show_path @@ -161,14 +257,15 @@ def __init__( self.delta = list(product([-1, 0, 1], repeat=2)) self.delta.remove((0, 0)) + # random number generator for shuffling the possible target locations + seed_seq = np.random.SeedSequence() + self.random_gen: np.random.Generator = np.random.Generator(np.random.PCG64(seed_seq)) + # math constants self.SQRT2 = math.sqrt(2) self.EXP1 = math.exp(1) self.EXP1_1 = self.EXP1 - 1 - self.seed() - self.action_space = None - self.multi_output = multi_output if self.multi_output: self.observation_space = gym.spaces.Dict( @@ -185,41 +282,78 @@ def __init__( low=0, high=255, shape=(3, self.image_size, self.image_size), dtype=np.uint8 ) - def get_logger(self) -> logging.Logger: + def get_logger(self) -> Logger: """ Returns the logger Returns ------- - logger : logging.Logger + logger : Logger logger object """ - return logger + return self.logger - def seed(self, seed: Optional[int] = None) -> List[Optional[int]]: + @staticmethod + def find_obstacle_neighbor_count(grid_map: np.ndarray) -> np.ndarray: """ - Sets the seed for this env's random number generator(s). + Finds the number of neighboring obstacles for each cell in the grid map Parameters ---------- - seed : int, optional - Seed for the random number generator(s), by default None + grid_map : np.ndarray + grid map with obstacles marked as 1s and free cells marked as 0s + + Returns + ------- + neighbors : np.ndarray + number of neighboring obstacles for each cell in the grid map + """ + # add a border of 1s around the grid map + padded_grid_map = np.pad(grid_map, pad_width=1, mode="constant", constant_values=0) + # get the neighbors of all cells + neighbors = ( + padded_grid_map[:-2, :-2] + + padded_grid_map[:-2, 1:-1] + + padded_grid_map[:-2, 2:] + + padded_grid_map[1:-1, :-2] + + padded_grid_map[1:-1, 2:] + + padded_grid_map[2:, :-2] + + padded_grid_map[2:, 1:-1] + + padded_grid_map[2:, 2:] + ) + # return the count of neighboring obstacles + return neighbors + + def _start_end_counts(self) -> Tuple[np.ndarray, np.ndarray]: + """ + Create arrays to keep track of the start and end cell counts Returns ------- - Returns the list of seed used in this env's random number generators + start_counts : np.ndarray + shape like self.grid_map with the count of start cells + end_counts : np.ndarray + shape like self.grid_map with the count of end cells """ - self.np_random, seed = seeding.np_random(seed) - return [seed] + neighbors = self.find_obstacle_neighbor_count(self.grid_map) - def make_astar_matrix(self) -> None: + # find all cells with value 0 and without any neighbors with value > 0 + possible_start_cells = (np.logical_and(self.grid_map == 0, neighbors == 0)).astype(np.uint32) + # change all cells with value 0 to max value of int as they are not possible start cells + possible_start_cells[possible_start_cells == 0] = self.MAX_VAL_UINT + # end cells are same as possible start cells + possible_end_cells = np.copy(possible_start_cells) + return possible_start_cells, possible_end_cells + + def _make_astar_matrix(self) 
-> None: """ Creates the astar matrix for the current world map and sets the astar grid """ - _astar_matrix = np.abs(1.0 - self._new_world).astype(np.int32) + # set the astar matrix to 1 for all cells with value 0 in the world map and 0 otherwise + _astar_matrix = np.abs(1.0 - self._new_world) self._astar_grid = Grid(matrix=_astar_matrix.tolist()) - def run_astar(self, target: np.ndarray) -> Tuple[List[Tuple[int, int]], float, int]: + def _run_astar(self, target: np.ndarray) -> Tuple[List[Tuple[int, int]], float, int]: """ Runs the A* algorithm on the current world map and returns the path, path cost and number of nodes visited @@ -247,21 +381,44 @@ def run_astar(self, target: np.ndarray) -> Tuple[List[Tuple[int, int]], float, i finder = AStarFinder(diagonal_movement=DiagonalMovement.always) else: finder = AStarFinder(diagonal_movement=DiagonalMovement.only_when_no_obstacle) - path, runs = finder.find_path(start, end, self._astar_grid) - if len(path) > 0: - path_cost = end.g - else: - path_cost = np.inf + path_w_node, runs = finder.find_path(start, end, self._astar_grid) + path_cost = np.inf + path = [] + if len(path_w_node) > 0: + for node in path_w_node: + is_gn = isinstance(node, GridNode) + x, y = (node.x, node.y) if is_gn else node[:2] + path.append((x, y)) + + path_cost = 0.0 + for dx in np.array(path[1:]) - np.array(path[:-1]): + path_cost += np.sqrt(np.sum(dx**2)) + return path, path_cost, runs - def slice_grid_map(self) -> None: - """ - Slices the grid map into a 2D numpy array - of size (2*view_size, 2*view_size) + def _slice_grid_map(self) -> Tuple[Callable, Union[np.ndarray, None]]: """ + Slices the grid map into a 2D numpy array of size (2*view_size, 2*view_size) + Generate a mapping from the sliced grid map to the original grid map + Returns + ------- + mapping : Callable(int, int) + mapping from the sliced grid map to the original grid map + potential_start_location : Union[np.ndarray, None] + potential start location for the agent + """ # slice the grid map into a 2D numpy array self._new_world = np.ones((int(2 * self.view_size), int(2 * self.view_size))) + + # new taget locations is of shape (2*view_size, 2*view_size) with all values + # as self.MAX_VAL_UINT and dtype as np.uint32 + self._new_target_locations = np.full( + shape=(int(2 * self.view_size), int(2 * self.view_size)), + fill_value=self.MAX_VAL_UINT, + dtype=np.uint32, + ) + y_min = int(max(self._new_world_center[0] - self.view_size, 0)) y_max = int(min(self._new_world_center[0] + self.view_size, self.world_size[0])) x_min = int(max(self._new_world_center[1] - self.view_size, 0)) @@ -273,13 +430,46 @@ def slice_grid_map(self) -> None: x_max_new = int(self.view_size + x_max - self._new_world_center[1]) self._new_world[y_min_new:y_max_new, x_min_new:x_max_new] = self.grid_map[y_min:y_max, x_min:x_max] + self._new_target_locations[y_min_new:y_max_new, x_min_new:x_max_new] = self.target_locations[ + y_min:y_max, x_min:x_max + ] # set the edges to 1 self._new_world[0, :-1] = self._new_world[:-1, -1] = self._new_world[-1, 1:] = self._new_world[1:, 0] = 1 - # create the new astar grid - self.make_astar_matrix() - def find_target(self) -> np.ndarray: + # set the edges to self.MAX_VAL_UINT in the new target locations + self._new_target_locations[0, :-1] = self._new_target_locations[:-1, -1] = self._new_target_locations[ + -1, 1: + ] = self._new_target_locations[1:, 0] = self.MAX_VAL_UINT + # create the new astar grid + self._make_astar_matrix() + + # define the mapping from the sliced grid map to the original grid map + 
def _mapping(x: int, y: int) -> Tuple[int, int]: + return x + x_min - x_min_new, y + y_min - y_min_new + + # find the start location in the sliced grid map + # shuffle the possible_start_locations + self.np_random.shuffle(self.possible_start_locations) + # iterate over possible_start_locations + potential_start_location = None + for start_location in self.possible_start_locations: + # check if the start location along with spacing = 1 is free of obstacles + spacing = 1 + if np.all( + self._new_world[ + start_location[1] - spacing : start_location[1] + (spacing + 1), + start_location[0] - spacing : start_location[0] + (spacing + 1), + ] + == 0 + ): + # if free of obstacles, set the agent location to the start location + potential_start_location = start_location + break + + return _mapping, potential_start_location + + def _find_target(self) -> np.ndarray: """ Finds a target location for the agent to move to @@ -287,72 +477,62 @@ def find_target(self) -> np.ndarray: ------- target_location : np.ndarray target location + + Raises + ------ + RuntimeError + If a target location cannot be found """ # 10% of the time find easy target (close to agent) - easy_target = True if (self.np_random.rand() < 0.10) else False - colliding = True + easy_target = self.np_random.random() < 0.10 + # if not easy path, we don't want to sample within 8 cells of the agent + # increase the count of cells in self._new_target_locations to self.MAX_VAL_UINT + # for cells within 8 cells of the agent else 2 to have nothing close to the agent + spread = 2 if easy_target else 8 count = 0 - while colliding: - nearby_clear = True - if not easy_target: - straight_path = True - target_location = self._agent_location - while np.array_equal(target_location, self._agent_location): + while True: + self._new_target_locations[ + self._agent_location[0] - spread : self._agent_location[0] + (spread + 1), + self._agent_location[1] - spread : self._agent_location[1] + (spread + 1), + ] = self.MAX_VAL_UINT + # get a list of all the target locations with the count less than self.MAX_VAL_UINT + possible_target_locations = np.argwhere(self._new_target_locations < self.MAX_VAL_UINT)[:, ::-1] + # shuffle the possible_target_locations + self.random_gen.shuffle(possible_target_locations) + # iterate while possible_target_locations + for target_location in possible_target_locations: if not easy_target: - random_r = self.np_random.randint(6, int(self.SQRT2 * self.view_size - 1)) - else: - random_r = self.np_random.randint(2, int((self.SQRT2 * self.view_size - 1) / 2)) - random_theta = self.np_random.uniform(-np.pi, np.pi) - random_location = np.array([random_r * np.cos(random_theta), random_r * np.sin(random_theta)]) - random_location = np.round(random_location).astype(np.int32) - target_location = random_location + self._agent_location - if np.amin(target_location) < 0 or np.amax(target_location) >= int(2 * self.view_size): - target_location = self._agent_location - if self._new_world[target_location[1], target_location[0]] == 0: - spacing = 1 - # check immediate neighbors for obstacles - y_min = int(max(target_location[1] - spacing, 0)) - y_max = int(min(target_location[1] + spacing + 1, 2 * self.view_size)) - x_min = int(max(target_location[0] - spacing, 0)) - x_max = int(min(target_location[0] + spacing + 1, 2 * self.view_size)) - if np.count_nonzero(self._new_world[y_min:y_max, x_min:x_max] == 1.0) > 0: - nearby_clear = False - - if nearby_clear: - if not easy_target: - # check if its a straight path - rr, cc = draw.line( - 
self._agent_location[0], - self._agent_location[1], - target_location[0], - target_location[1], - ) - straight_line = list(zip(rr, cc)) - for pt in straight_line: - if self._new_world[pt[1], pt[0]] == 1: - straight_path = False - break - - if not straight_path: - # Astar search to find the shortest path to the target - self.astarPath, self.cost_astar, self.astar_runs = self.run_astar(target_location) - if len(self.astarPath) > 0 and self.astar_runs > 60: - colliding = False - else: - # Astar search to find the shortest path to the target - self.astarPath, self.cost_astar, self.astar_runs = self.run_astar(target_location) - if len(self.astarPath) > 0: - colliding = False + # check if its a straight path + rr, cc = draw.line( + self._agent_location[0], + self._agent_location[1], + target_location[0], + target_location[1], + ) + straight_pixels = self._new_world[cc, rr] + straight_path = not np.any(straight_pixels == 1) + + if straight_path: + continue + + # Astar search to find the shortest path to the target + self.astarPath, self.cost_astar, self.astar_runs = self._run_astar(target_location) + if len(self.astarPath) > 0: + if easy_target: + return target_location + if self.astar_runs > 60: + return target_location + + self.logger.info(" ---Target not set, soft reset---") + # increase the count of agent location in the self.state_counts to self.MAX_VAL_UINT + # so that the agent location is not sampled again + mapped_start = self._mapping(self._agent_location[0], self._agent_location[1]) + self.start_locations[mapped_start[::-1]] = self.MAX_VAL_UINT + self._soft_reset() count += 1 - if count > 9999: - logger.info( - " ---Target not set in %s tries! Setting agent back to reset conditions!", - count, - ) - count = 0 - self.soft_reset() - return target_location + if count > 100000: + raise RuntimeError("Cannot find a target location") def _get_info(self) -> Dict: """ @@ -376,6 +556,7 @@ def _get_info(self) -> Dict: "current step": self.current_step, "no. 
of collisions": self.num_collisions, "grid map": self._new_world, + "obs with astar path": self.obs_world_astar, } # base observation is the world map which remains constant throughout the episode @@ -406,6 +587,12 @@ def _get_obs(self) -> Union[np.ndarray, OrderedDict]: self.obs_world[:, self._agent_location[1], self._agent_location[0]] = 0.3 * 255.0 self.obs_world[0, self._agent_location[1], self._agent_location[0]] = 1.0 * 255.0 + # mark the astar path in the self.obs_world_astar + self.obs_world_astar = np.copy(self.obs_world) + for pt in self.astarPath: + self.obs_world_astar[:, pt[1], pt[0]] = 0.6 * 255.0 + self.obs_world_astar = self.obs_world_astar.astype(dtype=np.uint8) + if self.show_path and (len(self._path) != 0): for pt in self._path: if self.obs_world[2, pt[1], pt[0]] == 0: @@ -418,69 +605,124 @@ def _get_obs(self) -> Union[np.ndarray, OrderedDict]: dsize=(self.image_size, self.image_size), interpolation=cv2.INTER_NEAREST, ) - self.obs_world = np.moveaxis(self.obs_world, -1, 0) - if not self.multi_output: - return self.obs_world.astype(dtype=np.uint8) + self.obs_world = np.moveaxis(self.obs_world, -1, 0).astype(dtype=np.uint8) + if not self.multi_output: + return self.obs_world return OrderedDict( { # normalize delta "delta": ((self._target_location - self._agent_location) / (2 * self.view_size - 1)).astype(np.float32), - "world": self.obs_world.astype(dtype=np.uint8), + "world": self.obs_world, } ) - # set the agent's location at the center of the map - def soft_reset(self) -> None: + def _get_new_index_from_counts(self, counts_mat: np.ndarray, alpha_p: float = 1.0) -> Tuple[int, int]: + """ + Returns a new index sampled from the counts matrix + + Parameters + ---------- + counts_mat : np.ndarray + counts matrix from which is used to sample the new index + alpha_p : float + parameter to control the sampling probability + + Returns + ------- + sampled_index : Tuple[int, int] + sampled index from the counts matrix in the form (y, x) + """ + flattened_counts = counts_mat.flatten() + # higher the count, lower the probability of sampling that cell + probabilities = np.exp(-alpha_p * flattened_counts) + probabilities /= np.sum(probabilities) + + # sample a cell based on the probabilities + sampled_index = self.np_random.choice(np.arange(len(flattened_counts)), p=probabilities) + # convert the sampled index to 2D index + sampled_index = np.unravel_index(sampled_index, counts_mat.shape) + return sampled_index # (y, x) + + # set the world center based on sampling from current counts + # tries to set the world center to cells with lower counts + def _soft_reset(self) -> None: """Moves the agent to the center of the map and resets the target""" - self._new_world_center = np.array([self.world_size[1] / 2, self.world_size[0] / 2], dtype=np.int32) - self.slice_grid_map() - self._agent_location = np.array([self.view_size, self.view_size], dtype=np.int32) + sampled_index = self._get_new_index_from_counts(self.start_locations) + self._new_world_center = np.array([sampled_index[0], sampled_index[1]], dtype=np.int32) + self._mapping, potential_start_location = self._slice_grid_map() + if potential_start_location is None: + potential_start_location = np.array([self.view_size, self.view_size], dtype=np.int32) + self._agent_location = potential_start_location self.target_num = 0 def reset( - self, return_info: bool = False - ) -> Union[Tuple[Union[np.ndarray, OrderedDict], Dict], np.ndarray, OrderedDict]: + self, + *, + seed: Union[int, None] = None, + options: Union[Dict, None] = None, + ) -> 
Tuple[Union[np.ndarray, OrderedDict], Dict]: """ - Resets the environment to the initial state and returns the initial observation + Resets the environment to the initial state and returns the initial observation and info Parameters ---------- - return_info : bool, optional - whether to return the info dictionary, by default False + seed : Union[int, None] + seed to use for the environment + options : Union[Dict, None] + options to use for the environment Returns ------- obs : np.ndarray or OrderedDict observation from manystep environment - info : Dict (optional) - info dictionary (optional) of the last step in the stack + info : Dict + info dictionary of the last step in the stack """ + + # Initialize the RNG if the seed is manually passed + super().reset(seed=seed) + + # seed the other random number generator + if seed is not None: + self.random_gen, _ = seeding.np_random(seed) + self.target_num += 1 if (self.target_num % self.soft_reset_freq) == 0: - self.soft_reset() - if self.test_mode: - self.seed(1327455) + self._soft_reset() else: self._new_world_center = self._next_new_world_center self.current_step = 0 self.num_collisions = 0 - self.slice_grid_map() - self._agent_location = np.array([self.view_size, self.view_size], dtype=np.int32) + self._mapping, potential_start_location = self._slice_grid_map() + if potential_start_location is None: + potential_start_location = np.array([self.view_size, self.view_size], dtype=np.int32) + self._agent_location = potential_start_location # We will sample the target's location randomly until it does # not coincide with the agent's location - self._target_location = self.find_target() - self._next_new_world_center = self._new_world_center + (self._target_location - self._agent_location)[::-1] - self.min_distance_to_target = float(np.linalg.norm(self._target_location - self._agent_location)) - self.min_cost_astar = self.cost_astar + self._target_location = self._find_target() + + # increase the count of the start and target locations + mapped_start = self._mapping(self._agent_location[0], self._agent_location[1]) + self.start_locations[mapped_start[::-1]] += 1 + mapped_target = self._mapping(self._target_location[0], self._target_location[1]) + self.target_locations[mapped_target[::-1]] += 1 + + # set the next new world center at the mapped target location + self._next_new_world_center = mapped_target[::-1] + self.ini_distance_to_target = self.min_distance_to_target = float( + np.linalg.norm(self._target_location - self._agent_location) + ) + self.ini_astarPath = self.astarPath + self.ini_cost_astar = self.min_cost_astar = self.cost_astar self._path = [] self.action = [-1, -1] self.action_cost = 0.0 self._create_base_obs() observation = self._get_obs() - return (observation, self._get_info()) if return_info else observation + return (observation, self._get_info()) def _compute_reward(self, completion_reward: bool = False): """ @@ -490,7 +732,7 @@ def _compute_reward(self, completion_reward: bool = False): def _take_action(self, action: np.ndarray) -> Tuple[List, bool]: """ - Takes the action and returns the new agent lo + Takes the action and returns the new agent location """ raise NotImplementedError @@ -500,9 +742,9 @@ def close(self) -> None: """ plt.close("all") - def step(self, action: np.ndarray) -> Tuple[Union[np.ndarray, OrderedDict], float, bool, Dict]: + def step(self, action: np.ndarray) -> Tuple[Union[np.ndarray, OrderedDict], float, bool, bool, Dict]: """ - Takes a step in the environment and returns the observation, reward, done and 
info + Takes a step in the environment and returns the observation, reward, terminated, truncated and info Parameters ---------- @@ -511,10 +753,19 @@ def step(self, action: np.ndarray) -> Tuple[Union[np.ndarray, OrderedDict], floa Returns ------- - Observation, Reward, Done, Info : Tuple[Union[np.ndarray, OrderedDict], float, bool, Dict] - the observation, reward, done and info + observation : np.ndarray or OrderedDict + observation + reward : float + reward + terminated : bool + whether the episode terminated + truncated : bool + whether the episode was truncated + info : Dict + info dictionary """ - done = False + terminated = False + truncated = False valid_action_path, collision = self._take_action(action) self.current_step += 1 @@ -526,13 +777,13 @@ def step(self, action: np.ndarray) -> Tuple[Union[np.ndarray, OrderedDict], floa if collision: self.num_collisions += 1 reward = -1.0 - else: # only do it if the agent moves + elif len(valid_action_path) > 1: # only do it if the agent moves self._path = valid_action_path[:-1] # remove the agent location if np.array_equal(self._target_location, self._agent_location): - done = True + terminated = True reward += self._compute_reward(completion_reward=True) self.num_targets_reached += 1 - logger.info( + self.logger.info( "%s Target reached in %s steps, Collisions : %s", self.ordinal(self.num_targets_reached), self.current_step, @@ -540,43 +791,49 @@ def step(self, action: np.ndarray) -> Tuple[Union[np.ndarray, OrderedDict], floa ) else: reward += self._compute_reward(completion_reward=False) - self.min_distance_to_target = float(np.linalg.norm(self._target_location - self._agent_location)) - self.min_cost_astar = self.cost_astar + else: + self.logger.warning("No movement caused by action: %s!", action) + + self.min_distance_to_target = float(np.linalg.norm(self._target_location - self._agent_location)) + self.min_cost_astar = self.cost_astar if self.num_collisions > self.max_collisions: - done = True + terminated = True if not self.partial_reward: reward += -self.cost_astar / 100 - logger.info(" --Max Collisions! Collisions: %s/%s", self.num_collisions, self.max_collisions + 1) - elif (not done) and (self.current_step > self.max_steps): - done = True + self.logger.info( + " --Max Collisions! Collisions: %s/%s", + self.num_collisions, + self.max_collisions + 1, + ) + elif (not terminated) and (self.current_step > self.max_steps): + terminated = True if not self.partial_reward: reward += -self.cost_astar / 100 - logger.info( - " --Max Steps: %s/%s!", + self.logger.info( + " --Max Steps! 
Steps: %s/%s", self.current_step, self.max_steps + 1, ) observation = self._get_obs() info = self._get_info() - return observation, reward, done, info + return observation, reward, terminated, truncated, info - def render(self, mode="human") -> None: + def render(self) -> None: """ Renders the environment - Parameters - ---------- - mode : str, optional - the mode to render in, by default "human" - Returns ------- None """ - plt.title("Voxelgym") - plt.imshow(np.moveaxis(self.obs_world.astype(np.uint8), 0, 2)) + plt.subplot(1, 2, 1) + plt.title("Agent View") + plt.imshow(np.moveaxis(self.obs_world, 0, 2)) + plt.subplot(1, 2, 2) + plt.title("Current Astar Path") + plt.imshow(np.moveaxis(self.obs_world_astar, 0, 2)) palette = [[77, 77, 255], [77, 255, 77], [255, 77, 77], [144, 144, 144]] classes = ["obstacles", "target", "agent", "last visited"] diff --git a/voxelgym2D/envs/env_one_step.py b/voxelgym2D/envs/env_one_step.py index 0c95189..98c1042 100644 --- a/voxelgym2D/envs/env_one_step.py +++ b/voxelgym2D/envs/env_one_step.py @@ -1,37 +1,42 @@ -"""Environment corresponding to Onestep action space""" +"""Voxel environment corresponding to Onestep action space""" -from typing import List, Tuple +from typing import List, Optional, Tuple -import gym +import gymnasium as gym import numpy as np from .base_env import BaseEnv class VoxelGymOneStep(BaseEnv): - """Environment corresponding to Onestep action space""" + """Voxel environment corresponding to Onestep action space""" def __init__( self, - mapfile: str = "200x200x200_dense.npy", + render_mode: Optional[str] = None, + mapfile: str = "600x600.npy", view_size: int = 21, + image_size: int = 42, max_collisions: int = 0, max_steps: int = 60, show_path: bool = True, multi_output: bool = False, partial_reward: bool = True, - image_size: int = 42, - test_mode: bool = False, inference_mode: bool = False, discrete_actions: bool = True, + log_level: str = "ERROR", ): """ Parameters ---------- + render_mode : Optional[str], optional + render mode, by default None mapfile : str name of the map file in the maps folder view_size : int size of the view window for observation + image_size : int + size of the image to be returned as observation max_collisions : int maximum number of collisions allowed before episode ends max_steps : int @@ -42,34 +47,34 @@ def __init__( whether to add additional outputs in the observation partial_reward : bool whether to give rewards for each step - image_size : int - size of the image to be returned as observation - test_mode : bool - whether to run in test mode, for evaluation during training inference_mode : bool whether to run in inference mode discrete_actions : bool whether to use discrete actions + log_level : str, optional + log level, by default "ERROR". 
One of "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" """ super().__init__( + render_mode, mapfile, view_size, + image_size, max_collisions, max_steps, show_path, multi_output, partial_reward, - image_size, - test_mode, inference_mode, + log_level, ) self.discrete_actions = discrete_actions if self.discrete_actions: self.action_space = gym.spaces.Discrete(len(self.delta)) else: - self.action_space = gym.spaces.Box(-1, 1, shape=(len(self.delta),)) + # self.action_space = gym.spaces.Box(-1, 1, shape=(len(self.delta))) + self.action_space = gym.spaces.Box(-1, 1, shape=[1]) def _compute_reward(self, completion_reward: bool = False) -> float: """ @@ -91,30 +96,49 @@ def _compute_reward(self, completion_reward: bool = False) -> float: if completion_reward: reward_completion = 0.5 + return reward_completion - else: - reward_completion = -self.action_cost - - _new_astarPath, _cost_astar, self.astar_runs = self.run_astar(self._target_location) - if len(_new_astarPath) > 0: - self.astarPath = _new_astarPath - self.cost_astar = _cost_astar + reward_completion = -self.action_cost - reward_euc_astar = self.min_cost_astar - _cost_astar - else: - # fallback to euclidean distance if astar fails - reward_euc_astar = self.min_distance_to_target - float( - np.linalg.norm(self._target_location - self._agent_location) - ) + _new_astarPath, _cost_astar, self.astar_runs = self._run_astar(self._target_location) + if len(_new_astarPath) > 0: + self.astarPath = _new_astarPath + self.cost_astar = _cost_astar - if completion_reward: - return reward_completion - if (self.cost_astar < self.min_cost_astar) and self.partial_reward: - return round((2 * reward_euc_astar + reward_completion) / 10, 4) - if (self.cost_astar > self.min_cost_astar) and self.partial_reward: + reward_euc_astar = self.min_cost_astar - _cost_astar + improved = self.min_cost_astar > _cost_astar + else: + # fallback to euclidean distance if astar fails + current_distance_to_target = float(np.linalg.norm(self._target_location - self._agent_location)) + reward_euc_astar = self.min_distance_to_target - current_distance_to_target + improved = self.min_distance_to_target > current_distance_to_target + + if self.partial_reward: + if improved: + return round((2 * reward_euc_astar + reward_completion) / 10, 4) return round((reward_euc_astar + reward_completion) / 10, 4) + return round(reward_completion / 10, 4) + @staticmethod + def action_to_bins(action: np.ndarray) -> int: + """ + Converts the action to bins of size 1/4 and returns the bin number in the range [0, 7] for actions + + Parameters + ---------- + action : np.ndarray + action to be converted to bin number + + Returns + ------- + bin : int + bin number in the range [0, 7] + """ + # Clip action to [-1, 1] + clipped = np.clip(action, -1, 1) + return min(int((clipped + 1) * 4), 7) + def _take_action(self, action: np.ndarray) -> Tuple[List, bool]: """ Takes the action and updates the agent location @@ -132,11 +156,12 @@ def _take_action(self, action: np.ndarray) -> Tuple[List, bool]: True if the agent collides with an obstacle, else False """ if not self.discrete_actions: - self.action = list(self.delta[np.argmax(action)]) + # convert angle to bin number + action_idx = self.action_to_bins(action) + self.action = list(self.delta[action_idx]) else: self.action = list(self.delta[int(action)]) - self.action_cost = float(np.linalg.norm(self.action)) action_location = self._agent_location + np.array(self.action, dtype=np.int32) action_path = [action_location.tolist()] @@ -154,4 +179,10 @@ def 
_take_action(self, action: np.ndarray) -> Tuple[List, bool]: # complete path by including initial location valid_action_path.insert(0, self._agent_location.tolist()) + # compute action cost + self.action_cost = 0.0 + for _, dx in enumerate(np.array(valid_action_path[1:]) - np.array(valid_action_path[:-1])): + self.action_cost += np.sqrt(np.sum(dx**2)) + self.action_cost = round(self.action_cost, 4) + return valid_action_path, collision From d40a2f78693b96ca0dfdb7472e4457802fda9938 Mon Sep 17 00:00:00 2001 From: Hari Date: Fri, 29 Dec 2023 16:29:49 +0100 Subject: [PATCH 3/9] examples updated for newer sb3 versions --- examples/onestep.py | 14 ++++++ examples/train_ppo.py | 107 ++++++++++++++---------------------------- 2 files changed, 48 insertions(+), 73 deletions(-) create mode 100644 examples/onestep.py diff --git a/examples/onestep.py b/examples/onestep.py new file mode 100644 index 0000000..19d9f80 --- /dev/null +++ b/examples/onestep.py @@ -0,0 +1,14 @@ +import gymnasium as gym + +env = gym.make("voxelgym2D:onestep-v0") +observation, info = env.reset(seed=123456) + +done = False +while not done: + action = env.action_space.sample() # agent policy that uses the observation and info + observation, reward, terminated, truncated, info = env.step(action) + + done = terminated or truncated + env.render() + +env.close() diff --git a/examples/train_ppo.py b/examples/train_ppo.py index 56759bb..a82bde1 100644 --- a/examples/train_ppo.py +++ b/examples/train_ppo.py @@ -2,11 +2,11 @@ import os from typing import Callable -import gym +import gymnasium as gym import numpy as np import torch from stable_baselines3 import PPO -from stable_baselines3.common.callbacks import BaseCallback +from stable_baselines3.common.callbacks import BaseCallback, EvalCallback from stable_baselines3.common.env_util import make_vec_env from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.monitor import Monitor @@ -14,9 +14,6 @@ from stable_baselines3.common.torch_layers import BaseFeaturesExtractor from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv from torch import nn -from tqdm.auto import tqdm - -import voxelgym2D # Create log dir LOG_DIR = "./logs/ppo_onestep/" @@ -61,7 +58,6 @@ class SaveOnBestTrainingRewardCallback(BaseCallback): """ Callback for saving a model (the check is done every ``check_freq`` steps) based on the training reward (in practice, we recommend using ``EvalCallback``). - :param check_freq: (int) :param log_dir: (str) Path to the folder where the model will be saved. It must contains the file created by the ``Monitor`` wrapper. 
@@ -78,10 +74,8 @@ def __init__(self, check_freq, log_dir, verbose=1): def _init_callback(self) -> None: # Create folder if needed - if self.save_path is not None: - os.makedirs(self.save_path, exist_ok=True) - if self.chckpoint_path is not None: - os.makedirs(self.chckpoint_path, exist_ok=True) + if self.log_dir is not None: + os.makedirs(self.log_dir, exist_ok=True) def _on_step(self) -> bool: if self.n_calls % self.check_freq == 0: @@ -89,15 +83,15 @@ def _on_step(self) -> bool: x, y = ts2xy(load_results(self.log_dir), "timesteps") if len(x) > 0: # Mean training reward over the last 100 episodes - mean_reward = np.mean(y[-100:]) + _mean_reward = np.mean(y[-100:]) if self.verbose > 0: print(f"Num timesteps: {self.num_timesteps}") print(f"Best mean reward: {self.best_mean_reward:.2f}") - print(f"Last mean reward per episode: {mean_reward:.2f}") + print(f"Last mean reward per episode: {_mean_reward:.2f}") # New best model, you could save the agent here - if mean_reward > self.best_mean_reward: - self.best_mean_reward = mean_reward + if _mean_reward > self.best_mean_reward: + self.best_mean_reward = _mean_reward # Example for saving best model if self.verbose > 0: print(f"Saving new best model at {x[-1]} timesteps") @@ -112,44 +106,10 @@ def _on_step(self) -> bool: return True -class ProgressBarCallback(BaseCallback): - """ - :param pbar: (tqdm.pbar) Progress bar object - """ - - def __init__(self, pbar): - super().__init__() - self._pbar = pbar - - def _on_step(self): - # Update the progress bar: - self._pbar.n = self.num_timesteps - self._pbar.update(0) - - -# this callback uses the 'with' block, allowing for correct initialisation and destruction -class ProgressBarManager: - """For tqdm progress bar in a with block.""" - - def __init__(self, total_timesteps): # init object with total timesteps - self.pbar = None - self.total_timesteps = total_timesteps - - def __enter__(self): # create the progress bar and callback, return the callback - self.pbar = tqdm(total=self.total_timesteps) - return ProgressBarCallback(self.pbar) - - def __exit__(self, exc_type, exc_val, exc_tb): # close the callback - self.pbar.n = self.total_timesteps - self.pbar.update(0) - self.pbar.close() - - # scheduler def linear_schedule(initial_value: float) -> Callable[[float], float]: """ Linear learning rate schedule. - :param initial_value: Initial learning rate. :return: schedule that computes current learning rate depending on remaining progress @@ -158,7 +118,6 @@ def linear_schedule(initial_value: float) -> Callable[[float], float]: def func(progress_remaining: float) -> float: """ Progress will decrease from 1 (beginning) to 0. 
- :param progress_remaining: :return: current learning rate """ @@ -176,15 +135,13 @@ def func(progress_remaining: float) -> float: seed=1327455, monitor_dir=LOG_DIR, env_kwargs={ - "mapfile": "200x200x200_dense.npy", + "mapfile": "600x600.npy", "view_size": 21, + "image_size": 42, "max_collisions": 0, "max_steps": 60, - "show_path": True, "discrete_actions": True, - "multi_output": False, - "partial_reward": True, - "image_size": 42, + "render_mode": "None", }, vec_env_cls=SubprocVecEnv, ) @@ -209,33 +166,41 @@ def func(progress_remaining: float) -> float: ent_coef=0.01, vf_coef=0.5, verbose=1, - tensorboard_log="tb_logs/ppo_onestep", + tensorboard_log="tb_logs/ppo_onestep/", target_kl=0.4, ) + # Create eval env eval_env = DummyVecEnv( [ lambda: Monitor( gym.make( "voxelgym2D:onestep-v0", - mapfile="200x200x200_dense.npy", + mapfile="600x600.npy", view_size=21, + image_size=42, max_collisions=0, max_steps=60, - show_path=True, discrete_actions=True, - multi_output=False, - partial_reward=True, - test_mode=True, - image_size=42, + render_mode="None", ), filename=os.path.join(LOG_DIR, "eval"), ) ] ) - # n_eval_episodes = 50 since soft_reset_freq in base_env is 50 - mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=50) + # Use deterministic actions for evaluation + eval_callback = EvalCallback( + eval_env, + best_model_save_path=None, + log_path=os.path.join(LOG_DIR, "eval"), + eval_freq=10000, + n_eval_episodes=50, + deterministic=True, + render=False, + ) + + mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=50, warn=False) print(f"mean_reward:{mean_reward:.2f} +/- {std_reward:.2f}") # Create Callback @@ -243,17 +208,13 @@ def func(progress_remaining: float) -> float: TOTAL_TIME_STEPS = 10000000 - with ProgressBarManager(TOTAL_TIME_STEPS) as progress_callback: - # This is equivalent to callback=CallbackList([progress_callback, auto_save_callback]) - model.learn( - total_timesteps=TOTAL_TIME_STEPS, - eval_env=eval_env, - n_eval_episodes=50, - eval_freq=10000, - callback=[progress_callback, auto_save_callback], - ) + model.learn( + total_timesteps=TOTAL_TIME_STEPS, + callback=[auto_save_callback, eval_callback], + progress_bar=True, + ) model.save(os.path.join(LOG_DIR, "ppo_saved")) - mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=50) + mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=50, warn=False) print(f"mean_reward:{mean_reward:.2f} +/- {std_reward:.2f}") From e7c694af335b710e2c22f98bad132980f84aec88 Mon Sep 17 00:00:00 2001 From: Hari Date: Fri, 29 Dec 2023 16:29:59 +0100 Subject: [PATCH 4/9] Update test_env.py --- tests/test_env.py | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/tests/test_env.py b/tests/test_env.py index b496e1b..5f5164d 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -1,19 +1,17 @@ """Test the environment.""" -import gym +import gymnasium as gym import numpy as np -import pytest -from gym.utils.env_checker import check_env +from gymnasium.utils.env_checker import check_env -import voxelgym2D +from voxelgym2D.envs import VoxelGymOneStep def test_onsestep(): """test onestep env""" env = gym.make("voxelgym2D:onestep-v0") - check_env(env) - _ = env.reset() - _, i = env.reset(return_info=True) + check_env(env.unwrapped, skip_render_check=True) + _, i = env.reset(seed=1234) # assert i is a dict assert isinstance(i, dict) @@ -22,7 +20,19 @@ def test_onsestep(): env = gym.make( "voxelgym2D:onestep-v0", 
discrete_actions=False, - inference_mode=True, - multi_output=True, ) - check_env(env) + check_env(env.unwrapped, skip_render_check=True) + + +def test_action_to_bins(): + """Test the action to bins function""" + assert VoxelGymOneStep.action_to_bins(np.array([-1])) == 0 + assert VoxelGymOneStep.action_to_bins(np.array([-0.75])) == 1 + assert VoxelGymOneStep.action_to_bins(np.array([-0.5])) == 2 + assert VoxelGymOneStep.action_to_bins(np.array([-0.25])) == 3 + assert VoxelGymOneStep.action_to_bins(np.array([0])) == 4 + + assert VoxelGymOneStep.action_to_bins(np.array([0.25])) == 5 + assert VoxelGymOneStep.action_to_bins(np.array([0.5])) == 6 + assert VoxelGymOneStep.action_to_bins(np.array([0.75])) == 7 + assert VoxelGymOneStep.action_to_bins(np.array([1])) == 7 From 57b0a82ea3967d8fe4e3749dddfab10c9fa6dfdc Mon Sep 17 00:00:00 2001 From: Hari Date: Fri, 29 Dec 2023 16:30:09 +0100 Subject: [PATCH 5/9] version bumped --- voxelgym2D/version.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 voxelgym2D/version.txt diff --git a/voxelgym2D/version.txt b/voxelgym2D/version.txt new file mode 100644 index 0000000..9325c3c --- /dev/null +++ b/voxelgym2D/version.txt @@ -0,0 +1 @@ +0.3.0 \ No newline at end of file From 1b08d078bb1a3a9d584d4ed4b1e4d84d522e0e3f Mon Sep 17 00:00:00 2001 From: Hari Date: Fri, 29 Dec 2023 16:30:53 +0100 Subject: [PATCH 6/9] requirements updated to have SB3 2.0 or later and gymnasium --- .github/workflows/github-actions.yml | 9 ++++---- .github/workflows/publish-pypi.yml | 12 ++-------- LICENSE | 2 +- setup.cfg | 7 +++++- setup.py | 34 ++++++++++++++++++---------- tox.ini | 17 +++++++++----- 6 files changed, 47 insertions(+), 34 deletions(-) diff --git a/.github/workflows/github-actions.yml b/.github/workflows/github-actions.yml index 60588f1..ed37e39 100644 --- a/.github/workflows/github-actions.yml +++ b/.github/workflows/github-actions.yml @@ -1,4 +1,4 @@ -name: CI +name: Test on: push: @@ -17,7 +17,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -26,7 +26,8 @@ jobs: - name: Install dependencies run: | - pip install setuptools==65.5.0 + python -m pip install --upgrade pip + pip install setuptools wheel twine pip install -e .[dev] - name: Pylint @@ -43,7 +44,7 @@ jobs: - name: Pytest run: | - coverage run -m pytest tests/ + coverage run --source voxelgym2D -m pytest tests/ coverage report - name: Build docs diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index 77558a5..091d610 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -10,7 +10,7 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v4 with: @@ -20,19 +20,11 @@ jobs: python -m pip install --upgrade pip pip install setuptools wheel twine - - name: Build and publish to Test PyPI - env: - TWINE_USERNAME: ${{ secrets.TEST_PYPI_USERNAME }} - TWINE_PASSWORD: ${{ secrets.TEST_PYPI_PASSWORD }} - run: | - python setup.py sdist bdist_wheel - twine check dist/* - twine upload --repository testpypi dist/* - - name: Build and publish to PyPI env: TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} run: | python setup.py sdist bdist_wheel + twine check dist/* twine upload dist/* diff --git a/LICENSE b/LICENSE index 72b2e54..7893339 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 
+1,6 @@ MIT License -Copyright (c) 2023 Hari +Copyright (c) 2023 Harisankar Babu Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/setup.cfg b/setup.cfg index 53d2dd6..e76d18d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -20,6 +20,11 @@ source = voxelgym2D [coverage:report] show_missing = True +exclude_also = + def __repr__ + def __str__ + def __lt__ + def __eq__ [pylint] disable = missing-docstring, @@ -32,7 +37,7 @@ disable = missing-docstring, too-many-arguments, too-many-branches, # many functions will naturally have unused arguments. - unused-argument + unused-argument, [pylint.FORMAT] max-line-length = 120 diff --git a/setup.py b/setup.py index b25b624..1ddcd93 100644 --- a/setup.py +++ b/setup.py @@ -1,12 +1,18 @@ +import os + from setuptools import find_packages, setup +# read the version from version.txt +with open(os.path.join("voxelgym2D", "version.txt"), encoding="utf-8") as file_handler: + __version__ = file_handler.read().strip() + setup( name="voxelgym2D", - version="0.2", + version=__version__, description="Gym environment for 2D grid path planning", author="Harisankar Babu", author_email="harisankar995@gmail.com", - keywords="reinforcement-learning machine-learning gym openai python data-science", + keywords=["reinforcement-learning", "machine-learning", "gym", "openai", "python", "gymnasium"], license="MIT", url="https://github.com/harisankar95/voxelgym2D.git", classifiers=[ @@ -18,17 +24,16 @@ "Programming Language :: Python :: 3.8", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], - packages=find_packages(), + packages=[package for package in find_packages() if package.startswith("voxelgym2D")], package_data={ - "voxelgym2D": ["envs/maps/*.npy"], + "voxelgym2D": ["envs/maps/*.npy", "version.txt"], }, install_requires=[ - # sb3 support for gym 0.21 - "gym==0.21", + "gymnasium", "numpy", "scikit-image", "opencv-python", - "pathfinding==1.0.1", + "pathfinding>=1.0.4", # rendering "matplotlib", ], @@ -43,11 +48,16 @@ "tox", "sphinx", "sphinx_rtd_theme", + "recommonmark", + "nbsphinx", + "sphinx-autodoc-typehints", + "sphinx-copybutton", + "sphinx-prompt", + "sphinx-notfound-page", + "sphinx-version-warning", + "sphinx-autodoc-annotation", ], - "sb3": [ - "stable-baselines3[extra]==1.6.2", - "sb3-contrib==1.6.2", - ], + "sb3": ["stable-baselines3[extra]>=2.0.0", "sb3-contrib>=2.0.0", "rl_zoo3>=2.0.0"], }, - python_requires=">=3.7", + python_requires=">=3.8", ) diff --git a/tox.ini b/tox.ini index bf296a4..ebb3a90 100644 --- a/tox.ini +++ b/tox.ini @@ -1,21 +1,26 @@ [tox] -requires = tox>=4.0.0 -envlist = py38, linter, formatter +requires = tox>=4.5.0 +envlist = clean, py38, linter, formatter [testenv] description = Run tests with pytest under {basepython} +use_develop = true deps = - setuptools==65.5.0 coverage pytest + pytest-cov commands = - coverage run -m pytest tests/ - coverage report + pytest --cov-report term-missing --cov-config=setup.cfg --cov=voxelgym2D --cov-append tests/ + +[testenv:clean] +deps = coverage +skip_install = true +commands = coverage erase [testenv:linter] description = Run pylint to check code quality and mypy to check type hints +use_develop = true deps = - setuptools==65.5.0 pylint mypy commands = From d72c441e486b3a9b45a2f9672367756b864556b8 Mon Sep 17 00:00:00 2001 From: Hari Date: Fri, 29 Dec 2023 16:34:49 +0100 Subject: [PATCH 7/9] Update README.md --- README.md | 50 
+++++++++++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index f760489..912a017 100644 --- a/README.md +++ b/README.md @@ -1,37 +1,39 @@ # voxelgym2D + A gym environment for voxel/grid based reinforcement learning for path planning.
- - + +
- -
+ + - +
## Results with [SB3](https://github.com/DLR-RM/stable-baselines3) (v1.6.2) : PPO :smile: + Here are the results of training a PPO agent on the `onestep-v0` using the example [here](examples/train_ppo.py). Below you will find the episode reward and episode length over steps during training. As the agent learns, the episode reward increases and the episode length reduces are the agent learns to identify the goal and reach it in the shortest possible path.
- - + +
- -
+ + - +
- ## Installation + ```bash pip install git+https://github.com/harisankar95/voxelgym2D.git ``` @@ -43,7 +45,9 @@ pip install voxelgym2D ``` ## Development + To install the package in development mode, run the following command in the root directory of the repository: + ```bash git clone https://github.com/harisankar95/voxelgym2D.git ~/path/to/repo cd ~/path/to/repo @@ -54,41 +58,53 @@ pip install -e .[dev,sb3] ``` ## Usage + ```python import voxelgym2D -import gym +import gymnasium as gym env = gym.make("voxelgym2D:onestep-v0") -env.reset() +observation, info = env.reset(seed=123456) env.render() ``` ## Examples + The examples can be found [here](examples). ## License + This project is licensed under the terms of the [MIT license](LICENSE). ## Documentation + The documentation can be found [here](https://harisankar95.github.io/voxelgym2D/). ## Changelog + ### 0.1.0 + - Initial release of voxelgym 2D environments tested with stable_baselines 3 (v1.6.2) and python 3.8 + ### 0.2.0 + - Available on PyPI, sphinx documentation in work +### 0.3.0 + +- Migration to gymnasium +- Agent can now be reset to multiple start positions + ## TODO + - [x] Add 2D environments -- [ ] Test with gym 0.26.2 - [ ] Add documentation -## Known issues -- [ ] Currently only supports gym==0.21.0 :neutral_face:, hence setuptools==65.5.0 is required to install gym. - ## Contributing + Contributions are welcome! Please open an issue or a pull request. ## References + - [OpenAI Gym](https://arxiv.org/abs/1606.01540) - [Stable Baselines 3](http://jmlr.org/papers/v22/20-1364.html) From 7689fa9800a5ff1507d2ca472ed0e2590cfa9ff6 Mon Sep 17 00:00:00 2001 From: Hari Date: Fri, 29 Dec 2023 16:38:13 +0100 Subject: [PATCH 8/9] update readme --- .github/workflows/{github-actions.yml => test-main.yml} | 0 README.md | 5 +++++ 2 files changed, 5 insertions(+) rename .github/workflows/{github-actions.yml => test-main.yml} (100%) diff --git a/.github/workflows/github-actions.yml b/.github/workflows/test-main.yml similarity index 100% rename from .github/workflows/github-actions.yml rename to .github/workflows/test-main.yml diff --git a/README.md b/README.md index 912a017..5cb030f 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,10 @@ # voxelgym2D +[![MIT License](https://img.shields.io/github/license/harisankar95/voxelgym2D)](LICENSE) +[![PyPI](https://img.shields.io/pypi/v/voxelgym2D)](https://pypi.org/project/voxelgym2D/) +[![Pipeline](https://github.com/harisankar95/voxelgym2D/actions/workflows/test-main.yml/badge.svg?branch=main)](https://github.com/harisankar95/voxelgym2D/actions/workflows/test-main.yml) +[![codestyle](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) + A gym environment for voxel/grid based reinforcement learning for path planning.
From 5aac0045623c843146394b3cff1ef08c63b55628 Mon Sep 17 00:00:00 2001 From: Hari Date: Fri, 29 Dec 2023 16:51:44 +0100 Subject: [PATCH 9/9] updated sphinx docs --- docs/INSTALL.md | 28 +++++ docs/INTRO.md | 16 +++ docs/USAGE.md | 21 ++++ docs/_static/custom.css | 77 ++++++++++++ docs/_templates/versions.html | 27 +++++ docs/conf.py | 212 ++++++++++++++++++++++++++++++---- docs/index.rst | 6 +- docs/voxelgym2D.envs.rst | 13 +-- docs/voxelgym2D.rst | 14 +-- 9 files changed, 372 insertions(+), 42 deletions(-) create mode 100644 docs/INSTALL.md create mode 100644 docs/INTRO.md create mode 100644 docs/USAGE.md create mode 100644 docs/_static/custom.css create mode 100644 docs/_templates/versions.html diff --git a/docs/INSTALL.md b/docs/INSTALL.md new file mode 100644 index 0000000..ff4a75e --- /dev/null +++ b/docs/INSTALL.md @@ -0,0 +1,28 @@ +# Installation + +## PyPI + +Install the package from PyPI using pip: + +```bash +pip install voxelgym2D +``` + +## GitHub + +```bash +pip install git+https://github.com/harisankar95/voxelgym2D.git +``` + +## For development purpose use editable mode + +To install the package in development mode, run the following command in the root directory of the repository: + +```bash +git clone https://github.com/harisankar95/voxelgym2D.git ~/path/to/repo +cd ~/path/to/repo +pip install -e .[dev] + +# to aditionally install stable_baselines 3 and pytorch (optional) +pip install -e .[dev,sb3] +``` diff --git a/docs/INTRO.md b/docs/INTRO.md new file mode 100644 index 0000000..a1bc086 --- /dev/null +++ b/docs/INTRO.md @@ -0,0 +1,16 @@ +# Voxelgym2D + +A gym environment for voxel/grid based reinforcement learning for path planning. + +
+ + + + + +
+ + + +
+
diff --git a/docs/USAGE.md b/docs/USAGE.md new file mode 100644 index 0000000..d05046b --- /dev/null +++ b/docs/USAGE.md @@ -0,0 +1,21 @@ +# Examples + +For usage examples with detailed descriptions take a look at the [examples](https://github.com/harisankar95/voxelgym2D/tree/main/examples/) folder. + +## Basic usage + +```python +import gymnasium as gym + +env = gym.make("voxelgym2D:onestep-v0") +observation, info = env.reset(seed=123456) + +done = False +while not done: + action = env.action_space.sample() # agent policy that uses the observation and info + observation, reward, terminated, truncated, info = env.step(action) + + done = terminated or truncated + env.render() + +env.close() diff --git a/docs/_static/custom.css b/docs/_static/custom.css new file mode 100644 index 0000000..ca6b99e --- /dev/null +++ b/docs/_static/custom.css @@ -0,0 +1,77 @@ +/* Based on Stable Baselines 3 theme +* https://github.com/DLR-RM/stable-baselines3/ +* */ +:root { + --main-bg-color: #B6C8DB; + --link-color: #6DB59F; +} + +/* Header fonts y */ +h1, +h2, +.rst-content .toctree-wrapper p.caption, +h3, +h4, +h5, +h6, +legend, +p.caption { + font-family: "Lato", "proxima-nova", "Helvetica Neue", Arial, sans-serif; +} + + +/* Docs background */ +.wy-side-nav-search { + background-color: var(--main-bg-color); +} + +/* Mobile version */ +.wy-nav-top { + background-color: var(--main-bg-color); +} + +/* Change link colors (except for the menu) */ +a { + color: var(--link-color); +} + +a:hover { + color: #798EA9; +} + +.wy-menu a { + color: #b3b3b3; +} + +.wy-menu a:hover { + color: #b3b3b3; +} + +a.icon.icon-home { + color: #b3b3b3; +} + +.version { + color: var(--link-color) !important; +} + + +/* Make code blocks have a background */ +.codeblock, +pre.literal-block, +.rst-content .literal-block, +.rst-content pre.literal-block, +div[class^='highlight'] { + background: #FFFFFF; + ; +} + +/* Change style of types in the docstrings .rst-content .field-list */ +.field-list .xref.py.docutils, +.field-list code.docutils, +.field-list .docutils.literal.notranslate { + border: None; + padding-left: 0; + padding-right: 0; + color: #404040; +} \ No newline at end of file diff --git a/docs/_templates/versions.html b/docs/_templates/versions.html new file mode 100644 index 0000000..c49f844 --- /dev/null +++ b/docs/_templates/versions.html @@ -0,0 +1,27 @@ +{%- if current_version %} +
+ + Other Versions + v: {{ current_version.name }} + + +
+ {%- if versions.tags %} +
+
Tags
+ {%- for item in versions.tags %} +
{{ item.name }}
+ {%- endfor %} +
+ {%- endif %} + {%- if versions.branches %} +
+
Branches
+ {%- for item in versions.branches %} +
{{ item.name }}
+ {%- endfor %} +
+ {%- endif %} +
+
+{%- endif %} \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index 5ae22e7..17dfafe 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -3,43 +3,209 @@ # For the full list of built-in configuration values, see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information import os import sys -sys.path.insert(0, os.path.abspath('..')) -# -- Project information ----------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information +sys.path.insert(0, os.path.abspath("..")) + +# read the version from version.txt +with open(os.path.join("../voxelgym2D", "version.txt"), encoding="utf-8") as file_handler: + __version__ = file_handler.read().strip() + + +project = "Voxelgym2D" +copyright = "2023, Harisankar Babu" +author = "Harisankar Babu" +release = __version__ +version = __version__ -project = 'Voxelgym2D' -copyright = '2023, Harisankar Babu' -author = 'Harisankar Babu' -release = '0.1' # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.viewcode', - 'sphinx.ext.napoleon', - 'sphinx.ext.autosummary', - 'sphinx.ext.githubpages', - 'sphinx.ext.inheritance_diagram', - ] + "sphinx.ext.autodoc", # for autodoc + "sphinx.ext.ifconfig", # for if statements + "sphinx.ext.autosummary", # for autosummary + "sphinx.ext.doctest", # for doctest + "sphinx.ext.todo", # for todo list + "sphinx.ext.viewcode", # for source code + "sphinx.ext.napoleon", # for google style docstrings + "sphinx.ext.githubpages", # for github pages + "sphinx.ext.inheritance_diagram", # for inheritance diagrams + "sphinx.ext.graphviz", # for graphviz + "sphinx.ext.mathjax", # for math + "sphinx_autodoc_typehints", # for type hints + "sphinx_autodoc_annotation", # for annotations + "sphinx_copybutton", # for copy button + "sphinx-prompt", # for prompt + "notfound.extension", # for 404 page + "versionwarning.extension", # for version warning + "recommonmark", # for markdown + "nbsphinx", # for notebooks +] -templates_path = ['_templates'] -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +templates_path = ["_templates"] +html_sidebars = { + "**": [ + "_templates/versions.html", + ], +} +source_suffix = [".rst", ".md"] + +# The master toctree document. 
+master_doc = "index" +language = "en" + +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"] # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output -html_theme = 'sphinx_rtd_theme' -html_static_path = ['_static'] +html_theme = "sphinx_rtd_theme" +html_static_path = ["_static"] +html_css_files = ["custom.css"] +html_show_sourcelink = False +html_show_sphinx = False +html_copy_source = False +html_show_copyright = True +html_use_index = True +# html +html_theme_options = { + "canonical_url": "", + "display_version": True, + "prev_next_buttons_location": "bottom", + "style_external_links": True, + "style_nav_header_background": "white", + # Toc options + "collapse_navigation": False, + "sticky_navigation": True, + "navigation_depth": 4, + "includehidden": True, + "titles_only": False, +} + +# generate autosummary even if no references +autosummary_generate = True +autosummary_imported_members = True + +# autodoc +autodoc_mock_imports = [] +autodoc_typehints = "description" +autodoc_inherit_docstrings = True +autodoc_preserve_defaults = True +autodoc_default_options = { + "members": True, + "member-order": "bysource", + "special-members": "__init__", + "undoc-members": True, + "private-members": True, + "exclude-members": "__weakref__", + "show-inheritance": True, + "inherited-members": True, + "ignore-module-all": True, +} + +# coverage +coverage_show_missing_items = True +coverage_skip_undoc_in_source = True + +# syntax highlighting +pygments_style = "sphinx" +highlight_language = "python3" + +# napoleon +napoleon_numpy_docstring = True + +# todo-section +todo_include_todos = False + +# inheritance diagrams +# smaller diagrams with rectangular nodes +inheritance_graph_attrs = { + "rankdir": "TB", + "size": '"6.0, 8.0"', + "fontsize": 12, + "ratio": "compress", + "bgcolor": "transparent", +} + +inheritance_node_attrs = { + "shape": "rect", + "fontsize": 12, + "color": "orange", + "style": "filled", + "fillcolor": "white", +} + +inheritance_edge_attrs = { + "arrowsize": 0.5, + "penwidth": 1.0, + "color": "orange", +} + +# graphviz +graphviz_output_format = "svg" +graphviz_dot_args = [ + "-Gbgcolor=transparent", + "-Nfontname=Helvetica", + "-Efontname=Helvetica", + "-Gfontname=Helvetica", + "-Gfontsize=12", + "-Nfontsize=12", + "-Efontsize=12", +] + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = "Voxelgym2D-doc" + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements: dict = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. +latex_documents = [ + (master_doc, "Voxelgym2D.tex", "Voxelgym2D Documentation", "Voxelgym2D Contributors", "manual"), +] + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). 
+man_pages = [(master_doc, "Voxelgym2D", "Voxelgym2D Documentation", [author], 1)] + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. +texinfo_documents = [ + ( + master_doc, + "Voxelgym2D", + "Voxelgym2D Documentation", + author, + "Voxelgym2D", + "One line description of project.", + "Miscellaneous", + ), +] diff --git a/docs/index.rst b/docs/index.rst index d27606a..32607ea 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -7,9 +7,12 @@ Welcome to Voxelgym2D's documentation! ====================================== .. toctree:: - :maxdepth: 2 + :maxdepth: 3 :caption: Contents: + Voxelgym2D <./INTRO.md> + Installation <./INSTALL.md> + Usage <./USAGE.md> modules Indices and tables @@ -17,4 +20,3 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` -* :ref:`search` diff --git a/docs/voxelgym2D.envs.rst b/docs/voxelgym2D.envs.rst index ba3e2b2..c6d7985 100644 --- a/docs/voxelgym2D.envs.rst +++ b/docs/voxelgym2D.envs.rst @@ -1,6 +1,11 @@ voxelgym2D.envs package ======================= +.. automodule:: voxelgym2D.envs + :members: + :undoc-members: + :show-inheritance: + Submodules ---------- @@ -19,11 +24,3 @@ voxelgym2D.envs.env\_one\_step module :members: :undoc-members: :show-inheritance: - -Module contents ---------------- - -.. automodule:: voxelgym2D.envs - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/voxelgym2D.rst b/docs/voxelgym2D.rst index 412d1b3..8348128 100644 --- a/docs/voxelgym2D.rst +++ b/docs/voxelgym2D.rst @@ -1,6 +1,11 @@ voxelgym2D package ================== +.. automodule:: voxelgym2D + :members: + :undoc-members: + :show-inheritance: + Subpackages ----------- @@ -8,12 +13,3 @@ Subpackages :maxdepth: 4 voxelgym2D.envs - -Module contents ---------------- - -.. automodule:: voxelgym2D - :members: - :undoc-members: - :show-inheritance: - :inherited-members:
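
The `find_obstacle_neighbor_count` helper added in this patch computes, for every cell, how many of its eight neighbours are obstacles by zero-padding the grid and summing shifted slices; `_start_end_counts` then keeps free cells with no obstacle neighbours as start/target candidates. The standalone sketch below illustrates the same padding-and-shift idea. The free-function name `neighbor_obstacle_count`, the toy `grid`, and the candidate-mask check are assumptions added for illustration, not code taken from the patch.

```python
import numpy as np


def neighbor_obstacle_count(grid_map: np.ndarray) -> np.ndarray:
    """Count the 8-connected obstacle neighbours of every cell (obstacles are 1s)."""
    # Zero-pad so that cells outside the map count as free space.
    padded = np.pad(grid_map, pad_width=1, mode="constant", constant_values=0)
    # Sum the eight shifted views around each cell (the centre cell itself is excluded).
    return (
        padded[:-2, :-2] + padded[:-2, 1:-1] + padded[:-2, 2:]
        + padded[1:-1, :-2] + padded[1:-1, 2:]
        + padded[2:, :-2] + padded[2:, 1:-1] + padded[2:, 2:]
    )


# Toy 4x4 map with two obstacles (illustrative only).
grid = np.zeros((4, 4), dtype=np.uint8)
grid[1, 1] = grid[2, 2] = 1
counts = neighbor_obstacle_count(grid)
# Start/target candidates are free cells with no obstacle neighbours,
# mirroring the candidate selection done in `_start_end_counts`.
candidates = np.logical_and(grid == 0, counts == 0)
print(counts)
print(candidates.astype(int))
```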
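
`_get_new_index_from_counts` turns a matrix of visit counts into a categorical distribution that favours rarely used cells, with probabilities proportional to exp(-alpha_p * count), so soft resets drift toward unexplored regions of the map. A minimal sketch of that sampling step is below; the function name `sample_index_from_counts`, the toy `counts` matrix, and the seeded `rng` are illustrative assumptions.

```python
from typing import Optional, Tuple

import numpy as np


def sample_index_from_counts(
    counts: np.ndarray, alpha_p: float = 1.0, rng: Optional[np.random.Generator] = None
) -> Tuple[int, int]:
    """Sample a (row, col) index, preferring cells with low counts: p ~ exp(-alpha_p * count)."""
    rng = np.random.default_rng() if rng is None else rng
    flat = counts.astype(np.float64).ravel()
    probabilities = np.exp(-alpha_p * flat)
    probabilities /= probabilities.sum()
    sampled = rng.choice(flat.size, p=probabilities)
    row, col = np.unravel_index(sampled, counts.shape)
    return int(row), int(col)  # (y, x), matching the convention used by the environment


# Toy count matrix (illustrative): the centre cell has been used often.
counts = np.array([[0, 1, 0], [1, 9, 1], [0, 1, 0]], dtype=np.uint32)
rng = np.random.default_rng(0)
draws = [sample_index_from_counts(counts, rng=rng) for _ in range(1000)]
print(sum(d == (1, 1) for d in draws), "of 1000 draws landed on the heavily used centre cell")
```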
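
For the continuous action space, the new `action_to_bins` static method clips the action to [-1, 1] and splits that interval into eight bins of width 1/4, so each bin indexes one of the eight single-step moves in `self.delta`. The sketch below reproduces that mapping as a standalone function and checks it against the values asserted in the new unit tests; the module-level `DELTAS` list and the final print are assumptions for illustration.

```python
from itertools import product

import numpy as np

# The eight king-move offsets the environment steps through (all (dx, dy) except (0, 0)).
DELTAS = [d for d in product([-1, 0, 1], repeat=2) if d != (0, 0)]


def action_to_bins(action: np.ndarray) -> int:
    """Map a continuous action in [-1, 1] to a direction-bin index in [0, 7]."""
    clipped = float(np.clip(np.asarray(action).ravel()[0], -1.0, 1.0))
    # Bins of width 1/4 over [-1, 1]; the upper edge +1.0 folds into the last bin.
    return min(int((clipped + 1.0) * 4), 7)


# Quick checks mirroring the unit tests added in this patch.
assert action_to_bins(np.array([-1.0])) == 0
assert action_to_bins(np.array([0.0])) == 4
assert action_to_bins(np.array([1.0])) == 7
print(DELTAS[action_to_bins(np.array([0.3]))])  # (dx, dy) step selected for action 0.3
```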
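
The reworked `_run_astar` no longer reads the path cost from the end node; it sums the Euclidean length of each step along the returned (x, y) path, and the same per-segment sum is reused for `action_cost` in `_take_action`. A minimal standalone version of that cost computation follows; the name `path_length` and the toy waypoint list are assumptions.

```python
import numpy as np


def path_length(path) -> float:
    """Euclidean length of a grid path given as a list of (x, y) waypoints."""
    if not path:
        return float("inf")  # no path found
    pts = np.asarray(path, dtype=float)
    steps = np.diff(pts, axis=0)  # per-step displacement vectors
    return float(np.sqrt((steps ** 2).sum(axis=1)).sum())


# Toy path (illustrative): two diagonal moves followed by one straight move.
print(path_length([(0, 0), (1, 1), (2, 2), (2, 3)]))  # 2*sqrt(2) + 1 ~= 3.83
```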