From 8ab7a90335366ae2351d1f446f21c8a3dccc501c Mon Sep 17 00:00:00 2001 From: Saksham Gupta Date: Mon, 12 Jun 2023 15:10:58 +0530 Subject: [PATCH] Inference-App merge (#181) * changes added for masking- flag option in compile_llama.sh * frontend added, not tested * added num_threads in OnnxBridge->LLAMA and updated ezpc-cli * num_threads added to ezpc-cli aswell * readme updated for ezpc-cli num_threads * ezpc-cli-spp script added, yes to test complete setup * Readme updated * onnxbridge bug fix in sytorchbackendrep * testing changes for mlinf- revert before merge * testing changes for mlinf- revert before merge * dealer.py bug fix [cause: frontend merge] * added chmod +x in ezpc scripts & added preprocess.py * testing changes for mlinf- revert before merge * testing changes for mlinf- revert before merge * fixed dealer bug & app.py code added * changes from mling repo, gpt-branch commit * minor bug fix in tensor.h * inference-app Readme updated & mask -> encrypt * sample image download link added * sample image download link added * reverting changes for mlinf branch switch --- OnnxBridge/LLAMA/compile_llama.sh | 18 +- OnnxBridge/LLAMA/sytorchBackendRep.py | 34 +- OnnxBridge/README.md | 11 +- inference-app/Assets/computation.png | Bin 0 -> 42981 bytes inference-app/Assets/onnxBridge.jpg | Bin 0 -> 12982 bytes inference-app/Assets/preprocess.py | 19 + inference-app/Inference-App.md | 23 + inference-app/README.md | 154 ++++++ inference-app/app.py | 311 ++++++++++++ inference-app/constants.py | 53 +++ inference-app/ezpc-cli-app.sh | 447 ++++++++++++++++++ inference-app/requirements.txt | 5 + sytorch/Toy example- multiple inference.md | 5 +- sytorch/Toy example- single inference.md | 5 +- sytorch/ext/llama/include/llama/input_prng.h | 1 + sytorch/ext/llama/src/llama/input_prng.cpp | 28 ++ sytorch/ezpc-cli-2.sh | 12 +- sytorch/ezpc-cli.sh | 12 +- sytorch/include/sytorch/backend/backend.h | 12 +- sytorch/include/sytorch/backend/cleartext.h | 68 ++- sytorch/include/sytorch/backend/llama_base.h | 102 ++-- .../include/sytorch/backend/llama_extended.h | 127 +++++ sytorch/include/sytorch/graph.h | 9 + sytorch/include/sytorch/layers/layers.h | 296 +++++++++++- sytorch/include/sytorch/module.h | 249 +++++++--- sytorch/include/sytorch/tensor.h | 326 ++++--------- sytorch/include/sytorch/utils.h | 70 +++ sytorch/scripts/app.py | 34 ++ sytorch/scripts/dealer.py | 10 + sytorch/src/sytorch/backend/cleartext.cpp | 178 ++++++- 30 files changed, 2191 insertions(+), 428 deletions(-) create mode 100644 inference-app/Assets/computation.png create mode 100644 inference-app/Assets/onnxBridge.jpg create mode 100644 inference-app/Assets/preprocess.py create mode 100644 inference-app/Inference-App.md create mode 100644 inference-app/README.md create mode 100644 inference-app/app.py create mode 100644 inference-app/constants.py create mode 100644 inference-app/ezpc-cli-app.sh create mode 100644 inference-app/requirements.txt create mode 100644 sytorch/scripts/app.py diff --git a/OnnxBridge/LLAMA/compile_llama.sh b/OnnxBridge/LLAMA/compile_llama.sh index dc5d3a56..38fb6271 100755 --- a/OnnxBridge/LLAMA/compile_llama.sh +++ b/OnnxBridge/LLAMA/compile_llama.sh @@ -4,10 +4,23 @@ FSS_CPP_FILE=$1 EZPC_SRC_PATH=$(dirname $0) if [ ! -e "$FSS_CPP_FILE" ]; then - echo "Please specify file name of the generated .cpp file using CompileONNXGraph.py"; + echo "Please specify file name of the generated .cpp file using OnnxBridge"; exit; fi +if [ ! 
-z "$2" ] && [ "$2" != "-Do_Masking" ]; then + echo "Invalid 2nd argument" + echo "Please specify -Do_Masking as the 2nd argument if you want to enable masking for frontend"; + exit; +fi + +# Check if 2nd argument is provided +if [ ! -z "$2" ] && [ "$2" = "-Do_Masking" ]; then + mask_flag="add_definitions(-DDo_Masking)"; +else + mask_flag=""; +fi + BINARY_NAME=$(basename $FSS_CPP_FILE .cpp) DIR="$(dirname "${FSS_CPP_FILE}")" @@ -38,6 +51,7 @@ PUBLIC \$ \$ ) +$mask_flag target_link_libraries ($BINARY_NAME Eigen3::Eigen Threads::Threads LLAMA cryptoTools) " > CMakeLists.txt @@ -46,7 +60,7 @@ make -j4 rm -rf ../$BINARY_NAME mv $BINARY_NAME ../$DIR cd .. -rm -rf build_dir +# rm -rf build_dir diff --git a/OnnxBridge/LLAMA/sytorchBackendRep.py b/OnnxBridge/LLAMA/sytorchBackendRep.py index 5e1442fc..36ae78e9 100644 --- a/OnnxBridge/LLAMA/sytorchBackendRep.py +++ b/OnnxBridge/LLAMA/sytorchBackendRep.py @@ -122,7 +122,7 @@ def cleartext_post(code_list, program, scale, mode, indent): if (party == 0) {'{'} Net net; net.init(scale); - std::string weights_file = __argv[3]; + std::string weights_file = __argv[2]; net.load(weights_file); Tensor input({'{'}{iterate_list([n]+ dims +[c])}{'}'}); input.input_nchw(scale); @@ -160,6 +160,30 @@ def llama_post(code_list, program, scale, mode, bitlength, indent): int party = atoi(__argv[1]); std::string ip = "127.0.0.1"; + int nt=4; + std::string weights_file = ""; + + if(party == 0){'{'} + weights_file = __argv[2]; + {'}'} + else if(party == DEALER){'{'} + if(__argc > 2){'{'} + nt = atoi(__argv[2]); + {'}'} + {'}'} + else if(party == SERVER){'{'} + weights_file = __argv[2]; + if(__argc > 3){'{'} + nt = atoi(__argv[3]); + {'}'} + {'}'} + else if(party == CLIENT){'{'} + ip = __argv[2]; + if(__argc > 3){'{'} + nt = atoi(__argv[3]); + {'}'} + {'}'} + using LlamaVersion = LlamaExtended; LlamaVersion *llama = new LlamaVersion(); @@ -170,7 +194,6 @@ def llama_post(code_list, program, scale, mode, bitlength, indent): if (party == 0) {'{'} Net net; net.init(scale); - std::string weights_file = __argv[3]; net.load(weights_file); Tensor input({'{'}{iterate_list([n]+ dims +[c])}{'}'}); input.input_nchw(scale); @@ -184,10 +207,8 @@ def llama_post(code_list, program, scale, mode, bitlength, indent): LlamaConfig::party = party; LlamaConfig::stochasticT = true; LlamaConfig::stochasticRT = true; - LlamaConfig::num_threads = 4; - if(__argc > 2){'{'} - ip = __argv[2]; - {'}'} + LlamaConfig::num_threads = nt; + llama->init(ip, true); Net net; @@ -195,7 +216,6 @@ def llama_post(code_list, program, scale, mode, bitlength, indent): net.setBackend(llama); net.optimize(); if(party == SERVER){'{'} - std::string weights_file = __argv[3]; net.load(weights_file); {'}'} else if(party == DEALER){'{'} diff --git a/OnnxBridge/README.md b/OnnxBridge/README.md index 945423c0..57fb7964 100644 --- a/OnnxBridge/README.md +++ b/OnnxBridge/README.md @@ -45,7 +45,8 @@ Secfloat/compile_secfloat.sh "/path/to/file.cpp" ``` ```bash # for LLAMA / CLEARTEXT_LLAMA -LLAMA/compile_llama.sh "/path/to/file.cpp" +LLAMA/compile_llama.sh "/path/to/file.cpp" [-Do_Masking] +# `-Do_Masking` is an optional argument if we are compiling for Frontend,this helps generate masks.dat file ``` --- ## Inference with each backend: @@ -84,11 +85,11 @@ python3 main.py --path "/path/to/onnx-file" --generate "code" --backend LLAMA -- LLAMA/compile_llama.sh "/path/to/file.cpp" # generate LLAMA keys on client and server machines -./ 1 +./ 1 # start inference on server and client machines -./ 2 // Server -./ 3 < // Client +./ 2 // 
+./<model> 3 <server-ip> <num_threads> < <image> // Client
 ```
 
 #### **LLAMA Cleartext**
@@ -98,7 +99,7 @@ cd OnnxBridge
 python3 main.py --path "/path/to/onnx-file" --generate "executable" --backend CLEARTEXT_LLAMA --scale scale --bitlength bitlength
 
 # start inference
-./<model> 0 127.0.0.1 <model_weights> < <image>
+./<model> 0 <model_weights> < <image>
 ```
 
diff --git a/inference-app/Assets/computation.png b/inference-app/Assets/computation.png
new file mode 100644
index 0000000000000000000000000000000000000000..f731621fd70d035c90abfa608d2484fd1d3e1312
GIT binary patch
literal 42981
[... base85-encoded PNG data (42981 bytes) omitted ...]

literal 0
HcmV?d00001

zqa1}j+v`Fp`1svck`RdK7?hgT{^Z~mnAv%qX<5waxLnrtlSjuuv==M>*!2wK0>NBw zbxY3cyk!qhEGuHA#A-z!;=;urSZ@kd$PB^4yn77o0^+q7=X zxjLvvJ*Vu`DXX(5FS?{((>Y_|#l5d4=Qh#ib&5xGe5I$_5vh*CBZz#_>NfKW8V!*= z1}5-4nq*55RvAzkBho(mT{mOJ*pOM9xv*#Yc)PPm(kFfi!+BNi{a578QZ;uq&Ibxc z2wcCXL&3K$NN8wD9MD?Ra+)Dv1qPO&qq_)PJ#XT?&b88$Y#I>GvABg_1Y|N1;^zfaK z?Fl(5eNJg=ifaS4S4b=e3PRVQy(G96#1YB##>>nNM)~S{j&|@mwTDk!PW~}A*HZN> z`N79cBRez{iFms-lLksB!IVDCcS$0~W*=kCVc$O=t9dzQ`O`Y|g_$tde+uBdqKv7w zZ_hsh}e8jhX~{!>J4k43pD8BbCF{E yOAO&N%&-~ty8kQp^zW|j-{EEb4o5lhd%=Q|rw7}Ad6~$iu{>yfAalRV&Hn-C4{FB% literal 0 HcmV?d00001 diff --git a/inference-app/Assets/onnxBridge.jpg b/inference-app/Assets/onnxBridge.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b26cdbe404ef4ecab7038745fe06cf8eb654ba3 GIT binary patch literal 12982 zcmeHuXHZjJ*J!9x5=5j*3y6Z$pdh^m1f=&OC4>N>_bMGi5ornn(t>mm5kYzr5ESVk zJxEcch9W^k;2wP6dA>6D$DO&q?%Z#N4B7jvz4qB_ufA3i^mOh)sIF0gKp=>QI@|yR zBD(l+QIG>OLP)*{;2))@y15SsMALKeA<7V-VFiImaA+elUo&kjX{3jnFv8x$4kaAu z<_SQ9K(Y#fo(QB1$`@*fazwk!@#0%Lc%f)}IbKt7ZJ4&F3d#ws9_)=W4AwD12D>1o z?0FUBsbmAC0R(O+Uj#JJ&DGsUI#7=HFJ5Wj`^B^fF976i?;vdeSNk^zFp}eS^7ZwU z77+;u2oMgqBkbYrC?YB)B_#s8Epq#|5C9?M6XfoT2o!Snxej2Ug8sz;NBJPV(Vo6& z4|nJVPK2F@pRXJ*FBSCPsFi$uz0r36V0ZTsM*N-eyI3L&99jfn?;|2A47+$OC{*@e z@X|{5J{QA(ZDNS>`aAg7$QAu}Cr^a856URW6D7xMfb#M1^G2fnVz^lJZzL+-D1GckUvE5J-f*kk}om+d_!DFr<)! zsDqfqT|03xqyzFFME_dyU(u-90a(Oui{HL0E+r}^Dj_8<3HxWk--rJd-pIob>2z@v z`P=`1{m0y2SXq$^>bj!;MU=mTe^ce375^gHzX1Q!wfvu7l|AyGYx4B-cKyo&_DB(w zE6NS!?t5`bqW``md!)24+Se8JA6KO0>ib`>2;etC>I>}SFA30EfC@PpLh)blm5#i9^OVC9x?*QlmC`bdYWE7aYy%6-! z_%QPB?wV^r#|#%OH0DMGaZ!AtSwSS3>Mo3O=x;?6XlQF=!j$z$KPX}*@?GyJhhW7?Zy{_DZ%!yR6(rlVXRk*49( zZH2B{L25XB!e#kmJBYjJELlktzIrwKX?#B~`K>Eyx?lrt?gHjwI9y&-Jt6*~T`Jv` z;~wY@?)`-S0sMcRJR0&dOp396wSu4sD8?r|Mhb}t(ekX>W zXSWJXxtz>1Lz9FrqiV(~FiOA4CRf5kgntH8Ne%X5KuRD|{RloP zTKH0G-yXSuN`?+u)ogf|FSU?I;P)`sA2-L?S$O7Z)p=|Cd5p_h*e%rJ&^bFR`&7^K zRX!!gD5|SL=@fZ5>O54rNUu|ozBx$FM0Y7Wa)&&pkNAjaHGIS^&_o*qQe^g2#1jlCulHB+KXO^YI`734BtQ`Qe6Rb|kp&rqE| z;H;*MC|9se&0nTrD#W-Ky%LR#4w)PM@-!xAbnEL=cuUUCQ-hEbrsdT_luaCZdWruN zcaf^nQ;4wg%vVWnRqW8_pz&#&+dHrkzb|;qaAJwrXwb=NemwER@4LmbZ)*gp(*+6h zrRy(FYy0}XN6m<7WtEbdr|I2>VIn7Q@Kq!=9pTZdAd(C!hUlO4s}LCe$ik%WEN{?h zdLdEEAeN7k_J-)|iJ52Y3Jb|XtI}_LuhNqCXeyUqvx-c_s)9%{w+>l>jr*~h*iwYu zVsi9!$6K=OVk!+fE_G%6%dNh3ixx}~A}fN7KI0zDcMJ3soHTQU@++WX12!BLhvyZd z6|&w}>mFfWNV%CV`Vd+4t0aRLVxXW5@e*eLSw%jg>=HwMCtwprT*U7OwqDRjBXQ?h#> z8@JNtZY8cK3r!SV1FJxJ?r2pM!qiW&DzQJ-<}TY!{XV6=(IwQRMBAL~gE!5EF_0=6 zLg0x}%pvaL+Hex3KoqNF+JMO?cw$pM^ zt>)bJHq`Q4==q*PD10F`IYTswGNB($cn{s?&Gqz*lcXdjxH{LUhb3oo@T1?5jEKyeLli8*jGCEx*j*uSeOL7JhFK@ z6-jj|x~{l$%J3~ zXwVm;{9G;TnMx5H2OImDsBhv|rX}9?6&%&?OnGgsF_CszEAd0I-xP^kB-noJ{ep)+ zR+_yW*lm0C`&2!^p6lbXXdK_yr_)+yHP8e)dMS`b`n;ap6}KcDGNd3HP1ygF*~Whk zu!0kAadd`spG=@+{9`v%$kx4oRe9+v?ylHGy%LRjP4E?Zo%m1bQH6BMx1aQ!eHb21 zbG;|Yj1j!mcKIzbP-mzK2i(|W=@(>_LJsTHR1du^5N>U0}I0X*36B7&-T`up0E zE5DxA%b(58wnrbxDeTVhDVaWk8U`czuHBrA$LwF+tSPOgu27JQVhLIg!e?BcJ~L9B zG>R{pb|kVl{RoLs(XX;Nzk;(6yEN(vex}nxb+u1N&1684KKbkpF37z|kb5#;b>TZX zz{lVS;}B_4Noa>+lTyy6?QGi;Xrn<)nMO0H`(g68zzD1TljHDH(t#;KHCdCbYKduLZxwL^&*N^_g{Q`Cq}@#`ZIB@Z*v@UDbmjQdwlr) z%0=J6h+IDvn#c{;hokjb{J(hg;nx(X&C}<(jE9gZYd&^xm=%O$eWJAH#Lk4nmHZ`BUSiF| zLA!@F%U1B~Z(0dsE4_7EF)CMDkGou^Ymp@Hu@qVZaQ?iW@(oc*Xk z@&2rm&_ER0T<&X=E=31O zFN6(-HWeJoceYI(A3I(O2_1y@MJZW$?)T|)OwSOjNNhh|ei&o!Xc!qkSA9mM`RG$2 z`gprA6Q!I+U&m{)_kO+%%ZkAh?cn-8r6<`}l5`ZDv-zsTK`Cg-h}K=HrmVfGKRnKf zFnSh&A3PuhX)unC<|Q?_5fx$ug(-!a7-WD>jNBME5}2zFZE46{V7v>s`~KT8J((Y)|f}b zZS)_FiYzHd(0#c_i?8iYztmsQN9%>D(oT$z7+d!SM4-lV-4d7tj8HQc%V&}__qvTU 
zo$uRW)yeV5Zl$VYX*0bnH~obs);9wpj0jwQX8euGg0qhDGyUM}eH06D_No zS-Z)npw9;z5wkOIg_Xy}TVY2mzC3f#`=3OO_YQKX?a74PF72Hh5Bpo+uO1{|20Z$9 zxvrMeSX{fp8$e;3!L{#quRAgsrXp^4FW;GG`O%WuKA* zjT&C7`$v z-1FnHDR;(>H9uj%++B~JtZoY*k{^8fGhI)yiyJbm;cBUNb#$^wA$7g&bUEMSZ8@X-v+_V(Son4`U$FJ77G}1#j zwWmA}KDX!@OM%VBp9cuyo%O;@FdGlwM_`{jxoCYCt~ro~bbHE-(fDm$w_-BQ=8jOZ zNFCc|u}|Od+O*&JBvbrROqW#GIGb>7~z4Yhu_lI^}-Y-Q@bAC>{X zBGPdvd+qsUDD)CkQSo+O580D*DkhyK(vL>(<-RPrm!jXplHAv%4FaV&Nu!{Jr-kgr zxLz%ukNGoxbtPLWv3yse>+~o*@84$^LWkTm2Olfa_&N6HUb* zF8xtGdY8MWWYlMW$cwDV2SZ4D=Xh#F-v^}K-x;Dj8}Ln_xZSGmmUGte7#Hq0o?_fix$ zThiCZN7ia?gOW+CQXCapj)oBpF|6Piy$%lv>yR~7KJU&@;ZpSSmzO(ALF=I6I~qTl zlNURVtLBg%1@}RM)DG@aa;h{?#sq|-`3;Qe;~s$XxxNSg$)l;fPL};P*rZlGIz3Vp z)Ok5`P{GXZo;%mo_g-OUX;ov)er!7=&UvkD%2%dXbJlRPWPZ^u5;Zp6Zjd ze-2f`AsW){FCgXTfGVXe^L?_@ublcj%xk&T{C>?G*rVmUz_fYCLUpo0WGg{WpFYj_ zP9S3E&aWMt^O`xlOn1-w`yTR-GT$-6yY)ee+>?>_%dBD#hW^l{uanYiD1G_7yrP@Q zm9`PVm#9}?wJ?AHWfVx|j($hUo8kLKI3J5lGJRSst90T@3A3Iwt-zPiAGdD!<3tKc zM@H%{n~OiXb+uG^+!&;6opGkiIn-SspBv{iy;EX3DTpyLS;5Yok*D)@p}SLgSeSIO zBVEom)U@PJWvk{|sVXyjpAF?l;^lv)WQiVXKJY%%(voqJcE%*VUxWBf*CFNP7n;eh z_T|QoEqniH4x8qe+Iks{$cpcY=6jWNos9;I#S*!Esp|T*x3bsYefdRsIUnB3>g5*F z*~M#(Jz)9!bNBbSwnMXyFcN1M7q9NasQ~K5eCcHxi8F%wxaR@WYln&k+PxQbo%V*6 zG7V>NZw&m#IAyLJg1@?P*Mg@d3I7xGEnLJV0_Ad+Pg92JVb34Gj$MEr8y?om)@YtaJX;pPaHtj*{z%0V0RMVZ0c!Oxe00_iS(8 zqo8*LtFkawE-{kJ5z5=@yWox#_s+8^9hLI?vec@kqp=Voh{xwXKk~8~UjOqMmehJu z^W^0&Or2Wm?u)tTK3&_7FXmFq0pn>S&sz34H_P%yBPv~Z#dU%F>jFzP3L1#$M$*%AyBG?eVk*crr99rapiiZh1X*fh;}+zG7^ zpQhp>>Z>PCO5x-*8>e*_qW_eZMwJ$F?~0ys>Wvv1xEWyOfXlC}fVCn7Q&0%%j`A!nJ()Mhxb8u0Shpl%$HoUXU@(Z9Ax}##X?nb9|JA0j?-fk1LuyOn_L5il* zNRInUnFnJOO?qhvGjifcsJ#ZJ)Fm)<>Hf5F2T5%aG(*SM_fT^F_JOeA*?_X(1HxUv zol~QE)hj0Ep1w(>o~?N{t)cmx=bUhuqF|YLBdAOw|z@f-KXEtG-HudxMA4& z>6ACM_*?cZvEeJnml0E!sNI@Q*6}ssF^ii%!Vpoaz-a?+qs%Aam0_Ep@amHa}%)R zEcTON*Q4iFPWIFW%al@M* zD*;^hZ@V`y(`Y?^C~A;RPpZ^oH6<6n9C#~C;=G|;q2|&hnw{>k$w#}>ZHs->2l-uJ z7M>UKDT#5ob}pX(kZD{K2v}N(rQVxfl*c%UWtyU%@lg6$7etp81YLV$20s7uf)n|r z${Z9CveSNP@i(DC)ZC{;V&{G{YO$R+Ega+Pui*EgE=C4H^p9 z{aH66_BH2&C*`t&M*aHBSsH??Hm`|k$5wBq!YNO#ZHS)E55Vm8#D|!D7~gG8M+DX} zKg(6Pig8w^iXSmzz#uwdz71bR25yUKcTyI7HxOGcIg>2L@I((WdBf@ys~(V;uMPG zNrZWGRrTM_6t+r~zy*?IWMl+xJzs&|t= zch#xuj6%vGMUYw^PGdhKdeZWo#L-El*AB~gA*;0D0Nk<&G8f?SVW&-IcRyn?=6-#xAKs3X)(i)9yE4V`vRz?0`avoGD|k-_V>Z(Q*KoFxcl<(+u1$Yj`{2ICi?VFIpnJPFrV*Q zf>!PH5m^mQi%|hMCGPCbpq1fnDZ?Qm5NWj1(a~D&9Wrro@#)r}JDEZ@JLDA&a?f;x z$!{I@=Uuy&fIQAoSZPCfaIqzV7zTMcffd zOca-s4ud5KAd#Ypo{5qBGaf(4lC7j^rBMpAY>Pi$nLk5Wl1Tq&6`DrKMOsJR?!u{xKS_id4V(WaoHnUF@#we1w@8hUDaEWhVsx zaroDR5ML`Rb{Y=Z`1;K+c7#_Aw-u?uKx`7|5YMp`7y6LhicvsndN#%s7t9egA{Be* zMd@C~a}L1pOt%GGQtqlLlj@8qBCR=odw3COd7Tg1#O04J{mf+-))IPoM{7>nBGuSO zV2+v?uGCYtZlfms0@K=c}6*<*peoeOcb`R3cS*n z5{G5|@taOt>ckb$ty;_qL?E+QA}}EQh6V=z$U`VHdKxHCoH`RFR(+i$6Zv|83GU(c z0s+BH=Tpix(I|!PeUA_mekS7`msVk*$iDMuP*d9Uv8 zGr6SQpd==Bs?0f$k+#X=$bTE52#a^U`)GF7;^CU*qWsq3OdR5MD0ZpOf@rtB!~TZp zzKljU>B7wYCBDqp)=4)678{uChx~p6vE^U;r#5K&bJr^i_Fbz%<(5Yd(tYF%aNy%Y zABTiI=$mLNIBC1rP$mME2ot9emH6-l=9`r{BUj^D-bf$Z?2`ZKlM+Sjd-bw$Cs)@` zEqBZ48y$x^`mHoG6N?JHN1&k2_R|w@x6jAXYpkrSX@d?2norWyNmVf5NND2pJ2zH< zqrlw})tf;Lz!}6{rl8qc-WWq7yHv)A2aj1mv=jn61a98+fWuNnX#A=6$?pDKK3TTU ziR^;xfiSp&4dZ$jVxA5Q`O?OBi+>;k+yb6`Mj|;q>x7?N&EB8>_Br2numx&{qvIr{ zSAF!x1@EZtXi#H|EwYCaYj#={&eRG@DCVJGW@cD6vP1|z^5mZV2`)dPK}w(g9M>~2 z0C&S`90r|_>b=w`>gpR1gR|lkWYIfD`r6|l(A5vRKom3m#)p@%J$DJ@&PUv7V^R%h zi_g>s8tJg@KWJLe7OssIP=8O#TfuQ_+>mBr9;;r!eSVISBwx(jby?reUnDs&Qb4MJRwkblw(O;)tOz0|* zQxs6JN`Gl=Pc4Z}ijJL~{nyNJcyej!O;Tc_{#vlAeog3! 
zzwBNc9?=tjB}>|`@+7=xBXsHOhAvqT%g|U&%eRJN-_d}C3mRmQO=UQ_sP?(EVcW&`^HJKk#p|Ib^8sKNCMrvo%whm0jQ^cmo}$b zIh_YECu=t6kj$#zpo?U|3F{FtW9rb&H>fqWDzgSs&R3_jj}*c#AK=0nsLy1XgtiCE z1v!&K+yf~tg14%iF3fg;?cC1bbd4YT#^tR@?n`+mkO0~_C`d|2VOvblyqUBhl)&;Obf1#{k_mU7&dTE1 z>0*uZA^+zrQ4#^uYPc-NJq-=fjg3uMu`|V|flpV!q4(-)m>qz*VZRuQzKA6us*)lcf5H30qM?Y1!b`n=AEt z9SL~ueAG4r25>GmriK?szLAY%P|2WHh`WAXNOnS zOX&z3n#8Fe9d`TK!{g9TM20rSyfyP`i=k<*eJNq5@RZFg&7v6JyRo9zKdj7MmR}#r z)pRrnTrNu~x;{yyYC@|QMN3N5j8z%0^~_wOCXeDvc!t=G;9@qGBVW_X#Soz~`YX{C zQSk-xM=GwfZ#YIhC)})I+E+gsT@seZd&>qMOpSh-P1aya>hiETX2=iwt)Qo`pEkEO z3}kklT%CIYfFPvZ8*d7lp4{R*`*qK({h^eRu_}zYXLn;M@cVk1zKu;vT*ny&5UChc z&}(XH!aR0Zo{4!?{1cbMTedcxacTV((PplgMFT(u}sU~Ej^U(-Me&#?>G~v z&iAWtfhO{3OxK~Wh7k@5mR>LjjLgrw>UKk7J%^;$zV3O1UwU#=+nXuJATfLE1La8JM1({kcXHx>2Acc^#f;SuE-{LXbT(Sv9mmG4#Z*Vkz(=WbL;XbvT zECEq{6WrbR>u4<;tZLGncQ3tozHW4;(eFze!EPvuLfdrVo&KGv{B>cw^y`VlJo;*i zl-z}+sm6Ckd(Tc@1lG9O9|7$@-WR<9py^f=^~UA~pa`RG8J2<~=8jin!#g?XzJ+T0TJ-@Oou*mh z`Fp(#IZ~)};b`xKd<_Gv=AS={;TV=X4vlO3`94$CR?~*2m%3rF;z}yyha{qiR@%e- z=Q{E<;ru*J*n}lBSh%DSr*~$;BS5HYenF~!`$};SMSadmIyqU(HZ{>{yXiltrNbYTXgIrjAUP@`6 zxp3DIzmj|Y5;c&iG%o(&;b?Er5!%^V8IWoowyukm#nhrynIY$Un;{)%) zVOB8%4oE;xsXLAZA=YGvV30rVIFHeMr&#V=BM^2BnvLL)lP41|?e=zPFBf&b6?Bl_ z_wCHh{HE5pawA{7EzeiXIo3T-z;?z1;;60f$Yh9@*SGQj?LIHde;;~zKILSmgR|q; zHxbbE+h-u#Qw-#X6JAkU8>063TE@{eX~gVd?vN8H#tDH0xYI*i#)3@^R<#NlU4P^g z)V*DDJj|WuI&7JGoQ}RDidsHaFS)uSIa=L{bzqi$kLBBG-RW}A)4`444vuwNo4=~^ zX%_aau&;lg$2)P5C`F*m;-7ujZ2z*vxgz)OZA?MHmGAi+)V|50rw4K2m?AzPcis|! zC$cp?c}{b8X`9SLg_d+4c-xj6dicFyX~(H;Yv!WZ1SJdTzUIx^_(1a>+$ZH9&nHLP zCbt6=a#Q>)O}OY1iJQo><1Vu#unKp`NoX5xG2XWFt48L$Z`8l)xp1T5XXE~px5gif z81NSfc7k--y{6X>UziH1JPJzXG&_fR6PMZd;Hyv#243kzW-Ulc5~=41=)34(!%RA z$u1&IECCNr6dTC0Q+{IbcV#NQM8f`T;<++(l+*3aG9A^Q@^EEqe$Bm8y2KiwfOE4x z@uFlR8Zjn`zL$wuEv{^>QOIAK9N(P9;ga3v-f^X=7Zw+j0_J6h2ZZ@i5odftY;VrE z8W95|Y#V~ZWkd3T(w!uImnG-tuF~xSApZ@jN&Q%?FY}PCx@DCygtFzugVdQ2&N7vo zA;sppr3vbin2v|FV7$;^!J+TGSp-i)IkQ1-z~j&>);O3rm_$`oRq|x(t=C>J6LPFr zBOcDZAYJui;-b)FwYW-xVNglW#f5}>gn9HC@X&W*+%|W=Ukdj0;oU3aD}({Q;Gr==`rrBr_~aDzPQd>p6l5< z%YYCm4?~=p$U+K=UkbMqm8Hm&toyDpe(wA2FueJcZi^wu!6sCsk{nR(gZ^_e)|vLj z1{$ruK7=b8zrEL>gi9-^nbVN4&9b_-jh-4_sc;!?X*gP-)FZSPGy_%l3LloXPdNsX zCHe{;@@1sVR2WfwhfNn@!IGA0hIO*!?XBFqzrhbqO`GI09}cY${P`=I6Zg}J8n<&! z=JzUailSO1OP-{x?_PIXKKc%2ruhDNU~BLo2HGj61z%QrDl-}E_{xx+`H%>x-M(4; zk$0oAwoA3i0KHc0+$L2D*p9d&vj#K+?sRwX!Smv_CM!wiF7~PUwVnGDbNr#EJ5^=A z#D+KXQwG|7#SPZ&tr-^CZLU!u_Yz~BoZYtWy8M_ffZ1MSBu@bnh8w!O#IT>e073O| z5@)HiR21@>MmSYvpfb`sug4RTgSz?rL;bV)q*3veeTVDEkbDkDIjb;MHRBW|=_gS< z-{MMmH2TZY_LG97*nsfNopPcDcok1lWcPg^rf%bJ6&|AM)Ts?B?`s8twiK+d9$T}+ z&)|r5zFCBS;kwTW8))|Gt1K~s>jSUR`UTCzzJnmixB1+Fs%u~3iiy4glw)))u@Uc* z8FNJmu0|$cOtnj!JSS<$meb@9@A&loF}^%nKpekAA~wd!mG|CUdx~I=zxT+q`&$DS zV}go=Z&D>kw9M;p+9If*ON(rNuDNcuD^7}eX{MUdAqh?*T}58{0)YmJLMlmXF~4k%(v)n75e_*l-xqBK^)~ zT^btpK2|)QF|Oq_xBNAbW;dJ4mdr#WPSf*#IO(VuB)89QKch`dcIFXI!{n?%A8f%K#yerZVDNRgZ7O49bxEaB z2>Ir-ttd2`CBFGc5*J?3Pqd6XXdD>s@@Deh8LT(UWY21LF0@k@{g5wHWho;P4V-mO zojO-SP-tAam@j6j%(S^1o9xf1ryJd?`xCTR9X+P1AHzcktB~{%3FYDF>A!PnEpOl? 
zqg+H({e;q^PXncWt5WS*OWQBvnDz%G&CsCx(?xgO9ZrNpmewFu7`RKCRYCm`E`?PLoQ4=q|=y-PvO=K3QY9%j+d1*P~aTr zMk4RZxOfRTWUv!ht|uxMUVgO`z-b2Hy!l#FAb~l+T3k~A2`JGbJ~qGS=z}?Xo!RyM4^)P3`Myt{?aPlsz??&NeSNdZdjLTIyaD{O|@QnPp^U zQ^~goI21PjefMo4SDUv?kPr(lf%K3LLoTyiE}c>Y-c@=zsf$^xD2YpxELv$+mz^I@ zaFvbt{pv9xm0QDeYH2HlYein9%l$GWhCWI9B~?j=7lx049%u{lFNdKkJ-UZwOBpXfa#TEc1JQZKSRNl1ZlSjYtZXF^b?Qd_PqjEi%x~}Mga-X5Jn~PQ<(EB}m`6dylNN`2~U(neR;eG=_R0F2QAe3p;nPJ7u z#`O#osw4|L!?Y#Jy-+z-_DXt3bH*G6ykd7gW1aX7gwsv9c?{p|b~;W^&U*4Z|NT`) zTjraBeizLv$zZWC2cXL-r$&~X`coZ zbxk2MY=aLf#qy7`ZS&<%x+M&U lpECbXAMpP--OuCap!0}AWoMxWUKhVC(NNWaS1a2_{VxV6uWtYV literal 0 HcmV?d00001 diff --git a/inference-app/Assets/preprocess.py b/inference-app/Assets/preprocess.py new file mode 100644 index 00000000..9ef525ff --- /dev/null +++ b/inference-app/Assets/preprocess.py @@ -0,0 +1,19 @@ +from PIL import Image +import numpy as np +import sys +import os +from torchvision import transforms + +preprocess = transforms.Compose( + [ + transforms.Resize(320), + transforms.CenterCrop(320), + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ] +) + + +def get_arr_from_image(img): + arr = preprocess(img).unsqueeze(0).cpu().detach().numpy() + return arr diff --git a/inference-app/Inference-App.md b/inference-app/Inference-App.md new file mode 100644 index 00000000..0d6e6c39 --- /dev/null +++ b/inference-app/Inference-App.md @@ -0,0 +1,23 @@ +## Inference App - LLAMA + +Given an model onnx file, OnnxBridge can be used to generate an executable which can be run on two VMs, Server and Client (owning the model weights and input image respectively) and a Dealer (which pre-generates the randomness for the inference.), to get the secure inference output. Along with this we can use the Inference-App to give a GUI for inferencing. To generate the scripts involved in the Inference-App, use the `ezpc-cli-app.sh` script by running the following command locally (not neccesarily on a VM): + +```bash +./ezpc-cli-app.sh -m /absolute/path/to/model.onnx -s server-ip -d dealer-ip [-nt num_threads] +``` + +In the above command, the paths are not local, but are the locations on the respective VMs. That is, `/absolute/path/to/model.onnx` is the path of model.onnx file on the server VM.
diff --git a/inference-app/Inference-App.md b/inference-app/Inference-App.md
new file mode 100644
index 00000000..0d6e6c39
--- /dev/null
+++ b/inference-app/Inference-App.md
@@ -0,0 +1,23 @@
+## Inference App - LLAMA
+
+Given a model ONNX file, OnnxBridge can be used to generate an executable which is run on two VMs, a Server and a Client (owning the model weights and the input image, respectively), together with a Dealer (which pre-generates the randomness for the inference), to get the secure inference output. On top of this, the Inference-App provides a GUI for the inference. To generate the scripts involved in the Inference-App, use the `ezpc-cli-app.sh` script by running the following command locally (not necessarily on a VM):
+
+```bash
+./ezpc-cli-app.sh -m /absolute/path/to/model.onnx -s server-ip -d dealer-ip [-nt num_threads]
+```
+
+In the above command, the paths are not local but are the locations on the respective VMs. That is, `/absolute/path/to/model.onnx` is the path of the model.onnx file on the server VM. We also have to write the preprocessing script for our use case; refer to the preprocessing file of the [chexpert demo](../frontend/Assets/preprocess.py). If your preprocessing script uses additional Python packages, make sure they are installed on the frontend VM. Also, ensure that the client can communicate with the server through the IP address provided, on ports in the range 42002-42100. Optionally, you can also pass the following arguments:
+
+- `-scale <scale>`: the scaling factor for the model input (default: `15`)
+- `-bl <bitlength>`: the bitlength to use for the MPC computation (default: `40`)
+- `-nt <num_threads>`: the number of threads to use for MPC computation (default: `4`)
+
+The script generates 4 scripts:
+
+- `server.sh` - Transfer this script to the server VM in any empty directory. Running this script (without any argument) reads the ONNX file, strips the model weights out of it, dumps sytorch code, zips the code required to be sent to the client and dealer, and waits for the client to download the zip. Once the zip is transferred, the script waits for the dealer to generate the randomness and then starts the inference once the client connects. Once an inference is complete, it downloads fresh randomness generated by the dealer and again waits for the client to start inference; this happens in a loop to support multiple inferences.
+- `client-offline.sh` - Transfer this script to the client VM in any empty directory. Running this script fetches the stripped code from the server and compiles the model. This script must be run on the client VM in parallel while the server VM is running its server script. It downloads the keys from the dealer, then starts a Flask server that listens for inference requests from the frontend on port 5000 (a minimal sketch of such a listener follows this file's diff). There it receives an image as a numpy array, initiates secure inference with the server, returns the result to the frontend, and starts receiving keys from the dealer again.
+- `client-online.sh` - It takes as input the absolute path of the numpy array of the image to run inference on. Transfer this script to the client VM in the same directory. Running this script downloads randomness from the dealer, preprocesses the input, connects to the server, and starts the inference. After the secure inference is complete, the inference output is printed and saved in the `output.txt` file. This script needs to be run every time a new inference is made on a new input.
+- `dealer.sh` - Transfer this script to the dealer VM in any empty directory. Running this script waits for the server to send the zip file, after which it generates the correlated randomness and allows the client and server scripts to automatically download their shares. In parallel, the frontend also downloads masks from the dealer. Once transferred, it generates a fresh pair of correlated randomness keys and again allows the server and client to download them in a loop, to support multiple inferences. While the dealer is generating keys, none of the client, server, or frontend is allowed to download keys or masks.
+
+- Use `script.sh clean` with any of the above scripts to clean up the setup. This removes all files created by the script from the current directory, except the script itself. [Note: **This might remove all files from the current directory; keep a backup of any important files.**]
\ No newline at end of file
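As a rough illustration of the listener that `client-offline.sh` starts, the sketch below shows a Flask server on port 5000 that accepts a masked image as a numpy array. This is not the code shipped by the generated scripts; the endpoint path, payload encoding, and the `run_secure_inference` helper are all assumptions made for the example.

```python
# Hypothetical sketch of the port-5000 Flask listener described above.
import numpy as np
from flask import Flask, jsonify, request

app = Flask(__name__)


def run_secure_inference(arr):
    # Placeholder: the real script launches the compiled model binary,
    # feeds it the masked input, and collects the MPC output.
    return {"status": "not implemented in this sketch", "n_values": int(arr.size)}


@app.route("/inference", methods=["POST"])  # endpoint name is an assumption
def inference():
    arr = np.frombuffer(request.data, dtype=np.float32)  # encoding is an assumption
    return jsonify(run_secure_inference(arr))


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
```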
diff --git a/inference-app/README.md b/inference-app/README.md
new file mode 100644
index 00000000..a658326b
--- /dev/null
+++ b/inference-app/README.md
@@ -0,0 +1,154 @@
+# Inference App
+This Gradio app gives a frontend to [EzPC](https://github.com/mpc-msri/EzPC) and enables you to run secure inference on images with a pretrained model and get the results in a UI-based setup.
+Following are the system requirements and steps to run the Inference-App for secure inference on X-ray images with a CheXpert model.
+
+
+
+# System Requirements
+To successfully execute this demo, we will need three **Ubuntu** VMs [tested on Ubuntu 20.04.6 LTS]:
+1. **Dealer** : Generates pre-computed randomness and sends it to the Client and Server for each inference.
+2. **Server** : This party owns the model and _does not share its model weights with the Dealer/Client_; it therefore uses EzPC's secure multi-party computation to achieve secure inference.
+3. **Client** : This party acts as the client but _does not hold any data by itself_; it gets the masked image from the frontend, so this party itself _can't see the image data in cleartext_. On receiving the masked image, it starts the secure inference with the Server and returns the result back to the frontend.
+
+
+Additionally, we need a machine to run the frontend on. This is independent of OS and can be run on the Client machine as well (if a UI is available on the Client VM), since the frontend runs in a browser.
+
+Notes:
+- Frontend should be able to communicate with Dealer and Client over port 5000.
+- Server should be able to communicate with Dealer and Client over port 8000.
+- Dealer should be able to communicate with Server and Client over port 9000.
+- Server and Client should be able to communicate over ports 42003-42005.
+
+
+# Setup
+
+1. On all Ubuntu VMs, install dependencies:
+```bash
+sudo apt update
+sudo apt install libeigen3-dev cmake build-essential git zip
+```
+
+2. On all Ubuntu VMs, install the Python dependencies in a virtual environment.
+```bash
+# Demo directory where we will install our dependencies and follow all the further steps.
+mkdir CHEXPERT-DEMO
+cd CHEXPERT-DEMO
+
+sudo apt install python3.8-venv
+python3 -m venv venv
+source venv/bin/activate
+
+wget https://raw.githubusercontent.com/mpc-msri/EzPC/master/OnnxBridge/requirements.txt
+pip install --upgrade pip
+sudo apt-get install python3-dev build-essential
+pip install -r requirements.txt
+pip install tqdm pyftpdlib flask
+```
+
+3. **SERVER** : Download the ONNX file for the CheXpert model and make a temporary directory.
+```bash
+# while inside CHEXPERT-DEMO
+wget "https://github.com/bhatuzdaname/models/raw/main/chexpert.onnx" -O chexpert.onnx
+mkdir play
+cd play
+```
+
+4. **CLIENT** : Make a temporary directory.
+```bash
+# while inside CHEXPERT-DEMO
+mkdir play
+cd play
+```
+
+5. **DEALER** : Make a temporary directory.
+```bash
+# while inside CHEXPERT-DEMO
+mkdir play
+cd play
+```
+
+6. **FRONTEND** : On the system being used as the frontend, follow the instructions below to set up the webapp.
+```bash
+# clone repo
+git clone https://github.com/mpc-msri/EzPC
+cd EzPC
+
+# create virtual environment and install dependencies
+sudo apt update
+sudo apt install python3.8-venv
+python3 -m venv mlinf
+source mlinf/bin/activate
+pip install --upgrade pip
+sudo apt-get install python3-dev build-essential
+pip install -r inference-app/requirements.txt
+```
+
+7. **FRONTEND** : Generate the scripts and transfer them to the respective machines. If the server, client and dealer are in the same virtual network, pass the private network IPs in the `ezpc-cli-app.sh` command.
+```bash
+cd inference-app
+chmod +x ezpc-cli-app.sh
+./ezpc-cli-app.sh -m /home/<user>/CHEXPERT-DEMO/chexpert.onnx -s <server-ip> -d <dealer-ip> [ -nt <num-threads> ]
+scp server.sh <user>@<server-ip>:/home/<user>/CHEXPERT-DEMO/play/
+scp dealer.sh <user>@<dealer-ip>:/home/<user>/CHEXPERT-DEMO/play/
+scp client-offline.sh <user>@<client-ip>:/home/<user>/CHEXPERT-DEMO/play/
+scp client-online.sh <user>@<client-ip>:/home/<user>/CHEXPERT-DEMO/play/
+```
+In the above commands in step 7, the file paths and directories are absolute paths on the Ubuntu VMs used. To know more about the `ezpc-cli-app.sh` script, see [link](/inference-app/Inference-App.md).

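+Step 8 below downloads the model-specific `preprocess.py` used by the webapp; `constants.py` imports a `get_arr_from_image` function from it. For orientation, here is a minimal hypothetical sketch of such a function, assuming this demo's 1x3x320x320 float input; the downloaded file is the authoritative version:
+
+```python
+# Hypothetical sketch of a preprocess function (not the actual Assets/preprocess.py):
+# assumes a 320x320 RGB model input and returns a float32 array of shape
+# (1, 3, 320, 320), which app.py then scales by 2**scale and masks.
+import numpy as np
+from PIL import Image
+
+def get_arr_from_image(img: Image.Image) -> np.ndarray:
+    img = img.convert("RGB").resize((320, 320))      # match the model's H x W
+    arr = np.asarray(img, dtype=np.float32) / 255.0  # pixels in [0, 1]
+    arr = np.transpose(arr, (2, 0, 1))               # HWC -> CHW
+    return arr.reshape((1, 3, 320, 320))             # add the batch dimension
+```
+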
+On all Ubuntu VMs, make the bash scripts executable and execute them.
+
+```bash
+# (on server)
+chmod +x server.sh
+./server.sh
+
+# (on dealer)
+chmod +x dealer.sh
+./dealer.sh
+
+# (on client)
+chmod +x client-offline.sh client-online.sh
+./client-offline.sh
+```
+
+8. **FRONTEND** : Set up & run the webapp:
+#### Create a `.env` file inside the `EzPC/inference-app` directory to store the secrets as environment variables ( `_URL` is the IP address of the Dealer ); the file should look as below:
+       _URL = "X.X.X.X"
+       _USER = "frontend"
+       _PASSWORD = "frontend"
+       _FILE_NAME = "masks.dat"
+       _CLIENT_IP = "X.X.X.X"
+
+Download the preprocessing file for the image (specific to the model) inside the `/inference-app` directory:
+```bash
+# This file takes in an image as input,
+# preprocesses it and returns it as a numpy array of the size required by the model.
+wget "https://raw.githubusercontent.com/mpc-msri/EzPC/master/inference-app/Assets/preprocess.py" -O preprocess.py
+```
+
+```bash
+# Next we download an example image for the app.
+cd Assets
+mkdir examples && cd examples
+wget "https://raw.githubusercontent.com/drunkenlegend/ezpc-warehouse/main/Chexpert/cardiomegaly.jpg" -O 1.jpg
+cd ../..
+```
+
+***Note:***
+
+    In case you use some other model for the demo and customise the webapp to fit your model,
+    modify the USER_INPUTS in the constants.py file in the /inference-app directory.
+
+```bash
+# while inside the inference-app directory
+python app.py
+```
+
+Open the URL received after running the last command and play along:
+1. Upload X-ray image.
+2. Get Encryption Keys
+3. Encrypt Image
+4. Start Inference
+
+
+
+
diff --git a/inference-app/app.py b/inference-app/app.py
new file mode 100644
index 00000000..1d44b14d
--- /dev/null
+++ b/inference-app/app.py
@@ -0,0 +1,311 @@
+import gradio as gr
+import time, os
+from PIL import Image
+import numpy as np
+import ftplib
+import requests
+from tqdm import tqdm
+from dotenv import load_dotenv
+
+load_dotenv()
+
+from constants import (
+    desc,
+    Input_Shape,
+    EXAMPLES,
+    preprocess,
+    dims,
+    scale,
+    mode,
+    labels_map,
+)
+
+url = os.getenv("_URL")
+user = os.getenv("_USER")
+passwd = os.getenv("_PASSWORD")
+file_name = os.getenv("_FILE_NAME")
+client_ip = os.getenv("_CLIENT_IP")
+
+
+print("Starting the demo...")
+with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
+    gr.Markdown(
+        f"""

+        # Securely Inferencing a Machine Learning model using EzPC
+
+        [EzPC](https://github.com/mpc-msri/EzPC) — Project
+
+        {desc}
+
+        Try out the below app, and see the tutorial for more info!

+ """ + ) + + gr.Markdown("## Client side") + + # Step 1 Input Image + gr.Markdown("### Step 1: Upload an image. ") + gr.Markdown( + f"The image will automatically be resized to shape {Input_Shape} as the input size for lenet model. " + ) + + with gr.Row(): + input_image = gr.Image( + value=None, + label="Upload an image here.", + shape=(dims["h"], dims["w"]), + source="upload", + interactive=True, + image_mode=mode, + type="pil", + ) + examples = gr.Examples( + examples=EXAMPLES, + inputs=[input_image], + examples_per_page=5, + label="Examples to use.", + ) + + # Step 2 Get Mask from Dealer + gr.Markdown( + "### Step 2: Click on the button below to get encryption keys from dealer." + ) + dealer_status = gr.Textbox( + label="Status", placeholder="Encryption Keys status will be shown here." + ) + get_mask_button = gr.Button(value="Get Encryption Keys", interactive=True) + + # Step 3 Mask Input Image + gr.Markdown("### Step 3: Click on the button below to encrypt the image.") + with gr.Row(): + in_image = gr.Image( + value=None, + label="Input Image", + shape=(dims["h"], dims["w"]), + interactive=False, + image_mode=mode, + type="pil", + ).style(width=256, height=256) + out_image = gr.Image( + value=None, + label="Encrypted Image", + shape=(dims["h"], dims["w"]), + interactive=False, + ).style(width=256, height=256) + mask_button = gr.Button(value="Encrypt Image", interactive=True) + + # Step 4 Start Secure Inference + gr.Markdown( + "### Step 4: Click on the button below to start secure inference with Encrypted Image." + ) + with gr.Column(): + inference_status = gr.Textbox( + show_label=False, + placeholder="Inference status will be shown here.", + interactive=False, + ) + inference_button = gr.Button(value="Start Secure Inference", interactive=True) + prediction = gr.Label("Prediction: ", interactive=False, visible=False) + + def show_progress(progress=gr.Progress()): + for i in range(10): + time.sleep(0.1) + progress(i / 10, desc="Encrypting Image") + return True + + def update_input_image(input_image): + return input_image + + def check_dealer_status(progress=gr.Progress()): + try: + progress(0.001, desc="Connecting with Dealer\n Please wait...") + progress(0.035, desc="Dealer is still generating keys\n Please wait...") + ftp = ftplib.FTP() + print(f"Connecting to {url}") + ftp.connect(url, 9000) + progress(0.05, desc="Authenticating with Dealer") + ftp.login(user=user, passwd=passwd) + progress(0.1, desc="Authenticated Successfully") + + # Switch to binary mode + ftp.sendcmd("TYPE i") + + # Get the size of the file on the server + file_size = ftp.size(file_name) + print(f"File size: {file_size}") + + xbar = 0.1 + # Download the file and display a progress bar + with open(file_name, "wb") as f: + with tqdm( + unit="B", unit_scale=True, unit_divisor=1024, total=file_size + ) as pbar: + + def callback(data): + f.write(data) + pbar.update(len(data)) + progress( + xbar + (1 - xbar) * pbar.n / file_size, + desc="Downloading Encryption Keys", + ) + + ftp.retrbinary(f"RETR {file_name}", callback) + + ftp.quit() + return { + dealer_status: gr.update( + value="Encryption Keys received from dealer.", visible=True + ) + } + + except Exception as e: + print(f"Error: {e}") + # print(f"Error: Dealer not ready.") + return { + dealer_status: gr.update( + value="Dealer not ready, please try again after some time.", + visible=True, + ) + } + + def mask_image(input_image, progress=gr.Progress()): + arr = preprocess(input_image) + + # Open the file for reading + with open("masks.dat", "r") as f: + # Read 
the contents of the file as a list of integers + data = [int(line.strip()) for line in f.readlines()] + + np_mask = np.array(data).reshape((1, dims["h"], dims["w"], dims["c"])) + np_mask = np.transpose(np_mask, (0, 3, 1, 2)) + + print("Masking Image") + arr_save = arr.copy() + arr_save = arr_save * (1 << scale) + arr_save = arr_save.astype(np.int64) + arr_save = arr_save + np_mask + np.save("masked_image.npy", arr_save) + + # for debugging + # with open("masked_inp.inp", "w") as f: + # for x in np.nditer(arr_save, order='C'): + # f.write(str(x) + "\n") + + if mode == "RGB": + arr_save = arr_save.reshape(Input_Shape[1:]) + print(arr_save.shape) + arr_converted = np.transpose(arr_save, (1, 2, 0)) + updated_image = Image.fromarray(arr_converted, mode=mode) + show_progress(progress) + print(updated_image.size) + elif mode == "L": + arr_save = arr_save.reshape(Input_Shape[2:]) + print(arr_save.shape) + updated_image = Image.fromarray(arr_save, mode=mode) + show_progress(progress) + print(updated_image.size) + else: + print("Invalid Mode") + return None + return updated_image + + def start_inference(in_image, progress=gr.Progress()): + print("Starting Inference") + url = f"http://{client_ip}:5000/inference" + file_path = "masked_image.npy" + with open(file_path, "rb") as file: + try: + response = requests.get(url, files={"file": file}) + + if response.status_code == 200: + with open("output.txt", "wb") as file: + print(response.content) + file.write(response.content) + else: + print("Error:", response.status_code) + return { + inference_status: gr.update( + value=f"Error {response.status_code} \nClient in Setup Phase...", + visible=True, + ) + } + + except requests.Timeout: + print("Connection timeout.") + return { + prediction: gr.update( + value=f"Connection Timedout. \nClient in Setup Phase...", + visible=True, + ) + } + except requests.HTTPError as e: + print("HTTP Error:", e) + return { + prediction: gr.update( + value=f"Error {e} \nClient in Setup Phase...", visible=True + ) + } + except requests.RequestException as e: + print("Error:", e) + return { + prediction: gr.update( + value=f"Connection Refused. 
\nClient in Setup Phase...", + visible=True, + ) + } + + # Read the contents of the file as a list of integers and return the index of max value + with open("output.txt", "r") as f: + # Read the contents of the file as a list of integers + data_as_str = [line.strip() for line in f.readlines()] + data = data_as_str[0].split(" ") + data = [float(i) for i in data] + # find the index of max value + print(data) + print(type(data)) + print(type(data[0])) + index = data.index(max(data)) + print(f"Prediction: {labels_map[index]}") + return { + prediction: gr.update( + value=f"Prediction: {labels_map[index]}", visible=True + ) + } + + get_mask_button.click(fn=check_dealer_status, inputs=[], outputs=[dealer_status]) + + mask_button.click(fn=mask_image, inputs=[input_image], outputs=[out_image]) + + inference_button.click( + fn=start_inference, inputs=[out_image], outputs=[inference_status, prediction] + ) + + input_image.change(fn=update_input_image, inputs=[input_image], outputs=[in_image]) + +demo.queue(concurrency_count=20).launch(share=False) diff --git a/inference-app/constants.py b/inference-app/constants.py new file mode 100644 index 00000000..8c486cd2 --- /dev/null +++ b/inference-app/constants.py @@ -0,0 +1,53 @@ +from pathlib import Path +from preprocess import get_arr_from_image + +# This repository's directory +REPO_DIR = Path(__file__).parent + +INPUT_EXAMPLES_DIR = REPO_DIR / "Assets" / "examples" +EXAMPLES = [str(image) for image in INPUT_EXAMPLES_DIR.glob("**/*")] + + +# ********** USER_INPUTS STARTS ********** + +# Description +desc = "In this example app, we demonstrate how infer any Chest Xray with a model trained on Chexpert Dataset in a secure manner using EzPC." + +# preprocess is a function that takes in an image and returns a numpy array +preprocess = get_arr_from_image + +# The input shape of the model, batch size should be 1 +Input_Shape = (1, 3, 320, 320) +assert Input_Shape[0] == 1, "Batch size should be 1" +dims = { + "c": 3, + "h": 320, + "w": 320, +} + +scale = 15 + +# Labels +labels_map = { + 0: "No Finding", + 1: "Enlarged Cardiomediastinum", + 2: "Cardiomegaly", + 3: "Lung Lesion", + 4: "Lung Opacity", + 5: "Edema", + 6: "Consolidation", + 7: "Pneumonia", + 8: "Atelectasis", + 9: "Pneumothorax", + 10: "Pleural Effusion", + 11: "Pleural Other", + 12: "Fracture", + 13: "Support Devices", +} + +# ********** USER_INPUTS ENDS ********** + +if dims["c"] == 3: + mode = "RGB" +elif dims["c"] == 1: + mode = "L" diff --git a/inference-app/ezpc-cli-app.sh b/inference-app/ezpc-cli-app.sh new file mode 100644 index 00000000..e812c1ed --- /dev/null +++ b/inference-app/ezpc-cli-app.sh @@ -0,0 +1,447 @@ +#!/bin/bash + +# Default values +BACKEND="LLAMA" +SCALE="15" +BITLENGTH="40" +NUMTHREADS="4" + +# Parse command-line arguments +while [[ $# -gt 0 ]] +do + key="$1" + case $key in + -m|--model) + MODEL_PATH="$2" + shift # past argument + shift # past value + ;; + -c|--client) + CLIENT_IP="$2" + shift # past argument + shift # past value + ;; + -s|--server) + SERVER_IP="$2" + shift # past argument + shift # past value + ;; + -d|--dealer) + DEALER_IP="$2" + shift # past argument + shift # past value + ;; + -b|--backend) + BACKEND="$2" + shift # past argument + shift # past value + ;; + -scale|--scale) + SCALE="$2" + shift # past argument + shift # past value + ;; + -bl|--bitlength) + BITLENGTH="$2" + shift # past argument + shift # past value + ;; + -nt|--numthreads) + NUMTHREADS="$2" + shift # past argument + shift # past value + ;; + *) + echo "Unknown option: $1" + exit 1 + ;; + 
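# (each recognised flag's case shifts twice: once past the flag itself, once past its value;
+    # anything unrecognised hits the catch-all case above and aborts)
+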
esac
+done
+
+# Check that required arguments are set
+if [ -z "$MODEL_PATH" ] || [ -z "$SERVER_IP" ] || [ -z "$DEALER_IP" ] ;
+then
+    echo "To run the secure MPC model, please ensure the following:"
+    echo "Server Files: Client Files: Dealer Files:"
+    echo "------------------- -------------- --------------"
+    echo "path to model.onnx path to image.jpg dealer IP"
+    echo "server IP"
+    echo "------------------- --------------" | column -t -s $'\t'
+    echo "Usage: $0 -m <path-to-model.onnx> -s <server-ip> -d <dealer-ip>"
+    echo "Optional: [-b <backend>] [-scale <scale>] [-bl <bitlength>] [-nt <numthreads>]"
+    exit 1
+fi
+
+# Print out arguments
+echo ------------------------------
+echo "SERVER Details:"
+echo "Model path: $MODEL_PATH"
+echo "Server IP: $SERVER_IP"
+echo ------------------------------
+echo "CLIENT Details:"
+# echo "Client IP: $CLIENT_IP"
+echo ------------------------------
+echo "DEALER Details:"
+echo "Dealer IP: $DEALER_IP"
+echo ------------------------------
+
+# Getting Model Name and Directory and Model Name without extension
+File_NAME=$(basename $MODEL_PATH)
+MODEL_DIR=$(dirname $MODEL_PATH)
+Model_Name=${File_NAME%.*}
+
+# Generating Server Script
+SERVER_SCRIPT="server.sh"
+echo "Generating Server Script for $Model_Name:"
+cat << EOF > $SERVER_SCRIPT
+#!/bin/bash
+
+# Color variables
+bg_red='\033[0;41m'
+bg_green='\033[0;42m'
+bg_yellow='\033[0;43m'
+bg_blue='\033[0;44m'
+bg_magenta='\033[0;45m'
+bg_cyan='\033[0;46m'
+clear='\033[0m'
+
+if [ "\$1" = "clean" ]; then
+    shopt -s extglob
+    echo -e "\${bg_yellow}Cleaning up\${clear}"
+    find . -type f -not -name 'server.sh' -delete
+    find . -type d -not -name 'server.sh' -delete
+    echo -e "\${bg_green}Cleaned up\${clear}"
+    shopt -u extglob
+    exit 0
+fi
+
+# Current directory
+current_dir=\$(pwd)
+echo -e "Play Area: \${bg_green}\$current_dir\${clear}"
+
+
+# Clone sytorch
+echo -e "\${bg_green}Cloning sytorch repository\${clear}"
+git clone https://github.com/mpc-msri/EzPC.git
+wait
+
+sytorch="\$current_dir/EzPC/sytorch"
+onnxbridge="\$current_dir/EzPC/OnnxBridge"
+
+echo "MODEL_DIR: $MODEL_DIR"
+
+# Copy Files to current directory
+echo -e "\${bg_green}Copying files to current directory\${clear}"
+cp $MODEL_PATH .
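+
+# OnnxBridge generates ${Model_Name}_${BACKEND}_${SCALE}.cpp from the ONNX model below;
+# passing -Do_Masking to compile_llama.sh builds it with the frontend masking path enabled.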
+ +# Compile the model +echo -e "\${bg_green}Compiling the model\${clear}" +python \$onnxbridge/main.py --path $File_NAME --backend $BACKEND --scale $SCALE --bitlength $BITLENGTH --generate code +wait +\$onnxbridge/LLAMA/compile_llama.sh "${Model_Name}_${BACKEND}_${SCALE}.cpp" -Do_Masking + +# Create a zip file of stripped model to share with client +zipfile="client_$Model_Name.zip" +# Create the zip file +echo -e "\${bg_green}Creating zip file\${clear}" +zip "\$zipfile" "optimised_$File_NAME" "${Model_Name}_${BACKEND}_${SCALE}.cpp" +echo -e "\${bg_green}Zip file created\${clear}" +wait + +# Start a Python server to serve the stripped model +echo -e "\${bg_green}Starting a Python server to serve the stripped model to Client and Dealer.\${clear}" +python \$sytorch/scripts/server.py 2 + +while true; do + # Download Keys from Dealer + echo -e "\${bg_green}Downloading keys from Dealer\${clear}" + # Set the Dealer IP address and port number is 9000 by default + Dealer_url="$DEALER_IP" + + # Get the keys from the Dealer + python \$sytorch/scripts/download_keys.py \$Dealer_url server server server.dat + wait + echo -e "\${bg_green}Downloaded Dealer Keys File\${clear}" + + # Model inference + echo -e "\${bg_green}Running model inference\${clear}" + ./${Model_Name}_${BACKEND}_${SCALE} 2 ${Model_Name}_input_weights.dat ${NUMTHREADS} + wait + echo -e "\${bg_green}Model inference completed.\${clear}" +done + +EOF + +# Finish generating Server Script +echo "Finished generating Server Script" + +# Generating Dealer Script +DEALER_SCRIPT="dealer.sh" +echo "Generating Dealer Script" +cat < $DEALER_SCRIPT +#!/bin/bash + +# Color variables +bg_red='\033[0;41m' +bg_green='\033[0;42m' +bg_yellow='\033[0;43m' +bg_blue='\033[0;44m' +bg_magenta='\033[0;45m' +bg_cyan='\033[0;46m' +clear='\033[0m' + +if [ "\$1" = "clean" ]; then + shopt -s extglob + echo -e "\${bg_yellow}Cleaning up\${clear}" + find . -type f -not -name 'dealer.sh' -delete + find . -type d -not -name 'dealer.sh' -delete + echo -e "\${bg_green}Cleaned up\${clear}" + shopt -u extglob + exit 0 +fi + +# Current directory +current_dir=\$(pwd) +echo -e "Play Area: \${bg_green}\$current_dir\${clear}" + +# Downloading Server Files +echo -e "\${bg_green}Downloading Server Files\${clear}" +# Set the server IP address and port number +SERVER_PORT="8000" + +# Loop until a 200 response is received +while true; do + echo -e "\${bg_yellow}Sending GET request to server.\${clear}" + # Send a GET request to the server and save the response status code + STATUS=\$(curl -s -w '%{http_code}' "http://$SERVER_IP:\$SERVER_PORT/client_$Model_Name.zip" --output client_$Model_Name.zip) + + echo \$STATUS + # Check if the status code is 200 + if [ \$STATUS -eq 200 ]; then + echo "Zip file downloaded successfully" + break + fi + + echo -e "\${bg_yellow}Waiting for server to generate zip file: sleeping for 10 seconds.\${clear}" + # Wait for 10 seconds before trying again + sleep 10 +done + + +# Clone sytorch +echo -e "\${bg_green}Cloning sytorch repository\${clear}" +git clone https://github.com/mpc-msri/EzPC.git +wait + +sytorch="\$current_dir/EzPC/sytorch" +onnxbridge="\$current_dir/EzPC/OnnxBridge" + +# Looking ZIP file from SERVER +echo "Looking ZIP file from SERVER" +# look for a zip file in the current directory with name "client_$Model_Name.zip" +zipfile=\$(find . -maxdepth 1 -name "client_$Model_Name.zip" -print -quit) + +if [ -z "\$zipfile" ]; then + echo "Error: Zip file not found." 
+ exit 1 +fi + +# Unzip the file +echo -e "\${bg_green}Unzipping the file\${clear}" +unzip \$zipfile +wait + +# Compile the model +echo -e "\${bg_green}Compiling the model\${clear}" +\$onnxbridge/LLAMA/compile_llama.sh "${Model_Name}_${BACKEND}_${SCALE}.cpp" -Do_Masking +wait + +# binary to generate keys +cp ${Model_Name}_${BACKEND}_${SCALE} generate_keys + +# Generate keys for 1st inference +./generate_keys 1 ${NUMTHREADS} +mkdir server +mv server.dat server/server.dat +mkdir client +mv client.dat client/client.dat +mkdir frontend +mv masks.dat frontend/masks.dat + +# Key generation and serving key files +echo -e "\${bg_green}Starting a Python server to serve keys file\${clear}" +python \$sytorch/scripts/dealer.py 0.0.0.0 + +EOF +# Finish generating Dealer Script +echo "Finished generating Dealer Script" + +# Generating Client Script +CLIENT_OFFLINE_SCRIPT="client-offline.sh" +CLIENT_ONLINE_SCRIPT="client-online.sh" +echo "Generating Client Script" +# Script accepts 1 argument: path to sytorch +cat < $CLIENT_OFFLINE_SCRIPT +#!/bin/bash + +# Color variables +bg_red='\033[0;41m' +bg_green='\033[0;42m' +bg_yellow='\033[0;43m' +bg_blue='\033[0;44m' +bg_magenta='\033[0;45m' +bg_cyan='\033[0;46m' +clear='\033[0m' + +if [ "\$1" = "clean" ]; then + shopt -s extglob + echo -e "\${bg_yellow}Cleaning up\${clear}" + find . -type f -not -name 'client-o*' -delete + find . -type d -not -name 'client-o*' -delete + echo -e "\${bg_green}Cleaned up\${clear}" + shopt -u extglob + exit 0 +fi + +# Current directory +current_dir=\$(pwd) +echo -e "Play Area: \${bg_green}\$current_dir\${clear}" + +# Downloading Server Files +echo -e "\${bg_green}Downloading Server Files\${clear}" +# Set the server IP address and port number +SERVER_PORT="8000" + +# Loop until a 200 response is received +while true; do + echo -e "\${bg_yellow}Sending GET request to server.\${clear}" + # Send a GET request to the server and save the response status code + STATUS=\$(curl -s -w '%{http_code}' "http://$SERVER_IP:\$SERVER_PORT/client_$Model_Name.zip" --output client_$Model_Name.zip) + + echo \$STATUS + # Check if the status code is 200 + if [ \$STATUS -eq 200 ]; then + echo "File downloaded successfully" + break + fi + + echo -e "\${bg_yellow}Waiting for server to generate zip file: sleeping for 10 seconds.\${clear}" + # Wait for 10 seconds before trying again + sleep 10 +done +echo -e "\${bg_green}Downloaded Server Files\${clear}" + +# Clone sytorch +echo -e "\${bg_green}Cloning sytorch repository\${clear}" +git clone https://github.com/mpc-msri/EzPC.git +wait + +sytorch="\$current_dir/EzPC/sytorch" +onnxbridge="\$current_dir/EzPC/OnnxBridge" + +# Looking ZIP file from SERVER +echo "Looking ZIP file from SERVER" +# look for a zip file in the current directory with name "client_$Model_Name.zip" +zipfile=\$(find . -maxdepth 1 -name "client_$Model_Name.zip" -print -quit) + +if [ -z "\$zipfile" ]; then + echo "Error: Zip file not found." 
+ exit 1 +fi + +# Unzip the file +echo -e "\${bg_green}Unzipping the file\${clear}" +unzip \$zipfile +wait + + +# Compile the model +echo -e "\${bg_green}Compiling the model\${clear}" +\$onnxbridge/LLAMA/compile_llama.sh "${Model_Name}_${BACKEND}_${SCALE}.cpp" -Do_Masking +wait + +# Download Keys from Dealer +echo -e "\${bg_green}Downloading keys from Dealer\${clear}" +# Set the dealer IP address and port number +Dealer_url="$DEALER_IP" +# for first inference +python \$sytorch/scripts/download_keys.py \$Dealer_url client client client.dat +wait +echo -e "${bg_green}Downloaded Dealer Keys File${clear}" + +wait + +cp \$sytorch/scripts/app.py app.py + +while true; do + echo -e "${bg_yellow}Starting Flask Server${clear}" + flask run --host=0.0.0.0 --port=5000 + echo -e "${bg_yellow}Flask Server Stopped${clear}" + + # Get the keys from the Dealer for next inference + echo -e "\${bg_green}Downloading keys from Dealer\${clear}" + python \$sytorch/scripts/download_keys.py \$Dealer_url client client client.dat + echo -e "\${bg_green}Downloaded Dealer Keys File\${clear}" +done +flask run --host=0.0.0.0 --port=5000 + +EOF + +cat < $CLIENT_ONLINE_SCRIPT +#!/bin/bash + +# Color variables +bg_red='\033[0;41m' +bg_green='\033[0;42m' +bg_yellow='\033[0;43m' +bg_blue='\033[0;44m' +bg_magenta='\033[0;45m' +bg_cyan='\033[0;46m' +clear='\033[0m' + +if [ "\$1" = "clean" ]; then + shopt -s extglob + echo -e "\${bg_yellow}Cleaning up\${clear}" + find . -type f -not -name 'client-o*' -delete + find . -type d -not -name 'client-o*' -delete + echo -e "\${bg_green}Cleaned up\${clear}" + shopt -u extglob + exit 0 +fi + +# if Image is not provided +if [ -z "\$1" ]; then + echo "Error: Image not provided." + exit 1 +fi +IMAGE_PATH=\$1 + +current_dir=\$(pwd) +sytorch="\$current_dir/EzPC/sytorch" +onnxbridge="\$current_dir/EzPC/OnnxBridge" + + +# get input file name +File_NAME=\$(basename \$IMAGE_PATH) +Image_Name=\${File_NAME%.*} + +# Prepare the input +echo -e "\${bg_green}Preparing the input\${clear}" +python \$onnxbridge/helper/convert_np_to_float_inp.py --inp \$Image_Name.npy --out \$Image_Name.inp +wait + + +# Run the model +echo -e "\${bg_green}Running the model\${clear}" +./${Model_Name}_${BACKEND}_${SCALE} 3 $SERVER_IP ${NUMTHREADS} < \$Image_Name.inp > output.txt + +# Print the output +echo -e "\${bg_green}Printing the output\${clear}" +cat output.txt +echo -e "\${bg_green}Finished\${clear}" + + +EOF +echo "Finished generating Client Script" \ No newline at end of file diff --git a/inference-app/requirements.txt b/inference-app/requirements.txt new file mode 100644 index 00000000..7374ce1b --- /dev/null +++ b/inference-app/requirements.txt @@ -0,0 +1,5 @@ +gradio +torch +torchvision +urllib3==1.26.6 +python-dotenv \ No newline at end of file diff --git a/sytorch/Toy example- multiple inference.md b/sytorch/Toy example- multiple inference.md index 5d2eddd3..bf854141 100644 --- a/sytorch/Toy example- multiple inference.md +++ b/sytorch/Toy example- multiple inference.md @@ -11,6 +11,7 @@ In the above command, the paths are not local, but are the locations on the resp - `-b `: the MPC backend to use (default: `LLAMA`) - `-scale `: the scaling factor for the model input (default: `15`) - `-bl `: the bitlength to use for the MPC computation (default: `40`) +- `-nt `: the number of threads to use for MPC computation (default: `4`) The script generates 4 scripts: @@ -76,8 +77,8 @@ cd tmp ```bash git clone https://github.com/mpc-msri/EzPC -cd EzPC -cd sytorch +cd EzPC/sytorch +chmod +x ezpc-cli-2.sh ./ezpc-cli-2.sh -m 
/home//lenet-demo-server/lenet.onnx -preprocess /home//lenet-demo-server/preprocess.py -s -d scp server.sh :/home//lenet-demo-server/tmp/ scp dealer.sh :/home//lenet-demo-dealer/tmp/ diff --git a/sytorch/Toy example- single inference.md b/sytorch/Toy example- single inference.md index 50cbc4fc..ca775147 100644 --- a/sytorch/Toy example- single inference.md +++ b/sytorch/Toy example- single inference.md @@ -11,6 +11,7 @@ In the above command, the paths are not local, but are the locations on the resp - `-b `: the MPC backend to use (default: `LLAMA`) - `-scale `: the scaling factor for the model input (default: `15`) - `-bl `: the bitlength to use for the MPC computation (default: `40`) +- `-nt `: the number of threads to use for MPC computation (default: `4`) The script generates 4 scripts: @@ -63,8 +64,8 @@ cd tmp ``` git clone https://github.com/mpc-msri/EzPC -cd EzPC -cd sytorch +cd EzPC/sytorch +chmod +x ezpc-cli.sh ./ezpc-cli.sh -m /home//lenet-demo-server/lenet.onnx -preprocess /home//lenet-demo-server/preprocess.py -s -i /home//lenet-demo-client/input.jpg scp server-offline.sh :/home//lenet-demo-server/tmp/ scp server-online.sh :/home//lenet-demo-server/tmp/ diff --git a/sytorch/ext/llama/include/llama/input_prng.h b/sytorch/ext/llama/include/llama/input_prng.h index b7c4bd83..0a6ed784 100644 --- a/sytorch/ext/llama/include/llama/input_prng.h +++ b/sytorch/ext/llama/include/llama/input_prng.h @@ -26,6 +26,7 @@ SOFTWARE. void input_prng_init(); void input_layer(GroupElement *x, GroupElement *x_mask, int size, int owner); +void input_no_prng_with_frontend(GroupElement *x, GroupElement *x_mask, int size, int owner); #define TIME_THIS_BLOCK_FOR_INPUT_IF(x, condition, accumulator) \ {\ diff --git a/sytorch/ext/llama/src/llama/input_prng.cpp b/sytorch/ext/llama/src/llama/input_prng.cpp index f095b777..83131adb 100644 --- a/sytorch/ext/llama/src/llama/input_prng.cpp +++ b/sytorch/ext/llama/src/llama/input_prng.cpp @@ -93,6 +93,7 @@ void input_layer_owner_thread(int thread_idx, int size, int owner, GroupElement void input_layer(GroupElement *x, GroupElement *x_mask, int size, int owner) { + if (size == 0) return; if (party == DEALER) { TIME_THIS_BLOCK_FOR_INPUT_IF( std::thread thread_pool[num_threads]; @@ -150,3 +151,30 @@ void input_layer(GroupElement *x, GroupElement *x_mask, int size, int owner) } counter[owner - SERVER] += size; } + +void input_no_prng_with_frontend(GroupElement *x, GroupElement *x_mask, int size, int owner) + { + if (party == DEALER) + { + std::ofstream f("masks.dat"); + for (int i = 0; i < size; ++i) + { + x_mask[i] = random_ge(bitlength); + f << x_mask[i] << std::endl; + } + } + else if (party == owner) + { + peer->send_batched_input(x, size, bitlength); + } + else + { + uint64_t *tmp = new uint64_t[size]; + peer->recv_batched_input(tmp, size, bitlength); + for (int i = 0; i < size; ++i) + { + x[i] = tmp[i]; + } + delete[] tmp; + } + } \ No newline at end of file diff --git a/sytorch/ezpc-cli-2.sh b/sytorch/ezpc-cli-2.sh index 55cba394..7d28f72b 100755 --- a/sytorch/ezpc-cli-2.sh +++ b/sytorch/ezpc-cli-2.sh @@ -4,6 +4,7 @@ BACKEND="LLAMA" SCALE="15" BITLENGTH="40" +NUMTHREADS="4" # Parse command-line arguments while [[ $# -gt 0 ]] @@ -45,6 +46,11 @@ do shift # past argument shift # past value ;; + -nt|--numthreads) + NUMTHREADS="$2" + shift # past argument + shift # past value + ;; -preprocess|--preprocess) PREPROCESS="$2" shift # past argument @@ -68,7 +74,7 @@ then echo "server IP" echo "------------------- --------------" | column -t -s $'\t' echo "Usage: $0 -m 
-preprocess -s -d " - echo "Optional: [-b ] [-scale ] [-bl ]" + echo "Optional: [-b ] [-scale ] [-bl ] [-nt ]" exit 1 fi @@ -172,7 +178,7 @@ while true; do # Model inference echo -e "\${bg_green}Running model inference\${clear}" - ./${Model_Name}_${BACKEND}_${SCALE} 2 $SERVER_IP ${Model_Name}_input_weights.dat + ./${Model_Name}_${BACKEND}_${SCALE} 2 ${Model_Name}_input_weights.dat ${NUMTHREADS} wait echo -e "\${bg_green}Model inference completed.\${clear}" done @@ -429,7 +435,7 @@ python \$onnxbridge/helper/convert_np_to_float_inp.py --inp \$Image_Name.npy --o # Run the model echo -e "\${bg_green}Running the model\${clear}" -./${Model_Name}_${BACKEND}_${SCALE} 3 $SERVER_IP < \$Image_Name.inp > output.txt +./${Model_Name}_${BACKEND}_${SCALE} 3 $SERVER_IP ${NUMTHREADS} < \$Image_Name.inp > output.txt # Print the output echo -e "\${bg_green}Printing the output\${clear}" diff --git a/sytorch/ezpc-cli.sh b/sytorch/ezpc-cli.sh index be70959c..9ac92d49 100755 --- a/sytorch/ezpc-cli.sh +++ b/sytorch/ezpc-cli.sh @@ -4,6 +4,7 @@ BACKEND="LLAMA" SCALE="15" BITLENGTH="40" +NUMTHREADS="4" # Parse command-line arguments while [[ $# -gt 0 ]] @@ -45,6 +46,11 @@ do shift # past argument shift # past value ;; + -nt|--numthreads) + NUMTHREADS="$2" + shift # past argument + shift # past value + ;; -preprocess|--preprocess) PREPROCESS="$2" shift # past argument @@ -68,7 +74,7 @@ then echo "server IP" echo "------------------- --------------" | column -t -s $'\t' echo "Usage: $0 -m -preprocess -s -i " - echo "Optional: [-b ] [-scale ] [-bl ] " + echo "Optional: [-b ] [-scale ] [-bl ] [-nt ]" exit 1 fi @@ -167,7 +173,7 @@ clear='\033[0m' # Model inference echo -e "\${bg_green}Running model inference\${clear}" -./${Model_Name}_${BACKEND}_${SCALE} 2 $SERVER_IP ${Model_Name}_input_weights.dat +./${Model_Name}_${BACKEND}_${SCALE} 2 ${Model_Name}_input_weights.dat ${NUMTHREADS} echo -e "\${bg_green}Model inference completed.\${clear}" EOF @@ -287,7 +293,7 @@ python \$onnxbridge/helper/convert_np_to_float_inp.py --inp \$Image_Name.npy --o # Run the model echo -e "\${bg_green}Running the model\${clear}" -./${Model_Name}_${BACKEND}_${SCALE} 3 $SERVER_IP < \$Image_Name.inp > output.txt +./${Model_Name}_${BACKEND}_${SCALE} 3 $SERVER_IP ${NUMTHREADS} < \$Image_Name.inp > output.txt # Print the output echo -e "\${bg_green}Printing the output\${clear}" diff --git a/sytorch/include/sytorch/backend/backend.h b/sytorch/include/sytorch/backend/backend.h index c539c013..97df7e73 100644 --- a/sytorch/include/sytorch/backend/backend.h +++ b/sytorch/include/sytorch/backend/backend.h @@ -36,7 +36,7 @@ class Backend { } void truncate(const Tensor1D &in, u64 shift, u8 mode = 0) { - truncate(in.data, in.data, shift, in.size, mode); + truncate(in.data, in.data, shift, in.d1, mode); } void truncate(T &in, u64 shift, u8 mode = 0) { @@ -55,7 +55,7 @@ class Backend { virtual void relu(const Tensor &in, const Tensor &out, const Tensor &drelu, u64 scale, int mode) NOT_IMPLEMENTED; // avgpool API - virtual void div(const Tensor &in, T divisor, u64 scale) NOT_IMPLEMENTED; + virtual void div(Tensor &in, T divisor, u64 scale) NOT_IMPLEMENTED; virtual void sumPool2D(u64 ks, u64 padding, u64 stride, const Tensor4D &in, Tensor4D &out) NOT_IMPLEMENTED; virtual void avgPool2D(u64 ks, u64 padding, u64 stride, const Tensor4D &in, Tensor4D &out, u64 scale) NOT_IMPLEMENTED; @@ -66,7 +66,13 @@ class Backend { virtual void signext(Tensor &x, u64 scale) NOT_IMPLEMENTED; // add API - virtual void add(const std::vector *> &in, const Tensor &out) 
NOT_IMPLEMENTED; + virtual void add(const std::vector *> &in, Tensor &out) NOT_IMPLEMENTED; + + virtual void gelu(const Tensor &in, const Tensor &out, u64 scale) NOT_IMPLEMENTED; + virtual void softmax(Tensor &in, Tensor &out, u64 scale) NOT_IMPLEMENTED; + virtual void layernorm(const Tensor1D &A, const Tensor1D &B, const Tensor &x, Tensor &y, u64 scale) NOT_IMPLEMENTED; + virtual void addbias(Tensor &x, const Tensor1D &bias) NOT_IMPLEMENTED; + virtual void scalarmul(Tensor &x, T scalar, Tensor &y) NOT_IMPLEMENTED; virtual void optimize(LayerGraphNode *root) { diff --git a/sytorch/include/sytorch/backend/cleartext.h b/sytorch/include/sytorch/backend/cleartext.h index 75de65ce..277021df 100644 --- a/sytorch/include/sytorch/backend/cleartext.h +++ b/sytorch/include/sytorch/backend/cleartext.h @@ -10,33 +10,56 @@ class ClearText : public Backend { void truncate(T *in, T *out, u64 shift, u64 size, u8 mode); static const bool probablistic = false; static const bool localTruncationEmulation = false; - static const bool numThreads = 120; + static const u64 bw = sizeof(T) * 8; + // static const u64 bw = 50; template void fastfor(u64 size, Functor f) { - if (numThreads == 1) { - for (u64 i = 0; i < size; i++) { - f(i); - } + #pragma omp parallel for + for (u64 i = 0; i < size; i++) { + f(i); + } + } + + void modbw(T* x, u64 size) + { + if constexpr (std::is_floating_point::value) { + return; + } + else if constexpr (bw == sizeof(T) * 8) { + return; } else { - std::thread threads[numThreads]; - u64 chunkSize = size / numThreads; - for (u64 i = 0; i < numThreads - 1; i++) { - threads[i] = std::thread([=, &f]() { - for (u64 j = i * chunkSize; j < (i + 1) * chunkSize; j++) { - f(j); - } - }); - } - threads[numThreads-1] = std::thread([=, &f]() { - for (u64 j = (numThreads - 1) * chunkSize; j < size; j++) { - f(j); - } + i64 mask = (1LL << (bw - 1)); + fastfor(size, [&](u64 i) { + i64 val = (x[i] + mask) % (1LL << bw); + val -= mask; + x[i] = val; }); } } + + void modbw(T &x) + { + if constexpr (std::is_floating_point::value) { + return; + } + else if constexpr (bw == sizeof(T) * 8) { + return; + } + else { + i64 val = (x + (1LL << (bw - 1))) % (1LL << bw); + val -= (1LL << (bw - 1)); + x = val; + } + } + + void modbw(Tensor &x) { modbw(x.data, x.size()); } + void modbw(Tensor1D &x) { modbw(x.data, x.size()); } + void modbw(Tensor2D &x) { modbw(x.data, x.size()); } + void modbw(Tensor4D &x) { modbw(x.data, x.size()); } + void modbw(Tensor5D &x) { modbw(x.data, x.size()); } void matmul(const Tensor2D &a, const Tensor2D &b, Tensor2D &c); void matmulTransposeA(const Tensor2D &a, const Tensor2D &b, Tensor2D &c); @@ -52,12 +75,17 @@ class ClearText : public Backend { // void truncate(const Tensor2D &in, u64 shift); // void truncate(const Tensor1D &in, u64 shift); void truncate(T &in, u64 shift); - void div(const Tensor &in, T divisor, u64 scale); + void div(Tensor &in, T divisor, u64 scale); u64 log2(u64 x); void sumPool2D(u64 ks, u64 padding, u64 stride, const Tensor4D &in, Tensor4D &out); void avgPool2D(u64 ks, u64 padding, u64 stride, const Tensor4D &in, Tensor4D &out, u64 scale); void maxPool2D(u64 ks, u64 padding, u64 stride, const Tensor4D &in, Tensor4D &out, Tensor4D &maxIdx, u64 scale, u8 mode); void batchNormInference(const Tensor1D &A, const Tensor1D &B, const Tensor &x, Tensor &y, u64 scale); - void add(const std::vector *> &in, const Tensor &out); + void add(const std::vector *> &in, Tensor &out); + void gelu(const Tensor &in, const Tensor &out, u64 scale); + void softmax(Tensor &in, Tensor &out, 
u64 scale); + void layernorm(const Tensor1D &A, const Tensor1D &B, const Tensor &x, Tensor &y, u64 scale); + void addbias(Tensor &x, const Tensor1D &bias); + void scalarmul(Tensor &x, T scalar, Tensor &y); }; diff --git a/sytorch/include/sytorch/backend/llama_base.h b/sytorch/include/sytorch/backend/llama_base.h index ca012d33..8fb9c688 100644 --- a/sytorch/include/sytorch/backend/llama_base.h +++ b/sytorch/include/sytorch/backend/llama_base.h @@ -63,46 +63,39 @@ class LlamaBase : public Backend { void initializeInferencePartyB(Tensor&data){ u64 size = data.size(); if(LlamaConfig::party == 1){ - input_layer(nullptr,data.data, size, 3); +#ifdef Do_Masking + input_no_prng_with_frontend(nullptr, data.data, size, 3); +#else + input_layer(nullptr, data.data, size, 3); +#endif } else{ Tensor tmp(data.shape); +#ifdef Do_Masking + input_no_prng_with_frontend(data.data, tmp.data, size, 3); +#else input_layer(data.data, tmp.data, size, 3); +#endif } } void initializeInferencePartyA(LayerGraphNode *root) { topologicalApply(root, [&](LayerGraphNode *node, LayerGraphNode *_root) { auto layer = node->layer; - if(layer->name == "Conv2D" || layer->name == "FC" || layer->name == "Conv3D" || layer->name == "ConvTranspose3D") { - auto& weights = layer->getweights(); - auto& bias = layer->getbias(); - if(LlamaConfig::party == 1){ - input_layer(nullptr, weights.data, weights.d1 * weights.d2, 2); - if (layer->useBias) { - input_layer(nullptr, bias.data, bias.size, 2); - } - } - else{ - Tensor2D tmp(weights.d1, weights.d2); - input_layer(weights.data, tmp.data, weights.d1 * weights.d2, 2); - if(layer->useBias){ - Tensor1D tmp2(bias.size); - input_layer(bias.data, tmp2.data, bias.size, 2); - } + auto weights = layer->getweights(); + auto bias = layer->getbias(); + if(LlamaConfig::party == 1){ + input_layer(nullptr, weights.data, weights.size, 2); + if (layer->useBias) { + input_layer(nullptr, bias.data, bias.size, 2); } } - else if (layer->name.find("BatchNormInference") != std::string::npos) { - auto bn = (BatchNormInference*) layer; - auto channel = bn->A.size; - if(LlamaConfig::party == 1){ - input_layer(nullptr, bn->A.data, channel, 2); - input_layer(nullptr, bn->B.data, channel, 2); - } - else{ - Tensor1D tmp(channel); - input_layer(bn->A.data, tmp.data, channel, 2); - input_layer(bn->B.data, tmp.data, channel, 2); + else{ + Tensor1D tmp(weights.size); + input_layer(weights.data, tmp.data, weights.size, 2); + if(layer->useBias){ + Tensor1D tmp2(bias.size); + input_layer(bias.data, tmp2.data, bias.size, 2); } } }); @@ -125,36 +118,7 @@ class LlamaBase : public Backend { } } - void initializeWeights(Sequential &model) - { - // DEALER selects the inital weights and sends them to parties as keys - for(int i = 0; i < model.layers.size(); ++i) - { - if (model.layers[i]->name == "Conv2D" || model.layers[i]->name == "FC" || model.layers[i]->name == "Conv3D" || model.layers[i]->name == "ConvTranspose3D") - { - auto &weights = model.layers[i]->getweights(); - auto &bias = model.layers[i]->getbias(); - if (LlamaConfig::party == 1) - { - // weights.fill(1); - // bias.fill(1); - LlamaConfig::server->send_ge_array(weights.data, weights.d1 * weights.d2); - LlamaConfig::server->send_ge_array(bias.data, bias.size); - LlamaConfig::client->send_ge_array(weights.data, weights.d1 * weights.d2); - LlamaConfig::client->send_ge_array(bias.data, bias.size); - weights.fill(0); - bias.fill(0); - } - else - { - LlamaConfig::dealer->recv_ge_array(weights.data, weights.d1 * weights.d2); - LlamaConfig::dealer->recv_ge_array(bias.data, 
bias.size); - } - } - } - } - - void outputA(Tensor &a) { + void outputA(Tensor &a) { outputA(a.data, a.size()); } @@ -171,11 +135,11 @@ class LlamaBase : public Backend { } void outputA(Tensor1D &a) { - outputA(a.data, a.size); + outputA(a.data, a.d1); } void output(Tensor1D &a) { - output(a.data, a.size); + output(a.data, a.d1); } void outputA(T *a, u64 sz) { @@ -369,8 +333,8 @@ class LlamaBase : public Backend { void batchNormInference(const Tensor1D &A, const Tensor1D &B, const Tensor &x, Tensor &y, u64 scale) { - assert(A.size == B.size); - assert(A.size == x.shape.back()); + assert(A.d1 == B.d1); + assert(A.d1 == x.shape.back()); assert(x.is_same_shape(y)); u64 channels = x.shape.back(); // replicate A @@ -390,9 +354,21 @@ class LlamaBase : public Backend { } - void add(const std::vector *> &in, const Tensor &out) { + void add(const std::vector *> &in, Tensor &out) { auto ct = new ClearText; ct->add(in, out); delete ct; } + + void addbias(Tensor &x, const Tensor1D &bias) { + auto ct = new ClearText; + ct->addbias(x, bias); + delete ct; + } + + void scalarmul(Tensor &x, T scalar, Tensor &y) { + auto ct = new ClearText; + ct->scalarmul(x, scalar, y); + delete ct; + } }; \ No newline at end of file diff --git a/sytorch/include/sytorch/backend/llama_extended.h b/sytorch/include/sytorch/backend/llama_extended.h index 60a5fb62..3e9c9b8f 100644 --- a/sytorch/include/sytorch/backend/llama_extended.h +++ b/sytorch/include/sytorch/backend/llama_extended.h @@ -37,6 +37,133 @@ class LlamaExtended : public LlamaBase { // maxIdx.template print<1>(); } + void gelu(const Tensor &in, const Tensor &out, u64 scale) + { + u64 sz = in.size(); + always_assert(sz == out.size()); + T t1 = (T) (sqrt(2.0 / M_PI) * (1LL << scale)); + T t2 = (T) (0.044715 * (1LL << scale)); + auto ct = new ClearText; + // t = x^2 + ElemWiseActModelVectorMult(sz, in.data, in.data, in.data, in.data, out.data, out.data); + Backend::truncate(out, scale); + + // t = x^3 + ElemWiseActModelVectorMult(sz, out.data, out.data, in.data, in.data, out.data, out.data); + Backend::truncate(out, scale); + + // t = x^3 * 0.044715 + ct->fastfor(sz, [&](u64 i) { + out.data[i] = out.data[i] * t2; + }); + Backend::truncate(out, scale); + + // t = x^3 * 0.044715 + x + ct->fastfor(sz, [&](u64 i) { + out.data[i] = out.data[i] + in.data[i]; + }); + + // t = (x^3 * 0.044715 + x )* sqrt(2/pi) + ct->fastfor(sz, [&](u64 i) { + out.data[i] = out.data[i] * t1; + }); + Backend::truncate(out, scale); + + // t = tanh((x^3 * 0.044715 + x) * sqrt(2/pi)) + // TODO: teehee + + // t = 1 + tanh((x^3 * 0.044715 + x) * sqrt(2/pi)) + ct->fastfor(sz, [&](u64 i) { + out.data[i] = out.data[i] + (1LL << scale); + }); + + // t = x * (1 + tanh((x^3 * 0.044715 + x) * sqrt(2/pi))) / 2 + ElemWiseActModelVectorMult(sz, out.data, out.data, in.data, in.data, out.data, out.data); + Backend::truncate(out, scale + 1); + + delete ct; + } + + void softmax(Tensor &in, Tensor &out, u64 scale) + { + // TODO: teehee + out.copy(in, false); + } + + void layernorm(const Tensor1D &A, const Tensor1D &B, const Tensor &x, Tensor &y, u64 scale) + { + always_assert(A.d1 == B.d1); + always_assert(A.d1 == x.shape.back()); + always_assert(x.is_same_shape(y)); + + u64 channels = x.shape.back(); + + auto ct = new ClearText; + auto shape2 = x.shape; + shape2.pop_back(); + + Tensor tmp(x.shape); + Tensor mean(shape2); + Tensor var(shape2); + + ct->fastfor(x.size() / channels, [&](u64 i) { + mean.data[i] = 0; + for (u64 j = 0; j < channels; j++) { + mean.data[i] += x.data[i * channels + j]; + } + }); + + 
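        // mean currently holds per-row sums of x; divide by the channel count to get its fixed-point mean
+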
LlamaBase::div(mean, channels, scale); + + ct->fastfor(x.size() / channels, [&](u64 i) { + for (u64 j = 0; j < channels; j++) { + tmp.data[i * channels + j] = x.data[i * channels + j] - mean.data[i]; + } + }); + + ElemWiseActModelVectorMult(tmp.size(), tmp.data, tmp.data, tmp.data, tmp.data, tmp.data, tmp.data); + + ct->fastfor(x.size() / channels, [&](u64 i) { + var.data[i] = 0; + for (u64 j = 0; j < channels; j++) { + var.data[i] += tmp.data[i * channels + j]; + } + }); + + Backend::truncate(var, scale); + LlamaBase::div(var, channels, scale); + + // TODO: invvar = invsqrt(var) + auto &invvar = var; + + ct->fastfor(x.size() / channels, [&](u64 i) { + for (u64 j = 0; j < channels; j++) { + tmp.data[i * channels + j] = x.data[i * channels + j] - mean.data[i]; + y.data[i * channels + j] = invvar.data[i]; + } + }); + + ElemWiseActModelVectorMult(tmp.size(), tmp.data, tmp.data, y.data, y.data, y.data, y.data); + Backend::truncate(y, scale); + + auto &Aexpand = tmp; + ct->fastfor(x.size() / channels, [&](u64 i) { + for (u64 j = 0; j < channels; j++) { + Aexpand.data[i * channels + j] = A(j); + } + }); + ElemWiseActModelVectorMult(tmp.size(), Aexpand.data, Aexpand.data, y.data, y.data, y.data, y.data); + + ct->fastfor(x.size() / channels, [&](u64 i) { + for (u64 j = 0; j < channels; j++) { + y.data[i * channels + j] += B(j); + } + }); + + delete ct; + } + + void doOptimize(LayerGraphNode *node, LayerGraphNode *root) { if (node->layer->doTruncationForward) { diff --git a/sytorch/include/sytorch/graph.h b/sytorch/include/sytorch/graph.h index 834c0767..471b22fd 100644 --- a/sytorch/include/sytorch/graph.h +++ b/sytorch/include/sytorch/graph.h @@ -68,6 +68,15 @@ void print_dot_graph(LayerGraphNode *root) if (node->layer != nullptr) { // std::string label = node->layer->name + "-" + std::to_string(node->layer->mode) + "-" + (node->layer->doPreSignExtension ? "true" : "false"); std::string label = node->layer->name; + if (node->layer->paramstring != "") { + std::string args = node->layer->paramstring; + std::replace(args.begin(), args.end(), '|', ','); + // remove last comma if exists + if (args.back() == ',') { + args.pop_back(); + } + label += "(" + args + ")"; + } dotfile << node->layer->name + std::to_string((uint64_t)(node->layer)) << " [label=\"" << label << "\"" + (node->mark ? 
std::string(" color=\"red\"") : std::string("")) + "];" << std::endl; for (auto &child : node->children) { dotfile << node->layer->name + std::to_string((uint64_t)(node->layer)) << " -> " << child->layer->name + std::to_string((uint64_t)(child->layer)) << ";" << std::endl; diff --git a/sytorch/include/sytorch/layers/layers.h b/sytorch/include/sytorch/layers/layers.h index 77674b60..72f9a181 100644 --- a/sytorch/include/sytorch/layers/layers.h +++ b/sytorch/include/sytorch/layers/layers.h @@ -21,6 +21,7 @@ class Layer { int forwardTruncationMode = 0; bool useBias = true; bool isTrainingMode = false; + std::string paramstring = ""; LayerGraphNode *node = nullptr; @@ -106,8 +107,8 @@ class Layer { return activation; } - virtual Tensor2D& getweights() { throw std::runtime_error("not implemented"); }; - virtual Tensor1D& getbias() { throw std::runtime_error("not implemented"); }; + virtual TensorRef getweights() { return TensorRef(nullptr, 0); }; + virtual TensorRef getbias() { return TensorRef(nullptr, 0); }; virtual std::vector get_output_dims(const std::vector> &inShapes) = 0; virtual void setBackend(Backend *b) { @@ -166,15 +167,15 @@ class Conv2D : public Layer { always_assert(a.shape.size() == 4); assert(a.shape[3] == ci); if (this->isTrainingMode) - inp.copy(a); + inp.copy(a, false); auto act_4d = this->activation.as_4d(); this->backend->conv2D(fh, fw, padding, stride, ci, co, a.as_4d(), filter, act_4d); if (this->useBias) - this->activation.as_4d().addBias(bias); + this->backend->addbias(this->activation, bias); } - Tensor2D& getweights() { return filter; } - Tensor1D& getbias() { return bias; } + TensorRef getweights() { return filter.ref(); } + TensorRef getbias() { return bias.ref(); } std::vector get_output_dims(const std::vector> &inShapes) { always_assert(inShapes.size() == 1); @@ -256,15 +257,15 @@ class Conv3D : public Layer { always_assert(a.shape.size() == 5); assert(a.shape[4] == ci); if (this->isTrainingMode) - inp.copy(a); + inp.copy(a, false); auto act_5d = this->activation.as_5d(); this->backend->conv3D(fd, fh, fw, pd, ph, pw, sd, sh, sw, dd, dh, dw, ci, co, a.as_5d(), filter, act_5d); if (this->useBias) - this->activation.addBias(bias); + this->backend->addbias(this->activation, bias); } - Tensor2D& getweights() { return filter; } - Tensor1D& getbias() { return bias; } + TensorRef getweights() { return filter.ref(); } + TensorRef getbias() { return bias.ref(); } std::vector get_output_dims(const std::vector> &inShapes) { always_assert(inShapes.size() == 1); @@ -466,16 +467,16 @@ class FC : public Layer { } void _forward(Tensor &a) { - this->inp.copy(a); + this->inp.copy(a, false); auto a_2d = a.as_2d(); auto act_2d = this->activation.as_2d(); this->backend->matmul(a_2d, weight, act_2d); if (this->useBias) - this->activation.as_2d().addBias2D(bias); + this->backend->addbias(this->activation, bias); } - Tensor2D& getweights() { return weight; } - Tensor1D& getbias() { return bias; } + TensorRef getweights() { return weight.ref(); } + TensorRef getbias() { return bias.ref(); } std::vector get_output_dims(const std::vector> &inShapes) { always_assert(inShapes.size() == 1); @@ -525,12 +526,12 @@ class BatchNormInference : public Layer { always_assert(shapes.size() == 1); auto &shape = shapes[0]; // always_assert(shape.size() == 4); - always_assert(shape.back() == this->A.size); + always_assert(shape.back() == this->A.d1); } void _forward(Tensor &a) { // always_assert(a.shape.size() == 4); - assert(a.shape.back() == this->A.size); + assert(a.shape.back() == this->A.d1); if 
(this->isTrainingMode) { std::runtime_error("BatchNormInference should not be used in training mode"); } @@ -539,11 +540,14 @@ class BatchNormInference : public Layer { } } + TensorRef getweights() { return A.ref(); } + TensorRef getbias() { return B.ref(); } + std::vector get_output_dims(const std::vector> &inShapes) { always_assert(inShapes.size() == 1); auto &inShape = inShapes[0]; // always_assert(inShape.size() == 4); - always_assert(inShape.back() == this->A.size); + always_assert(inShape.back() == this->A.d1); return inShape; } }; @@ -554,7 +558,7 @@ class Identity: public Layer { Identity() : Layer("Identity") {} void _forward(Tensor &a) { - this->activation.copy(a); + this->activation.copy(a, false); } std::vector get_output_dims(const std::vector> &inShapes) { @@ -650,11 +654,11 @@ class ConvTranspose3D : public Layer { auto act_5d = this->activation.as_5d(); this->backend->convTranspose3D(fd, fh, fw, pd, ph, pw, sd, sh, sw, ci, co, a.as_5d(), filter, act_5d); if (this->useBias) - this->activation.addBias(bias); + this->backend->addbias(this->activation, bias); } - Tensor2D& getweights() { return filter; } - Tensor1D& getbias() { return bias; } + TensorRef getweights() { return filter.ref(); } + TensorRef getbias() { return bias.ref(); } std::vector get_output_dims(const std::vector> &inShapes) { always_assert(inShapes.size() == 1); @@ -709,7 +713,7 @@ class Add: public Layer { } void _forward(Tensor &a) { - this->activation.copy(a); + this->activation.copy(a, false); } std::vector get_output_dims(const std::vector> &inShapes) { @@ -748,12 +752,14 @@ class Concat: public Layer { sz += t->size(); } + #pragma omp parallel for for(int i = 0; i < sz; ++i) { u64 l = i % outchannels; + u64 rest = i / outchannels; for(auto &a : arr) { if(l < a->shape.back()) { - this->activation.data[i] = a->data[i]; + this->activation.data[i] = a->data[rest * a->shape.back() + l]; break; } l -= a->shape.back(); @@ -763,7 +769,7 @@ class Concat: public Layer { } void _forward(Tensor &a) { - this->activation.copy(a); + this->activation.copy(a, false); } std::vector get_output_dims(const std::vector> &inShapes) { @@ -782,3 +788,245 @@ class Concat: public Layer { return outShape; } }; + +template +class GeLU: public Layer { +public: + GeLU() : Layer("GeLU") {} + + void _resize(const std::vector> &shapes) { + always_assert(shapes.size() == 1); + } + + void _forward(Tensor &a) { + this->backend->gelu(a, this->activation, this->scale); + } + + std::vector get_output_dims(const std::vector> &inShapes) { + always_assert(inShapes.size() == 1); + auto &inShape = inShapes[0]; + return inShape; + } +}; + +template +class SoftMax: public Layer { +public: + SoftMax() : Layer("SoftMax") {} + + void _resize(const std::vector> &shapes) { + always_assert(shapes.size() == 1); + always_assert(shapes[0].size() == 2); + } + + void _forward(Tensor &a) { + this->backend->softmax(a, this->activation, this->scale); + } + + std::vector get_output_dims(const std::vector> &inShapes) { + always_assert(inShapes.size() == 1); + always_assert(inShapes[0].size() == 2); + auto &inShape = inShapes[0]; + return inShape; + } +}; + +template +class LayerNorm: public Layer { +public: + Tensor1D A; // scale = s + Tensor1D B; // scale = 2s + + LayerNorm(u64 channels) : Layer("LayerNorm"), A(channels), B(channels) { + this->A.fill(0); + this->B.fill(0); + this->doTruncationForward = true; + } + + void _resize(const std::vector> &shapes) { + always_assert(shapes.size() == 1); + auto &shape = shapes[0]; + always_assert(shape.back() == 
this->A.d1); + } + + void _forward(Tensor &a) { + // always_assert(a.shape.size() == 4); + assert(a.shape.back() == this->A.d1); + this->backend->layernorm(this->A, this->B, a, this->activation, this->scale); + } + + TensorRef getweights() { return A.ref(); } + TensorRef getbias() { return B.ref(); } + + std::vector get_output_dims(const std::vector> &inShapes) { + always_assert(inShapes.size() == 1); + auto &inShape = inShapes[0]; + always_assert(inShape.back() == this->A.d1); + return inShape; + } +}; + +template +class Split: public Layer { +public: + u64 n_splits; + + Split(u64 n_splits) : Layer("Split"), n_splits(n_splits) {} + + void _resize(const std::vector> &shapes) { + always_assert(shapes.size() == 1); + auto &shape = shapes[0]; + always_assert(shape.back() % n_splits == 0); + } + + void _forward(Tensor &a) { + always_assert(a.shape.back() % n_splits == 0); + u64 split_size = a.shape.back() / n_splits; // 3 + u64 rest_size = a.size() / a.shape.back(); // 2 + + #pragma omp parallel for + for(u64 i = 0; i < a.size(); ++i) { + u64 p = i / a.shape.back(); + u64 q = i % a.shape.back(); + u64 r = q / split_size; + u64 s = q % split_size; + this->activation.data[r * split_size * rest_size + p * split_size + s] = a.data[i]; + } + } + + std::vector get_output_dims(const std::vector> &inShapes) { + always_assert(inShapes.size() == 1); + auto shape = inShapes[0]; + always_assert(shape.back() % n_splits == 0); + shape.back() /= n_splits; + shape.insert(shape.begin(), n_splits); + return shape; + } +}; + +template +class View: public Layer { +public: + i64 idx; + + View(i64 idx) : Layer("View"), idx(idx) {} + + void _resize(const std::vector> &shapes) { + always_assert(shapes.size() == 1); + // auto &shape = shapes[0]; + // always_assert(idx < shape[0]); + } + + void _forward(Tensor &a) { + // always_assert(idx < a.shape[0]); + // std::cout << (idx % a.shape[0]) << std::endl; + u64 i = (idx + a.shape[0]) % a.shape[0]; + auto v = a.view(i); + this->activation.copy(v, false); + } + + std::vector get_output_dims(const std::vector> &inShapes) { + always_assert(inShapes.size() == 1); + auto shape = inShapes[0]; + // always_assert(idx < shape[0]); + shape.erase(shape.begin()); + return shape; + } +}; + + +template +class Transpose: public Layer { +public: + Transpose() : Layer("Transpose") {} + + void _resize(const std::vector> &shapes) { + always_assert(shapes.size() == 1); + auto &shape = shapes[0]; + always_assert(shape.size() == 2); + } + + void _forward(Tensor &a) { + always_assert(a.shape.size() == 2); + #pragma omp parallel for collapse(2) + for (u64 i = 0; i < a.shape[0]; ++i) { + for (u64 j = 0; j < a.shape[1]; ++j) { + this->activation.data[j * a.shape[0] + i] = a.data[i * a.shape[1] + j]; + } + } + } + + std::vector get_output_dims(const std::vector> &inShapes) { + always_assert(inShapes.size() == 1); + auto shape = inShapes[0]; + always_assert(shape.size() == 2); + return {shape[1], shape[0]}; + } +}; + +template +class _MatMul: public Layer { +public: + _MatMul() : Layer("_MatMul") { + this->doTruncationForward = true; + } + + void _resize(const std::vector> &shapes) { + always_assert(shapes.size() == 2); + auto &shape0 = shapes[0]; + auto &shape1 = shapes[1]; + always_assert(shape0.size() == 2); + always_assert(shape1.size() == 2); + always_assert(shape0[1] == shape1[0]); + } + + void _forward(Tensor &a) { + throw std::runtime_error("single input not allowed in matmul"); + } + + void _forward(std::vector *> &a) { + always_assert(a.size() == 2); + auto &a0 = *a[0]; + auto a0_2d = 
a0.as_2d(); + auto &a1 = *a[1]; + auto a1_2d = a1.as_2d(); + auto act_2d = this->activation.as_2d(); + this->backend->matmul(a0_2d, a1_2d, act_2d); + } + + std::vector get_output_dims(const std::vector> &inShapes) { + always_assert(inShapes.size() == 2); + auto &shape0 = inShapes[0]; + auto &shape1 = inShapes[1]; + always_assert(shape0.size() == 2); + always_assert(shape1.size() == 2); + always_assert(shape0[1] == shape1[0]); + return {shape0[0], shape1[1]}; + } +}; + + +template +class _ScalarMul: public Layer { +public: + double scalar; + + _ScalarMul(double scalar) : Layer("_ScalarMul"), scalar(scalar) { + this->doTruncationForward = true; + } + + void _resize(const std::vector> &shapes) { + always_assert(shapes.size() == 1); + } + + void _forward(Tensor &a) { + T scalarFix = scalar * (1LL << this->scale); + this->backend->scalarmul(a, scalarFix, this->activation); + } + + std::vector get_output_dims(const std::vector> &inShapes) { + always_assert(inShapes.size() == 1); + auto &shape0 = inShapes[0]; + return shape0; + } +}; diff --git a/sytorch/include/sytorch/module.h b/sytorch/include/sytorch/module.h index 2f57c15a..fe26a08a 100644 --- a/sytorch/include/sytorch/module.h +++ b/sytorch/include/sytorch/module.h @@ -13,12 +13,10 @@ class SytorchModule { LayerGraphNode *root = nullptr; bool debug = true; u64 scale; - // std::map *> addLayerMap; - // std::map *> concatLayerMap; - std::vector *> allNodesInExecutionOrder; - const std::vector functionalLayers = {"Add", "Concat"}; - std::map *> functionalLayerMap; + std::vector *> allNodesInExecutionOrder; + const std::vector functionalLayers = {"Add", "Concat", "GeLU", "SoftMax", "Split", "View", "Transpose", "_MatMul", "_ScalarMul"}; + static std::map *> functionalLayerMap; public: @@ -31,38 +29,44 @@ class SytorchModule { void generateFunctionalLayerMap() { - functionalLayerMap.clear(); + // functionalLayerMap.clear(); topologicalApply(root, [=](LayerGraphNode *node, LayerGraphNode *_root) { if (std::find(functionalLayers.begin(), functionalLayers.end(), node->layer->name) != functionalLayers.end()) { std::string id = node->layer->name; for(auto& parent: node->parents) { id += "|" + std::to_string((uint64_t)(parent)); } + id = id + "|" + node->layer->paramstring; + // make sure it already doesn't exist + always_assert(functionalLayerMap.find(id) == functionalLayerMap.end()); functionalLayerMap[id] = node; } }); } - LayerGraphNode *getFunctionalNode(const std::string &layerName, std::vector *> ips) + template + LayerGraphNode *getFunctionalNode(const std::string &layerName, std::vector *> ips, Args ... args) { std::string id = layerName; for(auto& ip: ips) { id += "|" + std::to_string((uint64_t)(ip->graphNode)); } + id = id + "|" + paramstring(args...); if (functionalLayerMap.find(id) == functionalLayerMap.end()) { - std::cerr << "Layer not found" << std::endl; + std::cerr << "Layer not found = \"" << id << "\"" << std::endl; exit(1); } return functionalLayerMap[id]; } - template - Tensor& functionalGraphGen(std::vector *> &arr) + template + Tensor& functionalGraphGen(std::vector *> arr, Args ... 
@@ -92,15 +96,8 @@ class SytorchModule {
     void zero()
     {
         topologicalApply(root, [](LayerGraphNode<T> *node, LayerGraphNode<T> *_root) {
-            if (node->layer->name == "Conv2D" || node->layer->name == "FC" || node->layer->name == "Conv3D" || node->layer->name == "ConvTranspose3D") {
-                node->layer->getweights().fill(0);
-                node->layer->getbias().fill(0);
-            }
-            else if (node->layer->name == "BatchNormInference") {
-                BatchNormInference<T> *bn = (BatchNormInference<T> *) node->layer;
-                bn->A.fill(0);
-                bn->B.fill(0);
-            }
+            node->layer->getweights().zero();
+            node->layer->getbias().zero();
         });
     }

@@ -114,11 +111,17 @@ class SytorchModule {
     Tensor<T>& forward(Tensor<T> &input)
     {
-        topologicalApply(root, [](LayerGraphNode<T> *node, LayerGraphNode<T> *_root) {
-            node->numUsages = 0;
-        });
-        input.graphNode = root;
-        input.graphNode->currTensor = &input;
+        if (input.graphGenMode) {
+            return this->_forward(input);
+        }
+
+        if (input.graphNode == nullptr) { // when the module is a top level module
+            topologicalApply(root, [](LayerGraphNode<T> *node, LayerGraphNode<T> *_root) {
+                node->numUsages = 0;
+            });
+            input.graphNode = root;
+            input.graphNode->currTensor = &input;
+        }
         if (debug) {
             auto& res = this->_forward(input);
             this->activation.resize(res.shape);
@@ -153,33 +156,9 @@ class SytorchModule {
         size_t wIdx = 0;
         for (auto &node: allNodesInExecutionOrder) {
             auto layer = node->layer;
-            if(layer->name == "Conv2D" || layer->name == "FC" || layer->name == "Conv3D" || layer->name == "ConvTranspose3D") {
-                auto& weights = layer->getweights();
-                for (int j = 0; j < weights.d1; j++) {
-                    for(int k = 0; k < weights.d2; ++k) {
-                        weights(j, k) = i64(floatWeights[wIdx + weights.d2 * j + k] * (1LL << scale));
-                    }
-                }
-
-                auto wSize = weights.d1 * weights.d2;
-                wIdx += wSize;
-
-                auto& bias = layer->getbias();
-                if (layer->useBias) {
-
-                    for (int j = 0; j < bias.size; ++j) {
-                        bias(j) = i64(floatWeights[wIdx + j] * (1LL << (2*scale)));
-                    }
-
-                    wSize = bias.size;
-                    wIdx += wSize;
-                }
-                else
-                    bias.fill(0);
-            }
-            else if (layer->name.find("BatchNormInference") != std::string::npos) {
+            if (layer->name == "BatchNormInference") {
                 auto bn = (BatchNormInference<T>*) layer;
-                auto channel = bn->A.size;
+                auto channel = bn->A.d1;
                 auto gammaPtr = floatWeights + wIdx;
                 auto betaPtr = floatWeights + wIdx + channel;
                 auto meanPtr = floatWeights + wIdx + 2 * channel;
@@ -190,12 +169,82 @@ class SytorchModule {
                 }
                 wIdx += 4 * channel;
             }
+            else {
+                auto weights = layer->getweights();
+                for (u64 j = 0; j < weights.size; j++) {
+                    weights.data[j] = i64(floatWeights[wIdx + j] * (1LL << scale));
+                }
+
+                wIdx += weights.size;
+
+                auto bias = layer->getbias();
+                if (layer->useBias) {
+
+                    for (u64 j = 0; j < bias.size; ++j) {
+                        bias.data[j] = i64(floatWeights[wIdx + j] * (1LL << (2*scale)));
+                    }
+
+                    wIdx += bias.size;
+                }
+                else {
+                    bias.zero();
+                }
+            }
         }

         always_assert(wIdx == numParameters);
         delete[] floatWeights;
     }
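As a quick illustration of the fixed-point convention the loader above relies on (inferred from the code itself, not a separate spec): weights are scaled by 2^scale, while biases use 2^(2*scale), because a bias is added to the product of two scale-scaled operands before truncation.

```cpp
#include <cstdint>
#include <iostream>

// Sketch of the encoding used by loadFloatWeights/dumpi64: a float w becomes
// i64(w * 2^scale); biases live at double scale since they are added to
// un-truncated products of two scaled values.
int main() {
    const uint64_t scale = 12; // a typical LLAMA scale, assumed here
    double w = 0.03125, b = -1.5;
    int64_t wFix = (int64_t)(w * (1LL << scale));       // 128
    int64_t bFix = (int64_t)(b * (1LL << (2 * scale))); // -25165824
    std::cout << wFix << " " << bFix << "\n";
    // decoding divides by the same power of two
    std::cout << wFix / (double)(1LL << scale) << "\n"; // 0.03125
}
```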
+    void dumpi64(const std::string weightsFile)
+    {
+        std::ofstream file(weightsFile, std::ios::binary);
+        u64 scale = this->scale;
+
+        for (auto &node: allNodesInExecutionOrder) {
+            auto layer = node->layer;
+            if (layer->name == "BatchNormInference") {
+                auto bn = (BatchNormInference<T>*) layer;
+                auto channel = bn->A.d1;
+
+                for (int j = 0; j < channel; ++j) {
+                    i64 v = bn->A(j);
+                    file.write((char *)(&v), sizeof(i64));
+                }
+                for (int j = 0; j < channel; ++j) {
+                    i64 v = bn->B(j);
+                    file.write((char *)(&v), sizeof(i64));
+                }
+                for (int j = 0; j < channel; ++j) {
+                    i64 v = 0;
+                    file.write((char *)(&v), sizeof(i64));
+                }
+                for (int j = 0; j < channel; ++j) {
+                    i64 v = (1LL << scale);
+                    file.write((char *)(&v), sizeof(i64));
+                }
+
+            }
+            else {
+                auto weights = layer->getweights();
+                for (u64 j = 0; j < weights.size; j++) {
+                    i64 v = weights.data[j];
+                    file.write((char *)(&v), sizeof(i64));
+                }
+
+                auto bias = layer->getbias();
+                if (layer->useBias) {
+
+                    for (u64 j = 0; j < bias.size; ++j) {
+                        i64 v = bias.data[j];
+                        file.write((char *)(&v), sizeof(i64));
+                    }
+                }
+            }
+        }
+
+    }
+
     Tensor<T>& add(std::vector<Tensor<T> *> &arr)
     {
         if (arr[0]->graphGenMode) {
@@ -228,12 +277,103 @@ class SytorchModule {
     }

     template <typename... Args>
-    Tensor<T> &concat(Args &...args)
+    Tensor<T>& concat(Args & ... args)
     {
         auto res = collect(args...);
         return concat(res);
     }

+    Tensor<T>& gelu(Tensor<T> &a)
+    {
+        if (a.graphGenMode) {
+            auto &c = functionalGraphGen<GeLU<T>>({&a});
+            return c;
+        }
+
+        auto cNode = getFunctionalNode("GeLU", {&a});
+        auto &c = cNode->layer->forward(a);
+        return c;
+    }
+
+    Tensor<T>& softmax(Tensor<T> &a)
+    {
+        if (a.graphGenMode) {
+            auto &c = functionalGraphGen<SoftMax<T>>({&a});
+            return c;
+        }
+
+        auto cNode = getFunctionalNode("SoftMax", {&a});
+        auto &c = cNode->layer->forward(a);
+        return c;
+    }
+
+    Tensor<T>& split(Tensor<T> &a, u64 n_splits)
+    {
+        if (a.graphGenMode) {
+            auto &c = functionalGraphGen<Split<T>>({&a}, n_splits);
+            return c;
+        }
+
+        auto cNode = getFunctionalNode("Split", {&a}, n_splits);
+        auto &c = cNode->layer->forward(a);
+        return c;
+    }
+
+    Tensor<T>& view(Tensor<T> &a, i64 idx)
+    {
+        if (a.graphGenMode) {
+            auto &c = functionalGraphGen<View<T>>({&a}, idx);
+            return c;
+        }
+
+        auto cNode = getFunctionalNode("View", {&a}, idx);
+        auto &c = cNode->layer->forward(a);
+        return c;
+    }
+
+    Tensor<T>& transpose(Tensor<T> &a)
+    {
+        if (a.graphGenMode) {
+            auto &c = functionalGraphGen<Transpose<T>>({&a});
+            return c;
+        }
+
+        auto cNode = getFunctionalNode("Transpose", {&a});
+        auto &c = cNode->layer->forward(a);
+        return c;
+    }
+
+    Tensor<T>& matmul(Tensor<T> &a, Tensor<T> &b)
+    {
+        if (a.graphGenMode) {
+            auto &c = functionalGraphGen<_MatMul<T>>({&a, &b});
+            return c;
+        }
+
+        auto cNode = getFunctionalNode("_MatMul", {&a, &b});
+        std::vector<Tensor<T> *> arr = {&a, &b};
+        auto &c = cNode->layer->forward(arr);
+        return c;
+    }
+
+    Tensor<T>& scalarmul(Tensor<T> &a, double scalar)
+    {
+        if (a.graphGenMode) {
+            auto &c = functionalGraphGen<_ScalarMul<T>>({&a}, scalar);
+            return c;
+        }
+
+        auto cNode = getFunctionalNode("_ScalarMul", {&a}, scalar);
+        auto &c = cNode->layer->forward(a);
+        return c;
+    }
+
+    T invsqrt(double x)
+    {
+        double t = 1/sqrt(x);
+        return T(t * (1LL << scale));
+    }
+
     void train()
     {
         topologicalApply(root, [=](LayerGraphNode<T> *node, LayerGraphNode<T> *_root) {
@@ -247,4 +387,7 @@ class SytorchModule {
             node->layer->eval();
         });
     }
-};
\ No newline at end of file
+};
+
+template <typename T>
+std::map<std::string, LayerGraphNode<T> *> SytorchModule<T>::functionalLayerMap = std::map<std::string, LayerGraphNode<T> *>();
diff --git a/sytorch/include/sytorch/tensor.h b/sytorch/include/sytorch/tensor.h
index 5c6feba4..fc2ac7bd 100644
--- a/sytorch/include/sytorch/tensor.h
+++ b/sytorch/include/sytorch/tensor.h
@@ -7,12 +7,26 @@
 #include
 #include
 #include
+#include <filesystem>

 typedef uint64_t u64;
 typedef uint8_t u8;
 typedef int64_t i64;
 typedef int32_t i32;

+template <typename T>
+class TensorRef {
+public:
+    T* data;
+    u64 size;
+    TensorRef(T *data, u64 size) : data(data), size(size) {}
+    void zero() {
+        for(u64 i = 0; i < size; i++) {
+            data[i] = 0;
+        }
+    }
+};
+
 template <typename T>
 class Tensor5D;
@@ -146,10 +160,16 @@ class Tensor {
         }
     }

-    void copy(const Tensor<T> &other) {
+    void copy(const Tensor<T> &other, bool copyGraph = true) {
         assert_same_shape(other);
-        memcpy(data, other.data, size() * sizeof(T));
-        this->graphNode = other.graphNode;
+        // memcpy(data, other.data, size() * sizeof(T));
+        #pragma omp parallel for
+        for(u64 i = 0; i < size(); ++i)
+        {
+            data[i] = other.data[i];
+        }
+        if (copyGraph)
+            this->graphNode = other.graphNode;
     }

     void fill(T x) {
@@ -188,7 +208,11 @@ class Tensor {
             u64 curr_channel = (i / rest_size) % num_channel;
             u64 curr_rest = i % rest_size;
             u64 new_idx = curr_batch * (num_channel * rest_size) + curr_rest * num_channel + curr_channel;
+#ifdef Do_Masking
+            data[new_idx] = (i64)d;
+#else
             data[new_idx] = (i64)(d * (1LL << scale));
+#endif
         }
     }

@@ -267,6 +291,21 @@ class Tensor {
         }
     }

+    void load(const std::string filename, u64 scale)
+    {
+        size_t size_in_bytes = std::filesystem::file_size(filename);
+        always_assert(size_in_bytes == size() * 4);
+        float *floatInput = new float[size()];
+        std::ifstream file(filename, std::ios::binary);
+        file.read((char*) floatInput, size_in_bytes);
+        file.close();
+        for(u64 i = 0; i < size(); ++i)
+        {
+            data[i] = (i64)(floatInput[i] * (1LL << scale));
+        }
+        delete[] floatInput;
+    }
+
     Tensor5D<T> as_5d()
     {
         assert(this->shape.size() == 5);
@@ -285,14 +324,13 @@ class Tensor {
         return Tensor2D<T>(this->data, this->shape[0], this->shape[1]);
     }

-    void addBias(const Tensor1D<T> &bias)
+    Tensor<T> view(u64 i)
     {
-        always_assert(this->shape.back() == bias.size);
-        u64 sz = bias.size;
-        for (u64 i = 0; i < this->size(); ++i)
-        {
-            this->data[i] += bias.data[i % sz];
-        }
+        assert(i < shape[0]);
+        u64 newsize = size() / shape[0];
+        auto newshape = shape;
+        newshape.erase(newshape.begin());
+        return Tensor<T>(data + i * newsize, newshape);
     }
 };

@@ -300,16 +338,14 @@ template <typename T>
 class Tensor1D {
 public:
     T *data;
-    u64 size;
+    u64 d1;

-    Tensor1D(u64 s) : size(s), data(new T[s]) {}
+    Tensor1D(u64 s) : d1(s), data(new T[s]) {}

     void randomize(double range) {
-        for(u64 i = 0; i < this->size; i++) {
+        for(u64 i = 0; i < this->d1; i++) {
             auto r = (double)prngWeights.get<i32>();
             this->data[i] = (T)((r / (1LL << 31)) * range);
-            // this->data[i] = (T)((i % 2) * range);
-            // this->data[i] = ((T)range) / 2;
         }
     }

@@ -317,70 +353,32 @@ class Tensor1D {
         delete[] this->data;
     }

-    T &operator()(u64 i) const {
-        assert(i < this->size);
-        return this->data[i];
-    }
-
-    template <typename T2>
-    void print() const {
-        for (u64 i = 0; i < this->size; i++) {
-            std::cout << (T2)this->data[i] << " ";
-        }
-        std::cout << std::endl;
+    u64 size() const {
+        return d1;
     }

-    template <u64 bl>
-    void print() const {
-        for (u64 i = 0; i < this->size; i++) {
-            std::cout << this->data[i] % (1ULL << bl) << " ";
-        }
-        std::cout << std::endl;
+    TensorRef<T> ref() {
+        return TensorRef<T>(data, size());
     }

-    void print(u64 scale) const {
-        for (u64 i = 0; i < this->size; i++) {
-            std::cout << this->data[i] / ((double)(1ULL << scale)) << " ";
-        }
-        std::cout << std::endl;
+    T &operator()(u64 i) const {
+        assert(i < this->d1);
+        return this->data[i];
     }

     void fill(T val) {
-        for (u64 i = 0; i < this->size; i++) {
+        for (u64 i = 0; i < this->d1; i++) {
             this->data[i] = val;
         }
     }
-
-    bool isnan() const {
-        for (u64 i = 0; i < this->size; i++) {
-            if (toobig(this->data[i])) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    template <typename T2>
-    void copy(const Tensor1D<T2> &other) {
-        assert(this->size == other.size);
-        for (u64 i = 0; i < this->size; i++) {
-            this->data[i] = (T)other.data[i];
-        }
-    }
-
-    void load(const std::vector<float> &arr, int scale) {
-        for (u64 i = 0; i < this->size; i++) {
-            this->data[i] = (i64)(arr[i] * (1LL << scale));
-        }
-    }
 };
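A note on the new Tensor<T>::view(i) above: it returns a non-owning slice over the same storage. A standalone sketch of that semantics (a flat buffer standing in for the tensor's data pointer):

```cpp
#include <iostream>
#include <vector>

// Sketch of view(i)'s no-copy behavior: slicing off the leading axis yields
// a tensor aliasing the parent's buffer, so writes through the view are
// visible in the parent. Here a [2, 3] buffer's second row is "viewed".
int main() {
    std::vector<int> data(2 * 3, 0);  // shape [2, 3]
    int *row1 = data.data() + 1 * 3;  // view(1) -> shape [3], same storage
    row1[0] = 42;
    std::cout << data[3] << "\n";     // prints 42: no copy was made
}
```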
 template <typename T>
 class Tensor2D {
 public:
-    bool isOwner = true;
-    T *data;
     u64 d1, d2;
+    T *data;
+    bool isOwner = true;

     Tensor2D(u64 d1, u64 d2) : d1(d1), d2(d2), data(new T[d1 * d2]) {}

@@ -389,14 +387,20 @@ class Tensor2D {
     void randomize(double range) {
         for(u64 i = 0; i < this->d1; i++) {
             for(u64 j = 0; j < this->d2; j++) {
-                // this->data[i * this->d2 + j] = (T)((j % 2) * range);
                 auto r = (double)prngWeights.get<i32>();
                 this->data[i * this->d2 + j] = (T)((r / (1LL << 31)) * range);
-                // this->data[i * this->d2 + j] = ((T)range) / 2;
             }
         }
     }

+    u64 size() const {
+        return d1 * d2;
+    }
+
+    TensorRef<T> ref() {
+        return TensorRef<T>(data, size());
+    }
+
     void resize(u64 d1, u64 d2) {
         always_assert(this->isOwner);
         if (this->d1 == d1 && this->d2 == d2) {
@@ -419,31 +423,8 @@ class Tensor2D {
         return this->data[i * this->d2 + j];
     }

-    template <typename T2>
-    void print() {
-        for(u64 i = 0; i < this->d1; i++) {
-            for(u64 j = 0; j < this->d2; j++) {
-                std::cout << (T2)this->data[i * this->d2 + j] << " ";
-            }
-            std::cout << std::endl;
-        }
-    }
-
-    void print(u64 scale) {
-        for(u64 i = 0; i < this->d1; i++) {
-            for(u64 j = 0; j < this->d2; j++) {
-                std::cout << this->data[i * this->d2 + j] / ((double)(1ULL << scale)) << " ";
-            }
-            std::cout << std::endl;
-        }
-    }
-
     void zero() {
-        for(u64 i = 0; i < this->d1; i++) {
-            for(u64 j = 0; j < this->d2; j++) {
-                this->data[i * this->d2 + j] = (T)0;
-            }
-        }
+        fill(0);
     }

     void fill(T val) {
@@ -454,50 +435,6 @@ class Tensor2D {
         }
     }

-    void printshape() const {
-        std::cout << "(" << d1 << ", " << d2 << ")" << std::endl;
-    }
-
-    bool isnan() {
-        for(u64 i = 0; i < this->d1; i++) {
-            for(u64 j = 0; j < this->d2; j++) {
-                if (toobig(this->data[i * this->d2 + j])) {
-                    return true;
-                }
-            }
-        }
-        return false;
-    }
-
-    template <typename T2>
-    void copy(const Tensor2D<T2> &other) {
-        assert(d1 == other.d1);
-        assert(d2 == other.d2);
-        for(u64 i = 0; i < d1; i++) {
-            for(u64 j = 0; j < d2; j++) {
-                this->data[i * this->d2 + j] = (T)other.data[i * other.d2 + j];
-            }
-        }
-    }
-
-    void load(const std::vector<std::vector<float>> &arr, int scale) {
-        for(u64 i = 0; i < this->d1; i++) {
-            for(u64 j = 0; j < this->d2; j++) {
-                this->data[i * this->d2 + j] = (i64)(arr[i][j] * (1LL << scale));
-            }
-        }
-    }
-
-    void addBias(const Tensor1D<T> &bias) {
-        assert(bias.size == d2);
-
-        for (u64 i = 0; i < d1; i++) {
-            for (u64 j = 0; j < d2; j++) {
-                data[i * d2 + j] += bias(j);
-            }
-        }
-    }
-
     Tensor<T> as_nd()
     {
         return Tensor<T>(data, {d1, d2});
@@ -525,6 +462,14 @@ class Tensor4D {
         }
     }

+    u64 size() const {
+        return d1 * d2 * d3 * d4;
+    }
+
+    TensorRef<T> ref() {
+        return TensorRef<T>(data, size());
+    }
+
     void resize(u64 d1, u64 d2, u64 d3, u64 d4) {
         always_assert(isOwner);
         if (this->d1 == d1 && this->d2 == d2 && this->d3 == d3 && this->d4 == d4) {
@@ -540,22 +485,10 @@ class Tensor4D {

     void resize(const std::vector<u64> &shape) {
         always_assert(isOwner);
+        always_assert(shape.size() == 4);
         resize(shape[0], shape[1], shape[2], shape[3]);
     }

-    void addBias(const Tensor1D<T> &bias) {
-        assert(bias.size == d4);
-        for (u64 i = 0; i < d1; i++) {
-            for (u64 j = 0; j < d2; j++) {
-                for (u64 k = 0; k < d3; k++) {
-                    for (u64 l = 0; l < d4; l++) {
-                        data[i * d2 * d3 * d4 + j * d3 * d4 + k * d4 + l] += bias(l);
-                    }
-                }
-            }
-        }
-    }
-
     T& operator()(u64 i, u64 j, u64 k, u64 l) const {
         assert(i < d1);
         assert(j < d2);
@@ -564,80 +497,6 @@ class Tensor4D {
         return data[i * d2 * d3 * d4 + j * d3 * d4 + k * d4 + l];
     }

-    template <typename T2>
-    void print() const {
-        for (u64 i = 0; i < d1; i++) {
-            for (u64 j = 0; j < d2; j++) {
-                for (u64 k = 0; k < d3; k++) {
-                    for (u64 l = 0; l < d4; l++) {
-                        std::cout << (T2)data[i * d2 * d3 * d4 + j * d3 * d4 + k * d4 + l] << " ";
-                    }
-                    if (d4 > 1) {
-                        std::cout << std::endl;
-                    }
-                }
-                if (d3 > 1) {
-                    std::cout << std::endl;
-                }
-            }
-            if (d2 > 1) {
-                std::cout << std::endl;
-            }
-        }
-        if (d1 > 1) {
-            std::cout << std::endl;
-        }
-    }
-
-    template <u64 bl>
-    void print() const {
-        for (u64 i = 0; i < d1; i++) {
-            for (u64 j = 0; j < d2; j++) {
-                for (u64 k = 0; k < d3; k++) {
-                    for (u64 l = 0; l < d4; l++) {
-                        std::cout << data[i * d2 * d3 * d4 + j * d3 * d4 + k * d4 + l] % (1ULL << bl) << " ";
-                    }
-                    if (d4 > 1) {
-                        std::cout << std::endl;
-                    }
-                }
-                if (d3 > 1) {
-                    std::cout << std::endl;
-                }
-            }
-            if (d2 > 1) {
-                std::cout << std::endl;
-            }
-        }
-        if (d1 > 1) {
-            std::cout << std::endl;
-        }
-    }
-
-    void print(const u64 scale) const {
-        for (u64 i = 0; i < d1; i++) {
-            for (u64 j = 0; j < d2; j++) {
-                for (u64 k = 0; k < d3; k++) {
-                    for (u64 l = 0; l < d4; l++) {
-                        std::cout << data[i * d2 * d3 * d4 + j * d3 * d4 + k * d4 + l] / ((double)(1ULL << scale)) << " ";
-                    }
-                    if (d4 > 1) {
-                        std::cout << std::endl;
-                    }
-                }
-                if (d3 > 1) {
-                    std::cout << std::endl;
-                }
-            }
-            if (d2 > 1) {
-                std::cout << std::endl;
-            }
-        }
-        if (d1 > 1) {
-            std::cout << std::endl;
-        }
-    }
-
     u64 argmax(u64 i) {
         assert(d3 == 1);
         assert(d4 == 1);
@@ -653,23 +512,17 @@ class Tensor4D {
         return maxIndex;
     }

-    void load(const std::vector<std::vector<std::vector<std::vector<float>>>> &arr, int scale) {
-        for (u64 i = 0; i < d1; i++) {
-            for (u64 j = 0; j < d2; j++) {
-                for (u64 k = 0; k < d3; k++) {
-                    for (u64 l = 0; l < d4; l++) {
-                        data[i * d2 * d3 * d4 + j * d3 * d4 + k * d4 + l] = (i64)(arr[i][j][k][l] * (double(1LL << scale)));
-                    }
-                }
-            }
-        }
-    }
-
     Tensor<T> as_nd()
     {
         return Tensor<T>(data, {d1, d2, d3, d4});
     }

+    void fill(T val) {
+        for (u64 i = 0; i < size(); i++) {
+            this->data[i] = val;
+        }
+    }
+
 };

@@ -694,6 +547,14 @@ class Tensor5D {
         }
     }

+    u64 size() const {
+        return d1 * d2 * d3 * d4 * d5;
+    }
+
+    TensorRef<T> ref() {
+        return TensorRef<T>(data, size());
+    }
+
     void resize(u64 d1, u64 d2, u64 d3, u64 d4, u64 d5) {
         always_assert(isOwner);
         if (this->d1 == d1 && this->d2 == d2 && this->d3 == d3 && this->d4 == d4 && this->d5 == d5) {
@@ -710,6 +571,7 @@ class Tensor5D {

     void resize(const std::vector<u64> &shape) {
         always_assert(isOwner);
+        always_assert(shape.size() == 5);
         resize(shape[0], shape[1], shape[2], shape[3], shape[4]);
     }
diff --git a/sytorch/include/sytorch/utils.h b/sytorch/include/sytorch/utils.h
index 91cce19c..57401a33 100644
--- a/sytorch/include/sytorch/utils.h
+++ b/sytorch/include/sytorch/utils.h
@@ -299,6 +299,45 @@ void collectHelper(std::vector<T*> &res, T &a, Args & ... args)
     collectHelper(res, args...);
 }

+template <typename T>
+void collectByValueHelper(std::vector<T> &res)
+{
+
+}
+
+template <typename T, typename... Args>
+void collectByValueHelper(std::vector<T> &res, T a, Args ... args)
+{
+    res.push_back(a);
+    collectByValueHelper(res, args...);
+}
+
+template <typename T, typename... Args>
+std::vector<T> collectByValue(T first, Args ... args)
+{
+    std::vector<T> res;
+    res.push_back(first);
+    collectByValueHelper(res, args...);
+    return res;
+}
+
+template <typename... Args>
+std::string paramstring(Args ... args)
+{
+    std::stringstream ss;
+    auto arr = collectByValue(args...);
+    for (u64 i = 0; i < arr.size(); ++i)
+    {
+        ss << std::to_string(arr[i]) << "|";
+    }
+    return ss.str();
+}
+
+inline std::string paramstring()
+{
+    return "";
+}
+
 template <typename T>
 std::vector<std::vector<u64>> getShapes(const std::vector<Tensor<T> *> &tensors) {
     std::vector<std::vector<u64>> shapes;
@@ -345,3 +384,34 @@ inline void printshape(const std::vector<u64> &shape) {
     }
     std::cout << ")" << std::endl;
 }
+
+inline void sytorch_init()
+{
+    prngWeights.SetSeed(osuCrypto::toBlock(0, 0));
+    prngStr.SetSeed(osuCrypto::toBlock(time(NULL)));
+}
+
+template <typename T>
+void qkv_split(Tensor2D<T> &x, Tensor4D<T> &y, u64 n_heads)
+{
+    always_assert(x.d2 % 3 == 0);
+    u64 n_seq = x.d1;
+    u64 n_embd = x.d2 / 3;
+    always_assert(n_embd % n_heads == 0);
+    always_assert(y.d1 == 3);
+    always_assert(y.d2 == n_heads);
+    always_assert(y.d3 == n_seq);
+    always_assert(y.d4 == n_embd / n_heads);
+
+    for (u64 i = 0; i < n_seq; ++i)
+    {
+        for (u64 j = 0; j < n_embd; ++j)
+        {
+            u64 head = j / (n_embd / n_heads);
+            u64 pos = j % (n_embd / n_heads);
+            y(0, head, i, pos) = x(i, j);
+            y(1, head, i, pos) = x(i, j + n_embd);
+            y(2, head, i, pos) = x(i, j + 2 * n_embd);
+        }
+    }
+}
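As a sanity check on the layout produced by qkv_split above, here is a standalone restatement with toy dimensions (plain arrays instead of Tensor2D/Tensor4D): the fused [n_seq, 3*n_embd] QKV projection output is rearranged into [3, n_heads, n_seq, head_dim] so each head's Q, K and V become contiguous matrices.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>
typedef uint64_t u64;

int main() {
    const u64 n_seq = 2, n_embd = 4, n_heads = 2, hd = n_embd / n_heads;
    std::vector<int> x(n_seq * 3 * n_embd), y(3 * n_heads * n_seq * hd);
    for (u64 i = 0; i < x.size(); ++i) x[i] = (int)i;

    // row-major indexer for the [3, n_heads, n_seq, hd] output
    auto Y = [&](u64 a, u64 b, u64 c, u64 d) -> int & {
        return y[((a * n_heads + b) * n_seq + c) * hd + d];
    };
    for (u64 i = 0; i < n_seq; ++i)
        for (u64 j = 0; j < n_embd; ++j) {
            u64 head = j / hd, pos = j % hd;
            Y(0, head, i, pos) = x[i * 3 * n_embd + j];              // Q
            Y(1, head, i, pos) = x[i * 3 * n_embd + j + n_embd];     // K
            Y(2, head, i, pos) = x[i * 3 * n_embd + j + 2 * n_embd]; // V
        }

    // head 0's Q is now the contiguous block {x[0], x[1], x[12], x[13]}
    for (int v : y) std::cout << v << " ";
    std::cout << "\n";
}
```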
diff --git a/sytorch/scripts/app.py b/sytorch/scripts/app.py
new file mode 100644
index 00000000..46cd53f9
--- /dev/null
+++ b/sytorch/scripts/app.py
@@ -0,0 +1,34 @@
+from flask import Flask, request, send_file
+import subprocess
+import threading
+import os
+import signal
+
+app = Flask(__name__)
+shutdown_event = threading.Event()
+
+
+@app.route("/inference", methods=["POST"])  # file uploads need POST; GET bodies are not parsed
+def process_file():
+    file = request.files["file"]
+    filename = file.filename
+    file.save(filename)
+
+    subprocess.run(["./client-online.sh", "masked_image.npy"])  # the script expects this fixed upload name
+
+    # return the processed file to the user
+    response = send_file("output.txt", as_attachment=True)
+
+    # trigger server shutdown in a background thread
+    shutdown_thread = threading.Thread(target=shutdown_server)
+    shutdown_thread.start()
+
+    return response
+
+
+def shutdown_server():
+    # Wait for a brief period to allow the ongoing request to complete
+    shutdown_event.wait(timeout=2)
+
+    # Stop the Flask server by terminating the Python process
+    os.kill(os.getpid(), signal.SIGINT)
diff --git a/sytorch/scripts/dealer.py b/sytorch/scripts/dealer.py
index 20e3f067..c569ebe6 100644
--- a/sytorch/scripts/dealer.py
+++ b/sytorch/scripts/dealer.py
@@ -63,6 +63,11 @@ def on_disconnect(self):
         os.system("./generate_keys 1")
         os.system("mv server.dat server/server.dat")
         os.system("mv client.dat client/client.dat")
+
+        # Check if frontend/ exists (means that masking is enabled)
+        if os.path.exists("frontend"):
+            os.system("mv masks.dat frontend/masks.dat")
+
         FileHandler.keys_available = True
         self.log("New Keys Generated")

@@ -75,6 +80,10 @@ def main():
     authorizer.add_user("server", "server", "./server", perm="elradfmwMT")
     authorizer.add_user("client", "client", "./client", perm="elradfmwMT")

+    # Check if frontend/ exists (means that masking is enabled)
+    if os.path.exists("frontend"):
+        authorizer.add_user("frontend", "frontend", "./frontend", perm="elradfmwMT")
+
     # Instantiate FTP handler class
     handler = FileHandler
     handler.authorizer = authorizer
@@ -83,6 +92,7 @@ def main():
     handler.banner = "pyftpdlib based ftpd ready."

     # Instantiate FTP server class and listen on port 9000
+    handler.passive_ports = range(60000, 65535)
     address = (sys.argv[1], 9000)
     server = ThreadedFTPServer(address, handler)
diff --git a/sytorch/src/sytorch/backend/cleartext.cpp b/sytorch/src/sytorch/backend/cleartext.cpp
index d84de9b1..cd5a9820 100644
--- a/sytorch/src/sytorch/backend/cleartext.cpp
+++ b/sytorch/src/sytorch/backend/cleartext.cpp
@@ -11,6 +11,7 @@ void ClearText<T>::matmul(const Tensor2D<T> &a, const Tensor2D<T> &b, Tensor2D<T>
     Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eB(b.data, b.d1, b.d2);
     Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eC(c.data, c.d1, c.d2);
     eC = eA * eB;
+    modbw(c);
 }

 template <typename T>
@@ -23,6 +24,7 @@ void ClearText<T>::matmulTransposeA(const Tensor2D<T> &a, const Tensor2D<T> &b,
     Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eB(b.data, b.d1, b.d2);
     Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eC(c.data, c.d1, c.d2);
     eC = eA * eB;
+    modbw(c);
 }

 template <typename T>
@@ -34,6 +36,7 @@ void ClearText<T>::matmulTransposeB(const Tensor2D<T> &a, const Tensor2D<T> &b,
     Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eB(b.data, b.d2, b.d1);
     Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eC(c.data, c.d1, c.d2);
     eC = eA * eB;
+    modbw(c);
 }

 template <typename T>
@@ -53,6 +56,7 @@ void ClearText<T>::conv2D(u64 fh, u64 fw, u64 padding, u64 stride, u64 ci, u64 c
     Tensor2D<T> tempOutput(filter.d1, reshapedInput.d1);
     matmulTransposeB(filter, reshapedInput, tempOutput);
     reshapeOutput(tempOutput, input.d1, (((input.d2 + 2*padding - fh)/stride) + 1), (((input.d3 + 2*padding - fw)/stride) + 1), co, output);
+    modbw(output);
 }

 template <typename T>
@@ -77,6 +81,7 @@ void ClearText<T>::conv3D(u64 fd, u64 fh, u64 fw, u64 pd, u64 ph, u64 pw, u64 sd
     Tensor2D<T> tempOutput(filter.d1, reshapedInput.d1);
     matmulTransposeB(filter, reshapedInput, tempOutput);
     reshapeOutput3d(tempOutput, input.d1, newD, newH, newW, co, output);
+    modbw(output);
 }

 template <typename T>
@@ -97,6 +102,7 @@ void ClearText<T>::convTranspose3D(u64 fd, u64 fh, u64 fw, u64 pd, u64 ph, u64 p
     convTranspose3dLoop(input.d1, input.d2, input.d3, input.d4, input.d5,
         fd, fh, fw, co, pd, pd, ph, ph, pw, pw, sd, sh, sw,
         output.d2, output.d3, output.d4, input.data, filter.data, output.data);
+    modbw(output);
 }

 template <typename T>
@@ -116,7 +122,7 @@ void ClearText<T>::truncate(T *in, T *out, u64 shift, u64 size, u8 mode) {
         if constexpr (std::is_floating_point<T>::value) {
             out[i] = in[i] / ((T)(1ULL << shift));
         } else {
-            if(localTruncationEmulation) {
+            if constexpr (localTruncationEmulation) {
                 u64 a = prngStr.get<u64>();
                 u64 b = ((u64)in[i]) - a;
                 a = a >> shift;
@@ -124,9 +130,9 @@ void ClearText<T>::truncate(T *in, T *out, u64 shift, u64 size, u8 mode) {
                 out[i] = a + b;
                 return;
             }
-            u64 x0 = ((u64)in[i]) % (1ULL << shift);
-            in[i] = in[i] >> shift;
-            if (probablistic) {
+            out[i] = in[i] >> shift;
+            if constexpr (probablistic) {
+                u64 x0 = ((u64)in[i]) % (1ULL << shift);
                 u64 r = rand() % (1ULL << shift);
                 out[i] += (x0 < r ? 0 : 1);
             }
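The probabilistic branch above implements stochastic truncation: the discarded low bits decide, against a uniform random draw, whether to round the shifted value up. A standalone sketch of the idea, mirroring the source's rounding rule with plain rand() and toy values:

```cpp
#include <cstdint>
#include <cstdlib>
#include <iostream>

// Stochastic truncation sketch: x >> shift, rounded up with probability
// roughly (x mod 2^shift) / 2^shift, so the mean tracks x / 2^shift.
int64_t trunc_stochastic(int64_t x, uint64_t shift) {
    uint64_t frac = ((uint64_t)x) % (1ULL << shift); // discarded low bits
    int64_t out = x >> shift;
    uint64_t r = rand() % (1ULL << shift);
    return out + (frac < r ? 0 : 1); // same rule as the source
}

int main() {
    srand(0);
    // 10.5 in scale-12 fixed point: x = 10*4096 + 2048, shift = 12
    double acc = 0;
    for (int i = 0; i < 100000; ++i) acc += trunc_stochastic(10 * 4096 + 2048, 12);
    std::cout << acc / 100000 << "\n"; // ~10.5 on average
}
```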
@@ -184,10 +190,11 @@ void ClearText<T>::truncate(T &in, u64 shift) {
             in += (x0 < r ? 0 : 1);
         }
     }
+    modbw(in);
 }

 template <typename T>
-void ClearText<T>::div(const Tensor<T> &in, T divisor, u64 scale) {
+void ClearText<T>::div(Tensor<T> &in, T divisor, u64 scale) {
     // fastfor(in.size(), [&] (u64 i) {
     //     in.data[i] = in.data[i] / divisor;
     // });
@@ -197,6 +204,7 @@ void ClearText<T>::div(Tensor<T> &in, T divisor, u64 scale) {
     fastfor(in.size(), [&] (u64 i) {
         in.data[i] *= divfp;
     });
+    modbw(in);
     Backend<T>::truncate(in, scale, 3);
 }

@@ -228,7 +236,8 @@ void ClearText<T>::sumPool2D(u64 ks, u64 padding, u64 stride, const Tensor4D<T>
 template <typename T>
 void ClearText<T>::avgPool2D(u64 ks, u64 padding, u64 stride, const Tensor4D<T> &in, Tensor4D<T> &out, u64 scale) {
     sumPool2D(ks, padding, stride, in, out);
-    div(out.as_nd(), (T)(ks*ks), scale);
+    auto out_nd = out.as_nd();
+    div(out_nd, (T)(ks*ks), scale);
 }

 template <typename T>
@@ -278,8 +287,8 @@ void ClearText<T>::maxPool2D(u64 ks, u64 padding, u64 stride, const Tensor4D<T>
 template <typename T>
 void ClearText<T>::batchNormInference(const Tensor1D<T> &A, const Tensor1D<T> &B, const Tensor<T> &x, Tensor<T> &y, u64 scale)
 {
-    assert(A.size == B.size);
-    assert(A.size == x.shape.back());
+    assert(A.d1 == B.d1);
+    assert(A.d1 == x.shape.back());
     assert(x.is_same_shape(y));
     u64 channels = x.shape.back();

@@ -289,7 +298,7 @@ void ClearText<T>::batchNormInference(const Tensor1D<T> &A, const Tensor1D<T> &B
 }

 template <typename T>
-void ClearText<T>::add(const std::vector<Tensor<T> *> &in, const Tensor<T> &out)
+void ClearText<T>::add(const std::vector<Tensor<T> *> &in, Tensor<T> &out)
 {
     always_assert(in.size() > 0);
     always_assert(out.size() == in[0]->size());
@@ -303,6 +312,157 @@ void ClearText<T>::add(const std::vector<Tensor<T> *> &in, const Tensor<T> &out)
         }
         out.data[i] = sum;
     });
+    modbw(out);
+}
+
+template <typename T>
+T tanh(T x, u64 scale) {
+    double d = ((double) x) / (1LL << scale);
+    return (T) (tanh(d) * (1LL << scale));
+}
+
+template <typename T>
+void ClearText<T>::gelu(const Tensor<T> &in, const Tensor<T> &out, u64 scale)
+{
+    always_assert(in.size() == out.size());
+    T t1 = (T) (sqrt(2.0 / M_PI) * (1LL << scale));
+    T t2 = (T) (0.044715 * (1LL << scale));
+    fastfor(in.size(), [&](u64 i) {
+        T ini = in.data[i];
+        T t = ini * ini;
+        modbw(t);
+        truncate(t, scale);
+        t = t * ini;
+        modbw(t);
+        truncate(t, scale);
+        t = t * t2;
+        modbw(t);
+        truncate(t, scale);
+        t = t + ini;
+        t = t * t1;
+        modbw(t);
+        truncate(t, scale);
+        t = tanh(t, scale);
+        t = t + (1LL << scale);
+        t = t * ini;
+        modbw(t);
+        truncate(t, scale+1);
+        out.data[i] = t;
+    });
+}
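The fixed-point sequence above mirrors the standard tanh approximation of GeLU; a plain floating-point reference (a sketch of the intended function, not the code the backend runs) makes the pipeline explicit:

```cpp
#include <cmath>
#include <cstdio>

// Floating-point reference for ClearText<T>::gelu's fixed-point steps:
// gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
// The backend evaluates the same expression over scale-shifted integers,
// truncating after each multiplication (and by scale+1 at the end, which
// supplies the factor of 0.5).
double gelu_ref(double x) {
    return 0.5 * x * (1.0 + tanh(sqrt(2.0 / M_PI) * (x + 0.044715 * x * x * x)));
}

int main() {
    for (double x : {-2.0, -1.0, 0.0, 1.0, 2.0})
        printf("gelu(%+.1f) = %+.6f\n", x, gelu_ref(x));
}
```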
+template <typename T>
+void ClearText<T>::softmax(Tensor<T> &_in, Tensor<T> &_out, u64 scale)
+{
+    always_assert(_in.shape.size() == 2);
+    always_assert(_out.shape.size() == 2);
+    always_assert(_in.shape[0] == _out.shape[0]);
+    always_assert(_in.shape[1] == _out.shape[1]);
+    always_assert(std::is_integral<T>::value || (scale == 0));
+
+    auto in = _in.as_2d();
+    auto out = _out.as_2d();
+    auto batchSize = in.d1;
+    auto numClasses = in.d2;
+    for(int b = 0; b < batchSize; ++b) {
+        T max = in(b, 0);
+        for(u64 j = 1; j < numClasses; ++j) {
+            if(in(b, j) > max) {
+                max = in(b, j);
+            }
+        }
+        double den = 0.0;
+        double exps[numClasses];
+        for(u64 j = 0; j < numClasses; ++j) {
+            double x = in(b, j) - max;
+            if (scale == 0) {
+                exps[j] = std::exp(x);
+            } else {
+                exps[j] = std::exp(x / (1LL << scale));
+            }
+            den += exps[j];
+        }
+
+        for(u64 j = 0; j < numClasses; ++j) {
+            if (scale == 0) {
+                out(b, j) = exps[j] / den;
+            } else {
+                auto t = (exps[j] / den) * (1LL << scale);
+                out(b, j) = (T)(t);
+            }
+        }
+    }
+}
+
+template <typename T>
+T invsqrt(T x, u64 scale)
+{
+    double d = ((double) x) / (1LL << scale);
+    return (T) ((1.0 / sqrt(d)) * (1LL << scale));
+}
+
+template <typename T>
+T invsqrt_i2f(T x, u64 scale)
+{
+    double d = ((double) x);
+    return (T) ((1.0 / sqrt(d)) * (1LL << scale));
+}
+
+template <typename T>
+void ClearText<T>::layernorm(const Tensor1D<T> &A, const Tensor1D<T> &B, const Tensor<T> &x, Tensor<T> &y, u64 scale)
+{
+    always_assert(A.d1 == B.d1);
+    always_assert(A.d1 == x.shape.back());
+    always_assert(x.is_same_shape(y));
+
+    u64 channels = x.shape.back();
+
+    fastfor(x.size() / channels, [&](u64 i) {
+        T mean = 0;
+        T var = 0;
+        for (u64 j = 0; j < channels; j++) {
+            mean += x.data[i * channels + j];
+        }
+        modbw(mean);
+        mean = mean / T(channels);
+        for (u64 j = 0; j < channels; j++) {
+            var += (x.data[i * channels + j] - mean) * (x.data[i * channels + j] - mean);
+        }
+        modbw(var);
+        var = var / T(channels);
+        truncate(var, scale);
+        var = invsqrt(var, scale);
+        for (u64 j = 0; j < channels; j++) {
+            y.data[i * channels + j] = (x.data[i * channels + j] - mean) * var;
+        }
+    });
+    modbw(y);
+
+    Backend<T>::truncate(y, scale);
+
+    fastfor(x.size(), [&](u64 i) {
+        y.data[i] = y.data[i] * A(i % channels) + B(i % channels);
+    });
+}
+
+template <typename T>
+void ClearText<T>::addbias(Tensor<T> &x, const Tensor1D<T> &bias)
+{
+    always_assert(x.shape.back() == bias.d1);
+    fastfor(x.size(), [&](u64 i) {
+        x.data[i] += bias(i % bias.d1);
+    });
+    modbw(x);
+}
+
+template <typename T>
+void ClearText<T>::scalarmul(Tensor<T> &x, T scalar, Tensor<T> &y)
+{
+    always_assert(x.is_same_shape(y));
+    fastfor(x.size(), [&](u64 i) {
+        y.data[i] = x.data[i] * scalar;
+    });
+    modbw(y);
+}

 template class ClearText<i64>;
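For completeness, the layernorm kernel above corresponds to this floating-point reference (a sketch: per-row mean/variance normalization followed by the learned affine A, B, in the same order of operations as the fixed-point code, which has no epsilon term and obtains 1/sqrt(var) via the invsqrt float round-trip):

```cpp
#include <cmath>
#include <cstdio>

// Floating-point reference for ClearText<T>::layernorm over one row of
// `channels` values: y = (x - mean) / sqrt(var) * A + B.
void layernorm_ref(const double *x, double *y, const double *A,
                   const double *B, int channels) {
    double mean = 0, var = 0;
    for (int j = 0; j < channels; ++j) mean += x[j];
    mean /= channels;
    for (int j = 0; j < channels; ++j) var += (x[j] - mean) * (x[j] - mean);
    var /= channels;
    double inv = 1.0 / sqrt(var);
    for (int j = 0; j < channels; ++j) y[j] = (x[j] - mean) * inv * A[j] + B[j];
}

int main() {
    double x[4] = {1, 2, 3, 4}, y[4], A[4] = {1, 1, 1, 1}, B[4] = {0, 0, 0, 0};
    layernorm_ref(x, y, A, B, 4);
    for (double v : y) printf("%+.4f ", v); // zero-mean, unit-variance row
    printf("\n");
}
```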