From 40213c957f360bbfa740b27d67d1693e64b94102 Mon Sep 17 00:00:00 2001
From: drbh
Date: Thu, 16 May 2024 00:58:47 -0400
Subject: [PATCH] Pali gemma modeling (#1895)

This PR adds PaliGemma modeling code.

Blog post: https://huggingface.co/blog/paligemma
Transformers PR: https://github.com/huggingface/transformers/pull/30814

Install the latest changes and run with:

```bash
# get the weights
# text-generation-server download-weights gv-hf/PaliGemma-base-224px-hf

# run TGI
text-generation-launcher --model-id gv-hf/PaliGemma-base-224px-hf
```

A basic example sending various requests. Each image is embedded in the prompt
with the `![](<url>)` markdown syntax, and the full `inputs` string is what
gets sent to the server:

```python
from huggingface_hub import InferenceClient

client = InferenceClient("http://127.0.0.1:3000")

images = [
    "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png",
]

prompts = [
    "What animal is in this image?",
    "Name three colors in this image.",
    "What are 10 colors in this image?",
    "Where is the cow standing?",
    "answer en Where is the cow standing?",
    "Is there a bird in the image?",
    "Is there a cow in the image?",
    "Is there a rabbit in the image?",
    "how many birds are in the image?",
    "how many rabbits are in the image?",
]

for img in images:
    print(f"\nImage: {img.split('/')[-1]}")
    for prompt in prompts:
        # prefix the prompt with the image; the router expands this into
        # image tokens server-side
        inputs = f"![]({img}){prompt}\n"
        generated_output = client.text_generation(
            inputs, max_new_tokens=30, do_sample=False, stream=False
        )
        print(f"{prompt}\n{generated_output}")
```

---------

Co-authored-by: Nicolas Patry
---
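For reference, the same request can also be sent straight to TGI's `/generate` route. This is a minimal sketch assuming the launcher above is serving on `127.0.0.1:3000`; the payload mirrors the parameters used in the client example, and the expected answer matches the integration test snapshot added in this PR:

```python
import requests

# Query the TGI /generate endpoint directly. The image is passed inline with
# the ![](<url>) markdown syntax; the router (see the Config::Paligemma branch
# in router/src/validation.rs below) expands it into image tokens.
image = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
payload = {
    "inputs": f"![]({image})Where is the cow standing?\n",
    "parameters": {"max_new_tokens": 20, "do_sample": False},
}
resp = requests.post("http://127.0.0.1:3000/generate", json=payload)
resp.raise_for_status()
print(resp.json()["generated_text"])  # "beach" for this image/prompt pair
```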
 .github/workflows/build.yaml | 2 +-
 Dockerfile | 3 +-
 integration-tests/images/cow_beach.png | Bin 0 -> 67246 bytes
 .../test_flash_pali_gemma.json | 25 +
 .../models/test_flash_pali_gemma.py | 39 ++
 router/src/config.rs | 21 +-
 router/src/validation.rs | 24 +
 server/poetry.lock | 225 +++-
 server/pyproject.toml | 5 +-
 server/requirements_cuda.txt | 4 +-
 server/requirements_rocm.txt | 4 +-
 .../text_generation_server/layers/linear.py | 4 +-
 .../text_generation_server/models/__init__.py | 15 +
 .../custom_modeling/flash_gemma_modeling.py | 72 ++-
 .../flash_pali_gemma_modeling.py | 110 ++++
 .../models/custom_modeling/siglip.py | 565 ++++++++++++++++++
 .../models/custom_modeling/vlm.py | 20 +
 .../models/flash_causal_lm.py | 12 +
 .../models/flash_gemma.py | 13 +-
 .../models/pali_gemma.py | 123 ++++
 .../models/vlm_causal_lm.py | 9 +-
 server/text_generation_server/server.py | 7 +-
 .../utils/flash_attn.py | 3 +-
 23 files changed, 1148 insertions(+), 157 deletions(-)
 create mode 100644 integration-tests/images/cow_beach.png
 create mode 100644 integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma.json
 create mode 100644 integration-tests/models/test_flash_pali_gemma.py
 create mode 100644 server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py
 create mode 100644 server/text_generation_server/models/custom_modeling/siglip.py
 create mode 100644 server/text_generation_server/models/pali_gemma.py

diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index f1131450..55bbf407 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -27,7 +27,7 @@ jobs:
     runs-on: ubuntu-latest
     env:
       AWS_REGION: us-east-1
-      EC2_AMI_ID: ami-03cfed9ea28f4b002
+      EC2_AMI_ID: ami-0789b6925c11b1fb2
       EC2_INSTANCE_TYPE: g5.12xlarge
       EC2_SUBNET_ID: subnet-931b34f5,subnet-ecb993cd,subnet-943dc2d8,subnet-45371f1a,subnet-ee93e0df,subnet-fddc3dfc
       EC2_SECURITY_GROUP: sg-030175c435ac141d6
diff --git a/Dockerfile b/Dockerfile
index 0471450f..904936d3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -43,7 +43,7 @@ ARG PYTORCH_VERSION=2.3.0
 ARG PYTHON_VERSION=3.10
 # Keep in sync with `server/pyproject.toml
 ARG CUDA_VERSION=12.1
-ARG MAMBA_VERSION=23.3.1-1
+ARG MAMBA_VERSION=24.3.0-0
 ARG CUDA_CHANNEL=nvidia
 ARG INSTALL_CHANNEL=pytorch
 # Automatically set by buildx
@@ -181,6 +181,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
     ca-certificates \
     make \
     curl \
+    git \
     && rm -rf /var/lib/apt/lists/*

 # Copy conda with PyTorch installed
diff --git a/integration-tests/images/cow_beach.png b/integration-tests/images/cow_beach.png
new file mode 100644
index 0000000000000000000000000000000000000000..d67f8a1b2a52459dedda0b4291d91c9e557be40e
GIT binary patch
literal 67246
[67246 bytes of base85-encoded binary PNG data elided]
z(wq?W+?k!Td*?s-W1oEI&fVYm&ENTxmtSY8R5cgq7YkR7^>y3SQ*boOpGENqN*y8WHnb+2->X{i0rDSABNC(sFBPJ$&j3LLx|lF6~QqPVoDJK z6g)d;tO|sjd{b|2Z+q{DVd(pQ=+eQfcaU-R+B-k@Q!hOI+$Xu3W(}r^2{CJkmd8=N z_f<_qY}r&}9Aj3^2|*34s{972TfCbHW@<$XjWLz2(nen0OiqP8VW!N^SFN}@p+*8V zTGI6PdwO>@KAih&JNf;4Y5$D=>D&1hjK{%V{ZaU@3 zVf(QsXWxD0{U3k&@&t_|VB5~SE^FGDMc_25RIL=b8iGO|Jpr{1F@wmsI?g$#FmN8p z#+;U#B2<+ZC$1qUu4slvPQ0spRE;q>@5`ohYimNnA&gR2uCAT03<+6?!Ak2a03Z#+ zu*Nqds_JAisj6xWp{}dxbfTtxh|VQICh>U;UDpr8aCr2%nQk2&FJtU?C-k|`KL6F9 z|I!mzE-?nn5K?B6k_tSD=Bz`pqQH=yqwAN>Tt2tsb=ce5S@mO|lWJx`a_yNj+h6*r z&p!3h8@KO2{Il=9y6Vz6_VdMjHH^Nh%rjexieSP>PS}7A1pt{DEV;%rH!&L%fW|m1 zOmVWiU(aSi%{h;XCH6^zF((9|c1v>_hR(IEY9_+@q7$Ep$jDJum6}4T>u-PW&AQ$C z;%7fOX`G|X2%flVn!2hG%rrwzNmC*H6hs*miIF(x9OJMtJ)Q7T%sE$8b$Oc>r>O`# zx1Qxg7?~LWonuuqMKMWk@T!cxOS2mnx2Cvw?s^r~-dyO>p*y!v{nD%Tvk6tLjxp`D z@R3V&X6BcNeknj z|M|`TQ@J{uJzDmUj#r0|ms+qC&Jp6?cH4KWTlXH+btAsgtWgbxomAerzF+xDs;VKP zz3ti0eeq{nftOx<@ueTU!*!hjLd*mx&c6?QhK{ZS8Uj%f+0TYVSoHIcJ8tHjb6f zos>f=HW3kFCIvbT-32h`q)4R0Fdz~UJI4TI; zhM98##=AFP55r3SqPew@7%`5Wrhe>~DRhF#fif{MA!M;pDo8ox!gi`>Na%_lgfxbc zghZTXGcf1Zt||m1!aKL$8;AMs&R*TjP~pPgeBYWV=hyk%5Mkn0$ zd{*pMrs1i^9~VkRTXgr+%$`EfpccyR0J@SS(x)im_;j-(=%vfDN_0!-c5 ztyVdQtXgWHn4N2yq3?4HMctfCTSDvmZiu7!dO&w)o@ABYdhkP3b1XvC)Uq?F&z;@A za`F6=*Dhbbayyl?z?>2)&GfME$~X?a96gN$ZggoTM2 z6@aMtKboq}fG3CxP_+<4Rr&I8bmGQgc<<)B&wTVLW=2BiU17L|m=Q(90qAPgHErv> zGsF!L77PfWXiRH(EE1T8Fuw8D%{Sk8TF^V5U zzIf^CFaLvIzIf>pGt;^&8bXX^5rIfKXJIygvi>22oG~KQ=Kuf%EJ;K`RC0M|th&|W z__*?J3_)0CTid>BfZW^f-uc#_y!7JBuiksS%x+4~5n*xlFeoD+6&Kn1$rN);s?K== zm{wIgYj$?`?jJnr`!4j0zVFPKoiMX7S?4562)%dy%F{~YlNYmMj znO5CWv$9B4H=3b1Ok)_CKnT%*g%J&M5`>EN*=v{n_TT*4m2*2bt`IZ>o+igG7mMX` zHJi?ycL?ZwDK$5M z1=|g#=DY*Mq`Ab7Pk3kQm`i-nu@GX4X?}EcaQ|L18|IHz$A?|QJGbv`?`~hZeC7Q9 z`L?pEqA}|Tm zky1=pKRjA|=cVud;UE3k%?HPdOs-;apjlJQIgR5OWAxtVlmM`5n+yAUFMQ(1L2c2m zZr#3j@8B^4=GcWWR8plBhHjN(NO2@2TxW9Doqohp)w9Xg4vOzq%a}qtnfj^{R}KAY z=sRLAdPo8O7jOcTk!CO;;iTC#pH#lZZKwa@pZvWW*DgRBlIGH{<-IRIHW4}T%z{Wv zqQ!WgbBrOS?3^#?uW=l6jyXpnhrlt7DF@0h-PzmP-6Nzh46AO*OyWEcd0%PHap?Pg z8N!%j?)$N7n(1_U`~ACHTeJQB{jJ%yunfaWb5Kpc`5V9S{+&CY{?rTj^KZz7%_?jx{oHH=zeT<2nr-BPJ14Cx1>xMVq_{qhM{%uG{Ov#dHlI;yMMi7*l6@pyiOVPV5uN!n(z8kWA9?eAS^ zT8Uv8LKfeG)a;rJ1jr|5GqLqMfq)5t03qjGC>_N-yCETNQs*aTSvsjkj@b&`o$ekk z|JAR3lH4$k27KlAzBy|cG&zWwgIx0EOuDsbO-s6hzh zFr+YOig6r~*;Tcf#u&hI>6aJpr(1hfGYMH&tEC9nZQHh!t{=L^B8`KYv3PdEBH~?H ziz2ATk=Y@$LN--MiAhq9VRgLU(m(%|UwZz=lMGspcQJ8_IHqETz1nOEAPaY znW=o`Rn;t&4R?t;SyWX6vgK+q_FX$^d{v8cO*OV literal 0 HcmV?d00001 diff --git a/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma.json b/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma.json new file mode 100644 index 00000000..037e0b16 --- /dev/null +++ b/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma.json @@ -0,0 +1,25 @@ +{ + "details": { + "best_of_sequences": null, + "finish_reason": "eos_token", + "generated_tokens": 2, + "prefill": [], + "seed": null, + "tokens": [ + { + "id": 54901, + "logprob": -0.72753906, + "special": false, + "text": "beach" + }, + { + "id": 1, + "logprob": -0.011009216, + "special": true, + "text": "" + } + ], + "top_tokens": null + }, + "generated_text": "beach" +} diff --git a/integration-tests/models/test_flash_pali_gemma.py b/integration-tests/models/test_flash_pali_gemma.py new file mode 100644 index 00000000..d4e83c9f --- /dev/null +++ b/integration-tests/models/test_flash_pali_gemma.py @@ -0,0 +1,39 @@ +import pytest +import requests +import io +import base64 + + +@pytest.fixture(scope="module") 
+def flash_pali_gemma_handle(launcher):
+    with launcher(
+        "google/paligemma-3b-pt-224",
+        num_shard=1,
+        revision="float16",
+        max_input_length=4000,
+        max_total_tokens=4096,
+    ) as handle:
+        yield handle
+
+
+@pytest.fixture(scope="module")
+async def flash_pali_gemma(flash_pali_gemma_handle):
+    await flash_pali_gemma_handle.health(300)
+    return flash_pali_gemma_handle.client
+
+
+def get_cow_beach():
+    with open("integration-tests/images/cow_beach.png", "rb") as image_file:
+        encoded_string = base64.b64encode(image_file.read())
+    return f"data:image/png;base64,{encoded_string.decode('utf-8')}"
+
+
+@pytest.mark.asyncio
+@pytest.mark.private
+async def test_flash_pali_gemma(flash_pali_gemma, response_snapshot):
+    cow = get_cow_beach()
+    inputs = f"![]({cow})Where is the cow standing?\n"
+    response = await flash_pali_gemma.generate(inputs, max_new_tokens=20)
+
+    assert response.generated_text == "beach"
+    assert response == response_snapshot
diff --git a/router/src/config.rs b/router/src/config.rs
index 989f0e31..d27b1136 100644
--- a/router/src/config.rs
+++ b/router/src/config.rs
@@ -100,7 +100,6 @@ impl LlavaNext {
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
-#[serde(tag = "model_type")]
 #[serde(rename_all = "snake_case")]
 pub struct ClipVisionModel {
     image_size: usize,
@@ -108,7 +107,6 @@ pub struct ClipVisionModel {
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
-#[serde(tag = "model_type")]
 #[serde(rename_all = "snake_case")]
 pub struct Idefics2 {}
 
@@ -118,6 +116,24 @@ impl Idefics2 {
     }
 }
 
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub struct PaliTextConfig {
+    num_image_tokens: usize,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub struct Paligemma {
+    text_config: PaliTextConfig,
+}
+
+impl Paligemma {
+    pub fn get_number_of_features(&self, _height: usize, _width: usize) -> usize {
+        self.text_config.num_image_tokens
+    }
+}
+
 #[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(tag = "model_type")]
 #[serde(rename_all = "snake_case")]
@@ -140,6 +156,7 @@ pub enum Config {
     Phi3,
     Llama,
     Baichuan,
+    Paligemma(Paligemma),
     Gemma,
     Cohere,
     Drbx,
diff --git a/router/src/validation.rs b/router/src/validation.rs
index f85b169c..96b6cb27 100644
--- a/router/src/validation.rs
+++ b/router/src/validation.rs
@@ -544,6 +544,30 @@ fn prepare_input(
             inputs = modified_inputs;
             tokenizer_query
         }
+        Some(Config::Paligemma(config)) => {
+            let mut modified_inputs = String::with_capacity(inputs.len());
+            let mut tokenizer_query = String::with_capacity(inputs.len());
+            let mut start = 0;
+            for chunk in RE.find_iter(&inputs) {
+                let chunk_start = chunk.start();
+                let chunk_end = chunk.end();
+                if chunk_start != start {
+                    modified_inputs.push_str(&inputs[start..chunk_start]);
+                    tokenizer_query.push_str(&inputs[start..chunk_start]);
+                }
+                let (image_uri, height, width) = fetch_image(&inputs[chunk_start..chunk_end])?;
+                let slots = config.get_number_of_features(height, width);
+                tokenizer_query.push_str(&"<image>".repeat(slots));
+                modified_inputs.push_str(&image_uri);
+                start = chunk_end;
+            }
+            if start != inputs.len() {
+                modified_inputs.push_str(&inputs[start..]);
+                tokenizer_query.push_str(&inputs[start..]);
+            }
+            inputs = modified_inputs;
+            tokenizer_query
+        }
         Some(Config::Idefics2(config)) => {
             let mut modified_inputs = String::with_capacity(inputs.len());
             let mut tokenizer_query = String::with_capacity(inputs.len());
diff --git a/server/poetry.lock b/server/poetry.lock
index 684713f8..5af1fba4 100644
---
a/server/poetry.lock +++ b/server/poetry.lock @@ -359,43 +359,45 @@ files = [ [[package]] name = "datasets" -version = "2.14.4" +version = "2.19.1" description = "HuggingFace community-driven open-source library of datasets" optional = true python-versions = ">=3.8.0" files = [ - {file = "datasets-2.14.4-py3-none-any.whl", hash = "sha256:29336bd316a7d827ccd4da2236596279b20ca2ac78f64c04c9483da7cbc2459b"}, - {file = "datasets-2.14.4.tar.gz", hash = "sha256:ef29c2b5841de488cd343cfc26ab979bff77efa4d2285af51f1ad7db5c46a83b"}, + {file = "datasets-2.19.1-py3-none-any.whl", hash = "sha256:f7a78d15896f45004ccac1c298f3c7121f92f91f6f2bfbd4e4f210f827e6e411"}, + {file = "datasets-2.19.1.tar.gz", hash = "sha256:0df9ef6c5e9138cdb996a07385220109ff203c204245578b69cca905eb151d3a"}, ] [package.dependencies] aiohttp = "*" -dill = ">=0.3.0,<0.3.8" -fsspec = {version = ">=2021.11.1", extras = ["http"]} -huggingface-hub = ">=0.14.0,<1.0.0" +dill = ">=0.3.0,<0.3.9" +filelock = "*" +fsspec = {version = ">=2023.1.0,<=2024.3.1", extras = ["http"]} +huggingface-hub = ">=0.21.2" multiprocess = "*" numpy = ">=1.17" packaging = "*" pandas = "*" -pyarrow = ">=8.0.0" +pyarrow = ">=12.0.0" +pyarrow-hotfix = "*" pyyaml = ">=5.1" requests = ">=2.19.0" tqdm = ">=4.62.1" xxhash = "*" [package.extras] -apache-beam = ["apache-beam (>=2.26.0,<2.44.0)"] +apache-beam = ["apache-beam (>=2.26.0)"] audio = ["librosa", "soundfile (>=0.12.1)"] benchmarks = ["tensorflow (==2.12.0)", "torch (==2.0.1)", "transformers (==4.30.1)"] -dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"] -docs = ["s3fs", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos", "torch", "transformers"] -jax = ["jax (>=0.2.8,!=0.3.2,<=0.3.25)", "jaxlib (>=0.1.65,<=0.3.25)"] +dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.3.0)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.6.0)", "tiktoken", "torch", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] +docs = ["s3fs", "tensorflow (>=2.6.0)", "torch", "transformers"] +jax = ["jax (>=0.3.14)", "jaxlib (>=0.3.14)"] metrics-tests = ["Werkzeug (>=1.0.1)", "accelerate", "bert-score (>=0.3.6)", "jiwer", "langdetect", "mauve-text", "nltk", "requests-file (>=1.5.1)", "rouge-score", "sacrebleu", "sacremoses", "scikit-learn", "scipy", "sentencepiece", "seqeval", "six (>=1.15.0,<1.16.0)", "spacy (>=3.0.0)", "texttable (>=1.6.3)", "tldextract", "tldextract (>=3.1.0)", "toml (>=0.10.1)", "typer (<0.5.0)"] -quality = ["black (>=23.1,<24.0)", "pyyaml (>=5.3.1)", "ruff (>=0.0.241)"] +quality = ["ruff (>=0.3.0)"] s3 = ["s3fs"] -tensorflow = ["tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos"] -tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"] 
-tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"] +tensorflow = ["tensorflow (>=2.6.0)"] +tensorflow-gpu = ["tensorflow (>=2.6.0)"] +tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.6.0)", "tiktoken", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] torch = ["torch"] vision = ["Pillow (>=6.2.1)"] @@ -418,17 +420,18 @@ dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] [[package]] name = "dill" -version = "0.3.7" +version = "0.3.8" description = "serialize all of Python" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "dill-0.3.7-py3-none-any.whl", hash = "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e"}, - {file = "dill-0.3.7.tar.gz", hash = "sha256:cc1c8b182eb3013e24bd475ff2e9295af86c1a38eb1aff128dac8962a9ce3c03"}, + {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, ] [package.extras] graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] [[package]] name = "diskcache" @@ -871,13 +874,13 @@ files = [ [[package]] name = "huggingface-hub" -version = "0.19.4" +version = "0.23.0" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.19.4-py3-none-any.whl", hash = "sha256:dba013f779da16f14b606492828f3760600a1e1801432d09fe1c33e50b825bb5"}, - {file = "huggingface_hub-0.19.4.tar.gz", hash = "sha256:176a4fc355a851c17550e7619488f383189727eab209534d7cef2114dae77b22"}, + {file = "huggingface_hub-0.23.0-py3-none-any.whl", hash = "sha256:075c30d48ee7db2bba779190dc526d2c11d422aed6f9044c5e2fdc2c432fdb91"}, + {file = "huggingface_hub-0.23.0.tar.gz", hash = "sha256:7126dedd10a4c6fac796ced4d87a8cf004efc722a5125c2c09299017fa366fa9"}, ] [package.dependencies] @@ -890,16 +893,17 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", 
"pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] -docs = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "hf-doc-builder", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)", "watchdog"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] -inference = ["aiohttp", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)"] -quality = ["mypy (==1.5.1)", "ruff (>=0.1.3)"] +hf-transfer = ["hf-transfer (>=0.1.4)"] +inference = ["aiohttp", "minijinja (>=1.0)"] +quality = ["mypy (==1.5.1)", "ruff (>=0.3.0)"] tensorflow = ["graphviz", "pydot", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] -torch = ["torch"] +tensorflow-testing = ["keras (<3.0)", "tensorflow"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["safetensors", "torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] @@ -1282,31 +1286,27 @@ files = [ [[package]] name = "multiprocess" -version = "0.70.15" +version = "0.70.16" description = "better multiprocessing and multithreading in Python" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "multiprocess-0.70.15-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:aa36c7ed16f508091438687fe9baa393a7a8e206731d321e443745e743a0d4e5"}, - {file = "multiprocess-0.70.15-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:20e024018c46d0d1602024c613007ac948f9754659e3853b0aa705e83f6931d8"}, - {file = "multiprocess-0.70.15-pp37-pypy37_pp73-manylinux_2_24_i686.whl", hash = "sha256:e576062981c91f0fe8a463c3d52506e598dfc51320a8dd8d78b987dfca91c5db"}, - {file = "multiprocess-0.70.15-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = 
"sha256:e73f497e6696a0f5433ada2b3d599ae733b87a6e8b008e387c62ac9127add177"}, - {file = "multiprocess-0.70.15-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:73db2e7b32dcc7f9b0f075c2ffa45c90b6729d3f1805f27e88534c8d321a1be5"}, - {file = "multiprocess-0.70.15-pp38-pypy38_pp73-manylinux_2_24_i686.whl", hash = "sha256:4271647bd8a49c28ecd6eb56a7fdbd3c212c45529ad5303b40b3c65fc6928e5f"}, - {file = "multiprocess-0.70.15-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:cf981fb998d6ec3208cb14f0cf2e9e80216e834f5d51fd09ebc937c32b960902"}, - {file = "multiprocess-0.70.15-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:18f9f2c7063346d1617bd1684fdcae8d33380ae96b99427260f562e1a1228b67"}, - {file = "multiprocess-0.70.15-pp39-pypy39_pp73-manylinux_2_24_i686.whl", hash = "sha256:0eac53214d664c49a34695e5824872db4006b1a465edd7459a251809c3773370"}, - {file = "multiprocess-0.70.15-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:1a51dd34096db47fb21fa2b839e615b051d51b97af9a67afbcdaa67186b44883"}, - {file = "multiprocess-0.70.15-py310-none-any.whl", hash = "sha256:7dd58e33235e83cf09d625e55cffd7b0f0eede7ee9223cdd666a87624f60c21a"}, - {file = "multiprocess-0.70.15-py311-none-any.whl", hash = "sha256:134f89053d82c9ed3b73edd3a2531eb791e602d4f4156fc92a79259590bd9670"}, - {file = "multiprocess-0.70.15-py37-none-any.whl", hash = "sha256:f7d4a1629bccb433114c3b4885f69eccc200994323c80f6feee73b0edc9199c5"}, - {file = "multiprocess-0.70.15-py38-none-any.whl", hash = "sha256:bee9afba476c91f9ebee7beeee0601face9eff67d822e893f9a893725fbd6316"}, - {file = "multiprocess-0.70.15-py39-none-any.whl", hash = "sha256:3e0953f5d52b4c76f1c973eaf8214554d146f2be5decb48e928e55c7a2d19338"}, - {file = "multiprocess-0.70.15.tar.gz", hash = "sha256:f20eed3036c0ef477b07a4177cf7c1ba520d9a2677870a4f47fe026f0cd6787e"}, + {file = "multiprocess-0.70.16-pp310-pypy310_pp73-macosx_10_13_x86_64.whl", hash = "sha256:476887be10e2f59ff183c006af746cb6f1fd0eadcfd4ef49e605cbe2659920ee"}, + {file = "multiprocess-0.70.16-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d951bed82c8f73929ac82c61f01a7b5ce8f3e5ef40f5b52553b4f547ce2b08ec"}, + {file = "multiprocess-0.70.16-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37b55f71c07e2d741374998c043b9520b626a8dddc8b3129222ca4f1a06ef67a"}, + {file = "multiprocess-0.70.16-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba8c31889abf4511c7308a8c52bb4a30b9d590e7f58523302ba00237702ca054"}, + {file = "multiprocess-0.70.16-pp39-pypy39_pp73-macosx_10_13_x86_64.whl", hash = "sha256:0dfd078c306e08d46d7a8d06fb120313d87aa43af60d66da43ffff40b44d2f41"}, + {file = "multiprocess-0.70.16-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e7b9d0f307cd9bd50851afaac0dba2cb6c44449efff697df7c7645f7d3f2be3a"}, + {file = "multiprocess-0.70.16-py310-none-any.whl", hash = "sha256:c4a9944c67bd49f823687463660a2d6daae94c289adff97e0f9d696ba6371d02"}, + {file = "multiprocess-0.70.16-py311-none-any.whl", hash = "sha256:af4cabb0dac72abfb1e794fa7855c325fd2b55a10a44628a3c1ad3311c04127a"}, + {file = "multiprocess-0.70.16-py312-none-any.whl", hash = "sha256:fc0544c531920dde3b00c29863377f87e1632601092ea2daca74e4beb40faa2e"}, + {file = "multiprocess-0.70.16-py38-none-any.whl", hash = "sha256:a71d82033454891091a226dfc319d0cfa8019a4e888ef9ca910372a446de4435"}, + {file = "multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3"}, + {file = "multiprocess-0.70.16.tar.gz", hash = 
"sha256:161af703d4652a0e1410be6abccecde4a7ddffd19341be0a7011b94aeb171ac1"}, ] [package.dependencies] -dill = ">=0.3.7" +dill = ">=0.3.8" [[package]] name = "nest-asyncio" @@ -2034,52 +2034,63 @@ files = [ [[package]] name = "pyarrow" -version = "16.0.0" +version = "16.1.0" description = "Python library for Apache Arrow" optional = true python-versions = ">=3.8" files = [ - {file = "pyarrow-16.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:22a1fdb1254e5095d629e29cd1ea98ed04b4bbfd8e42cc670a6b639ccc208b60"}, - {file = "pyarrow-16.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:574a00260a4ed9d118a14770edbd440b848fcae5a3024128be9d0274dbcaf858"}, - {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0815d0ddb733b8c1b53a05827a91f1b8bde6240f3b20bf9ba5d650eb9b89cdf"}, - {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df0080339387b5d30de31e0a149c0c11a827a10c82f0c67d9afae3981d1aabb7"}, - {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:edf38cce0bf0dcf726e074159c60516447e4474904c0033f018c1f33d7dac6c5"}, - {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:91d28f9a40f1264eab2af7905a4d95320ac2f287891e9c8b0035f264fe3c3a4b"}, - {file = "pyarrow-16.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:99af421ee451a78884d7faea23816c429e263bd3618b22d38e7992c9ce2a7ad9"}, - {file = "pyarrow-16.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:d22d0941e6c7bafddf5f4c0662e46f2075850f1c044bf1a03150dd9e189427ce"}, - {file = "pyarrow-16.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:266ddb7e823f03733c15adc8b5078db2df6980f9aa93d6bb57ece615df4e0ba7"}, - {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cc23090224b6594f5a92d26ad47465af47c1d9c079dd4a0061ae39551889efe"}, - {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56850a0afe9ef37249d5387355449c0f94d12ff7994af88f16803a26d38f2016"}, - {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:705db70d3e2293c2f6f8e84874b5b775f690465798f66e94bb2c07bab0a6bb55"}, - {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:5448564754c154997bc09e95a44b81b9e31ae918a86c0fcb35c4aa4922756f55"}, - {file = "pyarrow-16.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:729f7b262aa620c9df8b9967db96c1575e4cfc8c25d078a06968e527b8d6ec05"}, - {file = "pyarrow-16.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:fb8065dbc0d051bf2ae2453af0484d99a43135cadabacf0af588a3be81fbbb9b"}, - {file = "pyarrow-16.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:20ce707d9aa390593ea93218b19d0eadab56390311cb87aad32c9a869b0e958c"}, - {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5823275c8addbbb50cd4e6a6839952682a33255b447277e37a6f518d6972f4e1"}, - {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ab8b9050752b16a8b53fcd9853bf07d8daf19093533e990085168f40c64d978"}, - {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:42e56557bc7c5c10d3e42c3b32f6cff649a29d637e8f4e8b311d334cc4326730"}, - {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:2a7abdee4a4a7cfa239e2e8d721224c4b34ffe69a0ca7981354fe03c1328789b"}, - {file = "pyarrow-16.0.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:ef2f309b68396bcc5a354106741d333494d6a0d3e1951271849787109f0229a6"}, - {file = "pyarrow-16.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ed66e5217b4526fa3585b5e39b0b82f501b88a10d36bd0d2a4d8aa7b5a48e2df"}, - {file = "pyarrow-16.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cc8814310486f2a73c661ba8354540f17eef51e1b6dd090b93e3419d3a097b3a"}, - {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c2f5e239db7ed43e0ad2baf46a6465f89c824cc703f38ef0fde927d8e0955f7"}, - {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f293e92d1db251447cb028ae12f7bc47526e4649c3a9924c8376cab4ad6b98bd"}, - {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:dd9334a07b6dc21afe0857aa31842365a62eca664e415a3f9536e3a8bb832c07"}, - {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d91073d1e2fef2c121154680e2ba7e35ecf8d4969cc0af1fa6f14a8675858159"}, - {file = "pyarrow-16.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:71d52561cd7aefd22cf52538f262850b0cc9e4ec50af2aaa601da3a16ef48877"}, - {file = "pyarrow-16.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:b93c9a50b965ee0bf4fef65e53b758a7e8dcc0c2d86cebcc037aaaf1b306ecc0"}, - {file = "pyarrow-16.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d831690844706e374c455fba2fb8cfcb7b797bfe53ceda4b54334316e1ac4fa4"}, - {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35692ce8ad0b8c666aa60f83950957096d92f2a9d8d7deda93fb835e6053307e"}, - {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9dd3151d098e56f16a8389c1247137f9e4c22720b01c6f3aa6dec29a99b74d80"}, - {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:bd40467bdb3cbaf2044ed7a6f7f251c8f941c8b31275aaaf88e746c4f3ca4a7a"}, - {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:00a1dcb22ad4ceb8af87f7bd30cc3354788776c417f493089e0a0af981bc8d80"}, - {file = "pyarrow-16.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:fda9a7cebd1b1d46c97b511f60f73a5b766a6de4c5236f144f41a5d5afec1f35"}, - {file = "pyarrow-16.0.0.tar.gz", hash = "sha256:59bb1f1edbbf4114c72415f039f1359f1a57d166a331c3229788ccbfbb31689a"}, + {file = "pyarrow-16.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:17e23b9a65a70cc733d8b738baa6ad3722298fa0c81d88f63ff94bf25eaa77b9"}, + {file = "pyarrow-16.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4740cc41e2ba5d641071d0ab5e9ef9b5e6e8c7611351a5cb7c1d175eaf43674a"}, + {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98100e0268d04e0eec47b73f20b39c45b4006f3c4233719c3848aa27a03c1aef"}, + {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f68f409e7b283c085f2da014f9ef81e885d90dcd733bd648cfba3ef265961848"}, + {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:a8914cd176f448e09746037b0c6b3a9d7688cef451ec5735094055116857580c"}, + {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:48be160782c0556156d91adbdd5a4a7e719f8d407cb46ae3bb4eaee09b3111bd"}, + {file = "pyarrow-16.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9cf389d444b0f41d9fe1444b70650fea31e9d52cfcb5f818b7888b91b586efff"}, + {file = "pyarrow-16.1.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:d0ebea336b535b37eee9eee31761813086d33ed06de9ab6fc6aaa0bace7b250c"}, + {file = 
"pyarrow-16.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e73cfc4a99e796727919c5541c65bb88b973377501e39b9842ea71401ca6c1c"}, + {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf9251264247ecfe93e5f5a0cd43b8ae834f1e61d1abca22da55b20c788417f6"}, + {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddf5aace92d520d3d2a20031d8b0ec27b4395cab9f74e07cc95edf42a5cc0147"}, + {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:25233642583bf658f629eb230b9bb79d9af4d9f9229890b3c878699c82f7d11e"}, + {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a33a64576fddfbec0a44112eaf844c20853647ca833e9a647bfae0582b2ff94b"}, + {file = "pyarrow-16.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:185d121b50836379fe012753cf15c4ba9638bda9645183ab36246923875f8d1b"}, + {file = "pyarrow-16.1.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:2e51ca1d6ed7f2e9d5c3c83decf27b0d17bb207a7dea986e8dc3e24f80ff7d6f"}, + {file = "pyarrow-16.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06ebccb6f8cb7357de85f60d5da50e83507954af617d7b05f48af1621d331c9a"}, + {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b04707f1979815f5e49824ce52d1dceb46e2f12909a48a6a753fe7cafbc44a0c"}, + {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d32000693deff8dc5df444b032b5985a48592c0697cb6e3071a5d59888714e2"}, + {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:8785bb10d5d6fd5e15d718ee1d1f914fe768bf8b4d1e5e9bf253de8a26cb1628"}, + {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:e1369af39587b794873b8a307cc6623a3b1194e69399af0efd05bb202195a5a7"}, + {file = "pyarrow-16.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:febde33305f1498f6df85e8020bca496d0e9ebf2093bab9e0f65e2b4ae2b3444"}, + {file = "pyarrow-16.1.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b5f5705ab977947a43ac83b52ade3b881eb6e95fcc02d76f501d549a210ba77f"}, + {file = "pyarrow-16.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0d27bf89dfc2576f6206e9cd6cf7a107c9c06dc13d53bbc25b0bd4556f19cf5f"}, + {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d07de3ee730647a600037bc1d7b7994067ed64d0eba797ac74b2bc77384f4c2"}, + {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbef391b63f708e103df99fbaa3acf9f671d77a183a07546ba2f2c297b361e83"}, + {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:19741c4dbbbc986d38856ee7ddfdd6a00fc3b0fc2d928795b95410d38bb97d15"}, + {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f2c5fb249caa17b94e2b9278b36a05ce03d3180e6da0c4c3b3ce5b2788f30eed"}, + {file = "pyarrow-16.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:e6b6d3cd35fbb93b70ade1336022cc1147b95ec6af7d36906ca7fe432eb09710"}, + {file = "pyarrow-16.1.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:18da9b76a36a954665ccca8aa6bd9f46c1145f79c0bb8f4f244f5f8e799bca55"}, + {file = "pyarrow-16.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:99f7549779b6e434467d2aa43ab2b7224dd9e41bdde486020bae198978c9e05e"}, + {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f07fdffe4fd5b15f5ec15c8b64584868d063bc22b86b46c9695624ca3505b7b4"}, + {file = 
"pyarrow-16.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddfe389a08ea374972bd4065d5f25d14e36b43ebc22fc75f7b951f24378bf0b5"}, + {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:3b20bd67c94b3a2ea0a749d2a5712fc845a69cb5d52e78e6449bbd295611f3aa"}, + {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ba8ac20693c0bb0bf4b238751d4409e62852004a8cf031c73b0e0962b03e45e3"}, + {file = "pyarrow-16.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:31a1851751433d89a986616015841977e0a188662fcffd1a5677453f1df2de0a"}, + {file = "pyarrow-16.1.0.tar.gz", hash = "sha256:15fbb22ea96d11f0b5768504a3f961edab25eaf4197c341720c4a387f6c60315"}, ] [package.dependencies] numpy = ">=1.16.6" +[[package]] +name = "pyarrow-hotfix" +version = "0.6" +description = "" +optional = true +python-versions = ">=3.5" +files = [ + {file = "pyarrow_hotfix-0.6-py3-none-any.whl", hash = "sha256:dcc9ae2d220dff0083be6a9aa8e0cdee5182ad358d4931fce825c545e5c89178"}, + {file = "pyarrow_hotfix-0.6.tar.gz", hash = "sha256:79d3e030f7ff890d408a100ac16d6f00b14d44a502d7897cd9fc3e3a534e9945"}, +] + [[package]] name = "pydantic" version = "2.7.1" @@ -3016,18 +3027,16 @@ telegram = ["requests"] [[package]] name = "transformers" -version = "4.40.2" +version = "4.41.0.dev0" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = false python-versions = ">=3.8.0" -files = [ - {file = "transformers-4.40.2-py3-none-any.whl", hash = "sha256:71cb94301ec211a2e1d4b8c8d18dcfaa902dfa00a089dceca167a8aa265d6f2d"}, - {file = "transformers-4.40.2.tar.gz", hash = "sha256:657b6054a2097671398d976ad46e60836e7e15f9ea9551631a96e33cb9240649"}, -] +files = [] +develop = false [package.dependencies] filelock = "*" -huggingface-hub = ">=0.19.3,<1.0" +huggingface-hub = ">=0.23.0,<1.0" numpy = ">=1.17" packaging = ">=20.0" pyyaml = ">=5.1" @@ -3040,27 +3049,25 @@ tqdm = ">=4.27" [package.extras] accelerate = ["accelerate (>=0.21.0)"] agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] -all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision"] +all = ["Pillow (>=10.0.1,<=15.0)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision"] audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] codecarbon = ["codecarbon (==1.2.0)"] deepspeed = ["accelerate (>=0.21.0)", "deepspeed (>=0.9.3)"] -deepspeed-testing = 
["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] -dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.19,<0.20)", "urllib3 (<2.0.0)"] -dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", 
"timeout-decorator", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -docs = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision"] -docs-specific = ["hf-doc-builder"] -flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "datasets (!=2.5.0)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict_core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic_lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "datasets (!=2.5.0)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", 
"pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "tf2onnx", "timeout-decorator", "tokenizers (>=0.19,<0.20)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "datasets (!=2.5.0)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict_core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic_lite (>=1.0.7)", "urllib3 (<2.0.0)"] +flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)", "scipy (<1.13.0)"] flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] ftfy = ["ftfy"] integrations = ["optuna", "ray[tune] (>=2.7.0)", "sigopt"] -ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] +ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict_core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic_lite (>=1.0.7)"] modelcreation = ["cookiecutter (==1.7.3)"] natten = ["natten (>=0.14.6,<0.15.0)"] onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] optuna = ["optuna"] -quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (==0.1.5)", "urllib3 (<2.0.0)"] +quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "ruff (==0.1.5)", "urllib3 (<2.0.0)"] ray = ["ray[tune] (>=2.7.0)"] retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] sagemaker = ["sagemaker (>=2.31.0)"] @@ -3069,19 +3076,25 @@ serving = ["fastapi", "pydantic", "starlette", "uvicorn"] sigopt = ["sigopt"] sklearn = ["scikit-learn"] speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] -testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece 
(>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] -tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] -tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] timm = ["timm"] tokenizers = ["tokenizers (>=0.19,<0.20)"] torch = ["accelerate (>=0.21.0)", "torch"] torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] -torchhub = ["filelock", "huggingface-hub (>=0.19.3,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.19,<0.20)", "torch", "tqdm (>=4.27)"] +torchhub = ["filelock", "huggingface-hub (>=0.23.0,<1.0)", "importlib_metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.19,<0.20)", "torch", "tqdm (>=4.27)"] video = ["av (==9.2.0)", "decord (==0.6.0)"] vision = ["Pillow (>=10.0.1,<=15.0)"] +[package.source] +type = "git" +url = "https://github.com/huggingface/transformers.git" +reference = "b8aee2e" +resolved_reference = "b8aee2e918d7ba2d5e9e80162ae26b4806873307" + [[package]] name = "triton" version = "2.3.0" @@ -3488,4 +3501,4 @@ torch = ["torch"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.13" -content-hash = "df83b265d0263870b5d1ae8bfd847f406abef90868fdf528ff38527b512f86c0" +content-hash = "b2a29b0b6e32d0e7043e94b984c5731f2c27c5d95feccbeb80bd890db22d6c4a" diff --git a/server/pyproject.toml b/server/pyproject.toml index 3868c962..bc936e45 100644 --- a/server/pyproject.toml +++ b/server/pyproject.toml @@ -25,8 +25,9 @@ opentelemetry-instrumentation-grpc = "^0.36b0" hf-transfer = "^0.1.2" sentencepiece = "^0.1.97" tokenizers = "^0.19.1" -huggingface-hub = "^0.19.3" -transformers = "^4.40" +huggingface-hub = "^0.23" +# transformers = "^4.40" +transformers = { git = "https://github.com/huggingface/transformers.git", rev="b8aee2e" } einops = "^0.6.1" texttable = { version = "^1.6.7", optional = true } datasets = { version = "^2.14.0", optional = true } diff --git a/server/requirements_cuda.txt b/server/requirements_cuda.txt index 7f0efded..9035f6bc 100644 --- a/server/requirements_cuda.txt +++ b/server/requirements_cuda.txt @@ -13,7 +13,7 @@ grpcio-reflection==1.62.2 ; python_version >= "3.9" and python_version < "3.13" grpcio-status==1.62.2 ; python_version >= "3.9" and python_version < "3.13" grpcio==1.63.0 ; python_version >= "3.9" and python_version < "3.13" 
hf-transfer==0.1.6 ; python_version >= "3.9" and python_version < "3.13" -huggingface-hub==0.19.4 ; python_version >= "3.9" and python_version < "3.13" +huggingface-hub==0.23.0 ; python_version >= "3.9" and python_version < "3.13" idna==3.7 ; python_version >= "3.9" and python_version < "3.13" loguru==0.6.0 ; python_version >= "3.9" and python_version < "3.13" numpy==1.26.4 ; python_version >= "3.9" and python_version < "3.13" @@ -40,7 +40,7 @@ sentencepiece==0.1.99 ; python_version >= "3.9" and python_version < "3.13" setuptools==69.5.1 ; python_version >= "3.9" and python_version < "3.13" tokenizers==0.19.1 ; python_version >= "3.9" and python_version < "3.13" tqdm==4.66.4 ; python_version >= "3.9" and python_version < "3.13" -transformers==4.40.2 ; python_version >= "3.9" and python_version < "3.13" +transformers @ git+https://github.com/huggingface/transformers.git@b8aee2e918d7ba2d5e9e80162ae26b4806873307 ; python_version >= "3.9" and python_version < "3.13" typer==0.6.1 ; python_version >= "3.9" and python_version < "3.13" typing-extensions==4.11.0 ; python_version >= "3.9" and python_version < "3.13" urllib3==2.2.1 ; python_version >= "3.9" and python_version < "3.13" diff --git a/server/requirements_rocm.txt b/server/requirements_rocm.txt index 7f0efded..9035f6bc 100644 --- a/server/requirements_rocm.txt +++ b/server/requirements_rocm.txt @@ -13,7 +13,7 @@ grpcio-reflection==1.62.2 ; python_version >= "3.9" and python_version < "3.13" grpcio-status==1.62.2 ; python_version >= "3.9" and python_version < "3.13" grpcio==1.63.0 ; python_version >= "3.9" and python_version < "3.13" hf-transfer==0.1.6 ; python_version >= "3.9" and python_version < "3.13" -huggingface-hub==0.19.4 ; python_version >= "3.9" and python_version < "3.13" +huggingface-hub==0.23.0 ; python_version >= "3.9" and python_version < "3.13" idna==3.7 ; python_version >= "3.9" and python_version < "3.13" loguru==0.6.0 ; python_version >= "3.9" and python_version < "3.13" numpy==1.26.4 ; python_version >= "3.9" and python_version < "3.13" @@ -40,7 +40,7 @@ sentencepiece==0.1.99 ; python_version >= "3.9" and python_version < "3.13" setuptools==69.5.1 ; python_version >= "3.9" and python_version < "3.13" tokenizers==0.19.1 ; python_version >= "3.9" and python_version < "3.13" tqdm==4.66.4 ; python_version >= "3.9" and python_version < "3.13" -transformers==4.40.2 ; python_version >= "3.9" and python_version < "3.13" +transformers @ git+https://github.com/huggingface/transformers.git@b8aee2e918d7ba2d5e9e80162ae26b4806873307 ; python_version >= "3.9" and python_version < "3.13" typer==0.6.1 ; python_version >= "3.9" and python_version < "3.13" typing-extensions==4.11.0 ; python_version >= "3.9" and python_version < "3.13" urllib3==2.2.1 ; python_version >= "3.9" and python_version < "3.13" diff --git a/server/text_generation_server/layers/linear.py b/server/text_generation_server/layers/linear.py index d137a500..8de6ead0 100644 --- a/server/text_generation_server/layers/linear.py +++ b/server/text_generation_server/layers/linear.py @@ -10,9 +10,9 @@ class FastLinear(torch.nn.Module): bias, ) -> None: super().__init__() - self.weight = torch.nn.Parameter(weight) + self.weight = torch.nn.Parameter(weight, requires_grad=False) if bias is not None: - self.bias = torch.nn.Parameter(bias) + self.bias = torch.nn.Parameter(bias, requires_grad=False) else: self.bias = None diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py index 2dc72238..8878ad15 100644 --- 
a/server/text_generation_server/models/__init__.py
+++ b/server/text_generation_server/models/__init__.py
@@ -65,6 +65,9 @@ try:
     from text_generation_server.models.flash_gemma import (
         FlashGemma,
     )
+    from text_generation_server.models.pali_gemma import (
+        PaliGemma,
+    )
     from text_generation_server.models.flash_santacoder import (
         FlashSantacoderSharded,
     )
@@ -676,6 +679,18 @@ def get_model(
             )
         else:
             raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics"))
+    if model_type == "paligemma":
+        if FLASH_ATTENTION:
+            return PaliGemma(
+                model_id,
+                revision,
+                quantize=quantize,
+                speculator=speculator,
+                dtype=dtype,
+                trust_remote_code=trust_remote_code,
+            )
+        else:
+            raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("PaliGemma"))
 
     if model_type == "llava_next":
         if FLASH_ATTENTION:
diff --git a/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py b/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py
index 43b90bdd..ac6fd0e6 100644
--- a/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py
@@ -99,8 +99,13 @@ class GemmaConfig(PretrainedConfig):
 class GemmaFastRMSNorm(FastRMSNorm):
     @classmethod
     def load(cls, prefix, weights, eps=1e-6):
+        dtype = weights.dtype
+        weights.dtype = torch.float32
         weight = weights.get_tensor(f"{prefix}.weight") + 1
-        return cls(weight, eps)
+        weights.dtype = dtype
+        new = cls(weight, eps)
+        new.dtype = dtype
+        return new
 
     # perform the multiplication in full precision and downcast after
     def forward(self, hidden_states, residual=None):
@@ -111,7 +116,7 @@ class GemmaFastRMSNorm(FastRMSNorm):
         variance = hidden_states.pow(2).mean(-1, keepdim=True)
         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
         hidden_states = hidden_states * self.weight
-        return hidden_states.to(self.weight.dtype), residual
+        return hidden_states.to(self.dtype), residual
 
 
 def load_attention(config, prefix, weights):
@@ -153,15 +158,11 @@ def _load_gqa(config, prefix: str, weights):
 
 class FlashGemmaAttention(torch.nn.Module):
-    def __init__(
-        self,
-        prefix: str,
-        config,
-        weights,
-    ):
+    def __init__(self, prefix: str, config, weights, causal: bool):
         super().__init__()
         self.num_heads = config.num_attention_heads
         self.head_size = config.head_dim
+        self.causal = causal
 
         self.rotary_emb = PositionRotaryEmbedding.static(
             config=config,
@@ -238,6 +239,7 @@ class FlashGemmaAttention(torch.nn.Module):
                 cu_seqlen_prefill,
                 max_s,
                 self.softmax_scale,
+                causal=self.causal,
             )
         # Decode
         else:
@@ -295,11 +297,10 @@ class GemmaMLP(nn.Module):
 
 class FlashGemmaLayer(nn.Module):
-    def __init__(self, layer_id, config, weights):
+    def __init__(self, prefix, config, weights, causal: bool):
         super().__init__()
-        prefix = f"model.layers.{layer_id}"
         self.self_attn = FlashGemmaAttention(
-            prefix=f"{prefix}.self_attn", config=config, weights=weights
+            prefix=f"{prefix}.self_attn", config=config, weights=weights, causal=causal
         )
         self.mlp = GemmaMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
 
@@ -351,30 +352,25 @@ class FlashGemmaLayer(nn.Module):
 
 class FlashGemmaModel(torch.nn.Module):
-    def __init__(self, config, weights):
+    def __init__(self, prefix, config, weights, causal: bool):
         super().__init__()
 
         process_group = weights.process_group
         self.tp_rank = process_group.rank()
         self.tp_world_size = process_group.size()
-        embed_norm = config.hidden_size**0.5
-        self.embed_tokens = TensorParallelEmbedding(
-
prefix="model.embed_tokens", weights=weights - ) - self.embed_tokens.weight *= embed_norm - self.layers = nn.ModuleList( [ FlashGemmaLayer( - layer_id, - config, - weights, + prefix=f"{prefix}.layers.{layer_id}", + config=config, + weights=weights, + causal=causal, ) for layer_id in range(config.num_hidden_layers) ] ) self.norm = GemmaFastRMSNorm.load( - prefix="model.norm", weights=weights, eps=config.rms_norm_eps + prefix=f"{prefix}.norm", weights=weights, eps=config.rms_norm_eps ) self.gradient_checkpointing = False @@ -385,7 +381,7 @@ class FlashGemmaModel(torch.nn.Module): def forward( self, - input_ids: torch.Tensor, + inputs_embeds: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], @@ -394,7 +390,7 @@ class FlashGemmaModel(torch.nn.Module): input_lengths: torch.Tensor, max_s: int, ) -> torch.Tensor: - hidden_states = self.embed_tokens(input_ids) + hidden_states = inputs_embeds # Get rotary cos and sin for this forward # Avoid to index in each layer @@ -423,13 +419,30 @@ class FlashGemmaModel(torch.nn.Module): class FlashGemmaForCausalLM(torch.nn.Module): - def __init__(self, config, weights): + def __init__(self, prefix, config, weights, causal: bool): super().__init__() - self.model = FlashGemmaModel(config, weights) + embed_norm = config.hidden_size**0.5 + if prefix is None: + prefix = "model" + else: + prefix = f"{prefix}.model" + + self.embed_tokens = TensorParallelEmbedding( + prefix=f"{prefix}.embed_tokens", weights=weights + ) + self.embed_tokens.weight *= embed_norm + + self.model = FlashGemmaModel( + prefix=prefix, config=config, weights=weights, causal=causal + ) self.lm_head = SpeculativeHead.load( - config, - prefix="model.embed_tokens" if config.tie_word_embeddings else "lm_head", + prefix=( + f"{prefix}.embed_tokens" + if config.tie_word_embeddings + else f"{prefix}.lm_head" + ), + config=config, weights=weights, ) @@ -445,8 +458,9 @@ class FlashGemmaForCausalLM(torch.nn.Module): max_s: int, lm_head_indices: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + input_embeds = self.embed_tokens(input_ids) hidden_states = self.model( - input_ids, + input_embeds, position_ids, cu_seqlen_prefill, kv_cache, diff --git a/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py b/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py new file mode 100644 index 00000000..91c709e4 --- /dev/null +++ b/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py @@ -0,0 +1,110 @@ +# coding=utf-8 +# Copyright 2024 HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch +import torch.distributed +from torch import nn +from transformers.configuration_utils import PretrainedConfig +from typing import Optional, List, Tuple + +from text_generation_server.layers.tensor_parallel import TensorParallelColumnLinear +from text_generation_server.models.custom_modeling.vlm import ( + load_text_model, + load_vision_model, +) + + +class PaliGemmaForConditionalGeneration(nn.Module): + def __init__(self, prefix, config, weights): + super().__init__() + config.vision_config.quantize = config.quantize + self.vision_tower = load_vision_model( + prefix="vision_tower" if not prefix else f"{prefix}.vision_tower", + config=config.vision_config, + weights=weights, + ) + + self.multi_modal_projector = TensorParallelColumnLinear.load( + config, + prefix="multi_modal_projector.linear", + weights=weights, + bias=True, + ) + + self.vocab_size = config.vocab_size + self.config = config + + text_config = config.text_config + text_config.speculator = config.speculator + text_config.quantize = config.quantize + self.text_model = load_text_model( + prefix="language_model" if not prefix else f"{prefix}.language_model", + config=config.text_config, + weights=weights, + ) + self.pad_token_id = ( + config.pad_token_id if config.pad_token_id is not None else -1 + ) + + def forward( + self, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + cu_seqlen_prefill: Optional[torch.Tensor], + kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], + block_tables: torch.Tensor, + slots: torch.Tensor, + input_lengths: torch.Tensor, + max_s: int, + prefill_cache_indices: Optional[torch.Tensor] = None, + lm_head_indices: Optional[torch.Tensor] = None, + pixel_values: torch.FloatTensor = None, + # Unused here + pixel_attention_mask: Optional[torch.BoolTensor] = None, + image_sizes: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + inputs_embeds = self.text_model.embed_tokens(input_ids) + # TODO This is odd but apparently pali gemma position ids start at 1. 
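+        # Shifting the position ids (and max_s, so the precomputed rotary
+        # cos/sin tables stay in range) makes prefill positions 1-indexed,
+        # matching the upstream implementation this port follows.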
+ if cu_seqlen_prefill is not None: + max_s += 1 + position_ids += 1 + + if pixel_values is not None: + pixel_values = pixel_values.to(dtype=inputs_embeds.dtype) + image_outputs = self.vision_tower(pixel_values) + image_features = self.multi_modal_projector(image_outputs.last_hidden_state) + + # mask where image or padding tokens + mask = input_ids == self.config.image_token_index + + # insert image features into input embeddings + inputs_embeds[mask] = image_features.view(-1, image_features.shape[-1]) + + hidden_states = self.text_model.model( + inputs_embeds=inputs_embeds, + position_ids=position_ids, + cu_seqlen_prefill=cu_seqlen_prefill, + kv_cache=kv_cache, + block_tables=block_tables, + slots=slots, + input_lengths=input_lengths, + max_s=max_s, + ) + + if lm_head_indices is not None: + hidden_states = hidden_states[lm_head_indices] + logits, speculative_logits = self.text_model.lm_head(hidden_states) + + return logits, speculative_logits diff --git a/server/text_generation_server/models/custom_modeling/siglip.py b/server/text_generation_server/models/custom_modeling/siglip.py new file mode 100644 index 00000000..f17d6562 --- /dev/null +++ b/server/text_generation_server/models/custom_modeling/siglip.py @@ -0,0 +1,565 @@ +from typing import Optional, Tuple, Union + +import math +import torch +from torch import nn + +from transformers.activations import ACT2FN +from transformers.modeling_attn_mask_utils import ( + _create_4d_causal_attention_mask, + _prepare_4d_attention_mask, +) +from transformers.modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPooling, + ImageClassifierOutput, +) +from transformers import SiglipConfig, SiglipTextConfig, SiglipVisionConfig + +from text_generation_server.layers.tensor_parallel import ( + TensorParallelEmbedding, + TensorParallelColumnLinear, + TensorParallelRowLinear, +) + + +class SiglipVisionEmbeddings(nn.Module): + def __init__(self, prefix, config: SiglipVisionConfig, weights): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + self.patch_embedding = nn.Conv2d( + in_channels=config.num_channels, + out_channels=self.embed_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + padding="valid", + ) + self.patch_embedding.weight = nn.Parameter( + weights.get_tensor(f"{prefix}.patch_embedding.weight"), requires_grad=False + ) + self.patch_embedding.bias = nn.Parameter( + weights.get_tensor(f"{prefix}.patch_embedding.bias"), requires_grad=False + ) + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + self.position_embedding = TensorParallelEmbedding( + prefix=f"{prefix}.position_embedding", weights=weights + ) + self.register_buffer( + "position_ids", + torch.arange(self.num_positions, device=weights.device).expand((1, -1)), + persistent=False, + ) + + def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: + patch_embeds = self.patch_embedding( + pixel_values + ) # shape = [*, width, grid, grid] + embeddings = patch_embeds.flatten(2).transpose(1, 2) + + embeddings = embeddings + self.position_embedding(self.position_ids) + return embeddings + + +class SiglipTextEmbeddings(nn.Module): + def __init__(self, config: SiglipTextConfig): + super().__init__() + embed_dim = config.hidden_size + + self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) + self.position_embedding = nn.Embedding( + config.max_position_embeddings, embed_dim + ) + + # 
position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + "position_ids", + torch.arange(config.max_position_embeddings).expand((1, -1)), + persistent=False, + ) + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + ) -> torch.Tensor: + seq_length = ( + input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] + ) + + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + if inputs_embeds is None: + inputs_embeds = self.token_embedding(input_ids) + + position_embeddings = self.position_embedding(position_ids) + embeddings = inputs_embeds + position_embeddings + + return embeddings + + +class SiglipAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, prefix, config, weights): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + self.head_size = self.head_dim + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." + ) + self.num_heads = self.num_heads // weights.process_group.size() + self.embed_dim = self.embed_dim // weights.process_group.size() + self.scale = self.head_dim**-0.5 + self.dropout = config.attention_dropout + + self.k_proj = TensorParallelColumnLinear.load( + config, prefix=f"{prefix}.k_proj", weights=weights, bias=True + ) + self.v_proj = TensorParallelColumnLinear.load( + config, prefix=f"{prefix}.v_proj", weights=weights, bias=True + ) + self.q_proj = TensorParallelColumnLinear.load( + config, prefix=f"{prefix}.q_proj", weights=weights, bias=True + ) + self.out_proj = TensorParallelRowLinear.load( + config, prefix=f"{prefix}.out_proj", weights=weights, bias=True + ) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return ( + tensor.view(bsz, seq_len, self.num_heads, self.head_dim) + .transpose(1, 2) + .contiguous() + ) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + bsz, tgt_len, _ = hidden_states.size() + query_states = self.q_proj(hidden_states) + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + # scale post matmul + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) * self.scale + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = ( + 
attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + + attention_mask + ) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + # upcast attention to fp32 + attn_weights = nn.functional.softmax( + attn_weights, dim=-1, dtype=torch.float32 + ).to(attn_weights.dtype) + attn_weights = nn.functional.dropout( + attn_weights, p=self.dropout, training=self.training + ) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_size): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_size)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_size) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights + + +class SiglipMLP(nn.Module): + def __init__(self, prefix, config, weights): + super().__init__() + self.config = config + self.activation_fn = ACT2FN[config.hidden_act] + self.fc1 = TensorParallelColumnLinear.load( # config.hidden_size, config.intermediate_size + prefix=f"{prefix}.fc1", config=config, weights=weights, bias=True + ) + self.fc2 = TensorParallelRowLinear.load( # config.intermediate_size, config.hidden_size + prefix=f"{prefix}.fc2", config=config, weights=weights, bias=True + ) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +class SiglipEncoderLayer(nn.Module): + def __init__(self, prefix, config: SiglipConfig, weights): + super().__init__() + self.embed_dim = config.hidden_size + self.self_attn = SiglipAttention( + prefix=f"{prefix}.self_attn", config=config, weights=weights + ) + self.layer_norm1 = nn.LayerNorm.load( + prefix=f"{prefix}.layer_norm1", weights=weights, eps=config.layer_norm_eps + ) + self.mlp = SiglipMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) + self.layer_norm2 = nn.LayerNorm.load( + prefix=f"{prefix}.layer_norm2", weights=weights, eps=config.layer_norm_eps + ) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor]: + """ + Args: + hidden_states (`torch.FloatTensor`): + Input to the layer of shape `(batch, seq_len, embed_dim)`. + attention_mask (`torch.FloatTensor`): + Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
+        """
+        residual = hidden_states
+        hidden_states = self.layer_norm1(hidden_states)
+        hidden_states, attn_weights = self.self_attn(
+            hidden_states=hidden_states,
+            attention_mask=attention_mask,
+            output_attentions=output_attentions,
+        )
+        hidden_states = residual + hidden_states
+        residual = hidden_states
+        hidden_states = self.layer_norm2(hidden_states)
+        hidden_states = self.mlp(hidden_states)
+        hidden_states = residual + hidden_states
+        if output_attentions:
+            return hidden_states, attn_weights
+        return hidden_states, None
+
+
+class SiglipMultiheadAttentionPoolingHead(nn.Module):
+    """Multihead Attention Pooling."""
+
+    def __init__(self, prefix, config: SiglipVisionConfig, weights):
+        super().__init__()
+
+        self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
+        self.attention = torch.nn.MultiheadAttention(
+            config.hidden_size, config.num_attention_heads, batch_first=True
+        )
+        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.mlp = SiglipMLP(prefix, config, weights)
+
+    def forward(self, hidden_state):
+        batch_size = hidden_state.shape[0]
+        probe = self.probe.repeat(batch_size, 1, 1)
+
+        hidden_state = self.attention(probe, hidden_state, hidden_state)[0]
+
+        residual = hidden_state
+        hidden_state = self.layernorm(hidden_state)
+        hidden_state = residual + self.mlp(hidden_state)
+
+        return hidden_state[:, 0]
+
+
+import warnings
+
+
+def _trunc_normal_(tensor, mean, std, a, b):
+    # Cut & paste from PyTorch official master until it's in a few official releases - RW
+    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
+    def norm_cdf(x):
+        # Computes standard normal cumulative distribution function
+        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
+
+    if (mean < a - 2 * std) or (mean > b + 2 * std):
+        warnings.warn(
+            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
+            "The distribution of values may be incorrect.",
+            stacklevel=2,
+        )
+
+    # Values are generated by using a truncated uniform distribution and
+    # then using the inverse CDF for the normal distribution.
+    # Get upper and lower cdf values
+    l = norm_cdf((a - mean) / std)
+    u = norm_cdf((b - mean) / std)
+
+    # Uniformly fill tensor with values from [l, u], then translate to
+    # [2l-1, 2u-1].
+    tensor.uniform_(2 * l - 1, 2 * u - 1)
+
+    # Use inverse cdf transform for normal distribution to get truncated
+    # standard normal
+    tensor.erfinv_()
+
+    # Transform to proper mean, std
+    tensor.mul_(std * math.sqrt(2.0))
+    tensor.add_(mean)
+
+    # Clamp to ensure it's in the proper range
+    tensor.clamp_(min=a, max=b)
+
+
+def trunc_normal_tf_(
+    tensor: torch.Tensor,
+    mean: float = 0.0,
+    std: float = 1.0,
+    a: float = -2.0,
+    b: float = 2.0,
+) -> torch.Tensor:
+    """Fills the input Tensor with values drawn from a truncated
+    normal distribution. The values are effectively drawn from the
+    normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`
+    with values outside :math:`[a, b]` redrawn until they are within
+    the bounds. The method used for generating the random values works
+    best when :math:`a \\leq \\text{mean} \\leq b`.
+
+    NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
+    bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
+    and the result is subsequently scaled and shifted by the mean and std args.
+
+    Args:
+        tensor: an n-dimensional `torch.Tensor`
+        mean: the mean of the normal distribution
+        std: the standard deviation of the normal distribution
+        a: the minimum cutoff value
+        b: the maximum cutoff value
+    """
+    with torch.no_grad():
+        _trunc_normal_(tensor, 0, 1.0, a, b)
+        tensor.mul_(std).add_(mean)
+
+
+from torch.nn.init import _calculate_fan_in_and_fan_out
+
+
+def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
+    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
+    if mode == "fan_in":
+        denom = fan_in
+    elif mode == "fan_out":
+        denom = fan_out
+    elif mode == "fan_avg":
+        denom = (fan_in + fan_out) / 2
+
+    variance = scale / denom
+
+    if distribution == "truncated_normal":
+        # constant is stddev of standard normal truncated to (-2, 2)
+        trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
+    elif distribution == "normal":
+        with torch.no_grad():
+            tensor.normal_(std=math.sqrt(variance))
+    elif distribution == "uniform":
+        bound = math.sqrt(3 * variance)
+        with torch.no_grad():
+            tensor.uniform_(-bound, bound)
+    else:
+        raise ValueError(f"invalid distribution {distribution}")
+
+
+def lecun_normal_(tensor):
+    variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
+
+
+def default_flax_embed_init(tensor):
+    variance_scaling_(tensor, mode="fan_in", distribution="normal")
+
+
+from transformers import PreTrainedModel
+
+
+class SiglipPreTrainedModel(PreTrainedModel):
+    """
+    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+    models.
+    """
+
+    config_class = SiglipConfig
+    base_model_prefix = "siglip"
+    supports_gradient_checkpointing = True
+
+    def _init_weights(self, module):
+        """Initialize the weights"""
+        if isinstance(module, SiglipVisionEmbeddings):
+            width = (
+                self.config.vision_config.hidden_size
+                if isinstance(self.config, SiglipConfig)
+                else self.config.hidden_size
+            )
+            nn.init.normal_(module.position_embedding.weight, std=1 / math.sqrt(width))
+        elif isinstance(module, nn.Embedding):
+            default_flax_embed_init(module.weight)
+        elif isinstance(module, SiglipAttention):
+            nn.init.xavier_uniform_(module.q_proj.weight)
+            nn.init.xavier_uniform_(module.k_proj.weight)
+            nn.init.xavier_uniform_(module.v_proj.weight)
+            nn.init.xavier_uniform_(module.out_proj.weight)
+            nn.init.zeros_(module.q_proj.bias)
+            nn.init.zeros_(module.k_proj.bias)
+            nn.init.zeros_(module.v_proj.bias)
+            nn.init.zeros_(module.out_proj.bias)
+        elif isinstance(module, SiglipMLP):
+            nn.init.xavier_uniform_(module.fc1.weight)
+            nn.init.xavier_uniform_(module.fc2.weight)
+            nn.init.normal_(module.fc1.bias, std=1e-6)
+            nn.init.normal_(module.fc2.bias, std=1e-6)
+        elif isinstance(module, SiglipMultiheadAttentionPoolingHead):
+            nn.init.xavier_uniform_(module.probe.data)
+            nn.init.xavier_uniform_(module.attention.in_proj_weight.data)
+            nn.init.zeros_(module.attention.in_proj_bias.data)
+        elif isinstance(module, SiglipModel):
+            logit_scale_init = torch.log(torch.tensor(1.0))
+            module.logit_scale.data.fill_(logit_scale_init)
+            module.logit_bias.data.zero_()
+        elif isinstance(module, (nn.Linear, nn.Conv2d)):
+            lecun_normal_(module.weight)
+            if module.bias is not None:
+                nn.init.zeros_(module.bias)
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+
+
+class SiglipEncoder(nn.Module):
+    """
+    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+    [`SiglipEncoderLayer`].
+
+    Args:
+        config: SiglipConfig
+    """
+
+    def __init__(self, prefix, config: SiglipConfig, weights):
+        super().__init__()
+        self.config = config
+        self.layers = nn.ModuleList(
+            [
+                SiglipEncoderLayer(
+                    prefix=f"{prefix}.layers.{i}", config=config, weights=weights
+                )
+                for i in range(config.num_hidden_layers)
+            ]
+        )
+
+    def forward(
+        self,
+        inputs_embeds,
+        attention_mask: Optional[torch.Tensor] = None,
+        output_attentions: Optional[bool] = None,
+    ):
+        r"""
+        Args:
+            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+                than the model's internal embedding lookup matrix.
+            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+                - 1 for tokens that are **not masked**,
+                - 0 for tokens that are **masked**.
+
+                [What are attention masks?](../glossary#attention-mask)
+        """
+
+        hidden_states = inputs_embeds
+        for encoder_layer in self.layers:
+            hidden_states, _ = encoder_layer(
+                hidden_states,
+                attention_mask,
+                output_attentions=output_attentions,
+            )
+
+        return hidden_states
+
+
+class SiglipVisionTransformer(nn.Module):
+    def __init__(self, prefix, config: SiglipVisionConfig, weights):
+        super().__init__()
+        self.config = config
+        embed_dim = config.hidden_size
+
+        self.embeddings = SiglipVisionEmbeddings(
+            prefix=f"{prefix}.embeddings", config=config, weights=weights
+        )
+        self.encoder = SiglipEncoder(
+            prefix=f"{prefix}.encoder", config=config, weights=weights
+        )
+        self.post_layernorm = nn.LayerNorm.load(
+            prefix=f"{prefix}.post_layernorm",
+            weights=weights,
+            eps=config.layer_norm_eps,
+        )
+
+    def forward(
+        self,
+        pixel_values: Optional[torch.FloatTensor] = None,
+    ):
+        r"""
+        Returns:
+            The post-layernorm hidden states for the given pixel values.
+        """
+        if pixel_values is None:
+            raise ValueError("You have to specify pixel_values")
+
+        hidden_states = self.embeddings(pixel_values)
+
+        # NOTE: up until this point, the hidden states are exactly
+        # the same as in the transformers code. The values evaluate
+        # slightly differently in our encoder layer.
+        encoder_outputs = self.encoder(
+            inputs_embeds=hidden_states,
+        )
+        last_hidden_state = encoder_outputs
+        post_last_hidden_state = self.post_layernorm(last_hidden_state)
+
+        return BaseModelOutputWithPooling(
+            last_hidden_state=post_last_hidden_state,
+            # pooler_output=pooled_output,
+            # hidden_states=encoder_outputs,
+        )
diff --git a/server/text_generation_server/models/custom_modeling/vlm.py b/server/text_generation_server/models/custom_modeling/vlm.py
index 690957d0..b74b43ff 100644
--- a/server/text_generation_server/models/custom_modeling/vlm.py
+++ b/server/text_generation_server/models/custom_modeling/vlm.py
@@ -11,6 +11,18 @@ def load_text_model(prefix, config, weights, name=None):
         )
 
         return FlashMistralForCausalLM(prefix, config, weights, name=name)
+    elif config.model_type == "gemma":
+        from text_generation_server.models.custom_modeling.flash_gemma_modeling import (
+            FlashGemmaForCausalLM,
+        )
+
+        return FlashGemmaForCausalLM(prefix, config, weights, causal=False)
+    elif config.model_type == "paligemma":
+        from text_generation_server.models.custom_modeling.flash_gemma_modeling import (
+            FlashGemmaForCausalLM,
+        )
+
+        return FlashGemmaForCausalLM(prefix, config, weights, causal=False)
     else:
         raise RuntimeError(f"Unsupported model type {config.model_type}")
 
@@ -24,5 +36,13 @@ def load_vision_model(prefix, config, weights):
         return CLIPVisionTransformer(
             prefix=f"{prefix}.vision_model", config=config, weights=weights
         )
+    if config.model_type == "siglip_vision_model":
+        from text_generation_server.models.custom_modeling.siglip import (
+            SiglipVisionTransformer,
+        )
+
+        return SiglipVisionTransformer(
+            prefix="vision_tower.vision_model", config=config, weights=weights
+        )
     else:
         raise RuntimeError(f"Unsupported model type {config.model_type}")
diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py
index ee72c033..c029d8f3 100644
--- a/server/text_generation_server/models/flash_causal_lm.py
+++ b/server/text_generation_server/models/flash_causal_lm.py
@@ -133,6 +133,17 @@ class FlashCausalLMBatch(Batch):
         device: torch.device,
     ) -> "FlashCausalLMBatch":
         batch_tokenized_inputs = cls.batch_tokenized_inputs(pb.requests, tokenizer)
+        return cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device)
+
+    @classmethod
+    def from_tokenized(
+        cls,
+        pb: generate_pb2.Batch,
+        tokenizer: PreTrainedTokenizerBase,
+        batch_tokenized_inputs,
+        dtype: torch.dtype,
+        device: torch.device,
+    ) -> "FlashCausalLMBatch":
         position_ids = []
         speculative_ids = []
         cu_seqlen_prefill = [0]
@@ -207,6 +218,7 @@ class FlashCausalLMBatch(Batch):
         # Paged attention
         # Remove one as the first token does not have a past
         speculative_length = get_speculate()
+        speculative_length = 0 if speculative_length is None else speculative_length
         total_tokens = input_length + max_new_tokens - 1 + speculative_length
         needed_blocks = math.ceil(total_tokens / BLOCK_SIZE)
         blocks += needed_blocks
diff --git a/server/text_generation_server/models/flash_gemma.py b/server/text_generation_server/models/flash_gemma.py
index 9c00a056..53bfd064 100644
--- a/server/text_generation_server/models/flash_gemma.py
+++ b/server/text_generation_server/models/flash_gemma.py
@@ -3,12 +3,11 @@ import torch.distributed
 from opentelemetry import trace
 from typing import Optional
 
-from transformers.models.gemma import GemmaTokenizerFast
+from transformers import AutoConfig, AutoTokenizer
 
 from text_generation_server.models import FlashCausalLM
 from text_generation_server.models.custom_modeling.flash_gemma_modeling import (
     FlashGemmaForCausalLM,
-    GemmaConfig,
 )
 from text_generation_server.utils import (
     initialize_torch_distributed,
@@ -36,17 +35,15 @@ class FlashGemma(FlashCausalLM):
         else:
             raise NotImplementedError("FlashGemma is only available on GPU")
 
-        tokenizer = GemmaTokenizerFast.from_pretrained(
+        tokenizer = AutoTokenizer.from_pretrained(
             model_id,
             revision=revision,
             padding_side="left",
             truncation_side="left",
             trust_remote_code=trust_remote_code,
-            use_fast=True,
-            from_slow=False,
         )
 
-        config = GemmaConfig.from_pretrained(
+        config = AutoConfig.from_pretrained(
             model_id, revision=revision, trust_remote_code=trust_remote_code
         )
         config.quantize = quantize
@@ -59,7 +56,9 @@ class FlashGemma(FlashCausalLM):
         if config.quantize in ["gptq", "awq"]:
             weights._set_gptq_params(model_id, revision)
 
-        model = FlashGemmaForCausalLM(config, weights)
+        # TODO: the prefix is hardcoded for now
+        prefix = "language_model"
+        model = FlashGemmaForCausalLM(prefix, config, weights, causal=True)
 
         torch.distributed.barrier(group=self.process_group)
         super(FlashGemma, self).__init__(
diff --git a/server/text_generation_server/models/pali_gemma.py b/server/text_generation_server/models/pali_gemma.py
new file mode 100644
index 00000000..d94b9526
--- /dev/null
+++ b/server/text_generation_server/models/pali_gemma.py
@@ -0,0 +1,123 @@
+import torch
+import torch.distributed
+from opentelemetry import trace
+from typing import Optional, Tuple
+from text_generation_server.models.vlm_causal_lm import (
+    VlmCausalLM,
+    VlmCausalLMBatch,
+    image_text_replacement,
+    load_data_uri,
+    split,
+)
+from text_generation_server.models.custom_modeling.flash_pali_gemma_modeling import (
+    PaliGemmaForConditionalGeneration,
+)
+from transformers import AutoProcessor, AutoConfig, AutoImageProcessor
+
+tracer = trace.get_tracer(__name__)
+
+
+class PaliGemmaBatch(VlmCausalLMBatch):
+    @classmethod
+    def batch_tokenized_inputs(cls, requests, tokenizer, processor, config):
+        batch_inputs = []
+        image_inputs = []
+        max_truncation = 0
+        for r in requests:
+            chunks = split(r.inputs)
+            full_text = ""
+            image_id = 0
+            for chunk in chunks:
+                if chunk["type"] == "text":
+                    full_text += "<bos>" + chunk["content"] + "\n"
+                elif chunk["type"] == "image":
+                    image = chunk["content"]
+                    # Should never receive URLs anymore; processing is done
+                    # on the Rust layer. This avoids making N queries per TP
+                    # rank.
+                    # if image.startswith("https://") or image.startswith("http://"):
+                    #     image = processor.image_processor.fetch_images(image)
+                    if image.startswith("data:"):
+                        image = load_data_uri(image)
+                    else:
+                        raise RuntimeError(
+                            "Cannot process input image not starting with data:"
+                        )
+                    # TODO: should do_convert_RGB be on by default?
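+                    # Conversion guards against RGBA/palette/grayscale inputs,
+                    # which the SigLIP patch embedding (3 input channels)
+                    # cannot consume directly.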
+                    image = image.convert("RGB")
+                    image_input = processor.image_processor(image, return_tensors="pt")
+                    full_text += image_text_replacement(image_input, config, image_id)
+                    image_inputs.append(image_input)
+                else:
+                    raise RuntimeError(f"Invalid chunk type {chunk['type']}")
+
+            batch_inputs.append(full_text)
+            max_truncation = max(max_truncation, r.truncate)
+
+        batch_tokenized_inputs = tokenizer(
+            batch_inputs,
+            truncation=True,
+            max_length=max_truncation,
+            add_special_tokens=False,
+        )["input_ids"]
+        if image_inputs:
+            image_input = image_inputs[0]
+            new_image_inputs = {
+                "pixel_values": torch.cat(
+                    [img["pixel_values"] for img in image_inputs], dim=0
+                ),
+            }
+            if "pixel_attention_mask" in image_input:
+                new_image_inputs["pixel_attention_mask"] = torch.cat(
+                    [img["pixel_attention_mask"] for img in image_inputs], dim=0
+                )
+            if "image_sizes" in image_input:
+                new_image_inputs["image_sizes"] = torch.cat(
+                    [img["image_sizes"] for img in image_inputs], dim=0
+                )
+            image_inputs = new_image_inputs
+        else:
+            image_inputs = None
+        return batch_tokenized_inputs, image_inputs
+
+
+class PaliGemma(VlmCausalLM):
+    def __init__(
+        self,
+        model_id: str,
+        revision: Optional[str] = None,
+        quantize: Optional[str] = None,
+        speculator: Optional[str] = None,
+        dtype: Optional[torch.dtype] = None,
+        trust_remote_code: bool = False,
+    ):
+        self.processor = AutoProcessor.from_pretrained(
+            model_id,
+            revision=revision,
+            trust_remote_code=trust_remote_code,
+        )
+
+        super().__init__(
+            config_cls=AutoConfig,
+            model_cls=PaliGemmaForConditionalGeneration,
+            model_id=model_id,
+            revision=revision,
+            quantize=quantize,
+            speculator=speculator,
+            dtype=dtype,
+            trust_remote_code=trust_remote_code,
+        )
+
+    @property
+    def batch_type(self):
+        return PaliGemmaBatch
+
+    def get_layer_config(self, model) -> Tuple[int, int, int]:
+        return (
+            len(model.text_model.model.layers),
+            model.text_model.model.num_key_value_heads,
+            model.text_model.model.head_size,
+        )
+
+    def max_past(self) -> Optional[int]:
+        return getattr(self.model.text_model, "max_past", None)
diff --git a/server/text_generation_server/models/vlm_causal_lm.py b/server/text_generation_server/models/vlm_causal_lm.py
index 5394feb5..f0db89b2 100644
--- a/server/text_generation_server/models/vlm_causal_lm.py
+++ b/server/text_generation_server/models/vlm_causal_lm.py
@@ -15,6 +15,7 @@ from text_generation_server.models.flash_mistral import (
     BaseFlashMistral,
     FlashMistralBatch,
 )
+from text_generation_server.models.flash_causal_lm import FlashCausalLMBatch
 from text_generation_server.models.cache_manager import (
     get_cache_manager,
 )
@@ -80,6 +81,9 @@ def image_text_replacement(image_input, config, image_id) -> str:
         logger.info(f"Found {num_features} in image of resolution {height}x{width}")
 
         return "<image>" * num_features
+
+    elif config.model_type == "paligemma":
+        return "<image>" * config.text_config.num_image_tokens
     else:
         raise RuntimeError(f"Unknown config {config.model_type} for multimodal")
@@ -193,7 +197,10 @@ class VlmCausalLMBatch(FlashMistralBatch):
             max_truncation = max(max_truncation, r.truncate)
 
         batch_tokenized_inputs = tokenizer(
-            batch_inputs, truncation=True, max_length=max_truncation
+            batch_inputs,
+            truncation=True,
+            max_length=max_truncation,
+            add_special_tokens=config.model_type != "paligemma",
         )["input_ids"]
         if image_inputs:
             image_input = image_inputs[0]
diff --git a/server/text_generation_server/server.py b/server/text_generation_server/server.py
index 9d0571a6..92126fe6 100644
--- a/server/text_generation_server/server.py
+++ b/server/text_generation_server/server.py
@@ -14,7 +14,10 @@ from typing import List, Optional
 from text_generation_server.cache import Cache
 from text_generation_server.interceptor import ExceptionInterceptor
 from text_generation_server.models import Model, get_model
-from text_generation_server.models.vlm_causal_lm import VlmCausalLMBatch
+from text_generation_server.models.pali_gemma import PaliGemmaBatch
+from text_generation_server.models.vlm_causal_lm import (
+    VlmCausalLMBatch,
+)
 from text_generation_server.pb import generate_pb2_grpc, generate_pb2
 from text_generation_server.tracing import UDSOpenTelemetryAioServerInterceptor
 from text_generation_server.models.idefics_causal_lm import IdeficsCausalLMBatch
@@ -98,6 +101,7 @@ class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
         if self.model.batch_type in {
             IdeficsCausalLMBatch,
             VlmCausalLMBatch,
+            PaliGemmaBatch,
         }:
             # Hack, I would rather use kwargs in the `from_pb` call
             batch = self.model.batch_type.from_pb_processor(
                 request.batch,
@@ -122,6 +126,7 @@ class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
         if self.model.batch_type in {
             IdeficsCausalLMBatch,
             VlmCausalLMBatch,
+            PaliGemmaBatch,
         }:
             # Hack, I would rather use kwargs in the `from_pb` call
             batch = self.model.batch_type.from_pb_processor(
                 request.batch,
diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py
index 0830656d..ae60fa63 100644
--- a/server/text_generation_server/utils/flash_attn.py
+++ b/server/text_generation_server/utils/flash_attn.py
@@ -116,6 +116,7 @@ if HAS_FLASH_ATTN_V2_CUDA:
         max_s,
         softmax_scale,
         window_size_left=-1,
+        causal=True,
     ):
         if window_size_left <= 0 and window_size_left != -1:
             raise ValueError("`window_size_left` must be > 0 or -1")
@@ -134,7 +135,7 @@ if HAS_FLASH_ATTN_V2_CUDA:
             0.0,
             softmax_scale,
             False,
-            True,
+            causal,
             window_size_left,
             0,
             False,
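
For reference, the image-feature splice in `PaliGemmaForConditionalGeneration.forward` reduces to a masked assignment into the token embeddings. A minimal, self-contained sketch of just that step (the sizes and the `image_token_index` value below are made up for illustration):

```python
import torch

# Made-up dimensions; the real values come from the model config.
hidden_size = 8
image_token_index = 99

# Two image placeholder tokens followed by three text tokens.
input_ids = torch.tensor([99, 99, 1, 2, 3])
inputs_embeds = torch.zeros(5, hidden_size)     # stand-in for embed_tokens(input_ids)
image_features = torch.ones(1, 2, hidden_size)  # stand-in for the projector output

# Boolean mask over the placeholder positions, then an in-place splice.
mask = input_ids == image_token_index
inputs_embeds[mask] = image_features.view(-1, hidden_size)

assert inputs_embeds[:2].eq(1).all() and inputs_embeds[2:].eq(0).all()
```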