From ede048831f7bbb57987922d149855417de7b76be Mon Sep 17 00:00:00 2001 From: shumingma Date: Wed, 23 Nov 2022 08:21:58 -0800 Subject: [PATCH] torchscale released --- .gitignore | 162 +++++++ README.md | 173 ++++++- SUPPORT.md | 50 +- assets/convergence.png | Bin 0 -> 100165 bytes assets/scaling_curve.png | Bin 0 -> 48978 bytes examples/__init__.py | 0 examples/fairseq/README.md | 233 +++++++++ examples/fairseq/__init__.py | 0 examples/fairseq/generate.py | 7 + examples/fairseq/interactive.py | 7 + examples/fairseq/models/__init__.py | 33 ++ examples/fairseq/models/bert.py | 459 ++++++++++++++++++ examples/fairseq/models/language_modeling.py | 357 ++++++++++++++ .../fairseq/models/machine_translation.py | 450 +++++++++++++++++ examples/fairseq/tasks/__init__.py | 32 ++ examples/fairseq/tasks/data/__init__.py | 0 examples/fairseq/tasks/data/basic_loader.py | 78 +++ examples/fairseq/tasks/data/mlm_loader.py | 308 ++++++++++++ examples/fairseq/tasks/data/utils.py | 82 ++++ examples/fairseq/tasks/pretraining.py | 207 ++++++++ examples/fairseq/train.py | 8 + examples/fairseq/utils/__init__.py | 0 examples/fairseq/utils/sparse_clip.py | 75 +++ setup.py | 25 + tests/__init__.py | 0 tests/test_decoder.py | 33 ++ tests/test_encoder.py | 28 ++ tests/test_encoder_decoder.py | 43 ++ torchscale/__init__.py | 0 torchscale/architecture/__init__.py | 0 torchscale/architecture/config.py | 160 ++++++ torchscale/architecture/decoder.py | 447 +++++++++++++++++ torchscale/architecture/encoder.py | 367 ++++++++++++++ torchscale/architecture/encoder_decoder.py | 61 +++ torchscale/architecture/utils.py | 30 ++ torchscale/component/__init__.py | 0 torchscale/component/droppath.py | 16 + torchscale/component/embedding.py | 120 +++++ torchscale/component/feedforward_network.py | 119 +++++ torchscale/component/multihead_attention.py | 117 +++++ torchscale/component/multiway_network.py | 39 ++ .../component/relative_position_bias.py | 79 +++ torchscale/component/xmoe/__init__.py | 0 
torchscale/component/xmoe/moe_layer.py | 310 ++++++++++++ torchscale/component/xmoe/routing.py | 459 ++++++++++++++++++ torchscale/model/BEiT3.py | 85 ++++ torchscale/model/__init__.py | 0 47 files changed, 5230 insertions(+), 29 deletions(-) create mode 100644 assets/convergence.png create mode 100644 assets/scaling_curve.png create mode 100644 examples/__init__.py create mode 100644 examples/fairseq/README.md create mode 100644 examples/fairseq/__init__.py create mode 100644 examples/fairseq/generate.py create mode 100644 examples/fairseq/interactive.py create mode 100644 examples/fairseq/models/__init__.py create mode 100644 examples/fairseq/models/bert.py create mode 100644 examples/fairseq/models/language_modeling.py create mode 100644 examples/fairseq/models/machine_translation.py create mode 100644 examples/fairseq/tasks/__init__.py create mode 100644 examples/fairseq/tasks/data/__init__.py create mode 100644 examples/fairseq/tasks/data/basic_loader.py create mode 100644 examples/fairseq/tasks/data/mlm_loader.py create mode 100644 examples/fairseq/tasks/data/utils.py create mode 100644 examples/fairseq/tasks/pretraining.py create mode 100644 examples/fairseq/train.py create mode 100644 examples/fairseq/utils/__init__.py create mode 100644 examples/fairseq/utils/sparse_clip.py create mode 100644 setup.py create mode 100644 tests/__init__.py create mode 100644 tests/test_decoder.py create mode 100644 tests/test_encoder.py create mode 100644 tests/test_encoder_decoder.py create mode 100644 torchscale/__init__.py create mode 100644 torchscale/architecture/__init__.py create mode 100644 torchscale/architecture/config.py create mode 100644 torchscale/architecture/decoder.py create mode 100644 torchscale/architecture/encoder.py create mode 100644 torchscale/architecture/encoder_decoder.py create mode 100644 torchscale/architecture/utils.py create mode 100644 torchscale/component/__init__.py create mode 100644 torchscale/component/droppath.py create mode 100644 
torchscale/component/embedding.py create mode 100644 torchscale/component/feedforward_network.py create mode 100644 torchscale/component/multihead_attention.py create mode 100644 torchscale/component/multiway_network.py create mode 100644 torchscale/component/relative_position_bias.py create mode 100644 torchscale/component/xmoe/__init__.py create mode 100644 torchscale/component/xmoe/moe_layer.py create mode 100644 torchscale/component/xmoe/routing.py create mode 100644 torchscale/model/BEiT3.py create mode 100644 torchscale/model/__init__.py diff --git a/.gitignore b/.gitignore index dfcfd56..4eccb03 100644 --- a/.gitignore +++ b/.gitignore @@ -348,3 +348,165 @@ MigrationBackup/ # Ionide (cross platform F# VS Code tools) working folder .ionide/ + + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ \ No newline at end of file diff --git a/README.md b/README.md index 33e8333..9e5c774 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,176 @@ -# OpenScale - Transformers at (any) Scale +# TorchScale - A Library for Transformers at (Any) Scale -Fundamental research to improve modeling generality and capability, as well as training stability and efficiency of scaling Transformers. +

+ MIT License + MIT License +

+ +TorchScale is a PyTorch library that allows researchers and developeres to scale up Transformers efficiently and effectively. +It has the implemetention of fundamental research to improve modeling generality and capability, as well as training stability and efficiency of scaling Transformers. - Stability - [**DeepNet**](https://arxiv.org/abs/2203.00555): scaling Transformers to 1,000 Layers and beyond - Generality - [**Foundation Transformers (Magneto)**](https://arxiv.org/abs/2210.06423) - Efficiency - [**X-MoE**](https://arxiv.org/abs/2204.09179): scalable & finetunable sparse Mixture-of-Experts (MoE) +## News + +- November, 2022: TorchScale 0.1.1 released + +## Installation + +To install: +``` +pip install torchscale +``` + +Alternatively, you can develop it locally: +``` +git clone https://github.com/microsoft/torchscale.git +cd torchscale +pip install -e . +``` + +## Getting Started + +It takes only several lines of code to create a model with the above fundamental research features enabled. 
Here is how to quickly obtain a BERT-like encoder: + +```python +>>> from torchscale.architecture.config import EncoderConfig +>>> from torchscale.architecture.encoder import Encoder + +>>> config = EncoderConfig(vocab_size=64000) +>>> model = Encoder(config) + +>>> print(model) +``` + +We also support the `Decoder` architecture and the `EncoderDecoder` architecture: + +```python +# Creating a decoder model +>>> from torchscale.architecture.config import DecoderConfig +>>> from torchscale.architecture.decoder import Decoder + +>>> config = DecoderConfig(vocab_size=64000) +>>> decoder = Decoder(config) +>>> print(decoder) + +# Creating a encoder-decoder model +>>> from torchscale.architecture.config import EncoderDecoderConfig +>>> from torchscale.architecture.encoder_decoder import EncoderDecoder + +>>> config = EncoderDecoderConfig(vocab_size=64000) +>>> encdec = EncoderDecoder(config) +>>> print(encdec) +``` + +## Examples + +We have the examples of how to use TorchScale in the following scenarios/tasks: + +- Language + + * [Decoder/GPT](examples/fairseq/README.md#example-gpt-pretraining) + + * [Encoder-Decoder/Neural Machine Translation](examples/fairseq/README.md#example-machine-translation) + + * [Encoder/BERT](examples/fairseq/README.md#example-bert-pretraining) + +- Vision + + * ViT/BEiT [In progress] + +- Speech + +- Multimodal + + * [Multiway Transformers/BEiT-3](torchscale/model/BEiT3.py) [In progress] + +We plan to provide more examples regarding different tasks (e.g. vision pretraining and speech recognition) and various deep learning toolkits (e.g. [DeepSpeed](https://github.com/microsoft/DeepSpeed) and [Megatron-LM](https://github.com/NVIDIA/Megatron-LM)). Any comments or PRs are welcome! + +## Results + +### Stability Evaluation + +

+ +

+ +The training curve is smooth by using TorchScale, while the baseline Transformer cannot converge. + +### Scaling-up Experiments + +

+ +

+ +TorchScale supports arbitrary depths and widths, successfully scaling-up the models without pain. + +## Acknowledgments + +Some implementations in TorchScale are either adapted from or inspired by the [FairSeq](https://github.com/facebookresearch/fairseq) repository and the [UniLM](https://github.com/microsoft/unilm) repository. + +## Citations + +If you find this repository useful, please consider citing our work: + +``` +@article{deepnet, + author = {Hongyu Wang and + Shuming Ma and + Li Dong and + Shaohan Huang and + Dongdong Zhang and + Furu Wei}, + title = {{DeepNet}: Scaling Transformers to 1,000 Layers}, + journal = {CoRR}, + volume = {abs/2203.00555}, + year = {2022}, +} +``` + +``` +@article{magneto, + author = {Hongyu Wang and + Shuming Ma and + Shaohan Huang and + Li Dong and + Wenhui Wang and + Zhiliang Peng and + Yu Wu and + Payal Bajaj and + Saksham Singhal and + Alon Benhaim and + Barun Patra and + Zhun Liu and + Vishrav Chaudhary and + Xia Song and + Furu Wei}, + title = {Foundation Transformers}, + journal = {CoRR}, + volume = {abs/2210.06423}, + year = {2022} +} +``` + +``` +@article{xmoe, + author = {Zewen Chi and + Li Dong and + Shaohan Huang and + Damai Dai and + Shuming Ma and + Barun Patra and + Saksham Singhal and + Payal Bajaj and + Xia Song and + Furu Wei}, + title = {On the Representation Collapse of Sparse Mixture of Experts}, + journal = {CoRR}, + volume = {abs/2204.09179}, + year = {2022} +} +``` ## Contributing @@ -19,7 +184,7 @@ provided by the bot. You will only need to do this once across all repos using o This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
+contact [Furu Wei](mailto:fuwei@microsoft.com) and [Shuming Ma](mailto:shumma@microsoft.com) with any additional questions or comments. ## Trademarks @@ -27,4 +192,4 @@ This project may contain trademarks or logos for projects, products, or services trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. -Any use of third-party trademarks or logos are subject to those third-party's policies. +Any use of third-party trademarks or logos are subject to those third-party's policies. \ No newline at end of file diff --git a/SUPPORT.md b/SUPPORT.md index 291d4d4..eaf439a 100644 --- a/SUPPORT.md +++ b/SUPPORT.md @@ -1,25 +1,25 @@ -# TODO: The maintainer of this repo has not yet edited this file - -**REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project? - -- **No CSS support:** Fill out this template with information about how to file issues and get help. -- **Yes CSS support:** Fill out an intake form at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). CSS will work with/help you to determine next steps. -- **Not sure?** Fill out an intake as though the answer were "Yes". CSS will help you decide. - -*Then remove this first heading from this SUPPORT.MD file before publishing your repo.* - -# Support - -## How to file issues and get help - -This project uses GitHub Issues to track bugs and feature requests. Please search the existing -issues before filing new issues to avoid duplicates. For new issues, file your bug or -feature request as a new Issue. - -For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE -FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER -CHANNEL. 
WHERE WILL YOU HELP PEOPLE?**. - -## Microsoft Support Policy - -Support for this **PROJECT or PRODUCT** is limited to the resources listed above. +# TODO: The maintainer of this repo has not yet edited this file + +**REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project? + +- **No CSS support:** Fill out this template with information about how to file issues and get help. +- **Yes CSS support:** Fill out an intake form at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). CSS will work with/help you to determine next steps. +- **Not sure?** Fill out an intake as though the answer were "Yes". CSS will help you decide. + +*Then remove this first heading from this SUPPORT.MD file before publishing your repo.* + +# Support + +## How to file issues and get help + +This project uses GitHub Issues to track bugs and feature requests. Please search the existing +issues before filing new issues to avoid duplicates. For new issues, file your bug or +feature request as a new Issue. + +For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE +FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER +CHANNEL. WHERE WILL YOU HELP PEOPLE?**. + +## Microsoft Support Policy + +Support for this **PROJECT or PRODUCT** is limited to the resources listed above. 
diff --git a/assets/convergence.png b/assets/convergence.png new file mode 100644 index 0000000000000000000000000000000000000000..35d33d51eb604d5a2be50bd5b6e0193ef39f2fe3 GIT binary patch literal 100165 zcmbrmcT`is);QUroZkF5d~nK#WD!TW=*%KC0} zbca7cFZw1IoFyGydz{K`d8D_=>@Z_ctjBiKV%xX*PL{AsX2H)M4IA5dc_okd_)cYe zQTwM%r4482=lAZptsuvz7!~Cl*EiC?^J`1G?JG&DkbaM2m8n*f); z5dY59aAtgQA01IMMO4%EH0U+>(VMETJG=X5zR3|SdfK06)#tFFaq#orS5XccA01uL zN>~tNAMi5+qlfMO)g!vvZ}g zzEZxe9xrxKBg0xY(^%$}^4{}-&l>KLzkE5Y@weN?cwKtRm8R2a3$;5YjEszR&+;^r zWvIP*$;|jp{SwEwtHWNV>Io9+ii(PLxP|l>jZPg+X;|Xlu~s1S81TlP{ff*S!rkiE zhSRk!eQTVZFLmhEc*^bjgIs`&6E?)qWcu7(2T?X!QQ;QP&SFA=UO5 zP-z|(W4ffHl_+1uH0^7lM`%64rWBn1D0-wtuYT+&r;wa(9s`^?j@az;^e~rE+OOxw zFT%MaXL2j&I~6&M*=8{Y&d#|dqXB-U_e=@soT7(+9;m;$EPu)||Is_|@q=8RedcxV zP=*EfLN%o3#Dq+$u3L%64Qr^VJU1w^9dl;>`$)Kukca+r*iq@VAvgPhB0E=&x$cA@ z^8EKtug({D+&PtP(G)SijMGjt`tU0`a9e+CV=a%=QJ&#bQj0RwmqQsD5g+Z@Pfc|D za^Ji+>g+8e50poAYY&H@-jSEP_7!tBbSwyc!g4AzPRMZI`C^?sI1X|0rgy&QsK1x- z!^H;Q*@C9vj(ESZP!4xsu9>?1Jl!x|qoXe~ZJNLS=twhuk17Zzllz@(w>+zVeUYjd zdvePC_t?{{q{Di;q18Ool`71rO-hm^uLRTbwQNCa$p3mFuBQcozQn$tZH;t$wb(Orv7PH-e06yl-N3<#*% z5VXR;RI5>Y58@UCKsEZ{Nfzs8tGXj902$`hcMSG5$TA-&RV zRuu5(-X?>P{~yPssFh+be7#shw1Qp2@r>gphxP?E*40l3Y^|5yphyg#2y*%Tb1Yuk zuLN2d;w~dMPcU+pzP)Nw5h!0GGA1!hAG8zcFEP_#U8whno-P?S+$Xy_SG%)KZf-t! 
zXNJQe*|Pj@vW&lxTye!*1a%7@5chG=sEMM^=|q0lj6~R(-V0wz!RM*Ic!4xB_|8>u zoWJDQ25Wm+AJWXv)NsYpKOk_=+qj3CeSUj`@-{~g(bX(1yjYiL*{f$rG{K#H7)S0N zt@J&vrGErNJ?OVSoi$AM*_bOey?kzKsYS$hbFpdIJR0*ECCFaL@o3Iwb;Qrb>p&!- zB;y-9&0UD6RxV&k58A6&2h>W3Jrhg<{#bF`O?p7)ulVyl!I*#OtrQyixMd z6E4K`9NY$Nb&T$8d-XmZT2~tOo(T8-)q0Im;W1;TnXUF(c)+9Ac-S0~XHsnh>p35J zGnJ5AK^DJiTIF59?fsP=#0Rh9DP@VN3J21`5y>?w`Wd}>1;dS@s#{(fH4Z{7Q?UYEabLGKqdrJ#}@Ua>CuUj6XZmrvA% zB-IYy>ZvQ8c6p3@7yL~K?(oJ1in!57xj|^8l zJalUf4F$yt6;p{eUZ}NJ^tqyjsh%Pg-)hr_yj;5@^pw8-_>E3Q$*WFs9!2g0Gj(+B z*TJTf4fYAl?u!y3RAWW>Hjrxa(8{IHF;vYjBIkEzZjH!nrDm2!($K~=7z17se|9g@ zG$JXdU&NSLNLu&FLhMCKM?`A`o9Lc+loWwJi<|n`r^rqc3e`P z$V$(-V7Kk4?4YnwCgpHfZpbNn%E!mYG16HE#rBHVLs#&MFW3b(H1+d{#m|G7FnSWG zIN_8BHVF6q(8Uh79z@5eucdY#@{lKy9Go=YPf#MrXltEmlZStI+%s+ma8U}E{25UL z_n}+pf+)A-LRE(ZYw*FBGJn(dMs&=IDCzqk#i`}Bi0Moh8Qlb8@G^*EGh9 za}~6Kz|g~VHfc=|rjvIzvn0LdiQy6hpPWv)R24{!K6LY#>2aETlT_QKAytl=r$+0V z_$d0#muGEnuhq`P89Lu5^qd7PX{oHl{Qipq2vguGx}!Dzy3T=HGqe~pSKMntcsc4b zl>~(m;o|LnrWc;czN@p@2ajK{ZG3sMVsoh_#H}C`^yv;tXEXQ~)Tj57=2wTQ>u4RR zg{s0Y*C!5fAp1SPy;7=Jl<{9Do-X_qdE@(uhI?1MOpMLCSh6&ebN$zbb-5(nwQh2o zme&tcNy%-wAJ9&YvkD`7j9$Dxig$z7OZpnv$lIYX@Uln5G&~`VU&J`~*94DW*7&v= z!tPbkpSn){h5caJL?tKZ<<+dK>g(&zvm(SeS-#M7mez&rcleTcW592rno{C4XvStc z1T7!m=w&KJP1<+)z@0~pt(kWITPwZx{iF;+^uedu8097zFAV@gp5Lxl_jl+azO7le zdN=>)Veq|I6@SX~ab_L$tx7IAToTT6tUdK>qk(zSt9`ewjd-Uy*Zi@*CSmrTu6Jg7 zwy>>dW1%*0duxMW*OghZP_tpg*EN+KIG@?OwY{-0e|3Fz&?OHb92B&TS5_5pquXS6 zs-)U7S!A|1j$E(+hJ9E2o2Un#gV|9Y7x6M)# zSN1w|q=r_wPak`^ZT|Vql>yH|C*$|FZOzmtVVh_E(oIqd03AvDX=6| zD9hd@3>rEI+;@%a9jaWYAqeD_)iWa54TOx!vwCa-oRR1s{$Nz8OSezR9NZt7o(Sq6~fN6?-x)3s zE+l!4bZ~hMjX7I<`}9iq6F(f@sVTdauEgVy)Rg&?&TOitrZz0ra6aOCJ)=WO8(3{A z{;T~>!{}&6Bp7>Tr6!tdwa3tfUqqyh(clG#NDtVVasoFecqFIOV~nobSX>)&4e>i9 zh%Ka>ZsG}aFSUzoZoZgI92y$>Ixy+nTQ6AY?B^&bR@6jozr{)k+}Rj46Dc3uT&vy5 z0i8sHo)pP!%wFaAu8%F2IjVJ59M25J4Xnn2rwU<8^&SC*8*lmh`}^nH@%~Q^b5R+1 znO_nsKEdA*mp&<+VD_omTvFrsp@(VP*&5v`Z#l_aNTfCiyQH*(?h{yLH``IB`j=u4 
zMGAf@c;AeF6|nusrh09}FW25-wy(f<{Q1S9KY`o7l8vuh!kp0MVjff^hpWC-+;HB0 zo`+h2e_Bk9U~$GH3W9`}6;wPy%y-$=OR5o{(-_IKkSTunr08vz+N~1cev%wv`igN| zMkSJFwEgZ4`4n};Z^1(z!>H;xD&)E`{z4?!A&&6gmztfm48VK&hIwST zak993Dc0fRv!l+kenztaeLn2A{S&qnQxN4cqyqiVVii;=x#LPOTJmEiRDkDWMiAGt zK|)c7%QLR@bjH-0e|SoA=(R+e`TY5&ZeNL zBlE05p$~IP^lS-$@cTJRFgxWv7a8E)++%D|R5cSK;rV8Sto)y zWYN`@6ixQ-KDH9xN^a)2vHEEd zBjl7k2=8hsyi&DJFZp9dH){m66zO=t<<2*@7<59U(D+JUnKn6LkfpJ8W5M{wM)upg&4n8U!mfJ1aS0K>fI&i$}uS zJfXcv<+Lyk*?yX_h*vY7i^AAm_xuskKD(@cie1RyTjruZCMIXV2By%OaLo&IhaHK6 zZ6lj9xB822&Y-NM(`O|dDKaxtu@UM?g=Bt$hC#tS#mm&f#(B>`C&k+0RSm)<1&kiZ{p02>;%6PX(mTyV-Rg^e! z4ows{;iCl~lfGtEfmrfUoXa!`e}+JY*K)oq&}v?n)qhrKRu^nmBw8t3qB`=eImUR; z>Jrhs9HqjCM@bttW?fdsO|i2k~Px2lt8ta zU7wXfRn;&Hm&y6BFW+zdw6l^IINiWu>Q0#G_ek^{u*)??M-QX_T$oq!94^5G9=cmG zShN@>n~>9ON=AxfQj`(k^dZPbz*CV9oh?$d}; zk_jLgj7*8tKEDt6OrjSuGhm;EY>2dsXVU6PC-s_IJr|LUa)??`$PaVGC@|L&!#?AS z(4~JQYheHs!jP8nkY5I^w5$tzP`mll>LNmKBuROh}OyI2_Z+QAfYBTKj;n zrMAfC9@k1acC9iG&mWNcBO+>-$+Fx>4})iGukkPOLygfzfwThmkrnr5oQA#WdRQ3X z$Z^fZUy)hgmiQHRsjjG#;G)M&yKnM^k7}hv$BV1%-tg%VxB-#I1pjv})L=S(od31q z(Or@%=+HT=0-ta)UNl4W@h)QqVQ(!Jj}Eroh>Sg}%zF&Z&#$e|mCSFsR!~$VcAat= zJmy%fH{AHH)x^lD*Dq=P&*zlcTGkl`IJ#mYLTYFgR^{NX=h2_8tqZ0G==I{-eE^{lVna)C>d@ zOt+U_o28pW9S#ti+`2aG*AgDH%jns=e0~22Vp^6z9@i|J1AroVdoeQb2Oz9WZdgy% z0ss$(!L2S@pSjIPNvLk|6(ePwgu*G62;2m1;vJOHQ_06%?|p0g$A zUn#r7#hE|g2eWF#)Z08iIel$#vU+I66Ojl0`-eE)b)Q(~&-zEzZ0j}MpX z%kKB`ZWS`FKL2?7GX(1gLD&gMY#NzI|z5D3#sXV4ygP!x8y11>1PUvl>Db{@SIHb z1a#VwPr%vKzSYS61ITeBFoba&7)M%@U7fmmhceB$&vSWjP_H;TG6 zxT~dYR+={^*}QJd46lG3n+gy+3a|n0V9iv7<*Yvep8dNPfyZN+4%0I98D&y5 zR@mjOc^AdeLz>y!(W(~80T^}I-=AQ97p*f%#5e05v8szeoz*XnR%a>tKDnRU8#@E@ z_;^&w7I0f`DG@n`ZRz6*g2;Hx7s(mHVc(gY++Zq5%v@6hoc#Su9|`KcP@_U|Y*& zBBQSsF^Mqa1h1<#<*osLa-*jJuE56Y&xgZOpBYgg;dYg^@P`^^2P!$m^pdT;IaWae>$ z;@6CJJqNa+Tz3ORt|qR5qWAc!)d7cG(a*Ubf=k`nqqMo}pynX2oVL0F*T&dfwpq?bkWbO;Ispt?JVa}I!j^n&T2L#=e!gnpCk z@%VZ^=I+^5;_JBK%$&=50fpaZK&>Poqdg}V3V#7ID0MkZ`KUB)3F2>$BVtNMdNQ3| 
zUdbw?iFZ_bxp-X%dKQ>lk^0g-;4S)?kT>}0qDmr9C>Nt++8 zth_VTqtlYxmN{qazd~_FDKNjq-y@tdP_~gSaSX#U8hUb-;8RwzNI5kiW#kJ)YGp8| z9LR!9YL@g|{FQ;D-dd?Gg{7`b1$~ikL%P_pAE7C|5-U|Sau?#Z5>~C%-|wuJ)M`&P4^1Ct zV+7ZXue9R1qq>Y%b?t!EoL=Q`!)MNxIH78hrB05;)gZ69>*M1i{7G{B+4-Sv*@Oz4 zQK{LDr=gY-!yUec`bSVB^`XoFEzQR;>VXQ-^TDQhFAtQSxRoq{(cE(*o3qvsXN|J( ztX#dBJ3n_!%S!Lo0o(=kz^?7F0a42o9&=JoPev+pBnzJ8=p9BzBrTF#~p7&xAnaZ?WPo9Hl#{AQu{2k3wtl;Zk=iqbET$7qjntN^3ky zrTN|alCLu1h1wjvrcWtr1e2?vy=-bP*Dio1vXtBo6yx-0dM&7*IzE2S>$G04Aa84H zD+*Kzq>_@7<_`D9MR|p!vz&w~7TGSNHNH@>?)zJteQ7H#-%h%%P}FuPjDP&Ab(PVV zIhA8oovy_QuN#os9r-4mNObWHbo3+2K(0yM zx;6qa9*nK{efr|DgD@DZ{DU%YYOZl*#lkIE_{pnx2AwN&)|aQ#@r^3cPgIO$d*+HK%vxvfrwsZgsux>XV%O z{C4TlRp~TP-Vk)9c4xVi;71xI|AW&D%`=V!WtK4Z?ePO{cn~U-58uu3!3jNM(%+eCCEUnhgm=4Sn(||-{<|^BshQ3 zpi9taC`DkGFAsvN2N_!LoFCpTjnN!wQ!-nOf_SX+U!JzkA5eEw|G!csIDkFRdK@Ta%Ep1`G!u0jpXjh9@2QTk??G2jy5Z z@~8{UIE3aRb3dCbdWkmf-4#Jp@m*)aBT8vQrn6)KkG=1dX%w^EO^=DWbTDGKJH(R! zom+NobfUD%8>cfG>i(8w#34Xr#EQqmal{=q*x$?#Ec>Wv0v4VY{CC~@Ln}#;?J%fm z6MJlKA$PN_phM6`!mjp*@H@lLr;kk*zN@gHjU!Z)k1Z!tMe<*pP&WBIp>GO~>)eZU z4&(W+&!eK%7t|{M?<278wVuz&XFOxhM!?hd?}|D;odzS&%DnxF0qX4kSZqdqQggBJ zi4H&SzYJM41j4a8n|S-^FKn_j6GYtrV`$F&-NDO6drxEr?}aR3NL1C$U1&qMYfy!= z3Ew#mYM{Pm{??Y?^%$r*{?Mu&Ogb}8uYr|zzvU5N=>m@pg_I~&1$M3eY_=c&u6_)z z*7sZPglGM43;L}1B6B%7?%>%YVRL?tLU58VAj4^!`?xy<_k zMY&KiIhY&i467CUXV)hX(&^ku+xe)(1rwo*YN-2MU9s2jK591g?7r*&xVNev_Y9v(2q+RX+ z5v{zsivlVCTyPJ#Kl_e)XZGJKPRc>sfy4{V4lzYU^M&cXyS{7I3d#f-uDH7y^ONRt zojUnJVf+EKCeQ4Fyt?}GG1MwH^>3>X(*n(al`vLSTYeI~SQrF9dWd#89Z?2aG6l$i zAaZ^f5ACy0N5DsHOc+Lb6zAQ=|67DbNAphF-asP=@Yjve?rk!Cpn^r~*N)r!$$SpMUKz^m;3(4!>( z4)n}bDvBmd0aS{rp|-YmzEZ28L6LHrQe@BabXUkxtFb0<)q=v=us2lJ;2s7VAj#D}3jniSrooE>lpwgmHW@d>cyBg=;q-Y=KG<3gG4ykPYwmtM5yDi?fHx`u+<~*}C)?{=#f4g8BO(dU9*4 zN7vpKDJm|lo2Y+PS0z;TF*5Tp{>}L7OBhw=kmI}4PRIjRk_Z>kKM45UEw#3rzg`A_ z{paftUSG$ZKpJ?^A{25oc_n`UjBo{H7QMJgl*hSO&!mXWu^%eP^Odl4Sm=amYz7mq zsz7w101g(&g!ZTqd8u8y{1;K;;?{JmKel&f_v_^cc*Jly>qc$J>g96cR;9 
z#efy)wlo3ub+I2T$uTT<0qmtc1&^5#@_?X-Uj{_SPeU3INUxN(vJ~Ab<+Eft`R&`@ zv}{alEY7qxzN*GKQm?oU9mJu8ufU>W}17T;s7sd#LpR0n%B-<2G|VU;>SI-Bomj}Exqw$0h`8CHk2Nwe;} zVHj)wVHTl)zw=%uwr`Zh&$PRg75RSP-p(^~!*j1(Ptom2J=tpgTF91 zE2UkyQ<;j?ZjzXBaI_Z0Zp_mc<5Om5XU)Wl zH#Z+b*_|*B|7Lw~I_1_=`|*#@ojvL-B5q`wptW-J3KZ1XMS56Au6<#)HqHt^|6=xZ z;M4TmuQKOdL%w@StD5_2ryR|fJ+Ue>zPdjh)B!LfJK@^rv@;WG-sbU4@x@NIIeUgx zwIxcqKl{<>KLF=EXjsR`d6`gATpJFND1o;QX1DkCk3O|(iL(RTC`~@FI^|FQ$zLQM zNLt1h%LYna+HCmWTLkhzHF#@>KChP)7SEoG%s67|cC3y@2OxL*%J#rF%!bmGSm*LT z9A4l2)fUo0b}k#+2e>~cVYvao9*1ZLlHXz881N^dEOVxdxCn_P>?|m_bf2_I1cEb> zo+*cqKY6M|k{Tqp)&lJcKRLP-)lVQ|FLJGw6;1d65l=oucfw=~Nu(ku>%!s-Hk`JZ zxK@6i5fZqBC>wfucZ1L=CV+3-DJiWe%Jm%(qt*iDMCH$(?E)NBKLy}iJxLM;F**sy zYA9$!#x%JJYn?X`(B~K$0KzaCfrsOoZ7Iu1mEO+eM=fxSU9uX;6Jxa=zf zU`ag&Kt;#{$Qk$hk=0iUr#&G4_3}AT`A~smAvJ%#pDG)3)GJ0)uQ%;Dw@AaooC;+Wh)e;=qc8?Ziou{6xYrv&j4VhuU8SWZdYO}i70 z016?RfQB{JMjiViHnaS#`l7(SFrFa%ZWHMUSXu;jPr> z9t>A_7-mqJU>H;v@9?b{cRG~nj^7DrlOYql=wUstL>C&&=}`mRNHLZDT3ox>xf`Sn zond#*!PEcY9xDj2QMt!oMKQ%@`c$c>@M0OX7^yVgrkf31eMx{rD`WaR^aSW9iXH+= z#Jgcix8MgdvazIge7c-!?7pns1Od$oZo!j+$xn_o02xsrC+#LPae$g~^scAZYk9ij zQ=@paFl_&Q2ZHpy@s+DG-%(~<=Q0*9^dD3PAW!<_e&h|ixN#b=bT$=Uy$K_y*6H0~Hvxd<82T?ja{!z+ z?dE%MY!O(P93tyvl)3zZZKRcmgDrU z`ShfLb#p%Z-QTOTxyg;Rvp*YlaJM}g5;j1~!$_#j*i;qYV}CKY9+Z_fQ!~zuErB$N zfHgS!-%SZrk@lltFy!(--1`i4R$p!sjsqDS5{4W({10b3fd5QmR0{Nvp;n7#XF|s} z|3PpnKp2<@_p~RUl7e;k*#KIXv$wJ{=!JSLrwE?jQe!9)u7tz_a`|tD_MQ`@#+Z0Z z#ebbQ07*a*nN05H<<@>j3VFq(a5lRr%I79s=T|QX=LSJI9}|z(l}90X`NIBzrD8DA zaD7J9dJq|o3#-cEz0Ly77oC_`J`Sc!pb)j0N@hjr z$~XMY8}ioyI*&tXSv8!4ADr5o1wY_psB-q74bOw-2%k4h1&<-l#aqK_Eg7NoCH*}o zEqd4k@?hF$RCwa(0^hOLr3j_qx}|@hx92o?9EtkWTZ~tcgq&r-Gp!r}+2_B80Y!vE z2s+ZEjQIZL1Vk?AVPFA0xur@ZVpUTF&%&nm|4r-dBZHVwVRzDR^Jd=DmB-*G3$CFP zIZp4VjT$En*!z^bR~BH=2XwXN{~b~2fgF_Nn~h_}MN8ulNUeVG0qq7oD5=T2OH$kv z#4ct1&QM^41(5|SOJqE0Mm!D~vBvO8mC(`Wx*fd3g9P9Mv>Tz^1S!DTlAEeh@f5YI zjeO4u@`&R2Nz3efLGDC6?j^{35O6gPduaaT7{Nf%zY@!qFkp}15BRqpSwNm=&`L@p 
zi?l37WMn_V{>`?bp5f3MH{)x9$qIyH08uMHI=O3X&e1Rv@Gis`2R>#m-WE2P;)52(4(eJRqOz-iR zXzOqH&O+idw${}er-7D?dR+}bgA@ma{j00*-d_@ADrlHtRbpwXIP!*o{dX#b@D!Ma z#DLdBAIFq2;n~Q?0(jtbdWfvi6`L2!CDV`fgOwl7f2tad5m} z9T{H|j7P$oSvBb7g8tg=Hn5xiR5lR=pk~U|3J+)*Jd)V2TgEp=Sx7R{qBv z;?b~3!llIHiTE>Y;Q4ZlR`Y>S_q2d-<6e0S;#0Na`2#?NhCZYtUH}R(<(up}DIo5u zKKq-vn-xRPHVx;QmYYRg#+HE*KgG+-#EMQIhYTTdY!gS zgMH@))Su6c$Q+ab`@uWp!VA+rh`P?NVsu(GY=Qj_^#Lu9il(UYvEM;i-r`dX*wx|D z$q7NdzBhtS9KMrbm2Dyl1=N4zWC!g&gQwc%^@>7x|^SA+m;sC zHE;#$lEpd!W!YZD@lp+DTo^By#mOj5Xlyus(1Ko5j}Y>WC|2}fDAV6SE;k3B+88fx zmm2IE;Ux$%>gc8Ezq}L_yTsXB$Y}Yp=U#$}a#sF0+vvk)u)8!mQ40!%<;ro#3fy%z z({GQAe?39xUCaMz7id7H5_|{K@$8DTtMPb(EP8l$zNX9*ZcNL^>Zb`-YLx^E94OYyO0=L=S)%QfDahjFTI z|Ct65SJBXtiJ}NQxxM`v=2LftW-i)Bnj_Y7SYVV9$$t#5rF<5|pi3;fEEydhY^Vm! z5z2r1=eqVHM_Tx0b>UxF_tA-2yZ>K=D%L907zxzbk9;1?2Ny5_gq3agf4MZ@Hz$3e z0Rr-9Ahs&X%Q+yI9JUk;852YWDsT_9AeQXhap#Yf{zyUCkdi9jnCNVXBXX*|7R9o;3~zWM6~K{1WL>2*g=eN0!JcUyI#M`Q&Ic> zTU$bv`VNYP@xG%n0)c-71=KDDHYa{%qjf}X1z6T$&Y)Q9TqTMeM9%!(5Z9LKyi5Ld zp8)eJbWtjI+&U?PbQ{P{w63StGwL9taSyqJ?mS6{tWS$dJql;}2_LNa_rOD|UL)t- zvvFtsO+Bb?*teR9rAQc~;_=FGHgs2Tc~~5gc61Wlj=uv8&FnK6{rd_0P(!r^Z0hG@ zHhz zGBe7gStMP{E~N-ib+l1JAs?-V)^KZO_WBu-;dncvJ(V9&@8PJ7O_~RFp261JKfh*| zdm}}R9(6tSV|{boLE5cBc&nROKv_{pRxQCgHPGp^l&9PXz;$7i)A42U{y;P#CNOm6 zI!gXhy4L&x;(owo+KqHkCU^GWfkcP;eh+ImP*AhIRECaV0X|7~Yf>ce{uGq=)ZM_f z8B8g+O7tmYWRzr@Dw>Bf#VNN;{R91-Ew*wiEh4qH-8k(5>jbBv>8h%mkfdt>q_RdZ z3Z$CXXp0wbNs3%2JYOR}FCM+yV8w5uFqVJpPdVIJ^y&$^_7nf0w3Qe(UdF#d()|Z^ zb#Ab-3uuBLq)=9RP4mEkJe^)6w{TTeRhoKQhFZFM$`fT$_SQ?VK{cSkhe;#}P{{4q zk80Dx-rJY^kSSW97`2*ZbU^V`=hyevVj$G2=(=&^29PMdozV$h&zrfERLtA?MG9S> z_ht>DW!vFWw=1LzS{CKhaw=$Gj|W1jgfFFlbE;ma5~%%!Sp>W&AgDZ8x(AEwCN$Cl zm?w)6D#sVfh14j&Wzti9j)zR_ns*uuhBN|Yvd^F+kt;tsZb6z3;ff#UfO5>MhEX>( zG;{z62!`#tU0M@9qI&F)^Haz#Q=;yv1!^8Qp|?FmelrFEM(h*N8!H=|86cgc0Cf&- z&*AHGBGt>$;1tQruSRxb^`X#-6AGA1v%M?&g*aq8$&f0Na&{r3_+AZpiJfs5c+q8p zE{RGcEj&5p+zuoiQ}FEiF>q+03 zljwOc>D>g+`^F)le?ce-1}3TR@&{Tn?RM?r~tI4 z2N2cGWF|O2Fz<*bSiJ 
zc568%xBNRNM1evsC>5v2V!FkQC0oH#Z3!l$zAgj2BaZgn@vjm_+}s1Pr+GPIiGs zQ^hac-e*V8F9X5iG!Q_RLK>ZyJugvONkEAvP#U8lsRHbwJ0 zJfvt-KPd?0jbE})3C7yF4>-ax83$78&0)gvxzWL}I=Z07^p`hwQT(MedQ>F0^iXUi ztIY5BD`~!q4IF^Faw&Eh9Z7OwX?AmS(|u>c6)C;?rZN0f7O-nq&Jn$Y05mx643!w5 zlOAd!)ktuNBA@URpVQfi=$)+|x1MaVzxG^v^}t=p~4 zQ*d-ucTy5{$Fo8pd7A-_EMDLP?o);qBi^R@1*nl|L{d{@k6vjy za*Fa)$Y=EG*>A?$*163(;4rS+mm5kD4VCcS27}0}-L7zmcBUzz{pO0H^Ft0=CWf`! z>x3DX6+q`GK_?|-39daj zy6f!dVF{wd>Q`_irbYO%GRvXkq~{UMXx#JTAt;sz-^>QI zUtKK91W3?PK%n5Rxelfp3zcvntyI_190sDK=Z@3ld!Jw58P8|;i`_P~YIiO@%N$w{ z|D2TbA)r#b)l%mVT8J#57W*Saf&*W+krMVACOWp?_RPHzL;oLDQSTCuXx~&6%C$mp zIKFMqhh35QP0CpcGR zLt$5X{YRQJu>?|Iay+D|3#WZO1TPhueGbkFgRx0W{siP-7dX8%mU-?SAo`ql);|VZ zV5WD1e*-%8S+q2f%6D&LO4u;>f(UnN$)A|&N4BmSSfU({buw0{ZH}O!E~>k}pXh+r zw-tEWIB|bXWKS_m|EYN8>*=QrCzaUEIaA2>XZKk9Alx%T7EjR#%$Xh&1c`0 z8LrmpIqaePIyWZ*$e9fRIof?x>2v|9Ee7S_zx`*53ceCxgU<^bf^bOoU%fv?iADM9 z!+HRlo&u_Zk|&J)J$bl&z8&Sj^?URv^~ipPlOb(Y?=7Na=t$R6qhSD<~l zB+&E8mPfqy_i?9@6ralnx@Fn5R%UwJgjVx+wl^x}Dhf7iyB(8uHd)EfGY-!%g&mjl zd>X#|({50pK~PTY$m&}Cw)uzc?SWB0x{;~WY@OjLGs6a7a8!qhb};dA;0i_8SwhXJ zwve9XZR?Rjb_M8ob#>CV{alQolwMa5d}1^o)aAS&ZSwr)LNG8Sk;9b2Mr%R&o=bkkUnu!zEPmBBR%;Gi7gm^n(@m zd7r{$5ZRdtZqtWyJ{hwm?xSXX^Batn9eIOVlRC%ElD$*b&nYEv?S%X-bn?zT{af^4 z8$!-PYkTWGD0k>?`)l{^Soduet~ujdtt0pd=X8=xyOM>{K+XR|pL-=Z#@ML-`jn(Q z?!w(SU$;G0pbZ*Wo|-wGqhXqv;8N3{bNDv^135!P)n@z&;>q$P86b8Zs_DVfzAgZq zlAP5z#hecx$(4QvouFXt0S6*Zy-Y13UdVM?o6Arh;inV|;U4o7dTxoLIy=MvI>Gq0AjV{RvM3wF2muX{XfaS~$xFi)uhIOC5Yq zfL{JZw9>}iHi1r~R zXJ%rGDaYEo`Ka&2j&~~+@HStRKX5TgZ~!&rXnDjEdf19ETN3sDwh!&0I$y`i~DN>Q9!YDj{r(jK781f496I==QOHjf{3_zXBRLB}%t zyZVM=i^C`nkDRm;Bg8cZQK9JHleh_vDfB?%!OT(pwu^?8Jl#B#b)fbih6mcdiFfSN z2|b!nqhDZfF8#t7Lj%VSep-zwdw{F>VDmwNVj>4;WG&#A2WTQDZOm{gVcf;TcqISZZoT_%pACqR#iR-dHF z)!Qy!bs7+p2Bx=5z@-Je_*R508@5KAqqx6|lIV3;Pk4O_pi8~WH06z)M~}T6O8fe0 zPxWp)jS<&SiN3}S7rg!Re2D`>TGw6Vq1^t+()Pb`6T5<#SWIyinj3k6_h4r|x)nmf zhLuGx#x#-_G9HV<0PE2G1c&o2(FyYiPql2~I7erc!R;_H7xD>pQ!*kz&H{npq>=VP 
zVm)?p81Nk!r*DN&9#~}CRkG1zfF6YZip|UI?bYkBI4x~@F?PNTUYZyMU(!fV0cbM| zBvMAuDRaV5Zq<_NF!;iSTvixoej*w{2+tsOn=1P?*f_iwDHOjqFmAS2?iLzhkQscB zfkuO(3@3_yKu4XSuV#S2GI{RNe(rW~m>|RZrbf*2liXXH{IB3xCWpdy!3MQBhu=kf zbti(J;Kj5vxhhL~4kr{Hz5|W%OtJW3+ zLrFW!ChGEGE`8MyFurwiKH78K+&SmZhpbHh4uuN{FgMbRi4`0Oya!#}eQ1>Eek?BHyZisIHX$_5)uLlF2@m#Z*Ao?X@}|`Q%pW_+X8?KK6LG-_S@Gcp#Q*7G zR#K@5G5rsjg1~xWV%i($VKkP%Js&VAU&5Td@ZHur{I7gDM-PuZuh^PcjKA*UbD05~ zlFuKoXsfr|IGs2UDHUh=DA~mf13JfiCG4*5{J4+r%F~n36rIyXXR;Vr=PlNzf}YUX zo<4xf_w?vjR4qrC-X5-ZD^hWP_2jPpv6u9=mu8)kxh6EBtSu;hY;aFcE=vi9WZ*rD zK3?~phlI%+8Ox3@E>dn`)^BCSf#nd){o--4mC`~rEs>!0gD}jFC_D_fl5C`u;zhg0 zXojtY7E_c_ygd({2S!|8v)2O7&0_ad-C#eB^uyZkMe0>PFi_$0%b30e>yUr-YJj&& zjy3SAq6kY8`^!DJHb;a@mp8JtFl`$eK+wKCK`X!H`18Q{r5zD=aNIfS>G+I^b9+#n zVbgPCSCLwGVKA-Hex6J{Sl148ijQVUwM}6 zg<0<30}FMxURG;(!R|1WXa!7O5qbM0cq$}NG#~y^Qrhj$lg;Y^Lq)c;&1Jk=Z@YNB zxaup7>Sk2x5~mwQhjWD98~>no18#(7>JG|=#Y z2Nt$=G9{hZ()jh7W4gfQqJVspJ9dOrdPAHRLZH&tOi$cXH7ALfV;Rsg;o?4tS5JK* z?z{RxNbeoq6c$twe1JBnOlVN{op%P-ZGpB`tQG{s)-LmoqwzIG$m8DySsG~^pxiBB zB9FW^euRd@f--*nMb!3bf6Za>Te?*;cap>39tZRKVpm}ubr1||O>#B1Vj&?j44xXU zQ0VTk=_B;n7%yA99&_ie;ps$Z_;hrf=fLFnon5dh9k$yxSG~`5Vq5;TjXzP5u`_1R z#r?a~3HIzcZ2fvc|D(`dDoDb@W$Km6;|y)PpC zTf*_Wmhlp*pF1fo;T^K0oEZg$di}I@u-kO6#*|vMW}NDa;73aeWORzeArZ8E_VVUe zJIOzbb?f&81vcnCj4xfMeJukNO4eZ!svA3M_JC8;E(n8vtCnM?2p4^nx|lwzr}tth z%zfkHM6s(PUXJ;F#m8A0U=XZ1bExQ=W*fzr0|b`Z?@3ioAH=G0=^6&wyAn$o@l6>_@&A6xGo&h{Grjca?YzTe|D z+yNA|b&h_X)5;rLGyX6i^JkwGPjrEcE_q%7;shT&^&#zeS>yUu_EQNp{Cm6Mkw&7; za7iuz2sA%f4jY%ZxyL_LJ!40EuxwjpT%wCl_K-g_Y<1abk*UKS`X7q<>v-I$lzy0_ z<37Q*K)E!3M&Jnl(!1)k<60&WZuM*C>7wM)r_>sJWwKz>V#JYnywYpJe>+@A02;PR z9?uIChA!#zCq^tSF4EnK=1auDcZK@CMrT`cVt-re9 zzr8*U6G$ydZ$4Y8X@NcYZSU9Hj9t&jq&Yz@Ci|ZQIUz7t^tt~dNOCnKdaw?ipv`|f z?3{7YzHpFr!GlI#RACdtW7%57_kd$+f8Ae3>Yu`+OD9o>foc#yaW2I`Z1V62vf^iS zr>sFa-PyZ=*S237dZfaHi=mU&utT>|2q3iK=>L|T~Ulz9IJ{1fzI~T z_VocIV6!O)RkPm@GS3mDLB(a`;lC++iv^%h zIK)2!C)|hQf^8TZe};5>AI3(GB>+V`sIMCPDKPyX{so9TFYu=G{ 
z1Wfo7uXwZS9pp2}@UP?d!@b#m5{`@&)VaPWy7$y~J%>J7kMgWaJdzFH%7-U{40`B9&Yxy#o@ji&< z(+VRe6BDh7YN9;u%NT2$wY`6%_d=duKlucP#)e&B|E?Kuoiglu2s*Gj9b{2HBSm`U zf@m>!Zf4=Va`vDwi^04V5lybx2d86pQTh9La?;hO* zW7c8Z{mKC1F5~irM8!!BT{DG;J#LBs+HWHLr5_rrC+GgaT=P$!cAN#P*ml+U8I3!^lT1 zAQZ}fOHj{(_;<2@{|5)~TB)b|eKZJpsJ!S@G?>psGT&VQm#&?g5(%)m6+w0oqVd5uLdl5OS`@u&;ie8<1;(K`GjRUv@R@G)pf6M|CY$kah`7u?>(D|X- zuuu5{O>>V|^-R#cl?3$DhoK?CGe$p(x9Cp}A_7_AYQsu zuT8zjP46o_+tB1sgiL+>AHIR;QIPy86`N^#E;gTxeE97NILGb0Wj^z1tV`S4AN*#Y zSDgXSjn3U1yWi3SoIQLaCqf7#l7^@Hr0ye}tNuIZH-Tfva~~7oy>%HlcG|)oQi@jTg-H@vL}m|ow?|#}wkA<& zpLCLq{wrdjWN)YljTJSi@q|I3qU|A@uz8p-0(H{SGM=IvHBtELh;YS-Zcf7T>4%d zDQ)SiRi2A4|HZ_R;+Wbjw}LfuRS^P|@4M(QBG+)SckMcL@}@XyX9=c?I;nSt(PowF zJ=S@dHc2;4gqneK55YbhKxk|ICZy#vW-(6B?B7>1yEWBg{wF_lq1QTHboar+BlX@} z>jz7uK0&-vw7!R~7(3R;bW8QCTy|ar!Ud&9i>p=*?5w70R(gtM(}EnDQe>ZDj%aRR zIWyDcJ}OQUGgqO&sNIp%%3Sb)`rz279&q93bh5cQ_(s{uaLp2}M17XFi7_(a`?Z`0WxA>EPc&Klr%H99V zM#4%c>a*@S<0cWqJbyjlHB_R>jTJ0K&)o7}iod^Az|}ZVfYSl$5fWA$DJ|d#A7-Cti>jMS} zKoS#@YF%IT@MYv*78NSY6%);ivJ=WKMWhHWvp0(4-mC`jKcktA{V7!Kg2t<`nm1-9 z*Aj=GjqMdG^7IULu21+p+=2o3Na<8_u8+;uL%b;AA>t&TaS?qOw7sp@X89B#NTX4f zmPR>t3$g=T>RtUUaxuMUk6m^D`O}(pHzXqIw;)$C$C7AJ{otI);Svp9%sPrm?SSk| z>|UZy_!gOckrPK*1@z=I$DCAA9u?O?`JywDfp_7hqA{w4U7EMRS^gcai1}Lg6HOqi zBWu;MZRDLs5@n~YapXqqDIkfy>OS~EJQauOC2$z=jZ*%@9{|O{b&IaN%VI!)wpn@^ zGgyfyWp>aCQTox^W~UR$i$9bCrQq?h(G8^@`~e5(z;U}cFa#hn%?T!>YVsbR{Vh8X zE%ssPL@dv6Hl43m!7I8yhhHH*hTSUUxW~G8kMoL!^5E>B*C__zN3aPB)2fw3fc2SrSiK3*D_=iMQ5_fps1mvDV>MM(Tq1$n z@ET7hAaAB`mt&pJahOL40T;|ai4bAEi42fPFxvaaiph^4^q_wg;R<4j)IC!kC<`y} zj8!XIX(j|-MuJcpM&Jxih$2s!E5yw1Lt3tbD9ct$eJ?%jT@FeBin3@`bn7 z>(%~Vhqmel&5J?kkyuQSc(?fVAOuYziRe=gFRXj%0ETezZDQyJ{|a`dDXj&PJe67) z6n>`|xyocl59y(~UrQHuykHe&L`@;XpYplIuKi8pW#!9e2gv3D(BGdc65- z@Wz4DL|>Llu0qIj+cF9E8^}Qc(8@+u>D8aM_v^hM+qq^UbVUMpW<`Q@3L2!-*FFrw z^$2w+T>n|WPDNN?(!ZuqUZLP z8S*I@0+^?2-H7 z)HDTvls}6r-X38-BRBLkG{@)tx=XbRcKz2$S-@<}$*Zk~r_jcwT=|_qA=o9Q?wu3m z*J6F396T3zh3C^Va)Z4H-`yHQikmt@K@+LpbAe$36bHX;@aC#aFH$bL*PicGFPsj~ 
zofQ?yyOkmOJ6*I;i;!{7`tY>BssdPu=EA8y2{9p$$C;^FN?;hIU+*0lBAIyi1KP(W zrQ1j92(i#TrxdO4QgqUWfKne_e|CUMEmH%F#iSw!GA`us!84F4dK*zp99Rkz zUmLc`)L*N%+iDrDW%v(z;132KhtN9j%5>+F19P=b9A*kY&*JEClKENk#|fk!A=$U_ zdSSk0yw{e%`If;OmF7?_yJ1TC!Q%AVTB6hu0Z>W*6NcSuzsdZg!&fjbLeQZdAQfBN zQ5Dzwd|PRS6`AMrxo!V0iL$$p4SYz{EO3NbtzF;~Jco#74mQx)d?M9rnhQLUAuAra zH*|nX--hxBO|(`y?)a&muh&x2$~4XFeIOzznicNAw^THGKo;v8vFb?P?m)@XI z-*fF9TlNxz&%#V~dh&?e3dBc9lY#8BhrL zc2dUDIPZMaru@t2jT@1}a|@4b?jHhmUp;1j{xJVrQ zxAB%LK9lC9dWK-v_MmZC<5PB>G9yy0qt3Rp#jDTaliWFIIXCKz``>1X&iqiwU4>^I zm+*{cYTfT*V^5!ZXe^_L%fV-eqE24pI$i@l#1wH7OJ{u=gozd1CQY2&yU&gBP)i4!i$}7^uZE7_7P#Hg zpoCpnfI`G^P8}l#GtcX!w%-yXNO=S;JE*&ip9Z)HSE|n~y|mK>)TE;^Y3&R9!Ps2W zCn1Y9Ro=f<`uk#mta2JAp{u3#PddqPd?C|hq6_cm{N3=CHF9X_k0SziA5P6E&P0@W z4N0wARXX^Gj)y0?;MUAxBB{SvXS8d|fFpt~(-n2n-ED;Qm5JHvTYWCL7%%8HM~$L& z|2SV#FvPyG2qfWLXDRiRD=!J*#jD3T>yDacSSsZ%1R8 zKzp`q3lq$k%PI2Uum1|tdI(yNam+!0L7)EpL94=oq$sXnBWu+2isYsC*YWHogHJGt zYU72@6+55z=73(zi}*K-w}vh(5_oVgeL0$-yzD1v`QLUP2jqZdJ3PgGrcB58{vp3- z=H-fR7*H=h#O7fYEEMGOuY!!~d!Jeyqj~U?zy2`7ix4cl0w=824;G_$WLDRPvJl|M z;0jFGVU=}Y`arJ) zgh#DNys#)qEzWVtyAXUcp{^O$6kiG5ah?K{h_oE&6uD2hDbVZT5x20YDo8N~TUT=C zp|zou%45t-sQ@=7E)8roHu!Ze8&Efy08Zg=2;bfHa;i>52>CLdwoBCvaVsejv9JIKzVleF74s29Z&1&fF6ySkS&T1tChU0=0#)yuR@ zuD`i_-`i$6K=lH_Q0_Z*kd)))da$MqwMZ_f& zNW3yN9eK~p9g)hzZ&jDmtEb%p?8}L)=lQbm=QX?x5=T07`8rKl9je}Ao-?M`&~_~> zuKy;qIBZ;Jq#5BOafo+yX#f-q7{6Z$=T#8n1JkehEXlyi@C4;7TG>)>CttBN#xl3) z)x3pK;yf?&Z5xHLSP^&wUSJzdLP=esE)8lURR0dr&TKg-ipB0DOR*)xKyB&+-8vy6 zBAT~(DRZfCjlLw1TKx3kO;Ot#<0DA*`O(YK@<;aOv)ur|#k46!Rn z8L^llpd~QG&bLXn2Ra36cOFQnDUO0|Z8nFmvGW|(4)YApj!&bSn2Oebz%uLiS8TzBbSK|E_}J^)u#nD z!&pI%^_5;3$gXh(W6#wyezncMqmP}JhrzJ4)~R`6G#z8If;?T~Wgf77+t+pAftYP$ z!MLLPWUR5nb^o;|T`9M;00<;*ycz~frlKbjBu(W34LI8WRz`-6VIA3&QUH9Z$5w?V zI)nDV@s-_n0@UR!0bO*qDm{I_Fc#2+p2fZabe>%RgiT}%=12DSPBF)z^J!{y=Ba|m z2JS^|r~5zgX@?W%xy;>V-c%c1x+Bud8xIir>|_+3!hPWnRy6f!nWLM5vcAdgKlU~D zNDQy#6D#LMS5W9`ZhaHI;=(}^Xu!iV3#Rz67gW6GCPioiV+n~09)7i%bg>C9wrFr_ 
zgZ8X|POe}Ax!X{ph6oiT+}2cTP-<0hlk);vh0z(GC^jWO^hNfZnu1m7EHh{{kO0hJ z)c(eD9imOyO>~#SYSo8JFH6jXwhDAT1uVsN8=5a2)S%B}-&9S#kz8iU%|Li2#qYK% zJX4q2D7TFuWgGq)A+G>Z_8P-JKDg1aNofy7`G41xk^BuCBO*MizOpk8iQ@*t zWJ8`IAjcUL$^0Yko6Y zK)FC_iot1h_r5QRO0+YJQ`Egu$F3ZVgn`&}-*E`6rFtcOD|B$-f@=YHo{j7}6H+x{ zvQ$@4Ipa(8IwD0lrPHOX8)4#D;s;cVR--ExK8Y|3B0_KQ?lbpe0%$9jq%?Vir*YOm>ug;saC?JSkyjz(gV@#9Eng8mN52Sge&jIV5*OGdCk3P2@nA-F2R!7aMP{HXydoy{RBwK^g&P{FeEY7o(xc1~y>gW;U=WZl zKn6;`TK=QOeqb}CYz_9&a%&U%Ev`7~uTBsK>u_2jLTfUc)3}}dx}6-_%DcQr5}YRk z`4H_MA);;zdAe>bY$nk4hkW|x@^ul`+CL)mumv^x8(^wik98hCMZIbFAw9Pgo>Fk9 zwg`IJ+6vRUN|d`=(;=ziscx#1^SI*WCUoC}{XxsFy2NGJ%j;<2lT}PpZ|O!VB|60< zG}eJZ*0K+aHzb$2Ak|?Fvolo|st)*6O-svR>)lASgG;&Jc>Wd3OHT>aO+u-Q$nuDi zan+l{Vc<7aM{SP^f$2Z1lWjr@mliVaHMF`aDj{8`Oj$Cve+QS@bV#$Oz;Z z{a63nG&Se!Bn5jV-|?d*#VWYo*5WjmoxzsWipsBHb}OmQK;gHL_t*@?PbtPEf9nve9|{ zerSsqCkNUMXya^LFPb3sXFzqG8zo>N9}=d<`SMcz;*>pQ6krSVN-P`#adiwoKd$GJ_Om^7*N9pAsK(M_P18XIdA)*1>LV)aX~??{?yYgg?lbLkbtT3DX&R(kC3>@ipu0hGio4S-dJD>4Ph{(%924AwztYvQP&kwq9(-$-Mq#z=$H>=7p* zmBep$&(X}N7_d#?aLZrG?{BSV0O&50sufugIL>=!1hIuxp1I>1UcmyPe+Abj9yPU0 zn~7mrRv3LR#bWQLr3%K=hOp(*Ufk}KwP)$)c1?QP4k#k<+yVS7B!p%^Obfi~iz|t3 z%nAR+=U%XOyNXZ7nBFVRy2E(q8KJu1R)~p1nVp^w8i7vRl-j}?6xHYuIWn-!%Cmx$4_27TlZLQi(@b?9?F z_MZ*gU#GP)oE>W;i|QR>DF+R(iM2m`xVwgre9C6%ZTTqp@Iy7IGB|`aFd-Qh@8h%n zd&pwb=i!{llt74y;6t?+8cD@kF7}Bq0c(r1^k)^@bCGY`!GEqasBc@~2r)3rY);Ge zSDl*D+&B-t<2LQZ)iVj;XH{-zUTUz%-5i5|9N~ZU(|zdS`1~1iR~Go&JL zeeD|TLh7stPV!B4OO>ljzHCe*S}&Knc!s(r*QZ2t*@NTH9>ltj?t2bL2dcp1iLs<< zUa|M;d4z{{BP`;KiiOhRs<|D|`6|uH!geSE6<&uFx(vI7t}D6n$*4_WOu4-0(IHBZ z!wjH`eI4ib0>MHs-qVP@seS1xvcQ)aS6r?4*XaITJKbd7I9btaHe1o&9a8(gaCP-* zY)G}L{`eD>qRx_iT1)B<*h1iGu^>dmY#mydUcDqm2}zu6S~{Jv47pI5cswXfqR7Xk zNSLlPA&~<#K)oN$nc^=1&E4n*qyn)mW*yM6 zQF0O8j|xL~0utwPQYw|sUwOIpboqJ0X+6!=CL_MY%|m9Xg4w3m9gec<2Nijcw4H$i z>90!xl{gV`c7y%*1474A-a;NXQ*R;UYC0p;yLAE{D+R1bBa4bG8+Tyw+zVs(?#<)(AfhzgXh$Lo{CU^OT>n6-MHii^}aMw{j z@io=NGGwZA+vL*a;kO|pt1DZRwJPMnbS2*H|6m9}4Qqt>{$lTXr}vbpGV?xp3kP)M 
z;>Tg)3a%19qn1mJvA7kfD)PS80h$JJ1zT;qk2Qg~lYtqfxy=2}K^b{du1D|4zenRe z>!xkT;)gEU7OrT3V)6Y01mK0}pwN_h4Zi%bAUzZY10Irm$6S*@EbRK&sPqO9CMEc>n&pQ+3vkMJ? z%C!ZMp3s1D$~a(eagNti>WKM;TmMeqW7BK^*^AO3#GsZ#>xbuK8i0G1yi4ytEl;;? zQ0$^2%f8I01C$>Jln(h6m2|oE9SkW8?=$j`KWJ09BpGVCm`>6krv_4Q3ThWCZp)JT zNS1+ZHk;ee{;F(zm}pa}ovv3PDT>X!Y~kZLz?DlpI5V!gR}mu_EKyTGfBzp9(HE>T z-xIOJ2!~i8tK{nN<%e#Mey_B5$ykk1EfLW?%O37z#FC17u>0A3vj@WqVKLP1yQrGsIYv@O52T}4t36*#M#4@I?;6pOx z_m7crUJac@NxXMMrFT98t}OLiPn~d;2X3DFOEuOrQH%YlcsPM8s5Mu|%5~O;b#>Xf zQBGtEI=*s}ycU$VNmyfAinpZg4w+X6wCPQF7M)JBDH5x$1xKXI(Mp{Cid>l;iRJ>9 zV;y1SCUoFsj0ZpH5_hQd5A0u3=OY8c?Y(|NOuncXEFmhk$m&R(+{cVbyTD*Eg7Q+z z{k+B??bv{{X#BM5>Kidy$tBr`6?mD>AWQ@0GcDI|*B#mCrE?3uR$!T=v=T%O&F(qmk|OUAtH9-BA-_?8HK;BfIyt==p+ zV39>#V+q>DN+-Gj#XR(}tGZPfu$*{IgProhOM*sF0THf{7$zjv_w=xs5u0)G+_SYSQBYw_IC!80>dz5Auqxsp=1iwNZ>i5?x#ZhGh%WLBO;6&*v8 zoA)v&Q23@(CfJX!h3(gQJwc^Q*qjiHC?J8{f`4hkBSWb z%bD~7uB&2cdoBHQUPw~owroYO7@7yYmO9u&AU*7Sqru)OiY}g~5P~t!R%ek7OFq#7 zx5RTF06)P>Gk_3Oz*c8w+sBQ=nJZI1f0-RAEc(bPjU)=h=mOw}Zq@_k84Gf+3hT*I z#ZpAg<=9t66aHYw!QfZdcyO2>f{I4qjEuaWfrj?sUvcEP=gJ0xiltK zsaxGkoT|KB_wFu;rvWO&%pjD4UBBc2rG~{)=2wHTWWT`h#Watyb#PvmsT^TKK4`+MW)UqZ1Sff?T^pEDcm4 z<^M)nek&^}SJ(GMW%{|;LqY9J@<89(j=T2p#tYfMxWA|B0)r*RNdv|%SiSN^a@8^F z^YRJpl75#&t6?L=NM~N3*G^A1xOdFKd1m&xp{|VL7G1wB6yx1vdaM})zvqbg#bws| z-#0i^;I5VZ)?yz6Acz#OErr-tosZCf8Z+QVAiDz4nF0ZV;uIGdV3f7l#D=``2N3md zRFC{~9}vx_RgM4pIK)@*xWKlb?4>KXMHjC1!ltQv4sgV)mPun=`z)5QQG>pwMfx}} zE@nbIO>}yy-~xhF*R3hf?bjb>+PYnSUADGV#b))z_RFid9LN6m&dAt(kiOVbIMmZE z2RF9)YzDHi;hUiVJ6N`K9X8yjji~{KI~?k@t(hjbofAk(=mVL4L8YBGF7->QK{Z=j z8bseRyHAb_WAoD1?9|{R+u0LC8nioLKn3N~pSPY@SXHM1VomO=leHx;P{H%ffYAHu zehVG=8Q&w0EMDwC`2&h3nJ-z1fA;K-GC6&5V&*n?zmjAbkCdsm$W5jA+3J7-SjB)~ zFxlq!WyI1Gl&f32s0I4*W$Def0eu;7ekM)m}#6 z#AmOEYv_4x+gry9M-1W{8*PNv5OBWY*wVPG2=T7JFY1e3(%%yGQ=zsvASJeG&8V3k z?faQ)t@s76pDHFZCSF#uEH!fm*^yk&%(FLSNQnz^kHR-g23l9JdT@GM@y#AZZqUj` z)K$I98Jh){$5+N*vQzEBvozVPK){Sq;pz83p*)Tnruxp;V%YQqh;)S~sSyvzGog%g 
z9}U%w1%f`kKXA-fOU7KK2K@k?;J7_c8lbRPFn%>iN)?kI&WGlyrWKSZQ0Fp7@}hB7 zn^2j`E}wNl^Wr9B{WEuafv`nLDb2`ekfYX%a_^lU#{w5*AN6yo5S z(R`>RHP~vVw;R`KnJHZ_!mOH+&1~2|{zDVpV#@mW2dF{y`!tfyQbMS~6eO*6tiMxY=SXriAt-_n znQhapdDH4*-*%9c;1mOg;(G|y#JN$=u9i-f+4#ZkEdwLwIoX(sZOa?`$n z{M#NAwq_WLE3s>V^t?^Pr%PdEk?pU?Cete64jMMQ(bZf-fLTQmptP^8zQ2Ci(owld~3a9 z9|AUs*8Wh#iG(BP9gkFVP6>SQ0>lDobUy9ds5LpU!m(aJ5%}MjGm$P9l;T7jou~i! zgpMiKH3miEB)<%l{7wbb7lKeqmr?%BQ}vkd?mu-Pg*ErWqGM#zd z?;W>iQ`A+E;(|m~in_TyHyR@t&y8F5i@@1oN6y|QIy}{$- zAZtnz7UjQD^+2X50$Zph$9$WI{S4LO2Gy??^;f4!A>xl!<~`)2p4qQ{%#H})}md`4cQ z&XVYHoNWQX>H%D5Vdnr9x7JRg;Be1?njRqFSd;TQl7xKK>bAGD?s@t)Gsa%#!_oxl z6chf8y7Gos*(Ey2Xb6xS`=nj-bparx`l$a42qwDo8`%Vsk%y*XfOa6-xhDf~5&rBD z)2BoVp0)iOaM{zip2{}T<@S`$LUqh1+pT6~JvMVJcHeH90DW7&flK5&vLl9x1E*~_ z7LM2{Ub+DgNzkr+cf~c0WqOwK`ajaOzH9QK_sTyu!_}D6E=P2({etg*$=U6Ywav+K zd0G5pX|e72aDwXjf6UhszMNUGfj5h*yHwO>7Do7~_@)HN1fpq?I&SrQf>GdevQjkJ zGjDJD@)K8*LCY-rjeU1q`Z71z7<)RnWwRsv$<_12@=n`#EeHIl&n08zhu-qU3{hcp zW1ZVpC#dT(FJr+!f@CB4bA@#%d8^-sBl(-^rWK(n+eb75 z)_obsO`X)CKxgUNc?|sFbT-P0%v{VY>IVYoaGZg}(Pk_`E@Qf>EV> z&s252Y(VMhAkP)&_0~yypGo&-#n=hL@#G^n-vfj{$9Z+VF-<5(FhIa%Gj5k={Z7>% zKpsEi?q2~E9iuO21p>B;Hh(#4#IbsR12&0%Qfp(V;Mltx?}hc+L@|IlA_;5+woX}P zyVkCu>^u z;99A7-K;$|$~xROb%=6v{#=(S#g1g; zPxLRkqFyMvA?n4nc?LMP@tVxsBqT$ynF*X?7F~Oiegbbt;njxUyZJ&Rm&|u&{&=nP z#-YbTJFE{v+&umAHQ2Z^CCE1Ioa|Z#`Y5$V)pm7gM%KQkBHinvDWu&b3E9aei}^DI;GpMHA==A6-2M*YknB?&We4n3vi_Xdj}yHd64 zdwj(jD83_9R(f?vg%zV<>ZOo%_1-+xwpG@S#pEcA2}#2CL_j_fQD7?G<+(wn9MS0j4;y7A z{DGkB>TMLA(LOPESBwoj^N*K03m!!>ZqC?**LS}X!MSFZU+v$xL%jDFhlP6Oea~d&?Krr9 zzum>>Df9-*8kbl?nB6vgs(KDKHvj7zD5~#5(Lx`*v=TBksbBP@d~0H)pGpeqsrM7v z78?7`ys%bJA5T&IK@e4$VqT$EH!z=O|9+>UErTD4XiuZY&R4F>P7I12yZHo=sb3XS z#{r2vtZE3t7VaFQ7Xucg^^RMxZ&DnMfY}mYPT#Vs@@Vt-uyXExxg|zd0xX7# z-D5mWDRF*ds1(d4)ctVYagT~FZ{Q5q!%d2U0p@gx$r3=ceaR|}16-75eg}#Io`r$H zU@)|A$nXDe6%eI*bvT}FaZ+jqJ1!5|X0{&n1dM5};}DWxm7nrMs$Zmon<`g@P6c9g zYB|k@-%kovK~EV?F)IK`&u@w|Jn=Qc~E5D}!G z!OKBJS84;;AAy*ZSBM4sUaWfdXA-9JrlP_vi*>WgLB4cI236IMye~e*>f%!jT2AxX 
z$|wzCEwJd>{Y^SV9|?3Ko>JzD@)UCUH^kQtV~iIWud#tIaOX8V+;?wo`XL~!tv{}p z7Fha5q=zne+OBX* zG~{LouyV3l><-6Ho7z{0vDUAzkJ;RFB;8nM8H)i<1+9);(O0u{TT%@z6MNV~GMd>} zm_|Eu8da=0Brw-)XnqmPM)t5!%W#mPQq4afw^DvgL~i}e%a(S zSr_{^=k3({6j=aAZ=L zY&)Q^7lRQ%Og@SOlg%BWb&}z}!UB!vi}NVF+qFC3uF#CYTi_-&U(C0VsF0+zhUGyLFD}AUPp;QIxEY4;Mm?$ z#>VdhpnFp+&y-NVP=5W=d6Uu18Q(O$G?6*z)|>(T7`MZbgfP)okwE5EIcJS9eb>rn zBr&A&Wci$b%Y;_fnD_l41BmS3Ko?^BOYKtW3u+Kxz*;b@D6vy|A_)H>0LoaAI;hn? zZm=C}et_|Rsh)*Rl#?06j&z2e=x-u5j0_()$*jnW_M>OkZ)A^r5X+;A#r=+1mnC)A zTEd%P;LIQCP{a{@G( zQ$!%^=e~u|_FKQqWd%!pFz@gORl2~Xy6KLfIP0=`RWW{aw)6w+X57VGl@*wP?U$%Xgwsc4uby8pDfdfq4y_vzG6kIB`}m<2}ZC;wYT z6kC8ayfiIohACId}vl8hBR`2Z*OVHzzC)EGac_*&ZK6FNZ<9a*iWnafXY~9m?N*We=PiPiK zzJ@B#ORah+qrx(TJuX*KXSkBLFZfMZjr%JhBD286dH>8e%QttU8d%bQtXvr(6bBg1 zC&tiTrQI~Lyxm7Cov?+ATWqL}vZ`HU5*alLyubl={O=3xmH;SwJh;ax{}>PygDx8~ z*PKI_(#GIFs^&BEWO&z)l{Ij}5y(Ap_~$3D-_r~%Z;(pAmxoB7d*0QGm-}l>W=|4U zQfl(o=g|T#l}b?7vtICx(VMFd?{G+EKKEn_nty9p_g&QX7mG0PjLCs_oT{OK+nD!* zAfC~eM7Oh4pO-bHET z%k(gF0VDI~f73jjX5Q+Hf4S47?un0!gcSaqAjqFHuEChubE`s*8xz~OxgTNZanWOy z512p7u|hw(aRbDFRlP`D?RD`TbCdEE|Kd@fD_#;$5>-nswI8i|)SZ!>5DQ!TQj_aD z8Kk!Sywi1-%eeZN{mq)nEbN}V$wZH$AO72Cks5Lo(ZQ32i(8oJNYQVgc+{spAQrCMOcn^v~ z)aSSWP>~g2{Z~3(?Fnd;n|w1V@(;6n=%5jHejfw0Z&^i-vcVf_B{i?UI}BL~+yrJq zZ9$pRFyAb(7z<u`P5iIpBWXU|71yt?=0ys-Ga-k*Ul#$cAy)i+<= z_y$W{I4U7*_&md5CJHyk!n!5`?XLp;@j=CUv3KBYw^p!gKu@{I_+}Dj7(3e@TOwsA zNKCe#7>SX%FGsfy(1QI$amiF#yA8c1x)0{dhSgI2GuTufIs&aa2 z`AONJ{lwQt5RI4wHabHym|z1xC=kL zas&Tib{=lJshc?j4n65bPzEl-AWM^HttAz@cjn7S@$y z(=amqdq3fOb(-$WR{tLQxp{;0MQ`cBxNA0f+LJ+3<}Vqm%mCcbID)vm9cTJCEO6!L z%>JfZVi-TvwsHbPeu;nj8dpmHY+=Mbc;kP<-v4oaOsvpHhU%UU=_?NX`?^>gmu+W3 zn~#Ri$A<|a#Z4&DNC&axq{qpR2p9E5=As1|-(G`v%?**PeIS|tC}w7Ms^tTX&R7JI zM+{k%>gyij)LC;ZE?&j8Ad1!e4Eb@zVZo~`9U-B@v!+VI?3p%H zXn;lHXZ^6pw3lYjsGMoaDX03`jD9Z z(iDmB1m;K$*a<*il8k}}LhuuC{XkN^p@0$c)R_jR`7fJT&g-(D*LcgfF<&AVRfo4f z7lbstiJcPg7+Zh#)%9Dh&3E8EA#|PSAEoa}zN?N6d!nd1*woX-w_Z?Ly}V3!PrEgq z`L5@?4G^09ibqJ^DlwFN6(e#IPUvci5tXT-gEmK%_~)Xe2%y)4)Acw 
z4ZrWhtH)>)K=01H=voV`bS~X_#5Mc9>Tw8v^+tIah0Z&f(0(%64=4Qg<{!?9V=o=} zp;FA8M2Cx?(c2rM^~;d`b0+i)qmBHw;oCyxZDX20=U&1_oTc0?YpMpq`klCTk}IzJ zPOI+EPwx9#1RYfsc(C#!LVn%~(UBYW)HkTOo{69G-7YC;0LlzXS0G{xzUnJAsU)xa ze>oWLTStZ%PWG=A4!YOXddCNEQYIY5RxqiZnz9Z9M`V7Kkt6Pl#s7|>L31lWLg=#c zf8BOWHT?PaJS+bC9;Qv?`uVKT(dvb6Hh9r|@yW&UeWjfdXH`6=snxs%+E7G~potKA zOg92oUJQ%1H%0Bv;wNGDyu- zl~49uMOLRu4QlqeQ_wz$0>Y$HCF&|FI0YSif7zR$&d4s)|r2ybPF zlsv|A+JF>09%*?up%-K$4uhA8Jt|JvGe7@_NOML)V{J2E-*O%2OC z4*JO=%YG(#WmYXUFpcxP?__J-k>v7^okH;H^+&kxC`eG zISObB(fy0Nd*)TIwr~<()2+|fXUrv-K*Q#vCuZjZeK>oNLDBvWiYvXgTHBTtqMqCh zBgr;IRGsurpo|b!D+VH%2bV0rcNsnOHTOV9rm)m9*xLf zd2{On{f75&Pk-SEYpGP6zv?5QpAX^)lPGa%#{15b1{0s220x|M=AKgNp$HK}HF0sxVQW2a+ z+j~}PbD`*n{Ys4>wWs=f9R2z6mK;t}*FB$?Rnp4$Up^bLA9{OMEjgceEtBfW++j^h z|Dga=q8~E|(*L<-3#}bsUH_RsS~qm3y!!vT-6zjG;1e*CHQ;vhnr9lH%a>BxkLAr* zAbC}h(SF>Z=b&rdbUAmPWs(|+*@7Qs=22FhH69p9sediYux|g@;pyP`WIp7A0p#hJ zQ=5!@`Pa^)(Ir8;6G532IO{QEITLPVBD~6Im_UK=4Sycnw0|8>pG+)$uLpFG8*E(nik>R8Qdth>p_#RyD$glhJ_wQf%%X+-knrqTVwI zW-*^(gVVQZaRXo|he7+$slIIA;>j~~i$!V9v)hi+SC#_uDmNd(jS@g#ZVDKrh!Pu} zL#CCE;FeOSN45HFW-A`qTOzfEARe;P_tB7RoOSs57Gf!fsl6%uT7+t5RRIPgHi< z$o~0savtAoX8neF^q|c6&)shcoSO6ZRk`y!prvC0kag!CJk8{#{x1GG`YU!M9_Bk4 z`j<+NBBf2yp+=sfb#Mb$*2fD|_zqXn@%kdkQhUJYSJ~e<(I3^O8{(HNI@%4mz{vIc z(RJ)5r(p+4j|gPwiK}CSLv*=hTh)h*B}D?=>gpKf9}7UsndcNgkO*-C`5|E6l?QyY zB$Gy@Q5N7&?7JK>5_v;6JE2cGl07Jk3pppN(8UkY5$>=zJVXJ5MrkZZ}2s^SvcJoQ@LQD6J zrl-++_U{8}Ixx@lY}y?X@d-_s?me=gOZnng6gKA*=&>T-=1f-#*mM`BA@6 z*LNB&PwCCuRnyk|#lYJXR)PFrO{Gao&8!bMcJG~sM8kw)vV%G>%-7uj?ZW@T5Rm5C0iIcUao-y6LuB^d zF=EY1%f+9v{`<*o23BT^!NFSEk>;2$TnW=ix#oCvE=8NUIg096^RKOknDYl`CRz#M zvq?Mo9!4gLKRM4OvrNMoBOWiP+Kl~lRNo3LEA|V3#F=RGL*mTK{b9{z=A9I4o3=8} zq&Y@9&cJ|quIGS0#sDvpe4;NQ-<6gQ?nW8hJtCNkxdqDx@I>AbbClHb!m@3Psb4D&{0<_Dci_G-6P zjEODHZ*&s%^-=8TOn@M`{!zWrc6!i}ug8vY&rK`0ImPKkyKVPwrJqV2eyK6Ojte*S zVJ2CKQ|LGCo2sZr+C~Tg1>so1<-A9L5az#hp|@FJO945y$wa)oi)Re9Ai9_It1#A5lUh5k<=50Si?4S=Q*a#{AZs?o+54)TA3icTFcs 
z&~`i26fJUr{t40l9)aS~h@!)Qf@1w0xQUjfmZ5fj>{m)iW2~`grt_!QjbFIJCUKW) zU#`}U1VyC#TA%V+_p{adDOc^K2J4Kk_5|Lnze}dcHCf=9(%v+$+xa@qU`6lp>s7(! zbT|i3Kv@T`=d3z$h@hoewieW~dhdn20lN$QK^7FdC>F1TDcPH;KZkOcRLmTv&a6DW zUE2nc*+1VXIWt4h0$%T@iXo<;jwZ^hcYBfevFSxkxPH)r0?61CTrWX>wO2mSQRiAf zSd|B5^-&za-MIovI2hnfWll?WQTtX*xWIbSuqXzz<(rl_WGtQFhZO{^XQ=JvcLaV&GGI z(}s#-uZjAJ;HL<&PeEXTJ8lL0=x)p!S`wNF*R8U4wP?c^YaYB z{do(I^(@-#NOw|qg%%EI%t!;*ugNXf?MJ+FpywIop6I%k;#XoRmQZ-3hC#@l7m%H~ zo+b~|ZOf6&F){ciyY{mV-}jK~kXSJ(M&+ox+cxvp6ji-hj7`?1v~!<*PF-)b(P)AB zYA2FC=U@L-$-2B}3RTx5_`I03$0usm_0_g*cCeZ@akbNL##No=g6hDY!Y{9z^Nx}d zC5*kXm)Cb2vsjN}PZu^34Ntp$9Jm|2Yoe>Y2K-I7D-U)TFzfGDR=7c|rgXZ47}g+> zqwqQt_o?weF`M7`-o9CO6L`(&aW6k!L4u8I8t0DyOO0gP4{C{ff;q>8ae~wAeu8Gu z^1|QQmf-qAl%@_Ny?RW#ifcVm8lw(x2kLa3I_Ql>2fs0&j{|unN6ECWuWxqk6~9qj z|DK%X;DftitTz^QI9F!cjb=OyIq2!YYW~=h`amq?U}I1=%|T4&H0#<%P1vW2(|DIP}&9e=9dP#ea0LuhBwv?8vOaltwDS&(N zoX#bzL972Q5b3wTniu64UN%k~UGBCpyw2p#q1g%0*45cY(zYxdHNu3+O+&Zy`$#WY3GOUM51$HS=zd_NoVbBk1Ce+}}1h z&?>Q0A`C13n5Xe{j&ba#GpaT1u#Qx|s_;r+(%Q+GGGPK)vu~YAOZb+KYR>P&Q$NQR zZHGnW*tR3#-!`z|ZI{R8iURP_;(*pyxcIE>5K_j&GY-GJsonTXGc=|ShBv6sCYru= z1tm^dH&AD{79L-WXL&$QfY2~$2c2N^R+Oc`X&^QaU~YT3t#LKYesdJBfR z?gZ;M``T1_`|+JA3f6az@g*PDO?{*ddcNfC$E|2P$dY#@=aaPJC%#{+Z3|ipqVOLFNH$x7PVlOR)B-kgtkiP3}==?$Il=?6t;WZvOL; zGx%BNk)9$c&GYrwgrqm#oHh$%#OKU0tD{0FTDN|3;{maF+!NxIo;M@|-~6LtPYeF& zceMpt=F>#)HY4n+oRtTad#ETJDJxPAlcr+yu;%3CJeuj4%ZVWY91n;JFu;;`CjN)` zTkMAFBCnd9gU+FRD5GnZcCj|h@G;V@_?8$zpiq2 z;F~FEUvJ^MOKg-)F3hPTR@0qI#7En}c4rsI?Tul}n#w*^A=Qb&iv12u;rsPAdZIMW z56u{hcHzlf$i|+HwDRDfihhj4&k^s_Rqva_C0lRiGzxpu< z3;8t&D$fdo?t`q@CKee`bNTcj61BYoC1142?l+Uj^wBB_8$1(8n99ty4;`mKzT?I` z^o(TlRO8Fk&`GjKgL;4w<$7 zmNZX6^RejZo$^P1w=s7g`zZF%x;5KWS(~u`VWiL$`&rm6FIAES&FlT#(#zd9J|dn0 zH%D?Lc2S3MC;XO3!PSp(Ay%+U5)Iswq11)eAv($Ry#*=s6*{`{9GyK6ftSXVK0N@A z_A9V-_*h^PXeeg}?gt~iU!SuCCKo&Bu2{+R5EK5Kf%e@T*tyNY%>G&%5j&+9oi$LU z#+SkQh{Hlv%1dnGV6{&idM~e%C21&=P9*7PHq)+_@?(SMP6@A*VY^lIy=Oyx#Oz){ z+eV+V>GZHiq-7OGN4E90AHK|T@!&i%jyU*7DT#IIB8FDU7Ie~P{w!o1>B!7gYHAMT 
z@+k;o91eZBJih3SU?GAtmnv&4elI~77jAR;j4cW7D-wRWy9A3l9e^)XLR1Ae&8i&U zO~oLBI^f315FP8MnUGM77U|OC2d{LFN5$h`;qC_8k!sNV#zY@PL7%-XDAH@3RtBvY z3a}>x0)vkT3AJv5p)SjR5%Pg;(-8&39uJ+^|7 zy!rPUl$SUK$stzkUpQ1p3pH0v*Qd@^f~EvXYi8}OuRB?xqapVBITKNRQjI(`tP_R2 z!DOWL)U9lWYJ54{Zi<2rdV&o}8(Z&P@_%)Jy(Dpr+jl(Go5?)V)Oo2{l4Yv#^B0XB z$AL4&)Cw*QS1;$caOm0BI^up*zn?Z?G&hVhc56VUz8OEJbaAVsj^VgGCSJ09+rN3I z8OFA~;30QCkrh(5_!8YEBHUKJ@NNyGg>W3tlB`ubHX_m7b7Jf8T75ro05_qUSx zGm~Czarl~jidk@kvp6PcQT1)AO~w=ykDKIZ>|Hqav(O`jJ$|4lP<6h2Sbu7AM26|7 zc4n6xj(Ki0R-!YoXthlIjKfaYL*rd)H1|u_Z556w zj&E~lTb^(Ldzt&Ne&Xs==vKvMj{yJHWd;y2xG&m5 z8ru^p{f9R@WBVe$kV2;JxYg-3MnUElglxSuR5#=VUPsmUZkAnvU~-Ay>A+`t>D?(( z;meCs$LV~}zrh0YW8wfOMT5oostfL|Mqgdj<>KPJvrPDowSMGt_}W|RnWI*2DA|ir zXgGvhhR{HHB`}M~tmMn%%nssLGQH!xS74}GJr``11htj1xm80)Fui%LNmza3x` zxF>LK(vE{Vhemy&8^p3|pAMQM0y&7&{DbE^OvEYtJE52V4xJ=8E`Ln+Qn3OpV-^b| z1Fq+Ig-MBB$-#}q6#3DrzBx#vgwO`RNJ3;bBJ#ZBt8?a*$As z%MWJ9G=1m8=9!6xT`lpc2E?0gT1_^&mlji}Ywsx`Pz&S-W&K~D_fWX~RBU`x`#*G= zdchE_b<($?479yl;Z}b%#$XWHDn47McT-P;`s$i3-FHbK45{SP9|DUXLi>2UXErCd zAT-DXhPN4d^|HPFzHABM+3*?baJ`lcEV?7`JMtq*AF_gC8vase#y_!=-ooA2u{^f; zFs-~z~qUYUE%w98X7gIf19 z?kV)#nVDy#PMxshnX7BO(L2haBapyQ@j{@yDuIT>O#8I+fIv~W?~l6N3he4BkeDKb zy6nY;`b6V8Ub95=y(+O*F5F2)UmtSM@hoX5o8HLbl17A(H#xCuUAAuubkO!CqPf~X zXr6>d-sBPI?5BDFY2okcCk<=Z=zL(lW(NGmR%9z%UpsP7;kqijse(*-vu3n>HL|F; zpMmnd32lR^d-pY|u|o0Sa-+kQTg^o<-JjM*6zr@b;a8>uzY2mK7rnX zip!8M>$NKMrsL@@s^yC1&D2&hKYtx+=`4)A;_VY3v3rD>q2@#JYuL)FDsGmvx0q9Z zvQAEwy`ND0%X3H+5`TWvyMZYG`Ol!0)Hux1b{M4PEM`)> zylhmthuyy~#Vv2ovuVd?lLXXSmII>o>7B*4B0x>GIKF8Kn7M>3yQ$%7nIVFpNpF^P z*}jU1OLc6K5MeTbY$$)$e3RqaQ zs~wEsM)mGBG@Wxoc9W)z(1Xu$TzbR!V$z=E_*%**0ab;&Dt66}XX=D>`}3Y3u#oRI z-@Wtxd9#vG@@~dd;SH(V9x`J7KEZX6lvDxHN!(21o^1J4X78UUFh7_kPGT2n8g5}R zqEPN=ENpNZIaPJovY8v_(kH~T&sffRxs^IyaE`O$M#6tgO=ut1}O|(Cc zsqyX}O!g<5#(PYhGa($N>Octimo+-FevKSWA>O!dx+2>W+T9YMg?Ou6-6-C>6! z!~zA>=WJgoiKUZTvh?(mu8nOXP0?B#qCryzq+!%t>-=@ouMxmzeH0K~diQbh3)`{! 
z?ipI;l0boG(8iuhACD)$E@l4e#Mqq;x5u0&oC#eyQ`V$;Q8r7EaE#&M>tx^2YgMH) zBrUJXw5u)QPB#9)QTS^kYNG=S1Q@SIG{EPzoe5a#)H7jAm>Z<&2X|zvg+;%qlw9O@ zY})B(Rys?bF#m$lZVct-9{TQ1i?}}j*$2eWf8WF+=h?}uU2#KwwVttxTd{kv$FlK% zxN->+BV&Hi{YT0P?rHzeKZnu$hs~GZ^?&p{L4fZ6yx;%-V}G~x|KE_m+wlJ}39(EL z06cg`Xzbbc3~1Pf%+-5T1Aa#X(7U_|rY4nxqMzU*v_nS}1A4Q3U>eVdQ@@((^Jw;E z)E&C2e{QWLq(_Kw3{cFWKy7O|?tE3E%6UbBQ#CyRbn119cx>6NrujD&>6JgUOH6uR zC>3;R>YDJZsfyU4=Cu7}iJ6f5=41(eNFn%oWKeY_FXAS*4W1DW6l8Va=QA~d)zr-a zem^0bQMTLSzMhkhDxZ1n={EVE6qJeawtnZ&PE?{3d& zSmRW~GC2;{;iVPFtL(*bsK(6l?SDS*cP#2d7O!E=;Io190xfum0a~dN-(zY93ja$1 zL0PcDQ5xG$cYlk{e1q4O?EvO*%=3i{mj`(R#AGnfg$W3wvIE3eqG+dTP1Y8gC6;%; zY*{=2cI0i(x%>9KJ)UMwrXP?}q9U?&3N+d6Dp`Zo0LfPM&kwZqD}l+vj#V651-hWE z$_@~=xerQ_1ateRJulXDO!i~fF?<_r@aPzHW*LAJ(x~b|9^3^>>I3V9fKHQZ)kL+) zShO!_mebBty>C0;P`fgaCec9BIfrlGGHCLx6|^1i1=%Pn2V&=H6MxJYzVaP{n<&&1 zz?-`Q_H#f~Rl8pd>P;!t3B^~4@rJI_cB0PZqeQHt1x>e969#P{y7-=|5VYJ0z5Oj+ zJqr4Q8b`x%F$(j$RVRMHB);nK_%W0?XS%c1~`E z^S=8^fP8R&6V&v&v!*u74!D47-f7TQeSslG zJ7&%W>M==5LZ3rlxKb(ucJ_mBA!`8u}8MvvJ zS#&w|q{X6$3M^b!`|-qn{d2Bv%@RJ>`CR_I624aCkHezDdP5D4l>IgRPuE$Gx^(v# zwJ;6fMpXw{Pno%&|9HjlTt}t>d%A%Fk5CP+Q}#7xNr#*|x7DvMQ3kD#otNb}jT<~Y zDL&O7u8(wZPkDm=2NZbMH@%X@x#sz_CcZ=X?v;(X`f4M@A`)>}7C>!aNXt`S^5u&?5Eq&xqP#1%E-S9yDRxCEu|| z)kT-0LtMG7kl-~XHyEDLPe+&(f@nG|a9bb#35G=2f~gQ!fW*!WXDg!eGNZ~!X*ec9 zZ(G}_>pjB#E#_=D~#U7hAnbbvpg zLq-2)u81=7L9Jd@+6RQbItS9I*zlQbLaw_xqO_+JU9knXEl4c4U4Xe&O*Rc$B z6uZ$o6%)ZFR(-b-$RgOoKS~*j(p{QO_=*#l2pI@8ksy z+K_G`!no&o#YXjYEJAg@-yD*#Ifp%XwK(To(U~M@Yk1#{`|N|2(kh_sTO4;qmgOYr z)6-}%iu*o^C-{eIB1|5rL1myI&%hDSX`N|qX8X)u^Y2UtA& zowS<3g_)^&yJQDO+l}Wa-KKT7emOo02>JH^aft5SoqCwAPLK`1VC3Az;=Y>VQJ37L zq9t7jj?w*3;v4-q4y{&hNVY;;nRb!>6re%11gw+Mocj3$aNm@ZxfOOyS4fsh2@&)WTdRy^XJg)dcZGTWJw#jONYAb zT`vo~U+p?|Ldl{~BYz>1UhuRh{u_xi{n)U>WXU&C7zYsZ8bOclCI0`I4+4Iss(*G+ zvhY9QFTr*E+={ds_YGpsXo=)&DIQ6*-cSmD;zf_ z5Y?!e>QD*7sUFacYzI19{MWV{SG^qk7b~m6(eL;h zzHQun_H9RPRrS0#T=biKh1C#~D9{O;pJne}sm@9kw1v5?4Yq*71fXcS$*PG+Qpuy{ 
zy;d3O*D^lLHr+;T<4q2r;1#xuQ)b=3Pl~Lx8fvYy2Z`)!3?h$L&*>U4Nz)P?J^oL&h{u($9iB~zRBX14&YBHUU3*g1h z9r{$P%2%t25|_GI(_As>T5xSJ79lr#u3zlP(4f}Ya*`vRg2+vLI7>dZqI3n=u(ym@ z7L5|02_ui%QaO7-BW(^Qxm?*+Isz@#9gWy%3G6Kg`6Hl7>&^4uq(OydVtYN#(;80?H+%pAGhzZ^BEf6YXcg}WCx@xZh9DPe81b7H--0uWXMf2Rv^h?)!{^sKl4rR0vl!nV?&|m4!*fn;*S5qD zi7Ul02P1O>pr!7b!9_4G^mkSB$)Ds!Sb+b%l~Z*~oQdJ;jb4>+JWwsN=CE2TZ=gT;lltw*@4k!*X1L6Y@Z1hJutbB-15gBg--z=qR zucS6*KY!yTOkCXS;XT@0?pi{> zz|4&o0pHAcd%m#|5DSMEJ#z$NIxm4cQK?1%;hkb&th5>H#+;98s00@g%~U%c)p=}h zr5+6{@*ZwAHP9DqONPdiVSNX%_c#wtF{a= zx}nbFICao4Eghf}(JgCBNElNGYo1l)hi{fS0BM-MH&7%(sH1mH05#yRg*2a=oLYE?iTe(wx ziiwCxJ;i*`SF1hefx&2KBNy$3Yv#uYTXuhO;{4sH-l;u@rpVJ z*KTY18&Eb|>FmQr2EraC2VgYpdxOAFI%htBXu6fVWilhE1dKrP2IBy(R(sEvn6=H? zt_wZ@Z36Bj79dvWgOO-~b-nD~Kue1)khGAO3 z(Z$nUF2W=M|GeS7JH(nICpwIyOMqneZS}QIyQJQ4m`5i;n--^%4?t8vhqjFrJ|$TG z6-0r)$!mNE?EFxsY&}M#)DiDKVoY+apfV` zZqPoM^_0BeH9M;B|LaP15+G6@JD2)t44kGf8rR0YV)S^{%cUPsePvV9LG#lJ_hvZ{cYV8c^a466H(E}G;K=-*+ z_dN8-NZ5_G(@rLW`t8WiL&48EdEjy{q5)5LHh0%;QTIZFKXbV%jNo3(2@Sr@@F7zyDEQv6nM+{V8MY-UAl*;Qd*&B zP$N!P)nsruAMd1qb9{rRvqyDvBCc)|KT7M5&UAU-2q?Cz4RLDC;Z_YpyD_jIHdj2C z!NynP-AsNa^_*99^~*7en}A%lBwr&p#9(BlFL~my88lM%|Mg>Msu7$&Rco9UOJvYs zy#Ffj>0KCJvF?SFiyxc9^~!U|O4y9UHeX$6n_r{A0fY|k|`OgkU z|4o|>Loiihu~G8bDe!J@0>-K-}2^jb!^g z_OSq{mu|^-Xc5poXY@k^nN=c^id?>M!rx$Q zk4xHJ8}|rtVuVSvpC|7px4g+5v`Y%LEbCR|Z4e+Yn7`CmAI=89s$S)^`1~k8h5Y-d zaD~zl%kV%Fdq!0k_7LG;>!iR+=MAmA@H;2=8VyG?u)aow`hBOKAi|vOy&jHUSzDE+ zmmQ6M>>vl^+gI)juZg&DRG!hO_G8%Eg=>B7!&gXwpcB#TOl*F{TSz>`_8t=*Y21O7 z?B4EYX($VlVtUo>Q7W>Avo2bLooA3Q`&1t$bUt`W3Tn8 zj^_FS-s!sJ&zCI(_5C~s#Exp<-OP6n;Erqe*iTGlC2Z?YUr?*A`y3K7NU|u{CBKpi zuzD(2AWr;4s$7~nRs3M^MX@8;+n>M>pqWn=hiuy4QS}dN|CndL*A=pspjhrfrGn1n zf5FQBk*<@&^*5a2I<-0~5^c_a1MZ33C}YKfVnNkVsV>;cVk<}l1X-Z67Lg!1dq5Zh z+qQQ533RyKa_8r*1X;zp%BzMIfiKRSUAexn>1*F*V9YnkqhJ}3q^{IbP4?uXfhOc7 z8X(iaP?f2rTU??{>48*kx4=x}QMFM**TIX+(nHv^TU;_3_Vm;iCQzA~Q58xO5@PKF zxC?sfO+D4!v1!6GN+^9%l1lq6$ 
zbfxjE!Vn?hwYTiVc^!e*_6bs9AJE@A@I0j94v(GH301a-OJ6U33q*+uCn`B%!ovTH1D8sn#B*v9c~|MEz)4DLt=jF@hVw}yMZJu z8f|@eWz1ila223ABYps!&@q?Jfa-O~c0Crd>J5XzU)sk;BOXKYirPQ?$S#^LUSZJR zE1-+iuWLzoS7YTribYURYHq!tB^U&w+AxTElSAi&a!9!CNep^*dFerTl?+Detf4(p z(>~KL?g?aV;eODiYtF1oNtMoEMgxj}p5U#9Q9cM@YW0If&_lWzkFJQ^R3O(9QL`GoGH6a#PMWk^l*7pi$PwrwpcyZ z6KE{vAxctixLlSBCuATMg$$6s5P0eC(Dzt5s0`~FM@Di(yCLhsbWYF{qie_r&f*e4 zSI*Hu8x~4#@mTQ%iC7Y>xV5TF(^i^9mjjY4*AuPjsC2Z!{K8c-hw6hBOkP^TajJ}^ zXq$Q1?gq`k*x1+6mu?;z8N-s=H;Ai6q~g~S*u8oOyMo#;rS!_`NNY2GUamswBqe6^ zKd5y`Zoe9A4jz73p0$?$g%0P1N6F$A8ovqxv~(_wXmz-bd~_Px`t7;m86p~8i~&|xzA>9rysXfFQC{o1NRU#!rpuvKRImw}M5B22 z3xe2FvX~!M={tW5C;)CBUCu7ilv2U^+6`h&`e&LSg?1%nb&`knSjPy%Au{>L-&zX* z)+6x3MD4ySjnwF)>(^f9=S8ne8^)7vPax9H-roreBAo|Gck%$&;F5<*N$-y=vU;GL z0mHdt;jZc#v1>nif;N1{?hQf1=K+uU1gn0f4UNOp2z&0=-^%SE!Fbuh_0a_j;)3n_ zYJ4=2bV+J>@rm#SEHo9?u|gZ#I?N-lqz{9BfL*+#DKRQ9L%sKKBw-goTlGN0{RSG& zPQM__{6Wf{ja#ye%4=xg4A-CGBkZGcYzVxlpO#uAUt3d}^;hQ0NM}ZOS=I@WsNq6q z2GMn9u?&8Bw}m7j`y_2HvCl*Thw)$(-#{Z8pKSzq9v!lVF!BEGwilC;rHc8x=ebsQ z26sQ7wgA*|+b?R(@cAkWkg0+*y|XGLoZF;n)+qnc=XRo^VJk2bsK!#IKq_1Pnm3gD zNU>p1vg&tpZNNUzLwUJgcty6FcE@w6Bl_q(h##R4$C26`6p9ZvK{z-MK=&=^P7BHW zua=-W>>P0D9XBUS9g-yo-gAp>182eG$i#ihG8Gp(x341i443tt)1azQ*ay8?qV;Ru zXWPx*v0?Y+fW-Cv^B{HvxOB!K8fsusguMrW5>O?^?}&&SQpJ2i42Z~Xa_yYQ-b4To zG}YgU#)0@z2Y@LW0Gd<_J>K@m)eD=oUYuaf@cRZefyy4>fi%~%T@XDOns|b^yW#iG z5A~~)04}j*#fI0dre%4uD16xgZnofrd*`S7Tcxx{u4>aR5vhuZ?X+d!VPRX6eYSwJ zRntUlkppZh$xeAIQBx3;;Lm`B^>CeTmZe`Dr=; zaY2~1h0Fs0;Lqtx08|>z0Pu3b4E}w&E4GnICc?AGpc>tDzNhH;r#EpPZ1naC>*hRg zzCe@+WZRR5L(^&Ll?BMD%+u%?E~cL0LsHZ}T~ z6ce?`3B?YLOWyNq0JFwP*J8x zS~5!W9mJx{wP{;#z=HI74MIFIc;PS~&z!1!I+*u;ctQkFswK?Tfn)-xolB%~HSZ9BK%7F;+@a;OEBX6>7ipM(3<`PIt6Ao3k#tB!Qgb)sSUJMd(?t-aw{owK zMvvyYHo%aAWo6G!B`#3r@r2s|0%a-uuqb0BWheQ;s*;EG&q7DRCIVx)M&h>fvn)QicPu0oKYSc)RF#OxHnndqjZ=nGq0Jtnkwon|1{ky zF?4#(?=ReQM|av|O!%PMh_-RCNe6^L)r{MHCUgP-2L;fbk+pmZR{4zeJGI>> z;h6r&@Feu`zF0KO&DL&scq-4&fT>aqH%NRjrh_WdlHTz^%dG^h4I!Ld7%wtdZ6_F; 
z9>wO;+X2XDa>>z0I{U^bFO1vZPw}}z>k^!j#=Kg3UhBLv ztdPh&ys_pr>Zs0OYY9df`~&xf(cBRD=WqWnho0E@nNiMR0}<}dc!%ip03t{Vz~I+XwiHCSkZ+Ap<77DqPu z?RBwMdG5YG0?#0+{I@{iQ&+0rsWpJnk{MhFrT*pxdU%`dqQ%e&7(Dz6uvk2pYE)nA zvZ@b$%ziXaZR-sfW&bsT1y6&QDA#jBFJxzRvbR#s+jQK@jZ&-t2`*0ctS|%Q5vqDY zA2^7c`MIfOJv{YXb|8=NY5#B8ghj@J-&r61V=GfIzpeztL9DZNxx{2QC&AQ0M?CI) zxHishOwh=~xFe)C4{OjnA|KDAAxLkXR|y9zQSa8vWzW zD#rx}yb%T9_9(GN=pE-U0ClQVxov2J#ASjTi0H(kkvd~}YHh@?;o3#cZbcC^oDdV} zVVo97aN^UNwBF$mS3IcB&0lCxU7Wlk2y_+Pu(k|(zT2S34z&ZGp^#Y3S)eDL&mXR!1p@fXK(OYqVE(B?}`smy0_n0cl0R zg}<#-_hQ+PhYE9dmpW#^!PtZ3z8k1UV!n3g$sl)qEGSN>M|4rmYdI&FA2k4FQ*Y_3U!0q$3T9?sHOK;XSjc0fOmr+?p}{(FKE-3++J3s?iUEoGSoCl& zm)dK?v@dWWx4L~m3d|#Tq}pK)pKqz<{8S9bSm(q+gz;G?hxK{E z3B#pU=3w;&=ut}Je!nPy)}m`WUJ7O((CaIEj%n%CzTP0s)ig(@`fQcncBpBgf#|fj z_5HE|WeahUETmjGo6vqFha-e%O7r9pJqrI8EG&2yehky}lUO(*T#>t}eFZJ+J^@Nlz+irARg zOMxP4)qHI&0Az21p`DdrYIZ9q+56SXBNpGvZp5z|`W^uJX=3}m0(7uj$WrfXeUsqh zpQ!%TFR#mN#yAX0Uk9HV3&1L79UL>5R!AeMAuRf}&e=Y9xvvxg|0pDIO-(X;N{!=3 zllP$wNX?xXo2ZnR9NgmO9JMO|nyYpKav9dxS^e_3Wyo|DR@pIEa39==JAnRxX(NM;*9etUIC(jnE&~+E(syfieb)wYBNXHe!^3`VoAIov1$rImiSGRt_Nx8{r=&=6kojl``UoPdFe>W3@ld{Qj&V5=c7x?Zo z%WUFaZln0XVHd^j+YI6D4ic1Nt$L8We1wqOmH)m!va@+5mRVjz*J8CLvAV4~w~c>7 zYGYo>=yQPML$NMK^{qmFU;Q+{xC4q|wuQcr7rCc;e|0|J>m;GPrXO8V)bMpQhezt) z_fuz>A_S+jZW^0H(t&&OG4g{M+GS@rQpqTeUiWB?K2`aIKUaJ9rC_N_wa04)6BX|y z0rTxxD)tkQ29ze>{zdbE)rH95b^vVlsq&KsSdViM2hhJY!nxy)uh~Y6&@V2ktVV6vnyZ@$res$%` z#);JM1?ezhwK0*w%5J$oovB61byxotGI=)(5>4=`1#AKrv2ks_`+oHKrLV4JKVt6P z4P9-fpu$a+2o}uG#`yA9GcOpxYuP{BG{FwIOccFgZ^vF(-(W9(Yi!08BomJAm2+KP zM`p6IW{)3QXgnwXY4TO85@jNwHco9Jj>wh;+MG1 z{_ko8h5w#C^~WY5S83=*b&0orpG(Q`&?a~Owv|^A5D^JNe~zp- z-2XAzLC_oKvBVk|&(A(ojpdP3;$f#>{QaO_*p2Srqm4la65{v9UT}x&meWJ7v+I8v zFRmniE4;4n{;nxkvg5{mr@BnJi!Tj{+IRw=@_zq9Vumh{G-*bY*)s*7vz-w7eK{xC zpn6&nPQA6C6qKRp`Rp*4put$|-1ZjesP%f4v`j8B zDH+8ef8~sj4D-xv<^H>Q>MoXe?Io}Dfbt#BgSJUEcOR<}R#){orn{kKN+rMCbPpbW zaR`jL_AS$wfJtw`PiF33s<+z8zGcv>m^Cb#eAaO49!-)MwxSK3P(z74;)$v(6K<(d9fx05%Rl 
z8UHkn)*R-`r`$U8%(a*0yZw5`k@HoSKmE&;w6!1gV?ifRhC&r^R-;y_O_0Z-t`1Y9 zmj6{w)3R7@IM_0BF7Eo)V2YN)+{KfPZ&q#^I$0Pw<*Y9)pMPG3@Doft6qC|o>V}T7 zx+spt%U!px&?gV)`qd-PJH%NQ)Bo8G+y%k;4_)EL=heuI<3j~QHd6+y6unC;iMBR^ zN6~4F?Ne<{7pC$LTC)Rj*FN?Oe z?zh1+S_z`C$CD=_rBNo#xDTbJ?4qdKx073Sb9Uk;$<9lWhFBvET>*MTqUze(r?v&2 zr&j|ilY5Rm*B{QFgbLRtUYQKt{NFa+8vL9~u}bETizn15f>Pen_3WSu9L!zoshx*gS(yB$^`>fLXL zx+*?We^wwSw%{-JzO`Xp+G7q@ip!o=h+nIlD^6ixUt#UStz=Cy7Z)6p6I+ZGV1=mp zxJ1$#I$t`oTNq~uP%UBH4Myfg8qUVai%?}F$Gz!jUx@0Nw?y>6_a_Sz`}gWH9B1&a ziHm%E(0qxQ)3krT`NJYQ}Sf!o!SuHxxk5sg><(^ZF}dMVXD%}4gX@1&>8qZk;^sFI`n(- z0~>?-;=+Gd7eoH)uaeH1mQ#2~_~t#Ks?n9BlxvFf*285LI8iQG zu|}%xftW2z?S|X2RE^bBNv3wLq(?dDkO8v73wJMObPXQL-8!KG>Rj5~tvK75nX=^9 zA>SUY)S|Q17=uVufN{r*&=*NL|BD+&@aFGnOdIIgFGft5p3m)^7OCnPN^IOcdT?tW z8us!cn{Pb)4Lfd>IICku(=TW%WF;BI;(pT6RL9&?Nb7XID|bR{|A5%d zfX(>IhXwM?o*Pc%41+l;ZPbmN9~Ocq{4VvhJ2peds1r`88@vUsHs4Y{r?aSXn4SLj z>9a0;{VUhjCer47$lFe9*(JV2TwaV3K`?$$*BJ5+v+4=)f2S$@v)!h zpJ}ns<$HI0uwp)bUrwsdJiaD>RUV?lnJ$hrTMl;PJ{#AT!f5ZIGLg=nNF-l~FS&xy zdnZ>Lp^CMus8GrIUxO@yu!ghB-*4IaLcegyDbIM}THo$;W>=oo2CDW1nY`yTephkR zt*L^Zh4Q|q=#o6!%*8mCPvexQDfE7@*pZFZXa8NKU-w^IF#0LP|BAjF;KB)^p|VYl zO(pA?CkQnnmuElo>z8{^Q=6$f<;?et+egn`%OEj*VA@dNlIFi0sd#=C60& zzK9|THMMag_H|-?<$s4X-qHV6y_6Hue$f8$^>=TYZ~vM2dHQtcH^t&+^Z1Fg`=eDE z&qIi=u+J(q7JIEL(P?o#fVEJFos?!|$=-2|ZWZWfzmcK6pB_#53}S zk;`jdce1Jrk47G5$aC33ANM%Ry7*RKoN~DvOzbFW`L;1lv7i!3sw>9*aav#W0v6id zl;(4~#J9)!iGWG+1J9?iVh3AaC)$SN3$0A`s(M!k{3Tp%#8!(3iM9XTAd&C4B=Md; zz2a(HtCij7%W-p2?-o}jwVoCxT-35uZMd6?eW)2XZr^w#{(En3dSSX5GLBwBi~SsP zY%e68-m}QJW_ar@f0o4-_Lu@k&O5Jqw%_M>)41lFUtKYX8vbK9&JwLOXzzg+`8Qay zpI565ZajIiOjLtZlC#x2TOvs#$;FWOH#D{=e4$q%f<;n%8G zoV}HsaBku`kVgBjC1UX03@Gqc)71O{aEQLWm7b{*vkRc=HPfKl;Uk?u$d@l)a(Gn0 z66%Mm7!9%No`%Q2t3AYclP=2oleEuY9%8%tjO?Y+g>78+u1y@hdec_eZGs)uD<>JnK(^$JacrK5&{(8g-}GQsoSYp%?b~_j2i} zifCgzf|o2uLx?+8@C!{_#;Z+)aT?V%fJW`%fEgAb_8Jc#t{W1)yu3c_{(AmlCdu+k zSC>210^G*x9Fa7w%?Q~H$iefK-3$0S`HblcQ{@L&m_rJ zuO3&8rC&4H!1blVz8z+v{vv)b7?1Kjq+6khcE_Z%j?{U1+_IuCEo>_(t_X48<>_AG 
z>F@p)k?hU{8%$Yzi)a1f%sMy9sdVn&9lns|K(S2$uo*#5fVy4T_Xn_~$%RI|20*mx zIu!Tz_71>7>AwJ)GxNS(z-}I!{lYvOtkqeX<9Bgki$fR|1}e=?TV`LI!_RO0s%dz` z6_tx-JaZv>s9W=Ez&V-lUe|dq4sRu?NPVtG0S*6|C}F>WZC%GuIxRa`I_NzIzWrdM zld?GRc~o7OxH{X_xotfB|ZVXv!4$bt&>)t_3&cypJd31(m9-GLd zJ>G#sb46xGjono~L4K&QOW1pYbgMeU5pqGK2VY}Cz+bK^UCnz+{XO`+_g`ZWkF_Nk z3+6*b+SolczQd{X?u&E&Z-^pNM2NRfrz`UiKMb$v`DG5%UGPsx#ItbI+)VxO?VEmi z(zdw zG`&NYTl+akaDxjoO>_y3>17KUC|qrSmqKY25=`*Q(ZeoLE9{+e9PwD%@sm|qufcEL zmEjP^j@3e$6rrvb%8VIk_c=q%%<-3KwxmWX6uw;W3mQa(8o{M6-RT` zq22;l%`x=wEQ*-jobR=>`pL7tQj2FOc~=h4bY^UD5iDZ<6O_WN0S~Qr&ZaFPJ%G z9Bi2o@B)I+{bs!>}?&q%wT>w zxO?uj54$)2aVr+qrT;zJac*_Df+JNUkaoIJ0OihiUo_F`h%}{vt^t#WIe=gb*MK%( z1Aw(PBX;0*UY&a4=^qVbny6cJ1cN^UJQ+~UIuZRxQ%;}0vs}zR4PVHE`Dgp9nYP=# zAYSx5B-CAfu8Y)A7Zsb6FN z?niklHewIifB50-p*&MqEtM_aAF?}&zbyVw3eF4ui=u`HfT3d0-BwnLCt4z-;F3kf z#pSZ;5fT?OwGXR)T{LT$2p6E0|N@p;1=g7l) zEN|Xrc!X8^J5utn1*h>f;zR_-a5;NO} zdy|4{TYa&pKUjTbH1o$Jq4UflulDa+{0}_K~7vCG%o`mg96YFP*Z@4K2ZvUSXvlqZo9PY=WO2(1;2 z!VOeG2R7&KH4|m05+Q5(5bMTchI_JkHDR$BjD^QI&3Z|C+z3P_rkE8&FRvMAH<=p< z+rDG2eA)X-r#ovOC$>QFWEl&r{~)RX>CkuIZ_(LryU_ESoA487og=j9_eXUcSiqU% z$B*Ch>XglFJ|wM$8UbqV^vVHHz3yF6XTSpSt%V@(Q?2>Y50?R-N(D42$M2Y!yh@lx zhl5}8!*H1OaW+?50_~#DJ$vQ)U3%TGk)fIN#|?N3nk}AEJUy!djyU*bBCLRt*OzZQ zVN*78k@Fh94K9l`sgP>4md#(S0jq9rrhF|J_A`{V3^fqU{Lj_p8NDI9XqPvvDgo)s z@s_9J8DFk(S@jmp*}VVjlg}Cu^xrF-s$wypU%cb7ee{oM6E1rozQyBp zSBAl~bRmFgc#CCCCY1d%iJ?I4BucFR{yZ0MH?w75u_7Q~p1j-dv1B^;5XxXBeAz-% z+kF=#xdmxWl8Zy@qEm&v(!00p+ShK4bqN=TNc%CbDsaRSh57cFpNPrBul;=^MT>bZ zg$-YC1iSWNO7M_ENLu;@ko8sdxC{cG#b>Rms%pvmAa?Wlv(u|G>EI<*q+q3x;c6#k zH%TceQ7AMtv)^RuoRrkNKDi@InS4PdjE}(k`1Kke$QRLAFf)R*GIPI~e8$)T+T5`z zmZO)zzr(DrW6k_NAzeIyYUVf&ZtpSQ#~YBo`L*?7PE^O!vdn9)B-ePe7;*o-X24YD zHQh8}K#o!)N43J~`rH6%x8Nxekfv_Zj!D%yC;LjsqT-FNiQ-eoOHa4%aV+5UE|M=K zi1IHVSe?^T8r*BR8Sh=O=?mK&xJUK}HJ0XEN5|fx8CqY2UBJd4cq@K1_^xRqbL;E< z8VF8KPH?4=_C01Eb=Ae9+{NI|udl^#o7OI&w+6^=-x8YqRPw=-Weeq!bFkB0fF@)t zS)Z-*6L3mQ)h(0AgOpw7tt3{VK6%Pr3vAofYq)DCq($47)EzlPwP)}+U)IP7ni}(1 
zr^gMWFwAKvUW|&I_B)^YQ|bMMK!~KJfc~uq;(i_nYWK`&Ae6(gLiv5k+?NS+_Y;vr zh;7l8GCKPB{!SsMF{U49ColzDt3kb_rIYz^$HgDc38t@%;KZwVzC~bcX?+ znBu4`0yZyOZ}FV8^w?^!v$J!0?8HNI?9$P$F1dT{5lfYF#zfJal%8AryVy)S)AnxJ z-47D+mHwq`Mm+T!Fw`_!Q|i{;sm8WyLn)_Vso;JU0}GSQ-QL1pz8Wd3@9(`r+Vb3O z>`Xc^DX6rSYU#Ak*#@g3HO`xc#GH>#HVRQYQMb+0FUQ28Leopk15Vue2lf8Z+Dfq) z;tuxg!K^%bZ;J%j@oW0}%yIRu)XF}PvZA7*pX)O-GYwWpgUpZC(o5%GpSO~+GevC9 zH^cbuJ}A{KmJm%U5`tFAELGCRXAUL%U3m*$7L-p&nVYZKxfag>Cwe{28`gVf=DC{# zy_`EWH~6L}^;w?lx7U@$y5EM2tL9^K))|aTp6Qlf$QR9L?t5P*sC%PWN;Dt!q;lHy z&Zi%GB^QKqgH$Q<&wt3f!*!eUM!w2<#PgM zCFV-t`E{r5IJ`D#ewOy*f!2vWu!-`1erBDoSuqjkC`M~$7 zv6W!5Fh7K*EC>V%y~v%9m%zOkx4+LG_ffZ8qTs;-$EA(|HsKyEPLIUfwP~;ILqd~@ z9q|^GD!wNTECe-gSro49T;u4QKGiAK;ClS+jLUhWu51O_IOeycBHb>hij>zS1`Ew( zS*4TLb4=F5jnHu>Jts};xLYetbMK~l(P@b=n@0RI3e-HHfJ1$NL2MJeL6;JXg1dwe z8YX0&RF!G8pFexH=zTETIRX_x3R4vUEy}nWKh$mbN;zt=WTlpwS1#t&8J*RiC}+N? zsHp32FFj~*13ox0@j@Iq{$_k0=_#5K?n%7#>E|JMF*lS=+S+m|hMHYsKl8@Syx=20 z`w^qs3?mrs(T}s^dW+w6o_v%fpu)AJqI7H0@4{`Ly%efJJ?UK(IcL+?JG1<{y5z=| zHIcGupjmzWz5MNvFZnNv&OX>Vkr%ql^;eXEuhQ993EF#;GR8{3BslK+*QlB0UCGSr zd;K)QUC1KEsM?WJPf}c?@tLEO+S9x%LdH{y!rpl+jq6lO9tU9?Ar7}5^)i2CNKzL$ z_KMDAg+}w4;8voj+f3&Fk8^TL$Inm9S*wNm1_pcUaiIqb#0m0F9nhy0&`X9Jrq`~~ zRDx4*l=#uW$Yf+AE++OW*^G!TDh5aCX!j9Q6crVxXJ`9~;HHr`U;oanMG@aFPK4b` z#w5_%t~Y?WMqdu_tXOx7S1_(u!{yLoWr5^W#<8`?oJ>sGob8SmWNl_RQ@)D--iGL_ z#;cGe9=~5pH{bhrw|_F8e6s}Gy>_$yQZFi(bu}`|qqq5#9z1?;w);!arUQ22rull$ zg}G}b-*i)hSe>c_A^^`x)ioHRv7r5i;JoW!CKbgKHxO6`>W~P$>-LC8Ouok22^?NB zYhWF2Q=P-j@ldymiIBBr$~J-v#Ij$Li+wC~)+8sye{WTfg_+G~YuWZ0h z&aG*H+yPQ%r-2teBd_|HQDSnk#{g^#Se&N8=jh7CryqCX>VPMSSP%3Y(BRO8{N#aaiHcXet)=&6}G~X5x2|$Op293 z9J^V;2ik(OP&P^n<7^F3fJij%1y?w-47)7d)~VdcP35?(>N8v^w|P+H%7|NdQ7K-r~V3hY2=Fx0jsEV$%%+j9EG^DcnwLiR@>hqC6%R1Bt( zD<4#CTiy!W*5szeW^4YMcLB45@UlsznybeUXY@U`=6-EaN(b_BC4L$iGFyp0A?SPa zyw8=ib#ZgNl2m2|oESdfyo#0JkKyRU?{Y$LYZX3n30zm;*IFAw8K&^aww9_aVvi-k zB&jWrIGG!Y2pn2X;A}yJGUmz&)C&dO_!|B!C!Ii0pot|N^A>D^TjENvVm_+|xHpUX 
zj(yAlu~DXD!c}hb*6C^14X6J}OD_O;x_JNln~Rw=H%m)Pd7vRG?Skkzod%GD0!Nyl zvv2H9=cygtH)qI^Ho(k{81)`0!mhA%xm&tnL)+^>+?K*(&v%LE3u01Uegu%rth0*xZ_*Vc&Jy0p(8Or@g2Q{3p|YUe2< zx>)!V4CLhw{9PC_HRyiH1TaR2up2gWj+3FglSt)mOI1#L)vE1aQHC;NB;*h)qkg59 zP?P8$4nVNapKe;8T=i0oee4cKTbH$gzoiX%5edh7Nq2y{O;6O>Pix9OhF3looOZDa z#(q$Fz#2W8`r+X0fkyzm3fd&800DdAl-e=@oVlqB}JEloMekfT{@rxbeRHN#qC$d5vpcNrKvFWCi?L&$*yg zwcU}2H|ra5<+qGU=gn!>#>RV1TxDlkA z)->S9KN<^p2r;Sf(quB;T>-GMcU;Dv=$%A2G?^Gm-yQ>yC=$P}esLk=KP#rnJa(t= z>)?AkkSLjfT8efOU~|)E=zUIL&mEn)xH;E72i$z5EfVlZ&I93(X7X;P+LF20?5souu0W^orp& zUZJK|JW7j6oEjbFG`@dg^Y`4H>-R@^Nb2sE#x(4lU07_4}6v%7{@}5QZDP}hyiW`jcfHX*L#w{fc6c5!Pk;NuvZE0K@cwe$hi@1@; zvwV$q9AsI8n=JO^b4vB~zf;ePb`WpNDK6zWVC--hs(e6kuw-+X1C9hZ!lp#U9=Oy+ z{DYJYyC8#6M~c}ayzqrpu z)85958{X!ezfX)2*#6Yvt)?{ zp~^=b2O_NHmUM5+Try4|qX}dtAU%>W*lD!SIlaLZd2w!Uh47#Tv23JmI^K_zZn!#d zCM(Em{yWN5@U;0)o$B){fSu=-jR@-y;h7R zek@DBG1tT(Ge);Gx7=t5SCk?O#Q;WOZj^onv21nYPj-$^;iM`msoL7|&IsDj1LrPj z>gnxfE*}y?58ESH)xLn^JG8B>1wc$sEfi+j11Vajz(8|wpa3u%oGr09`X#+W7C!WL zcz@Zo36ov#=Z0q{7g=FFsgS+L_&3WrvM*DptXL2}R9QFBSTRW1HNW1?S{H2*S?F*; z`(|wOsYijWmj^Vy?xU=xVxvRPQ`=g?5z2Alzu?4LYpeXZXttER=*^J~oJ%smo_nR{ z6)5|^hbeaVP53`EMZ7OzX-{ws>$?q`eVzS$(f=^4mZTY$pgZr`ByKPsp(De}|G+l^ z&31N-OCzuS9;@IX?QGM%hT;C}5yUPAtNSB6rI>%Hc;{HAEN5g4_r-AF6iIu20rdp* zf)q#SDH(J`y$WcZH~|$Par6da`->gF)g`jDujsb;c(pZ8dn$P=Ql35FKh;d%As*Qk zdBoZWs)bB*dTx}xOlyXZd?|I<)yh(_IcSun;4i;EZ1 zm;7FW6EEm6>UlBeWEhV1_NK8dN_|B{P1){g0h!7r3r$0WQQQ0WzR}K_fpyP5OZ;wP z#jo74%!ZCvFXhigUOVwn_bXEzY89?AFJfs*v1!AY>w4mjnZ`&nHu$>2VUiBaqi3ZE zLZL5zbzMB&>b0?O;Tb7Eeu3S4D{VFLEPP9I(HF^G_~!Qa*>+wXi0x0!&RZ`XuU1 zm;bR-jmOr%Vb9AJo?s%0Fexch8~TcueHu>}6l!|FJ+kP;SXTe79H4&O2zEZb2g0vfbx^2^yX$LL&<{I z$o;;SJZ;kY7|Scavtw>(NlbqmZqWuW`ac7b0(c??`!}2;JWzd_tP0{{CP4OPxoIR z**v%%!Yr*6#S$X6r0LN5Yx`y;j7TuaXSQh&b#oc%Du&;>%k+Gl!Oqf4ZEyeE4V=r| zE6Cz$ZciF5yFlx@5=zq`>e92A=g>jYsLAa%zd2UOz6HSgc#PV<#bq~o653~XB89nH zDxNRr{SgG^k)nTV0Kij;xQfWe_sLI^lx_EOD*_Ng&hAFs59yiE)R!y=+&iEBRWw>q z>a0(5C#h|>0f#|fbqXQ%$>_a+fyY1n1Fz&km*yp|_1gBAYK70)cbUB5xjl;% 
zFxQ7W{P>{>z#L^eN`EXBR%G5!V*(S1_1w$uDD~dx;}xz4;!_y5j2m|lif`RCb*}1h zybO!qhEYfboH$E9k#P_}x973oJ?3_$dVzO!pyO-{C};hbZ`P~uC8r$=HVhZ$QqSyG zH`f1Y^>^9YG58I;gKO#ai~k9mybEqD$78uB40p(_!BcUFc5AalgpQ$(2yEmB;e6_r z&{9&TcuG-$L{isc8jNXmJodYM2oMS(tf>1q0k`|&Ip$1#hg@GUzuvH*s|AC{C~7-c zJGJa;AHPXbWO?7^%<$MVlbeT+Po?{2B%|488!05kF)2K_h~tXs8Uuao$?5^65SdqD zKVD=NX4Z}na&cG1BXn}zu9Md_ojaH zfK%{Do|l-a@ugaf3>7W7L4db0%ZBQ=Y~v{?OlbrG_Ja}$Qz=gbvmJpEPIq6 zeRfOZv~p$xQNOoL{La(ABTbZ_FXz@g{2q6pv#XNj^45=I_6b_#mTU za@35M0*2(orANMM?m#|+mRSuh+b;f@UE$7h=j4jN`@k*Fh0DTxJ0DGevK2ERUW@}3 z5iQ6%*4`3NsHG2GU8vs%1WXgu1Fjh4?ZMrU5O&o2mtqi17wWlAp?TF@D;_*^0R1RULN<4@sX6co_~ zdRkWjkCO7_pIh$~3jds$1()&LcBomCGq-$o4#*t5{5!y!{uf?D{_JX zXo=cp;7GgB-U69>Tj#15Ye_p`&VsP^cQVJ(PgDqb|8;Ng;m_Xl^uJ8-c(K3?-_f|N z2l9RkpV_#_ZVjE?-6ca<4kgka@d7B}@B#yjI4mI(R?HgpD(#qRY*Z~tNbK@xw=)0f z#V6V>b=D$R8^X_TS#4e^dp;NCW>7mM__Pf#(+XJk^oly@u|tY;DXv1rh2nhq*RYusd z<9w87n~_lARKJ_p9)$EBubg$C>Z7rv?8+@|ljOC1zNZS;Q8F=GqgYPaTv^fl$kqO# z72!IrEwzUPK?QSVChGsZ1(&8VfLnh+&kCKS-LlXTj1YDsmwMKJ3 z8(6jM%tc0-s%i}UjKvUE9}Zc?g>@frSRV)r-;&Y`gv=a!v#!7wNELIy(71Kx%?=$H z_T{CGlVuGyVdDm}Yk}ML^Kg~4-OKZyU;0-h)xsa5%lGODhm?8?*$s%@tF`t1H6Z?MeAoMEwwnourC|B#M z(mT_{HpYRd)grFZh8jsPdEW=n>UC0q;hm!KssA< zw<14m2!Zc@oq`h4yOCeXEpVU$`bU4*N}T1^SryWa-2)QNn)Hhm6CCI}_jVr*sob!| z?+M2Y!yM}g1D`kICLc3q-Ny)=J*!dIA2l^;5T|+VR{cO(<_Wa}EK0t5iFGV)h$ysN zo-mDI)o`_-S^tJ_C?U{e{YACxwgdlksi$B5lQi`@jUFkb1@JPV={}Fq7(hYqVo4~h ziDXZXTm|p>^KCM`!ccg?5N*S>b|CD%R4O%uf^smo{_L3uCl|`!gPfD5jSdlx(W1=Q zwV<=@OV#zOhA&w|4r{RKo;nybwc9=bH?l34YX^pBJb{wR3R+>m2HRJ zA=BZ(hx|9|IjrjiW*fP~3r7N$Uffsv)KT!pB$!9xj3x$Jk6wiBimUm~v3Dtza2wKXI@yLWpJu8Ckv0!i;)|GXGeaPn!(7s;%-hmaNaH-t;?-kfaHDIOB` zgAYE)mUJ<8o`2&zEpPitM|kv3qa1ZE>P7Tc(g-p=o=0iF&%`Hicf$)m!D##GV=(%2 zg6kJY&bH?GJC>ZOu%TT@$~V+q7{&G7z+IA8zfCrAK4lL1_81|f)^`p_O|AEZzlX4N zk$CUdGF!nW8u<0)dV{`)4`BSZXYoYb_mS)e15L_*rh*38-$UF{T`LQ09TbcNBEp_L zE8j{3eE`UwuFZ(KSAUQXR0V{!uo*#jG*p#Kf72JOZG1W{$c1iDTjb_ln%e?lKn!}k z<8--?p7S|gE<~8q+$+ys`ML=%WVntDc(a|9KkcfLlK0#=1i8~L%|{!m%5b5cc0a9p 
zZV%wH_DUa1HO=s~Bpz|Vq9k7qdnsfcX;Mt`%sm&F;{GfP_ zK`T*;+};<`NM`1jPmJ#c?>65ty8<_fUmV$>2X&UYF2sqPp-Gl49%MRH6>MHVR?W*+ zYhtExjXke6%<&{Z%%=`dDU~I0xvI*g@S%!W8rb50VuC-sqbT{7MG^+!WVPVw@Y?4S z(Evu;-om;yz-1hvqTZY1eF*X2$+|P;-$d|Z{A(AZolY65b&$8MsLb(XaC*jNp zVB^ap+WtVpNGeavqCrd6W`|Vf<7h+jl!2CqPM`fTS`fX>K6FE@iXX^HA^3Or=FMz; z)S<>*7+v-643fstMYwx?xRzn1-ry#9zkH zQt1X&Uaag~It^wwvLvX*!)I!gJ=$@A6UrW-r@3{Mp%*CZ{@o}bLw^lBgFdAV=R7GD zv1J+f2%t*uHoS^|kyS9#`i68U8_IiZC~(lkU?VSKnP!@LQIw2pG>0XmjL`@uR{O&kn#EWyD>$tz_w%oMw=yrR5j1OR6O=mh4aQLIA)U7VT`?;KkJ&f z!k($Z!YOlkTA)HY6M7|?PdE_xx$GjGy1RFUK)hn_t404 zoqii`SpkOzmWm!n(!1aJffmrhzq2C11gLJ0fS0+rWalmbljZ`g>%aUszT z!1&-cq6{mnk6Y(j*=!=rMW_zEx9(WR9Mp2U4eq%c> zdG`UJakKImfbka2f1K#P$H4=KEQ#gypsq&?eHaax9fP&I0ebyuGs zmp)37w8#E)3(SlqVo*vkznE`U3jB3GyUN;rX&8Cp1-N|X+#K%y?uf0Vml=%3L(GS7 z=BKYHZ2f@fZ)nb!1YNFP6j&@O>u;K@o@hCbW_Ua*-b{1cz$G+D|I`+gUx_8c_mC;o zY<2HVnjwD1q0T3|^GnpliHKVREje|zQ+A)xz&RGH_b2FeJvfyLOw9loK3O&qt-e$%a%+B*M`68C_yB!v z;VdvV4Dq{=D_UYXp0YW=_$X9$19O=13dDF`(?8glUc9&G7gwt~>_U0o{b&mN+h4yh z+W~(FHuy@A!|6K-EyH)S8ZC8$1ZcHbL=WArXZW{z%JupTWh~2C{RXH4`<2mOYKBA+JdluZVO1f%cz`g$o zn^@=R4*)qQ%lti0!o`O1%tYFBoi^ZvQ59!rh4p`sp?qMN|A`Pkl zvr6;4?RT*KSxorm+p%LCGzhFosXN^A^697}t6|T1bhu74Uz=6uE$t;X71e#^>{Q$! 
z@8rN(DoR2ReyiG^fcu#)YK!Al_86(AJ}dM1>mhS0ASkeh;R7z#1@9P*oL$yOVAI~fxAgb|Vt|y^mzaIKX6(*U_FJL)cmIkO z`*R0b&x`I4OIQ5$SEpAFph+75Q+g9cZ%t;u9+>xVvEy3=1{vu?mA!QC%a<|I!Bhw} zYe@4$=hxno=kKu)(r5|~1+?!~!UG@hNbw3Pm7PLPpn|7F-fRLXB;(qdi@>%ADsgsl z>m&&HCi0@CJXe2r)H9JMW>@QSHR{ZMM--sJ|4u5R#e4ILrvmsG4aS+TmUNb4?A}|# z8MC_u+BG^Zz}$VT=TnsSM=IuTm~RMxiC{JNu0K#VfA=rnnRD+s&XDAWmKNjPPoowm z1uglzlun$-9&RbA{jiL$Wk*Ijlt(pQeL2E-BqCIydvH8ZP3{(R{$7QOAV^I>xD8rO zd&_e&VSPVqGbh%nfF6U1^cP*ccCPHZw_!YS7{p;%{@O|-8t$QRAi?c_t+V~eec){R zzm%Of3sApR9mFkOVBop9xR_3mCbEqm$bJv4nY9~Xjy&NG?8Wr0C5f-ILiWCs zbI*EZFix#Z9AgK57`}D*7hdiiGqbj;iRiesiS2#fSq$Sq>nInn2Wj{agG=k)-${yM zy#$<-ITxSgs}nV|2=v4qW%Z-0pZ(9(N0=pJa0YR$iwhM87%XSaJBasQ2QQOV924ve zUF$#>qHk^Q=cCEztgSB4D8QGQJZN|`KpDU(CV#^-t3%vw@atHyl_s>a@l3K+K>aX z&;@~D~0%|&tAFS8-@*k7k@BQXvklb6q zW)S|`(;-L9?xp*+<$JqYlqHg2D6xmqu8lQ& zB@gIcp}ti$3=SN)iMkjhWV=9YKu5=Kw_Q)aQLR{w|SmgvBCkH^dp1#S=G-iUyzab`Qf`2?xu9b{r3&9r6e|8 zj~Z~VN7=qvDX^SETY1bF499&BILkyQ+*{43+ezO_s0>e!$Wi`gLx&`(csr=AwtEiF z2=}s9gjmykHVo!wFk22u*Wx?kEd!j#kai11&u%ct`?is48_#){jyFnp-+CdYX;{cs z>CwYexl!vgFrF#KfeUQh3O;v_Q;qJ`m|D=hkXUYUDbUaV2>D|e{F6l&&9pCg3-W(% zexi*_Yru8oX1U&ON~pEG>z_?r%uc=<&`~z>T)w3}Q^j2+6owfP=8ut(L$nIC<;b3w z4C5+~h+BPpJCwP!5YqvT)`B-Be=ztRVVWk2--Aw<==-Y5r<@EgvWf)`7#QCFWkMw> z?I7wV>U?9DBXb#QCD_emCOdaa=a41cb}CW$`mnz!ps@hA<0M@RwI46V1#ECFH(Ccs z0&LN9y6EG|#dKFaWrMBEM!~8~=k^ zz=KffSTRO*2#2ztM3(xG`Eja*@+WZh+myvSJFHyaCGgzvCF>FT+^qrrb%Dih@eniv zE$8d54&3?!H)5{_Sgs&|fCjkI`pSup3ugiF1LjY<`BleF3E=;uzWw?#@VRG4z#AUp zrw1_a8}0?3Y22g*c1(qxXzYSg=&}3NkC#>Ey)I2Nmy8iKz(W2rBY^TV5x~znu}hJG zQc~#;u(|IWHsub$f}oA&;FY)++F+c6_&CgS0mj}B9Lq4>gg0Ls*6)f0K2ubAR|t)q z^cYG;mKMb)pmgd&&}DC3DGPo9L6(;ZG!b3&14+6q%X7A^E)}q=^M2=?4gtL_usgas z_b%hq=UxOZJTTkpaD|XOjZj}2zKc^+obxY|C>`6Ad=C48#uPpO1;-x{e8!tMM3c*9v#U^y6;zW6;&x?0o?5BU}UN zDDr)+x55gXZwuNqd!cY!+QW8!lmRq15(YvIXSD1prpK^2&&|_C7ZAP6*ozh2UvcjT zEz>qS@b$~#oH(V+kAQua7~7;4Bt5~W#9vSomhg@(ek_MLkFP;7$Vt zs{`tc_lU>~sC7SeuuZ41;tSMprON~ zePr_?IQ!v7x(-1n?jFPISOfsV2>4KuV}fM<@kVBSx#h;t&nMUn9-jzh2u4?U%uzmR 
zhpk&yxVzJ>w;N|+gs}@1o`*-|^uMzv7~-<7;HBoh&w<$@$Xh01)?V$HU;S5ql+leI*C7_J)^D&yXe7^Sb$r=$Azr)ICjj$YUc`qO7p5Gy^ zfnzvl&HA=18_d0jRREF{hStQOk`GaKPk6Y{-{> ztf^HxAub(LNOArxcCTWku4aM%GyZuS@yClJ?&2$Aqf=OFWK+ShhrZ;SCUzwqs*w}0 z=KQ*MSMEYM;zD?~P2_F&h(&$#b@uOpJZH&O#ib*6AJD7d{p+_Dus48IWm2hA2kmM1rbJb+VHxrq~SVY9({#hz_90I%eCC;90K*{mEo@*dA_kMolik&s&^$9@XXUmb4ayje~2 zC{UvilQrRl9m0c2z`6N<%@7E5<%n)Z{+kmdyOlU6dA6=bRcdoy<5G7(I^ps%HCrE@ zGpEtYn)1~3;FZ^? z)I8GdMh5aa$%Hbu;|8D(u{x~@lXTrZJBz+k>PjNG};GFgwniq%Wh=jSoaLF z=!kAGI?I7m=^4zJOLUKXFr_I!{K6pXn!}eBc9d=1;y47@X7cPYe^by$w=u8j#M73i zQ0BOAoq{=n?lQrb3v#YzF-epO=MQIJJbFzU0{7h%q71r&! z{mwS7OTHl(lkRn|R`XK3IJgF9_EUaaWNS>Uc1TT%evkad2-}NK_q@-dTsH5iu z)DOb$E@1E>58`*M9$btens65$3XjTH4kNMxT~h~j zaQc2Ynm60tWBoERVROxOzBP%QEumeHJ8a&KGyx|FKihwUq69kR5A#;dDV8iw{oW54 z%(mQO2UTXk%gX;WzqG~2IHs+suT7PKzQH{fI~+pf5}=El3z_1|u5z}T-U+WFS8k?^ zJQG8-JFG3R4}-%fxXKZ-KbK4+^7glveQTRzwS;qT)X$ZtbE4wqqJGtY9s{?LoaJXl z_mitED$Vfa$cf5f_w)vLmKe08x6>GeRpg@MfS?jn#F^&Wxj-;C4xsU@9?w|_*Y67aDnkpBTTU>syweL`3VB=us&Q+PM!1uky-N&rb8W&Nf*%_QOw%f_E7yV;`!(OZ2*%w81vjs z%pTeL5={wFp?SJr?sW7;eXdBuvk$C0jgB%jiYXH+pseO+@hD>p)(Uh=#3>*!@(!hrj;OVMU z-eZJ~B4LY%JFEBN7Y6{u`2IEB{hEjz*#>BHB{vW<7N{}V1PMj{DwVJu?*PCFUAn1R zr2F^FnWH2NUarjS`3U6}Nj4ZG)z!fsSL! 
z)c1(#4E)inYIy6r3~~2pyljg6Lwi!NFW`$9U@HZ#cU9VXVegWVC13pbitL1>&4DnJ z8kgFpK}1n}p3=qtcTE{0?A-fNY>NT=sTDzO<#T8ATi?sK^h^|6*_NJ_Ziv2!m(mrr zCA;8;VRvglT@)yE%?X>j!&NzbFhIJ1Uh$FA)rs4-U-q!@-u}RzN_zaVuPr!rqRjFx zWp|JywQ+k8D>yErE%9Z~X#mICd0)E-28bGStM3minH*VeU<*Wi6s?d4w%LXV ztP7C=UP@Oj3^5r7@x599O?B}pbv{&+8_BlAXM|6prd0)p zh*Aa{Mq0Ncu>F98-thvHHg(Gni^!}+{PXM^YxwykU33ln>*DE2&it>(w9hPZ+^k{!3}oH zFj&uwYaV?Q1f3sU3}!7KL!)2Ga|1U8r(jgz7w{4QE?95!i|06-!j|MrM&W+3H4T93 z%0xB_W2fGDh8~^9VGed0g}RjDDDXl@xa+tH?#NisdXQ*l(F}Wxb?!zUoU1mVWquUc zI{}#_jt9U-a;RM6rAS zbymC3+tzfxaYNf=f%cY#&`4%R+qMbVxMZXjiRDpXTTv;l0tsv;hyf3Jm$f2*=c8EF z$R2M(e{>U?4QfP-d;SAufU4zp`b^NUIfxt(>4vD|8(iupcjxa97s%w<1AX+7h~xP) zj@@}{zqM{0`-HOd80}}WkhjV`ytdzWKe)+!bxN853gx35umpWxQ#F~G*=y4je|jRP zZgPh`@JQyCLA2(sc~!(hNcj<@Ftx+BI`O(>!xMVlg?&8Poq>>%*E^iPY5Q6ioEM<= z8ju~urLVlh{$F}B_F{XXk*fB>w>g1{dz)Ed`H{J-BK0>0Gxd&DY&o==pW?mIU$nu7 z!=XWX{_a3P(GX65rz`0zl85$E(*!s!_v1(WAqg7VUl%54`1c&D#Ho{W|K+$h( zJt{}a-ctSpbCMH$lv|&cvfc?%n_sr@;0LhQ!1UHWy8rV{kbtt%_o0K0O4L4f#aO_w zE6RD#7-!{(sSsXJud*QLY2JBVYjmB;U^o=i&A^8&x1Vf&oHecLlow@AA4~ z7MHiu>958V=E(7})H&4k8vBvB9@xXhNqjVEJW7gYClNwH#;0Q%nC(@W6U73pFDrtQ zesw&HZIo~SxNhe-3M~qy-jXyBR^Mr^=XdL;X#f=029-OvcY7iAcjb z6Dxt$K)_7`$31Y1kBeWqR~FCW_}_+0&vg-j9Y8J+LMAx51mfL)_V8_&*#Hj!2yupk0j6n zO3ei|bh&HiLAJzxfOj!MwsCCkuY2)}9BO4>CFP^u%Sb$jP31s*~>$~E(dUWkhhbarNGB+NlTpGoSu z|7C%Ws+)nZ(s&gPMdVTacTfj1RF#?)Mw<|pjAojo7ZWqpT34Qa`64+vZKM7!BZtFn zzlAW{@^d2GUuGr5{gF1^SQ6)Qql^b#%>T9b-%(XvUGy+~Ezzh^yec9J7(qorAc#m4 zFot3Ur7BfH1gRpu`^0vED=2~>RY9al6Y0$;ML|HCAT@&Y=9S*xxvz%&6dQ{s=133qy~Elp60W*Ln}4?{0}V-yUl~z zucSWNW8sqEDzYgNR0+zqlLI){SF-DF z7+a3Pi5dU@_HKT4uDHd(!sVm-Ap0IW*V{(^a3a>{sfvGa>8)q(FJ$I-YuRs8S?sB_ zWMPy3sa)m;!vQS$j-NN5_)iPh`^(GeGnale`L*|a)>V}s<RHU|H^USl$Y*))?_`hi>OL5e&AjkB#>KGWMPln&{oEhc zEZ^cL@T6+r(zPZua^ z>W2@kimVRnG3tRN3Yr}RHrb5|+)Io?j-0g{HQl3?HjvzPQz|*YpNnh2?Va6w_aE~Z zsTOchS2{f)7Q1qF=TyXvyT<#n?Mok6-8xnN=)A?EbwM(jlIqaQ&e9t1uR--*U6ROVorPt{3Kwr4I-YR8}G(pW5 zW@!;O_3ds|-o8C6cw^Smvg!&8m#bxOP>vMC6shq(cd6z2nR6n_6{~cOW+SLFf2KsM 
zPGrQFUFcg$S_x*fRQ>0(@`}Sv-Ep5S!jE^z3cU&%d3~n+#^)Dq1+J_9#{zbWTJ2@u z@x{N{W^%M?Y*lBh8BSDjFF|{ZsKD1b=f+iIo?I`lnanKU@-Xi0b*>XVowa&*dtpx} z?*hN8Tc6@Da|YrQE8dvr&xb;&g|PUk_SW;c*TI>Ea~nyM!`aut%OXS~ZZ%ll9JjG= z)71NMq?Kz0`qtvC)feZXclkMON7dQ$tofe>{TQ(*R778A+CFXdy~s)EiVQZ-!)=GN zX0Cs6pOy<+aO~wetG7HG>cqP6(j(r2IQ5dwB4@W{#j>j3?UxZ?$(XN5tBrb+6UTQm z(_MSAlkdczuXE8&4BCiKUy^TG*477n8OuE1^m<(1%q_N&BPUOA8;iapW&hR8VV6$L zFWnaD`et2Ee(8UWLRs;)KhqX9We55mF5|O3cFi4e68u8f0zEbjv)p!-u|2CR z^EfV{DJ^>BwUskLXp@B~)#d(d=@$Hj1FWu9;X`;gU;Gw0t7hf0DtKL_-eWcv-{Zk%J!L+$mkpVB!BxG?AXk+HtH1EZp)c^!@@wZ!b%V znXQIVY8LMgdH=lpgCW++UYm1XK)izwsp5xge@3O4PyEsX+YY;%7>sKd)X z98Pxp!VlQJwEsBRovjWa_O-@$8(Zw9?n+g z`vhvrgxj?BIF(@>Y$A2`4(&|>Tk54VMZMb$nI)p36ikQs_GK=ZX)l+%jL)HB#zijFRk`S8Ik#S6_UE#M zgzmsO!DRO$u35P;cJ}H4ZK_22!H@>-j%}0jt_ur`5tGhE{XaB2Jn*^BqvZ5+r2V`f z{~w#5(H#34sEhe7vmc}*}49$v+_SJILS3co#=LZtkg;SB&NXOtXGul zD@V^h_|c}!Y`yhx0<}WQ%B^LAH(+pfN`e{}dC_I?{fi|EMQQ!Ogo}>_)prp+X3&U zr?Rmw7hN9k@=F;M=`XO!%pVESqw=;#^gW9kvV zZ#_==d;UD-Osgy2|!Sa*NyL zIc$}R1{=696>y}+{mhLm*EvCLE!PR0C3ONHk-O|T7o(H5){J|18#|+$ zlWN8muwO=uEXQsA4b@i*{ay~%mGa0q@)^u~36wTm-da8If@TyUcXE6Fc-o6bv5vZx zJi+$#UK@p?@%B1Dns)2c zZwcwYaTt z@z}1#eNAJ9>&@Id{V1J-4Lo`|IfK_sHSFQVb{U&|lB=HZ@@+28T(40TbG z*PxGXA!{tp;p{F~Wu@@>v(ZFP{hiqM{T-7o^5#1ELjzVis#nz;GTbsoLI>^tF-eMP z&a-E<6^R*Eck*&z?H79jZk#XoQ%(4sn!WC}yz6W>?e{L~ny3ED&w3vW(D{^pq1jKw z-Knx0TPBB-t$I9&cFvGzVK$S-vT%RU>3|8Rb;*6|TJ69WE}=SKw^$n&#owex3Hk&r zoRoW#5!!q)nYKh$aMhy+F%}WD(KfFjnVipjFHXpKot4S$*wOHu!`@o%_5!tH(#NE6 z*h4c#pj>m^-t0(=4Nhyh3u?q7-4^{6%jYxZ^9D2i>mg!w;45sm<_(Re*&Oe7*vgT5 z+3oGnu&i^&oYT;(V(3`r^VeBxwG~}QAJfuLoJjJTnXdEW_x@1k+bb>}IkaTgj>gpy zB2tmfU;Gs1-m>a=ui^Qy&+BadF9!9$`*~E(S{*xW*BSi0q2KvrQAz^$L}PBptfJhD z5lu@P(xD{?ojeE3nOmZZVCcoYP zhH8i{&E&)0$XQRSg|k(2IOn?NNHaIf*Jrf~&Gsc0D(_2d7j@=hRG2h9o6bmNFC+!d%8s@xYS?!Tui9}9qlvzK9X?f)VkhT&AA7xC(NeF7rCmwPJ5a#;jn?U#D-9fW zd|=MOVgBr4?tdICg|d_TpNmlb{no#4<9qP^*Eas$4*zb4f49TmA?Dxc@NcaA7dd=K zT>pQ%9sIQGS3c_S^7b~w^iE;J5_*uBl^zB{a$~;a`f;?#MFVYlj8lz5d!l>y@2l1; 
z{!QjkDE1eY#@4rBglk96K$5q0XGt-ph#R)%*lBaNKRI@Pi^SE7b5o_VK4gOV?a<5b zAN?wP>0OXT@GNGi7h!@r-?ZF!XYsPqU$=hc`2lq5<@A_(z1^TC%ew0pGNT7V`dl7p z^K6LmoOfJC@*PUIumApAjYC;WP1n|9K5rwsJV%`f6s*Pvzp1FW6MZ7E#DA}8G^_twSLu9IlTbn;F#$ZOqU z_We?n`upW)G;H!+CVlW)i&*ebiR5`Ocy_z8%=ASL>mq7j!SBESJ~bM#m>1={>hF7? z3@R)??I4U^q}CWHaA{0>b}&udcI)?VpV!=ZEoV(XX2#bg>&v!=OF3AwhOhYk?Z)~a z9}P!h%JJQW?5wP;)ym%!7|Nh2F8nJRkf&aJ>t~yb9=hs||9xE^T-SLpwbTIp(fTGZ z>_GEo%=avsVlSGl?ndEjm>ntH9;kEmSWYXNZD)OXwZ@^*9c}V2hdOmXWPH%JD#ozl zsFajnf4ies;3`!t6iqM=FA1x;F55v_XBmx-bS7J7Vl`Cpz5u z!s-;|*5%gX4@OH*=H*DPsljk`%19IiV;v3D@NP$7oVy8<&Gl619y;Qc7Mq;Bi(faCIK z&YXEzSXEi+gE6y0?qBam$TeqJ;I_%T!j)Y)Fc!Ev&A7@0D^Q1aU&Q!F3K!k!7+b%9 zW`9qXg=`bEq}x>C7z2Jqf3~T?zA?MteX@Sxas5%WMN1kJ{k~$44n?3Dp8EC+YtF^h z2u2)BA4eNu1>-t^F{5+m&Pmz2eSg)B=Z5n}E5)(N6K1xsKTnvU?8HW!mE#LI zE=#<~n$7!YnCei9-k1UFLM&Udgp&#vX2ch!It?)-Jiz(Zl4UFWx5?b0nV`2Jol9C0 zb0NZpwJ{cCZeg-8xo-6PCsCvViOt~VF(}3-%2!5mIJ+l2NZi_B5dZCKm&H9W&>JzQ ztWp!tnKhKrynlxD_~VmY`~4!P*`KS#sl~^${MzM=3DHpwBWUe23lS3v zK4_t3WQ_i{E+<29hG7An=OW=*q|6I3+u5pNuGpKc6w@rYxg+rlRBCmqVQH*pa+`g` z9Zu4Q*?Rebx@}}lm&W*#SXT^;_QmjD(P3PJM%%Mz4-Gwvi{ISiNfn9r`z8~p}&reyy`LCT}2D*uO3GC%$<=Z@TXmX|(oKZ+M$z^S(K zNgngWF1z?5aNjumMO6k@I7ZRP`%7#1Z$;k>*tpc;lnPnyR{PDdVZogxvcUZ z-(bFdNuBQLUw{4e(A1yT+1(U5tmRn4c6VSiR%USz?6}Rb0Zk2`J^wGuXvTKm+MIIX zx4hZmT+i-JY;2{NC6hmeNy@cuZnG12l5}$)NV-o9e!LeD5a15ieIa3>5Y1dYD@S4M zr2(3H533!j<=J*={c{_!ddp8Iq{2t{)+cC&+SVLn;BuvKJ(-g3uWqaocKq^Z7J4Js zxPAQi(Yjn9Q?2RYil4TuFXeJEKg1~7nCd=1v%fSE!89qW!%G}Z7Y&JLUHJ9-iefZU zBE{M)d3~9&puuD7Jk{#6lA#t0u^B8ToHCGXCGg2u`5>p(!f?(O`W-Ev} zOM$|fW(`T?b2OXIeZP%s@jI?*T_>wt;=6r&XQ{7{gE>4D>3&Q5^!oP5`_a7V?hppn z;epfyHkvb;FGE5ur#;vAi=6LylhnWLbjFIcTZ>>`Y%yW>S}!o_{D=)UHfLtb)f3oP z7PoIG3l+|N-Ln7UQN*#rmQ;9GpN&f5JM^6Uq8$;1ZYY1Z#Pe*QR>#(#MZBhSNbFbe z;b5my*RToFv2BY7Kc*Q@f9*PR`t*9ZWN(E#TjeoSsN>qQbu=s#Z&MVyp5Hqj&39-4 zLwvl;1#|Ziu!BB|3`CbwgfN4yvVc^8mstHm_XC$}b|Ttu#mIN!7Ug=r-vT9G7C!um z-N1+H9J_&d83j4Hi^j&KZ7w661eB<6{|6Q|z9l~JpRc-o{~uuRxi~dG_4?cY{Pd&l 
zD?fZMOHn8rsQ(avl%J{p6bk+y^{sm?@iLUdM%o$X=ZJlFSA?)(GKv@MRI${DGYTtn zFHFFJ<#2_t$HE-(_wOQPW$|4$m=)RCCI4B;b@sB$$uLRLr%GX4(XZGyG&IztAxU>^ zxK)bGD!J@JNioyH_4BajnM)7Q7@3tN4sX8|)3khlTPLw|tEj2k-r0Ad5+b{m!#U8> z+dDEc(gYh|5JyN;+udi?%Vwxlbmi1=9jjI(_Y#YtalJy9=TQWRjj4*N4u5}ra8eMR5Iis4DyhoyI)47w}%^$m|I{8sdb7nNaBF$)0d zPq8yHoqShvIsRN9rRb~D;7e00be~s2FgK4BUAc}UD7EC-xir2*4!D^(RV9gChVJ<4 zM@wC$sdtpdGhSX^D#-;iru~YtGiOW-=O_5nZpq2<@$>VG;sw-Uwo|54p>ep=?t1L_ z{gY(YQ$IRQw{(wtdwZ)A&k&E08l4mo78b_5Kk}+0?V2;KB2tw$Bp}E}h95*jSW)Ya zBDn!dis3%JJaM~^MuE{YlOqxF+L^-|W*6k11RuEUm-FZEYgEYerh`KylCBlJFbAfWxgWFE1lE-ofq>+yKn^XDoAR^Pmy z)V%d@Dc>m_Hr~UYyU<}bxzd5;AP*6$Nwpu>! z*|TS>6Fo2lF8V{VenfBfKHZ!HqKI>=6ZNr|>h6fOo&A)aIexu_m0#n@I_a_Z^lSmG z7f15}MLol}QKTxht@6`PrKcch#kmY;$Gn=2y=l7R@Tr3^NR{^APHi}>zNY@jwSIGB zMDou)PWnNjW*YH8Pps~`vzXAP?l@G@vwM1wG!~8%)Xlm0ZUh+nphxQcFdi|AX_j$> z+rdf98_tS3HqIFVkH;`ZipO_N=1z5xV6oahSy!J zuBoejtZKh100bBCjswypl_TWbx`Slhbs!tJ5E(n}f-db0_&MJAJ+Gt;Ww1o-E3 zPfuw}woPYrpq`i0Xm@n!p}8-wQ?Gn_Vkk)e>uea4F?T>>DeLO8& zbBd9Hbf!1xc{{YbiAgU2{re!|m11n7?qg{`tsL9S98RC_kr!&KkAT}jZVOLhe0y-! 
z%Ly}JZ30iLktmzgJUNEB7bP{(Ck)H{_Yx1Z2l*0>CTjMf2$$phE9b~;v8#Wz0X9*Q zhRBOXnKm_|SG419?y7@X2w|5$PfOc)Cz-}M@6=Wnz~4#kldMcD0psGsS)GwcIa6)> zY8){}!T@{&^~~2C`_FIgRU-^}dqUcuk%v}a2O}F;mEYQ>uZ%)7lYuMIn!M!&&D(x9>cE>ERH(9GE zGxU~>hdHzz!!q&bE5WIr$akxQlg`WphmnOu`Jh6D;4@6!R9f=`IV?J(;n| zRnn6>*#o@!OZ$;=sa#*lQW;eH@)qB#)bo7`)~c_{Na8~H!JA1^s$N{VZ(&cx+Cf9AMKun<%BJ0FLY^7YmBXqHGQo0B1;mFdQ8x_PW9;GGU~QBiE79p@wy=F&y}i7^0PR<&#E( zey+U*NkRI^SkSn>+L%Cz1u!iYj=z44QI5D8@tYr|AGW3nx}bbZA&_@$1*QJ1FPF56 z$82-E3LFCKR$1q~7%1VBrGz<2W!w{<$oadi@I* zF7(rQeBMk+c_5PrkkAuQTeHOUIg9MvFY3AWLu0c6NKlnKDDhgco0rxtm)d_VM?|J- zcEn>bZPc+{(Fs!!PsvH2}O9itQ$%n0S-u7{7Q!!Xc9Ov8wB&c__I)x3{~CBj&G1+F(X# z8j@1TW!5zY?^S(TRI4l-X34VUm!E;F&vj1$dzF$5*4732hxPn;gOwfA>B?ZCZD$FF zMvj~NjXi zJNV}9+qm~rBy}&CuCRLA49DcPIM*X<gL)%?DUvOi;%|()eK?Ou=E4W2CbLoBi=*Uca`AVs93g2l%X@Idlw&zqTNJz`Ncy+S`8xja{y;GTJ&CdzbZ@yr>{F;{| zAf`xDPnd&VwhigJPG{5e#U|G%l?8I^i?F$RwIjEE-ZI-|?$4F7jr|gt#TcJy@a4}x z80}~u--_`Dv*e*|2R>NjHmp2f`nICt)S({CuB>-)A-f2 zzPr#0kwD>a#y(tOvRubd+YOwYFrH%S^L^zSw|wVuFAzdRWN<&C-(h2dEPk#FBOSY9oo2d>w(7M@s9*}Z5=4rtd!_` za-Z+O2yRYkQ3nZNmF%Kxbod&IUsK*wOh+q5hNnLqiakLPMUJ#_WoXcTqvLUgY(jb* zty!-FB9{V$h!jYydYKf3 zP0v15QTVnX`_-#gbTJw1>MC(C73I2xNJy+f$UJQZwdIZ2`ap7GD6`#XdbB5|`SooU zU);>qN%2oNHtsJNByaVBl(E^#!tR_^KVUV=_L#cs=2~eG{+|5>)O1bFemuoMtfXEB>T9dXaRHkhJj1(e9HX|s1g`Xm#>$5qNM&e`e`FtVXO zr6TV8Ud>EQPeG)q#-^}!ak+YsjGg6h8>>!5LBqD4z(b4@b*xL$bqQ&R?*H-8QqdX^ z*CKk2;~-65+02gh-LGB%A<8$TL%CZQIPl&l_Og_rZ{kCM|DWJ3FI9#<93J?X7Snue z$>55Eh5dGN7hN}c*oY=_SuWi4@(LEW?%15^5AO8rAu#xsdw)n-@IM zz>uUzO5;CYTU~cl-~CMB=d#Nf+Cv9>vA<#IwXn-BrcHsR57cYzOSp`t@jM3btPEsq zPiIL~cEosP1Gtr=FE&#|t4J_m#uG4=!RkNyP2Rm7tE@4-5JFINqa>3ib+yG z!luxe;lhNX=)jKHC?78nfHAX5l6_C6z_xWgP@&`oA;gj=CUq}ZC9_EcXNI1+kt4_LJC0h=NoQsALlMyI@g%J@@G@ zGT%80X{Gf$;nE`9X_>B>alc(8JqO2EjyVdNm;lwvy z9%VD3-kZwU6yhtiZ{NGATR%_+PvWfrpGP5W>YD(R<#Y2D@)a=itqHwawzrQ(o2Nx- z^?8Tqm@`6{e32Q!?9YCL5{v6}P)KZY^~FAwx3@2D-`pJTEDm#^YahP@G7jwe1pw2= zA5KQdNME(Fsbvx`DBf|{H(s!=JtbBQNW;V3(P-#<=h*$-#i*;5>6{Q){%AKlg3}yK 
z{|*F}KrKj>GhI%B9oMZVuAaP2R6R@+I_+%*_JREd$hXicjCAq8^y$@2YmDEzH0U8^mz44gGEk&n` zHY#U@ZF*_hG26cAPNRqcGlyR25-Pb}KzWCs`+ZKe4f(UoISZXg1udbyi&>1c`%!wY7DQ8!WWg z%6&9M&P6qU75tS7(o+SgzM;JZhZf2QP>E7`D4O`p85XamQK19i>Iwv0Ib{z7_;j09 zo-nB-*kX@0FCv4k7l z&)ndffGF+AI&*Uql2ms3tumjHE_w70>@FXqA*B(pXK_&9QEkm<3n zAH3rlY+&g++MF0D+16Qm7865T6zd|Dy(RYFRBSezyGktV63;= z`olS6c@RHx{&islYWm17Ozk(jtM%jIQ(wUL7H?npa@&@K)1wW!L#X|oR{e=n@&GKq zoU;Ki$dOUZ*2%9weLTkswFW}Xr`Iv0ayDzIS3U*Cvqm~}aZLBhA3rbx3+HxKFR1C0 zCnYo+-M+?3!`wZ=yXM0(9F*8^rgd)I+-0IMJlF}DXz5z9mb=vsDDD`yM6S7}Tk0)T zu!Tp_=iOA~AK(DCfmOSdZ9}$do`-S4t-Rr!fey?of73gQqUN?j#F&j?OOU1GODofo z6AYp)K#R06n6yUxx3$k5ITs;*tmjdnADxU=iJNMhEMOyEJ5uDdG%?qjz%?J? zcQ#b$0~yTpw6y4G9{%A`nYr>Q0306>>jBvsMuVwN6)jC%M%hyx-koH6yh1y`J10TG zk#>~WHj>5ghC3RRWOjovsaf6bWldc6ESTPq-tzU$+ALNm29wVYtad17)ih^xi%@bO zAPp%YUInzu@yS+{d{Z$7RGPSf zOMSOTFs9qs;EJBXfbpp2wKJ@(QTwv9vl(~^rKsN3x`D|rm>;q#{f$N1zn4QzmMm-? zwmy9Vq!9lhF{BrB)1&1&Ybf?lhvF_rxJ{hfw0ZN>W{?pHW75dG0;>hhd=saU$!*ot zb23tB2Xf?M)3rU*kxq$qC&YFsht0{7Z4cU~D;@S*mY)qLJGh+`+pz=Ej<*|{5fVcj ze-@c>$77Wc=jbRvG4%naDJ7htCEKP=MW9YHw!~ne!&vcuLg1uqXNiIXx1ib2g`k}q z-_p`T$f=qk0Nk@F=l|H;X|Et)4nHx|11QTov%h5G!z%>YV${<&D{|VY7?jv>z$`}Hc=i46i`W=aos)zTN#QpGPE>Gt z>$gjr{_$h_75Ju9lj=zFMok-%I5BWuj-(>3r0`D|CIs!pV|iTl*MXvW(l6KUMx^q$ zR1QCs{mO+II%))J9aZtbB8nNxoK;eY4 zyxC)!gyJ9rX<;$c0pY{V*z%#M-;%Hdz^x{LxLP7PnDV+t79ZTh0B}|_X+t=^_hNQ!4VsNty)6KQ;Y zq(33qryUZ6=5#@dR0DKG26zj?E)#>c?O=X8i24I2woA_nUe6cd5>OjSsv$DpNRP#Z zXrJb}wG}&EHo=CW^r;!*l6JlrDjJ8vkK>m=R(2xDpCpcmz?n`!*7k0@C9wraH3O;1H&$&d3ll@liY}&!G>#1; zBZce+m#pMikDT>XW`uKp+-kSJPNe=0=1nj5w#pz$Eelf($3>=K?(E23l_yDLAdE19 ztxOIeP)OhN5;)Qp17?P*W(>FjrA7TerNi4Y;*1iIE)#6&`zcRsql?)YS){x)9Q8@5MT%1e5FY{^gUgb(}Y{+*LZd` zQyDO8^#~IEO+=`X?b6~tJESe$GeK}@=U`e@WpD0S&52HBj~UhIp(TgA_yIvt1{Z9@ z&)Moj5Z8%9k|(gORYOy=3d;zxaeA|)Wuk@={1c-7xUEaE7b^pGk$_yxiEX)8EtuS` zp+@NY)KaeUDV2-R0)?VJ1+b*7Mkum;P>;lLMP_`&J7^@OBD9kdAyw?<3Gza9z;^Zx zk1rjS9+lGJ-l|ckyw($*s&)tz;}vhAJFXwG%Lo`#BGr9Pe6UV-Pdx3`+HEBW@@yb* 
z@Fw93m~637(CLb3ifPJr5J+*fqq6Sc*8(Ln9zFUw9S-toDLid7((V`FabG6ILqpLw zQv*Y_&?3bC+t|x+Li!I0mlA3U@9AtFCTXVvexk8|20Ed)LH8pfB7#AzKI{Y!S+9n) z#o2YFEnl^`xLCW@@Xwe3VXwLXJ%cu9Ye@RFt zL>4YN3^j?4BYbF<&7eRUhZPd)6m15}UB)4cO1&Ctkj}0krBn%b*3SiY=2U%>u9S3) zy!Ym~T_+y+ODuq5btxH@CMNC16|(5@t9+X?mMAcIrJLtyU9z}LPA(oXE6#Hx z?*pm8_@33k5EBpx9wLH1OMOI2l2aME)(UeQfUCDg?ox{J<~43Hp;wPT%;Z~fEo?}v zaQalq&@YwXGF1xQWU&~8+TW-3Vo&R2)b{z%2Z3p)u8GT;d z$foDGLkaX0-O3Py(+dSgn}eG9q@<)4z<^sZPQAl>oVCVwSL@fKm`5N3HV(7XAnLT% zOsMgSPYC62f^#;h<65tISOKkV~f^`kJc4%x+NWe#Y%`U=3;7ZZ%er2R%#zRcK* zO-jmwysa-8A5F?5Ol=#H`bEn)#0NgB>t1ACT6Y*f?S)db43BLS zc*8A{wk8Kq>P8KpO8hyZ+!K#N?8}lVMrRaEMBVx##q<$M4{{*bj?e4DRu!Z$5Vg*l zR1$yU5Knn0jgp(s6jm;xx&g&=ad2r-p#K;sxfh}KE~58--PQwp$m5bU?Pho>ET_T| zR$My-7egW#Q~+E=97iM} zB~48hE`|-41GhjdDoe=guMwLlQZ1npHW^bP;i&FUm9IkOjD|34ZubsOw)R^_;eg_5 z@vkd6H-VsMAiFL?mJ`G%AqoSo$#xHqo0TcZMoFnKZ=%VpuYlMF-03zTUS)vy-6cMp z+whJoOY%sa7#Ylnf37>^Kn9S?_1Vj}I}=utym|J_2SjQ>EO!M9m(F8RN~5*Lv$NDw z!3U0I!i{Vr^FIK2O;uFkWeflHW*)h9UuBP^u&xvAbmEi&P}K#Vsj+qDjrt((>kqZQut@aRZ*=iXb5R5nYqs z&p?J2w?-&0rA6D$^#bwjqxb8Ykvo}NYxe+<)d3v^_f9WKF;r9Fq_1wXjEjxkWB11o zY-)}bL`R~2`t;r2X~Ld~BGFu(7!eW@l7keMFVDV4Tfov1p)?N3nur&BvG5;4^nn2)#sC3Vbw0WhND*teppO4S0R!K?#v#sB9 z7RXZSOW87cfp%?4(Za~58PazGaD7Rc4~6HALyTg9Fs8^E6wbpQvR#O(${i|tUM&x8 zQPVo+rYEAt94lm>OpY`YL+zU`SL4;LuINJ^&; zo4G^AN@AZPLCEDog@*u!G#@7fp*AP0Bs)LmIFOjx0jk9Yem>@v3n@@Y+7E8QyP}cO zcQsViDy5%MrSaqq--5EZmzCRqw&#a-4Wf90(d{bu8j$>Cmed`IGt72qh%~(zt-_ku zCUtl%Q4wqrv(zDaT|jzP;3`5YD=aR|m*?%;Pf=4O8n9IxcNZbKiUOn|1xaj42M)xQ zfQ(Z7k&RGMNM;owD`ZNz#`-pbvrB@6>%0rf!aTq}o5)rEO9Q6u)+y?VsQ0%7EcN^! 
zzZ2Q;UlK6wPuHlF+z7Jm5EcwT0HwN4ewnJYMz_~){6||@hAP4L|NHl};{PvCE0S|I%vb-&70b&YOd|G$Ayhc2u#GoW zRlY>IIy*5$m{TGte^XW#4=Eq{o!lkgWS`ee2jMP!wJo!EaY6Nc7I9aj^dNTgxGyE+4_p@|YD{im&F@mTv+^5U+OK!61Z9#!|k6k5F z{Z7DrU&8Hjp=1|k#}ve#7yHY$Pgm%mLc%^o{G9@QOKQCY+@&yT62v(XMGymu?vXb< z-`+v?Ahe~-uSHZDM@TWHW(dc6p0|AI3mgJ^V4x(E@Z6Vqgy!z_a?0 z;*nlT%nO~)AR^Uo7fJ&;M7IaupU}JWEn&ooUKDyTA1J2eh~OI85)q2YBV6$r`GQ1q z#91B#q&tLGB5H1^ z$LDVN`xQQc>ti3nX`&}8AE3%`A&F~Sqz7I(MDFVSp%X*RrHFCmI%)8Lp!8*n!6@o= zXCcp?YHBFhOlaXKB#K?(Hh&$?qS3a9Upb#Q)1Pn#E{Vxl5w?8ul2#fG5U3bszYU~J za`Squ@1d_pQ3Ua5?C?s{*FZ6U%K?+qIuI(7b5W}knFlU%umAJ%YvHKq6ENWIsG2>C zto0uvg>fR@RmMq}#9L}&IGIH*3KE`&pc39TV=z?07vJ24dY=FI+6V7^&(-jS%rV(N;hU#m&phtC6h7Nia6lI_9FFkBJSWJ*#6IG5BVf z0?eZcfD2aD?F8o|YmNdzX8mTzZ8=y~#^w1MOB?+&B$kov2t*ssu8^8%t6f7eXpD2F zy`zLX!KUXY>#!+SlDCn~cLuBzospdP^*G@$NEozj zCnQu>e|$p5*<6?#Zo=30N(Pc~vKj>Xmbn&e_n5N=3R#4{hCeAQ@|`t#mXzGTH14sN zi12$Qr|oH?cr)@Lm0gtA?H-GX zm58BA9JcR&O@*2a=8Qv#Ef7d{g%7PE=fwy>+iNpVWQl|-!|Q_15&IzszeNgFhAn_H z90=2=GKs#37+Qw2J-N+CwjXi+jrp~oDELsc-3VYLWOEV*7UM@`0Fe|{d@mK@YD!|~%Lcu+EJPon* z5~&oAs;NT7Gia=e9KykiUR`AYH%L<3)^Lb%Zgztlvu54e0LGU%keqouxB?Q3k{|;3 zIe))Hmhkhse1m=Dg9pq0Y8GR94ZBvHDW}Cvvz0#(@FqH9;$DbHz}XXX;f3vsQ;Kt- z`4j3yP6i*?J3mT(6g3-9E~s`-RkhFcJO@tO9gHKdpGKi0=AXBLgwGW_p1sftYY_@k zsBZ#ERwwcUI$0(mkLhwp@Ze-4v$iqavAywMOCoxjKz*DL@+C|IF=gT@OX{V0xRbKG zgF(tC`Ogz!xwU*XFeIofPp*)Jqwmv@2YOacbumafWAKg?eemAPdqY5%=s;RZqzUpQ z4~5p+`t7d3B0%+rCLZKE!!kP)?R(uQ3By4osPV3lx$+wg!Q)7U9c`ge&X^pyxuzTTt&!bJL1Lzm$N5!f;>hap zbGgX=QOMEm8?dZE8FIrNBnMR@O~~5!Iw(Q(*+C8-%z-bXL!nuWBXJyJZH|V6$kY3T z;hGuwBW^+jJbydbb4~Z*-=Fij{Bb#q^SE{{^7Fn3UW9X;5v=(jSb_Xu;}5#+P_K?v z!E}gTy2!;I(OVFuCr2Fk>sVYES*)2SLPp|_JR8XQv1_^ke_#3{6esLM$R#a>pCVrO zMR+cJq@A%3oJHH-C|1427qv>|Ci=+y zaS%H6i~rje=Yd4loSJ@JhgPDw@0-9`v<#OKa$JkcW$>P*66AAI}xrieonybDE5F=Ze zxM69eKDf1$V=aV8&-!$0TZZH%sEVy2d5vx+PQ?nkU2Xa-Q7XYqS=(k2a_I=QG0h>1 zF3eZk`j>Sa79AxDB}8$iPzH8Sl06*F1H58JZIWiv7ux5C9in|?$KK<8TLJZAwHAa( zTEAMiVvOXc$ei{uB};}=Up^6JZeiC(OoyN!a-`fE63z#?%5rirYbaTW1lBAmdpA_l 
znOj8c2OA0Uex{Yq*E&gxn4qN6iyMKNB2uC7ERko^-Gp;=?WR{I$k|XTkHSTI96ceP z?An(}-24Hb6Z*)4QU*^GeQRf^P36Z(4=@~F#Qou7tbo>ch&}=8v`&JU@TM(=7sy?p zR5KYpOimd#LNwL^F++JTA%nm#*yh6BGLQ*a2@^O-z_CEm?i?Ipl5a0?ivlr@ZL|%Q z5E^*W10>TWthq1rY{n-a?vJiZBS@S?I4YG0eNl$ufo-HB)&i1|B_+%N(JRrn%gVID zpy<=Yzawu^(9>(%Y(~tDUGq2ygUe%mwY(E}t0YPjkw8bWw;}#Wln3-LeXDpw*Asd5 z3271LTkyI1_u08QW%xWEF);`41h-1KGr&*v;W;Ergbn5-W$}lGU^_ZV9EEyL81&t} zVIfc@2+q(IFXm+v6(P=dbdq!YATgt7!IWy5ZkZ@A$a;QRo)Gm6)Ut}9^57!Hc%p|e z@{xvRbmFfCL|wIl2}4JO7D5G69U7#JW6gl>KI)Y*3C7o2f3;WNPVSm=ZIKU(@}UGtieQtJN2>3eKVo6HP9lGggE?hB_pJLZa2~Wl7r0TeA-kf;*B?IQM}A6 zAma1z^P44fh8(!O0g?*Z%I&Vd5V6|oky=Rdh-|L|0urMG(6iSGlT&4+kjlV*Pz1G5 zt}_5T@+6|4TkANtBC?^EDcnaj+yR>gE zjrl(m&;M_~byNfLaP{#W-+qn!J67Va%!2t>*W~`Iiyh^t+&^74DgS=!{~d2*VY|rE WP?OU;vqy%QXss`nx_)s;wz?-FBSVUa#pmeTcI zIWj}c`jB(7QQYI4lknWNwiQXYPI}#UfAHq&SMx*9Cv5L+b~75+vbIvKDcJvez*Z~G zBO@>mWvd+d{~pW1K~$8OM*_($*#DjqxC8m`8OlbB|5snMuOhsDCE>8}EzDccJyXnG zyKZBD%;09f4NU) zJhS;so8X({m4e~qtSP^LYo4v=#}(*Tnw^`<%P%|lvZKkKQ7np%J9Y$OZ(h!L&Xt?( zlobUWe|QEa!e*pcE5E?3)%zTREXwvbO5rnPkERz)(Jj?O$)H|O)(HEaI*Yol#`x1y zQl8z-&dje3xbaVu@@qO;ACiOWmJ}F+32s>ff1CKG?M%_k|IB7)aeEl4BJ426*SPcL zZ-L?Gmrc6X>%Vhd_UE6S?KbbZk^J{oq=-V*v^=w!5BCk@--oNmL_|RH1xy<54N6^< zNuWn`P=#*!*^Tf1ePQcM{gCG6{)2cnngzRoSeSSIqD>rR(|)cVcMmizp0Bs z?D1=bXu9OB%67ArSZYd2ugzZWY{ZMMD5TgYJf&DBt;!E~A8S{pXd_{$Bk?T>a1AWk zkkxtn-RZ{RRv%9WQI~?2o9m0F(a<8FZ2xmqt<#JVa{21`@0$bgrX|lCdRX^<=Z5$f z*)9F}(5r!ypPxU;DO88fz3>!bk1JGZqpY0W~ly+`RMz(IEM$})mGglTHG;G zVWFYxdeqRla`Sex#ib?p=#AC%Hz@(vXG%NSPv6SoTb<6%o}OgIBko1&3zS@a|ISIY zPm(C#Zn1O`jYnX8xtDs-{`2}TcVASQ@9sdRcu_^Za`b?c6a!?&bx`u4i{88kJgdu< zcVv*fKUM#x^8WM`v$Vg-UStfbwAq)}lk9sfhp|r2H-6{JQPIJpm&^%;!gPn&`_#ELvf$Yw<64v&^PeKfNDki~kl+QZ3sy93 zLK#GyGMwC5A%6Q`2@ZNV;)(+a@|8VE8K1!0$90O-H|UaD=j^5%s)_rs$dB$mdX;nh z(4p^`=U{ZK@+hGvg!^*&*XP0Kf$iCREHhg5RB1wzmMeY9!_^$pjEK);Wrq1F+C4vo ziSO*Z87)bcIG(U-;<`9B$q5xMr-i~bMdC{JE62(=>!lgMvSQzu-BAA?)A_pA~SAV&aXdOTGJ}N$M zvE-D>*ECrB8 zbxYt0YUxilQV>wRvSKIr5G_Zb{ZL+*RFY`>_O(Km@j-?ZmDlBnm)E~#0{ehJB$AxM 
zkCb<)v2l=IhpWAH<7TDC5iURS6ru~BC*8M?QdOJ*hxTxx8slT<#2H_KA_CW>foy3( za>=83r+>F`N?U|#nM_axvkz&f4w`2<*z;O&cwEO!^c`?{<*+j)5Ij1HtxdP&nmu;3 zc_P7;`c~<>3S|2p1c!PHu?s8amob2&h_DF!M5VnM!ix!H5T9fXSAGZ znPR%_9BVW@aCzA)=EHb({8JSMW=Lj)m4AR-!m`-c8ZBDa3pU02tu+I*~JQ|mgB zx{FDZ=g#=7uXpqFv{2|Dh=)!?)gb~gKR-|7Sm9FhQLc>?AsiA!wZe59(Sj?21F>Yj zbEjCK12LS;tIzqhA=iU6;&&|8=Xd`Wd0^(ocOW)u9(XX%8(~eo?8(C8kPmx4gS+n! zR#(KAr{3GxeA_X+xkdz7+vbE09tGhNj7U|qnv_OvR~<8*GmY0R?Wf7lg#?iu{oNJP z>;oSO5wy%%W(GA#kA5kUD|5{$9yjYa&(-CGh8%ljccxhvmewqj6r??7t(0?gbeSN0 zkz-X})P0hgKtCuA{l!b4#K2K8TrNM!2(fg0Fuk9`tFK;9Av$j+#>b~>Bhc`zmHa$w zL`s~GJS$GOcv3}us*s2FLo+AuamJXE{IcUa`{XqIot{u`Z^9e_v*k-Vb1i61)a*li zJcZKnrE0!a8BuO2DKh{}jwln>_8XtgWzvHyw_NG$XADZTIu#XukLxWzNu=>ehp~-& za6XdNKW>@PWLz6Y9HU#=ozDQ#8d#`H6(-3}Zy8<_0?UoPPss!q$s4pb>LB|fQNGJm z`$AqGA$1Gp)se#Sz&j39GzakaQN;U6FZ|a=u8As;pdvrG1#6s#6&!xA>klX2l4NH; zIo_V+a9tZn_MG$6=YJiAUuVizbkTY;o0KaT4r}+nTHYwiPTBcezxA2y)cZ9;I+@tD zLNh|awm-tm?MaI3uL$AAo&=5>>t3oe03ftmkBy$ZndN)a^6$M#z!mxcz=6KC?0|~F z_{tYK2$DQUSH!(Xo1x%5aH&-Ma2Hejytb>mM-~frk9KE_+s;-QljlqeRT;bve~BRF z!pV5Vjz`+E4#Kr_d2o?6-b0g1Oj_)H@n1s-RJvoCb;IgvD>4Zaof=o@+*`|aiegJ% zq-Eo9U3%tz_c023;s0HRN90uWW-2c05&bPrb{MJHG}xIMbbQVTKaa<+e%~`}_iyz* zp0u^^Tr4V5&rGz<_S0(*ArAiCp~-XiPGMY`%tp+VG)pHXS6)oV`eltx|FEgh$-i#a zRIn`jSOYHE-n61qRI?-u^~wx#2NVArrfje*I*aGt%OcD;-%M0l4(B|>^;>2cOQ~`g zD@*gnY4SaDnUTd;*pTfyVo^dw)Zp;A0etpHX_t+EZ&3OMk?z#=y#C$pM75R6(g%@} zy3HyCzGCHlwNC|uQ4;^YQx7+~+R~O8E-o&@{{j@{7F}!=b1ZQ>AEMplW`_Z4e`y?A z2m)?h^TqfA*yTq>Yh%WWWgfCuXVXvD2!I7rK9UNW`S;vx%H)<@_WgAg=FC zdThzn^hmjxU1PPEg5R>Jo6CBRP|XNRhkNEzQ~H}WQPJ+%;3FbDO~ID^U@>)vuZEa9 z7W)1@Y$#8`JsOuj0rM>o6!x$PtGn4lI))ao@l8!>9I2P?zZ+F8cH&her6#!Wyc_qI~^#$t!1#9rI z#?j@*^?mI(*QK%8(u|6=RsOWvwGXnT3i?Qdp=iE~O&qMABBENO0o`3V=D0K*>MRF-KdgB%Xy*rx+{el@9eWOz%m zFbe+UU^}QFriB*k$5a>_)!B0akmKGgAA;X6WnoBwcc+3;7`9E(V8AhON7SYJT$E3F zC1ibleOW8OGeED^8=bMr^Q`kdDcWo*{zteh-Qn7xe!j{_2OHMW19#Qd&%LOT=26Br z#J(}Jh#)~9ZDPBkFUQiYCqw-5s;!4~<0H+*4Jw30+mB`K 
zQJ#0wzCNZ+e!ipNiJ@LmOgT7N;v9-Gnyw*a%wh$`+TmW$3Co-_rToro&p&VPe{MbK z;}w4S^Ue(W!L;jOFW6!g#EKRpgs!>HmC=iosOoEDlg^N%bC!;}+lNU%J}?%o!VLpX zc7@Qf+WONm`}~wY)cSHC(jNh)$n6&IWpnt%t${Fc-%T+8Y!T7kVC?pViFZy*z{QjF zXZf$cPR%<{XlZh=LhwfwDY0^EOzbt)_VyP#os=vzX}$#q+D_Hl9RS+a1CZy)i9&km zXsP~|BsFy0)N{risvHciyZGJ@cLYfn69pj>-{CB1OYVVBeE2+mzA0g_kRVjr0u(SP zO(nlERGco8-vg?pCyb7b7!Kt2n%J);-?x{(Rdy?m+s|RVLe0OuuCXSI>_z8yO9?HH z6R86=9=c4@+wsCCDv^-RGKX?^KR3zuQ*{pgrIl?ZCBJf+!|2OIrR*eYo0ek5N7hMU z-d0C=bBr8H%DJ8HOfP?ZLMnzyozCOx?&tcR9UNfL>XVGmorx5L@Av(p0KUJuJdD$M zcI#()lgmm}iMl6xRKN60{hhSpr{R#H2Y9mYC~R-`l*Et!%>`}-K-7a0{*_sqVbGe( zv$OpJs(SFO#u=gpC#NKt| zHtJws$JQZUh~~*7D97rC(NlKeSU6o&6hH4tlI@|bJxFkP;6FM)DZC2=#DnMTbSX|( zTrpjU3vX^~A)Iy0oUO#0dCHj#oQk%xqI-atpc>_rlRABNG(V6B(J4lF?9G<1vRiWg zyq*i$+aYVCL-`!v_~V%yh!bu2P3(87eURm3qZZ)*xE2jC_PojYBMzTlVm?SkoxI3tB3wf~#490|nVP45Q^K z)uVbl@T!$%Cayj+)UWKm(x*z1!w8yH-vx;0Lh)_x&sd&EaM0P6ZO4FlY_&gs(YWOg`!0%R-{qLeyuSKSPe=p z-IUbnU4LcR*4_AC5*%$WlVi&YPE>=Q*^ct!a^+nTxPJ2G+8Id2yn58gr07KTS*l?F z!oxLi4uXTQL9Z>T!vOtlM4X*{5c3+^Z@fLd?B^YtM{exn+5j3>@-VD^%*P`Ndb^2s zn^v%7;AYP|dH`Mtq4ckb$aizdjb=}9G1GIF1VcK&8K*@4!M@i1xtBqlXuzHk0r3N? 
zLd#8bt9NB#*D1q8ue-i&!Y*<8I{5WM1QlhYjP2hFQ}M|f3wRR3< zobEc1Lzg3nY7?^psD;DkFGdNh;fgb?wG9@hZFTR;NypoSvUB2S?*}%sK zaV9WDqXU|PItABA^&p22C3J}URWKF*OyXqu=?YV$< zO2|JRR!UKeIy8Wm>m1a2mf`5F@lyTfZWbR^pn7q4MTrhq6k9-b8_FIbK1Lz0w#Bpk!PUqiZ7dopG44gNNgnr<6-O2xft*|=#QBGlDZ848+NYZ_^VOxc) zcZ)`_9fYZ;Op#Bodr;PC8{GfJ6Y+ks1*}h-Q(1 zf9@Qg_i>!8QE+O%^5D_R3%$)O_UO#VaIM;K)@zL!?HV2)uCg^&K6-IT(#{z&E4VLp zwoLsPA!hjvZzgegw$+F0$rHW1B2Hz-H7|dl)sV9u-O!D_lP1H>V`jemrqMBexF)m2 zJ5BX037%i+BIEoBhQIZ1QbE~{Y;X4$sP#(eY5(){Y8t*LmT zG=^=bizHsFX|^L7_ha~SA9nuHz%!;R(6Es2BmN>_-o}?IV5-gBA`T#DGLT4ljB1OW z-cGa!w41p8dR+5nPHIM$L96)sSrVzt~EV0RE-1m_JJFR_a-) zMe2Sz#~gAY)=M{NAu00Bc;b7P&DkPvvP^t->YBQIoZI0QCZa?{Ln^sU7aG}+xl~|-S&}yU^Iy+R3jx$EAuxKUaI>>JwMC5b zWfpUb2RsKB$0Awajoz$r+t7BGj&f*5*8xp&ybrGBgw~Ik(CiZI3>|Ck*{9{pi(i8PmW2 zB-;VuYdjnfPr6CxJ3Wiv5VGjNa^EwlcQPHl?q&_>VSLk)d#n@Uvr$zgL_|pVo1g5o zasM!9&tq-YXHPe1OY&qo-?)Zc3E^T0S6E|ADiwI0@{xq-JR+i|>&u)=K!5OO+rg57 z+ajaK_lQKMld#X5WP|J>lm-H#_+9mwPb zL`_R%jnF_b<_;UH-%)~??Xx~+7Hvziq@rs8Em38GG_TBz1D(Dgt2HXn18$&naK5h!_ z`5IC&`Pp`B?*&G58a2`P6)a$y%Kb&WND;wSCA)7^S47WwMg_2End)kvAQVyOwVcoBYv{xts zO&Tl)c$9vJQu^{r{?hR|-qyK`o z)Ryz8uKtE3wXn8OQ&{#6z*P3Ej>pmtf4?|t1E!wLY%vZThSt~d9O9~a=iUe04+_Hjvq;k)Qeg?0vze3vvHA|NQdjUE+N9Ol|TPQJfh&#pt4P*u+ zwN)0}f5Fuip}ArQPA~h96lY0w4&xquTHno|I#EqOt3W=6ud6%#)G}&b@FZ$RKk&15 zdofD2GH`#dr;y&e`0_2Z!I^TE30bD{K#50+Trz?%azs4qYk|>n{|&$F6mEMuTZnA5*q4@}N;5(zvN?3JDSg zF;x61(#TdHBA;)%?&Ga24*d21qwZBB%MrpL*5Q+Fg?lEJ2%@VHhj)QDSx2j;mIJif zxZMDGpTDn-GIEa9ej>x8SjHG!OdG)P(f7T13hyO;^WTHKZ^@?Y$0``i@WmM(3GvAZ z`N?a1tJd+a-i+HNz;;btX`?}tBD@secwOOiX+sw@4FL`ct>)sDs~^=$77(o3K8fibns7r#eB8n{UyD&-uIl zERyXA8G_!XQ%w_Az}I`_3MvXJ#ld+1GJt$}N$32W43 zhy?x0MJ|yBtUrn&C-m zGF&EbB<^WZoT_R&2aH*}UsI8h<4MCT9gns=sEnL@?Kf?A?FNZyj5|VnW7I7HqG!b* zdgEq~a&K~I#tS&%N&Bg2U#g%WKnkt)oucCK8M$+y0QwJL%$8qvbvY1R?JJ6-92eGA zakh7|qpe5q68Q5Eixy;Mqn-vk6Wsy0+7#7K1r>j^CVM6S^<~%mRweZQ-+t;&Q-{94 z0XJvUtDISC9yhImyg44sp`ossM@#NwQh(IIsHT*D<#Vqb&%xr|!Bx)*g7{(f{Z56Y zmxAmaqCzj85#)_nMiD@NvpvNwnBh09`HosF7SDPqY&k+SH+Yv#7Rrj@E%|C6se<=x 
z+HXL$t^^}!=?=yr@2UOPL$;g#;8ozsl;s9t40K`P&hTj&vCr{VfW5SM8{HpxSsN~e zuO+LUpE5ZR47M_%$7f>MAM-;a=tbjBNt8mPjE*A^54O`XE=nd{A=r(`h@T|1(1O@M ziHxp@`3s*Zm1wTmgzC}pNAZ*Viv4k&e*rLxz`%X&pIUh|PnCF1tWTJ@?tHe6xoo+x z_2U%gua1d{@_P1VgXRr~hxQiFTeH_+lk!~=5M#^lBqwcc2OZI_h?bs`??L+Vr9Goc zVPvm&55&ISrTf0m{vN-2KSliG{7XmRpKS_~HGV=lp7u8bs`b}3ZHAa$uqfcJj(iV` zD2@B~$IK_g}Eqm6YRKyHn+)*-D=6=F_hhBvwo-lK0fjp0Y99i}k%< zs(kZ6DLEoSdq0wrYgvn3pZXaw-QhR+2k`0Sbol!v+6A&mN$eK@$x8!m=*R%p**zr@BVVJ4?d!4tTjiqr-| zxUVpnNz9l)^J zWbx2Tg1t_{nxPc zw$*GRwX{vFcc2-ny-1x1H6aSl(cTZRJV*E zbzizU-x8d8@y&#@BO;*-dT9|}dy8*4;;6aBUSC(KE_ayws-XNvZ_+mulT5H7RYY8N zJ-E5e0jmkHi;}S;8rNZGcUc4%63kha3ru0Lnbn_HSPeb3QqDGBfOlOdTD4|t||1FWJ|Lf1DY-`Z%9LR=LMchY>x5=$jÐD21} zDvRz*EO=Z0#pjgB2$$HwpH7NGkL-py>jbFiX3l}9h_UEhR(~>Fk^Pzf8nOb2geEU; z{_oCOyFVi@jEiGZQ+b0p$0{mLRP`$|;^k}wlz)cxH-$w+xZJIw(Nr59kJd#%w+-B$ z^bsPc)L+xJA95q*_`5H;jzrSq;bFn9#_Vn$GFL0`Z!oUc^R$Hk5>~@*nwrDx<;vs7m%u?16i%u!+dd4TOi)(01 z79?P$%}%cdc}>=MGH(;oOrCl69f?;I0j2I|5f1XQOxAS`ojG7G^DO*OF^2|e91)>< zN2?ps+??)FCVKLnT0?nnw$-L5o()PmN!vCsGoC0HMoc@>`LxU}ZU;EmaGvRRS0G7n z)XWrL9q4+@FKF|ipx8J3sCnk_sYWD(6mvp&M{jXK1117%=ED@F@ObK8Wwa9D$ign*~ zqbm}yQu|EevWFtCCxouZsGSyh9DCGdpd!J{uL)Jl(dVm!*_CFd3z8MaW(#6@wE_k4 zceP5)gjHdo1Y3Fux)*uN+Om04?9d4zkv5Rgz@$Oy;A+;^1-@74aVMqdx26gm)^pQe zL-_DaYzMQF?>@FK&LMJmw@QCHvy2=%Dgzdd(ukuo*hXBY2T4)>Mb`ZZcLsFW^Q?hq z(XweW^Y!v4a2b2)&3$tf<>E_X5G#W1m)iUms)knlx|Wr-o4mKi7$8B{LFdZWhOZ)& zeQSG$J`5u(tgfko3q}>IYgkmoJ;($4CEtPU*h2)ffoczf8yvCje7UCdvOr; ztnUqQr$&qEgm1kkV=fu|=zR4xzeL=0~o>}t1aKl@xnMKI}v>ku4NB1U2!pHRJ9{$v<)aJkXNjJ?EZBEEw z91g7F2AlQ`WK5^*C;(Md0Re}Nzs)bM6U{w}e5pSE@z&en86Ry>XZ2bIqJy5UM7Hn2 z^HFR=9!t6y$c)Ed#{+PqN(b&_vMQ#iXFO%=PZb>B6Bf8s41fQKa?3&Rf3!F8TNp{H zmHz*j?{AteR|}xVxcquNe(I~+Ff#9H^L6ifRNp)qqdw44UkeB&hWlgtMbxdbX3=rH zatUj+N&#`0=P!SLWLsERuqQ(lkrdnhD5L&tz4|W=tf9+)NyB`#DP>M)6}PX>8k}fR zS`y0FS@06wPI2H8f=n4>#;rP8@ zoMuX{TdkycQmUVmRPcNp47GXd+}btAfRhS{3MYY(GWqswYcsKw^UPOCJRD{`?sbBB z0BrT`{sITG@)HOCsn`CA^qW};_e_FI(bI=wZtFS0VwWeD@;iW9BYjN2)}WSmJ)VS_vY%GVcS!_I!M4&-5D4@{{NgM$u`K! 
zF9&s>*3K;kVuW)@EG4N!!8g+!JVJwgJaVriWy;R8=G`Wh?8gr@4>Hh?XoBv4aj0z7 zn|VF|zB}0K_HjTFV3i3NGiZ*;$=Uhvg{|PNAOz!O<}Ct((#WOBw6XesKNY=RUOfcU z=d4;L;dft=Y^GZF{iU1caYn5E4k78<^dFgTB%(D;kbrL=*1_Rdvy!z6&cDsoNn(UY z#td8im4EuwpjzkM+ewrkl-BcOzP9ACNO~*xCUT7uM&f@Z*kuEASGi01R;}YW;N~O% z8i6%!4kpa#LqX0PrMr(6eKE#hoiuPK&{F#Jf@bh>dWoDg39lNb$E-CLEL>)l%>&yi z&Zey59%aN?`&av(RW2(qce3Ac_j)=8U)nIx78x0O zR@Q6@CF+1mLP8P8se)MkG4y~`N$ec?)W5kGUr)Zd4{oOK>^FYDdZIWu{k2op#0uDT zRnue$e^%Eb_?2C<6qTr7`A}q73p6|t)(e=Xvh}NYDLH&>Fg-owLBpG)rK8lM({5I9 zqG^0s14_xBCF>S@=3++>F7$?3V^_#>V$%(zE6AMg{hs_A;E8&c6E$+@i2GzN`}~i? zwDO|jEz<{CPi@z<>4+kUhF!fiMBiSAidDS4Y`t+=FYa;FSspbVT|Vscfs4nmPMdg( zK#Sra?cdscVmRlq1M3(3QDBi#QBmc^)@M%&h+MfOVP=AnJT*Ms#z~#xTFnX%e_Z#% z?;cC+1Zx%+OE88XZr+XD;tr<1ya4Os%VsLyX;y>TJwjgXf(R$~iB_NbkupOaQ%{qH z^Mj*58l~`8A}j1*ePYQ|eoZcaw}XoJu&j3LbB>sI+tfypmy}U)JMgL{NP??Od%#{V%Tsppcm8 zm&3qCecJUfEN9hU(dDqJqOOukkXlXny^h`jKyG3#weq8rF%axT4u*!}wSxaskQ>ZBMKYGIr-sow5Qss`+?zR1Sn z3h(pBS!w0xjIZ_EADai@uh@dksw(WHhfeTZ@ZSK{<-A9vvX#!n_TS!Q)0p?-xOsrC z`5`7`XLVWe_DTrnj{9qj`;#`d{CxZK;E~qxVuUt}c76I`yj#x(mYvlH$XnpCR1%Wj{`D}uNGNf+DQJiR<=2Aa zGLfL~WSd;2Foo+iAZ2fB?nbro_Fc{EuR3}XHeclKirnX4@{E-@Zz%{XKVU8vUU;Cy z`SfM`qe^Yn`lxMbKeO13)||MAnt_?eLiJx_WIoYfXXd{PaW8ids}&QE;Zy?*?8B@9 zVCQimC2ke@7pJoy)7PEg#8CZ;vQ>ozu@H6&k7I;wf!2!(swfeQM?{AooS9wJqJJDv z0p)W@X_b9NxqWvS87-!WP2l^XIjqj~MtsI@qAHh3%uSg!&H1WUj`$EH4&v8JLYjb~ z5*JDScG*5fj7ytyz7|S7fJQ_>BtJflnLhH^nL;Vp4LxUfB&YqPS^bMF^Px*`b?zV8 z*@k3e_0@t`%kx&9E!gl{Sw<&WjJCSrnI_cT;Vt;&C#4)Q=W29gx@&y8J#8tMdz5JK zDzd3BxmkE2+T)`NoPrkKp#Fl445t7HWZdQt%YOw!T_L`K4%n;rEwd21F8_pr5J>^7qlde4nW4u+~1B8!^Y@$sua;$Hhy z<#tNDZz7;gfoE71U38kmJ|Ld4={KRd$|&-vBFN4jWb#66w)?7uz0&i&$Lf{hR6avZ zZTv2Xh666OXQ0FNZ$-~uQB+Ew8LHE{?kf-?%FutNt1D6)Ru{d4N}P&sg;TQs?0l== zMKwArG!?pqA6%Fv=~Dvr&XReLwJKqFn5bF881UX*O)2o@P4oAVhYspo{(3FsH);@~ z*m*7x$wkL~6kI08W_l7sw$M(34Qwl;fNT`{Ss;>U-=|aZ_Ppfhm%p@6Cg@BwDB&yU z7i6~)uMcE%Y7bi1xDJxCLF~5TICa_jEal)}h0R0*6^M!Wojuk(xUbjRFWE>5xoX_g zU}$|+D@E0K^SV~zj;CQ_mKcQ8>=W;rpsChQacxvoDI(y3Ce)xzrWZChpw^K3a#Xyp 
z%E;8|w@=~e!)L=|aX=-H=n)sv;OUvGp8hUeaA_hY=pU{J^D#bW9&Qz3jT};vm&Ydu zEUoa}5*-!m;;93V>xB6xNc<>^V(NxG!iGyuMU2*c0uC`yUqT7=hf=TjplA#3od9R} z*#lkJx@3a((DRxnY^qbYUTbxmXaEnirmyaN=2N`v%V*(60zt#-bZ+&<#(UJ5T@*e1hd(9lpJw8OZ4O!aQ zU)>QcAzKh++!kadYuZ4mw`OZwVB_YqRc>#o!1w;|R^0L|2KOboK}NCLO1C*XyXVKv zR_>T?-X)j#xYmE>j|EXcEUT2V{9U@)uHuB&H$U7d9M>IS2xponu3h-HuFuVg5nq0t zD<)?7%dtkjw*Lgtb)Zi&7LXJ~_bc^fBru~H`YP{0ya;fS7!zHxkpc3x! zuxHnDRI_LWuN76#vlqQ)Gf?4$V>%bd&YJUxgXnEC#P+-9=N_1FMpLTCj=ls@F$l3e zFBcF28GzhB*TX;N_|^lxVBrHMniukLXA52QvyfZ8A>>jJ;354f=UE6MpZyE6e&|_@ z$D93Nf$0hRV1MYttJs=bA|2BWhQ{{UkuC4R%+pdZuodwtu5%=a5h%ps-MT-hbMA2X z02u4|omG~d%NXe*U%c6KF9ihh4F6-LViwjdAYutMum`a}CQ%o8AZtkZjfL*X;Z@jt z(+>}gU@~BHgoba?{rQOwKVqTO`uY_RZIxF>`g>_lvUy(P!B&fhQ&pIGDCH6DKtnJ; z08G6PFc%Jt6D@sxDhJT}ee-~;I!r*aUBwzSsT_3RlJ~3+<=gy-VK@M71${r$9;O}0 zqU^Pr!te;5mZ+f62r2>CL@si;$>{A2)a81ZHmzkdP>P)$dIxMNO9jR!qdtWX0d~t| z(URR{*4~eR+;mFtg7*yf4#O0iGyY?l4Ul?F23~S6wT48lam275du^!B`ONP8y-8}C zg5ka0mXZ>u#~;kARHzu-sI{=ay&!wyh5CqVBIRq~;l5J{R-!IdW8syd9n(#t7|ZXx zycos_`5m3>&i`{#uf|Dpd7o?8ON%HR@9-*Aw41RaNnK;&rRrVR)G-N6Kt}b-KZS;d z_KcOUD!YO>f$oDqfZzKI)zaaN6}p%#<3CB}W-_t6m z2O$nECrfeCDHv#aj|f|Qmo_84s zR-9*Bf>((3q@Mt1V~!lq{rEOz;8<>3V`}(PfC0mkx6i?i0f|E0d~qumc#7|2 zFEI$8GCz`kc>_+h zh9=WD!#;%_RuIZv115?M&pU=7O(O5M!|r}FgV-{*-kUw=K44OeDAdRn$o4xt{WqIon{T7j1C{8I$AghQ%;Pf8f9XVt1n0_?a&bar-n$nU4t|s}>S%R5 z(}i<8!nj-ovok|qqNWJ=_zo~$3MAVqXBpSNcC!RTr<8snm<#79X_*WJaj?n10P&(9 zWf4;9e|6pqg44?&jpo6qYB<+Dl@)O96;yq4E{b^_0>!gBl;H>12 zJF|lcB1%0GN&gMmd}D-Kx2>>!yZ#fu0tE_enw~^K131ZsI%EuORoX+s#l~p@%Bl<0 z%)Dgs484*g4tT(O3R5kgy2v0tB}!(h5V49emzeErcm_C$R-AyI7U2Q+tYITs$Q7u! zP#ThCsSo`d90{lW`pc!r3lO)|x?a>ld47D=V(}HHh`%nwySBM)Je3F|0L=8|ynnfquxV(yd^k|yY2W3xA}ZP6u&*{~M? 
z#B#0--<(<2CHU{s7F>gex6tNvq8G*C=(n|ryo1T$B3j0F0&d63B+mubgwb;nl{xoO?rCr^Q;4%WrQyH5ietH*N-1C zI2=L4tEXBoChoQ#f3!Dee!MqVX$;{24P6jk{R$eluJwT5l`CKD#0z`-`_0Fc*z3P% zBt01V(Vnj)5>4ZA#E*NQFRAWg5MYG2xamDu#<~)H7yA@=)G$o`vc*Bp`Cq7k1HJUv76i>O$UR@dfcrbk#68WZhzQNhUqCQO zoD;VlU`i8nFS*V1RI~k0sLTfFgM?qK?^9xdppNV21T)_A2Ct6n1V$VdA5HcFliy10 zTYF;MA&XW8C&=m=t|61#Rq&7PzP}}xSS9+5L8M5Rn~s42WeoX@gM(9`nJc^b5~2NV z4?n%V-8B(AxbORSdC;c$qNH71LgFsyqP*Kx6w@CaGz{V}E>~q+=mg~tDycSO3;FEK zEVLWPc>pn2U4`Dh>Z?{&s7nm9xn9bQ8T{asBM}FBm*{T6M6sUzYV*5T24F(_OIg0} z7v2_vJx9>9f$5m4FFXR{Dem~cZF?o$Wgi$t#`r}Ywr(WZgwY~?)_5OU6PG9Rlx)-& zu7wwCIS74yEU1gn0zD`5KdAFzztY78ah@+bHoRXv9BXoSm{}B!;WPz7uuU-)hE#f7 zu7;I*W9$A6{&VH!ZjSB(f9?1lD`N%OsCB)Bti5ZFzi}8vfAD=RVi(=N&e6AZ2cqTl zFIlFyC|cA2<+kDe8^p|Yc_-h@?G{fhm6pt`Dz}D2HD9eLTjU%yEN4g$vl($Is3=28)5Hto{6JbC=>P$H)M!7#5G$Q^6#41$BPAv61NEyn3*0pZ|TaA5^FM0j(c|M=PC}` zwDcsTi-*S>?}a7%>#r=!qI77wOw{gZEf!d)(Dfg zi0N+MiN&xG;F9Mtb7!vdgXUgnHX(xI4x93~?;h|v5)u-f{O3U9O95FVAZ=jsosauM zy;O|roq%n7Up=F^FQQ0y|7$9!kNo=Ih{2h0PLO16W>D-se4@Rf&@}qUafhuH6rJV9 z=DX9#6V-?8#45L;99h!*HLJWcP*9r$$=zJc^f&U)ngL8t*gJ@nLD~H&JOF^cFBqR> zq=+&Wg0%v#2MD9-Mb7jpyDXK}(C1jSlV`2?he(JJ)PUyGW6A)Kz zmG;i!VaCVTx)&*_0j_*Z%JOi{+`+FV`UfUApRUkZM6SsrFtjas)a6BzeF%H*vx7}i zQgp%wkj1TgD|A4*u%~|cJc3HR>?iN!Y@_PC1P*NI71{ZPD82hBAYFBol(g0mJb8vc zC~a;# z+G;>)`5_y&T<^@xXIYV`wjdjH@GoaI_!D``*PQZAtZ>+4V&@*9KhwHGWnO(a_ zZ%fh^VL%V)Qk1GtzPVnsl^lJDIZ>i3pT=%wpQ_!eEiR5z<=d8#muCn&YBuV!2YX?Z zjqN~xLFll<22RAXWoCRA5jUd5PESp>a=ka!x566>^OBeMN*l-Jg41k*Y_Sp}iRe3} z$mR@vjrNy~*_W^JH5l*rt1x$F(&d^~uhsfCT2}%xdEmHvo;vcRuLY|;kcM{urI7+N zSq0qiwnxyl|3U8#|HxexjHa+sF8u}M6!Kd5jTI3TSYAz9W3;R;MY9hr)sWe;Zs(b7 zzFmJ0zPXbgC2X{Mi5pG?MI7LJ`l;A6+K>+KK|}RHiBpl4vraJU9GofEMlM<&ypUeI zUOVLSu^yd;U^sxMm!zimLKr>+X9^h#G)OUI7+7BB> zI@$SI>rn?dABd&g=FXFdb446PE_#Z^LD6QJoYKbBj+P4s3?jd^stFsuP*EoMVAA%O z=FQwgGHaO!U(Q@E5=LHFC)LXXhExKe-X)d2m0z{f zs)U`O^BY=7=75Nd3H#l@G(@B>#=^b_CAeTSLiT!7WKbX?;Q|bqe$YP2jj@PZ&Q{a4 zQQBM>((F%Q6HbNe1c>5nsUz~+LR`@Y8Pt~%-nC-cP@0y7x$*^HF7^XMLKC6HL+~vQ 
z{eV&lV7}M^BQ=+>_VE>jX@E2~GRdm*K``s3A>sRNOn1-5c!-G_b)0da^qA5uWHi%yUhpQIgfy& zd)%^KN~KY{ zYiJOW?(SjeMi2q1p-Ti&y1R2g8tFz*6b9*%RHQ??^S5U2bI$wzsn-R3YpwOfeSe;( zd}82X3!hf^MI1mdhs`kYw;`XbpM&G8sQ+H=<9#Vq!)6S)B9do#y>?AJhA0Vm?@qJ(yNLn9#Gl9l2` z&w#JL?#1^9t}WWbCYl;B(c?c1N_6wJY&ZGmPXiPf!l~c5>g!iz%v2?aDNVxq%JD<`DhmDF zBl9=u3PUsV^px-5rANLhZVH}BmyIO>Nq)Q(ejvKHTb$=Ri((U{68D7#EE>&KQRm3e z1&<~MT%B&FWQAJcZJ1XO(yH43CAX~Xs0X&-EV9#V`VNod4t1KHn|KQcw$>^2no!4XY%dKFMvX3z#1JV=JDuB*EMa)lA_F@{e0E^(ONFuW0 z$CX2GOLBZwTHQApK-3=V)GvW5&CBLtDCWFTD6ky_&Yf|QbJft<62yRy@#u%Pmyxi) zbkm((28}e)qtOhck?+ea*a1GGNz2JHPfi#Vrnh#0hK(FUCGE~yx!n27wAEjhX}41; zhs)zldpvynRN&^`4I@Z8!$SU8PPeRiaTU_dxn#wzc!XMBte-w>TpQ z{`YOhc$s@|4xNUf&}Z|Xq_}LVK^kzf*Cm7bebzuucMc5+iyoKmkuR!u!SFSfWAQsWSF$Nvk%HBJb7;fJRP{jF3TeY@^}H?yu=#!`7ZqKeu3crG|j+g>zMo~?^h_p zAazha^_2G3#jYfr^BW;&G6xZZns5%f*RotXxL*N*Pb>7yxZ% zk}IbTcnlCg2+~Q-=7XE9Yi8*FtP4Y><}QXFzL-KO*%{k4QT~^eF=lq^pH0E3um}ZI z;@-9{d6uM>p!qQBA*xny_h}d-%b3GdU&+J0xJ)+{HP~Q6lL%l&K#*2sHo7^NuO@tM^*d3Un!_{c z5vd5sXP*ZKDq^+|JeDgaztIkUG0|6L8clRKr9OmD^QrNdV{VmJazZ~WJAR9ikfqM7U5xsVqP~Y zzQ(@1O?{8vlL2xN11O{GY4UTi{d`+;An@{sR^+0QRt6O+mNq+TOTrsb==&rkK8=kx za(htUviEI}i=)3>=;?%FaX`tk^>5YzX2@=W6Fmdc2Y9bJL6V&S^m8UatHugS8JSLq zEZEjk=y6WzS%%yU5$&D{w}0k=*So=enO~P0k)vo+`aL45Am*VS`@QJ9&2<3|m^zab z%J)wyD#3Dw7AX;;4rmH`3duL^Z&K~Ph7rQICfHFt7+phn(I0xP9<*jmOF0~r#%oYj zbSz}}{Hj07MZ65dp_6FlvRp!G^R!VHYW>Whdu-;&gdjT%uGk3HZb?YH>*#Y^N^Uwj575 z3AJ;8(!iKBFYonEw%20mnsGi4vI1pTix#7wtg`M;u(y7NqNQ|d&(zwSw5b^g>;vna z)~fv(pj4m;HsFiw1C{EkU)sF$S_3{XJDE=mwLfnyoThFRO5k#|MIk>vl=x0gDi$xW z4)<5)a(h9Zg9uBforZ0RohCvUWg_GfxdO*yx$gKu{X^;}s}=mi5`|}K)7P0yshYWo zdyt6{uKUKgG{Fc|k{w3y-Gl{0wJ>lN)N;cPGl1a&IaO(_D=z{ozCyWLM?yNWXC9`z zg4$)Na6L-Kj~h(Dgh*BhZBYR&oBFMz#elN~G-2V+z`@(GhT7#Ns&@0*`YfE0CW^*Fa!|2)eeSK98$MJS^pA2;O>350VF*6_y!RLW;Y?*x;!r00>+n&}$C6Bty zkEpR%EAntji<%t=A3ZrRE>Od6-!S=kZ_|t8hdU9B>eAI)w8R33QRRuD70^`rJfs-` zX}1Q#?-vm6K}dH$Q2rQ=_Sl{()kl)YJ`I#Zh9r+FGR&bTtOQ)mYK2Zm{0P*gZ9X$N zo3(fE-|hM`%70#$3|~4VU67GWx~b*Tf(K+w6)NJ-bOy9Z6lB2;^FzN%GRV+|KYboo 
z)X3M6u}St&cYlaRs}0pcYYtMM=zQM|mY+O|-V>CtKHUu1=na`m|Te`i``%x zKkmurUnmR4QvF3dvuwgVLb}FbxtOr<;hn8b-{bNXt6($yxG=U%5Gxn7v(oX)sE~Ij zP%9+s0lDUdt|M`@F76`wJwD0^qh}>~ybQx;i%H7VhX=5786BI@=}q2i|+l zCb7K;9#(yAK)7K4vx!L<6W_^RTU)`yZRoKr>iKHh2Shrw-B8)!7yTAwU-cgGWMP+H zhY`aRFLpPl5 z1ybw^I4u{1d9;aMEU(1dpSVYUAZdSbw}sA!hL|nAuj&oH3&)KYCReHTv+REF?|(pk zTq|VytXYk)HlSKzU5+FjSG-^Ji_6cPwt0P>^>AoVeg3}cnSY%E5z+XgdeD3B*$x^L zoHkkZWImpF1|u!VT584SLcYg0NdQs=+W5V)S?Ep$iCDWmb_!JW%L{OwE&Zr0LqWLE zW+sKdtd}joIz#JePmA?=yZ#R}e7k+20>qh<9VCEKw3fkQG$wIu{iYYr0IF^D42RtR zpC5Ck*|kvZ|AeaH(b2lP*Z!jyy5-h)aiSo-Ms(kU=P5M%`y8iDM2i<1KSUkJg2qf= z<1^02XJz?)XB7ghWt0zScOA=drQ*)0{?D)}O6JOhd<0~sy17CX;z`an(6xWXFtpu@BGE1JEYA| z^(H{h`aA6+UOPb}=XpQO65?T9nsGeJU0OX?d73Ta2A@JL@0i0P1<# zwNiIdq@3?DLN>_ad*3{^@$b#JRJyuVi0`=Dcvr8>Mh58;pVw}k;;1xMQ!HFY{8Cia zA$dc`fW*#ykc5)%ZUPogSrGoL>!L?t5;EFDquarSPWiGzBPrMGhSNEPofX=cFHzWn zlM;8if5k{`BNZ9Z+8a{4M7?N4$8lQJaVY{yN)Oakg_0GG0IfrfIGDTt5C7yz{IiRf z@v;ub(z89m=qr(;#-%o#HMcI?15=koW zrBFYO?uM$+pKPh5+vbca7H6g~m7(GP+Dl`RG(U<{>?Q*JZl>Oz`Z9*a5{{MCUyU1N zpab9MRPEJvT)L=8o4&}SZ#YEKzy_1}IT=U1?+gZ)-!_h7ou?jd%DQ5a_-ts^6*=+IE48i#;;lsgYAUT4V<F-bRpetbnrlVb|S_CUqMzDJe71LneW=Z8PPX(REj><#5sOvhQi-yDqiu z4W3C{{lq57qxjh)8p6AQ^<%?jhhWAPm$PvACj;t*KQ6n_f++JhGPkqjmBv)F`Z%eq zRqU)w-4ep;+=^M^etLi|HL;$}CJLHQWy8h@6>fZ4F&Ci9B^UFqp%VEjIKf{C7?-s` z89kYb!Z#Y#yTIOaU@sb<<@g`$J-+NaHprIdcVF_3_cp$d7F=Q(7@p*A_(Yt#dTWO%q(34!$od8Ed@o{nlyq% zM8wF{KqVP&MlRO1e|5-lkST6Vn|>=qC#5%inpkG3yV`O1rD&@Mi}^Ddrzs9@YCyI` z%6Y-iQ6lF6eom@aYEDA)z#4WkqK1%ZVY;^2z0%UyJuYEs?d1C62<3!ckVz6p(Jm#N z(|!JNU+E(x7jb7pN2ozIf{3DwcvaS@UAS>>VcB!5rx$yfj4CNT3R(g?&hdY^-_wpA z%oQ^p{+x)0vv;@P2UBL~#@%fjn@2outXzk^<<%;f?~&?{+-+WdBp4w8dN0~W5nM>Jw=0I!*} zHki3hRZ38|_W5N1kXM$72l8)*(v+(&0nG7Nrzx$rkrw~FM*xDbZt%||HcE+?XR!=kgM7h^ zbF<0^qib0R$t;cyLjnxT4(91H@3)6bY{#Elz=4RVxs=y)=o>nX(C$}g?LFQ)-Lrm~ zs?Bh(aWu#6yL{&kJN)#m)4m#L46bhHSFRRH4J18eSFy&WeTl}v2&-8!nz#J=+U?MxtedFf3D2Pit$+(0 zUH3q1BVYZ52<+_qe|YX%<&%kQZNR<|jDQ)i`*bXCLknL$T(G+A%1&I}+tnoZ;%H3x 
z#i#hg0;igM0thzK@4Q)CDm`pU1XC?gXWZ=&!^NvxZsq2L^+d-oHY~6xR$6Hi0&?QF z4?qn>w>1It;z(&6$LH!{#n z@X(ejq{cm_Atd@}tUk3dwqZx9+X^~wqz~90$HX(kzSjA@z7nc!y?R0@ImZ=A%4r$G z{%&M1s^e4q5AVDV3cK32v^X~By9QI=j=R_jNf*5)B5mp9J(U>o|0UCb+DOKncDWvK zCJBL>?FC?zoV4{36E3u>^oP=b1X&}K2r+DLTvLCc<7QpDNjI7Dz>W~aOm)1KtGkaT zSI!ixH27LEP~}0i;=& zh!%Y@#00eyD!}nFH*uBUw6i_LpKVUl@qTm=` z!-;K1=u$@?Aql4cJbFta=2!F7vXGFiK`^+W?PXhTQwXdbw#Ic&AvHx}ve)qw7KYl# zwCSb6iw8iJf&110Vph1uIb*8SOgFuErCoLGf|K$`Bg4+STypv$d8oX7$>Oxe;*${r z2HAJ+<#yi>418X1-QPS}l;jVb!$Dn_bf;0y8AMT9k~#E3m`JDWgX18P&=)b3yRi zAR?cyi5AL;C3=INOS0WHE>PDvN+Ijar<|7+Sp=!)O6Zc~!jO3_mKxhF=qp`}t2a0& zjB+V%GmdZKl_0(&*X4JOIia6d|LasY_!N(ouK0!A`!Y*#+S zrXo9jRcX7W1G%A9o*d19QVq$rvwuCW^X7+xvW5PhPYjX|Gz3WTYY4d3c<3$4e!t{1 z!cb#ljrqIBMPBU>dZ<3XvLEgyVP%Nr#i}VT#~|MZ(vXsOY8AB(5YdO~Mf#7Kt^5pZ zrFY}C#R(voGM8!4)8Fv&ZNG|D84#rygjC915TY|92{hjUSx~#N6;E%(Q$e(XtJE+O z55oihIp>!U8nf}Bw>U?si!?vKI(`n(W)_=yp#t<*w@(W;u}OJn{ywjh-A(iB>Z2Br zuc9b5Zut3vA=iuW!uf+!U%Q5fO8-u%DDtZ5St7Fci+edcAGA}mE85xA$ymZ&vM7W0U|!UdzdVDVZSM{x&I9s2ird1tDCR?L&SyDFjvUN}aCXkOpD1q4TG(b2?EZM!vdeA^ufjvMz8*MJ1U z#i?&BO`ngIQX8c9yr28fJ643qtk1_VvRM`9aE*4ZXmcR2zqzYX6-K3)7UoY5p8r$L zUtbcnw~R&Zg< z<@DO-!sV9R?iTs$@c3B0cF#RaLRPL5_{D9)S-XkaTeb$1O1&MkS=S*`8LE^l+aHb* zk9jTSHcJ{pC$Nd}Bnb#FI_I_*xM|%lq)>GpuHv()PS!%9c8>5bw`8TuVuwti(e3Y1 z**9hGAv;BihnKQ`HI=$L>>eM)#eqUzODyky*?Kp2@%KG^SjYj@wFGOw6RbR&^pfte ztrz8p_2V4=yU+f0`O}nZXbn~GX?R zOiJsb_MVw#q}o*(tD$o7%b`v~Pt&UirSIQC0@!O8*lub)rp}=4=ka0+RKp>6ZKa=# zu=aOPCz+nIN#*h}Gl&0J@HKJDX*Q|4^Dg~yd-f6R({etP=*))^^})=qflF1q_BWxA z!-?_BcMVdNZKKRT40|@9_s0(Yxks$hIz&D*`nRXy?bCK}RdL^VZV+RK%{0V7J5g#{ z(HONSVpdh|n(f3#9VO0o6S&J0IQ^QHf1k225zgI7CwfPA(cD1T(ar{+@w1y*jR@(x zGYusyuHvtC;9@7DC>nnLTFhIICaYef>f)Eoh3||+i_1j>cyT@t!3Aba1T#lLI!;ze z*{P~2AN+E>&=@pT;XbyA=584>MJrm)Px3}x=eC)}(yThqah&#oeT(`4deJI9bM+Zk zr0xOMkPY~#sNcWC9oVKeOTEgRuQsNlF+NZ8_gxL?3tLqv8PROA*XIuB^wD4Wvc`~E zyrrru@$kh9C6X=<$hFXEBvkwSLrltO0(>8$QpIzCyxZVBvybJu2$hP zDDxFlX7Nv8didx$^dSM0FsW~iLaJ}HU@&WH?+tH(HDcMvfO?pSoWdGIr9szdvBaxh 
zNs!AKxWenszhoZSoO?OwhPxK80Zq&YtV{{E>iR%=88lh<$jvV&w8(K|iBy3b34)e@ z4@&X4mr-PHa@OBE7|c&egd1dqd=*hjgwKM4u)8CpyASMBmcee1i6U!`5CB_)}ct{X1W46RJ`f4r~<+vTE{>>wo!VisK>=pJcz znll5xo4>Mgl+%t6oenn<7qJmp3?gM%o(sP|sPF4_YaJC0M!&If$%KF<@8xU<5mXJ` z7I>6aSMR;9OfGEGQI>MH6BTH)5F~KPNw|6+G(#MbU3_}T*Z@;n;dwzJZJ;3j#ZJm2 zpO3{sjgR=sH@1T7z(Y^%oPUs`_R)Y7hFCT1rQJ$)Bkvxoi11MwtizS!1;rP(J$v28 zWZaahD(?m-K1bmWc}&$`d)LZE14j(_RYav@X1S9@#BzFS1Q&UweV2QKkOyW{U51$H zS3d<0FS5B1hBk~YZs>(-iTc!Wl;3$0xtGbx0}Khsz5k%Uo2Mr5CDXMXUMeU-n|%VZF3km23?y zYI^su{lF6?Tonhb>w)j4k|}dri^axj2RH|i$fZ%tSS0ry2ZQW-*n@K!ruO?&V8xnEBfH;T73eZ z=wFnK+P?0kvO&3CYcGsxs{nHNJ0l`gn>mX61|VP-tX_%yRQtYtT<=u#2^nrnZ-GV< zDAm9a#ec)wUa45>qnn$PP4(2Z2yi8weHsVr_*++Bf$jS2vkCd-cdhhV#|Wju%pdOU zQ)dz9E%Rj5W*mpo#Eh?em6%RHIvrP`FvL0?TgV98HYBRLww>`@(x9GBa4(mdb=C5J zuac41xfJR3OXYwbtO9iC!D5zCA3G^NqsohvC~ddYm@HANXAIQU;{pnwkE^fT#W*Z{ z3v1I8Qp1JSQUKHP<3xL)`A+&;t6H2`?&jb{8JuS9Nb5;3Aa48{Tk!b3VK;?%fnC1t z@i20m!#s5~QemK9t`L3erm-*JE-cBp_*Xu&8KL1_c3*SrH_SVqcs$z^oV~VUPxg!b z#+PM#J{^IKvCyN|7-J@4vcD;bpB|qWG8_VJWKtn0PDjBmPALy!!$59l-?qF`r-@!P z`DoqW<%enO=5(fY&o`XbUabl2khqx8LDSFHn*Fk$L;iu_Rd}RVyvNW{9Yn`aQW^&T z_c?s^8(@ymu5oqzx(M(oo75p9+?l_#2r`0M-1tlnzid70z>~X-5}+MwF**(EyZYvL z%NGpcsp?w6oq6zX7Uo&BW~w8LEK0~|x>^zTIXZJ3^V|_@IU3MC1erAa0$1lKtUU;* zI-6^zDiLe#KJ3r~v}JB2@pyIL-5^JbmAl>bhXeNU0|F)=3f_E`#ZQ4J;XkDBVRH!L z+kA`9NYxt&`KKQuNYsA^}VFQHCN4IhX6~Em=txa(ZWBVy$3a z!6WX9Ux?oCVLt+UvC}fbDsJaF|A~)=*)KMD*dR0px5qzbNi6TESS4`IxE$4WFhKwPSF3Ud<5pVSHxqy)QlUxf<@sX>lf3f|&ppC;qNcAt zrbT0+!vh|7_f@T2-oEBo_lA5x#e8H`CUlb#uteM?U#Q*KsNaTzEL zlqW7!;hZ8if?~%!cQi36w0g8jGnp~S_XFY3!Sh+(Lh}X`r)g)QO&=i30q+I=4HPU; zq9G!pKW~bja-pO>u>Yll)B;&Qz&s1{)Y}nu=Xg)Xv=D5 zcN{*uks0v;Q}t6%Hwf;h&sFkPQmsd>Fc(>K{TV2uDNk{TUmsbT{0=6fq~!Ql@x1a- z-^z-4YtDIPLXmcx%r>$rBCdMQB<{J;T~1ZCPshC3o@c3XdzI5D0SWzy6Ua@pk|;3r z)v;PMe+{5vNU|O8_l|YIe}3^%!LF@ga~i(1De@oj5%xK>jxoXhXdwy!@u>ceQIk`p zu~_KPE*N$a|35V*_tXF6?%rCU2ZI;z0X}wS+?jen`CT-ncmyI3OVFR~V&Fplyfq_W z8w}94{+`>~Eg#Lzuwed-_UkGyr-7*BApM0+A$M@n{U{BOGE6eF 
zk{&44h}n%M0=g6Ge=k8n{bU7LyhpiaLW2?>q9f)XcX?z|1>SfFclkCrxi*^=r}@uu zY28-rbW%ZBwk1YZ?wxF56sj8JdG>lUdaJw;f8cJfH?rbH=_r=^eraw0`c3`S7 zNB1ip4+&l60H$iLUW4wWIEJ>|Oi&PX*D^23>viO8F)kJ}kH6D+16-ec(kclST~TKI z0820I`Xs9_Ul80A=JH*n?Z>~0m#QoZa9nmyBRG<__w{;$x^+^)n4wB)sVm}C)X7F&luQg zah{-sZC5%s{qr&{GxZ${iMs(%PpY&SJGJ{#Jg3)^i7*#^@#41}3ciCc-<7Kj5y)nNUdmog&iIxBu-6!adC~Xx6cAkg!-s z)csv#Sgpc$_v%j%iE+4!Dcy#;KOXVFxUn>Bz6nveI+Yt&kw-Xm@Q;;hu-n19+XCna zd9@Cu!LKAMTFwTt?(oY~HP3bSE2KmSjBqK_9;#1(84H$`=9*H#Ug+kF;C6A6j#noWv=CbdKKEvUBX(k=1%} zYWZ+@NXLGKU7Yy~p7M7i=E*Q(v=S>zD#wBfU{HDo%#7;m`jrXG)v?Lvj&tsjviW*k z)=P084rfYp1s?O#bK!RrjUC3^lu+sfDYq&W_I;QT!&AuABHhzubU`YOV4Qv~^}FE* z;djw60U{dQi>BB?$h_-!>i+4=k0055&X+=M4p;h7#UdrZd({=C(+WNgX7B=k*)eeE zMwZ1flZjH}HK3|~%n$+vf|kl%$3J@|`o`g+?6q40bx6pmj!tEvQYMz%*0BPtiqcK5 zCz;8xcDdPK)GK=@96vFjWWm<%=aOQjIX4tj-GHjeCixH1lQ>aH$%|(|-@5lzsVTqg70MzmGJr-B95uS&sOg~CasgHgfR-@2k98OrPF2NO8s2==Fz?O zg^1L^BxNr=A=yu3TOg;@Tmp)z!S*1CkhNd}y!ao$Jcs~dmqRd0pi~M8_RPC0%Q<|0 zyKSbK$BBn-W*tf%E#*t5V_h!;7p$a&4*~;;4DIGMsTPVT7ufVBvD0>m?ViaM#W}h| zLh~Z_+I@~zW*w@N#aWlF7aN!Powsoe>v&W^%e+gRlZuM%uYHnM(D-D7Y8e`#Tn^Ae zquySC9jV`2hMTK_51kFGH!pOc57V@m_bw@>+h{Fo=?r8ReSAxO^pxQk>OapI=9^pf zIt13AWe>rAhveZHqcgrO)8|eo^-YQjO0WfEMu06ho?bSJ(s&RziI)~=B(2JR_yVMj zuCebEwu_iFRX&J84_N@8QIkkL=Qz}<(y#ouV(W<`?39Pp@T9bF%@BNs7-|i^Vf8fJ zQ2uol4=@{|4WvvfAOO|PpsA%S5I5oM(A#ur7}6OWBOL2|iBr6(T?T$R-F=l##;r`} zN$kWnEBv=_H)FZu?TT0Ov!3hODUKwSMHQ{(+tWag+F}0y7SIZWBZwG!6enYesp(io zYTIgduUD#H0O(DV0b{!~z7VKvxAX+GdiI>pb{ATi|6V9#hjE@u09&cf)%jTrB^Gk- z(KnhTTF+ZL$=!49rsodkogK&3i{>J_J4ME7Z$?9zECy|fYL-M9Wn6#pNLwX8f+H?W z6AYSifff-viHygL6Tspox!9R0n!+jZd;5B*OC}osrd_?6CLKGrrwr1W+v-I)Hdu|7 zsqlqk_s?tvCV6q0cbcFk)yYR#U%%M|0KK^Ks@8}gs0LVz#uW{z1WyrUyoT&Pc9?2} z-Gp$(8&TNjBzq$oIzjw|w1JUc zg_dkmKJOTmkc$mW9=oNZX43rIpK53aIPbFe)>x0I*!~@Y0dEG4zEyz0)xI*=IIA0$ z&|%aD11yN1^ie9CyZ%1BX9#~2UJ+|vZJecxU&YNOYoszTlJdv)dYCBATN#pN!loN@ z!~LG8sB>|&_qP?i#$8|7pt#NC0rFhOOOxIJ_tB~U%K8uac=eLR70DNKo-E4TtXsr1 zA1RhtJtD~O!L%hyb-!u`qaWiyT=rY=aB)?*vl0n`M!ECB6W+J~J(5hZhiSJP`evOX 
zyn>T&ad=~KGN%>N|0z_}70qB&$IcQ{8y!%~v$jp9zjGUKJ=xkUX!4x;NR0OJ{x=Tj zX^rLu9y+2P91H50LPGN{V4{v#P#$jgWE8nTQDETQJn3UFr6S;t>QWK0%q40S^Psg% zry@q>VpqQ~mxO)g@R%x~{~N7*<81*YG^ioX=n-+LXM@vz%mj3gAI&X;?C^28lTv}-3yuYUW8Zr^Qr00PH2&ok+b}bU*%RF|x3qr>)v(jF zZb{jFOJ&ks8=fV8Lg@d~6HLg0p^-M7^zD_79y+r=c2*-*w6^s48Y7>*q(C=uo(@P@ z;#r5h4gnb-YKy5X?t2_3d3UYTRE*yh3!=(?uy)x|vUw-<@F3{z!O?E64RVUT_Tu>E z!k-G0C3wG^HHQLhqS9zbJAN`Zvf@f_Wgcf?L!ngU<>!!=PiAblzoVu4A0;29{FUGu zIPu(?mk(eGc#n32u$lZ*X5Lcv62}hu=_AH@U4hvfu-LGGZJ4&=Eqep54|ZBln`dHS zlY5eL!JAY}tnn(?dyqDQj}}(O4;35^9%Xi@2ZD+wbY`!e$X`y+&`P3)+-faWlrc3G zM!s4Rc7mwJPcJUX2-o!A*-gM4l?2)hFpsTL5Qp4d*IqqNZ*WMdkXY;7&;kv#{(Cw% zL{3vWm(Hz**Gr#q7k(?mxdu3%#VJJ(G3d^2e~UWu%v-Vle(-bUGfqHsi9CItOTLty zBZdZcMpc>i*oHpCSIJbB(!?`hNk$#usbCh>KtH}@!iHg~J_W9T(}P4`uN2yHAnd|g zFA+v!(CGd^k9taPG28U@Cs~PP2~I_?qesMIcOu^m4O`2L`x=I{9%vYv*l86L;wkLI znNQdd)|kp0VSJ%%f(Eb1-cy?7Gon0CD7X1oo0s!K1`#^q+F|#$1!7_3{wm%{xzzcj zv&)?RrlTfe@HLR^e2Uvr%0 zq#UJIZ3t}2(qm(&DUG%@=rZHr{J~%~|MuT2$`!*&3x>er3cwk$_6)JQ7fGp$D{oYx zUCf=_T4V}7yipbmwI{BJ)@F%IGRal&0&kAeF*mtJE|+Z_4jbo4!smdtTb<5MVktcW z`(%*DO`&ZPE%9mAY}YDp!3|lSCpDbXjtuDU^1rtL@Ce^Y@O|cgsi{1 z>lOm$o1~(8Mq2^@zEB{nMJxVwc3Wi(ZEgKC33=7pn34H*^^dTCfy{_I|7y>+2J)#3 zhC1P@`-?5Oh!(uz_Ol@=Z{^}+W{1=4{*f?Z=*dVfojiVZG=xO^2Im5lvt}?hAAEcO z1O5xOWD+&7@~;OI-{LT|dL-v*P+QB7YfZLP3U?~xy5hT~?aPfU=iwBpV`of=sDE3- zDUYC0$ZT&n(|P%GT{QpcMGE665oK=P@JlKYVZTxA7_n8(`zk{O*X>|PDZpEmA2*$t zwaZ)_t|9=OzKm!Q@W^$Ijeul{ic-TV&QR30^v$YD!c{y8P`X<5_^L_YI?>Smy-~Ct ze9SO+N1&`!;v&-!qwv|iT&d#?1N|;~7~rp&I0{@Q>neN>!F1NAtX3=($Qgb6xzrWG zP^KxpKQZ!?Z4c0AkKEF0XvPsae{JAB@bm^hy$sSLx-ZhikPT)mb($nE^+28|?IvAtTUAO$>5$L!^cC2(l9ZE-_V@(R? 
zD0O2J4o9J}BQLVvr6RX`_|u&1+40eL`Zd|I{2 z$Ps%ju#`CnEp%1%a8P+-*;?0~ zU8-*pJn9-2!}8asu?%WfN%QhItS!|fAy15&twm<7rxo4S`e^Bu;4d7?%hV`b zyhpqsur3qxS&ongJEj#hg#EWLINBe*nyE!l*j$2>4d(RRAPDMPs0bzoLYm`Jygb*` z!gKfp7PO21$nGYc)BLp_o9PmV{Bv*WQVly}tRlvz(%=K+KS%*E(gPF_dOnWrbkx}lahhU?iN1$@1X5prp`IlO;DMoaFmMk>(_B-)~?6#v{KL}RRYO+#q8=+CcJ zP|?Gtvoqg9GiW1Yzqs_0)cQ8)0`h8>rol8`PaLTj*EFNnB6iL{_mXZ%rAc_BhyP1MMyL?&R^_r>nm0m zMEvn>e@a!V;ta}Po zD6gye%-e$4<>IdaPAL*?<)+r4iFS1zdCx&o;Uv!tAAZaia3WmYz0g^d+)rC? zS3W+B?IfayFnzSyiZHpV`I z_7c15Tkp*b)DHhV%~6l*u;r!{BF?G)9-m_`|E|cTCKuN05ZCpT6F5et-;{|~En1BOP_iu-lC4C#GL^cscQ>KSV-X#nOyi`hX zeUVOfM0tMKqBE~_+S&zx4e?^fL9`mfiy&Eg*_ zj@|kpKh_dLLdZW zy05ZQ>Vvgb`gU{gCU83?={o9;-eSuP;dhBwXx_q!(?*8O$23=(RGk})FFB@DD)Ve0 ze`+a9kQ_^AA!7&<(~q;hoGdp!W~J7Tj{8qh_%&FN z@ZX>BvFk$qDu;nmNg^n}eLO4vQ0kRLm^KhH(f-rt2sMTxm%YGP2xo6h*OP^6Xe5h; zBv8vOgWupmPk4i8Vsce#{2v0cq7BkYtF@~2gCuc%J>1-7!R~E4e+$yY8ngJJ$K*t5 z>+7<7C)}U_S`ansZ(n!aaK}T+OC676az+Byfqc1D&{jsT&BEgxk!on>Uh$;s{kPg%@T1jzH32uJCZASWbsA}^+vN&K z1Bh4j-=poyw%JUh1a*!Y@0>6+zIiTHNW_1r`SX%KOaWb9Ws;OV>i}kE{mwVX+Hkc$8Df{S<&lB-}5|`x|H(2 z^YQRQwykH`TO3C9w&C5WN_Kk2rLsePX6N2_5hYzQP-p?DZpMrjFK3>FyC+rI7H?Y0 zc7X$8j2}1ow2@MJ{28P7tm`~gfJne@f<&fkaztopz4+^o&H8oj&GtML6r9XG?p~NugwCr(4gyf@m{kTw5Fd=PORBev{J&#U`+4p=1D!Em!L- zSn4=-MZ=gESjuhizeV0^t}P{V;!x(=2#Lg)jV}yywv?kYzrLxDO43wL7v=eyc`Qp* zsrc1NVslq$t~lEco9j7%!64AgQQI9cS8ml!KLRs0tz_P6XhF^LBz8qv%7et(_9(6{8WJB+F}I2LTyjsF8k!HY(AY1U!te< zTKrXL@G4zZJ9`YxCw4{wuN%JO%^U3m>111ZK+R_sbRaIZh#i}kD>#K}1_aX*TPw?q znpIjuVl20{kBWa>XknMd6~|Bi^(%~BJbHkRr~uD)W>xA((8^mofH-Lt^l%(#NCr)g z@_3dCU<4X?97S0u?_0mA8UJMXinVlN)<5opif{5`0+0_u7wn{d$}XwD+|v2E%I=3N0>j*iJIW8KJ0|)P|sq!pbS! 
zE|)@U$UP9tt#9YMDc#}tTE3j{AZ`Zl z*orbew$?qJ?WultU+jzuE)WG-OyCd)5Q^Cdy9BS~ZJHkmO5hc;DUH_+`}L&Yzhl)u zr9)C=qdB-E6Q~PeU~!TGd6V6?W`(}=crD|A^>Y95^uia}2G7;GQq8;{f{mrkT{m6^@S?UFFur;3hA&Hnyk^5pLG=8^r1-LFl6nfvxd z>ghWIHeK$eEDCZmlDr54%c|F(YUmh$_H_D0oqA5HDkjrSh{-E(wfjNPv-*GDP`Z-E{$sDQ@aSLj-ID3Xv zCckVXHpEIna!F$QG%9>k{ZQ9hn~Eg~5~B2+k|wv*ac%|}Ls5v%2n_P{?+K^Q^V-Gc z0rOWcX{bzWQkc$2Q~*DC{U^kCDc?*Ygpjq!mvQw<$-Ujx->jqHZ{N&0=i{XP4}gP$ z#G4izQT)FP4yK$2mSvTrwZTax;9`MS z=<8i7*PD67rbA5hqyPe*hm5Zou=yR6beBNcu~Z6XG2K!wP8glBnq|XiI>3PvGKc!kFDrWH;=?(|~E|u}d+$-KO z+(94-#pvUZJ0hB%Q?G~>^T16o{Cg9oE>mlNk|S&Pf6I{aatYVUY2-+j4i{Bf_J1g& zePuZHGitg=I-L#k&9~;_kDvR0?nlTF<{aXau-nd;(7!61pYnfBW~7!|^g{~yEaD-3 z!`!XCQ|jaf42)-IGEs)&?XfVilm$!{pN+nm`C0YkJF7-cO3l7YOQ(+w`NZr-pa3Ym za)S-hN7)`&B$Lw&gu%Fqua!^HSPSb^pGsur^t7rInzkvY(YV&qY1N{YOn%q5g}9(` zygKy#jq$N@J)?6ydX%+BA+G}0-`>1yP3OPU7O9?tp;ARgYgSQK#o!`Bk$U@4GhRj> zMmtiVhDZ8*k|XES=sWAjLuvQ}ytDOQ`L2V*7OkEK_G=%B8*vqKVe}`R$+fhR!snn% zkd*38#MV-)kXh49ZmfXDnp|xAx+*A9@K4eVOdJn-vPB{^4X<4j^~n~03q=fcBz4+; zQpn-OwZVj4Sdf5>9F=vounPnwP=hbymCQ4=DaS*(N~?w+pb(~MWBjp2BR%-D-JaF5 z$X)D#{cF~yZDJa5->aD-XsqpId^!{CSN~NF*?9WZ@>Y&W#X1n5K?;X6PVl`Q=R`mX>!f8%(c!uca1^1w-3n@ZDjM z)c;*qIe73@b#2z;{p$8k#LvX6RWM@b>CjQr;7wknsTm&S@< z#H7#Z)vG;bqRbN$mPBGm#G^zkV~ABjrEGEBr4{)w9%%o1!BuFcD8ElWIuxxJ7)*n6ke+S_ZcL`dnlu3BZ-7_$syvq z2NUG$#%ltlmP8HVfJ^Di!xf+OVF70pE%am;y=s!N#~j>ZcIfQlt}ut zfqhP4&)h0eum*M(Ka>isB4Gvc7M@szr8Fw)zgx5Qln?j41*Fa`hZFLuHktEm%Sdxm z`ZG!3)Pv%R7kxvcY|g%;*chl44rTxs%qJOx1yFUxhWlx0D$)qR@Y)vVLFD(orG?=H3r;ZCCGy>hNu6p zR6n)DSN1pCQ=EuxLq^SAg=|jjw23}qh5VSzLM=l}(&(CCHVn<@L#}!hxq=Kfjy}wB zKm$;SI(CqF0BHf#Vr9bWD9IC2r@3(sy(A%@`6QjQ*?R~(V=7bn|Mm9PQBk(xzpo;Q zfJjS&fYOafcOyu5DJdY$(4EqqN+=COcY`#DNOwre&<#VKd*1i=p1sfcfA424*Fx7g z%skJ1UvXd8_4$ImfIRduSg5)G*nA~OrGUgBGQuxs@RJxAN&>YAaFYPwp{ymY4A|jd z*p!-620AD+>2@Y?FAo$qq)fUi`2h?>qBlO{{FX>wUuLfbAi-aIEG5p3RoBZ{Gp7B=rTdtkcPqAhUyc>) z{6PK6Dl8GoqFNk%6<#WdnUnAoglI}P_To<;97h%MY<#iC6VhL=(d_sR<&tUWJ z2{x-`$x)VVN6B-?_r3zS2KeIO|M6Gh&R59T9HFhev*Ic 
zLt*4X4R-dw`_YR-*|Cpw+aoq^1#S0XB1zcbY73n}q_9qrm!@bEP4LSqw34NN-74ax zkkuPBNR$!eQ(IvQ(`vR271RE5vT5f^b=>}lK!N(a7-O+ z!#R#)Q_8UL*KjEjk=y*1h<}4bcR4^l(Ra_04a(r&0__m8tsV@6iSs%=)W3^#rCzqg zwOLr?9XGaZdhi>O{O8&M^<^EVy-*2CIpqz2dT07gmdl=Tmfl^FuG|7w8=RZjY^bHm=LZQgYWtixOqDN z=N*b(fOVj(F@m0c*mSfm?cM7{4C}SKXuStG z&-}-*r~{j?r4?U>*f7x0TMRV9Q=Jiw=tAhG0AfN4Mo?a0Xiz?E=(^v4!ww7uznl0A zf`rD#4&sp4B2bLrj-pr`1+xwyNBasKP$H2oy%>9Aa{KJRK0RE1dzqC5gGWMO9^!iWEFln1t> z2^)#SRV*KF(yZe~nX)L-L%aZb{Mdfk2Pm2=$eN;NQOX-G?BGPj0+^%)Kpf_hod%WL z*FrnoTsiNQ>+E%84o#8Y&9ISc>Shg75ru_=yz$eMOz7>$R$pQHb`I1UPvzXL9e4-g zARi6kK0&IE70I8*;JLX#w`);n+Q@|b?hYqzAV)b9KVCK>>6Zl-v;)2D!HzUlKcYvL zF9LL&(ms|?t3Ubqj_V`8*<&Td8HWXDnd!%YbsReDQwFV+FR}{QVFG5z3I)U2+by-_ z_3KVk<}GwohQ1Du4Y*Zv`{x~NPI z1dU*l?r4wlS~`W1HcF9j_g`dSK}FG_?-_H7yoq~?T|w#TpYJofGnVw0A)o-7t4a8b z5gI_7$W*SGo9~|hp^aDOD9*398uL&1#Er}X;yE0rs`Lza8*qb`_mhJTh+l`fEm}RMT`KNR z(fFTySP_({gV7x3R zMA!LEj#LmZz(DdjF$IJRr`nV7UF?qr97rr$=c7zo4LVJ3M+ikn*`$NE$x=ja_dh#r z(1?A^6zePWwq@6!7OVerKFw*I#sBtBUjgO*zbA_F2#Nkv-!JmFOZ9fM+_)QIfRS^x zqeYFl1})gP3f;Fi$C^Fth^IcLMp_C;3-7}#fBxl>NM5efx0#V4{k1rMX-KT?neALM zrl^~2$-o^x6^l#tO5NdeKnRWe8#0yeEkQK$iE6yx6FqsqOE!jqzo>8!nymlq;j$7S z^?h@7CjJBE_Ss1G0liv&Bmm_D0;U%DzAa$5X#@YCvgbc*4}hccVB~5CfR_O-eK6|r z&*k_BjQH=)R-4^i9vyENWFaIHK?w&)(C0Ve;NZ9;F*tx!H#x=7Lk=_~(aMbJv|E9i zz%d|x4}#tsAP>jzwgwFR*d2w5qGhOu;l9eXTWmc6+iUMISP$QRRMO?w$Pk zvsc_mB>gu$-73?t^o2H{4sZnyGuJ_NUa@vux8l86coe{Y;-#(&14WuyAo<5w!wxXY zMTV_S=;?1h&15f|k$~C}i96l+6VCtAn6|kO#yhVMH38nI15P+IFA>d1fW% zu~b^WR9x5JFRKx?-ME#hR^%p2K=&`rRn^U{VaaK#(nLm1L16-rCnab57$+$ajQc;J zzM`5VD~nPu@f_Lf{`LX}OC)Pi@+uq=KL_Z4V1(N{2o41{b?zlZF&W&EP1kV z)RtxMfU=MJnFT`ovMHF}^F@DO?jdMN)u_idi@7@4DjAsv&prnnBbaH*dSx@Tq#-Tt5~rniM)k{16$&ym*ghkX5Q>2q_` zhxklh08a^{1>N86lPr?6CYwLgU?*=(dY>+eKWCDi15_vyA*bNwc~Dna2VxTmk1wXf{v}!cK<6mP$HbLQ2i0`Hq;= z{sB8MfF5t?zCp14`&%HuMV7|B21={k9zg^XDnB1aOc;}6;5&EdDu6}M?m-ElxB}qU zhFY)nMva&L+TA%uKsJTCCE#SmP}|umDFh@m2Dm_5}d*jEj_>xCQ(FwTA$K*!khMyTg8Aeho|2Zsa2uG>2G 
zwi%2Ieg*zXK%1P)jGE>dIsZ$by{Tnp&ta_5>|)~`LT^K#6RJisL^=7Z%s_CfAS&voUZg4&W#2T|4+*$GXArXV&Q!hD z&mQSCUIguN%D2%aHNtvCM}0R+qMcviXFDagp8`e0#(2QmMoyFi23jspK1ubOt?lx= z@Y_WGHOfYKOM@)}*8&*T!fN|V5yJ#?hph%eU>m>|0MRvHuP|F1bYhQN^tS!^S`s4+ zm(8d)n*P0szIOvP!z!`s1mD@_Y_Khu6FrOgMxajk?bH;&#lEER1Inak>---0)lSo9^)W>oh~!MQ#_nu^cE(9LU^3;bfJp!Z)MjVv z^W!2A4VeK4!dQxQHh0vGW%fF%U9#5uY9j?a-l8rG6s1Unsw|$!PIcgw>pl@-DE=2Z z3hz@DVjv=^JHIh&ku_PW{@d;Ns&fk~*kQkMi=tqMWKOWL?5H{!;>Kxp#uAD}shM;F zZ3`YkQkKS~5Y!$X>K%81E}wOxQWn0VWe@#Du2hD%|9*)_esvkhT62PF383AXhI5*G z$=q1-Zn53hms`jjKs8}Psa;`rsC(NwK+1H%d0f$|wF$uUa<#d*=l}YJjj;n=4}#e{ zu#%g4CbGo>bAc_|rlB4|Bo!kQKysr{PoWnx-s%E*$->vz2^jT~IxLYJ<$7f?6K6pf zm*4d1%3QE@DwFFpx6f2n=-I)d!LsoJ?_ZklrvMiN6S8nSjvh(94fB>R!<$m`9Rl); z1n%8&<69_h#|3Q4P zlG8Hl8>l;$+b)~%;bq7R&st6;#_#jE5<575L2elL3{N$fwW z*rAH2M~$&H$~f z0^J%22#c+QX5A+p08CaR3_%cz^L+C+0hv*&NL6;S5s*3|Oa!J4ah+SJ8%3jN-<&F| z%XCto>EZ(~#vI!l~e z@;ne)lwbE7!9_X;7{U@u6na$?i!+8Y|7gf=*+{1a5knqO6xJHLd9lY@z@aQ3N9f9 zK62N;&l&seyH0>$Q{j!BU@&4NM|T``r(Z{~(Y(C9c-cYM8)M1@h?D&!UjSU* zc^}%^oo=X3^{K#DA<%PawZA}W*8&8X$m~S|-DMCh#!YseOu{CkqoZ(gY9L!X4wz{I zF(aF`(}pF|Ni9kK2_N`TVpJ=^JX{hTpVb8Y;wn4fkV2km@O~ELwqNPMziN2C_Wk<> z8B~7}j3=qqr~VFUS=lLXH{8qdcuw$XzxY9^UOg)VGqZxX^sldcIdk^IKRAT-%Ag}( zZku{XbkCn)VJTJH0a4r#kw#aLX8oq~J^p#}i7_+DHJoAgGil<$`j zOO{ap&vNEoxm=F`NY*w6x_qtIzWoiR^Cp<~zc^yZB#!xW+jT6CdzF+bq;bNM=!K@f zC%T1)(y^B2I-u(DmdqLI<%3C%gHeO|PCm7m}e3K(g~A>rCY>)Y6A}Z5CFOx?#+qp+&j;{Q9}; zc9zTjube&bYPDNTc*a79CdiqAr?6m)MHD&9PE0jrg{hy@Tj-c?b3!C&fG%=_iYm#&Q-&UoZ%`+MmtJvbG8aia|ebxOjV*@&* zM~@~;nDCuIZS?Wm#7saaF4nsCj+iWXFJ>yR!&~cV`7`PkgceRL+8`&5uBc$=wm{~= zavL)aO$xMYIg$pNEK@9!`#L;Hg|L7tX=4BKuH5~asiz%V_y=8hVoM$IOwiJR$&u1V zHpG_Ru$4OdcT10ejhk+@8~~Amb|m-XUDkhI6`_%iR2EZ|ZZxD@5TgnOkyKVEF2g=W zT_>DHC@^OW2p#Te5*`J)>x%>6RdllC1fWSDkd0wvTXFb; zmx72s+jm*t5E9AW6tO2Zq1J?RM^_35|E#+iAmny@;BpH;v+~B?IamS7!{`CEJmnqc zs*@8SjSJjf*V`pW>eC$>)-=oS-Ktk;XOUl948WP2n(aEtc)wP|pVIIuF&ehGg*Q4} z>i_Y+^l?G7qFRj%@V$MSC|Kimw7TB@{u4lZLImXE`;*Sn@?4g2DJio~XG~&>R-PPm 
zF-4Esrs!rP1IBAHe~MaXJmuZU<)Z-!BJ&nN4NJFFmel!>ClMSJ^m)`hPvBvEzb%*; zaXefRt2F|k^(5&8Lmdq!Z=@`qL2~UkDAI5zRv)9!)((L@dI%+d!AV$`ee%Ib#4UP%omD88Nvu0K$1l1e*UCXnjm< zF~nR7J+t1u#m32tE^?JLd}G}Ggy3Z>@Fg%H;Z%mek^_IuM!=~48?3yDArRV{^{x*j z{F8_RZ!m;tVgY{;V10t8mbs(Z_Q{8%V#QUxdXS_J)h z>g{S(rgBO9ThZ3x6SrP~eEwjj&W4mi@TGz0UTss4P)>sCfzx8-Glg~Ou#L<141_MxDQWJZm_ZK0W6Us~{?k`a5)U}zcvt6tzj zO2nV`{q&qhRK5wg;EZ3yol0Y151$Gs{N=LW2i<=V54$X3?|6ii63DYbZ|1lsG^$^V z{Vcjaf}MXH%$FkCGdIAv5H`(>3XOQ#q2%!8MZx)zGuHJCv{4?BhaxtDD(E3eduq+ z2>{wZ;|Gf5?rTvzrl7qYHbWpC_j}oIa_fEd0yvR41_5MIn=#OZWQ4SUlM2Kn!xd<@ zJZ&aJbd9D7dpE%;zPU27w-=FW(~ z8JvO)vO(KA=j>biWbxon1a`d3HbGQ2cf{@TH}J3(p<}P7dM=TShDTfI;>E^}^b&IO zk0LX*Q8&&jYPPPAIGGE>XYK-Lj!K3RH7^RPWhyu z_RkE(m?Q)HtqfQGGrqrpzY7rHxQzN8(9;g+y1w_fut~UK{i4ekBC%xWy*Ul(7>d$1 z%D8e+o=#LOYdzOZO0O{PLH|cc!KmHG>IX|PZl1+-nL%-&ID^(SX2x^c`FvEf4@ch2 zF+%65^8TWv{3D!ZgN5ptE`~ss1FHxBQ1q##z4dfi5+ZJ{#iA6}Z_PIYHyJi*kQLZQ zPqV|Jvs{8uwE9K`|KYIKTjXQW;r9@spjmwK4_-&*8A8(Xbaz&5Z>`A4x1Iw`^2U$ft6y4;!Q$SSm1+H664VzDDRzhETNA3@c>`bn$?x_MeJN}tUG{XTV> zk4lXV2W{6A(`(o7eQ*PqkmXW`fSGPXn}kV6lNFV1H?+}Ui**G0Ajh8XAxXbeWLBwlX z-vvjY+oF<774JI(a!G9)sEPocvUo1WTVkVmVk)QN4VhjA1M7Ov#q77`imd84p3A0ZdfTNr zK^MUxA^fd$%*;QxJ#jW5K9?eEU10B;mulN0D>5nxynHE{H9!RI3ep&3Gi)i0-lI`< zBl(O5rl@l^$^MK)L`Q3u6anX`-#+xvn*M|H;3|KU6u2`C!or4U4}k~BQUeK#H?7HU zne36u@W+kkcti~1S%LF1C0R_M{XCqn((m9yR*X6T%16`2DS^GQ-nHu=PlrMQykyl6 z3VNIk*RMJM`B>ooV!9H9fG*S5A3mYz==n>)M}Vtwu4;D1jj{fjo8HZWi4=>@cS&U>VLL!uY+YPax9aXt@J-u2j*@#bQ zC!l8Zi!MzmEvPWjC#)Zq+iEh=|Ds8BE0J6lcjWjfaCYmbQH9}oro_C?Y}00{L+|ym zLc59nwAwk(C7a7K*ybK9N8InI$tGg*UFE9()ENBaMdkw z!pU=JZg(-HCEq z-esxYoA4~R5(SS{3SuEgKLuTzwl4CR>ja6DjWxQXi)D?+F|Uz6EGn!+~~$av&dd{ZL#ucW>B(*>hn?KF1H z6+VVzoY#~N_Nrr1myM6#$qxn_Vc=|d62{kg@zWO#4Gl#KvCvFsv?g$l8fD0(--_z2 zc?d9D!sQlov#}(acOWwY$C~X`ox$9Zy@J zi?hL4%Z4XeUyO;26^wu#JxtE(20hn>!-Gsshs+jx*yl=SSY?1SYFU^C14wjZo(Znckhb_oRn%3 zuosi5G*Aah>RA#ZZFF-1oJUw%}fsCaxBz%$MG(+k!7JP!FO`4fSC~#$EP3mQAVo zp6gsDFj+>adB4!S3f6LbS?!u1vE)O>^ 
z8Zh}5X8DeL5_a--qqNX$2QqfQ(Bybt#D(TFCqRY@hEjiR~%L~!iHM!fJAFa#BwaLxna%ZRC!xs z1QDGRyIqM2ADYy@%ST9~Mf%W%!VD7^#SV?PVHVBKb1&vcj7bUbKr@jsWdNia7L)-rL*2MM!U8M}0Y<sne|z!ZCyOnTO0j&_myL-fXnHZ zU0dw++rD|1JI4Nn*V8oDWe48D(f4*=PWEDqUl{LuIvGlw-EXn~$rN>C2tD1a=JoOTws(`* zu9JN{NHU(*p6XD*GKykl-bleYau1*kt!vd3TUnKR6-_G~Q#^c{vUfW*7g@>Z#y&Nz z?G>}ry4ylNf9Hu!gkYt8Q<1|HlKV~OkuvdFfvfmSN`z9|MpN^OH4EJJqwS52o1r-0 zh_li-u2n@+32m=jt4GdEaJ*&OmY1d*IcN*rp-N2O9eoW8kE?BN3%IE1`%p){F7k34 zm6W)QH!Hqw5e}Fa?51nCB^o2jTXCe_|u2th@pi==E+kR#1rw73onF?$uE z_4V}BLbfCVeq?scrSoF?v`T&?$CL=)yF!@^8txr(DK|S)$uQW6sxWnnw1L726ke{e z8SbS-5LZ{pO6r!5_sxNiLe3`|*7u?ONnZ1G^Ov>gZ#Z1prAZlIU1!*Cq!s+^4R~^b z1k3v8OC8#!=jp>PuQwe_mr<=|UnI#5@yj{wstr||3-90U&Jn$mD%9eSY45woFq!Xg zO`J*Ij^XSrIjOCCYiUaPIO84f2;ZNI%~bEwq5f^5ea5G|T>DX9`>nR!8#{AN$?xX9`Z!T47oe3lWNVuh;xO%Rq@IX z=~eHVP+V@;UP=%XG<$pA9_Y=UkEO8_8;}(<1T8CeIyvXgF6J~XqoFr-w6fz4o#F`W z#neiurC4O|7qrggh&Df$mUU^oWDS}yU);;Z6GI;19X{sEHeN+;wbcYqppKPXYRw#} zC+W5~(zdT~$$c3!7^bvQf%S*Y>veVHOzlUl3zKc=sd@bD8a4+^5v&EDfOry&eB8 z9W~qixY?j{<)BpL%TLy%SuSA(vKn)o>o1JtqnZj-mkvWmq?Gw5T@Imp4{YfxSxU;& zzZH0P6d{eIpy1oOe=LqZRwe2bSj;g^Hza?}w3h_Bf~Hkp@$X8}FXA>NuiIS9bwhhq zuYAIb9kHaJ)oR{O^n3VHoDgc5C^MdWV!JDgu9L${45i`b*rS*(ka@$nc6w26B&$~2 zH|Xt*PzXzf+N1IG1)MVy#;M#j4FTB2_>*zxvpzCM_nj?&OkEG0h;5yQ^G~P2ohWBY zRozpTyPUl$XA>Ux3Uv+BQnmsBVGPfVXg^dNnw z>?acESUW)@b9c3A+1-y3cveWqZNhjfg>*IfbgW)xh0?V)(e=qu9{=&Obi=hDfhx$yXeV|sqC%BiygoG>`2lN@38iYiJ`Jz{Ar#m zBLDk<(Q4-@kxlWSfGQrZXSLrqr-ur{X*M0u2hHOSJD)RLjQ>XCePeqheNl>8_K?I?I1>BBin9JTWy^lu z(KSSHp3>`y&nIVUi7RsoDL%UxXvKKH@Lz`>WphYEITMEhe<-#PzPKbPQ*^+GDkPfY z`c{*uer5uMcwdTLTr;Uru7kQMMn= zg4$D5wqAt^aO#!rzEkWYZ%**rns6wIGYf?GF`Sgm4)gtFNKlS)nkp!$Et~l@6A?fv z|7qxJmw8ui$q*x4T5c zBhfwojI&qS6iM&&`*22e$KXG>ClQTwIqN4C80Vdhw|cm(K}jK^lly*KhCNyGZ?4lx zuQ4TT*&z5{!itu`@TYen@kTKj2}fUmH-7#eNd&h>iaCdk0nT3OvsV-b82ESM8%()YXq$m7`8n1%0I&Yup-$J!QTEuvv zA%0ug#1e+l4LPFKy)+Mn6)3pUrSnyW6{z0n7hL~!k=cEh!l!xu4n0jcYv@8SGo^n< zy5U=Q-OuxUbdkxJFk_j^!fDg;j~+CZkHfQ0WS6+`MAWsc3(Nm*lD~|&W0z?+_`S4C 
z^N~J~NF3cl$zV^`L{~=oJTxC&|JnB>aGv@zbY=ea(IT^;@7LZRuK4fo(|UgdqcM{` z?PxaIFRm>3Tu{}U1cf=`4%lsw%|6KP?F?^>!)#U&WF|HsRN$Uynj?iwWa=g;N(J>T zi(6rl-(#s$M6IKro+wPVD-d{kq8EEUhigAL`h>sqXv#B`j>9utK3_0YjxlD&F|DqE zbVzA|0(7mtt)R)1AIgNj#J*GTzi1lq@CifeBR%Ghx{=V`UzW;EDP-o^v8_s2G$Jx# z!y%RKy-`1Tsr*rKZaf^8lFrSrZ4}JGLn3+UwKWYPZ)#ce(e9Fcd+c{cUyk|k;+OI9 z$ZXwA@Z?if*S{KQ9d<&=t7c&W=Q$+YasBRC6Lq17O)Kt7Z#?x=$>tl<)Zqhj+ktpb z`dR()znGWjDc42o-P69a_@d54uXFvE_hY_mzX*_&r(a@8b3N7_@hnhm7UP`4V!9?b z(Ti{1f6lugOT4Iz9cNCWA;qioD`LT$NvK$1*_~xPKA?trm6QA|tsza`LyeQAmb%8t z=UbHJk9$k?muf0Ff`71i3&TXe)AYE#qTAf??5*3DaUiiQ`7+No=A&b&5%qeF$C29g zZS|d|6uaK@U}yE8MBBfp64g$q(^x+zt$zP(WSvXz8j&{ho&H7}w{a#i?OY>2w>1vG zk?xg>?697tl^JY`emFsoEo>bXrW<}TMLD;x;E^#_IQj`PQGGnm5e?Oe?$a+0i-c;n(N=QRZf`L9#!H86u>_}64&6!c6 zKSlL*jcavD#F2VVmgbovG9I4_TyEd)H~$6pT%Uu=<$&%(lSmu6cR_{wPTq4A4yJS4 z3z+5hjOtk#UvT0i^(9&1L0i<5Hm${DS*rnU>ljAg+Y3!{DYahz5hzE4EU<}J=E@Fw z9K8ry+w=RpGv$cc%(8!~ju$~lg>*knGuH3?*I*e)OdAz_KCGvh_GZ!?m~kGn+$T*6 z^GqGwor^qF_HxKVE92f!#+_k&}$Y_sQF0ddZ$Oeruts zsS|6~uEKAq`o?CU5TbeZe9@CPL~Ev>z9bW8oAFnl4iq$$kMZj*aZOq0cpJF}j}(XB zY>ms3KkHm>_^UY?H_lX8{t~-I@Uy*Tez`5iym1mLf$lVi`TdOm+0~OL<|s(M>_e`L zReO+k#3s9Zaf{)Tu2xTAVmtdT&E^U-*>_WRlzlrFv`3d^^&`lSIi;0_EV?<%(%>LqkB03?S|FEEJJWq$3-&SG*JK(bC zA(8TqlcS8L5z3TY?=JtXb*=Rh6*b07Ah<8g&|kOjM;0zoBdMDYW*%nEn?&>Uk@okI zm4tzJ({f+EUk9%fTuimo54mw?_I6L>xWQX^>-Adt9YQM!$zxQE=UqD@f_L5o-B~>B z5k@s0l;?wdcpj{lmQoh?@;c07-@HgFUh-UJZ7^3#SZyt#gg!HE{e2ISi4gBQ))%j_ ze@xsz|00AAuZsV1x`E{Nmd%JJZ0(uJNXP+wrG?foO&n#Ag+iq{w7ezJ?=A#&zfLTh zEe7sLlt7ms>xN9<#Tc@Vcb^ISZFE9)8b_J=$A|&N)u8Xk_O!z}lj!w#zsswPfggQD zCIY2!ZzR5+N3}F(u67edm-2o6XcwEa8m>izZ}!>GNjorK_4+tK)V0E;?+;r8CG7XL zrh1)j8H?#zH*`4} token (equiv. 
to [CLS]) + x = self.dropout(x) + x = self.dense(x) + x = self.activation_fn(x) + x = self.dropout(x) + x = self.out_proj(x) + return x + +class LMHead(nn.Module): + """Head for masked language modeling.""" + + def __init__(self, embed_dim, output_dim, activation_fn, weight=None): + super().__init__() + self.dense = nn.Linear(embed_dim, embed_dim) + self.activation_fn = utils.get_activation_fn(activation_fn) + self.layer_norm = LayerNorm(embed_dim) + + if weight is None: + weight = nn.Linear(embed_dim, output_dim, bias=False).weight + self.weight = weight + self.bias = nn.Parameter(torch.zeros(output_dim)) + + def forward(self, features, masked_tokens=None, **kwargs): + # Only project the masked tokens while training, + # saves both memory and computation + if masked_tokens is not None: + features = features[masked_tokens, :] + + x = self.dense(features) + x = self.activation_fn(x) + x = self.layer_norm(x) + # project back to size of vocabulary with bias + x = F.linear(x, self.weight) + self.bias + return x + + +@register_model_architecture("mlm", "mlm_base") +def base_unilm_architecture(args): + if hasattr(args, "encoder_final_norm"): + args.no_encoder_final_norm = not args.encoder_final_norm + + args.dropout = getattr(args, "dropout", 0.1) + args.attention_dropout = getattr(args, "attention_dropout", 0.0) + args.activation_dropout = getattr(args, "activation_dropout", 0.0) + args.pooler_dropout = getattr(args, "pooler_dropout", 0.0) + + args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) + args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072) + args.encoder_layers = getattr(args, "encoder_layers", 12) + args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) + args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) + args.activation_fn = getattr(args, "activation_fn", "gelu") + args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") + + args.encoder_layerdrop = getattr(args, 
"encoder_layerdrop", 0) + args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None) + + # args.add_bos_token = getattr(args, "add_bos_token", False) + args.no_token_positional_embeddings = getattr( + args, "no_token_positional_embeddings", False + ) + args.share_encoder_input_output_embed = getattr( + args, "share_encoder_input_output_embed", True + ) + args.encoder_output_dim = getattr( + args, "encoder_output_dim", args.encoder_embed_dim + ) + args.encoder_input_dim = getattr(args, "encoder_input_dim", args.encoder_embed_dim) + + # Model training is not stable without this + args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) + args.no_encoder_final_norm = getattr(args, "no_encoder_final_norm", False) + + args.no_scale_embedding = getattr(args, "no_scale_embedding", True) + args.layernorm_embedding = getattr(args, "layernorm_embedding", True) + args.checkpoint_activations = getattr(args, "checkpoint_activations", False) + args.offload_activations = getattr(args, "offload_activations", False) + if args.offload_activations: + args.checkpoint_activations = True \ No newline at end of file diff --git a/examples/fairseq/models/language_modeling.py b/examples/fairseq/models/language_modeling.py new file mode 100644 index 0000000..5b762a8 --- /dev/null +++ b/examples/fairseq/models/language_modeling.py @@ -0,0 +1,357 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +import math +from dataclasses import dataclass, field +from typing import Optional +import torch + +from fairseq import options, utils +from fairseq import distributed_utils +from fairseq.dataclass import ChoiceEnum, FairseqDataclass +from fairseq.models import ( + FairseqIncrementalDecoder, + FairseqLanguageModel, + register_model, + register_model_architecture, +) +from fairseq.models.transformer import ( + DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, +) +from fairseq.modules import PositionalEmbedding +from torchscale.architecture.decoder import Decoder +from torchscale.architecture.config import DecoderConfig +from omegaconf import II + +DEFAULT_MAX_TARGET_POSITIONS = 1024 +import logging +logger = logging.getLogger(__name__) + +@dataclass +class LanguageConfig(FairseqDataclass): + activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( + default="relu", metadata={"help": "activation function to use"} + ) + dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) + attention_dropout: float = field( + default=0.0, metadata={"help": "dropout probability for attention weights"} + ) + activation_dropout: float = field( + default=0.0, metadata={"help": "dropout probability after activation in FFN."} + ) + relu_dropout: float = field( + default=0.0, metadata={"help": "dropout probability after activation in FFN."} + ) + decoder_embed_dim: int = field( + default=512, metadata={"help": "decoder embedding dimension"} + ) + decoder_output_dim: int = field( + default=512, metadata={"help": "decoder output dimension"} + ) + decoder_input_dim: int = field( + default=512, metadata={"help": "decoder input dimension"} + ) + decoder_ffn_embed_dim: int = field( + default=2048, metadata={"help": "decoder embedding dimension for FFN"} + ) + decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"}) + decoder_attention_heads: int = field( + default=8, metadata={"help": "num decoder attention heads"} + ) + 
decoder_normalize_before: bool = field( + default=False, metadata={"help": "apply layernorm before each decoder block"} + ) + no_token_positional_embeddings: bool = field( + default=False, + metadata={ + "help": "if set, disables positional embeddings (outside self attention)" + }, + ) + share_decoder_input_output_embed: bool = field( + default=False, metadata={"help": "share decoder input and output embeddings"} + ) + decoder_learned_pos: bool = field( + default=False, + metadata={"help": "use learned positional embeddings in the decoder"}, + ) + layernorm_embedding: bool = field( + default=False, metadata={"help": "add layernorm to embedding"} + ) + no_scale_embedding: bool = field( + default=False, metadata={"help": "if True, dont scale embeddings"} + ) + checkpoint_activations: bool = field( + default=False, metadata={"help": "checkpoint activations at each layer"} + ) + offload_activations: bool = field( + default=False, + metadata={"help": "move checkpointed activations to CPU after they are used."}, + ) + # config for Fully Sharded Data Parallel (FSDP) training + min_params_to_wrap: int = field( + default=DEFAULT_MIN_PARAMS_TO_WRAP, + metadata={ + "help": ( + "minimum number of params for a layer to be wrapped with FSDP() when " + "training with --ddp-backend=fully_sharded. Smaller values will " + "improve memory efficiency, but may make torch.distributed " + "communication less efficient due to smaller input sizes. This option " + "is set to 0 (i.e., always wrap) when --checkpoint-activations or " + "--offload-activations are passed." 
+ ) + } + ) + moe_freq: int = field( + default=0, + metadata={ + "help": "Frequency at which we insert MoE Transformer layers" + }, + ) + moe_expert_count: int = field( + default=0, + metadata={ + "help": "Number of experts in each MoE Layer" + } + ) + moe_gating_use_fp32: bool = field( + default=False, + metadata={ + "help": "Use FP32 computations in MoE top2 gating function" + } + ) + moe_second_expert_policy: str = field( + default='sampling', + metadata={ + "help": "policy for second expert, options: all/sampling/random" + } + ) + moe_normalize_gate_prob_before_dropping: bool = field( + default=False, + metadata={ + "help": 'whether to normalize gate probs before or after dropping experts for capacity and randomization' + } + ) + moe_expert_ffn_dim: Optional[int] = field( + default=None, + metadata={ + "help": "MoE expert FFN dimension" + } + ) + moe_top1_expert: Optional[bool] = field( + default=False, + metadata={ + "help": "Use top1 gate instead of top2" + } + ) + moe_eval_capacity_token_fraction: Optional[float] = field( + default=0.25, + metadata={ + "help": "Default: 0.25, Fraction of tokens as capacity during validation, if set to negative, use same as training. range: (0.0, 1.0]." 
+ } + ) + moe_normalize_expert_grad: Optional[str] = field( + default='world_size', + metadata={ + "help": "Divide expert gradients by (1) 'world_size' (2) 'sqrt_world_size'" + } + ) + record_a2a_perf_stats: Optional[bool] = field( + default=False, metadata={"help": "records all to all perf stats during distributed training"} + ) + dummy_a2a: Optional[bool] = field( + default=False, metadata={"help": "By passes all to all during distributed training by returning the input buffer as output"} + ) + moe_batch_prioritized_routing: Optional[bool] = field( + default=False, metadata={"help": "if true orders token by the gate prob before capacity dropping."} + ) + use_xmoe: Optional[bool] = field( + default=False, + ) + + # options from other parts of the config + add_bos_token: bool = II("task.add_bos_token") + tokens_per_sample: int = II("task.tokens_per_sample") + max_target_positions: Optional[int] = II("task.max_target_positions") + tpu: bool = II("common.tpu") + memory_efficient_fp16: bool = II("common.memory_efficient_fp16") + fp16: bool = II("common.fp16") + fp16_no_flatten_grads: bool = II("common.fp16_no_flatten_grads") + ddp_backend: str = II("distributed_training.ddp_backend") + world_size: int = II("distributed_training.distributed_world_size") + distributed_rank: int = II("distributed_training.distributed_rank") + ddp_rank: int = II("distributed_training.distributed_rank") + deepnorm: Optional[bool] = field( + default=False, + ) + subln: Optional[bool] = field( + default=False, + ) + rel_pos_buckets: Optional[int] = field( + default=0, + ) + max_rel_pos: Optional[int] = field( + default=0, + ) + + +@register_model("lm", dataclass=LanguageConfig) +class LanguageModel(FairseqLanguageModel): + + def __init__(self, args, decoder): + self.args = args + super().__init__(decoder) + + @classmethod + def build_model(cls, args, task): + + if getattr(args, "max_target_positions", None) is None: + args.max_target_positions = getattr( + args, "tokens_per_sample", 
DEFAULT_MAX_TARGET_POSITIONS + ) + + embed_tokens = cls.build_embedding( + args, task.source_dictionary, args.decoder_embed_dim + ) + + embed_positions = ( + PositionalEmbedding( + args.max_target_positions, + args.decoder_embed_dim, + task.dictionary.pad(), + learned=args.decoder_learned_pos, + ) + if not args.no_token_positional_embeddings + else None + ) + + if args.share_decoder_input_output_embed: + output_projection = torch.nn.Linear( + embed_tokens.weight.shape[1], + embed_tokens.weight.shape[0], + bias=False, + ) + output_projection.weight = embed_tokens.weight + else: + output_projection = torch.nn.Linear( + decoder_embed_dim, len(task.dictionary), bias=False + ) + torch.nn.init.normal_( + output_projection.weight, mean=0, std=decoder_embed_dim ** -0.5 + ) + + if ( + getattr(args, 'moe_freq', 0) > 0 + and ( + getattr(args, 'fp16', False) + and not getattr(args, 'memory_efficient_fp16', False) + and getattr(args, 'ddp_backend', None) != "fully_sharded" + ) + ): + assert args.fp16_no_flatten_grads, "If training moe models, set --fp16-no-flatten-grads to calculate correct gradnorm" + + args.ddp_rank = distributed_utils.get_data_parallel_rank() + + config = DecoderConfig() + config.override(args) + + decoder = LMDecoder( + config, + embed_tokens, + embed_positions, + output_projection, + is_encoder_decoder=False, + dictionary=task.dictionary, + ) + + return cls(args, decoder) + + @classmethod + def build_embedding(cls, args, dictionary, embed_dim, path=None): + return Embedding(len(dictionary), embed_dim, dictionary.pad()) + + +class LMDecoder(Decoder, FairseqIncrementalDecoder): + + def forward(self, src_tokens, **kwargs): + self_attn_padding_mask = src_tokens.eq(self.dictionary.pad()) + return super().forward(src_tokens, self_attn_padding_mask, **kwargs) + + def max_positions(self): + return self.embed_positions.max_positions + + def reorder_incremental_state_scripting( + self, + incremental_state, + new_order, + ): + for module in incremental_state: + for 
key in incremental_state[module]: + result = incremental_state[module][key].index_select(0, new_order) + incremental_state[module][key] = result + +@register_model_architecture("lm", "lm_base") +def base_lm_architecture(args): + # backward compatibility for older model checkpoints + if hasattr(args, "no_tie_adaptive_proj"): + # previous models defined --no-tie-adaptive-proj, so use the existence of + # that option to determine if this is an "old" model checkpoint + args.no_decoder_final_norm = True # old models always set this to True + if args.no_tie_adaptive_proj is False: + args.tie_adaptive_proj = True + if hasattr(args, "decoder_final_norm"): + args.no_decoder_final_norm = not args.decoder_final_norm + + args.dropout = getattr(args, "dropout", 0.1) + args.attention_dropout = getattr(args, "attention_dropout", 0.0) + + args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) + args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048) + args.decoder_layers = getattr(args, "decoder_layers", 6) + args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) + args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) + args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) + args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4) + args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) + args.activation_fn = getattr(args, "activation_fn", "relu") + + args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) + args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) + + args.base_layers = getattr(args, "base_layers", 0) + args.base_sublayers = getattr(args, "base_sublayers", 1) + args.base_shuffle = getattr(args, "base_shuffle", False) + + args.add_bos_token = getattr(args, "add_bos_token", False) + args.no_token_positional_embeddings = getattr( + args, "no_token_positional_embeddings", False + ) + 
args.share_decoder_input_output_embed = getattr( + args, "share_decoder_input_output_embed", False + ) + args.character_embeddings = getattr(args, "character_embeddings", False) + + args.decoder_output_dim = getattr( + args, "decoder_output_dim", args.decoder_embed_dim + ) + args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) + + # Model training is not stable without this + args.decoder_normalize_before = True + args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False) + + args.adaptive_input = getattr(args, "adaptive_input", False) + args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4) + args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None) + + args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) + args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False) + + args.no_scale_embedding = getattr(args, "no_scale_embedding", False) + args.layernorm_embedding = getattr(args, "layernorm_embedding", False) + args.checkpoint_activations = getattr(args, "checkpoint_activations", False) + args.offload_activations = getattr(args, "offload_activations", False) + if args.offload_activations: + args.checkpoint_activations = True + diff --git a/examples/fairseq/models/machine_translation.py b/examples/fairseq/models/machine_translation.py new file mode 100644 index 0000000..009c1a0 --- /dev/null +++ b/examples/fairseq/models/machine_translation.py @@ -0,0 +1,450 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +import functools +import math +from typing import Any, Dict, List, Optional, Tuple + +import torch +import torch.nn as nn +from fairseq import utils +from fairseq.distributed import utils as dist_utils, fsdp_wrap +from fairseq import distributed_utils +from fairseq import checkpoint_utils +from fairseq.models import ( + FairseqEncoder, + FairseqEncoderDecoderModel, + FairseqIncrementalDecoder, + register_model, + register_model_architecture, +) +from fairseq.models.transformer import Embedding +from fairseq.modules import ( + AdaptiveSoftmax, + FairseqDropout, + LayerDropModuleList, + LayerNorm, + PositionalEmbedding, + SinusoidalPositionalEmbedding, +) +from fairseq.modules.checkpoint_activations import checkpoint_wrapper +from torchscale.architecture.encoder import Encoder +from torchscale.architecture.config import EncoderConfig, DecoderConfig +from .language_modeling import LMDecoder as MTDecoder + +from torch import Tensor +import logging +logger = logging.getLogger(__name__) + +DEFAULT_MAX_SOURCE_POSITIONS = 1024 +DEFAULT_MAX_TARGET_POSITIONS = 1024 +DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8) + + +@register_model("mt") +class TranslationModel(FairseqEncoderDecoderModel): + + def __init__(self, args, encoder, decoder): + super().__init__(encoder, decoder) + self.args = args + + @staticmethod + def add_args(parser): + """Add model-specific arguments to the parser.""" + # fmt: off + parser.add_argument('--activation-fn', + choices=utils.get_available_activation_fns(), + help='activation function to use') + parser.add_argument('--dropout', type=float, metavar='D', + help='dropout probability') + parser.add_argument('--attention-dropout', type=float, metavar='D', + help='dropout probability for attention weights') + parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', + help='dropout probability after activation in FFN.') + parser.add_argument('--encoder-embed-path', type=str, metavar='STR', + help='path to pre-trained encoder 
embedding') + parser.add_argument('--encoder-embed-dim', type=int, metavar='N', + help='encoder embedding dimension') + parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', + help='encoder embedding dimension for FFN') + parser.add_argument('--encoder-layers', type=int, metavar='N', + help='num encoder layers') + parser.add_argument('--encoder-attention-heads', type=int, metavar='N', + help='num encoder attention heads') + parser.add_argument('--encoder-normalize-before', action='store_true', + help='apply layernorm before each encoder block') + parser.add_argument('--encoder-learned-pos', action='store_true', + help='use learned positional embeddings in the encoder') + parser.add_argument('--decoder-embed-path', type=str, metavar='STR', + help='path to pre-trained decoder embedding') + parser.add_argument('--decoder-embed-dim', type=int, metavar='N', + help='decoder embedding dimension') + parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', + help='decoder embedding dimension for FFN') + parser.add_argument('--decoder-layers', type=int, metavar='N', + help='num decoder layers') + parser.add_argument('--decoder-attention-heads', type=int, metavar='N', + help='num decoder attention heads') + parser.add_argument('--decoder-learned-pos', action='store_true', + help='use learned positional embeddings in the decoder') + parser.add_argument('--decoder-normalize-before', action='store_true', + help='apply layernorm before each decoder block') + parser.add_argument('--decoder-output-dim', type=int, metavar='N', + help='decoder output dimension (extra linear layer ' + 'if different from decoder embed dim') + parser.add_argument('--share-decoder-input-output-embed', action='store_true', + help='share decoder input and output embeddings') + parser.add_argument('--share-all-embeddings', action='store_true', + help='share encoder, decoder and output embeddings' + ' (requires shared dictionary and embed dim)') + 
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', + help='if set, disables positional embeddings (outside self attention)') + parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', + help='comma separated list of adaptive softmax cutoff points. ' + 'Must be used with adaptive_loss criterion'), + parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', + help='sets adaptive softmax dropout for the tail projections') + parser.add_argument('--layernorm-embedding', action='store_true', + help='add layernorm to embedding') + parser.add_argument('--no-scale-embedding', action='store_true', + help='if True, dont scale embeddings') + parser.add_argument('--checkpoint-activations', action='store_true', + help='checkpoint activations at each layer, which saves GPU ' + 'memory usage at the cost of some additional compute') + parser.add_argument('--offload-activations', action='store_true', + help='checkpoint activations at each layer, then save to gpu. 
Sets --checkpoint-activations.') + # args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019) + parser.add_argument('--no-cross-attention', default=False, action='store_true', + help='do not perform cross-attention') + parser.add_argument('--cross-self-attention', default=False, action='store_true', + help='perform cross+self-attention') + # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) + parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0, + help='LayerDrop probability for encoder') + parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0, + help='LayerDrop probability for decoder') + parser.add_argument('--encoder-layers-to-keep', default=None, + help='which layers to *keep* when pruning as a comma-separated list') + parser.add_argument('--decoder-layers-to-keep', default=None, + help='which layers to *keep* when pruning as a comma-separated list') + # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) + parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0, + help='iterative PQ quantization noise at training time') + parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8, + help='block size of quantization noise at training time') + parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0, + help='scalar quantization noise and scalar quantization at training time') + # args for Fully Sharded Data Parallel (FSDP) training + parser.add_argument( + '--min-params-to-wrap', type=int, metavar='D', default=DEFAULT_MIN_PARAMS_TO_WRAP, + help=( + 'minimum number of params for a layer to be wrapped with FSDP() when ' + 'training with --ddp-backend=fully_sharded. Smaller values will ' + 'improve memory efficiency, but may make torch.distributed ' + 'communication less efficient due to smaller input sizes. 
This option ' + 'is set to 0 (i.e., always wrap) when --checkpoint-activations or ' + '--offload-activations are passed.' + ) + ) + # args for mixture-of-expert layers + parser.add_argument('--moe-freq', type=int, metavar='D', default=0, + help='Frequency at which we insert MoE Transformer layers') + parser.add_argument('--encoder-moe-freq', type=int, metavar='D', default=0, + help='Frequency at which we insert MoE Transformer encoder layers') + parser.add_argument('--decoder-moe-freq', type=int, metavar='D', default=0, + help='Frequency at which we insert MoE Transformer decoder layers') + parser.add_argument('--moe-expert-count', type=int, metavar='D', default=0, + help='Number of experts in each MoE Layer') + parser.add_argument('--moe-gating-use-fp32', default=False, action='store_true', + help="Use FP32 computations in MoE top2 gating function") + parser.add_argument('--moe-second-expert-policy', type=str, default='sampling', + help="policy for second expert, options: all/sampling/random") + parser.add_argument('--moe-normalize-gate-prob-before-dropping', default=False, action='store_true', + help="whether to normalize gate probs before or after dropping experts for capacity and randomization") + parser.add_argument('--moe-expert-ffn-dim', type=int, default=0, + help="MoE Expert FFN dimension") + parser.add_argument('--moe-top1-expert', default=False, action='store_true', + help="Use top1 gate instead of top2") + parser.add_argument('--moe-eval-capacity-token-fraction', type=float, default=0.25, + help="Fraction of tokens as capacity during validation" + \ + "if set to negative, use same as training. 
range: (0.0, 1.0].") + parser.add_argument('--moe-normalize-expert-grad', type=str, default='world_size', + help="Divide expert gradients by (1) 'world_size' (2) 'sqrt_world_size'") + + parser.add_argument('--use-moe-pad-mask', default=False, action='store_true', + help="Don't route padding tokens to any expert") + parser.add_argument('--use-xmoe', default=False, action='store_true', + help="Enable X-Moe") + parser.add_argument('--freeze-moe', default=False, action='store_true', + help="Freeze MoE Params") + parser.add_argument('--deepnorm', default=False, action='store_true', + help="Enable DeepNorm") + parser.add_argument('--subln', default=False, action='store_true', + help="Enable SubLN") + parser.add_argument('--pretrained-dense-mt-model-path', type=str, default='') + # args for pseudo-MoE layers + parser.add_argument('--alternate-ffn-embed-dim', type=int, default=0, + help="FFN embed dim of alternate pseudo-MoE blocks") + parser.add_argument('--rel-pos-buckets', type=int, default=0, + help='') + parser.add_argument('--max-rel-pos', type=int, default=0, + help='') + # fmt: on + + @classmethod + def build_model(cls, args, task): + """Build a new model instance.""" + + # make sure all arguments are present in older models + base_architecture(args) + + if getattr(args, "max_source_positions", None) is None: + args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS + if getattr(args, "max_target_positions", None) is None: + args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS + + args.ddp_rank = distributed_utils.get_data_parallel_rank() + + src_dict, tgt_dict = task.source_dictionary, task.target_dictionary + + if args.share_all_embeddings: + if src_dict != tgt_dict: + raise ValueError("--share-all-embeddings requires a joined dictionary") + if args.encoder_embed_dim != args.decoder_embed_dim: + raise ValueError( + "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" + ) + if args.decoder_embed_path and ( + 
args.decoder_embed_path != args.encoder_embed_path + ): + raise ValueError( + "--share-all-embeddings not compatible with --decoder-embed-path" + ) + encoder_embed_tokens = cls.build_embedding( + args, src_dict, args.encoder_embed_dim, args.encoder_embed_path + ) + decoder_embed_tokens = encoder_embed_tokens + args.share_decoder_input_output_embed = True + else: + encoder_embed_tokens = cls.build_embedding( + args, src_dict, args.encoder_embed_dim, args.encoder_embed_path + ) + decoder_embed_tokens = cls.build_embedding( + args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path + ) + if getattr(args, "offload_activations", False): + args.checkpoint_activations = True # offloading implies checkpointing + + encoder_embed_positions = ( + PositionalEmbedding( + args.max_source_positions, + args.encoder_embed_dim, + src_dict.pad(), + learned=args.encoder_learned_pos, + ) + if not args.no_token_positional_embeddings + else None + ) + + decoder_embed_positions = ( + PositionalEmbedding( + args.max_target_positions, + args.decoder_embed_dim, + tgt_dict.pad(), + learned=args.decoder_learned_pos, + ) + if not args.no_token_positional_embeddings + else None + ) + + if args.share_decoder_input_output_embed: + output_projection = torch.nn.Linear( + decoder_embed_tokens.weight.shape[1], + decoder_embed_tokens.weight.shape[0], + bias=False, + ) + output_projection.weight = decoder_embed_tokens.weight + else: + output_projection = torch.nn.Linear( + decoder_embed_dim, len(tgt_dict), bias=False + ) + torch.nn.init.normal_( + output_projection.weight, mean=0, std=decoder_embed_dim ** -0.5 + ) + + encoder = cls.build_encoder( + args, + encoder_embed_tokens, + encoder_embed_positions, + src_dict, + ) + decoder = cls.build_decoder( + args, + decoder_embed_tokens, + decoder_embed_positions, + output_projection, + tgt_dict, + ) + + if not args.share_all_embeddings: + min_params_to_wrap = getattr( + args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP + ) + # fsdp_wrap is a 
no-op when --ddp-backend != fully_sharded + encoder = fsdp_wrap(encoder, min_num_params=min_params_to_wrap) + decoder = fsdp_wrap(decoder, min_num_params=min_params_to_wrap) + return cls(args, encoder, decoder) + + @classmethod + def build_embedding(cls, args, dictionary, embed_dim, path=None): + num_embeddings = len(dictionary) + padding_idx = dictionary.pad() + emb = Embedding(num_embeddings, embed_dim, padding_idx) + # if provided, load from preloaded dictionaries + if path: + embed_dict = utils.parse_embedding(path) + utils.load_embedding(embed_dict, dictionary, emb) + return emb + + @classmethod + def build_encoder(cls, args, embed_tokens, embed_positions, dictionary): + config = EncoderConfig() + config.override(args) + + return MTEncoder( + config, + embed_tokens, + embed_positions, + is_encoder_decoder=True, + dictionary=dictionary, + ) + + @classmethod + def build_decoder(cls, args, embed_tokens, embed_positions, output_projection, dictionary): + config = DecoderConfig() + config.override(args) + + return MTDecoder( + config, + embed_tokens, + embed_positions, + output_projection, + is_encoder_decoder=True, + dictionary=dictionary, + ) + + def forward( + self, + src_tokens, + src_lengths, + prev_output_tokens, + return_all_hiddens: bool = False, + features_only: bool = False, + **kwargs + ): + encoder_out = self.encoder( + src_tokens, + return_all_hiddens=return_all_hiddens + ) + decoder_out = self.decoder( + prev_output_tokens, + encoder_out=encoder_out, + features_only=features_only, + return_all_hiddens=return_all_hiddens, + ) + return decoder_out + + def get_normalized_probs( + self, + net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], + log_probs: bool, + sample: Optional[Dict[str, Tensor]] = None, + ): + """Get normalized probabilities (or log probs) from a net's output.""" + return self.get_normalized_probs_scriptable(net_output, log_probs, sample) + + +class MTEncoder(Encoder, FairseqEncoder): + + def forward(self, src_tokens, 
**kwargs): + self_attn_padding_mask = src_tokens.eq(self.dictionary.pad()) + return super().forward(src_tokens=src_tokens, encoder_padding_mask=self_attn_padding_mask, **kwargs) + + def reorder_encoder_out(self, encoder_out, new_order): + new_encoder_out = encoder_out["encoder_out"].index_select(1, new_order) + new_encoder_embedding = encoder_out["encoder_embedding"].index_select(0, new_order) + new_encoder_padding_mask = encoder_out["encoder_padding_mask"].index_select(0, new_order) + + encoder_states = encoder_out["encoder_states"] + if len(encoder_states) > 0: + for idx, state in enumerate(encoder_states): + encoder_states[idx] = state.index_select(1, new_order) + + return { + "encoder_out": new_encoder_out, # T x B x C + "encoder_padding_mask": new_encoder_padding_mask, + "encoder_embedding": new_encoder_embedding, # B x T x C + "encoder_states": encoder_states, # List[T x B x C] + } + + def max_positions(self): + return self.embed_positions.max_positions + +@register_model_architecture("mt", "mt_base") +def base_architecture(args): + args.encoder_embed_path = getattr(args, "encoder_embed_path", None) + args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) + args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) + args.encoder_layers = getattr(args, "encoder_layers", 6) + args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) + args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) + args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) + args.decoder_embed_path = getattr(args, "decoder_embed_path", None) + args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) + args.decoder_ffn_embed_dim = getattr( + args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim + ) + args.decoder_layers = getattr(args, "decoder_layers", 6) + args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) + args.decoder_normalize_before = 
getattr(args, "decoder_normalize_before", False) + args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) + args.attention_dropout = getattr(args, "attention_dropout", 0.0) + args.activation_dropout = getattr(args, "activation_dropout", 0.0) + args.activation_fn = getattr(args, "activation_fn", "relu") + args.dropout = getattr(args, "dropout", 0.1) + args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) + args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) + args.share_decoder_input_output_embed = getattr( + args, "share_decoder_input_output_embed", False + ) + args.share_all_embeddings = getattr(args, "share_all_embeddings", False) + args.no_token_positional_embeddings = getattr( + args, "no_token_positional_embeddings", False + ) + args.adaptive_input = getattr(args, "adaptive_input", False) + args.no_cross_attention = getattr(args, "no_cross_attention", False) + args.cross_self_attention = getattr(args, "cross_self_attention", False) + + args.decoder_output_dim = getattr( + args, "decoder_output_dim", args.decoder_embed_dim + ) + args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) + + args.no_scale_embedding = getattr(args, "no_scale_embedding", False) + args.layernorm_embedding = getattr(args, "layernorm_embedding", False) + args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) + args.checkpoint_activations = getattr(args, "checkpoint_activations", False) + args.offload_activations = getattr(args, "offload_activations", False) + if args.offload_activations: + args.checkpoint_activations = True + args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None) + args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) + args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) + args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) + args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) + 
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) + args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0) + args.is_moe = getattr(args, "is_moe", False) + args.selected_expert_count = getattr(args, "selected_expert_count", 2) diff --git a/examples/fairseq/tasks/__init__.py b/examples/fairseq/tasks/__init__.py new file mode 100644 index 0000000..61e32ef --- /dev/null +++ b/examples/fairseq/tasks/__init__.py @@ -0,0 +1,32 @@ +import argparse +import importlib +import os + +# register dataclass +TASK_DATACLASS_REGISTRY = {} +TASK_REGISTRY = {} +TASK_CLASS_NAMES = set() + +# automatically import any Python files in the tasks/ directory +tasks_dir = os.path.dirname(__file__) +for file in os.listdir(tasks_dir): + path = os.path.join(tasks_dir, file) + if ( + not file.startswith("_") + and not file.startswith(".") + and (file.endswith(".py") or os.path.isdir(path)) + ): + task_name = file[: file.find(".py")] if file.endswith(".py") else file + module = importlib.import_module("tasks." 
+ task_name) + + # expose `task_parser` for sphinx + if task_name in TASK_REGISTRY: + parser = argparse.ArgumentParser(add_help=False) + group_task = parser.add_argument_group("Task name") + # fmt: off + group_task.add_argument('--task', metavar=task_name, + help='Enable this task with: ``--task=' + task_name + '``') + # fmt: on + group_args = parser.add_argument_group("Additional command-line arguments") + TASK_REGISTRY[task_name].add_args(group_args) + globals()[task_name + "_parser"] = parser \ No newline at end of file diff --git a/examples/fairseq/tasks/data/__init__.py b/examples/fairseq/tasks/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/fairseq/tasks/data/basic_loader.py b/examples/fairseq/tasks/data/basic_loader.py new file mode 100644 index 0000000..089dd07 --- /dev/null +++ b/examples/fairseq/tasks/data/basic_loader.py @@ -0,0 +1,78 @@ +import math +import re +import sys +import time +import torch +from infinibatch.iterators import CheckpointableIterator +from . 
class BaseBatchGen(CheckpointableIterator):
    """Base class for batch generators built on infinibatch.

    Subclasses implement :meth:`_build_iter` to assemble an infinibatch
    pipeline and assign it to ``self._iter``.  The object then acts as a
    fairseq-style epoch iterator facade over that single, effectively
    infinite, checkpointable stream.
    """

    def __init__(self):
        self._iter = None  # populated by the subclass's _build_iter()
        self.epoch = 1
        self.next_epoch_idx = 1
        self.sharded_checkpoint = True
        self.should_close_after_finished = True

    def _build_iter(self):
        """Build the infinibatch iterator and assign it to ``self._iter``."""
        raise NotImplementedError()

    def _move_to_tensor(self, batch):
        """Recursively convert every array leaf of *batch* to a torch.Tensor."""

        def to_tensor(x):
            return torch.tensor(x)

        return utils.apply_to_sample(to_tensor, batch)

    @property
    def iterator(self):
        if self._iter is None:
            # Bug fix: message previously read "must called first".
            raise NotImplementedError("_build_iter() must be called first")
        return self._iter

    def __iter__(self):
        if self._iter is None:
            raise NotImplementedError("_build_iter() must be called first")
        return self._iter

    def __next__(self):
        return next(self._iter)

    def setstate(self, value):
        self._iter.setstate(value)

    def getstate(self):
        return self._iter.getstate()

    def close(self):
        self._iter.close()

    def __len__(self) -> int:
        # Arbitrary large value: the underlying stream is effectively
        # infinite, but fairseq expects a finite __len__.
        return 819200000

    def next_epoch_itr(
        self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
    ):
        # The stream never ends, so "the next epoch" is this same iterator.
        return self

    def end_of_epoch(self) -> bool:
        return False

    def state_dict(self):
        """Returns a dictionary containing a whole state of the iterator."""
        return self.getstate()

    def load_state_dict(self, state_dict):
        """Copies the state of the iterator from the given *state_dict*."""
        self.setstate(state_dict)

    @property
    def first_batch(self):
        # Sentinel understood by fairseq's trainer; a real first batch is
        # not cheaply available from an infinibatch stream.
        return "DUMMY"
class MLMLoader(BaseBatchGen):
    """Infinibatch-backed loader that yields masked-LM training batches.

    Each source document is tokenized, then turned into both a BERT-style
    masked-LM pair and a T5-style span-corruption pair.  Only the MLM pair
    is emitted in the final batch dict; the s2s (span-corruption) tensors
    are built but currently unused -- see ``collate`` below.
    """

    def __init__(
        self,
        args,
        dataset,
        dictionary,
        tokenizer,
        max_tokens=None,
        max_sentences=None,
        max_positions=None,
        ignore_invalid_inputs=False,
        required_batch_size_multiple=1,
        seed=1,
        num_shards=1,
        shard_id=0,
    ):
        super().__init__()
        self.args = args
        self.data = dataset.data
        self.data_dir = dataset.data_dir
        self.shuffle = dataset.shuffle
        self.dictionary = dictionary
        self.tokenizer = tokenizer

        self.max_tokens = max_tokens
        self.max_sentences = max_sentences
        self.max_positions = max_positions
        self.tokens_per_sample = args.tokens_per_sample
        self.sample_break_mode = args.sample_break_mode
        self.ignore_invalid_inputs = ignore_invalid_inputs
        self.required_batch_size_multiple = required_batch_size_multiple
        # infinibatch accepts string seeds; keep a str for all sub-iterators.
        self.seed = str(seed)
        self.num_shards = num_shards
        self.shard_id = shard_id

        self.batch_read_ahead = args.batch_read_ahead

        self._build_iter()

    def _build_iter(self):
        """Assemble the pipeline: tokenize -> batchify -> prefetch -> tensors."""
        tokenized_lines = self._multilingual_tokenize()
        self.padded_batches = self._batchify(tokenized_lines)

        prefetch_batches = iterators.PrefetchIterator(
            self.padded_batches,
            buffer_size=10000,
            buffer_in_main_process=True,
            log_empty_buffer_warning=True and self.shard_id == 0,
        )

        prefetch_batches = iterators.MapIterator(
            prefetch_batches, self._move_to_tensor
        )

        self._iter = prefetch_batches

    def _multilingual_tokenize(self):
        """Multiplex per-corpus token streams, sampled by weight (or count)."""
        multilingual_iters = []
        weights = []

        for data in self.data:
            multilingual_iters.append(
                self._tokenize(data)
            )
            if 'weight' in data:
                weights.append(float(data['weight']))
            else:
                weights.append(int(data['count']))

        if len(multilingual_iters) == 1:
            return multilingual_iters[0]

        # BUG FIX: WeightIterator requires a seed argument
        # (see utils.WeightIterator.__init__); omitting it made
        # multi-corpus training crash with a TypeError.
        sampling_iterator = WeightIterator(weights, self.seed)
        control_iterator = NativeCheckpointableIterator(sampling_iterator)
        tokenized_lines = iterators.MultiplexIterator(control_iterator, multilingual_iters)

        return tokenized_lines

    def _tokenize(self, data):
        '''
        data:
        {
            'source': list[Path],
            'source_lang': str,
            'count': int,
            'weight': float,
            'name': str,
        }
        '''
        dataset = list(
            zip(
                data['source'],
                itertools.repeat(data['source_lang']),
            )
        )

        # Shuffled training shards use an infinite permutation; evaluation
        # walks the shard list once in order.
        if self.shuffle:
            chunk_files = \
                iterators.InfinitePermutationSourceIterator(
                    dataset,
                    seed=self.seed,
                    shuffle=self.shuffle,
                    num_instances=self.num_shards,
                    instance_rank=self.shard_id,
                )
        else:
            chunk_files = \
                iterators.ChunkedSourceIterator(
                    dataset,
                    num_instances=self.num_shards,
                    instance_rank=self.shard_id,
                )

        tokenized_lines = iterators.SelectManyIterator(chunk_files, lambda files: self._read_from_files(*files))
        tokenized_lines = iterators.SamplingRandomMapIterator(tokenized_lines, self._prepare, self.seed)

        return tokenized_lines

    def _batchify(self, lines):
        """Group prepared samples into padded numpy batches."""

        if self.max_sentences is not None:
            if self.batch_read_ahead > 0:
                lines = iterators.BlockwiseShuffleIterator(lines, self.batch_read_ahead, self.seed)
            batches = iterators.FixedBatchIterator(lines, self.max_sentences)
        else:
            # Token-budget batching: batch size is rounded down to a multiple
            # of required_batch_size_multiple, with a floor of 1.
            def dynamic_batch_size(sample):
                lengths = [len(x) for x in sample]
                batch_size = self.max_tokens // max(lengths) // self.required_batch_size_multiple * self.required_batch_size_multiple
                return max(1, batch_size)

            batches = iterators.BucketedReadaheadBatchIterator(
                lines,
                read_ahead=self.batch_read_ahead,
                key=(lambda x: max(len(x[0]), len(x[1]))) if self.shuffle else None,
                batch_size=dynamic_batch_size,
                shuffle=self.shuffle,
                seed=self.seed,
            )

        def collate(batch):
            # batch items are (mlm_input, mlm_labels, s2s_input, s2s_labels)
            # as produced by _prepare().
            batch_size = len(batch)

            mlm_source_max_length = max([len(x[0]) for x in batch])
            mlm_target_max_length = max([len(x[1]) for x in batch])
            s2s_source_max_length = max([len(x[2]) for x in batch])
            s2s_target_max_length = max([len(x[3]) for x in batch])

            mlm_source_ids = np.full(shape=(batch_size, mlm_source_max_length), dtype=np.int32,
                                     fill_value=self.dictionary.pad())
            mlm_target_ids = np.full(shape=(batch_size, mlm_target_max_length), dtype=np.int32,
                                     fill_value=self.dictionary.pad())
            s2s_source_ids = np.full(shape=(batch_size, s2s_source_max_length), dtype=np.int32,
                                     fill_value=self.dictionary.pad())
            s2s_target_ids = np.full(shape=(batch_size, s2s_target_max_length - 1), dtype=np.int32,
                                     fill_value=self.dictionary.pad())
            s2s_prev_input_ids = np.full(shape=(batch_size, s2s_target_max_length - 1), dtype=np.int32,
                                         fill_value=self.dictionary.pad())

            for i, (mlm_input_ids, mlm_label_ids, s2s_input_ids, s2s_label_ids) in enumerate(batch):
                mlm_source_ids[i, :len(mlm_input_ids)] = mlm_input_ids
                mlm_target_ids[i, :len(mlm_label_ids)] = mlm_label_ids
                s2s_source_ids[i, :len(s2s_input_ids)] = s2s_input_ids
                # Shift s2s labels for teacher forcing.
                s2s_target_ids[i, :len(s2s_label_ids) - 1] = s2s_label_ids[1:]
                s2s_prev_input_ids[i, :len(s2s_label_ids) - 1] = s2s_label_ids[:-1]

            # NOTE(review): the s2s_* arrays are computed but deliberately(?)
            # not returned -- only the MLM view is trained on; confirm.
            ret_batch = {
                'net_input': {
                    'src_tokens': mlm_source_ids.astype(np.int64),
                },
                'target': mlm_target_ids.astype(np.int64),
                'nsentences': batch_size,
                'ntokens': sum([len(x[0]) for x in batch]),
            }

            return ret_batch

        padded_batches = iterators.MapIterator(
            batches, collate
        )

        return padded_batches

    def _prepare(self, _random, doc):
        """Build both training views of one document (MLM + span corruption)."""
        nonmasked_tokens, masked_tokens = self._mask_lm(_random, doc)
        nonnoise_spans, noise_spans = self._span_corruption(_random, doc)
        return nonmasked_tokens, masked_tokens, nonnoise_spans, noise_spans

    def _mask_lm(self, _random, doc):
        """BERT-style masking.

        Returns (input doc with mask_prob of its tokens replaced by the mask
        symbol, per-position labels holding the original token at masked
        positions and pad elsewhere).
        """
        def mask_tokens():
            # NOTE(review): this literal looks garbled -- it presumably should
            # produce the mask symbol added in the task setup (cf. the
            # dictionary.add_symbol calls in pretraining.py); confirm against
            # the dictionary.  Kept byte-identical here.
            return f""

        length = len(doc)
        mask_tokens_num = int(length * self.args.mask_prob)
        # Always mask at least one token and leave at least one unmasked.
        mask_tokens_num = min(max(mask_tokens_num, 1), length - 1)
        possible_mask_positions = _random.sample(range(length), k=mask_tokens_num)
        possible_mask_positions = sorted(possible_mask_positions)

        nonmasked_tokens = copy.deepcopy(doc)
        masked_tokens = [self.dictionary.pad() for _ in range(len(doc))]

        for position in possible_mask_positions:
            masked_tokens[position] = nonmasked_tokens[position]
            nonmasked_tokens[position] = self.dictionary.indices[mask_tokens()]

        return nonmasked_tokens, masked_tokens

    def _span_corruption(self, _random, doc):
        """T5-style span corruption.

        Splits ~mask_prob of the tokens into noise spans (average length
        args.span_length) and returns (source with spans replaced by
        sentinels, concatenated noise spans prefixed by their sentinels).
        """

        def mask_tokens(i):
            # NOTE(review): garbled literal, as in _mask_lm -- presumably a
            # per-span sentinel symbol; confirm.  Kept byte-identical.
            return f""

        length = len(doc)
        noise_tokens_num = int(length * self.args.mask_prob)
        noise_tokens_num = min(max(noise_tokens_num, 1), length - 1)
        noise_spans_num = int(noise_tokens_num / self.args.span_length)
        noise_spans_num = max(noise_spans_num, 1)
        nonnoise_tokens_num = length - noise_tokens_num

        # Randomly split noise_tokens_num tokens into noise_spans_num spans.
        if noise_spans_num == 1:
            noise_split_positions = [0, noise_tokens_num]
        else:
            possible_split_positions = list(range(1, noise_tokens_num))
            _random.shuffle(possible_split_positions)
            noise_split_positions = sorted(possible_split_positions[:noise_spans_num - 1])
            noise_split_positions = [0] + noise_split_positions + [noise_tokens_num]

        possible_insert_positions = list(range(nonnoise_tokens_num))
        _random.shuffle(possible_insert_positions)
        noise_insert_positions = sorted(possible_insert_positions[:noise_spans_num])

        nonnoise_spans, noise_spans = [], []
        last_end = 0
        for i in range(noise_spans_num):
            start_pos = noise_insert_positions[i] + noise_split_positions[i]
            end_pos = noise_insert_positions[i] + noise_split_positions[i + 1]
            mask_id = self.dictionary.indices[mask_tokens(i)]

            if getattr(self.args, "remove_target_sentinel", False):
                noise_spans.append(doc[start_pos:end_pos])
            else:
                noise_spans.append([mask_id] + doc[start_pos:end_pos])

            if getattr(self.args, "remove_source_sentinel", False):
                nonnoise_spans.extend(doc[last_end:start_pos])
            else:
                nonnoise_spans.extend(doc[last_end:start_pos] + [mask_id])

            last_end = end_pos

        nonnoise_spans.extend(doc[last_end:])
        noise_spans = sum(noise_spans, [])

        return nonnoise_spans, noise_spans

    def _read_from_files(self, source_file, source_lang):
        """Yield BOS-prefixed token-id documents from one shard file.

        Documents are packed up to tokens_per_sample; overlong lines are
        dropped; blank lines end a document when sample_break_mode is
        'complete_doc'.
        """
        file_path = os.path.join(self.data_dir, source_file)

        if not os.path.exists(file_path):
            print('| file {} not exists'.format(file_path), flush=True)
            return iter([])  # skip bad file

        with open(file_path, 'r', encoding='utf8') as f:
            lines = f.read().strip().split('\n')

        doc = [self.dictionary.bos()]
        for line in lines:
            if line == "":
                if self.sample_break_mode == 'complete_doc':
                    yield doc
                    doc = [self.dictionary.bos()]
                continue

            tokenized_line = self.tokenizer.EncodeAsPieces(line)
            tokenized_id = [self.dictionary.index(token) for token in tokenized_line] + [self.dictionary.eos_index]

            # Drop lines that could never fit in a sample.
            if len(tokenized_id) > self.tokens_per_sample:
                continue
            if len(doc) + len(tokenized_id) > self.tokens_per_sample:
                yield doc
                doc = [self.dictionary.bos()]
            doc.extend(tokenized_id)

        # Flush the trailing (non-empty, in-budget) document.
        if len(doc) > 1 and len(doc) <= self.tokens_per_sample:
            yield doc
class NativeCheckpointableIterator(iterators.CheckpointableIterator):
    """Wrap a plain iterable as an infinibatch checkpointable iterator.

    The checkpoint only records how many items have been consumed; restoring
    re-iterates the source from the start and fast-forwards, so the wrapped
    iterable must be re-iterable and deterministic.
    """

    def __init__(self, iterable: Iterable):
        self._source = iterable
        self.setstate(None)

    def getstate(self) -> Dict:
        return {'num_items_yielded': self._consumed}

    def setstate(self, checkpoint: Optional[Dict]):
        self._it = iter(self._source)
        if checkpoint is None:
            self._consumed = 0
        else:
            # Replay from the beginning up to the checkpointed position.
            self._consumed = iterators._advance_iterator(
                self._it, checkpoint['num_items_yielded']
            )

    def __next__(self):
        item = next(self._it)
        self._consumed += 1
        return item

    def close(self):
        pass


class WeightIterator(object):
    """Endless iterator of corpus indices drawn with the given weights.

    Checkpointing captures the RNG state; the RNG itself is created lazily
    on first use so a restored iterator resumes the exact sample sequence.
    """

    def __init__(self, weights, seed):
        self.weights = weights
        self.seed = seed
        self.control_index = list(range(len(weights)))
        self.setstate(None)

    def __iter__(self):
        return self

    def getstate(self):
        return {"random_state": self._random_state}

    def setstate(self, checkpoint):
        self._random_state = checkpoint["random_state"] if checkpoint else None
        # Defer RNG construction; __next__ rebuilds it from seed + state.
        self._random = None

    def __next__(self):
        if self._random is None:
            self._random = Random(self.seed)
            if self._random_state is not None:
                self._random.setstate(self._random_state)
        idx = self._random.choices(self.control_index, self.weights)[0]
        self._random_state = self._random.getstate()
        return idx

    def close(self):
        pass
+from dataclasses import dataclass, field +import logging +import os +from argparse import Namespace +import json +from omegaconf import MISSING, II, OmegaConf +from typing import Any + +import numpy as np +from fairseq import utils +from fairseq.data import Dictionary +from fairseq.tasks import FairseqTask, register_task +from .data.mlm_loader import MLMLoader +from fairseq.dataclass import FairseqDataclass, ChoiceEnum +from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE +import sentencepiece as spm + +logger = logging.getLogger(__name__) + +SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"]) +SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"]) + +@dataclass +class PretrainingConfig(FairseqDataclass): + data: str = field( + default=MISSING, + metadata={ + "help": "colon separated path to data directories list, \ + will be iterated upon during epochs in round-robin manner" + }, + ) + sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field( + default="complete", + metadata={ + "help": 'If omitted or "none", fills each sample with tokens-per-sample ' + 'tokens. If set to "complete", splits samples only at the end ' + "of sentence, but may include multiple sentences per sample. " + '"complete_doc" is similar but respects doc boundaries. ' + 'If set to "eos", includes only one sentence per sample.' 
+ }, + ) + tokens_per_sample: int = field( + default=1024, + metadata={"help": "max number of tokens per sample for LM dataset"}, + ) + mask_prob: float = field( + default=0.15, + metadata={"help": "probability of replacing a token with mask"}, + ) + leave_unmasked_prob: float = field( + default=0.1, + metadata={"help": "probability that a masked token is unmasked"}, + ) + random_token_prob: float = field( + default=0.1, + metadata={"help": "probability of replacing a token with a random token"}, + ) + freq_weighted_replacement: bool = field( + default=False, + metadata={"help": "sample random replacement words based on word frequencies"}, + ) + mask_whole_words: bool = field( + default=False, + metadata={"help": "mask whole words; you may also want to set --bpe"}, + ) + mask_multiple_length: int = field( + default=1, + metadata={"help": "repeat the mask indices multiple times"}, + ) + mask_stdev: float = field( + default=0.0, + metadata={"help": "stdev of the mask length"}, + ) + shorten_method: SHORTEN_METHOD_CHOICES = field( + default="none", + metadata={ + "help": "if not none, shorten sequences that exceed --tokens-per-sample" + }, + ) + shorten_data_split_list: str = field( + default="", + metadata={ + "help": "comma-separated list of dataset splits to apply shortening to, " + 'e.g., "train,valid" (default: all dataset splits)' + }, + ) + seed: int = II("common.seed") + span_length: float = field( + default=3.0, + metadata={"help": "average span length for masking"}, + ) + remove_source_sentinel: bool = field( + default=False, + metadata={"help": "remove the source sentinel for the span corruption task"}, + ) + remove_target_sentinel: bool = field( + default=False, + metadata={"help": "remove the target sentinel for the span corruption task"}, + ) + batch_read_ahead: int = field( + default=100000, + metadata={"help": "batch read ahead size for infinibatch"}, + ) + required_batch_size_multiple: int = II("dataset.required_batch_size_multiple") + spm_model: str 
= field( + default="", + metadata={ + "help": "sentencepice model to tokenize the data" + }, + ) + dict_file: str = field( + default="", + metadata={ + "help": "" + }, + ) + + +@register_task("pretraining", dataclass=PretrainingConfig) +class PLMTask(FairseqTask): + + def __init__(self, cfg, dictionary, tokenizer): + super().__init__(cfg) + self.cfg = cfg + self.dictionary = dictionary + self.tokenizer = tokenizer + self.seed = cfg.seed + self.mask_idx = dictionary.index("") + + @classmethod + def setup_task(cls, cfg, **kwargs): + paths = utils.split_paths(cfg.data) + assert len(paths) > 0 + if cfg.dict_file != "": + dictionary = Dictionary.load(cfg.dict_file) + else: + dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) + + # add mask token + dictionary.add_symbol("") + for i in range(100): + dictionary.add_symbol(f"") + + dictionary.pad_to_multiple_(cfg.required_batch_size_multiple) + logger.info("dictionary: {} types".format(len(dictionary))) + + # tokenizer = SentencepieceBPE(Namespace(sentencepiece_model=cfg.spm_model)) + tokenizer = spm.SentencePieceProcessor() + tokenizer.Load(cfg.spm_model) + return cls(cfg, dictionary, tokenizer) + + def load_dataset(self, split, epoch=1, combine=False, **kwargs): + self.datasets[split] = { + 'data': json.load(open(f'{self.cfg.data}/json/{split}.json')), + 'data_dir': self.cfg.data, + 'shuffle': True if split == 'train' else False, + } + self.datasets[split] = Namespace(**self.datasets[split]) + + def dataset(self, split): + if split not in self.datasets: + raise KeyError("Dataset not loaded: " + split) + + return self.datasets[split] + + def get_batch_iterator( + self, + dataset, + max_tokens=None, + max_sentences=None, + max_positions=None, + ignore_invalid_inputs=False, + required_batch_size_multiple=1, + seed=1, + num_shards=1, + shard_id=0, + num_workers=0, + epoch=1, + data_buffer_size=0, + disable_iterator_cache=False, + ): + return MLMLoader( + self.cfg, + dataset, + self.dictionary, + self.tokenizer, 
@torch.no_grad()
def clip_grad_norm_(params, max_norm, moe_expert_count, aggregate_norm_fn=None) -> torch.Tensor:
    """Clip gradients in place, handling MoE expert / sharded params.

    Dense grads are measured directly; expert grads are pre-scaled by
    sqrt(max(world_size, moe_expert_count)) to compensate for replication,
    and expert/sharded norms are all-reduced across workers before being
    combined with the dense norm.  Returns the total norm (after
    ``aggregate_norm_fn``, if given) as a float32 tensor.
    """

    def grad_exists(p):
        return p is not None and getattr(p, "grad", None) is not None

    if isinstance(params, torch.Tensor):
        params = [params]
    params = [p for p in params if grad_exists(p)]

    grads, expert_grads, base_expert_grads, sharded_grads = [], [], [], []
    # BUG FIX: torch.distributed has no `get_global_world_size()`; use
    # get_world_size() and fall back to 1 when the default process group is
    # not initialized (e.g. single-process runs).
    world_size = dist.get_world_size() if dist.is_initialized() else 1
    denom = math.sqrt(max(world_size, moe_expert_count))
    for p in params:
        if hasattr(p, "expert"):
            # Expert grads are replicated per worker; pre-scale so the
            # all-reduced norm below is not over-counted.
            expert_grads.append(p.grad.detach() / denom)
        elif hasattr(p, "base_expert"):
            base_expert_grads.append(p.grad.detach())
        elif hasattr(p, "_is_sharded"):
            sharded_grads.append(p.grad.detach())
        else:
            grads.append(p.grad.detach())

    # Norm of the dense (fully replicated) grads.
    if len(grads) == 0:
        if len(params) > 0:
            total_norm = params[0].new_tensor(0.0)
        else:
            total_norm = torch.tensor(0.0)
    elif len(grads) == 1:
        total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
    else:
        if multi_tensor_l2norm_available:
            total_norm = multi_tensor_total_norm(grads)
        else:
            if torch.cuda.is_available():
                warnings.warn(
                    "amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
                    "you may get better performance by installing NVIDIA's apex library"
                )
                device = torch.cuda.current_device()
            elif grads[0].device.type == "xla":
                device = grads[0].device
            else:
                device = torch.device("cpu")
            total_norm = torch.norm(
                torch.stack(
                    [torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads]
                )
            )

    # Expert/sharded grads live on different workers: square, all-reduce,
    # sqrt to get their global norms, then combine with the dense norm.
    norms = [total_norm]
    for split_grads in [expert_grads, sharded_grads]:
        if len(split_grads) == 0:
            continue
        split_norm = torch.norm(
            torch.stack([torch.norm(g, p=2, dtype=torch.float32) for g in split_grads])
        )
        if dist.is_initialized():
            split_norm.pow_(2)
            dist.all_reduce(split_norm)
            split_norm.sqrt_()
        norms.append(split_norm)
    if len(norms) > 1:
        total_norm = torch.norm(torch.stack(norms))

    if aggregate_norm_fn is not None:
        total_norm = aggregate_norm_fn(total_norm)

    if max_norm > 0:
        max_norm = float(max_norm)
        clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
        # NOTE(review): base_expert_grads are clipped with the shared
        # coefficient but excluded from the norm computation above --
        # presumably intentional; confirm.
        for g in grads + expert_grads + sharded_grads + base_expert_grads:
            g.mul_(clip_coef)
    return total_norm
encoding='utf-8').read(), + long_description_content_type="text/markdown", + keywords="Transformers at any scale", + license="MIT", + url="https://github.com/msranlp/torchscale", + packages=find_packages(exclude=["*.tests", "*.tests.*", + "tests.*", "tests"]), + install_requires=['apex', + 'torch>=1.8', + 'fairscale==0.4.0', + 'timm==0.4.12'], + python_requires='>=3.8.0', + classifiers=[ + 'Programming Language :: Python :: 3', + ], +) \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_decoder.py b/tests/test_decoder.py new file mode 100644 index 0000000..257f2c0 --- /dev/null +++ b/tests/test_decoder.py @@ -0,0 +1,33 @@ +import pytest +from torchscale.architecture.config import DecoderConfig +from torchscale.architecture.decoder import Decoder +import torch + +testcases = [ + {}, + {"vocab_size": 64000}, + {"activation_fn": "relu"}, + {"drop_path_rate": 0.1}, + {"decoder_normalize_before": False}, + {"no_scale_embedding": False}, + {"layernorm_embedding": True}, + {"rel_pos_buckets": 32, "max_rel_pos": 256}, + {"deepnorm": True, "subln": False, "decoder_normalize_before": False}, + {"bert_init": True}, + {"multiway": True}, + {"share_decoder_input_output_embed": True}, + {"checkpoint_activations": True}, + {"fsdp": True} +] + +@pytest.mark.parametrize("args", testcases) +def test_decoder(args): + config = DecoderConfig(**args) + model = Decoder(config) + prev_output_tokens = torch.ones(2, 10) + token_embeddings = torch.rand(2, 10, config.decoder_embed_dim) + model( + prev_output_tokens=prev_output_tokens, + token_embeddings=token_embeddings, + features_only=True, + ) diff --git a/tests/test_encoder.py b/tests/test_encoder.py new file mode 100644 index 0000000..922b881 --- /dev/null +++ b/tests/test_encoder.py @@ -0,0 +1,28 @@ +import pytest +from torchscale.architecture.config import EncoderConfig +from torchscale.architecture.encoder import Encoder +import torch + 
+testcases = [ + {}, + {"vocab_size": 64000}, + {"activation_fn": "relu"}, + {"drop_path_rate": 0.1}, + {"encoder_normalize_before": False}, + {"no_scale_embedding": False}, + {"layernorm_embedding": True}, + {"rel_pos_buckets": 32, "max_rel_pos": 256}, + {"deepnorm": True, "subln": False, "encoder_normalize_before": False}, + {"bert_init": True}, + {"multiway": True}, + {"share_encoder_input_output_embed": True}, + {"checkpoint_activations": True}, + {"fsdp": True} +] + +@pytest.mark.parametrize("args", testcases) +def test_encoder(args): + config = EncoderConfig(**args) + model = Encoder(config) + token_embeddings = torch.rand(2, 10, config.encoder_embed_dim) + model(src_tokens=None, token_embeddings=token_embeddings) diff --git a/tests/test_encoder_decoder.py b/tests/test_encoder_decoder.py new file mode 100644 index 0000000..9855fa0 --- /dev/null +++ b/tests/test_encoder_decoder.py @@ -0,0 +1,43 @@ +import pytest +from torchscale.architecture.config import EncoderDecoderConfig +from torchscale.architecture.encoder_decoder import EncoderDecoder +from torchscale.component.embedding import TextEmbedding, PositionalEmbedding +import torch + +testcases = [ + {}, + {"vocab_size": 64000}, + {"activation_fn": "relu"}, + {"drop_path_rate": 0.1}, + {"encoder_normalize_before": False, "decoder_normalize_before": False}, + {"no_scale_embedding": False}, + {"layernorm_embedding": True}, + {"rel_pos_buckets": 32, "max_rel_pos": 256}, + {"deepnorm": True, "subln": False, "encoder_normalize_before": False, "decoder_normalize_before": False}, + {"bert_init": True}, + {"multiway": True}, + {"share_decoder_input_output_embed": True}, + {"share_all_embeddings": True}, + {"checkpoint_activations": True}, + {"fsdp": True} +] + +@pytest.mark.parametrize("args", testcases) +def test_decoder(args): + config = EncoderDecoderConfig(**args) + model = EncoderDecoder( + config, + encoder_embed_tokens=TextEmbedding(64000, config.encoder_embed_dim), + decoder_embed_tokens=TextEmbedding(64000, 
config.decoder_embed_dim), + encoder_embed_positions=PositionalEmbedding(config.max_source_positions, config.encoder_embed_dim), + decoder_embed_positions=PositionalEmbedding(config.max_target_positions, config.decoder_embed_dim), + ) + + src_tokens = torch.ones(2, 20).long() + prev_output_tokens = torch.ones(2, 10).long() + + model( + src_tokens=src_tokens, + prev_output_tokens=prev_output_tokens, + features_only=True, + ) diff --git a/torchscale/__init__.py b/torchscale/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/torchscale/architecture/__init__.py b/torchscale/architecture/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/torchscale/architecture/config.py b/torchscale/architecture/config.py new file mode 100644 index 0000000..ad1621a --- /dev/null +++ b/torchscale/architecture/config.py @@ -0,0 +1,160 @@ + + +class EncoderConfig(object): + def __init__(self, **kwargs): + self.encoder_embed_dim = kwargs.pop("encoder_embed_dim", 768) + self.encoder_attention_heads = kwargs.pop("encoder_attention_heads", 12) + self.encoder_ffn_embed_dim = kwargs.pop("encoder_ffn_embed_dim", 3072) + self.encoder_layers = kwargs.pop("encoder_layers", 12) + self.encoder_normalize_before = kwargs.pop("encoder_normalize_before", True) + self.activation_fn = kwargs.pop("activation_fn", "gelu") + self.dropout = kwargs.pop("dropout", 0.0) + self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0) + self.attention_dropout = kwargs.pop("attention_dropout", 0.0) + self.activation_dropout = kwargs.pop("activation_dropout", 0.0) + self.no_scale_embedding = kwargs.pop("no_scale_embedding", True) + self.layernorm_embedding = kwargs.pop("layernorm_embedding", False) + self.moe_freq = kwargs.pop("moe_freq", 0) + self.moe_top1_expert = kwargs.pop("moe_top1_expert", False) + self.moe_expert_count = kwargs.pop("moe_expert_count", 0) + self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True) + self.moe_eval_capacity_token_fraction = 
kwargs.pop("moe_eval_capacity_token_fraction", 0.25) + self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random") + self.moe_normalize_gate_prob_before_dropping = kwargs.pop("moe_normalize_gate_prob_before_dropping", False) + self.use_xmoe = kwargs.pop("use_xmoe", True) + self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0) + self.max_rel_pos = kwargs.pop("max_rel_pos", 0) + self.deepnorm = kwargs.pop("deepnorm", False) + self.subln = kwargs.pop("subln", True) + self.bert_init = kwargs.pop("bert_init", False) + self.multiway = kwargs.pop("multiway", False) + self.share_encoder_input_output_embed = kwargs.pop("share_encoder_input_output_embed", False) + self.max_source_positions = kwargs.pop("max_source_positions", 1024) + self.no_output_layer = kwargs.pop("no_output_layer", False) + # Text + self.vocab_size = kwargs.pop("vocab_size", -1) + # Vision + self.img_size = kwargs.pop("img_size", 224) + self.patch_size = kwargs.pop("patch_size", 16) + self.in_chans = kwargs.pop("in_chans", 3) + # Fairscale + self.checkpoint_activations = kwargs.pop("checkpoint_activations", False) + self.fsdp = kwargs.pop("fsdp", False) + self.ddp_rank = kwargs.pop("ddp_rank", 0) + + if self.deepnorm: + self.encoder_normalize_before = False + if self.subln: + self.encoder_normalize_before = True + + def override(self, args): + for hp in self.__dict__.keys(): + if getattr(args, hp, None) is not None: + self.__dict__[hp] = getattr(args, hp, None) + + +class DecoderConfig(object): + def __init__(self, **kwargs): + self.decoder_embed_dim = kwargs.pop("decoder_embed_dim", 768) + self.decoder_attention_heads = kwargs.pop("decoder_attention_heads", 12) + self.decoder_ffn_embed_dim = kwargs.pop("decoder_ffn_embed_dim", 3072) + self.decoder_layers = kwargs.pop("decoder_layers", 12) + self.decoder_normalize_before = kwargs.pop("decoder_normalize_before", True) + self.activation_fn = kwargs.pop("activation_fn", "gelu") + self.dropout = kwargs.pop("dropout", 0.0) + 
self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0) + self.attention_dropout = kwargs.pop("attention_dropout", 0.0) + self.activation_dropout = kwargs.pop("activation_dropout", 0.0) + self.no_scale_embedding = kwargs.pop("no_scale_embedding", True) + self.layernorm_embedding = kwargs.pop("layernorm_embedding", False) + self.moe_freq = kwargs.pop("moe_freq", 0) + self.moe_top1_expert = kwargs.pop("moe_top1_expert", False) + self.moe_expert_count = kwargs.pop("moe_expert_count", 0) + self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True) + self.moe_eval_capacity_token_fraction = kwargs.pop("moe_eval_capacity_token_fraction", 0.25) + self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random") + self.moe_normalize_gate_prob_before_dropping = kwargs.pop("moe_normalize_gate_prob_before_dropping", False) + self.use_xmoe = kwargs.pop("use_xmoe", True) + self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0) + self.max_rel_pos = kwargs.pop("max_rel_pos", 0) + self.deepnorm = kwargs.pop("deepnorm", False) + self.subln = kwargs.pop("subln", True) + self.bert_init = kwargs.pop("bert_init", False) + self.multiway = kwargs.pop("multiway", False) + self.share_decoder_input_output_embed = kwargs.pop("share_decoder_input_output_embed", False) + self.max_target_positions = kwargs.pop("max_target_positions", 1024) + self.no_output_layer = kwargs.pop("no_output_layer", False) + # Text + self.vocab_size = kwargs.pop("vocab_size", -1) + # Fairscale + self.checkpoint_activations = kwargs.pop("checkpoint_activations", False) + self.fsdp = kwargs.pop("fsdp", False) + self.ddp_rank = kwargs.pop("ddp_rank", 0) + + if self.deepnorm: + self.decoder_normalize_before = False + if self.subln: + self.decoder_normalize_before = True + + def override(self, args): + for hp in self.__dict__.keys(): + if getattr(args, hp, None) is not None: + self.__dict__[hp] = getattr(args, hp, None) + + +class EncoderDecoderConfig(object): + def __init__(self, **kwargs): + 
class EncoderDecoderConfig(object):
    """Hyper-parameter container for an encoder-decoder TorchScale model.

    Combines the encoder and decoder fields; unknown kwargs are ignored and
    fields can be overridden from an argparse-style namespace via
    :meth:`override`.
    """

    def __init__(self, **kwargs):
        self.encoder_embed_dim = kwargs.pop("encoder_embed_dim", 768)
        self.encoder_attention_heads = kwargs.pop("encoder_attention_heads", 12)
        self.encoder_ffn_embed_dim = kwargs.pop("encoder_ffn_embed_dim", 3072)
        self.encoder_layers = kwargs.pop("encoder_layers", 12)
        self.encoder_normalize_before = kwargs.pop("encoder_normalize_before", True)
        self.decoder_embed_dim = kwargs.pop("decoder_embed_dim", 768)
        self.decoder_attention_heads = kwargs.pop("decoder_attention_heads", 12)
        self.decoder_ffn_embed_dim = kwargs.pop("decoder_ffn_embed_dim", 3072)
        self.decoder_layers = kwargs.pop("decoder_layers", 12)
        self.decoder_normalize_before = kwargs.pop("decoder_normalize_before", True)
        self.activation_fn = kwargs.pop("activation_fn", "gelu")
        self.dropout = kwargs.pop("dropout", 0.0)
        self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
        self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
        self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
        self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
        self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
        self.moe_freq = kwargs.pop("moe_freq", 0)
        self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
        self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
        self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
        self.moe_eval_capacity_token_fraction = kwargs.pop("moe_eval_capacity_token_fraction", 0.25)
        self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
        self.moe_normalize_gate_prob_before_dropping = kwargs.pop("moe_normalize_gate_prob_before_dropping", False)
        self.use_xmoe = kwargs.pop("use_xmoe", True)
        self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
        self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
        self.deepnorm = kwargs.pop("deepnorm", False)
        self.subln = kwargs.pop("subln", True)
        self.bert_init = kwargs.pop("bert_init", False)
        self.multiway = kwargs.pop("multiway", False)
        self.share_all_embeddings = kwargs.pop("share_all_embeddings", False)
        self.share_decoder_input_output_embed = kwargs.pop("share_decoder_input_output_embed", False)
        self.max_source_positions = kwargs.pop("max_source_positions", 1024)
        self.max_target_positions = kwargs.pop("max_target_positions", 1024)
        self.no_output_layer = kwargs.pop("no_output_layer", False)
        # Text
        self.vocab_size = kwargs.pop("vocab_size", -1)
        # Fairscale
        self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
        self.fsdp = kwargs.pop("fsdp", False)
        self.ddp_rank = kwargs.pop("ddp_rank", 0)

        # BUG FIX: deepnorm requires post-LN while subln forces pre-LN; with
        # subln defaulting to True, the original code re-enabled pre-LN right
        # after deepnorm disabled it.  Requesting deepnorm now also turns
        # subln off so the two schemes cannot conflict.
        if self.deepnorm:
            self.encoder_normalize_before = False
            self.decoder_normalize_before = False
            self.subln = False
        if self.subln:
            self.encoder_normalize_before = True
            self.decoder_normalize_before = True

    def override(self, args):
        """Copy every non-None attribute of ``args`` over the matching field."""
        for hp in self.__dict__.keys():
            if getattr(args, hp, None) is not None:
                self.__dict__[hp] = getattr(args, hp, None)
class DecoderLayer(nn.Module):
    """One Transformer decoder layer: self-attention, optional cross-attention
    (encoder-decoder mode only), and either a dense FFN or a Mixture-of-Experts
    layer.  Supports DeepNorm residual scaling (``args.deepnorm``), sub-LayerNorm
    rescaling (``args.subln``) and stochastic depth (``args.drop_path_rate``).
    """

    def __init__(
        self,
        args,
        depth,
        is_moe_layer=False,
        is_encoder_decoder=False,
    ):
        super().__init__()
        self.args = args
        self.embed_dim = args.decoder_embed_dim
        self.dropout_module = torch.nn.Dropout(args.dropout, inplace=True)

        # Stochastic depth: the drop probability grows linearly with depth.
        if args.drop_path_rate > 0:
            drop_path_prob = np.linspace(0, args.drop_path_rate, args.decoder_layers)[depth]
            self.drop_path = DropPath(drop_path_prob)
        else:
            self.drop_path = None

        self.self_attn = self.build_self_attention(self.embed_dim, args)

        # True => pre-LN; False => post-LN (DeepNorm style).
        self.normalize_before = args.decoder_normalize_before

        self.self_attn_layer_norm = LayerNorm(self.embed_dim)

        # Cross-attention exists only when used inside an encoder-decoder.
        if not is_encoder_decoder:
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)

        self.is_moe_layer = is_moe_layer
        self.ffn_dim = args.decoder_ffn_embed_dim

        if not self.is_moe_layer:
            self.ffn = self.build_ffn(
                self.embed_dim,
                self.args,
            )
        else:
            # MoE layer: top-1 or top-2 routing with X-MoE style gates.
            if args.moe_top1_expert:
                gate = Top1Gate(
                    self.embed_dim,
                    args.moe_expert_count,
                    use_fp32=args.moe_gating_use_fp32,
                    moe_eval_capacity_token_fraction=args.moe_eval_capacity_token_fraction,
                    use_xmoe=args.use_xmoe,
                )
            else:
                gate = Top2Gate(
                    self.embed_dim,
                    args.moe_expert_count,
                    args.moe_gating_use_fp32,
                    args.moe_second_expert_policy,
                    args.moe_normalize_gate_prob_before_dropping,
                    args.moe_eval_capacity_token_fraction,
                    use_xmoe=args.use_xmoe,
                )
            experts = make_experts(args, self.embed_dim, self.ffn_dim)
            self.moe_layer = MOELayer(gate, experts, args)

        self.final_layer_norm = LayerNorm(self.embed_dim)

        # DeepNet residual scaling factor; 1.0 when deepnorm is off.
        if args.deepnorm:
            if is_encoder_decoder:
                self.alpha = math.pow(3.0 * args.decoder_layers, 0.25)
            else:
                self.alpha = math.pow(2.0 * args.decoder_layers, 0.25)
        else:
            self.alpha = 1.0

        # NOTE(review): this LayerNorm appears unused in this layer —
        # FeedForwardNetwork already builds its own ffn_layernorm when subln
        # is set; confirm before removing.
        if args.subln:
            self.ffn_layernorm = LayerNorm(self.ffn_dim)
        else:
            self.ffn_layernorm = None

    def build_ffn(self, embed_dim, args):
        """Dense position-wise feed-forward sub-layer."""
        return FeedForwardNetwork(
            embed_dim,
            self.ffn_dim,
            args.activation_fn,
            args.dropout,
            args.activation_dropout,
            args.subln,
        )

    def build_self_attention(self, embed_dim, args):
        """Causal self-attention over decoder states."""
        return MultiheadAttention(
            args,
            embed_dim,
            args.decoder_attention_heads,
            dropout=args.attention_dropout,
            self_attention=True,
            encoder_decoder_attention=False,
            subln=args.subln,
        )

    def build_encoder_attention(self, embed_dim, args):
        """Cross-attention over the encoder output."""
        return MultiheadAttention(
            args,
            embed_dim,
            args.decoder_attention_heads,
            dropout=args.attention_dropout,
            self_attention=False,
            encoder_decoder_attention=True,
            subln=args.subln,
        )

    def residual_connection(self, x, residual):
        # DeepNorm-style residual: the residual branch is scaled by alpha.
        return residual * self.alpha + x

    def forward(
        self,
        x,
        encoder_out=None,
        encoder_padding_mask=None,
        incremental_state=None,
        self_attn_mask=None,
        self_attn_padding_mask=None,
        self_attn_rel_pos=None,
        cross_attn_rel_pos=None,
    ):
        """Run one decoder layer on ``x`` (sequence-first layout).

        Returns ``(x, attn, None, l_aux)`` where ``attn`` is the attention of
        the last attention sub-layer executed and ``l_aux`` is the MoE
        auxiliary loss (None for dense layers).
        """
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)

        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            attn_mask=self_attn_mask,
            rel_pos=self_attn_rel_pos,
        )
        x = self.dropout_module(x)

        if self.drop_path is not None:
            x = self.drop_path(x)

        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)

        # Cross-attention block (skipped when there is no encoder output).
        if self.encoder_attn is not None and encoder_out is not None:
            residual = x
            if self.normalize_before:
                x = self.encoder_attn_layer_norm(x)

            x, attn = self.encoder_attn(
                query=x,
                key=encoder_out,
                value=encoder_out,
                key_padding_mask=encoder_padding_mask,
                incremental_state=None,
                rel_pos=cross_attn_rel_pos,
            )
            x = self.dropout_module(x)

            if self.drop_path is not None:
                x = self.drop_path(x)

            x = self.residual_connection(x, residual)
            if not self.normalize_before:
                x = self.encoder_attn_layer_norm(x)

        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        if not self.is_moe_layer:
            x = self.ffn(x)
            l_aux = None
        else:
            # The MoE layer operates on the transposed (batch-first) layout.
            x = x.transpose(0, 1)
            x, l_aux = self.moe_layer(x)
            x = x.transpose(0, 1)

        if self.drop_path is not None:
            x = self.drop_path(x)

        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.final_layer_norm(x)

        return x, attn, None, l_aux
class Decoder(nn.Module):
    """TorchScale Transformer decoder.

    Wraps token/positional embedding, a stack of :class:`DecoderLayer`
    (optionally MoE), and an optional output projection.  Supports incremental
    decoding, relative position biases, and DeepNorm / subln parameter
    rescaling.
    """

    def __init__(
        self,
        args,
        embed_tokens=None,
        embed_positions=None,
        output_projection=None,
        is_encoder_decoder=False,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.args = args

        self.dropout_module = torch.nn.Dropout(args.dropout, inplace=True)

        embed_dim = args.decoder_embed_dim
        self.embed_dim = embed_dim
        self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)

        self.embed_tokens = embed_tokens
        self.embed_positions = embed_positions

        # Build an output projection only when the caller did not supply one
        # and the config asks for one.
        # BUG FIX: the original code later executed an unconditional
        # `self.output_projection = output_projection`, discarding the
        # projection built here (leaving None and crashing in output_layer()).
        # That duplicate assignment has been removed.
        if output_projection is None and not args.no_output_layer and args.vocab_size > 0:
            self.output_projection = self.build_output_projection(args)
        else:
            self.output_projection = output_projection

        if args.layernorm_embedding:
            self.layernorm_embedding = LayerNorm(embed_dim)
        else:
            self.layernorm_embedding = None

        self.layers = nn.ModuleList([])

        moe_freq = args.moe_freq
        for i in range(args.decoder_layers):
            # Every `moe_freq`-th layer becomes a Mixture-of-Experts layer.
            is_moe_layer = moe_freq != 0 and (i + 1) % moe_freq == 0
            self.layers.append(
                self.build_decoder_layer(
                    args,
                    depth=i,
                    is_moe_layer=is_moe_layer,
                    is_encoder_decoder=is_encoder_decoder,
                )
            )

        self.num_layers = len(self.layers)

        if args.decoder_normalize_before:
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None

        self.self_attn_relative_position = None
        self.cross_attn_relative_position = None

        if args.rel_pos_buckets > 0 and args.max_rel_pos > 0:
            self.self_attn_relative_position = RelativePositionBias(
                num_buckets=args.rel_pos_buckets,
                max_distance=args.max_rel_pos,
                n_heads=args.decoder_attention_heads,
            )
            if is_encoder_decoder:
                self.cross_attn_relative_position = RelativePositionBias(
                    num_buckets=args.rel_pos_buckets,
                    max_distance=args.max_rel_pos,
                    n_heads=args.decoder_attention_heads,
                )

        if args.bert_init:
            self.apply(init_bert_params)

        # DeepNet: shrink selected projections so residual branches start small.
        if args.deepnorm:
            if is_encoder_decoder:
                init_scale = math.pow(12.0 * args.decoder_layers, 0.25)
            else:
                init_scale = math.pow(8.0 * args.decoder_layers, 0.25)
            for name, p in self.named_parameters():
                if 'fc1' in name or 'fc2' in name or 'out_proj' in name or 'v_proj' in name:
                    p.data.div_(init_scale)

        # Sub-LN (MAGNETO): grow the same projections; cross-attention is excluded.
        if args.subln:
            if is_encoder_decoder:
                init_scale = math.sqrt(math.log(args.decoder_layers * 3))
            else:
                init_scale = math.sqrt(math.log(args.decoder_layers * 2))
            for name, p in self.named_parameters():
                if 'encoder_attn' in name:
                    continue
                if 'fc1' in name or 'fc2' in name or 'out_proj' in name or 'v_proj' in name:
                    p.data.mul_(init_scale)

    def build_output_projection(
        self,
        args,
    ):
        """Return the vocab projection, tied to the input embedding if requested."""
        if args.share_decoder_input_output_embed:
            output_projection = torch.nn.Linear(
                self.embed_tokens.weight.shape[1],
                self.embed_tokens.weight.shape[0],
                bias=False,
            )
            output_projection.weight = self.embed_tokens.weight
        else:
            output_projection = torch.nn.Linear(
                args.decoder_embed_dim, args.vocab_size, bias=False
            )
            torch.nn.init.normal_(
                output_projection.weight, mean=0, std=args.decoder_embed_dim ** -0.5
            )
        return output_projection

    def build_decoder_layer(
        self,
        args,
        depth,
        is_moe_layer=False,
        is_encoder_decoder=False
    ):
        """Build one layer, optionally wrapped for activation checkpointing / FSDP."""
        layer = DecoderLayer(
            args,
            depth,
            is_moe_layer=is_moe_layer,
            is_encoder_decoder=is_encoder_decoder,
        )
        if args.checkpoint_activations:
            layer = checkpoint_wrapper(layer)
        if args.fsdp:
            layer = wrap(layer)
        return layer

    def forward_embedding(
        self,
        tokens,
        token_embedding=None,
        incremental_state=None,
    ):
        """Embed ``tokens``; in incremental decoding only the last step is kept.

        Positions are computed from the *full* token sequence before slicing,
        so the position of the final step is correct.
        """
        positions = None
        if self.embed_positions is not None:
            positions = self.embed_positions(tokens, incremental_state=incremental_state)

        if incremental_state is not None:
            tokens = tokens[:, -1:]
            if positions is not None:
                positions = positions[:, -1:]

        if token_embedding is None:
            token_embedding = self.embed_tokens(tokens)

        x = embed = self.embed_scale * token_embedding

        if positions is not None:
            x += positions

        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)

        x = self.dropout_module(x)

        return x, embed

    def forward(
        self,
        prev_output_tokens,
        self_attn_padding_mask=None,
        encoder_out=None,
        incremental_state=None,
        features_only=False,
        return_all_hiddens=False,
        token_embeddings=None,
        **kwargs
    ):
        """Decode ``prev_output_tokens``.

        Returns ``(logits_or_features, extra)`` where ``extra`` carries the
        per-layer hidden states, MoE aux losses, and last-layer attention.
        """
        # Embed tokens and positions; internally we work in (seq, batch, dim).
        x, _ = self.forward_embedding(prev_output_tokens, token_embeddings, incremental_state)
        x = x.transpose(0, 1)

        # Relative position biases (sliced to the current step when decoding).
        self_attn_rel_pos_bias = None
        slen = prev_output_tokens.size(1)
        if self.self_attn_relative_position is not None:
            self_attn_rel_pos_bias = self.self_attn_relative_position(
                batch_size=x.size(1),
                qlen=slen,
                klen=slen
            )
            if incremental_state is not None:
                self_attn_rel_pos_bias = self_attn_rel_pos_bias[:, -1:, :]
        cross_attn_rel_pos_bias = None
        if self.cross_attn_relative_position is not None:
            cross_attn_rel_pos_bias = self.cross_attn_relative_position(
                batch_size=x.size(1),
                qlen=slen,
                klen=encoder_out["encoder_out"].size(0),
            )
            if incremental_state is not None:
                cross_attn_rel_pos_bias = cross_attn_rel_pos_bias[:, -1:, :]

        # decoder layers
        inner_states = [x]

        # Copy the encoder's aux-loss list so repeated decoder calls do not
        # keep appending into (mutating) the caller's encoder_out dict.
        if encoder_out is None:
            l_aux = []
        else:
            l_aux = list(encoder_out["l_aux"]) if "l_aux" in encoder_out else []

        # The causal mask is identical for every layer: build it once instead
        # of once per layer (it is O(seq^2) to construct).
        self_attn_mask = None
        if incremental_state is None:
            self_attn_mask = torch.triu(
                torch.zeros([x.size(0), x.size(0)]).float().fill_(float("-inf")).type_as(x), 1
            )

        for idx, layer in enumerate(self.layers):
            if incremental_state is not None and idx not in incremental_state:
                incremental_state[idx] = {}

            x, layer_attn, _, l_aux_i = layer(
                x,
                encoder_out["encoder_out"] if encoder_out is not None else None,
                encoder_out["encoder_padding_mask"] if encoder_out is not None else None,
                incremental_state[idx] if incremental_state is not None else None,
                self_attn_mask=self_attn_mask,
                self_attn_padding_mask=self_attn_padding_mask,
                self_attn_rel_pos=self_attn_rel_pos_bias,
                cross_attn_rel_pos=cross_attn_rel_pos_bias,
            )
            l_aux.append(l_aux_i)
            inner_states.append(x)

        if self.layer_norm is not None:
            x = self.layer_norm(x)

        x = x.transpose(0, 1)  # back to (batch, seq, dim)

        if not features_only:
            x = self.output_layer(x)

        # NOTE(review): `layer_attn` is the last layer's attention; it is
        # undefined for a zero-layer model and may be None depending on what
        # MultiheadAttention returns — confirm before relying on "attn".
        return x, {"inner_states": inner_states, "l_aux": l_aux, "attn": [layer_attn.mean(dim=0)]}

    def output_layer(self, features):
        """Project hidden states to vocabulary logits."""
        return self.output_projection(features)
class EncoderLayer(nn.Module):
    """One Transformer encoder layer: self-attention plus a dense FFN or an
    MoE layer.  LayerNorms and the FFN are wrapped in MultiwayWrapper so that
    in multiway mode (e.g. BEiT-3) separate parameters handle each modality.
    """

    def __init__(
        self,
        args,
        depth,
        is_moe_layer=False,
        is_encoder_decoder=False
    ):
        super().__init__()
        self.args = args
        self.embed_dim = args.encoder_embed_dim
        self.self_attn = self.build_self_attention(self.embed_dim, args)
        self.self_attn_layer_norm = MultiwayWrapper(args, LayerNorm(self.embed_dim))
        self.dropout_module = torch.nn.Dropout(args.dropout, inplace=True)

        # Stochastic depth: drop probability grows linearly with depth.
        if args.drop_path_rate > 0:
            drop_path_prob = np.linspace(0, args.drop_path_rate, args.encoder_layers)[depth]
            self.drop_path = DropPath(drop_path_prob)
        else:
            self.drop_path = None

        # True => pre-LN; False => post-LN (DeepNorm style).
        self.normalize_before = args.encoder_normalize_before
        self.is_moe_layer = is_moe_layer
        self.ffn_dim = args.encoder_ffn_embed_dim

        if not self.is_moe_layer:
            self.ffn = MultiwayWrapper(
                args,
                self.build_ffn(
                    self.embed_dim,
                    self.args,
                )
            )
        else:
            # MoE layers and multiway mode are mutually exclusive here.
            assert not self.args.multiway
            if args.moe_top1_expert:
                gate = Top1Gate(
                    self.embed_dim,
                    args.moe_expert_count,
                    use_fp32=args.moe_gating_use_fp32,
                    moe_eval_capacity_token_fraction=args.moe_eval_capacity_token_fraction,
                    use_xmoe=args.use_xmoe,
                )
            else:
                gate = Top2Gate(
                    self.embed_dim,
                    args.moe_expert_count,
                    args.moe_gating_use_fp32,
                    args.moe_second_expert_policy,
                    args.moe_normalize_gate_prob_before_dropping,
                    args.moe_eval_capacity_token_fraction,
                    use_xmoe=args.use_xmoe,
                )
            experts = make_experts(args, self.embed_dim, self.ffn_dim)
            self.moe_layer = MOELayer(gate, experts, args)
        self.final_layer_norm = MultiwayWrapper(args, LayerNorm(self.embed_dim))

        # DeepNet residual scaling factor; 1.0 when deepnorm is off.
        if args.deepnorm:
            if is_encoder_decoder:
                # Encoder alpha for enc-dec DeepNet: 0.81 * (N^4 * M)^(1/16).
                self.alpha = math.pow(math.pow(args.encoder_layers, 4) * args.decoder_layers, 0.0625) * 0.81
            else:
                self.alpha = math.pow(2.0 * args.encoder_layers, 0.25)
        else:
            self.alpha = 1.0

    def build_ffn(self, embed_dim, args):
        """Dense position-wise feed-forward sub-layer."""
        return FeedForwardNetwork(
            embed_dim,
            self.ffn_dim,
            args.activation_fn,
            args.dropout,
            args.activation_dropout,
            args.subln,
        )

    def build_self_attention(self, embed_dim, args):
        """Bidirectional self-attention over encoder states."""
        return MultiheadAttention(
            args,
            embed_dim,
            args.encoder_attention_heads,
            dropout=args.attention_dropout,
            self_attention=True,
            encoder_decoder_attention=False,
            subln=args.subln,
        )

    def residual_connection(self, x, residual):
        # DeepNorm-style residual: the residual branch is scaled by alpha.
        return residual * self.alpha + x

    def forward(
        self,
        x,
        encoder_padding_mask,
        attn_mask=None,
        rel_pos=None
    ):
        """Run one encoder layer on ``x`` (sequence-first layout).

        Returns ``(x, l_aux)`` where ``l_aux`` is the MoE auxiliary loss
        (None for dense layers).
        """
        if attn_mask is not None:
            # Convert a boolean mask into an additive mask of large negatives.
            attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)

        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        x, _ = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=encoder_padding_mask,
            attn_mask=attn_mask,
            rel_pos=rel_pos,
        )
        x = self.dropout_module(x)

        if self.drop_path is not None:
            x = self.drop_path(x)

        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)

        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        if not self.is_moe_layer:
            x = self.ffn(x)
            l_aux = None
        else:
            # The MoE layer operates on the transposed (batch-first) layout.
            x = x.transpose(0, 1)
            x, l_aux = self.moe_layer(x)
            x = x.transpose(0, 1)

        if self.drop_path is not None:
            x = self.drop_path(x)

        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.final_layer_norm(x)
        return x, l_aux
class Encoder(nn.Module):
    """TorchScale Transformer encoder.

    Wraps token/positional embedding, a stack of :class:`EncoderLayer`
    (optionally MoE), and — for encoder-only models — an optional output
    projection.  Supports multiway (modality-split) parameters, relative
    position bias, and DeepNorm / subln parameter rescaling.
    """

    def __init__(
        self,
        args,
        embed_tokens=None,
        embed_positions=None,
        output_projection=None,
        is_encoder_decoder=False,
        **kwargs
    ):
        # Setting a plain (non-module) attribute before Module.__init__ is
        # tolerated by nn.Module.__setattr__ since `args` is not a Parameter
        # or Module.
        self.args = args
        super().__init__(**kwargs)

        self.dropout_module = torch.nn.Dropout(args.dropout, inplace=True)

        embed_dim = args.encoder_embed_dim
        self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)

        self.embed_tokens = embed_tokens
        self.embed_positions = embed_positions

        # Encoder-only models may project hidden states to the vocabulary.
        if output_projection is None and not is_encoder_decoder and not args.no_output_layer and args.vocab_size > 0:
            self.output_projection = self.build_output_projection(args)
        else:
            self.output_projection = output_projection

        if args.layernorm_embedding:
            self.layernorm_embedding = MultiwayWrapper(args, LayerNorm(embed_dim), dim=1)
        else:
            self.layernorm_embedding = None

        self.layers = nn.ModuleList([])

        moe_freq = args.moe_freq
        for i in range(args.encoder_layers):
            # Every `moe_freq`-th layer becomes a Mixture-of-Experts layer.
            is_moe_layer = moe_freq != 0 and (i + 1) % moe_freq == 0
            self.layers.append(
                self.build_encoder_layer(
                    args,
                    depth=i,
                    is_moe_layer=is_moe_layer,
                    is_encoder_decoder=is_encoder_decoder
                )
            )
        self.num_layers = len(self.layers)

        if args.encoder_normalize_before:
            self.layer_norm = MultiwayWrapper(args, LayerNorm(embed_dim))
        else:
            self.layer_norm = None

        if args.rel_pos_buckets > 0 and args.max_rel_pos > 0:
            self.relative_position = RelativePositionBias(
                num_buckets=args.rel_pos_buckets,
                max_distance=args.max_rel_pos,
                n_heads=args.encoder_attention_heads,
            )
        else:
            self.relative_position = None

        if args.bert_init:
            self.apply(init_bert_params)

        # DeepNet: shrink selected projections so residual branches start small.
        if args.deepnorm:
            if is_encoder_decoder:
                init_scale = math.pow(math.pow(args.encoder_layers, 4) * args.decoder_layers, 0.0625) / 1.15
            else:
                init_scale = math.pow(8.0 * args.encoder_layers, 0.25)
            for name, p in self.named_parameters():
                if 'fc1' in name or 'fc2' in name or 'out_proj' in name or 'v_proj' in name:
                    p.data.div_(init_scale)

        # Sub-LN (MAGNETO): grow the same projections.
        if args.subln:
            if is_encoder_decoder:
                init_scale = math.sqrt(math.log(3 * args.decoder_layers) * math.log(2 * args.encoder_layers) / 3)
            else:
                init_scale = math.sqrt(math.log(args.encoder_layers * 2))
            for name, p in self.named_parameters():
                if 'fc1' in name or 'fc2' in name or 'out_proj' in name or 'v_proj' in name:
                    p.data.mul_(init_scale)

    def build_output_projection(
        self,
        args,
    ):
        """Return the vocab projection, tied to the input embedding if requested."""
        if args.share_encoder_input_output_embed:
            # NOTE(review): `encoder_embedding_type` is not declared in the
            # visible config classes; this raises AttributeError (not
            # AssertionError) when the attribute is missing — confirm.
            assert args.encoder_embedding_type == 'language'
            output_projection = torch.nn.Linear(
                self.embed_tokens.weight.shape[1],
                self.embed_tokens.weight.shape[0],
                bias=False,
            )
            output_projection.weight = self.embed_tokens.weight
        else:
            output_projection = torch.nn.Linear(
                args.encoder_embed_dim, args.vocab_size, bias=False
            )
            torch.nn.init.normal_(
                output_projection.weight, mean=0, std=args.encoder_embed_dim ** -0.5
            )
        return output_projection

    def build_encoder_layer(
        self,
        args,
        depth,
        is_moe_layer=False,
        is_encoder_decoder=False
    ):
        """Build one layer, optionally wrapped for activation checkpointing / FSDP."""
        layer = EncoderLayer(
            args,
            depth,
            is_moe_layer=is_moe_layer,
            is_encoder_decoder=is_encoder_decoder
        )
        if args.checkpoint_activations:
            layer = checkpoint_wrapper(layer)
        if args.fsdp:
            layer = wrap(layer)
        return layer

    def forward_embedding(
        self,
        src_tokens,
        token_embedding=None,
    ):
        """Embed ``src_tokens`` (or use precomputed ``token_embedding``)."""
        if token_embedding is None:
            token_embedding = self.embed_tokens(src_tokens)
        x = embed = self.embed_scale * token_embedding
        if self.embed_positions is not None:
            if src_tokens is not None:
                x = embed + self.embed_positions(src_tokens)
            else:
                x = embed + self.embed_positions(x)
        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)
        x = self.dropout_module(x)
        return x, embed

    def forward(
        self,
        src_tokens,
        encoder_padding_mask=None,
        return_all_hiddens=False,
        token_embeddings=None,
        multiway_split_position=None,
        features_only=False,
        **kwargs
    ):
        """Encode ``src_tokens`` (or precomputed ``token_embeddings``).

        Returns a dict with the encoder output in (seq, batch, dim) layout,
        the embedding, padding mask, optional per-layer states, and MoE aux
        losses.
        """
        assert src_tokens is not None or token_embeddings is not None

        # Default: nothing is padding.
        if encoder_padding_mask is None:
            if src_tokens is not None:
                encoder_padding_mask = torch.zeros_like(
                    src_tokens,
                    device=src_tokens.device
                ).bool()
            else:
                encoder_padding_mask = torch.zeros(
                    [token_embeddings.size(0), token_embeddings.size(1)],
                    device=token_embeddings.device
                ).bool()

        # Tell every multiway module where the modality boundary lies.
        if multiway_split_position is not None:
            assert self.args.multiway
            self.apply(set_split_position(multiway_split_position))

        x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
        # Zero-out embeddings at padded positions.
        x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))

        x = x.transpose(0, 1)

        encoder_states = []

        if return_all_hiddens:
            encoder_states.append(x)

        rel_pos_bias = None
        if self.relative_position is not None:
            rel_pos_bias = self.relative_position(
                batch_size=x.size(1),
                qlen=x.size(0),
                klen=x.size(0)
            )

        l_aux = []
        for layer in self.layers:
            x, l_aux_i = layer(
                x, encoder_padding_mask=encoder_padding_mask,
                rel_pos=rel_pos_bias
            )
            if return_all_hiddens:
                assert encoder_states is not None
                encoder_states.append(x)
            l_aux.append(l_aux_i)

        if self.layer_norm is not None:
            x = self.layer_norm(x)

        if not features_only and self.output_projection is not None:
            x = self.output_projection(x)

        return {
            "encoder_out": x,
            "encoder_embedding": encoder_embedding,
            "encoder_padding_mask": encoder_padding_mask,
            "encoder_states": encoder_states,
            "l_aux": l_aux,
        }
= layer( + x, encoder_padding_mask=encoder_padding_mask, + rel_pos=rel_pos_bias + ) + if return_all_hiddens: + assert encoder_states is not None + encoder_states.append(x) + l_aux.append(l_aux_i) + + if self.layer_norm is not None: + x = self.layer_norm(x) + + if not features_only and self.output_projection is not None: + x = self.output_projection(x) + + return { + "encoder_out": x, + "encoder_embedding": encoder_embedding, + "encoder_padding_mask": encoder_padding_mask, + "encoder_states": encoder_states, + "l_aux": l_aux, + } diff --git a/torchscale/architecture/encoder_decoder.py b/torchscale/architecture/encoder_decoder.py new file mode 100644 index 0000000..fbe9219 --- /dev/null +++ b/torchscale/architecture/encoder_decoder.py @@ -0,0 +1,61 @@ +import torch.nn as nn +from torchscale.architecture.encoder import Encoder +from torchscale.architecture.decoder import Decoder + + +class EncoderDecoder(nn.Module): + + def __init__( + self, + args, + encoder_embed_tokens=None, + encoder_embed_positions=None, + decoder_embed_tokens=None, + decoder_embed_positions=None, + output_projection=None, + **kwargs + ): + super().__init__() + self.args = args + if args.share_all_embeddings: + args.share_decoder_input_output_embed = True + + self.encoder = Encoder( + args, + encoder_embed_tokens, + encoder_embed_positions, + is_encoder_decoder=True, + **kwargs + ) + + if args.share_all_embeddings and decoder_embed_tokens is None: + decoder_embed_tokens = self.encoder.embed_tokens + + self.decoder = Decoder( + args, + decoder_embed_tokens, + decoder_embed_positions, + output_projection, + is_encoder_decoder=True, + **kwargs + ) + + def forward( + self, + src_tokens, + prev_output_tokens, + return_all_hiddens=False, + features_only=False, + **kwargs + ): + encoder_out = self.encoder( + src_tokens, + return_all_hiddens=return_all_hiddens + ) + decoder_out = self.decoder( + prev_output_tokens, + encoder_out=encoder_out, + features_only=features_only, + 
return_all_hiddens=return_all_hiddens, + ) + return decoder_out diff --git a/torchscale/architecture/utils.py b/torchscale/architecture/utils.py new file mode 100644 index 0000000..cf21997 --- /dev/null +++ b/torchscale/architecture/utils.py @@ -0,0 +1,30 @@ +import torch.nn as nn +from torchscale.component.multihead_attention import MultiheadAttention +from torchscale.component.multiway_network import MultiwayNetwork + + +def init_bert_params(module): + + def normal_(data): + data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device)) + + if isinstance(module, nn.Linear): + normal_(module.weight.data) + if module.bias is not None: + module.bias.data.zero_() + if isinstance(module, nn.Embedding): + normal_(module.weight.data) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + if isinstance(module, MultiheadAttention): + if isinstance(module.q_proj, MultiwayNetwork): + normal_(module.q_proj.A.weight.data) + normal_(module.q_proj.B.weight.data) + normal_(module.k_proj.A.weight.data) + normal_(module.k_proj.B.weight.data) + normal_(module.v_proj.A.weight.data) + normal_(module.v_proj.B.weight.data) + else: + normal_(module.q_proj.weight.data) + normal_(module.k_proj.weight.data) + normal_(module.v_proj.weight.data) diff --git a/torchscale/component/__init__.py b/torchscale/component/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/torchscale/component/droppath.py b/torchscale/component/droppath.py new file mode 100644 index 0000000..cfe0a3c --- /dev/null +++ b/torchscale/component/droppath.py @@ -0,0 +1,16 @@ +from timm.models.layers import drop_path +import torch.nn as nn + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
+ """ + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self): + return 'p={}'.format(self.drop_prob) diff --git a/torchscale/component/embedding.py b/torchscale/component/embedding.py new file mode 100644 index 0000000..b4c285c --- /dev/null +++ b/torchscale/component/embedding.py @@ -0,0 +1,120 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class VisionLanguageEmbedding(nn.Module): + + def __init__( + self, + text_embed, + vision_embed + ): + super().__init__() + self.text_embed = text_embed + self.vision_embed = vision_embed + + def forward( + self, + textual_tokens, + visual_tokens, + **kwargs + ): + if textual_tokens is None: + return self.vision_embed(visual_tokens) + + if visual_tokens is None: + return self.text_embed(textual_tokens) + + x1 = self.vision_embed(visual_tokens) + x2 = self.text_embed(textual_tokens) + + return torch.cat([x1, x2], dim=1) + + +class VisionEmbedding(nn.Module): + """ Image to Patch Embedding + """ + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + contain_mask_token=False, + prepend_cls_token=False + ): + super().__init__() + img_size = (img_size, img_size) + patch_size = (patch_size, patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + if contain_mask_token: + self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + else: + self.mask_token = None + + if prepend_cls_token: + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + else: + self.cls_token = None + + def forward( + self, + x, + 
class TextEmbedding(nn.Embedding):
    """Token embedding whose weights are re-initialized with std = embed_dim ** -0.5."""

    def reset_parameters(self):
        # Scaled normal init (transformer convention), then zero the padding row.
        nn.init.normal_(self.weight, mean=0, std=self.embedding_dim ** -0.5)
        self._fill_padding_idx_with_zero()


class PositionalEmbedding(nn.Embedding):
    """Learned absolute positional embedding.

    When no explicit positions are given, positions start at 2 to stay
    consistent with Fairseq (indices 0/1 are reserved there).
    """

    def forward(
        self,
        x,
        positions=None,
        **kwargs,
    ):
        if positions is None:
            # being consistent with Fairseq, which starts from 2.
            seq_len = x.size(1)
            positions = torch.arange(2, seq_len + 2, device=x.device).long().unsqueeze(0)
        return F.embedding(
            positions,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )


class set_torch_seed(object):
    """Context manager that seeds torch (and CUDA, if present) on entry and
    restores the previous RNG state on exit."""

    def __init__(self, seed):
        assert isinstance(seed, int)
        # Snapshot the current RNG state *before* reseeding so __exit__ can roll back.
        self.rng_state = self.get_rng_state()

        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(seed)

    def get_rng_state(self):
        snapshot = {"torch_rng_state": torch.get_rng_state()}
        if torch.cuda.is_available():
            snapshot["cuda_rng_state"] = torch.cuda.get_rng_state()
        return snapshot

    def set_rng_state(self, state):
        torch.set_rng_state(state["torch_rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(state["cuda_rng_state"])

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        self.set_rng_state(self.rng_state)


def make_experts(args, embed_dim, expert_ffn_dim):
    """Build the local shard of MoE experts.

    Each expert is constructed under a deterministic seed derived from its
    global expert index, so every rank materializes distinct (but reproducible)
    expert weights.
    """
    world_size = (
        torch.distributed.get_world_size()
        if torch.distributed.is_initialized()
        else 1
    )
    ddp_rank = args.ddp_rank
    start_seed = torch.randint(1000000, (1,)).item()

    def _new_expert():
        # One FFN expert; all experts share hyper-parameters, not weights.
        return FeedForwardNetwork(
            embed_dim,
            expert_ffn_dim,
            args.activation_fn,
            args.dropout,
            args.activation_dropout,
            args.subln,
        )

    expert_list = []
    # at least as many experts than gpus
    if args.moe_expert_count >= world_size:
        assert args.moe_expert_count % world_size == 0, f'{args.moe_expert_count}, {world_size}'
        local_moe_expert_count = args.moe_expert_count // world_size
        for local_idx in range(local_moe_expert_count):
            with set_torch_seed(start_seed + ddp_rank * local_moe_expert_count + local_idx):
                expert_list.append(_new_expert())
    else:
        # More GPUs than experts: ranks that share an expert share its seed.
        assert world_size % args.moe_expert_count == 0, f'{world_size}, {args.moe_expert_count}'

        with set_torch_seed(start_seed + ddp_rank % args.moe_expert_count):
            expert_list.append(_new_expert())
    return nn.ModuleList(expert_list)


def get_activation_fn(activation):
    """Map an activation name to a callable; gelu is computed in fp32 then cast back."""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return lambda t: F.gelu(t.float()).type_as(t)
    raise NotImplementedError


class FeedForwardNetwork(nn.Module):
    """Position-wise feed-forward block: fc1 -> activation -> (optional sub-LN) -> fc2."""

    def __init__(
        self,
        embed_dim,
        ffn_dim,
        activation_fn,
        dropout,
        activation_dropout,
        subln=False
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.activation_fn = get_activation_fn(activation=str(activation_fn))
        # In-place dropout, matching the original implementation.
        self.activation_dropout_module = torch.nn.Dropout(activation_dropout, inplace=True)
        self.dropout_module = torch.nn.Dropout(dropout, inplace=True)
        self.fc1 = nn.Linear(self.embed_dim, ffn_dim)
        self.fc2 = nn.Linear(ffn_dim, self.embed_dim)
        # Sub-LayerNorm between the two projections (subln variant); None otherwise.
        self.ffn_layernorm = LayerNorm(ffn_dim) if subln else None

    def reset_parameters(self):
        self.fc1.reset_parameters()
        self.fc2.reset_parameters()
        if self.ffn_layernorm is not None:
            self.ffn_layernorm.reset_parameters()

    def forward(self, x):
        orig_shape = x.shape
        # Flatten all leading dims so the linears see a 2-D batch.
        flat = x.reshape(-1, x.size(-1))
        hidden = self.activation_fn(self.fc1(flat))
        hidden = self.activation_dropout_module(hidden)
        if self.ffn_layernorm is not None:
            hidden = self.ffn_layernorm(hidden)
        out = self.fc2(hidden)
        out = out.view(orig_shape)
        return self.dropout_module(out)
class MultiheadAttention(nn.Module):
    """Multi-head attention with optional incremental (cached) decoding and
    relative-position bias.

    Inputs follow fairseq's (seq_len, batch, embed_dim) layout. Projections are
    wrapped in MultiwayWrapper so BEiT-3-style modality-split weights can be
    used when ``args.multiway`` is set.
    """

    def __init__(
        self,
        args,
        embed_dim,
        num_heads,
        dropout=0.0,
        self_attention=False,
        encoder_decoder_attention=False,
        subln=False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        self.scaling = self.head_dim ** -0.5

        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        # Exactly one of the two attention modes must be selected.
        assert self.self_attention ^ self.encoder_decoder_attention

        self.k_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
        self.v_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
        self.q_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
        self.out_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
        # Sub-LayerNorm on the attention output (subln variant, self-attention only).
        self.inner_attn_ln = MultiwayWrapper(args, LayerNorm(self.embed_dim)) if subln and self.self_attention else None
        self.dropout_module = torch.nn.Dropout(dropout, inplace=True)

    def reset_parameters(self):
        # 1/sqrt(2) gain on q/k/v follows fairseq's scaled initialization.
        nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
        nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
        nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
        nn.init.xavier_uniform_(self.out_proj.weight)
        nn.init.constant_(self.out_proj.bias, 0.0)

    def forward(
        self,
        query,
        key,
        value,
        incremental_state=None,
        key_padding_mask=None,
        attn_mask=None,
        rel_pos=None,
    ):
        """Compute attention.

        query/key/value: (len, batch, embed_dim).
        incremental_state: optional dict caching "prev_key"/"prev_value" as
            (batch, heads, cached_len, head_dim); updated in place.
        Returns (attn, attn_weights) where attn is (tgt_len, batch, embed_dim)
        and attn_weights is (heads, batch, tgt_len, src_len), pre-dropout.
        """
        tgt_len, bsz, embed_dim = query.size()
        src_len = tgt_len
        assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
        assert list(query.size()) == [tgt_len, bsz, embed_dim]

        src_len, key_bsz, _ = key.size()
        assert key_bsz == bsz, f"{query.size(), key.size()}"
        assert value is not None
        # BUGFIX: was `assert src_len, bsz == value.shape[:2]`, which asserted only
        # `src_len` (always truthy) and used `bsz == value.shape[:2]` as the assert
        # *message*, so value's shape was never actually checked.
        assert (src_len, bsz) == value.shape[:2], f"{(src_len, bsz)} vs {value.shape}"

        q = self.q_proj(query)
        k = self.k_proj(key)
        v = self.v_proj(value)
        q *= self.scaling

        # (len, bsz*heads, head_dim) -> (bsz*heads, len, head_dim)
        q = q.view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        k = k.view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        v = v.view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)

        if incremental_state is not None:
            # Append the new keys/values to the decoding cache.
            if "prev_key" in incremental_state:
                prev_key = incremental_state["prev_key"].view(bsz * self.num_heads, -1, self.head_dim)
                prev_value = incremental_state["prev_value"].view(bsz * self.num_heads, -1, self.head_dim)
                k = torch.cat([prev_key, k], dim=1)
                v = torch.cat([prev_value, v], dim=1)
            incremental_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
            incremental_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
            src_len = k.size(1)

        attn_weights = torch.bmm(q, k.transpose(1, 2))

        if attn_mask is not None:
            # Clear NaNs before adding the (additive, typically -inf) mask.
            attn_weights = torch.nan_to_num(attn_weights)
            attn_mask = attn_mask.unsqueeze(0)
            attn_weights += attn_mask

        if key_padding_mask is not None:
            # key_padding_mask: (batch, src_len); True marks padding to ignore.
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
                float("-inf"),
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if rel_pos is not None:
            # Additive relative-position bias, already per-head.
            rel_pos = rel_pos.view(attn_weights.size())
            attn_weights = attn_weights + rel_pos

        # Softmax in fp32 for numerical stability, then cast back.
        attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as(attn_weights)
        attn_probs = self.dropout_module(attn_weights)

        attn = torch.bmm(attn_probs, v)
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)

        if self.inner_attn_ln is not None:
            attn = self.inner_attn_ln(attn)

        attn = self.out_proj(attn)
        attn_weights = attn_weights.view(
            bsz, self.num_heads, tgt_len, src_len
        ).transpose(1, 0)

        return attn, attn_weights
def MultiwayWrapper(args, module, dim=0):
    """Wrap ``module`` in a MultiwayNetwork when ``args.multiway`` is set;
    otherwise return the module unchanged."""
    if args.multiway:
        return MultiwayNetwork(module, dim=dim)
    return module


def set_split_position(position):
    """Return an ``nn.Module.apply``-compatible function that sets
    ``split_position`` on every submodule that has one."""

    def apply_fn(module):
        if hasattr(module, 'split_position'):
            module.split_position = position

    return apply_fn


class MultiwayNetwork(nn.Module):
    """Two parallel copies (A, B) of a module; the input is split at
    ``split_position`` along ``dim`` and each part is routed to one copy.

    split_position == -1 routes everything to A; 0 routes everything to B.
    B starts as a deep copy of A with freshly reset parameters.
    """

    def __init__(self, module, dim=0):
        super().__init__()
        self.dim = dim
        self.A = module
        self.B = copy.deepcopy(module)
        # Re-initialize the copy so the two branches do not share weights.
        self.B.reset_parameters()
        self.split_position = -1

    def forward(self, x, **kwargs):
        if self.split_position == -1:
            return self.A(x, **kwargs)
        if self.split_position == 0:
            return self.B(x, **kwargs)
        x1, x2 = torch.split(x, [self.split_position, x.size(self.dim) - self.split_position], dim=self.dim)
        # x1, x2 = x[:self.split_position], x[self.split_position:]
        y1, y2 = self.A(x1, **kwargs), self.B(x2, **kwargs)
        return torch.cat([y1, y2], dim=self.dim)


class RelativePositionBias(nn.Module):
    """T5-style bucketed relative position bias.

    Produces an additive attention bias of shape
    (batch * n_heads, qlen, klen) from a learned per-bucket embedding.
    """

    def __init__(
        self,
        bidirectional=True,
        num_buckets=32,
        max_distance=128,
        n_heads=12
    ):
        super().__init__()
        self.bidirectional = bidirectional
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        self.n_heads = n_heads
        # One learned bias scalar per (bucket, head).
        self.relative_attention_bias = nn.Embedding(self.num_buckets, self.n_heads)

    @staticmethod
    def _relative_position_bucket(
        relative_position,
        bidirectional=True,
        num_buckets=32,
        max_distance=128
    ):
        """Map signed relative positions to bucket indices: exact buckets for
        small distances, logarithmically wider buckets up to ``max_distance``."""
        ret = 0
        n = -relative_position
        if bidirectional:
            # Half the buckets encode direction (before vs. after).
            num_buckets //= 2
            ret += (n < 0).to(torch.long) * num_buckets
            n = torch.abs(n)
        else:
            n = torch.max(n, torch.zeros_like(n))

        max_exact = num_buckets // 2
        is_small = n < max_exact

        # Distances >= max_exact get log-spaced buckets, capped at the last bucket.
        val_if_large = max_exact + (
            torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
        ).to(torch.long)
        val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))

        ret += torch.where(is_small, n, val_if_large)
        return ret

    def compute_bias(
        self,
        qlen,
        klen,
        step=None
    ):
        """Return the bias tensor of shape (1, n_heads, qlen, klen);
        ``step`` offsets the query positions for incremental decoding."""
        step = 0 if step is None else step
        context_position = torch.arange(step, step + qlen, dtype=torch.long,
                                        device=self.relative_attention_bias.weight.device)[:, None]
        memory_position = torch.arange(klen, dtype=torch.long,
                                       device=self.relative_attention_bias.weight.device)[None, :]
        relative_position = memory_position - context_position  # shape (qlen, klen)

        rp_bucket = self._relative_position_bucket(
            relative_position,  # shape (qlen, klen)
            bidirectional=self.bidirectional,
            num_buckets=self.num_buckets,
            # BUGFIX: the configured max_distance was previously not forwarded,
            # so bucketing silently used the default of 128 regardless of the
            # value passed to __init__.
            max_distance=self.max_distance,
        )
        rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)
        values = self.relative_attention_bias(rp_bucket)  # shape (qlen, klen, num_heads)
        values = values.permute([2, 0, 1]).unsqueeze(0)  # shape (1, num_heads, qlen, klen)
        return values

    def forward(
        self,
        batch_size,
        qlen,
        klen,
        step=None
    ):
        # shape (batch * num_heads, qlen, klen)
        return self.compute_bias(qlen, klen, step).repeat(batch_size, 1, 1, 1).view(-1, qlen, klen)
+# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + +# NOTE: This is a mirror of the code in +# https://github.com/facebookresearch/fairscale/tree/master/fairscale/nn/moe + +import logging +import time +from typing import Any, Tuple, cast + +import torch +import torch.distributed as dist +from torch import Tensor +from torch.nn import Module, ModuleList + + +try: + from fairseq.modules.moe import MOELayer + has_fairseq = True + Base = MOELayer +except ModuleNotFoundError: + Base = Module + has_fairseq = False + +try: + # To enable Tutel MoE optimizations: + # python3 -m pip install --user --upgrade git+https://github.com/microsoft/tutel@v0.1.x + from tutel import moe as tutel_moe + + has_tutel, fused_cumsum_sub_one = True, tutel_moe.fast_cumsum_sub_one +except ModuleNotFoundError: + has_tutel, fused_cumsum_sub_one = False, lambda mask: torch.cumsum(mask, dim=0) - 1 + +logger = logging.getLogger(__name__) + + +# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity +# See https://arxiv.org/pdf/2006.16668.pdf for details. 
# Based on https://github.com/pytorch/pytorch/pull/40762
class _AllToAll(torch.autograd.Function):
    """Autograd-aware all-to-all exchange; the backward pass is another
    all-to-all applied to the incoming gradients."""

    @staticmethod
    def forward(ctx: Any, group: dist.ProcessGroup, input: Tensor) -> Tensor:  # type: ignore
        ctx.group = group
        # all_to_all_single requires a contiguous tensor.
        input = input.contiguous()
        output = torch.empty_like(input)
        if torch.distributed.is_initialized():
            dist.all_to_all_single(output, input, group=group)
        else:
            # Single-process fallback: the exchange is the identity.
            assert group is None
            output = input
        return output

    @staticmethod
    def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]:
        # No gradient flows to the process-group argument.
        return (None, _AllToAll.apply(ctx.group, *grad_output))


def _find_my_group_index(grouped_ranks):
    # Return the index of the rank-group containing this process's rank.
    my_rank = dist.get_rank()
    for i, group in enumerate(grouped_ranks):
        if my_rank in group:
            return i
    raise RuntimeError


def get_moe_group(moe_expert_count):
    # Lazily build (and cache on the function object) the expert process
    # groups, then return the group this rank belongs to.
    # NOTE(review): returns None implicitly when torch.distributed is not
    # initialized, and the cache ignores later calls with a different
    # moe_expert_count — presumably called once per run; verify at call sites.
    if dist.is_initialized():
        if not hasattr(get_moe_group, "_moe_groups"):
            world_size = dist.get_world_size()

            if world_size <= moe_expert_count:
                # One group per rank: each rank holds >= 1 whole expert.
                assert moe_expert_count % world_size == 0
                moe_groups = [[i] for i in range(world_size)]

            else:
                # Several ranks share one expert: group them together.
                assert world_size % moe_expert_count == 0
                ranks_per_group = world_size // moe_expert_count
                moe_groups = [[i + j * moe_expert_count for j in range(ranks_per_group)]
                              for i in range(moe_expert_count)]

            get_moe_group._moe_group_idx = moe_groups
            get_moe_group._moe_groups = [dist.new_group(g) for g in moe_groups]

        my_group_idx = _find_my_group_index(get_moe_group._moe_group_idx)
        return get_moe_group._moe_groups[my_group_idx]


def get_all2all_group(moe_expert_count):
    # Lazily build (and cache) the all-to-all process groups used to
    # exchange dispatched tokens between expert shards.
    # NOTE(review): same caveats as get_moe_group (None when dist is not
    # initialized; cached result ignores a changed moe_expert_count).
    if dist.is_initialized():
        if not hasattr(get_all2all_group, "_all2all_groups"):
            world_size = dist.get_world_size()

            # more experts than world size
            if world_size <= moe_expert_count:
                assert moe_expert_count % world_size == 0
                all2all_groups = [[i for i in range(world_size)]]

            # larger world than num experts
            else:
                assert world_size % moe_expert_count == 0
                ranks_per_group = world_size // moe_expert_count
                all2all_groups = [[i * moe_expert_count + j for j in range(moe_expert_count)]
                                  for i in range(ranks_per_group)]

            get_all2all_group._all2all_group_idx = all2all_groups
            get_all2all_group._all2all_groups = [dist.new_group(g) for g in all2all_groups]

        my_group_idx = _find_my_group_index(get_all2all_group._all2all_group_idx)
        return get_all2all_group._all2all_groups[my_group_idx]


class MOELayer(Base):
    """MOELayer module which implements MixtureOfExperts as described in Gshard_.
    ::

        gate = Top2Gate(model_dim, num_experts)
        moe = MOELayer(gate, expert)
        output = moe(input)
        l_aux = moe.l_aux

    .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf

    Args:
        gate (torch.nn.Module):
            gate network
        expert (torch.nn.Module):
            expert network
    """

    def __init__(
        self,
        gate,
        experts,
        args
    ):
        # When fairseq is present, Base is fairseq's MOELayer; skip its
        # __init__ and initialize nn.Module directly.
        if has_fairseq:
            super(Base, self).__init__()
        else:
            super().__init__()
        self.gate = gate
        if type(experts) == ModuleList:
            self.experts = cast(ModuleList, experts)
        else:
            self.experts = ModuleList([experts])
        # NOTE(review): both helpers return None when torch.distributed is
        # not initialized, and dist.get_world_size below would then fail —
        # this layer appears to require an initialized process group.
        self.expert_group = get_moe_group(args.moe_expert_count)
        self.all2all_group = get_all2all_group(args.moe_expert_count)
        self.world_size = dist.get_world_size(group=self.expert_group)
        self.all2all_size = dist.get_world_size(group=self.all2all_group)
        # Tag expert parameters so optimizers/DDP can treat them specially.
        for p in experts.parameters():
            p.expert = True  # type: ignore
        self.num_local_experts = len(self.experts)
        self.args = args
        self.in_generation = False
        # Buffers for optional all-to-all timing statistics.
        self.a2a_cuda_event_intervals = []
        self.a2a_cpu_time_ms = 0.0

    def forward(self, *input: Tensor, input_padding_mask=None, **kwargs: Any) -> Tensor:
        """Dispatch tokens to experts and combine their outputs.

        input: one (batch, seq, model_dim) tensor.
        input_padding_mask: optional (batch, seq) bool mask, True = padding.
        Returns (combined_output, l_aux) — note the tuple despite the
        annotated single-Tensor return type.
        """
        assert len(input) == 1, "only single input Tensor supported"
        input = input[0]
        assert len(input.shape) == 3, "input Tensor must have dimensions: (s)equence, (t)oken, (m)odel"
        if input_padding_mask is not None:
            assert len(input_padding_mask.shape) == 2, "input Tensor must have dimensions: (s)equence, (t)oken"
            assert input_padding_mask.shape[0] == input.shape[0]
            assert input_padding_mask.shape[1] == input.shape[1]
        # assert input.shape[0] % len(self.experts) == 0, "num tokens must be order of number of local experts"

        # Implement Algorithm 2 from GShard paper.
        d_model = input.shape[2]
        # Pad to expected batch size
        input_shape = list(input.shape)
        expected_bsz = getattr(self.args, 'batch_size', 0) if self.training else getattr(self.args, 'batch_size_valid', 0)
        # This indicates that --batch-size or --max-sentences is not specified
        if expected_bsz is None:
            expected_bsz = 0
        # Note: Padding is not necessary at generation time at present
        # because all DDP workers process the same batch. Also, batch size at generation time
        # can be different from that present in the checkpoint state
        if not self.in_generation and expected_bsz != 0 and input_shape[0] != expected_bsz:
            logger.warning(f"padding batch with unexpected size {input_shape[0]} (expected: {expected_bsz})")
            # Only padding up is supported; a larger-than-expected batch asserts.
            assert input_shape[0] < expected_bsz, f"{input_shape[0]} < {expected_bsz}"
            padded_input = torch.zeros(
                (expected_bsz, input_shape[1], input_shape[2]),
                dtype=input.dtype, layout=input.layout, device=input.device)
            padded_input[:input_shape[0], :, :] = input
            input = padded_input

            # Rows added by padding are marked True (padding) in the mask.
            padded_input_padding_mask = torch.ones(
                (expected_bsz, input_shape[1], ), dtype=torch.bool, device=input.device
            )
            if input_padding_mask is not None:
                padded_input_padding_mask[:input_shape[0], :] = input_padding_mask
            else:
                padded_input_padding_mask[:input_shape[0], :] = False
            input_padding_mask = padded_input_padding_mask

        # Reshape into S tokens by dropping sequence dimension.
        reshaped_input = input.reshape(-1, d_model)
        reshaped_input_shape = reshaped_input.shape
        reshaped_input_padding_mask = input_padding_mask.reshape(-1) if input_padding_mask is not None else None

        # Doing padding here when --max-tokens is specified and not --batch-size or --max-sentences
        # Pro of --max-tokens: more flexible for MT variable sequence lengths
        # Con of --max-tokens: extra all-reduce needed to figure out optimal padding without running OOM
        if expected_bsz == 0:
            # All workers agree on the max token count so the all-to-all shapes match.
            expected_dim = reshaped_input_shape[0] * torch.ones((1,), dtype=torch.long, device=input.device)
            dist.all_reduce(expected_dim, group=dist.group.WORLD, op=dist.ReduceOp.MAX)
            expected_dim = int(expected_dim.item())
            padded_input = torch.zeros(
                (expected_dim, reshaped_input_shape[1]),
                dtype=input.dtype, layout=input.layout, device=input.device)
            padded_input[:reshaped_input_shape[0], :] = reshaped_input
            reshaped_input = padded_input

            padded_input_padding_mask = torch.ones(
                (expected_dim,), dtype=torch.bool, device=padded_input.device
            )
            if reshaped_input_padding_mask is not None:
                padded_input_padding_mask[:reshaped_input_shape[0]] = reshaped_input_padding_mask
            else:
                padded_input_padding_mask[:reshaped_input_shape[0]] = False
            reshaped_input_padding_mask = padded_input_padding_mask

        if has_tutel:
            # Tutel fast path: the gate returns raw routing indices/locations
            # and a fused dispatcher performs encode/decode.
            l_aux, self.metadata, C, E, indices_, locations_, gates_ = self.gate(reshaped_input, reshaped_input_padding_mask)
            S, M = reshaped_input.size(0), reshaped_input.size(1)

            if not hasattr(self, '_tutel_dispatcher'):
                self._tutel_dispatcher = tutel_moe.fast_dispatcher(E, C, M, dispatch_dtype=reshaped_input.dtype)
            self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C)
            dispatched_input = self._tutel_dispatcher.encode(reshaped_input)
        else:
            l_aux, combine_weights, dispatch_mask, self.metadata = self.gate(reshaped_input, reshaped_input_padding_mask)

            dispatch_mask = dispatch_mask.to(input.dtype).permute(1, 2, 0)  # S,E,C -> E,C,S
            E, C, S = dispatch_mask.size()
            M = reshaped_input.size(1)
            assert reshaped_input.size() == (S, M)
            # einsum("sec,sm->ecm")
            dispatched_input = torch.mm(dispatch_mask.view(E*C, S), reshaped_input)  # -> (E*C),M

        if self.all2all_size > 1:
            dispatched_input = self.all_to_all_wrapper(dispatched_input)

        # Re-shape after all-to-all: ecm -> gecm
        dispatched_input = dispatched_input.reshape(self.all2all_size, self.num_local_experts, -1, d_model)
        chunks = dispatched_input.chunk(self.num_local_experts, dim=1)
        expert_outputs = []
        for chunk, expert in zip(chunks, self.experts):
            expert_outputs += [expert(chunk)]
        expert_output = torch.cat(expert_outputs, dim=1)

        if self.all2all_size > 1:
            expert_output = self.all_to_all_wrapper(expert_output)

        # Re-shape back: gecm -> ecm
        expert_output = expert_output.reshape(self.all2all_size * self.num_local_experts, -1, d_model)

        if has_tutel:
            combined_output = self._tutel_dispatcher.decode(expert_output.view(E*C, M))
        else:
            # einsum("sec,ecm->sm")
            combined_output = combine_weights.view(S, E*C).mm(expert_output.view(E*C, M))

        # Remove padding here when --max-tokens is specified and not --batch-size or --max-sentences
        combined_output = combined_output[:reshaped_input_shape[0], :]
        combined_output = combined_output.reshape(input.shape)
        combined_output = combined_output[:input_shape[0], :, :]

        self.record_all_to_all_stats()

        return combined_output, l_aux

    def prepare_for_inference_(self):
        # Disables batch-size padding in forward (generation batches vary).
        self.in_generation = True

    def all_to_all_wrapper(self, input: Tensor):
        """Run the all-to-all exchange, recording CPU and CUDA timings."""
        dummy_a2a = getattr(self.args, 'dummy_a2a', False)
        if dummy_a2a:
            input = input.contiguous()
            output = input.detach().clone()
            # NOTE(review): the clone above is unused and the *input* is
            # returned — mirrors the upstream fairseq code; confirm intended.
            return input
        # always record times, since it is not a lot of overhead
        # if we do not log it we simply clear it off in record_all_to_all_stats
        cuda_start = torch.cuda.Event(enable_timing=True)
        cuda_end = torch.cuda.Event(enable_timing=True)
        cpu_start = time.time() * 1000
        cuda_start.record()
        output = _AllToAll.apply(self.all2all_group, input)
        cuda_end.record()
        cpu_end = time.time() * 1000
        self.a2a_cpu_time_ms += (cpu_end - cpu_start)
        self.a2a_cuda_event_intervals.append((cuda_start, cuda_end))
        return output

    def record_all_to_all_stats(self):
        """Fold accumulated all-to-all timings into self.metadata, then reset."""
        # controlled via an argument as we want to minimize any impact from torch.cuda.synchronize()
        record_a2a_perf_stats = getattr(self.args, 'record_a2a_perf_stats', False)
        if record_a2a_perf_stats:
            torch.cuda.synchronize()
            self.metadata["all_to_all_cpu_time_ms"] = self.a2a_cpu_time_ms
            a2a_cuda_time_ms = 0.0
            for ev_start, ev_end in self.a2a_cuda_event_intervals:
                a2a_cuda_time_ms += ev_start.elapsed_time(ev_end)
            self.metadata["all_to_all_cuda_time_ms"] = a2a_cuda_time_ms
        # reset stats
        self.a2a_cpu_time_ms = 0.0
        self.a2a_cuda_event_intervals = []


# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
+ +# Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf +# Code is inspired by Top2GatingOnLogits from lingvo: +# https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 + +# NOTE: This is a mirror of the code in +# https://github.com/facebookresearch/fairscale/tree/master/fairscale/nn/moe + +from typing import Callable, Dict, Tuple, Optional + +import math +import torch +from torch import Tensor +import torch.nn.functional as F + +from .moe_layer import has_tutel, fused_cumsum_sub_one + +# use a fixed temperature to compute balance loss +TEMPERATURE_FOR_L_UAX = 0.07 + +# maximum capacity of 1 expert as a fraction of number of tokens in the batch +# Note: setting this to 1.0 causes inference to significantly slow down +EVAL_CAPACITY_TOKEN_FRACTION = 0.25 + +# logging +SAMPLE_FRACTION = 0.2 + + +def top1gating( + logits: torch.Tensor, + input_mask: Optional[torch.Tensor] = None, + use_fp32=False, + capacity_factor=1.0, + eval_mode=False, + moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION, + use_xmoe=False, + gate_obj=None, +) -> Tuple[Tensor, Tensor, Tensor, Dict]: + """Implements Top2Gating on logits.""" + metadata = {} + if use_fp32: + orig_dtype = logits.dtype + logits = logits.float() + + gates = F.softmax(logits, dim=1) + metadata["entropy_gating"] = entropy(probs=gates).mean().detach() + + # gates has shape of SE + num_tokens = gates.shape[0] + num_experts = gates.shape[1] + if moe_eval_capacity_token_fraction > 0.0 and eval_mode: + capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens) + else: + # capacity = capacity_factor * S/E + capacity = int(capacity_factor * math.ceil(num_tokens / num_experts)) + + # Create a mask for 1st's expert per token + indices1_s = torch.argmax(gates, dim=1) + mask1 = one_hot(indices1_s, num_classes=num_experts, unsqueeze_indices=True) + if input_mask is not None and input_mask.any(): + nonpadding = ~ input_mask + mask1 
= mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype) + + # for logging (percent of tokens routed to each expert) + expert1_hist = 100 * torch.histc((indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts) / num_tokens + metadata["unused_expert1_count"] = (expert1_hist == 0).sum() + expert1_hist = torch.sort(expert1_hist, dim=0, descending=True).values + torch.finfo(torch.float32).tiny + + sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1) + metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum() + metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum() + + gates1_s = (gates * mask1).sum(dim=1) + + # Compute locations in capacity buffer + locations1 = fused_cumsum_sub_one(mask1) + + # Compute l_aux + me = torch.mean(gates, dim=0) + ce = torch.mean(mask1.to(gates.dtype), dim=0) + + l_aux = torch.mean(me * ce) + l_aux = l_aux * num_experts * num_experts + + if has_tutel: + locations1_s = torch.sum(locations1 * mask1, dim=1) + return l_aux, metadata, capacity, num_experts, [indices1_s, ], [locations1_s, ], [gates1_s, ] + + # Remove locations outside capacity from mask + mask1 = mask1 * torch.lt(locations1, capacity) + # Store the capacity location for each token + locations1_s = torch.sum(locations1 * mask1, dim=1) + + # Calculate combine_weights and dispatch_mask + gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se") + # locations1_sc = num_tokens * capacity + locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True) + combine1_sec = torch.bmm( + # einsum("se,sc->sec") + gates1.unsqueeze(-1), locations1_sc.to(gates1.dtype).unsqueeze(1) + ) + dispatch_mask = combine1_sec.bool() + if use_fp32: + return l_aux, combine1_sec.to(orig_dtype), dispatch_mask, metadata + else: + return l_aux, combine1_sec, dispatch_mask, metadata + + +class Top1Gate(torch.nn.Module): + """Gate module which implements Top2Gating as described in Gshard_. 
+ :: + + gate = Top2Gate(model_dim, num_experts) + l_aux, combine_weights, dispatch_mask = gate(input) + + .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf + + Args: + model_dim (int): + size of model embedding dimension + num_experts (ints): + number of experts in model + """ + + wg: torch.nn.Linear + + def __init__( + self, + model_dim: int, + num_experts: int, + use_fp32=False, + input_noise_type=None, + capacity_factor=1.0, + moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION, + use_xmoe=False, + ) -> None: + # TODO: merge this to top2gate.py + # + super().__init__() + + if not use_xmoe: + self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) + else: + self.wg_reduction = torch.nn.Linear(model_dim, 16, bias=False) + wg = torch.empty(num_experts, 16) + torch.nn.init.orthogonal_(wg, gain=0.32) + self.register_parameter("wg", torch.nn.Parameter(wg)) + + self.use_xmoe = use_xmoe + self.use_fp32 = use_fp32 + self.input_noise_type = input_noise_type + self.capacity_factor = capacity_factor + self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction + + def forward(self, input, mask=None): # type: ignore + if self.use_xmoe: + input = self.wg_reduction(input) + with torch.no_grad(): + wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True) + self.wg.mul_(1.5 / wg_norm) + logits = self._cosine(input, self.wg) + logits = self._make_finite(logits) + else: + logits = self.wg(input) + + return top1gating( + logits, + mask, + use_fp32=self.use_fp32, + capacity_factor=self.capacity_factor, + eval_mode=not self.training, + moe_eval_capacity_token_fraction=self.moe_eval_capacity_token_fraction, + use_xmoe=self.use_xmoe, + gate_obj=self, + ) + + def _make_finite(self, scores): + ok = scores.isfinite() + if not ok.all(): + # NaNs here can break the assignment algorithm + scores[~ok] = scores[ok].min() + return scores + + def _get_gating_temperature(self, eps=1e-4): + if self.gating_t.data.item() < eps: + return eps + return self.gating_t + + def 
_cosine(self, mat1, mat2, eps=1e-4): + assert mat1.dim() == 2 + assert mat2.dim() == 2 + # mat1 = F.normalize(mat1, p=2.0, dim=1, eps=eps) + mat2 = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps) + return mat1.float().matmul(mat2.transpose(0, 1)).type_as(mat1) + + +gumbel_map: Dict[torch.device, Callable] = {} + + +def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor: + gumbel = gumbel_map.get(device) + if gumbel is None: + one = torch.tensor(1.0, device=device) + zero = torch.tensor(0.0, device=device) + gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore + gumbel_map[device] = gumbel + return gumbel(shape) + + +def one_hot(indices: torch.Tensor, num_classes: int, unsqueeze_indices=False) -> Tensor: + if unsqueeze_indices: + indices = indices.unsqueeze(-1) + assert indices.shape[-1] == 1, "last dimension of indices must be have size 1" + output = torch.zeros(indices.shape[:-1] + (num_classes,), device=indices.device, dtype=indices.dtype) + output.scatter_( + len(output.shape) - 1, indices, 1 + ) + return output + + +def entropy(probs): + logits = torch.distributions.utils.probs_to_logits(probs) + p_log_p = probs * logits + return -p_log_p.sum(-1) + + +def top2gating( + logits: torch.Tensor, + input_mask: Optional[torch.Tensor] = None, + use_fp32=False, + second_expert_policy='sampling', + normalize_gate_prob_before_dropping=False, + eval_mode=False, + moe_eval_capacity_token_fraction=0.25, + batch_prioritized_routing=False, +) -> Tuple[Tensor, Tensor, Tensor]: + """Implements Top2Gating on logits.""" + metadata = {} + if use_fp32: + orig_dtype = logits.dtype + logits = logits.float() + gates = F.softmax(logits, dim=1) + metadata["entropy_gating"] = entropy(probs=gates).mean().detach() + # gates has shape of SE + num_tokens = gates.shape[0] + num_experts = gates.shape[1] + if moe_eval_capacity_token_fraction > 0.0 and eval_mode: + capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens) + else: + # capacity = 2S/E + 
capacity = 2 * math.ceil(num_tokens / num_experts) + + # Create a mask for 1st's expert per token + indices1_s = torch.argmax(gates, dim=1, keepdim=True) + mask1 = one_hot(indices1_s, num_experts) + if second_expert_policy == 'sampling': + # Create a mask for 2nd's expert per token using Gumbel-max trick + # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ + logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) + else: + logits_w_noise = logits + # Replace top-expert with min value + logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf")) + indices2_s = torch.argmax(logits_except1, dim=1, keepdim=True) + mask2 = one_hot(indices2_s, num_experts) + gates1_s = (gates * mask1).sum(dim=1) + gates2_s = (gates * mask2).sum(dim=1) + + if normalize_gate_prob_before_dropping: + # Normalize gate probabilities + denom_s = gates1_s + gates2_s + # Avoid divide-by-zero + denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) + gates1_s = gates1_s / denom_s + gates2_s = gates2_s / denom_s + + if second_expert_policy == 'random': + sampled = (2 * gates2_s) > torch.rand_like(gates2_s) + mask2 = mask2 * sampled.repeat(num_experts, 1).transpose(1, 0) + + # Compute locations in capacity buffer + if input_mask is not None and input_mask.any(): + nonpadding = ~ input_mask + mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype) + mask2 = mask2 * nonpadding.unsqueeze(-1).to(mask1.dtype) + + if batch_prioritized_routing: + # if batch_prioritized_routing: + importance_scores = -1 * gates.max(dim=1)[0] + sorted_mask1 = mask1[importance_scores.argsort(dim=0)] + sorted_cumsum1 = fused_cumsum_sub_one(sorted_mask1) * sorted_mask1 + importance_sorted_locations1 = sorted_cumsum1[importance_scores.argsort(dim=0).argsort(dim=0)] + + sorted_mask2 = mask2[importance_scores.argsort(dim=0)] + sorted_cumsum2 = fused_cumsum_sub_one(sorted_mask2) * sorted_mask2 + importance_sorted_locations2 = 
sorted_cumsum2[importance_scores.argsort(dim=0).argsort(dim=0)] + + importance_sorted_locations2 += torch.sum(mask1, dim=0, keepdim=True) + + locations1, locations2 = importance_sorted_locations1, importance_sorted_locations2 + else: + locations1 = fused_cumsum_sub_one(mask1) + locations2 = fused_cumsum_sub_one(mask2) + # Update 2nd's location by accounting for locations of 1st + locations2 += torch.sum(mask1, dim=0, keepdim=True) + + # Compute l_aux + me = torch.mean(gates, dim=0) + ce = torch.mean(mask1.to(gates.dtype), dim=0) + l_aux = torch.mean(me * ce) + l_aux = l_aux * num_experts * num_experts + + # for logging purposes + metadata["overflow_expert1"] = 100 * torch.sum(mask1 * torch.ge(locations1, capacity)) / torch.sum(mask1) + metadata["overflow_expert2"] = 100 * torch.sum(mask2 * torch.ge(locations2, capacity)) / torch.sum(mask2) + + # Remove locations outside capacity from mask + mask1_, mask2_ = mask1, mask2 + mask1 = mask1 * torch.lt(locations1, capacity) + mask2 = mask2 * torch.lt(locations2, capacity) + + # for logging (percent of tokens routed to each expert) + expert1_hist = 100 * torch.histc((indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts) / num_tokens + metadata["unused_expert1_count"] = (expert1_hist == 0).sum() + expert1_hist = torch.sort(expert1_hist, dim=0, descending=True).values + torch.finfo(torch.float32).tiny + + expert2_hist = 100 * torch.histc((indices2_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts) / num_tokens + metadata["unused_expert2_count"] = (expert2_hist == 0).sum() + expert2_hist = torch.sort(expert2_hist, dim=0, descending=True).values + torch.finfo(torch.float32).tiny + + sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1) + metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum() + metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum() + + metadata["expert2_balance_top"] = expert2_hist[:sample_count].sum() + metadata["expert2_balance_bottom"] = 
class Top2Gate(torch.nn.Module):
    """Top-2 gating network (GShard-style), with optional X-MoE routing.

    Maps token representations to per-expert logits and delegates the
    actual top-2 token-to-expert assignment to ``top2gating``.

    Usage::

        gate = Top2Gate(model_dim, num_experts)
        l_aux, combine_weights, dispatch_mask = gate(input)

    Reference: https://arxiv.org/pdf/2006.16668.pdf

    Args:
        model_dim (int): size of the model embedding dimension.
        num_experts (int): number of experts to route between.
    """

    wg: torch.nn.Linear

    def __init__(
        self,
        model_dim: int,
        num_experts: int,
        use_fp32=False,
        second_expert_policy='sampling',
        normalize_gate_prob_before_dropping=False,
        moe_eval_capacity_token_fraction=0.25,
        batch_prioritized_routing=False,
        use_xmoe=False,
    ) -> None:
        super().__init__()
        # Stash the routing configuration first; the gating projection
        # itself is built below depending on the routing flavour.
        self.use_fp32 = use_fp32
        self.second_expert_policy = second_expert_policy
        self.normalize_gate_prob_before_dropping = normalize_gate_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.batch_prioritized_routing = batch_prioritized_routing
        self.use_xmoe = use_xmoe
        if use_xmoe:
            # X-MoE: project tokens into a low-dimensional (16-d) routing
            # space and score them by cosine similarity against learned
            # expert embeddings.
            self.wg_reduction = torch.nn.Linear(model_dim, 16, bias=False)
            expert_embeddings = torch.empty(num_experts, 16)
            torch.nn.init.orthogonal_(expert_embeddings, gain=0.32)
            self.register_parameter("wg", torch.nn.Parameter(expert_embeddings))
        else:
            # Vanilla GShard gate: a single linear projection to expert logits.
            self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)

    def forward(self, input, mask=None):  # type: ignore
        """Compute routing logits and run top-2 gating over them."""
        if not self.use_xmoe:
            logits = self.wg(input)
        else:
            input = self.wg_reduction(input)
            # Rescale expert embeddings to L2-norm 1.5 before scoring; done
            # under no_grad so the renormalization is not differentiated.
            with torch.no_grad():
                wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True)
                self.wg.mul_(1.5 / wg_norm)
            logits = self._make_finite(self._cosine(input, self.wg))
        return top2gating(
            logits,
            mask,
            use_fp32=self.use_fp32,
            second_expert_policy=self.second_expert_policy,
            normalize_gate_prob_before_dropping=self.normalize_gate_prob_before_dropping,
            eval_mode=not self.training,
            moe_eval_capacity_token_fraction=self.moe_eval_capacity_token_fraction,
            batch_prioritized_routing=self.batch_prioritized_routing,
        )

    def _cosine(self, mat1, mat2, eps=1e-4):
        """Similarity mat1 @ normalize(mat2)^T; only mat2 is L2-normalized."""
        assert mat1.dim() == 2
        assert mat2.dim() == 2
        # mat1 = F.normalize(mat1, p=2.0, dim=1, eps=eps)
        normalized = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps)
        return mat1.float().matmul(normalized.transpose(0, 1)).type_as(mat1)

    def _make_finite(self, scores):
        """Replace non-finite entries in-place with the smallest finite score."""
        ok = scores.isfinite()
        if ok.all():
            return scores
        # NaNs/Infs here can break the assignment algorithm downstream.
        scores[~ok] = scores[ok].min()
        return scores
class BEiT3(nn.Module):
    """Multiway encoder accepting text tokens, image patches, or both.

    Text and vision inputs are embedded separately; in the joint case the
    vision sequence is prepended to the text sequence and the boundary is
    passed to the encoder as the multiway split position.
    """

    def __init__(self, args, **kwargs):
        super().__init__()
        self.args = args
        # This model requires a multiway encoder, a real text vocabulary,
        # and untied input/output embeddings.
        assert args.multiway
        assert args.vocab_size > 0
        assert not args.share_encoder_input_output_embed
        self.text_embed = TextEmbedding(
            args.vocab_size,
            args.encoder_embed_dim
        )
        self.vision_embed = VisionEmbedding(
            args.img_size,
            args.patch_size,
            args.in_chans,
            args.encoder_embed_dim,
            contain_mask_token=True,
            prepend_cls_token=True
        )
        # Positional embeddings are wrapped so each modality gets its own copy.
        embed_positions = MultiwayWrapper(
            args,
            PositionalEmbedding(
                args.max_source_positions,
                args.encoder_embed_dim
            ),
            dim=1,
        )
        self.encoder = Encoder(
            args,
            embed_tokens=None,
            embed_positions=embed_positions,
            output_projection=None,
            is_encoder_decoder=False,
        )

    def forward(
        self,
        textual_tokens=None,
        visual_tokens=None,
        text_padding_position=None,
        vision_masked_position=None,
    ):
        assert textual_tokens is not None or visual_tokens is not None

        if textual_tokens is None:
            # Vision-only: no padding mask, no multiway split.
            tokens = self.vision_embed(visual_tokens, vision_masked_position)
            padding_mask = None
            split_position = -1
        elif visual_tokens is None:
            # Text-only: the whole sequence routes through the text branch.
            tokens = self.text_embed(textual_tokens)
            padding_mask = text_padding_position
            split_position = 0
        else:
            # Joint input: [vision ; text], split at the vision length.
            vision_states = self.vision_embed(visual_tokens, vision_masked_position)
            text_states = self.text_embed(textual_tokens)
            split_position = vision_states.size(1)
            tokens = torch.cat([vision_states, text_states], dim=1)

            if text_padding_position is None:
                padding_mask = None
            else:
                # Vision positions are never padding; extend the text mask
                # with zeros for the vision prefix.
                vision_pad = torch.zeros(vision_states.shape[:-1]).to(vision_states.device).bool()
                padding_mask = torch.cat(
                    [vision_pad, text_padding_position],
                    dim=1,
                )

        encoder_out = self.encoder(
            src_tokens=None,
            encoder_padding_mask=padding_mask,
            token_embeddings=tokens,
            multiway_split_position=split_position,
        )

        return encoder_out