From 75c4511e6b81ae8fb0dbd932043e8eb35cd09f72 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Tue, 29 Nov 2022 10:28:41 +0800 Subject: [PATCH] add AltDiffusion to webui Signed-off-by: zhaohu xing <920232796@qq.com> --- configs/altdiffusion/ad-inference.yaml | 72 + configs/stable-diffusion/v1-inference.yaml | 71 + ldm/data/__init__.py | 0 ldm/data/base.py | 23 + ldm/data/imagenet.py | 394 +++++ ldm/data/lsun.py | 92 ++ ldm/lr_scheduler.py | 98 ++ ldm/models/autoencoder.py | 443 +++++ ldm/models/diffusion/__init__.py | 0 ldm/models/diffusion/classifier.py | 267 +++ ldm/models/diffusion/ddim.py | 241 +++ ldm/models/diffusion/ddpm.py | 1445 +++++++++++++++++ ldm/models/diffusion/dpm_solver/__init__.py | 1 + ldm/models/diffusion/dpm_solver/dpm_solver.py | 1184 ++++++++++++++ ldm/models/diffusion/dpm_solver/sampler.py | 82 + ldm/models/diffusion/plms.py | 236 +++ ldm/modules/attention.py | 261 +++ ldm/modules/diffusionmodules/__init__.py | 0 ldm/modules/diffusionmodules/model.py | 835 ++++++++++ ldm/modules/diffusionmodules/openaimodel.py | 961 +++++++++++ ldm/modules/diffusionmodules/util.py | 267 +++ ldm/modules/distributions/__init__.py | 0 ldm/modules/distributions/distributions.py | 92 ++ ldm/modules/ema.py | 76 + ldm/modules/encoders/__init__.py | 0 ldm/modules/encoders/modules.py | 234 +++ ldm/modules/encoders/xlmr.py | 137 ++ ldm/modules/image_degradation/__init__.py | 2 + ldm/modules/image_degradation/bsrgan.py | 730 +++++++++ ldm/modules/image_degradation/bsrgan_light.py | 650 ++++++++ ldm/modules/image_degradation/utils/test.png | Bin 0 -> 441072 bytes ldm/modules/image_degradation/utils_image.py | 916 +++++++++++ ldm/modules/losses/__init__.py | 1 + ldm/modules/losses/contperceptual.py | 111 ++ ldm/modules/losses/vqperceptual.py | 167 ++ ldm/modules/x_transformer.py | 641 ++++++++ ldm/util.py | 203 +++ modules/devices.py | 4 +- modules/sd_hijack.py | 23 +- modules/shared.py | 6 +- 40 files changed, 10957 insertions(+), 9 deletions(-) create mode 100644 configs/altdiffusion/ad-inference.yaml create mode 100644 configs/stable-diffusion/v1-inference.yaml create mode 100644 ldm/data/__init__.py create mode 100644 ldm/data/base.py create mode 100644 ldm/data/imagenet.py create mode 100644 ldm/data/lsun.py create mode 100644 ldm/lr_scheduler.py create mode 100644 ldm/models/autoencoder.py create mode 100644 ldm/models/diffusion/__init__.py create mode 100644 ldm/models/diffusion/classifier.py create mode 100644 ldm/models/diffusion/ddim.py create mode 100644 ldm/models/diffusion/ddpm.py create mode 100644 ldm/models/diffusion/dpm_solver/__init__.py create mode 100644 ldm/models/diffusion/dpm_solver/dpm_solver.py create mode 100644 ldm/models/diffusion/dpm_solver/sampler.py create mode 100644 ldm/models/diffusion/plms.py create mode 100644 ldm/modules/attention.py create mode 100644 ldm/modules/diffusionmodules/__init__.py create mode 100644 ldm/modules/diffusionmodules/model.py create mode 100644 ldm/modules/diffusionmodules/openaimodel.py create mode 100644 ldm/modules/diffusionmodules/util.py create mode 100644 ldm/modules/distributions/__init__.py create mode 100644 ldm/modules/distributions/distributions.py create mode 100644 ldm/modules/ema.py create mode 100644 ldm/modules/encoders/__init__.py create mode 100644 ldm/modules/encoders/modules.py create mode 100644 ldm/modules/encoders/xlmr.py create mode 100644 ldm/modules/image_degradation/__init__.py create mode 100644 ldm/modules/image_degradation/bsrgan.py create mode 100644 
ldm/modules/image_degradation/bsrgan_light.py create mode 100644 ldm/modules/image_degradation/utils/test.png create mode 100644 ldm/modules/image_degradation/utils_image.py create mode 100644 ldm/modules/losses/__init__.py create mode 100644 ldm/modules/losses/contperceptual.py create mode 100644 ldm/modules/losses/vqperceptual.py create mode 100644 ldm/modules/x_transformer.py create mode 100644 ldm/util.py diff --git a/configs/altdiffusion/ad-inference.yaml b/configs/altdiffusion/ad-inference.yaml new file mode 100644 index 00000000..1b11b63e --- /dev/null +++ b/configs/altdiffusion/ad-inference.yaml @@ -0,0 +1,72 @@ +model: + base_learning_rate: 1.0e-04 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + + scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 10000 ] + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. ] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.xlmr.BertSeriesModelWithTransformation + params: + name: "XLMR-Large" \ No newline at end of file diff --git a/configs/stable-diffusion/v1-inference.yaml b/configs/stable-diffusion/v1-inference.yaml new file mode 100644 index 00000000..2e6ef0f2 --- /dev/null +++ b/configs/stable-diffusion/v1-inference.yaml @@ -0,0 +1,71 @@ +model: + base_learning_rate: 1.0e-04 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + + scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 10000 ] + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. 
] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + # target: ldm.modules.encoders.modules.FrozenCLIPEmbedder + target: altclip.model.AltCLIPEmbedder \ No newline at end of file diff --git a/ldm/data/__init__.py b/ldm/data/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldm/data/base.py b/ldm/data/base.py new file mode 100644 index 00000000..b196c2f7 --- /dev/null +++ b/ldm/data/base.py @@ -0,0 +1,23 @@ +from abc import abstractmethod +from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset + + +class Txt2ImgIterableBaseDataset(IterableDataset): + ''' + Define an interface to make the IterableDatasets for text2img data chainable + ''' + def __init__(self, num_records=0, valid_ids=None, size=256): + super().__init__() + self.num_records = num_records + self.valid_ids = valid_ids + self.sample_ids = valid_ids + self.size = size + + print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.') + + def __len__(self): + return self.num_records + + @abstractmethod + def __iter__(self): + pass \ No newline at end of file diff --git a/ldm/data/imagenet.py b/ldm/data/imagenet.py new file mode 100644 index 00000000..1c473f9c --- /dev/null +++ b/ldm/data/imagenet.py @@ -0,0 +1,394 @@ +import os, yaml, pickle, shutil, tarfile, glob +import cv2 +import albumentations +import PIL +import numpy as np +import torchvision.transforms.functional as TF +from omegaconf import OmegaConf +from functools import partial +from PIL import Image +from tqdm import tqdm +from torch.utils.data import Dataset, Subset + +import taming.data.utils as tdu +from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve +from taming.data.imagenet import ImagePaths + +from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light + + +def synset2idx(path_to_yaml="data/index_synset.yaml"): + with open(path_to_yaml) as f: + di2s = yaml.load(f) + return dict((v,k) for k,v in di2s.items()) + + +class ImageNetBase(Dataset): + def __init__(self, config=None): + self.config = config or OmegaConf.create() + if not type(self.config)==dict: + self.config = OmegaConf.to_container(self.config) + self.keep_orig_class_label = self.config.get("keep_orig_class_label", False) + self.process_images = True # if False we skip loading & processing images and self.data contains filepaths + self._prepare() + self._prepare_synset_to_human() + self._prepare_idx_to_synset() + self._prepare_human_to_integer_label() + self._load() + + def __len__(self): + return len(self.data) + + def __getitem__(self, i): + return self.data[i] + + def _prepare(self): + raise NotImplementedError() + + def _filter_relpaths(self, relpaths): + ignore = set([ + "n06596364_9591.JPEG", + ]) + relpaths = [rpath for rpath in relpaths if not 
rpath.split("/")[-1] in ignore] + if "sub_indices" in self.config: + indices = str_to_indices(self.config["sub_indices"]) + synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn) # returns a list of strings + self.synset2idx = synset2idx(path_to_yaml=self.idx2syn) + files = [] + for rpath in relpaths: + syn = rpath.split("/")[0] + if syn in synsets: + files.append(rpath) + return files + else: + return relpaths + + def _prepare_synset_to_human(self): + SIZE = 2655750 + URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1" + self.human_dict = os.path.join(self.root, "synset_human.txt") + if (not os.path.exists(self.human_dict) or + not os.path.getsize(self.human_dict)==SIZE): + download(URL, self.human_dict) + + def _prepare_idx_to_synset(self): + URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1" + self.idx2syn = os.path.join(self.root, "index_synset.yaml") + if (not os.path.exists(self.idx2syn)): + download(URL, self.idx2syn) + + def _prepare_human_to_integer_label(self): + URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1" + self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt") + if (not os.path.exists(self.human2integer)): + download(URL, self.human2integer) + with open(self.human2integer, "r") as f: + lines = f.read().splitlines() + assert len(lines) == 1000 + self.human2integer_dict = dict() + for line in lines: + value, key = line.split(":") + self.human2integer_dict[key] = int(value) + + def _load(self): + with open(self.txt_filelist, "r") as f: + self.relpaths = f.read().splitlines() + l1 = len(self.relpaths) + self.relpaths = self._filter_relpaths(self.relpaths) + print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths))) + + self.synsets = [p.split("/")[0] for p in self.relpaths] + self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths] + + unique_synsets = np.unique(self.synsets) + class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets)) + if not self.keep_orig_class_label: + self.class_labels = [class_dict[s] for s in self.synsets] + else: + self.class_labels = [self.synset2idx[s] for s in self.synsets] + + with open(self.human_dict, "r") as f: + human_dict = f.read().splitlines() + human_dict = dict(line.split(maxsplit=1) for line in human_dict) + + self.human_labels = [human_dict[s] for s in self.synsets] + + labels = { + "relpath": np.array(self.relpaths), + "synsets": np.array(self.synsets), + "class_label": np.array(self.class_labels), + "human_label": np.array(self.human_labels), + } + + if self.process_images: + self.size = retrieve(self.config, "size", default=256) + self.data = ImagePaths(self.abspaths, + labels=labels, + size=self.size, + random_crop=self.random_crop, + ) + else: + self.data = self.abspaths + + +class ImageNetTrain(ImageNetBase): + NAME = "ILSVRC2012_train" + URL = "http://www.image-net.org/challenges/LSVRC/2012/" + AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2" + FILES = [ + "ILSVRC2012_img_train.tar", + ] + SIZES = [ + 147897477120, + ] + + def __init__(self, process_images=True, data_root=None, **kwargs): + self.process_images = process_images + self.data_root = data_root + super().__init__(**kwargs) + + def _prepare(self): + if self.data_root: + self.root = os.path.join(self.data_root, self.NAME) + else: + cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) + self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) + + self.datadir = 
os.path.join(self.root, "data") + self.txt_filelist = os.path.join(self.root, "filelist.txt") + self.expected_length = 1281167 + self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop", + default=True) + if not tdu.is_prepared(self.root): + # prep + print("Preparing dataset {} in {}".format(self.NAME, self.root)) + + datadir = self.datadir + if not os.path.exists(datadir): + path = os.path.join(self.root, self.FILES[0]) + if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: + import academictorrents as at + atpath = at.get(self.AT_HASH, datastore=self.root) + assert atpath == path + + print("Extracting {} to {}".format(path, datadir)) + os.makedirs(datadir, exist_ok=True) + with tarfile.open(path, "r:") as tar: + tar.extractall(path=datadir) + + print("Extracting sub-tars.") + subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar"))) + for subpath in tqdm(subpaths): + subdir = subpath[:-len(".tar")] + os.makedirs(subdir, exist_ok=True) + with tarfile.open(subpath, "r:") as tar: + tar.extractall(path=subdir) + + filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) + filelist = [os.path.relpath(p, start=datadir) for p in filelist] + filelist = sorted(filelist) + filelist = "\n".join(filelist)+"\n" + with open(self.txt_filelist, "w") as f: + f.write(filelist) + + tdu.mark_prepared(self.root) + + +class ImageNetValidation(ImageNetBase): + NAME = "ILSVRC2012_validation" + URL = "http://www.image-net.org/challenges/LSVRC/2012/" + AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5" + VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1" + FILES = [ + "ILSVRC2012_img_val.tar", + "validation_synset.txt", + ] + SIZES = [ + 6744924160, + 1950000, + ] + + def __init__(self, process_images=True, data_root=None, **kwargs): + self.data_root = data_root + self.process_images = process_images + super().__init__(**kwargs) + + def _prepare(self): + if self.data_root: + self.root = os.path.join(self.data_root, self.NAME) + else: + cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) + self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) + self.datadir = os.path.join(self.root, "data") + self.txt_filelist = os.path.join(self.root, "filelist.txt") + self.expected_length = 50000 + self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop", + default=False) + if not tdu.is_prepared(self.root): + # prep + print("Preparing dataset {} in {}".format(self.NAME, self.root)) + + datadir = self.datadir + if not os.path.exists(datadir): + path = os.path.join(self.root, self.FILES[0]) + if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: + import academictorrents as at + atpath = at.get(self.AT_HASH, datastore=self.root) + assert atpath == path + + print("Extracting {} to {}".format(path, datadir)) + os.makedirs(datadir, exist_ok=True) + with tarfile.open(path, "r:") as tar: + tar.extractall(path=datadir) + + vspath = os.path.join(self.root, self.FILES[1]) + if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]: + download(self.VS_URL, vspath) + + with open(vspath, "r") as f: + synset_dict = f.read().splitlines() + synset_dict = dict(line.split() for line in synset_dict) + + print("Reorganizing into synset folders") + synsets = np.unique(list(synset_dict.values())) + for s in synsets: + os.makedirs(os.path.join(datadir, s), exist_ok=True) + for k, v in synset_dict.items(): + src = os.path.join(datadir, k) + dst = os.path.join(datadir, v) + shutil.move(src, dst) + + 
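+            # Note: glob's "**" is only truly recursive when recursive=True is
+            # passed; without it, "**" matches a single path component. That is
+            # sufficient here, because after the reorganization above every
+            # image sits exactly one level deep as <synset>/<file>.JPEG.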
filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) + filelist = [os.path.relpath(p, start=datadir) for p in filelist] + filelist = sorted(filelist) + filelist = "\n".join(filelist)+"\n" + with open(self.txt_filelist, "w") as f: + f.write(filelist) + + tdu.mark_prepared(self.root) + + + +class ImageNetSR(Dataset): + def __init__(self, size=None, + degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1., + random_crop=True): + """ + Imagenet Superresolution Dataloader + Performs following ops in order: + 1. crops a crop of size s from image either as random or center crop + 2. resizes crop to size with cv2.area_interpolation + 3. degrades resized crop with degradation_fn + + :param size: resizing to size after cropping + :param degradation: degradation_fn, e.g. cv_bicubic or bsrgan_light + :param downscale_f: Low Resolution Downsample factor + :param min_crop_f: determines crop size s, + where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f) + :param max_crop_f: "" + :param data_root: + :param random_crop: + """ + self.base = self.get_base() + assert size + assert (size / downscale_f).is_integer() + self.size = size + self.LR_size = int(size / downscale_f) + self.min_crop_f = min_crop_f + self.max_crop_f = max_crop_f + assert(max_crop_f <= 1.) + self.center_crop = not random_crop + + self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA) + + self.pil_interpolation = False # gets reset later if incase interp_op is from pillow + + if degradation == "bsrgan": + self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f) + + elif degradation == "bsrgan_light": + self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f) + + else: + interpolation_fn = { + "cv_nearest": cv2.INTER_NEAREST, + "cv_bilinear": cv2.INTER_LINEAR, + "cv_bicubic": cv2.INTER_CUBIC, + "cv_area": cv2.INTER_AREA, + "cv_lanczos": cv2.INTER_LANCZOS4, + "pil_nearest": PIL.Image.NEAREST, + "pil_bilinear": PIL.Image.BILINEAR, + "pil_bicubic": PIL.Image.BICUBIC, + "pil_box": PIL.Image.BOX, + "pil_hamming": PIL.Image.HAMMING, + "pil_lanczos": PIL.Image.LANCZOS, + }[degradation] + + self.pil_interpolation = degradation.startswith("pil_") + + if self.pil_interpolation: + self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn) + + else: + self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size, + interpolation=interpolation_fn) + + def __len__(self): + return len(self.base) + + def __getitem__(self, i): + example = self.base[i] + image = Image.open(example["file_path_"]) + + if not image.mode == "RGB": + image = image.convert("RGB") + + image = np.array(image).astype(np.uint8) + + min_side_len = min(image.shape[:2]) + crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None) + crop_side_len = int(crop_side_len) + + if self.center_crop: + self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len) + + else: + self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len) + + image = self.cropper(image=image)["image"] + image = self.image_rescaler(image=image)["image"] + + if self.pil_interpolation: + image_pil = PIL.Image.fromarray(image) + LR_image = self.degradation_process(image_pil) + LR_image = np.array(LR_image).astype(np.uint8) + + else: + LR_image = self.degradation_process(image=image)["image"] + + example["image"] = (image/127.5 - 1.0).astype(np.float32) + 
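+        # Both the HR crop and its degraded LR counterpart below are mapped from
+        # uint8 [0, 255] to float32 [-1, 1], the value range used for images
+        # throughout the ldm codebase.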
example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32) + + return example + + +class ImageNetSRTrain(ImageNetSR): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def get_base(self): + with open("data/imagenet_train_hr_indices.p", "rb") as f: + indices = pickle.load(f) + dset = ImageNetTrain(process_images=False,) + return Subset(dset, indices) + + +class ImageNetSRValidation(ImageNetSR): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def get_base(self): + with open("data/imagenet_val_hr_indices.p", "rb") as f: + indices = pickle.load(f) + dset = ImageNetValidation(process_images=False,) + return Subset(dset, indices) diff --git a/ldm/data/lsun.py b/ldm/data/lsun.py new file mode 100644 index 00000000..6256e457 --- /dev/null +++ b/ldm/data/lsun.py @@ -0,0 +1,92 @@ +import os +import numpy as np +import PIL +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms + + +class LSUNBase(Dataset): + def __init__(self, + txt_file, + data_root, + size=None, + interpolation="bicubic", + flip_p=0.5 + ): + self.data_paths = txt_file + self.data_root = data_root + with open(self.data_paths, "r") as f: + self.image_paths = f.read().splitlines() + self._length = len(self.image_paths) + self.labels = { + "relative_file_path_": [l for l in self.image_paths], + "file_path_": [os.path.join(self.data_root, l) + for l in self.image_paths], + } + + self.size = size + self.interpolation = {"linear": PIL.Image.LINEAR, + "bilinear": PIL.Image.BILINEAR, + "bicubic": PIL.Image.BICUBIC, + "lanczos": PIL.Image.LANCZOS, + }[interpolation] + self.flip = transforms.RandomHorizontalFlip(p=flip_p) + + def __len__(self): + return self._length + + def __getitem__(self, i): + example = dict((k, self.labels[k][i]) for k in self.labels) + image = Image.open(example["file_path_"]) + if not image.mode == "RGB": + image = image.convert("RGB") + + # default to score-sde preprocessing + img = np.array(image).astype(np.uint8) + crop = min(img.shape[0], img.shape[1]) + h, w, = img.shape[0], img.shape[1] + img = img[(h - crop) // 2:(h + crop) // 2, + (w - crop) // 2:(w + crop) // 2] + + image = Image.fromarray(img) + if self.size is not None: + image = image.resize((self.size, self.size), resample=self.interpolation) + + image = self.flip(image) + image = np.array(image).astype(np.uint8) + example["image"] = (image / 127.5 - 1.0).astype(np.float32) + return example + + +class LSUNChurchesTrain(LSUNBase): + def __init__(self, **kwargs): + super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs) + + +class LSUNChurchesValidation(LSUNBase): + def __init__(self, flip_p=0., **kwargs): + super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches", + flip_p=flip_p, **kwargs) + + +class LSUNBedroomsTrain(LSUNBase): + def __init__(self, **kwargs): + super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs) + + +class LSUNBedroomsValidation(LSUNBase): + def __init__(self, flip_p=0.0, **kwargs): + super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms", + flip_p=flip_p, **kwargs) + + +class LSUNCatsTrain(LSUNBase): + def __init__(self, **kwargs): + super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs) + + +class LSUNCatsValidation(LSUNBase): + def __init__(self, flip_p=0., **kwargs): + super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats", + 
flip_p=flip_p, **kwargs) diff --git a/ldm/lr_scheduler.py b/ldm/lr_scheduler.py new file mode 100644 index 00000000..be39da9c --- /dev/null +++ b/ldm/lr_scheduler.py @@ -0,0 +1,98 @@ +import numpy as np + + +class LambdaWarmUpCosineScheduler: + """ + note: use with a base_lr of 1.0 + """ + def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): + self.lr_warm_up_steps = warm_up_steps + self.lr_start = lr_start + self.lr_min = lr_min + self.lr_max = lr_max + self.lr_max_decay_steps = max_decay_steps + self.last_lr = 0. + self.verbosity_interval = verbosity_interval + + def schedule(self, n, **kwargs): + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") + if n < self.lr_warm_up_steps: + lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start + self.last_lr = lr + return lr + else: + t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) + t = min(t, 1.0) + lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( + 1 + np.cos(t * np.pi)) + self.last_lr = lr + return lr + + def __call__(self, n, **kwargs): + return self.schedule(n,**kwargs) + + +class LambdaWarmUpCosineScheduler2: + """ + supports repeated iterations, configurable via lists + note: use with a base_lr of 1.0. + """ + def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0): + assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths) + self.lr_warm_up_steps = warm_up_steps + self.f_start = f_start + self.f_min = f_min + self.f_max = f_max + self.cycle_lengths = cycle_lengths + self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) + self.last_f = 0. 
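+        # cum_cycles marks the step at which each cycle starts, so schedule(n)
+        # can locate the active cycle, apply a linear warm-up from f_start to
+        # f_max, and then decay toward f_min with a cosine over t in [0, 1]:
+        #     f(t) = f_min + 0.5 * (f_max - f_min) * (1 + cos(pi * t))
+        # With the values used by the inference YAMLs above (warm_up_steps=[10000],
+        # f_start=[1.e-6], f_max=[1.], f_min=[1.], a single huge cycle), the
+        # multiplier ramps from 1e-6 to 1.0 over 10k steps and then stays at 1.0,
+        # since f_max == f_min.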
+ self.verbosity_interval = verbosity_interval + + def find_in_interval(self, n): + interval = 0 + for cl in self.cum_cycles[1:]: + if n <= cl: + return interval + interval += 1 + + def schedule(self, n, **kwargs): + cycle = self.find_in_interval(n) + n = n - self.cum_cycles[cycle] + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " + f"current cycle {cycle}") + if n < self.lr_warm_up_steps[cycle]: + f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] + self.last_f = f + return f + else: + t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]) + t = min(t, 1.0) + f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( + 1 + np.cos(t * np.pi)) + self.last_f = f + return f + + def __call__(self, n, **kwargs): + return self.schedule(n, **kwargs) + + +class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): + + def schedule(self, n, **kwargs): + cycle = self.find_in_interval(n) + n = n - self.cum_cycles[cycle] + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " + f"current cycle {cycle}") + + if n < self.lr_warm_up_steps[cycle]: + f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] + self.last_f = f + return f + else: + f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle]) + self.last_f = f + return f + diff --git a/ldm/models/autoencoder.py b/ldm/models/autoencoder.py new file mode 100644 index 00000000..6a9c4f45 --- /dev/null +++ b/ldm/models/autoencoder.py @@ -0,0 +1,443 @@ +import torch +import pytorch_lightning as pl +import torch.nn.functional as F +from contextlib import contextmanager + +from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer + +from ldm.modules.diffusionmodules.model import Encoder, Decoder +from ldm.modules.distributions.distributions import DiagonalGaussianDistribution + +from ldm.util import instantiate_from_config + + +class VQModel(pl.LightningModule): + def __init__(self, + ddconfig, + lossconfig, + n_embed, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key="image", + colorize_nlabels=None, + monitor=None, + batch_resize_range=None, + scheduler_config=None, + lr_g_factor=1.0, + remap=None, + sane_index_shape=False, # tell vector quantizer to return indices as bhw + use_ema=False + ): + super().__init__() + self.embed_dim = embed_dim + self.n_embed = n_embed + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, + remap=remap, + sane_index_shape=sane_index_shape) + self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + self.batch_resize_range = batch_resize_range + if self.batch_resize_range is not None: + print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") + + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self) 
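+            # NOTE: LitEma (used here) as well as np, version, and LambdaLR
+            # (used further down in this file) are never imported by this
+            # vendored copy. A likely fix, assuming the module layout added by
+            # this same patch, is to extend the imports at the top of the file:
+            #     import numpy as np
+            #     from packaging import version
+            #     from torch.optim.lr_scheduler import LambdaLR
+            #     from ldm.modules.ema import LitEma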
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + self.scheduler_config = scheduler_config + self.lr_g_factor = lr_g_factor + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.parameters()) + self.model_ema.copy_to(self) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + print(f"Unexpected Keys: {unexpected}") + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self) + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + quant, emb_loss, info = self.quantize(h) + return quant, emb_loss, info + + def encode_to_prequant(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, quant): + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + def decode_code(self, code_b): + quant_b = self.quantize.embed_code(code_b) + dec = self.decode(quant_b) + return dec + + def forward(self, input, return_pred_indices=False): + quant, diff, (_,_,ind) = self.encode(input) + dec = self.decode(quant) + if return_pred_indices: + return dec, diff, ind + return dec, diff + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + if self.batch_resize_range is not None: + lower_size = self.batch_resize_range[0] + upper_size = self.batch_resize_range[1] + if self.global_step <= 4: + # do the first few batches with max size to avoid later oom + new_resize = upper_size + else: + new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) + if new_resize != x.shape[2]: + x = F.interpolate(x, size=new_resize, mode="bicubic") + x = x.detach() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + # https://github.com/pytorch/pytorch/issues/37142 + # try not to fool the heuristics + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + + if optimizer_idx == 0: + # autoencode + aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train", + predicted_indices=ind) + + self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) + return aeloss + + if optimizer_idx == 1: + # discriminator + discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) + return discloss + + def validation_step(self, batch, batch_idx): + log_dict = self._validation_step(batch, batch_idx) + with self.ema_scope(): + log_dict_ema = self._validation_step(batch, 
batch_idx, suffix="_ema") + return log_dict + + def _validation_step(self, batch, batch_idx, suffix=""): + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, + self.global_step, + last_layer=self.get_last_layer(), + split="val"+suffix, + predicted_indices=ind + ) + + discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, + self.global_step, + last_layer=self.get_last_layer(), + split="val"+suffix, + predicted_indices=ind + ) + rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] + self.log(f"val{suffix}/rec_loss", rec_loss, + prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) + self.log(f"val{suffix}/aeloss", aeloss, + prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) + if version.parse(pl.__version__) >= version.parse('1.4.0'): + del log_dict_ae[f"val{suffix}/rec_loss"] + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr_d = self.learning_rate + lr_g = self.lr_g_factor*self.learning_rate + print("lr_d", lr_d) + print("lr_g", lr_g) + opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ + list(self.decoder.parameters())+ + list(self.quantize.parameters())+ + list(self.quant_conv.parameters())+ + list(self.post_quant_conv.parameters()), + lr=lr_g, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr_d, betas=(0.5, 0.9)) + + if self.scheduler_config is not None: + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }, + { + 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }, + ] + return [opt_ae, opt_disc], scheduler + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if only_inputs: + log["inputs"] = x + return log + xrec, _ = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["inputs"] = x + log["reconstructions"] = xrec + if plot_ema: + with self.ema_scope(): + xrec_ema, _ = self(x) + if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) + log["reconstructions_ema"] = xrec_ema + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
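+        # Min-max rescaling maps the random colorize projection back to
+        # [-1, 1], so multi-channel segmentation inputs can be logged with
+        # the same normalization as RGB images.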
+ return x + + +class VQModelInterface(VQModel): + def __init__(self, embed_dim, *args, **kwargs): + super().__init__(embed_dim=embed_dim, *args, **kwargs) + self.embed_dim = embed_dim + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, h, force_not_quantize=False): + # also go through quantization layer + if not force_not_quantize: + quant, emb_loss, info = self.quantize(h) + else: + quant = h + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + +class AutoencoderKL(pl.LightningModule): + def __init__(self, + ddconfig, + lossconfig, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key="image", + colorize_nlabels=None, + monitor=None, + ): + super().__init__() + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + assert ddconfig["double_z"] + self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + self.embed_dim = embed_dim + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + self.load_state_dict(sd, strict=False) + print(f"Restored from {path}") + + def encode(self, x): + h = self.encoder(x) + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + return posterior + + def decode(self, z): + z = self.post_quant_conv(z) + dec = self.decoder(z) + return dec + + def forward(self, input, sample_posterior=True): + posterior = self.encode(input) + if sample_posterior: + z = posterior.sample() + else: + z = posterior.mode() + dec = self.decode(z) + return dec, posterior + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + + if optimizer_idx == 0: + # train encoder+decoder+logvar + aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return aeloss + + if optimizer_idx == 1: + # train the discriminator + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + + self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return discloss + + def validation_step(self, batch, batch_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + aeloss, log_dict_ae = self.loss(inputs, 
reconstructions, posterior, 0, self.global_step, + last_layer=self.get_last_layer(), split="val") + + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, + last_layer=self.get_last_layer(), split="val") + + self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr = self.learning_rate + opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ + list(self.decoder.parameters())+ + list(self.quant_conv.parameters())+ + list(self.post_quant_conv.parameters()), + lr=lr, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr, betas=(0.5, 0.9)) + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + @torch.no_grad() + def log_images(self, batch, only_inputs=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if not only_inputs: + xrec, posterior = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["samples"] = self.decode(torch.randn_like(posterior.sample())) + log["reconstructions"] = xrec + log["inputs"] = x + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. + return x + + +class IdentityFirstStage(torch.nn.Module): + def __init__(self, *args, vq_interface=False, **kwargs): + self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff + super().__init__() + + def encode(self, x, *args, **kwargs): + return x + + def decode(self, x, *args, **kwargs): + return x + + def quantize(self, x, *args, **kwargs): + if self.vq_interface: + return x, None, [None, None, None] + return x + + def forward(self, x, *args, **kwargs): + return x diff --git a/ldm/models/diffusion/__init__.py b/ldm/models/diffusion/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldm/models/diffusion/classifier.py b/ldm/models/diffusion/classifier.py new file mode 100644 index 00000000..67e98b9d --- /dev/null +++ b/ldm/models/diffusion/classifier.py @@ -0,0 +1,267 @@ +import os +import torch +import pytorch_lightning as pl +from omegaconf import OmegaConf +from torch.nn import functional as F +from torch.optim import AdamW +from torch.optim.lr_scheduler import LambdaLR +from copy import deepcopy +from einops import rearrange +from glob import glob +from natsort import natsorted + +from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel +from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config + +__models__ = { + 'class_label': EncoderUNetModel, + 'segmentation': UNetModel +} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +class NoisyLatentImageClassifier(pl.LightningModule): + + def __init__(self, + diffusion_path, + num_classes, + ckpt_path=None, + pool='attention', + label_key=None, + diffusion_ckpt_path=None, + scheduler_config=None, + weight_decay=1.e-2, + log_steps=10, + monitor='val/loss', + *args, + **kwargs): + super().__init__(*args, **kwargs) + self.num_classes = num_classes + # get latest 
config of diffusion model + diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1] + self.diffusion_config = OmegaConf.load(diffusion_config).model + self.diffusion_config.params.ckpt_path = diffusion_ckpt_path + self.load_diffusion() + + self.monitor = monitor + self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 + self.log_time_interval = self.diffusion_model.num_timesteps // log_steps + self.log_steps = log_steps + + self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \ + else self.diffusion_model.cond_stage_key + + assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params' + + if self.label_key not in __models__: + raise NotImplementedError() + + self.load_classifier(ckpt_path, pool) + + self.scheduler_config = scheduler_config + self.use_scheduler = self.scheduler_config is not None + self.weight_decay = weight_decay + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + def load_diffusion(self): + model = instantiate_from_config(self.diffusion_config) + self.diffusion_model = model.eval() + self.diffusion_model.train = disabled_train + for param in self.diffusion_model.parameters(): + param.requires_grad = False + + def load_classifier(self, ckpt_path, pool): + model_config = deepcopy(self.diffusion_config.params.unet_config.params) + model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels + model_config.out_channels = self.num_classes + if self.label_key == 'class_label': + model_config.pool = pool + + self.model = __models__[self.label_key](**model_config) + if ckpt_path is not None: + print('#####################################################################') + print(f'load from ckpt "{ckpt_path}"') + print('#####################################################################') + self.init_from_ckpt(ckpt_path) + + @torch.no_grad() + def get_x_noisy(self, x, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x)) + continuous_sqrt_alpha_cumprod = None + if self.diffusion_model.use_continuous_noise: + continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) + # todo: make sure t+1 is correct here + + return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise, + continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod) + + def forward(self, x_noisy, t, *args, **kwargs): + return self.model(x_noisy, t) + + @torch.no_grad() + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = x.to(memory_format=torch.contiguous_format).float() + return x + + @torch.no_grad() + def get_conditioning(self, batch, k=None): + if k is None: + k = self.label_key + assert k is not None, 'Needs to provide label key' + + targets = batch[k].to(self.device) + + if 
self.label_key == 'segmentation': + targets = rearrange(targets, 'b h w c -> b c h w') + for down in range(self.numd): + h, w = targets.shape[-2:] + targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest') + + # targets = rearrange(targets,'b c h w -> b h w c') + + return targets + + def compute_top_k(self, logits, labels, k, reduction="mean"): + _, top_ks = torch.topk(logits, k, dim=1) + if reduction == "mean": + return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() + elif reduction == "none": + return (top_ks == labels[:, None]).float().sum(dim=-1) + + def on_train_epoch_start(self): + # save some memory + self.diffusion_model.model.to('cpu') + + @torch.no_grad() + def write_logs(self, loss, logits, targets): + log_prefix = 'train' if self.training else 'val' + log = {} + log[f"{log_prefix}/loss"] = loss.mean() + log[f"{log_prefix}/acc@1"] = self.compute_top_k( + logits, targets, k=1, reduction="mean" + ) + log[f"{log_prefix}/acc@5"] = self.compute_top_k( + logits, targets, k=5, reduction="mean" + ) + + self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True) + self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False) + self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True) + lr = self.optimizers().param_groups[0]['lr'] + self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True) + + def shared_step(self, batch, t=None): + x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key) + targets = self.get_conditioning(batch) + if targets.dim() == 4: + targets = targets.argmax(dim=1) + if t is None: + t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long() + else: + t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long() + x_noisy = self.get_x_noisy(x, t) + logits = self(x_noisy, t) + + loss = F.cross_entropy(logits, targets, reduction='none') + + self.write_logs(loss.detach(), logits.detach(), targets.detach()) + + loss = loss.mean() + return loss, logits, x_noisy, targets + + def training_step(self, batch, batch_idx): + loss, *_ = self.shared_step(batch) + return loss + + def reset_noise_accs(self): + self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in + range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)} + + def on_validation_start(self): + self.reset_noise_accs() + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + loss, *_ = self.shared_step(batch) + + for t in self.noisy_acc: + _, logits, _, targets = self.shared_step(batch, t) + self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean')) + self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean')) + + return loss + + def configure_optimizers(self): + optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) + + if self.use_scheduler: + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }] + return [optimizer], scheduler + + return optimizer + + @torch.no_grad() + def log_images(self, batch, N=8, *args, **kwargs): + log = dict() + x = self.get_input(batch, self.diffusion_model.first_stage_key) + log['inputs'] = x + + y = self.get_conditioning(batch) + + if self.label_key == 'class_label': 
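+            # For class-conditional models the integer labels carry no visual
+            # information, so the human-readable label strings from the batch
+            # are rendered onto an image via log_txt_as_img and logged under
+            # the 'labels' key.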
+ y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) + log['labels'] = y + + if ismap(y): + log['labels'] = self.diffusion_model.to_rgb(y) + + for step in range(self.log_steps): + current_time = step * self.log_time_interval + + _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) + + log[f'inputs@t{current_time}'] = x_noisy + + pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) + pred = rearrange(pred, 'b h w c -> b c h w') + + log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred) + + for key in log: + log[key] = log[key][:N] + + return log diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py new file mode 100644 index 00000000..fb31215d --- /dev/null +++ b/ldm/models/diffusion/ddim.py @@ -0,0 +1,241 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, \ + extract_into_tensor + + +class DDIMSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for DDIM sampling is {size}, eta {eta}') + + samples, intermediates = self.ddim_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + ) + return samples, intermediates + + @torch.no_grad() + def ddim_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None,): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning) + img, pred_x0 = outs + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None): + b, *_, device = *x.shape, x.device + + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1. 
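+        # Together with the noise term below, these lines implement the DDIM update
+        #   x_{t-1} = sqrt(a_prev) * pred_x0
+        #           + sqrt(1 - a_prev - sigma_t^2) * e_t
+        #           + sigma_t * temperature * noise
+        # where e_t has already been classifier-free-guided above as
+        #   e_t = e_uncond + scale * (e_cond - e_uncond).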
- a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + @torch.no_grad() + def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): + # fast, but does not allow for exact reconstruction + # t serves as an index to gather the correct alphas + if use_original_steps: + sqrt_alphas_cumprod = self.sqrt_alphas_cumprod + sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod + else: + sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) + sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas + + if noise is None: + noise = torch.randn_like(x0) + return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise) + + @torch.no_grad() + def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, + use_original_steps=False): + + timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps + timesteps = timesteps[:t_start] + + time_range = np.flip(timesteps) + total_steps = timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='Decoding image', total=total_steps) + x_dec = x_latent + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) + x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning) + return x_dec \ No newline at end of file diff --git a/ldm/models/diffusion/ddpm.py b/ldm/models/diffusion/ddpm.py new file mode 100644 index 00000000..bbedd04c --- /dev/null +++ b/ldm/models/diffusion/ddpm.py @@ -0,0 +1,1445 @@ +""" +wild mixture of +https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py +https://github.com/CompVis/taming-transformers +-- merci +""" + +import torch +import torch.nn as nn +import numpy as np +import pytorch_lightning as pl +from torch.optim.lr_scheduler import LambdaLR +from einops import rearrange, repeat +from contextlib import contextmanager +from functools import partial +from tqdm import tqdm +from torchvision.utils import make_grid +from pytorch_lightning.utilities.distributed import rank_zero_only + +from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config +from ldm.modules.ema import LitEma +from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution +from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL +from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like +from ldm.models.diffusion.ddim import DDIMSampler + + +__conditioning_keys__ = {'concat': 'c_concat', + 'crossattn': 'c_crossattn', + 'adm': 'y'} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change 
anymore.""" + return self + + +def uniform_on_device(r1, r2, shape, device): + return (r1 - r2) * torch.rand(*shape, device=device) + r2 + + +class DDPM(pl.LightningModule): + # classic DDPM with Gaussian diffusion, in image space + def __init__(self, + unet_config, + timesteps=1000, + beta_schedule="linear", + loss_type="l2", + ckpt_path=None, + ignore_keys=[], + load_only_unet=False, + monitor="val/loss", + use_ema=True, + first_stage_key="image", + image_size=256, + channels=3, + log_every_t=100, + clip_denoised=True, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + given_betas=None, + original_elbo_weight=0., + v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta + l_simple_weight=1., + conditioning_key=None, + parameterization="eps", # all assuming fixed variance schedules + scheduler_config=None, + use_positional_encodings=False, + learn_logvar=False, + logvar_init=0., + ): + super().__init__() + assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' + self.parameterization = parameterization + print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") + self.cond_stage_model = None + self.clip_denoised = clip_denoised + self.log_every_t = log_every_t + self.first_stage_key = first_stage_key + self.image_size = image_size # try conv? + self.channels = channels + self.use_positional_encodings = use_positional_encodings + self.model = DiffusionWrapper(unet_config, conditioning_key) + count_params(self.model, verbose=True) + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + self.use_scheduler = scheduler_config is not None + if self.use_scheduler: + self.scheduler_config = scheduler_config + + self.v_posterior = v_posterior + self.original_elbo_weight = original_elbo_weight + self.l_simple_weight = l_simple_weight + + if monitor is not None: + self.monitor = monitor + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) + + self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, + linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) + + self.loss_type = loss_type + + self.learn_logvar = learn_logvar + self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) + if self.learn_logvar: + self.logvar = nn.Parameter(self.logvar, requires_grad=True) + + + def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if exists(given_betas): + betas = given_betas + else: + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, + cosine_s=cosine_s) + alphas = 1. 
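+        # From the betas, the usual DDPM quantities follow (a sketch in numpy):
+        #   alphas         = 1. - betas
+        #   alphas_cumprod = np.cumprod(alphas)              # \bar{alpha}_t
+        # and the forward marginal is
+        #   q(x_t | x_0) = N(sqrt(\bar{alpha}_t) * x_0, (1 - \bar{alpha}_t) * I),
+        # which is what the sqrt_* buffers registered below encode.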
- betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( + 1. - alphas_cumprod) + self.v_posterior * betas + # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer('posterior_variance', to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) + self.register_buffer('posterior_mean_coef1', to_torch( + betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) + self.register_buffer('posterior_mean_coef2', to_torch( + (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) + + if self.parameterization == "eps": + lvlb_weights = self.betas ** 2 / ( + 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) + elif self.parameterization == "x0": + lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
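+        # The posterior buffers registered above follow Ho et al. 2020, Eq. 7:
+        #   q(x_{t-1} | x_t, x_0) = N(coef1 * x_0 + coef2 * x_t, posterior_variance)
+        #   coef1 = beta_t * sqrt(\bar{alpha}_{t-1}) / (1 - \bar{alpha}_t)
+        #   coef2 = (1 - \bar{alpha}_{t-1}) * sqrt(alpha_t) / (1 - \bar{alpha}_t)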
* 1 - torch.Tensor(alphas_cumprod)) + else: + raise NotImplementedError("mu not supported") + # TODO how to choose this term + lvlb_weights[0] = lvlb_weights[1] + self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) + assert not torch.isnan(self.lvlb_weights).all() + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.model.parameters()) + self.model_ema.copy_to(self.model) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.model.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. + """ + mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, x, t, clip_denoised: bool): + model_out = self.model(x, t) + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + if clip_denoised: + x_recon.clamp_(-1., 1.) 
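+        # In the "eps" branch, predict_start_from_noise inverts the forward process
+        # in closed form:
+        #   x_0 = (x_t - sqrt(1 - \bar{alpha}_t) * eps) / sqrt(\bar{alpha}_t)
+        # i.e. sqrt_recip_alphas_cumprod * x_t - sqrt_recipm1_alphas_cumprod * eps,
+        # matching the buffers set up in register_schedule.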
+ + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def p_sample_loop(self, shape, return_intermediates=False): + device = self.betas.device + b = shape[0] + img = torch.randn(shape, device=device) + intermediates = [img] + for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): + img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), + clip_denoised=self.clip_denoised) + if i % self.log_every_t == 0 or i == self.num_timesteps - 1: + intermediates.append(img) + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, batch_size=16, return_intermediates=False): + image_size = self.image_size + channels = self.channels + return self.p_sample_loop((batch_size, channels, image_size, image_size), + return_intermediates=return_intermediates) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) + + def get_loss(self, pred, target, mean=True): + if self.loss_type == 'l1': + loss = (target - pred).abs() + if mean: + loss = loss.mean() + elif self.loss_type == 'l2': + if mean: + loss = torch.nn.functional.mse_loss(target, pred) + else: + loss = torch.nn.functional.mse_loss(target, pred, reduction='none') + else: + raise NotImplementedError("unknown loss type '{loss_type}'") + + return loss + + def p_losses(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_out = self.model(x_noisy, t) + + loss_dict = {} + if self.parameterization == "eps": + target = noise + elif self.parameterization == "x0": + target = x_start + else: + raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") + + loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) + + log_prefix = 'train' if self.training else 'val' + + loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) + loss_simple = loss.mean() * self.l_simple_weight + + loss_vlb = (self.lvlb_weights[t] * loss).mean() + loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) + + loss = loss_simple + self.original_elbo_weight * loss_vlb + + loss_dict.update({f'{log_prefix}/loss': loss}) + + return loss, loss_dict + + def forward(self, x, *args, **kwargs): + # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size + # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + return self.p_losses(x, t, *args, **kwargs) + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = 
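+        # Training pairs in p_losses above come from the closed-form forward noising
+        # (q_sample):
+        #   x_t = sqrt(\bar{alpha}_t) * x_0 + sqrt(1 - \bar{alpha}_t) * eps
+        # and with parameterization == "eps" the model regresses the injected noise,
+        # so loss_simple is an L1/L2 penalty on eps itself.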
x.to(memory_format=torch.contiguous_format).float() + return x + + def shared_step(self, batch): + x = self.get_input(batch, self.first_stage_key) + loss, loss_dict = self(x) + return loss, loss_dict + + def training_step(self, batch, batch_idx): + loss, loss_dict = self.shared_step(batch) + + self.log_dict(loss_dict, prog_bar=True, + logger=True, on_step=True, on_epoch=True) + + self.log("global_step", self.global_step, + prog_bar=True, logger=True, on_step=True, on_epoch=False) + + if self.use_scheduler: + lr = self.optimizers().param_groups[0]['lr'] + self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) + + return loss + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + _, loss_dict_no_ema = self.shared_step(batch) + with self.ema_scope(): + _, loss_dict_ema = self.shared_step(batch) + loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} + self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self.model) + + def _get_rows_from_list(self, samples): + n_imgs_per_row = len(samples) + denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): + log = dict() + x = self.get_input(batch, self.first_stage_key) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + x = x.to(self.device)[:N] + log["inputs"] = x + + # get diffusion row + diffusion_row = list() + x_start = x[:n_row] + + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(x_start) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + diffusion_row.append(x_noisy) + + log["diffusion_row"] = self._get_rows_from_list(diffusion_row) + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) + + log["samples"] = samples + log["denoise_row"] = self._get_rows_from_list(denoise_row) + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.learn_logvar: + params = params + [self.logvar] + opt = torch.optim.AdamW(params, lr=lr) + return opt + + +class LatentDiffusion(DDPM): + """main class""" + def __init__(self, + first_stage_config, + cond_stage_config, + num_timesteps_cond=None, + cond_stage_key="image", + cond_stage_trainable=False, + concat_mode=True, + cond_stage_forward=None, + conditioning_key=None, + scale_factor=1.0, + scale_by_std=False, + *args, **kwargs): + self.num_timesteps_cond = default(num_timesteps_cond, 1) + self.scale_by_std = scale_by_std + assert self.num_timesteps_cond <= kwargs['timesteps'] + # for backwards compatibility after implementation of DiffusionWrapper + if conditioning_key is None: + conditioning_key = 'concat' if concat_mode else 'crossattn' + if cond_stage_config == '__is_unconditional__': + 
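+            # An unconditional cond-stage disables conditioning altogether; otherwise
+            # concat_mode picks channel concatenation ('concat') vs. cross-attention
+            # ('crossattn') conditioning in the DiffusionWrapper.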
conditioning_key = None + ckpt_path = kwargs.pop("ckpt_path", None) + ignore_keys = kwargs.pop("ignore_keys", []) + super().__init__(conditioning_key=conditioning_key, *args, **kwargs) + self.concat_mode = concat_mode + self.cond_stage_trainable = cond_stage_trainable + self.cond_stage_key = cond_stage_key + try: + self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 + except: + self.num_downs = 0 + if not scale_by_std: + self.scale_factor = scale_factor + else: + self.register_buffer('scale_factor', torch.tensor(scale_factor)) + self.instantiate_first_stage(first_stage_config) + self.instantiate_cond_stage(cond_stage_config) + self.cond_stage_forward = cond_stage_forward + self.clip_denoised = False + self.bbox_tokenizer = None + + self.restarted_from_ckpt = False + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys) + self.restarted_from_ckpt = True + + def make_cond_schedule(self, ): + self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) + ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() + self.cond_ids[:self.num_timesteps_cond] = ids + + @rank_zero_only + @torch.no_grad() + def on_train_batch_start(self, batch, batch_idx, dataloader_idx): + # only for very first batch + if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: + assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' + # set rescale weight to 1./std of encodings + print("### USING STD-RESCALING ###") + x = super().get_input(batch, self.first_stage_key) + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + del self.scale_factor + self.register_buffer('scale_factor', 1. 
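+            # Std-rescaling: setting scale_factor = 1 / std(z) on the very first batch
+            # makes the first-stage latents roughly unit-variance; pretrained SD-style
+            # configs instead ship a fixed scale_factor (e.g. 0.18215) and skip this.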
/ z.flatten().std()) + print(f"setting self.scale_factor to {self.scale_factor}") + print("### USING STD-RESCALING ###") + + def register_schedule(self, + given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) + + self.shorten_cond_schedule = self.num_timesteps_cond > 1 + if self.shorten_cond_schedule: + self.make_cond_schedule() + + def instantiate_first_stage(self, config): + model = instantiate_from_config(config) + self.first_stage_model = model.eval() + self.first_stage_model.train = disabled_train + for param in self.first_stage_model.parameters(): + param.requires_grad = False + + def instantiate_cond_stage(self, config): + if not self.cond_stage_trainable: + if config == "__is_first_stage__": + print("Using first stage also as cond stage.") + self.cond_stage_model = self.first_stage_model + elif config == "__is_unconditional__": + print(f"Training {self.__class__.__name__} as an unconditional model.") + self.cond_stage_model = None + # self.be_unconditional = True + else: + model = instantiate_from_config(config) + self.cond_stage_model = model.eval() + self.cond_stage_model.train = disabled_train + for param in self.cond_stage_model.parameters(): + param.requires_grad = False + else: + assert config != '__is_first_stage__' + assert config != '__is_unconditional__' + model = instantiate_from_config(config) + self.cond_stage_model = model + + def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): + denoise_row = [] + for zd in tqdm(samples, desc=desc): + denoise_row.append(self.decode_first_stage(zd.to(self.device), + force_not_quantize=force_no_decoder_quantization)) + n_imgs_per_row = len(denoise_row) + denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W + denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + def get_first_stage_encoding(self, encoder_posterior): + if isinstance(encoder_posterior, DiagonalGaussianDistribution): + z = encoder_posterior.sample() + elif isinstance(encoder_posterior, torch.Tensor): + z = encoder_posterior + else: + raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") + return self.scale_factor * z + + def get_learned_conditioning(self, c): + if self.cond_stage_forward is None: + if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): + c = self.cond_stage_model.encode(c) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + else: + c = self.cond_stage_model(c) + else: + assert hasattr(self.cond_stage_model, self.cond_stage_forward) + c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) + return c + + def meshgrid(self, h, w): + y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) + x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) + + arr = torch.cat([y, x], dim=-1) + return arr + + def delta_border(self, h, w): + """ + :param h: height + :param w: width + :return: normalized distance to image border, + wtith min distance = 0 at border and max dist = 0.5 at image center + """ + lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) + arr = self.meshgrid(h, w) / lower_right_corner + dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] + dist_right_down = 
torch.min(1 - arr, dim=-1, keepdims=True)[0] + edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] + return edge_dist + + def get_weighting(self, h, w, Ly, Lx, device): + weighting = self.delta_border(h, w) + weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], + self.split_input_params["clip_max_weight"], ) + weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) + + if self.split_input_params["tie_braker"]: + L_weighting = self.delta_border(Ly, Lx) + L_weighting = torch.clip(L_weighting, + self.split_input_params["clip_min_tie_weight"], + self.split_input_params["clip_max_tie_weight"]) + + L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) + weighting = weighting * L_weighting + return weighting + + def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code + """ + :param x: img of size (bs, c, h, w) + :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) + """ + bs, nc, h, w = x.shape + + # number of crops in image + Ly = (h - kernel_size[0]) // stride[0] + 1 + Lx = (w - kernel_size[1]) // stride[1] + 1 + + if uf == 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) + + weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) + + elif uf > 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), + dilation=1, padding=0, + stride=(stride[0] * uf, stride[1] * uf)) + fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) + + elif df > 1 and uf == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), + dilation=1, padding=0, + stride=(stride[0] // df, stride[1] // df)) + fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) + + else: + raise NotImplementedError + + return fold, unfold, normalization, weighting + + @torch.no_grad() + def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, + cond_key=None, return_original_cond=False, bs=None): + x = super().get_input(batch, k) + if bs is not None: + x = x[:bs] + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + + if self.model.conditioning_key is not None: + if cond_key is None: + cond_key = 
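+                # On get_fold_unfold above: overlapping crops are blended back with a
+                # border-distance weighting, and dividing the folded result by
+                # fold(weighting) normalizes pixels covered by several crops, so the
+                # overlaps average out instead of accumulating.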
self.cond_stage_key + if cond_key != self.first_stage_key: + if cond_key in ['caption', 'coordinates_bbox']: + xc = batch[cond_key] + elif cond_key == 'class_label': + xc = batch + else: + xc = super().get_input(batch, cond_key).to(self.device) + else: + xc = x + if not self.cond_stage_trainable or force_c_encode: + if isinstance(xc, dict) or isinstance(xc, list): + # import pudb; pudb.set_trace() + c = self.get_learned_conditioning(xc) + else: + c = self.get_learned_conditioning(xc.to(self.device)) + else: + c = xc + if bs is not None: + c = c[:bs] + + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + ckey = __conditioning_keys__[self.model.conditioning_key] + c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} + + else: + c = None + xc = None + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + c = {'pos_x': pos_x, 'pos_y': pos_y} + out = [z, c] + if return_first_stage_outputs: + xrec = self.decode_first_stage(z) + out.extend([x, xrec]) + if return_original_cond: + out.append(xc) + return out + + @torch.no_grad() + def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1. / self.scale_factor * z + + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + uf = self.split_input_params["vqf"] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [self.first_stage_model.decode(z[:, :, :, :, i], + force_not_quantize=predict_cids or force_not_quantize) + for i in range(z.shape[-1])] + else: + + output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. 
reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + # same as above but without decorator + def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1. / self.scale_factor * z + + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + uf = self.split_input_params["vqf"] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [self.first_stage_model.decode(z[:, :, :, :, i], + force_not_quantize=predict_cids or force_not_quantize) + for i in range(z.shape[-1])] + else: + + output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + @torch.no_grad() + def encode_first_stage(self, x): + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. 
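+                # differentiable_decode_first_stage above is deliberately a verbatim
+                # copy of decode_first_stage minus the @torch.no_grad() decorator, so
+                # gradients can flow through the first stage when required.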
(64, 64) + df = self.split_input_params["vqf"] + self.split_input_params['original_image_size'] = x.shape[-2:] + bs, nc, h, w = x.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) + z = unfold(x) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) + o = o * weighting + + # Reverse reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization + return decoded + + else: + return self.first_stage_model.encode(x) + else: + return self.first_stage_model.encode(x) + + def shared_step(self, batch, **kwargs): + x, c = self.get_input(batch, self.first_stage_key) + loss = self(x, c) + return loss + + def forward(self, x, c, *args, **kwargs): + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + if self.model.conditioning_key is not None: + assert c is not None + if self.cond_stage_trainable: + c = self.get_learned_conditioning(c) + if self.shorten_cond_schedule: # TODO: drop this option + tc = self.cond_ids[t].to(self.device) + c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) + return self.p_losses(x, c, t, *args, **kwargs) + + def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset + def rescale_bbox(bbox): + x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) + y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) + w = min(bbox[2] / crop_coordinates[2], 1 - x0) + h = min(bbox[3] / crop_coordinates[3], 1 - y0) + return x0, y0, w, h + + return [rescale_bbox(b) for b in bboxes] + + def apply_model(self, x_noisy, t, cond, return_ids=False): + + if isinstance(cond, dict): + # hybrid case, cond is exptected to be a dict + pass + else: + if not isinstance(cond, list): + cond = [cond] + key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' + cond = {key: cond} + + if hasattr(self, "split_input_params"): + assert len(cond) == 1 # todo can only deal with one conditioning atm + assert not return_ids + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. 
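+            # apply_model first normalizes `cond` into the dict layout DiffusionWrapper
+            # expects, e.g. {'c_crossattn': [c]} or {'c_concat': [c]}, before invoking
+            # the UNet (optionally patch-wise via the split_input_params path below).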
(64, 64) + + h, w = x_noisy.shape[-2:] + + fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) + + z = unfold(x_noisy) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] + + if self.cond_stage_key in ["image", "LR_image", "segmentation", + 'bbox_img'] and self.model.conditioning_key: # todo check for completeness + c_key = next(iter(cond.keys())) # get key + c = next(iter(cond.values())) # get value + assert (len(c) == 1) # todo extend to list with more than one elem + c = c[0] # get element + + c = unfold(c) + c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] + + elif self.cond_stage_key == 'coordinates_bbox': + assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' + + # assuming padding of unfold is always 0 and its dilation is always 1 + n_patches_per_row = int((w - ks[0]) / stride[0] + 1) + full_img_h, full_img_w = self.split_input_params['original_image_size'] + # as we are operating on latents, we need the factor from the original image size to the + # spatial latent size to properly rescale the crops for regenerating the bbox annotations + num_downs = self.first_stage_model.encoder.num_resolutions - 1 + rescale_latent = 2 ** (num_downs) + + # get top left postions of patches as conforming for the bbbox tokenizer, therefore we + # need to rescale the tl patch coordinates to be in between (0,1) + tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, + rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) + for patch_nr in range(z.shape[-1])] + + # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) + patch_limits = [(x_tl, y_tl, + rescale_latent * ks[0] / full_img_w, + rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] + # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] + + # tokenize crop coordinates for the bounding boxes of the respective patches + patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) + for bbox in patch_limits] # list of length l with tensors of shape (1, 2) + print(patch_limits_tknzd[0].shape) + # cut tknzd crop position from conditioning + assert isinstance(cond, dict), 'cond must be dict to be fed into model' + cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) + print(cut_cond.shape) + + adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) + adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') + print(adapted_cond.shape) + adapted_cond = self.get_learned_conditioning(adapted_cond) + print(adapted_cond.shape) + adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) + print(adapted_cond.shape) + + cond_list = [{'c_crossattn': [e]} for e in adapted_cond] + + else: + cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient + + # apply model by loop over crops + output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] + assert not isinstance(output_list[0], + tuple) # todo cant deal with multiple model outputs check this never happens + + o = torch.stack(output_list, 
axis=-1) + o = o * weighting + # Reverse reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + x_recon = fold(o) / normalization + + else: + x_recon = self.model(x_noisy, t, **cond) + + if isinstance(x_recon, tuple) and not return_ids: + return x_recon[0] + else: + return x_recon + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def _prior_bpd(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + This term can't be optimized, as it only depends on the encoder. + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. + """ + batch_size = x_start.shape[0] + t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) + qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) + kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) + return mean_flat(kl_prior) / np.log(2.0) + + def p_losses(self, x_start, cond, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_output = self.apply_model(x_noisy, t, cond) + + loss_dict = {} + prefix = 'train' if self.training else 'val' + + if self.parameterization == "x0": + target = x_start + elif self.parameterization == "eps": + target = noise + else: + raise NotImplementedError() + + loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) + loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) + + logvar_t = self.logvar[t].to(self.device) + loss = loss_simple / torch.exp(logvar_t) + logvar_t + # loss = loss_simple / torch.exp(self.logvar) + self.logvar + if self.learn_logvar: + loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) + loss_dict.update({'logvar': self.logvar.data.mean()}) + + loss = self.l_simple_weight * loss.mean() + + loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) + loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() + loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) + loss += (self.original_elbo_weight * loss_vlb) + loss_dict.update({f'{prefix}/loss': loss}) + + return loss, loss_dict + + def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, + return_x0=False, score_corrector=None, corrector_kwargs=None): + t_in = t + model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) + + if score_corrector is not None: + assert self.parameterization == "eps" + model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) + + if return_codebook_ids: + model_out, logits = model_out + + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + else: + raise NotImplementedError() + + if clip_denoised: + x_recon.clamp_(-1., 1.) 
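+        # In p_losses above, when learn_logvar is enabled the objective becomes
+        #   loss = loss_simple / exp(logvar_t) + logvar_t
+        # a per-timestep learned reweighting that reduces to plain loss_simple
+        # while logvar stays at its zero init.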
+ if quantize_denoised: + x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + if return_codebook_ids: + return model_mean, posterior_variance, posterior_log_variance, logits + elif return_x0: + return model_mean, posterior_variance, posterior_log_variance, x_recon + else: + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, + return_codebook_ids=False, quantize_denoised=False, return_x0=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): + b, *_, device = *x.shape, x.device + outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, + return_codebook_ids=return_codebook_ids, + quantize_denoised=quantize_denoised, + return_x0=return_x0, + score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if return_codebook_ids: + raise DeprecationWarning("Support dropped.") + model_mean, _, model_log_variance, logits = outputs + elif return_x0: + model_mean, _, model_log_variance, x0 = outputs + else: + model_mean, _, model_log_variance = outputs + + noise = noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + + if return_codebook_ids: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) + if return_x0: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 + else: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, + img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., + score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, + log_every_t=None): + if not log_every_t: + log_every_t = self.log_every_t + timesteps = self.num_timesteps + if batch_size is not None: + b = batch_size if batch_size is not None else shape[0] + shape = [batch_size] + list(shape) + else: + b = batch_size = shape[0] + if x_T is None: + img = torch.randn(shape, device=self.device) + else: + img = x_T + intermediates = [] + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', + total=timesteps) if verbose else reversed( + range(0, timesteps)) + if type(temperature) == float: + temperature = [temperature] * timesteps + + for i in iterator: + ts = torch.full((b,), i, device=self.device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img, x0_partial = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, return_x0=True, + temperature=temperature[i], noise_dropout=noise_dropout, + 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if mask is not None: + assert x0 is not None + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(x0_partial) + if callback: callback(i) + if img_callback: img_callback(img, i) + return img, intermediates + + @torch.no_grad() + def p_sample_loop(self, cond, shape, return_intermediates=False, + x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, start_T=None, + log_every_t=None): + + if not log_every_t: + log_every_t = self.log_every_t + device = self.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + intermediates = [img] + if timesteps is None: + timesteps = self.num_timesteps + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( + range(0, timesteps)) + + if mask is not None: + assert x0 is not None + assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match + + for i in iterator: + ts = torch.full((b,), i, device=device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised) + if mask is not None: + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(img) + if callback: callback(i) + if img_callback: img_callback(img, i) + + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, + verbose=True, timesteps=None, quantize_denoised=False, + mask=None, x0=None, shape=None,**kwargs): + if shape is None: + shape = (batch_size, self.channels, self.image_size, self.image_size) + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + return self.p_sample_loop(cond, + shape, + return_intermediates=return_intermediates, x_T=x_T, + verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, + mask=mask, x0=x0) + + @torch.no_grad() + def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): + + if ddim: + ddim_sampler = DDIMSampler(self) + shape = (self.channels, self.image_size, self.image_size) + samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, + shape,cond,verbose=False,**kwargs) + + else: + samples, intermediates = self.sample(cond=cond, batch_size=batch_size, + return_intermediates=True,**kwargs) + + return samples, intermediates + + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, + quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, + plot_diffusion_rows=True, **kwargs): + + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, + 
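+        # sample_log above dispatches to DDIM when ddim=True (note the batch-free
+        # shape tuple), else to the ancestral p_sample_loop; a usage sketch:
+        #   samples, inters = self.sample_log(cond=c, batch_size=N, ddim=True,
+        #                                     ddim_steps=200, eta=1.0)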
return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=N) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) + log["conditioning"] = xc + elif self.cond_stage_key == 'class_label': + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) + log['conditioning'] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, + ddim_steps=ddim_steps,eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( + self.first_stage_model, IdentityFirstStage): + # also display when quantizing x0 while sampling + with self.ema_scope("Plotting Quantized Denoised"): + samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, + ddim_steps=ddim_steps,eta=ddim_eta, + quantize_denoised=True) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, + # quantize_denoised=True) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_x0_quantized"] = x_samples + + if inpaint: + # make a simple center square + b, h, w = z.shape[0], z.shape[2], z.shape[3] + mask = torch.ones(N, h, w).to(self.device) + # zeros will be filled in + mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. + mask = mask[:, None, ...] 
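+            # Mask convention: 1 = keep the (noised) original latent, 0 = synthesize,
+            # so the square above inpaints the image center. The "outpaint" branch
+            # below passes the same, un-inverted mask.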
+ with self.ema_scope("Plotting Inpaint"): + + samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_inpainting"] = x_samples + log["mask"] = mask + + # outpaint + with self.ema_scope("Plotting Outpaint"): + samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_outpainting"] = x_samples + + if plot_progressive_rows: + with self.ema_scope("Plotting Progressives"): + img, progressives = self.progressive_denoising(c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N) + prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") + log["progressive_row"] = prog_row + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.cond_stage_trainable: + print(f"{self.__class__.__name__}: Also optimizing conditioner params!") + params = params + list(self.cond_stage_model.parameters()) + if self.learn_logvar: + print('Diffusion model optimizing logvar') + params.append(self.logvar) + opt = torch.optim.AdamW(params, lr=lr) + if self.use_scheduler: + assert 'target' in self.scheduler_config + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }] + return [opt], scheduler + return opt + + @torch.no_grad() + def to_rgb(self, x): + x = x.float() + if not hasattr(self, "colorize"): + self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) + x = nn.functional.conv2d(x, weight=self.colorize) + x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
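+        # to_rgb projects an arbitrary-channel map to 3 channels through a fixed
+        # random 1x1 convolution (created lazily above) and rescales the result to
+        # [-1, 1] for logging.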
+        return x
+
+
+class DiffusionWrapper(pl.LightningModule):
+    def __init__(self, diff_model_config, conditioning_key):
+        super().__init__()
+        self.diffusion_model = instantiate_from_config(diff_model_config)
+        self.conditioning_key = conditioning_key
+        assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']
+
+    def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
+        if self.conditioning_key is None:
+            out = self.diffusion_model(x, t)
+        elif self.conditioning_key == 'concat':
+            xc = torch.cat([x] + c_concat, dim=1)
+            out = self.diffusion_model(xc, t)
+        elif self.conditioning_key == 'crossattn':
+            cc = torch.cat(c_crossattn, 1)
+            out = self.diffusion_model(x, t, context=cc)
+        elif self.conditioning_key == 'hybrid':
+            xc = torch.cat([x] + c_concat, dim=1)
+            cc = torch.cat(c_crossattn, 1)
+            out = self.diffusion_model(xc, t, context=cc)
+        elif self.conditioning_key == 'adm':
+            cc = c_crossattn[0]
+            out = self.diffusion_model(x, t, y=cc)
+        else:
+            raise NotImplementedError()
+
+        return out
+
+
+class Layout2ImgDiffusion(LatentDiffusion):
+    # TODO: move all layout-specific hacks to this class
+    def __init__(self, cond_stage_key, *args, **kwargs):
+        assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
+        super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)
+
+    def log_images(self, batch, N=8, *args, **kwargs):
+        logs = super().log_images(batch=batch, N=N, *args, **kwargs)
+
+        key = 'train' if self.training else 'validation'
+        dset = self.trainer.datamodule.datasets[key]
+        mapper = dset.conditional_builders[self.cond_stage_key]
+
+        bbox_imgs = []
+        map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
+        for tknzd_bbox in batch[self.cond_stage_key][:N]:
+            bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
+            bbox_imgs.append(bboximg)
+
+        cond_img = torch.stack(bbox_imgs, dim=0)
+        logs['bbox_image'] = cond_img
+        return logs
diff --git a/ldm/models/diffusion/dpm_solver/__init__.py b/ldm/models/diffusion/dpm_solver/__init__.py
new file mode 100644
index 00000000..7427f38c
--- /dev/null
+++ b/ldm/models/diffusion/dpm_solver/__init__.py
@@ -0,0 +1 @@
+from .sampler import DPMSolverSampler
\ No newline at end of file
diff --git a/ldm/models/diffusion/dpm_solver/dpm_solver.py b/ldm/models/diffusion/dpm_solver/dpm_solver.py
new file mode 100644
index 00000000..bdb64e0c
--- /dev/null
+++ b/ldm/models/diffusion/dpm_solver/dpm_solver.py
@@ -0,0 +1,1184 @@
+import torch
+import torch.nn.functional as F
+import math
+
+
+class NoiseScheduleVP:
+    def __init__(
+            self,
+            schedule='discrete',
+            betas=None,
+            alphas_cumprod=None,
+            continuous_beta_0=0.1,
+            continuous_beta_1=20.,
+        ):
+        """Create a wrapper class for the forward SDE (VP type).
+
+        ***
+        Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
+        We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
+        ***
+
+        The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
+        We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
+        Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t.
For t in [0, T], we have:
+
+            log_alpha_t = self.marginal_log_mean_coeff(t)
+            sigma_t = self.marginal_std(t)
+            lambda_t = self.marginal_lambda(t)
+
+        Moreover, as lambda(t) is an invertible function, we also support its inverse function:
+
+            t = self.inverse_lambda(lambda_t)
+
+        ===============================================================
+
+        We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
+
+        1. For discrete-time DPMs:
+
+            For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
+                t_i = (i + 1) / N
+            e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
+            We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
+
+            Args:
+                betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
+                alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
+
+            Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
+
+            **Important**: Please pay special attention to the args for `alphas_cumprod`:
+                The `alphas_cumprod` is the \hat{alpha_n} array in the notation of DDPM. Specifically, DDPMs assume that
+                    q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
+                Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
+                    alpha_{t_n} = \sqrt{\hat{alpha_n}},
+                and
+                    log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
+
+
+        2. For continuous-time DPMs:
+
+            We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
+            schedule are the default settings in DDPM and improved-DDPM:
+
+            Args:
+                beta_min: A `float` number. The smallest beta for the linear schedule.
+                beta_max: A `float` number. The largest beta for the linear schedule.
+                cosine_s: A `float` number. The hyperparameter in the cosine schedule.
+                cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
+                T: A `float` number. The ending time of the forward process.
+
+        ===============================================================
+
+        Args:
+            schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
+                    'linear' or 'cosine' for continuous-time DPMs.
+        Returns:
+            A wrapper object of the forward SDE (VP type).
+
+        ===============================================================
+
+        Example:
+
+        # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
+        >>> ns = NoiseScheduleVP('discrete', betas=betas)
+
+        # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
+        >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
+
+        # For continuous-time DPMs (VPSDE), linear schedule:
+        >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
+
+        """
+
+        if schedule not in ['discrete', 'linear', 'cosine']:
+            raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule))
+
+        self.schedule = schedule
+        if schedule == 'discrete':
+            if betas is not None:
+                log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
+            else:
+                assert alphas_cumprod is not None
+                log_alphas = 0.5 * torch.log(alphas_cumprod)
+            self.total_N = len(log_alphas)
+            self.T = 1.
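+            # map discrete steps n = 0, ..., N-1 to continuous times t_i = (i + 1) / N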
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) + self.log_alpha_array = log_alphas.reshape((1, -1,)) + else: + self.total_N = 1000 + self.beta_0 = continuous_beta_0 + self.beta_1 = continuous_beta_1 + self.cosine_s = 0.008 + self.cosine_beta_max = 999. + self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s + self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) + self.schedule = schedule + if schedule == 'cosine': + # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. + # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. + self.T = 0.9946 + else: + self.T = 1. + + def marginal_log_mean_coeff(self, t): + """ + Compute log(alpha_t) of a given continuous-time label t in [0, T]. + """ + if self.schedule == 'discrete': + return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1)) + elif self.schedule == 'linear': + return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 + elif self.schedule == 'cosine': + log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) + log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 + return log_alpha_t + + def marginal_alpha(self, t): + """ + Compute alpha_t of a given continuous-time label t in [0, T]. + """ + return torch.exp(self.marginal_log_mean_coeff(t)) + + def marginal_std(self, t): + """ + Compute sigma_t of a given continuous-time label t in [0, T]. + """ + return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) + + def marginal_lambda(self, t): + """ + Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. + """ + log_mean_coeff = self.marginal_log_mean_coeff(t) + log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) + return log_mean_coeff - log_std + + def inverse_lambda(self, lamb): + """ + Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. + """ + if self.schedule == 'linear': + tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + Delta = self.beta_0**2 + tmp + return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) + elif self.schedule == 'discrete': + log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb) + t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1])) + return t.reshape((-1,)) + else: + log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s + t = t_fn(log_alpha) + return t + + +def model_wrapper( + model, + noise_schedule, + model_type="noise", + model_kwargs={}, + guidance_type="uncond", + condition=None, + unconditional_condition=None, + guidance_scale=1., + classifier_fn=None, + classifier_kwargs={}, +): + """Create a wrapper function for the noise prediction model. + + DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to + firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. 
+
+    We support four types of the diffusion model by setting `model_type`:
+
+        1. "noise": noise prediction model. (Trained by predicting noise).
+
+        2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
+
+        3. "v": velocity prediction model. (Trained by predicting the velocity).
+            The "v" prediction is derived in detail in Appendix D of [1], and is used in Imagen-Video [2].
+
+            [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
+                arXiv preprint arXiv:2202.00512 (2022).
+            [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
+                arXiv preprint arXiv:2210.02303 (2022).
+
+        4. "score": marginal score function. (Trained by denoising score matching).
+            Note that the score function and the noise prediction model follow a simple relationship:
+            ```
+                noise(x_t, t) = -sigma_t * score(x_t, t)
+            ```
+
+    We support three types of guided sampling by DPMs by setting `guidance_type`:
+        1. "uncond": unconditional sampling by DPMs.
+            The input `model` has the following format:
+            ``
+                model(x, t_input, **model_kwargs) -> noise | x_start | v | score
+            ``
+
+        2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
+            The input `model` has the following format:
+            ``
+                model(x, t_input, **model_kwargs) -> noise | x_start | v | score
+            ``
+
+            The input `classifier_fn` has the following format:
+            ``
+                classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
+            ``
+
+            [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
+                in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
+
+        3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
+            The input `model` has the following format:
+            ``
+                model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
+            ``
+            And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
+
+            [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
+                arXiv preprint arXiv:2207.12598 (2022).
+
+
+    The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
+    or continuous-time labels (i.e. epsilon to T).
+
+    We wrap the model function to accept only `x` and `t_continuous` as inputs, and output the predicted noise:
+    ``
+        def model_fn(x, t_continuous) -> noise:
+            t_input = get_model_input_time(t_continuous)
+            return noise_pred(model, x, t_input, **model_kwargs)
+    ``
+    where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
+
+    ===============================================================
+
+    Args:
+        model: A diffusion model with the corresponding format described above.
+        noise_schedule: A noise schedule object, such as NoiseScheduleVP.
+        model_type: A `str`. The parameterization type of the diffusion model.
+                    "noise" or "x_start" or "v" or "score".
+        model_kwargs: A `dict`. A dict for the other inputs of the model function.
+        guidance_type: A `str`. The type of the guidance for sampling.
+                    "uncond" or "classifier" or "classifier-free".
+        condition: A pytorch tensor. The condition for the guided sampling.
+                    Only used for "classifier" or "classifier-free" guidance type.
+        unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
+                    Only used for "classifier-free" guidance type.
+        guidance_scale: A `float`. The scale for the guided sampling.
+        classifier_fn: A classifier function. Only used for the classifier guidance.
+        classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
+    Returns:
+        A noise prediction model that accepts the noised data and the continuous time as the inputs.
+    """
+
+    def get_model_input_time(t_continuous):
+        """
+        Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
+        For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
+        For continuous-time DPMs, we just use `t_continuous`.
+        """
+        if noise_schedule.schedule == 'discrete':
+            return (t_continuous - 1. / noise_schedule.total_N) * 1000.
+        else:
+            return t_continuous
+
+    def noise_pred_fn(x, t_continuous, cond=None):
+        if t_continuous.reshape((-1,)).shape[0] == 1:
+            t_continuous = t_continuous.expand((x.shape[0]))
+        t_input = get_model_input_time(t_continuous)
+        if cond is None:
+            output = model(x, t_input, **model_kwargs)
+        else:
+            output = model(x, t_input, cond, **model_kwargs)
+        if model_type == "noise":
+            return output
+        elif model_type == "x_start":
+            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
+            dims = x.dim()
+            return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
+        elif model_type == "v":
+            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
+            dims = x.dim()
+            return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
+        elif model_type == "score":
+            sigma_t = noise_schedule.marginal_std(t_continuous)
+            dims = x.dim()
+            return -expand_dims(sigma_t, dims) * output
+
+    def cond_grad_fn(x, t_input):
+        """
+        Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
+        """
+        with torch.enable_grad():
+            x_in = x.detach().requires_grad_(True)
+            log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
+            return torch.autograd.grad(log_prob.sum(), x_in)[0]
+
+    def model_fn(x, t_continuous):
+        """
+        The noise prediction model function that is used for DPM-Solver.
+        """
+        if t_continuous.reshape((-1,)).shape[0] == 1:
+            t_continuous = t_continuous.expand((x.shape[0]))
+        if guidance_type == "uncond":
+            return noise_pred_fn(x, t_continuous)
+        elif guidance_type == "classifier":
+            assert classifier_fn is not None
+            t_input = get_model_input_time(t_continuous)
+            cond_grad = cond_grad_fn(x, t_input)
+            sigma_t = noise_schedule.marginal_std(t_continuous)
+            noise = noise_pred_fn(x, t_continuous)
+            return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
+        elif guidance_type == "classifier-free":
+            if guidance_scale == 1. or unconditional_condition is None:
+                return noise_pred_fn(x, t_continuous, cond=condition)
+            else:
+                x_in = torch.cat([x] * 2)
+                t_in = torch.cat([t_continuous] * 2)
+                c_in = torch.cat([unconditional_condition, condition])
+                noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
+                return noise_uncond + guidance_scale * (noise - noise_uncond)
+
+    # "score" is documented above and handled by noise_pred_fn, so accept it here as well.
+    assert model_type in ["noise", "x_start", "v", "score"]
+    assert guidance_type in ["uncond", "classifier", "classifier-free"]
+    return model_fn
+
+
+class DPM_Solver:
+    def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
+        """Construct a DPM-Solver.
+
+        We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
+ If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver). + If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++). + In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True. + The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales. + + Args: + model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): + `` + def model_fn(x, t_continuous): + return noise + `` + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model. + thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1]. + max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding. + + [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b. + """ + self.model = model_fn + self.noise_schedule = noise_schedule + self.predict_x0 = predict_x0 + self.thresholding = thresholding + self.max_val = max_val + + def noise_prediction_fn(self, x, t): + """ + Return the noise prediction model. + """ + return self.model(x, t) + + def data_prediction_fn(self, x, t): + """ + Return the data prediction model (with thresholding). + """ + noise = self.noise_prediction_fn(x, t) + dims = x.dim() + alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) + x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims) + if self.thresholding: + p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. + s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) + s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims) + x0 = torch.clamp(x0, -s, s) / s + return x0 + + def model_fn(self, x, t): + """ + Convert the model to the noise prediction model or the data prediction model. + """ + if self.predict_x0: + return self.data_prediction_fn(x, t) + else: + return self.noise_prediction_fn(x, t) + + def get_time_steps(self, skip_type, t_T, t_0, N, device): + """Compute the intermediate time steps for sampling. + + Args: + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + N: A `int`. The total number of the spacing of the time steps. + device: A torch device. + Returns: + A pytorch tensor of the time steps, with the shape (N + 1,). 
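+
+        Example (an illustrative sketch; `dpm_solver` is assumed to be an already-constructed DPM_Solver):
+        >>> ts = dpm_solver.get_time_steps('time_uniform', t_T=1., t_0=1e-3, N=10, device=torch.device('cpu'))
+        >>> ts.shape  # N + 1 time points, decreasing from t_T to t_0
+        torch.Size([11])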
+        """
+        if skip_type == 'logSNR':
+            lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
+            lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
+            logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
+            return self.noise_schedule.inverse_lambda(logSNR_steps)
+        elif skip_type == 'time_uniform':
+            return torch.linspace(t_T, t_0, N + 1).to(device)
+        elif skip_type == 'time_quadratic':
+            t_order = 2
+            t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)
+            return t
+        else:
+            raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
+
+    def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
+        """
+        Get the order of each step for sampling by the singlestep DPM-Solver.
+
+        We combine both DPM-Solver-1,2,3 to use all the function evaluations, which we call "DPM-Solver-fast".
+        Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
+            - If order == 1:
+                We take `steps` of DPM-Solver-1 (i.e. DDIM).
+            - If order == 2:
+                - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
+                - If steps % 2 == 0, we use K steps of DPM-Solver-2.
+                - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
+            - If order == 3:
+                - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
+                - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
+                - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
+                - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
+
+        ============================================
+        Args:
+            order: An `int`. The max order for the solver (2 or 3).
+            steps: An `int`. The total number of function evaluations (NFE).
+            skip_type: A `str`. The type for the spacing of the time steps. We support three types:
+                - 'logSNR': uniform logSNR for the time steps.
+                - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
+                - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
+            t_T: A `float`. The starting time of the sampling (default is T).
+            t_0: A `float`. The ending time of the sampling (default is epsilon).
+            device: A torch device.
+        Returns:
+            timesteps_outer: A pytorch tensor of the outer time steps.
+            orders: A list of the solver order of each step.
+        """
+        if order == 3:
+            K = steps // 3 + 1
+            if steps % 3 == 0:
+                orders = [3,] * (K - 2) + [2, 1]
+            elif steps % 3 == 1:
+                orders = [3,] * (K - 1) + [1]
+            else:
+                orders = [3,] * (K - 1) + [2]
+        elif order == 2:
+            if steps % 2 == 0:
+                K = steps // 2
+                orders = [2,] * K
+            else:
+                K = steps // 2 + 1
+                orders = [2,] * (K - 1) + [1]
+        elif order == 1:
+            K = 1
+            orders = [1,] * steps
+        else:
+            raise ValueError("'order' must be '1' or '2' or '3'.")
+        if skip_type == 'logSNR':
+            # To reproduce the results in DPM-Solver paper
+            timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
+        else:
+            # `dim=0` is required by `torch.cumsum`; without it this call raises a TypeError.
+            timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders), dim=0).to(device)]
+        return timesteps_outer, orders
+
+    def denoise_to_zero_fn(self, x, s):
+        """
+        Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
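+        (In this limit sigma_t -> 0 and alpha_t -> 1, so the first-order update reduces to returning the
+        predicted x_0 itself; hence we simply call `data_prediction_fn`.)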
+ """ + return self.data_prediction_fn(x, s) + + def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): + """ + DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + if self.predict_x0: + phi_1 = torch.expm1(-h) + if model_s is None: + model_s = self.model_fn(x, s) + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + ) + if return_intermediate: + return x_t, {'model_s': model_s} + else: + return x_t + else: + phi_1 = torch.expm1(h) + if model_s is None: + model_s = self.model_fn(x, s) + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + ) + if return_intermediate: + return x_t, {'model_s': model_s} + else: + return x_t + + def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpm_solver'): + """ + Singlestep solver DPM-Solver-2 from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + r1: A `float`. The hyperparameter of the second-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. 
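+
+        Note: the intermediate time `s1` satisfies lambda_{s1} = lambda_s + r1 * (lambda_t - lambda_s),
+        so the default r1 = 0.5 places `s1` at the logSNR midpoint between `s` and `t`.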
+ """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + if r1 is None: + r1 = 0.5 + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + s1 = ns.inverse_lambda(lambda_s1) + log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t) + alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t) + + if self.predict_x0: + phi_11 = torch.expm1(-r1 * h) + phi_1 = torch.expm1(-h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = ( + expand_dims(sigma_s1 / sigma_s, dims) * x + - expand_dims(alpha_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s) + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (model_s1 - model_s) + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_1 = torch.expm1(h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = ( + expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x + - expand_dims(sigma_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s) + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s) + ) + if return_intermediate: + return x_t, {'model_s': model_s, 'model_s1': model_s1} + else: + return x_t + + def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpm_solver'): + """ + Singlestep solver DPM-Solver-3 from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + r1: A `float`. The hyperparameter of the third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). + If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. 
+ Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + if r1 is None: + r1 = 1. / 3. + if r2 is None: + r2 = 2. / 3. + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + lambda_s2 = lambda_s + r2 * h + s1 = ns.inverse_lambda(lambda_s1) + s2 = ns.inverse_lambda(lambda_s2) + log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t) + alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) + + if self.predict_x0: + phi_11 = torch.expm1(-r1 * h) + phi_12 = torch.expm1(-r2 * h) + phi_1 = torch.expm1(-h) + phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1. + phi_2 = phi_1 / h + 1. + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = ( + expand_dims(sigma_s1 / sigma_s, dims) * x + - expand_dims(alpha_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + expand_dims(sigma_s2 / sigma_s, dims) * x + - expand_dims(alpha_s2 * phi_12, dims) * model_s + + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s) + ) + elif solver_type == 'taylor': + D1_0 = (1. / r1) * (model_s1 - model_s) + D1_1 = (1. / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2. * (D1_1 - D1_0) / (r2 - r1) + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + expand_dims(alpha_t * phi_2, dims) * D1 + - expand_dims(alpha_t * phi_3, dims) * D2 + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_12 = torch.expm1(r2 * h) + phi_1 = torch.expm1(h) + phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1. + phi_2 = phi_1 / h - 1. + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = ( + expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x + - expand_dims(sigma_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x + - expand_dims(sigma_s2 * phi_12, dims) * model_s + - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s) + ) + elif solver_type == 'taylor': + D1_0 = (1. / r1) * (model_s1 - model_s) + D1_1 = (1. / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2. 
* (D1_1 - D1_0) / (r2 - r1) + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - expand_dims(sigma_t * phi_2, dims) * D1 + - expand_dims(sigma_t * phi_3, dims) * D2 + ) + + if return_intermediate: + return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2} + else: + return x_t + + def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): + """ + Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + ns = self.noise_schedule + dims = x.dim() + model_prev_1, model_prev_0 = model_prev_list + t_prev_1, t_prev_0 = t_prev_list + lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0 = h_0 / h + D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) + if self.predict_x0: + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 + ) + else: + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 + ) + return x_t + + def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): + """ + Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. 
We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + ns = self.noise_schedule + dims = x.dim() + model_prev_2, model_prev_1, model_prev_0 = model_prev_list + t_prev_2, t_prev_1, t_prev_0 = t_prev_list + lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_1 = lambda_prev_1 - lambda_prev_2 + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0, r1 = h_0 / h, h_1 / h + D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) + D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2) + D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1) + D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1) + if self.predict_x0: + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1 + - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h**2 - 0.5), dims) * D2 + ) + else: + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1 + - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h**2 - 0.5), dims) * D2 + ) + return x_t + + def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, r2=None): + """ + Singlestep DPM-Solver with the order `order` from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + r1: A `float`. The hyperparameter of the second-order or third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if order == 1: + return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) + elif order == 2: + return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1) + elif order == 3: + return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2) + else: + raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) + + def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'): + """ + Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. 
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if order == 1: + return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1]) + elif order == 2: + return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + elif order == 3: + return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + else: + raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) + + def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpm_solver'): + """ + The adaptive step size solver based on singlestep DPM-Solver. + + Args: + x: A pytorch tensor. The initial value at time `t_T`. + order: A `int`. The (higher) order of the solver. We only support order == 2 or 3. + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + h_init: A `float`. The initial step size (for logSNR). + atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1]. + rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05. + theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1]. + t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the + current time and `t_0` is less than `t_err`. The default setting is 1e-5. + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_0: A pytorch tensor. The approximated solution at time `t_0`. + + [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021. + """ + ns = self.noise_schedule + s = t_T * torch.ones((x.shape[0],)).to(x) + lambda_s = ns.marginal_lambda(s) + lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x)) + h = h_init * torch.ones_like(s).to(x) + x_prev = x + nfe = 0 + if order == 2: + r1 = 0.5 + lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True) + higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs) + elif order == 3: + r1, r2 = 1. / 3., 2. / 3. 
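+            # DPM-Solver-2 serves as the lower-order estimate and DPM-Solver-3 as the higher-order one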
+ lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type) + higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs) + else: + raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order)) + while torch.abs((s - t_0)).mean() > t_err: + t = ns.inverse_lambda(lambda_s + h) + x_lower, lower_noise_kwargs = lower_update(x, s, t) + x_higher = higher_update(x, s, t, **lower_noise_kwargs) + delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))) + norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)) + E = norm_fn((x_higher - x_lower) / delta).max() + if torch.all(E <= 1.): + x = x_higher + s = t + x_prev = x_lower + lambda_s = ns.marginal_lambda(s) + h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s) + nfe += order + print('adaptive solver nfe', nfe) + return x + + def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform', + method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', + atol=0.0078, rtol=0.05, + ): + """ + Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`. + + ===================================================== + + We support the following algorithms for both noise prediction model and data prediction model: + - 'singlestep': + Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver. + We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps). + The total number of function evaluations (NFE) == `steps`. + Given a fixed NFE == `steps`, the sampling procedure is: + - If `order` == 1: + - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2. + - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If `order` == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2. + - 'multistep': + Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`. + We initialize the first `order` values by lower order multistep solvers. + Given a fixed NFE == `steps`, the sampling procedure is: + Denote K = steps. + - If `order` == 1: + - We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2. + - If `order` == 3: + - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3. + - 'singlestep_fixed': + Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3). 
+            We use singlestep DPM-Solver-`order` for `order` = 1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
+        - 'adaptive':
+            Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
+            We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
+            You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
+            (NFE) and the sample quality.
+            - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
+            - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
+
+        =====================================================
+
+        Some advice for choosing the algorithm:
+        - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
+            Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
+            e.g.
+            >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
+            >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
+                    skip_type='time_uniform', method='singlestep')
+        - For **guided sampling with large guidance scale** by DPMs:
+            Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
+            e.g.
+            >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
+            >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
+                    skip_type='time_uniform', method='multistep')
+
+        We support three types of `skip_type`:
+        - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images**
+        - 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**.
+        - 'time_quadratic': quadratic time for the time steps.
+
+        =====================================================
+        Args:
+            x: A pytorch tensor. The initial value at time `t_start`
+                e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
+            steps: An `int`. The total number of function evaluations (NFE).
+            t_start: A `float`. The starting time of the sampling.
+                If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
+            t_end: A `float`. The ending time of the sampling.
+                If `t_end` is None, we use 1. / self.noise_schedule.total_N.
+                e.g. if total_N == 1000, we have `t_end` == 1e-3.
+                For discrete-time DPMs:
+                    - We recommend `t_end` == 1. / self.noise_schedule.total_N.
+                For continuous-time DPMs:
+                    - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
+            order: An `int`. The order of DPM-Solver.
+            skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
+            method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
+            denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
+                Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
+
+                This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and
+                score_sde (https://arxiv.org/abs/2011.13456). It can improve the FID
+                of diffusion models sampled by diffusion SDEs for low-resolution images
+                (such as CIFAR-10). However, we observed that this trick does not matter for
+                high-resolution images. As it needs an additional NFE, we do not recommend
+                it for high-resolution images.
+            lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
+ Only valid for `method=multistep` and `steps < 15`. We empirically find that + this trick is a key to stabilizing the sampling by DPM-Solver with very few steps + (especially for steps <= 10). So we recommend to set it to be `True`. + solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`. + atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + Returns: + x_end: A pytorch tensor. The approximated solution at time `t_end`. + + """ + t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end + t_T = self.noise_schedule.T if t_start is None else t_start + device = x.device + if method == 'adaptive': + with torch.no_grad(): + x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type) + elif method == 'multistep': + assert steps >= order + timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) + assert timesteps.shape[0] - 1 == steps + with torch.no_grad(): + vec_t = timesteps[0].expand((x.shape[0])) + model_prev_list = [self.model_fn(x, vec_t)] + t_prev_list = [vec_t] + # Init the first `order` values by lower order multistep DPM-Solver. + for init_order in range(1, order): + vec_t = timesteps[init_order].expand(x.shape[0]) + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, solver_type=solver_type) + model_prev_list.append(self.model_fn(x, vec_t)) + t_prev_list.append(vec_t) + # Compute the remaining values by `order`-th order multistep DPM-Solver. + for step in range(order, steps + 1): + vec_t = timesteps[step].expand(x.shape[0]) + if lower_order_final and steps < 15: + step_order = min(order, steps + 1 - step) + else: + step_order = order + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, solver_type=solver_type) + for i in range(order - 1): + t_prev_list[i] = t_prev_list[i + 1] + model_prev_list[i] = model_prev_list[i + 1] + t_prev_list[-1] = vec_t + # We do not need to evaluate the final model value. 
+ if step < steps: + model_prev_list[-1] = self.model_fn(x, vec_t) + elif method in ['singlestep', 'singlestep_fixed']: + if method == 'singlestep': + timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device) + elif method == 'singlestep_fixed': + K = steps // order + orders = [order,] * K + timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) + for i, order in enumerate(orders): + t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1] + timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), N=order, device=device) + lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) + vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0]) + h = lambda_inner[-1] - lambda_inner[0] + r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h + r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h + x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) + if denoise_to_zero: + x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) + return x + + + +############################################################# +# other utility functions +############################################################# + +def interpolate_fn(x, xp, yp): + """ + A piecewise linear function y = f(x), using xp and yp as keypoints. + We implement f(x) in a differentiable way (i.e. applicable for autograd). + The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) + + Args: + x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). + xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. + yp: PyTorch tensor with shape [C, K]. + Returns: + The function values f(x), with shape [N, C]. + """ + N, K = x.shape[0], xp.shape[1] + all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2) + sorted_all_x, x_indices = torch.sort(all_x, dim=2) + x_idx = torch.argmin(x_indices, dim=2) + cand_start_idx = x_idx - 1 + start_idx = torch.where( + torch.eq(x_idx, 0), + torch.tensor(1, device=x.device), + torch.where( + torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + ), + ) + end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1) + start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2) + end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2) + start_idx2 = torch.where( + torch.eq(x_idx, 0), + torch.tensor(0, device=x.device), + torch.where( + torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + ), + ) + y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1) + start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2) + end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2) + cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x) + return cand + + +def expand_dims(v, dims): + """ + Expand the tensor `v` to the dim `dims`. + + Args: + `v`: a PyTorch tensor with shape [N]. + `dim`: a `int`. + Returns: + a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. 
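+
+    Example (a minimal sketch):
+    >>> v = torch.ones(4)
+    >>> expand_dims(v, 4).shape  # appends dims - 1 trailing singleton axes
+    torch.Size([4, 1, 1, 1])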
+ """ + return v[(...,) + (None,)*(dims - 1)] \ No newline at end of file diff --git a/ldm/models/diffusion/dpm_solver/sampler.py b/ldm/models/diffusion/dpm_solver/sampler.py new file mode 100644 index 00000000..2c42d6f9 --- /dev/null +++ b/ldm/models/diffusion/dpm_solver/sampler.py @@ -0,0 +1,82 @@ +"""SAMPLING ONLY.""" + +import torch + +from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver + + +class DPMSolverSampler(object): + def __init__(self, model, **kwargs): + super().__init__() + self.model = model + to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) + self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + + # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') + + device = self.model.betas.device + if x_T is None: + img = torch.randn(size, device=device) + else: + img = x_T + + ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) + + model_fn = model_wrapper( + lambda x, t, c: self.model.apply_model(x, t, c), + ns, + model_type="noise", + guidance_type="classifier-free", + condition=conditioning, + unconditional_condition=unconditional_conditioning, + guidance_scale=unconditional_guidance_scale, + ) + + dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) + x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True) + + return x.to(device), None diff --git a/ldm/models/diffusion/plms.py b/ldm/models/diffusion/plms.py new file mode 100644 index 00000000..78eeb100 --- /dev/null +++ b/ldm/models/diffusion/plms.py @@ -0,0 +1,236 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like + + +class PLMSSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + if ddim_eta != 0: + raise ValueError('ddim_eta must be 0 for 
PLMS') + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
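+               # (when unconditional_guidance_scale != 1., p_sample_plms below
+               #  blends the conditional and unconditional noise predictions,
+               #  i.e. classifier-free guidance)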
+ **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for PLMS sampling is {size}') + + samples, intermediates = self.plms_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + ) + return samples, intermediates + + @torch.no_grad() + def plms_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None,): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running PLMS Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) + old_eps = [] + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + old_eps=old_eps, t_next=ts_next) + img, pred_x0, e_t = outs + old_eps.append(e_t) + if len(old_eps) >= 4: + old_eps.pop(0) + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None): + b, *_, device = *x.shape, x.device + + def get_model_output(x, t): + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + return e_t + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + + def get_x_prev_and_pred_x0(e_t, index): + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t
+            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
+            if noise_dropout > 0.:
+                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
+            x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
+            return x_prev, pred_x0
+
+        e_t = get_model_output(x, t)
+        if len(old_eps) == 0:
+            # Pseudo Improved Euler (2nd order)
+            x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
+            e_t_next = get_model_output(x_prev, t_next)
+            e_t_prime = (e_t + e_t_next) / 2
+        elif len(old_eps) == 1:
+            # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
+            e_t_prime = (3 * e_t - old_eps[-1]) / 2
+        elif len(old_eps) == 2:
+            # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
+            e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
+        elif len(old_eps) >= 3:
+            # 4th order Pseudo Linear Multistep (Adams-Bashforth)
+            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
+
+        x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
+
+        return x_prev, pred_x0, e_t
diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py
new file mode 100644
index 00000000..f4eff39c
--- /dev/null
+++ b/ldm/modules/attention.py
@@ -0,0 +1,261 @@
+from inspect import isfunction
+import math
+import torch
+import torch.nn.functional as F
+from torch import nn, einsum
+from einops import rearrange, repeat
+
+from ldm.modules.diffusionmodules.util import checkpoint
+
+
+def exists(val):
+    return val is not None
+
+
+def uniq(arr):
+    return {el: True for el in arr}.keys()
+
+
+def default(val, d):
+    if exists(val):
+        return val
+    return d() if isfunction(d) else d
+
+
+def max_neg_value(t):
+    return -torch.finfo(t.dtype).max
+
+
+def init_(tensor):
+    dim = tensor.shape[-1]
+    std = 1 / math.sqrt(dim)
+    tensor.uniform_(-std, std)
+    return tensor
+
+
+# feedforward
+class GEGLU(nn.Module):
+    def __init__(self, dim_in, dim_out):
+        super().__init__()
+        self.proj = nn.Linear(dim_in, dim_out * 2)
+
+    def forward(self, x):
+        x, gate = self.proj(x).chunk(2, dim=-1)
+        return x * F.gelu(gate)
+
+
+class FeedForward(nn.Module):
+    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
+        super().__init__()
+        inner_dim = int(dim * mult)
+        dim_out = default(dim_out, dim)
+        project_in = nn.Sequential(
+            nn.Linear(dim, inner_dim),
+            nn.GELU()
+        ) if not glu else GEGLU(dim, inner_dim)
+
+        self.net = nn.Sequential(
+            project_in,
+            nn.Dropout(dropout),
+            nn.Linear(inner_dim, dim_out)
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+
+def zero_module(module):
+    """
+    Zero out the parameters of a module and return it.
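+    Zero-initialized output projections make a residual branch contribute
+    nothing at initialization, so each new block starts as an identity mapping.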
+ """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def Normalize(in_channels): + return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) + + +class LinearAttention(nn.Module): + def __init__(self, dim, heads=4, dim_head=32): + super().__init__() + self.heads = heads + hidden_dim = dim_head * heads + self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) + self.to_out = nn.Conv2d(hidden_dim, dim, 1) + + def forward(self, x): + b, c, h, w = x.shape + qkv = self.to_qkv(x) + q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) + k = k.softmax(dim=-1) + context = torch.einsum('bhdn,bhen->bhde', k, v) + out = torch.einsum('bhde,bhdn->bhen', context, q) + out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) + return self.to_out(out) + + +class SpatialSelfAttention(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b,c,h,w = q.shape + q = rearrange(q, 'b c h w -> b (h w) c') + k = rearrange(k, 'b c h w -> b c (h w)') + w_ = torch.einsum('bij,bjk->bik', q, k) + + w_ = w_ * (int(c)**(-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = rearrange(v, 'b c h w -> b c (h w)') + w_ = rearrange(w_, 'b i j -> b j i') + h_ = torch.einsum('bij,bjk->bik', v, w_) + h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) + h_ = self.proj_out(h_) + + return x+h_ + + +class CrossAttention(nn.Module): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): + super().__init__() + inner_dim = dim_head * heads + context_dim = default(context_dim, query_dim) + + self.scale = dim_head ** -0.5 + self.heads = heads + + self.to_q = nn.Linear(query_dim, inner_dim, bias=False) + self.to_k = nn.Linear(context_dim, inner_dim, bias=False) + self.to_v = nn.Linear(context_dim, inner_dim, bias=False) + + self.to_out = nn.Sequential( + nn.Linear(inner_dim, query_dim), + nn.Dropout(dropout) + ) + + def forward(self, x, context=None, mask=None): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + k = self.to_k(context) + v = self.to_v(context) + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + sim = einsum('b i d, b j d -> b i j', q, k) * self.scale + + if exists(mask): + mask = rearrange(mask, 'b ... 
-> b (...)') + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + attn = sim.softmax(dim=-1) + + out = einsum('b i j, b j d -> b i d', attn, v) + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + return self.to_out(out) + + +class BasicTransformerBlock(nn.Module): + def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True): + super().__init__() + self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention + self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) + self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, + heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none + self.norm1 = nn.LayerNorm(dim) + self.norm2 = nn.LayerNorm(dim) + self.norm3 = nn.LayerNorm(dim) + self.checkpoint = checkpoint + + def forward(self, x, context=None): + return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) + + def _forward(self, x, context=None): + x = self.attn1(self.norm1(x)) + x + x = self.attn2(self.norm2(x), context=context) + x + x = self.ff(self.norm3(x)) + x + return x + + +class SpatialTransformer(nn.Module): + """ + Transformer block for image-like data. + First, project the input (aka embedding) + and reshape to b, t, d. + Then apply standard transformer action. + Finally, reshape to image + """ + def __init__(self, in_channels, n_heads, d_head, + depth=1, dropout=0., context_dim=None): + super().__init__() + self.in_channels = in_channels + inner_dim = n_heads * d_head + self.norm = Normalize(in_channels) + + self.proj_in = nn.Conv2d(in_channels, + inner_dim, + kernel_size=1, + stride=1, + padding=0) + + self.transformer_blocks = nn.ModuleList( + [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) + for d in range(depth)] + ) + + self.proj_out = zero_module(nn.Conv2d(inner_dim, + in_channels, + kernel_size=1, + stride=1, + padding=0)) + + def forward(self, x, context=None): + # note: if no context is given, cross-attention defaults to self-attention + b, c, h, w = x.shape + x_in = x + x = self.norm(x) + x = self.proj_in(x) + x = rearrange(x, 'b c h w -> b (h w) c') + for block in self.transformer_blocks: + x = block(x, context=context) + x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w) + x = self.proj_out(x) + return x + x_in \ No newline at end of file diff --git a/ldm/modules/diffusionmodules/__init__.py b/ldm/modules/diffusionmodules/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldm/modules/diffusionmodules/model.py b/ldm/modules/diffusionmodules/model.py new file mode 100644 index 00000000..533e589a --- /dev/null +++ b/ldm/modules/diffusionmodules/model.py @@ -0,0 +1,835 @@ +# pytorch_diffusion + derived encoder decoder +import math +import torch +import torch.nn as nn +import numpy as np +from einops import rearrange + +from ldm.util import instantiate_from_config +from ldm.modules.attention import LinearAttention + + +def get_timestep_embedding(timesteps, embedding_dim): + """ + This matches the implementation in Denoising Diffusion Probabilistic Models: + From Fairseq. + Build sinusoidal embeddings. + This matches the implementation in tensor2tensor, but differs slightly + from the description in Section 3.5 of "Attention Is All You Need". 
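+    Returns a float tensor of shape [len(timesteps), embedding_dim];
+    when embedding_dim is odd, the last column is zero-padded.
+
+    Example (illustrative)::
+
+        >>> get_timestep_embedding(torch.arange(4), 64).shape
+        torch.Size([4, 64])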
+ """ + assert len(timesteps.shape) == 1 + + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) + emb = emb.to(device=timesteps.device) + emb = timesteps.float()[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0,1,0,0)) + return emb + + +def nonlinearity(x): + # swish + return x*torch.sigmoid(x) + + +def Normalize(in_channels, num_groups=32): + return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) + + +class Upsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=2, + padding=0) + + def forward(self, x): + if self.with_conv: + pad = (0,1,0,1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) + x = self.conv(x) + else: + x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) + return x + + +class ResnetBlock(nn.Module): + def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, + dropout, temb_channels=512): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + + self.norm1 = Normalize(in_channels) + self.conv1 = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + if temb_channels > 0: + self.temb_proj = torch.nn.Linear(temb_channels, + out_channels) + self.norm2 = Normalize(out_channels) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = torch.nn.Conv2d(out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + self.conv_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + else: + self.nin_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x, temb): + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + if temb is not None: + h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] + + h = self.norm2(h) + h = nonlinearity(h) + h = self.dropout(h) + h = self.conv2(h) + + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return x+h + + +class LinAttnBlock(LinearAttention): + """to match AttnBlock usage""" + def __init__(self, in_channels): + super().__init__(dim=in_channels, heads=1, dim_head=in_channels) + + +class AttnBlock(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = 
torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b,c,h,w = q.shape + q = q.reshape(b,c,h*w) + q = q.permute(0,2,1) # b,hw,c + k = k.reshape(b,c,h*w) # b,c,hw + w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] + w_ = w_ * (int(c)**(-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = v.reshape(b,c,h*w) + w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) + h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] + h_ = h_.reshape(b,c,h,w) + + h_ = self.proj_out(h_) + + return x+h_ + + +def make_attn(in_channels, attn_type="vanilla"): + assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' + print(f"making attention of type '{attn_type}' with {in_channels} in_channels") + if attn_type == "vanilla": + return AttnBlock(in_channels) + elif attn_type == "none": + return nn.Identity(in_channels) + else: + return LinAttnBlock(in_channels) + + +class Model(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = self.ch*4 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + self.use_timestep = use_timestep + if self.use_timestep: + # timestep embedding + self.temb = nn.Module() + self.temb.dense = nn.ModuleList([ + torch.nn.Linear(self.ch, + self.temb_ch), + torch.nn.Linear(self.temb_ch, + self.temb_ch), + ]) + + # downsampling + self.conv_in = torch.nn.Conv2d(in_channels, + self.ch, + kernel_size=3, + stride=1, + padding=1) + + curr_res = resolution + in_ch_mult = (1,)+tuple(ch_mult) + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions-1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch*ch_mult[i_level] + skip_in = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks+1): + 
if i_block == self.num_res_blocks: + skip_in = ch*in_ch_mult[i_level] + block.append(ResnetBlock(in_channels=block_in+skip_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_ch, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x, t=None, context=None): + #assert x.shape[2] == x.shape[3] == self.resolution + if context is not None: + # assume aligned context, cat along channel axis + x = torch.cat((x, context), dim=1) + if self.use_timestep: + # timestep embedding + assert t is not None + temb = get_timestep_embedding(t, self.ch) + temb = self.temb.dense[0](temb) + temb = nonlinearity(temb) + temb = self.temb.dense[1](temb) + else: + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions-1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks+1): + h = self.up[i_level].block[i_block]( + torch.cat([h, hs.pop()], dim=1), temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + def get_last_layer(self): + return self.conv_out.weight + + +class Encoder(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", + **ignore_kwargs): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + # downsampling + self.conv_in = torch.nn.Conv2d(in_channels, + self.ch, + kernel_size=3, + stride=1, + padding=1) + + curr_res = resolution + in_ch_mult = (1,)+tuple(ch_mult) + self.in_ch_mult = in_ch_mult + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions-1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + 
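+        # bottleneck at the lowest resolution (resolution // 2**(num_resolutions-1)):
+        # ResNet block -> attention -> ResNet block, all on block_in channels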
self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + 2*z_channels if double_z else z_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + # timestep embedding + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions-1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class Decoder(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, + attn_type="vanilla", **ignorekwargs): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + self.give_pre_end = give_pre_end + self.tanh_out = tanh_out + + # compute in_ch_mult, block_in and curr_res at lowest res + in_ch_mult = (1,)+tuple(ch_mult) + block_in = ch*ch_mult[self.num_resolutions-1] + curr_res = resolution // 2**(self.num_resolutions-1) + self.z_shape = (1,z_channels,curr_res,curr_res) + print("Working with z of shape {} = {} dimensions.".format( + self.z_shape, np.prod(self.z_shape))) + + # z to block_in + self.conv_in = torch.nn.Conv2d(z_channels, + block_in, + kernel_size=3, + stride=1, + padding=1) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks+1): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_ch, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, z): + #assert z.shape[1:] == self.z_shape[1:] + self.last_z_shape = z.shape + + # timestep embedding + temb = None + + # z to block_in + h = 
self.conv_in(z) + + # middle + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks+1): + h = self.up[i_level].block[i_block](h, temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + if self.give_pre_end: + return h + + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + if self.tanh_out: + h = torch.tanh(h) + return h + + +class SimpleDecoder(nn.Module): + def __init__(self, in_channels, out_channels, *args, **kwargs): + super().__init__() + self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), + ResnetBlock(in_channels=in_channels, + out_channels=2 * in_channels, + temb_channels=0, dropout=0.0), + ResnetBlock(in_channels=2 * in_channels, + out_channels=4 * in_channels, + temb_channels=0, dropout=0.0), + ResnetBlock(in_channels=4 * in_channels, + out_channels=2 * in_channels, + temb_channels=0, dropout=0.0), + nn.Conv2d(2*in_channels, in_channels, 1), + Upsample(in_channels, with_conv=True)]) + # end + self.norm_out = Normalize(in_channels) + self.conv_out = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + for i, layer in enumerate(self.model): + if i in [1,2,3]: + x = layer(x, None) + else: + x = layer(x) + + h = self.norm_out(x) + h = nonlinearity(h) + x = self.conv_out(h) + return x + + +class UpsampleDecoder(nn.Module): + def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, + ch_mult=(2,2), dropout=0.0): + super().__init__() + # upsampling + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + block_in = in_channels + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.res_blocks = nn.ModuleList() + self.upsample_blocks = nn.ModuleList() + for i_level in range(self.num_resolutions): + res_block = [] + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + res_block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + self.res_blocks.append(nn.ModuleList(res_block)) + if i_level != self.num_resolutions - 1: + self.upsample_blocks.append(Upsample(block_in, True)) + curr_res = curr_res * 2 + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + # upsampling + h = x + for k, i_level in enumerate(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.res_blocks[i_level][i_block](h, None) + if i_level != self.num_resolutions - 1: + h = self.upsample_blocks[k](h) + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class LatentRescaler(nn.Module): + def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): + super().__init__() + # residual block, interpolate, residual block + self.factor = factor + self.conv_in = nn.Conv2d(in_channels, + mid_channels, + kernel_size=3, + stride=1, + padding=1) + self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0) for _ in range(depth)]) + self.attn = AttnBlock(mid_channels) + self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, 
+                                                     out_channels=mid_channels,
+                                                     temb_channels=0,
+                                                     dropout=0.0) for _ in range(depth)])
+
+        self.conv_out = nn.Conv2d(mid_channels,
+                                  out_channels,
+                                  kernel_size=1,
+                                  )
+
+    def forward(self, x):
+        x = self.conv_in(x)
+        for block in self.res_block1:
+            x = block(x, None)
+        x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
+        x = self.attn(x)
+        for block in self.res_block2:
+            x = block(x, None)
+        x = self.conv_out(x)
+        return x
+
+
+class MergedRescaleEncoder(nn.Module):
+    def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
+                 attn_resolutions, dropout=0.0, resamp_with_conv=True,
+                 ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
+        super().__init__()
+        intermediate_chn = ch * ch_mult[-1]
+        self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
+                               z_channels=intermediate_chn, double_z=False, resolution=resolution,
+                               attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
+                               out_ch=None)
+        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
+                                       mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
+
+    def forward(self, x):
+        x = self.encoder(x)
+        x = self.rescaler(x)
+        return x
+
+
+class MergedRescaleDecoder(nn.Module):
+    def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
+                 dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
+        super().__init__()
+        tmp_chn = z_channels*ch_mult[-1]
+        self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
+                               resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
+                               ch_mult=ch_mult, resolution=resolution, ch=ch)
+        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
+                                       out_channels=tmp_chn, depth=rescale_module_depth)
+
+    def forward(self, x):
+        x = self.rescaler(x)
+        x = self.decoder(x)
+        return x
+
+
+class Upsampler(nn.Module):
+    def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
+        super().__init__()
+        assert out_size >= in_size
+        num_blocks = int(np.log2(out_size//in_size))+1
+        factor_up = 1.+ (out_size % in_size)
+        print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
+        self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
+                                       out_channels=in_channels)
+        self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
+                               attn_resolutions=[], in_channels=None, ch=in_channels,
+                               ch_mult=[ch_mult for _ in range(num_blocks)])
+
+    def forward(self, x):
+        x = self.rescaler(x)
+        x = self.decoder(x)
+        return x
+
+
+class Resize(nn.Module):
+    def __init__(self, in_channels=None, learned=False, mode="bilinear"):
+        super().__init__()
+        self.with_conv = learned
+        self.mode = mode
+        if self.with_conv:
+            print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode")
+            raise NotImplementedError()
+            assert in_channels is not None
+            # no asymmetric padding in torch conv, must do it ourselves
+            self.conv = torch.nn.Conv2d(in_channels,
+                                        in_channels,
+                                        kernel_size=4,
+                                        stride=2,
+                                        padding=1)
+
+    def forward(self, x, scale_factor=1.0):
+        if scale_factor==1.0:
+            return x
+        else:
+            x = torch.nn.functional.interpolate(x,
mode=self.mode, align_corners=False, scale_factor=scale_factor)
+        return x
+
+class FirstStagePostProcessor(nn.Module):
+
+    def __init__(self, ch_mult:list, in_channels,
+                 pretrained_model:nn.Module=None,
+                 reshape=False,
+                 n_channels=None,
+                 dropout=0.,
+                 pretrained_config=None):
+        super().__init__()
+        if pretrained_config is None:
+            assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
+            self.pretrained_model = pretrained_model
+        else:
+            assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
+            self.instantiate_pretrained(pretrained_config)
+
+        self.do_reshape = reshape
+
+        if n_channels is None:
+            n_channels = self.pretrained_model.encoder.ch
+
+        self.proj_norm = Normalize(in_channels,num_groups=in_channels//2)
+        self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3,
+                            stride=1,padding=1)
+
+        blocks = []
+        downs = []
+        ch_in = n_channels
+        for m in ch_mult:
+            blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout))
+            ch_in = m * n_channels
+            downs.append(Downsample(ch_in, with_conv=False))
+
+        self.model = nn.ModuleList(blocks)
+        self.downsampler = nn.ModuleList(downs)
+
+
+    def instantiate_pretrained(self, config):
+        model = instantiate_from_config(config)
+        self.pretrained_model = model.eval()
+        # self.pretrained_model.train = False
+        for param in self.pretrained_model.parameters():
+            param.requires_grad = False
+
+
+    @torch.no_grad()
+    def encode_with_pretrained(self,x):
+        # imported here so the isinstance check below does not raise a NameError;
+        # this class is the only user of the distribution in this module
+        from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
+        c = self.pretrained_model.encode(x)
+        if isinstance(c, DiagonalGaussianDistribution):
+            c = c.mode()
+        return c
+
+    def forward(self,x):
+        z_fs = self.encode_with_pretrained(x)
+        z = self.proj_norm(z_fs)
+        z = self.proj(z)
+        z = nonlinearity(z)
+
+        for submodel, downmodel in zip(self.model,self.downsampler):
+            z = submodel(z,temb=None)
+            z = downmodel(z)
+
+        if self.do_reshape:
+            z = rearrange(z,'b c h w -> b (h w) c')
+        return z
+
diff --git a/ldm/modules/diffusionmodules/openaimodel.py b/ldm/modules/diffusionmodules/openaimodel.py
new file mode 100644
index 00000000..fcf95d1e
--- /dev/null
+++ b/ldm/modules/diffusionmodules/openaimodel.py
@@ -0,0 +1,961 @@
+from abc import abstractmethod
+from functools import partial
+import math
+from typing import Iterable
+
+import numpy as np
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ldm.modules.diffusionmodules.util import (
+    checkpoint,
+    conv_nd,
+    linear,
+    avg_pool_nd,
+    zero_module,
+    normalization,
+    timestep_embedding,
+)
+from ldm.modules.attention import SpatialTransformer
+
+
+# dummy replace
+def convert_module_to_f16(x):
+    pass
+
+def convert_module_to_f32(x):
+    pass
+
+
+## go
+class AttentionPool2d(nn.Module):
+    """
+    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
+    """
+
+    def __init__(
+        self,
+        spacial_dim: int,
+        embed_dim: int,
+        num_heads_channels: int,
+        output_dim: int = None,
+    ):
+        super().__init__()
+        self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
+        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
+        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
+        self.num_heads = embed_dim // num_heads_channels
+        self.attention = QKVAttention(self.num_heads)
+
+    def forward(self, x):
+        b, c, *_spatial = x.shape
+        x = x.reshape(b, c, -1)  # NC(HW)
+        x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)  # NC(HW+1)
+        x = x + self.positional_embedding[None, :, :].to(x.dtype)  # NC(HW+1)
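+        # self-attention over the HW+1 tokens; the prepended mean token is
+        # returned as the pooled embedding (x[:, :, 0]) below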
x = self.qkv_proj(x) + x = self.attention(x) + x = self.c_proj(x) + return x[:, :, 0] + + +class TimestepBlock(nn.Module): + """ + Any module where forward() takes timestep embeddings as a second argument. + """ + + @abstractmethod + def forward(self, x, emb): + """ + Apply the module to `x` given `emb` timestep embeddings. + """ + + +class TimestepEmbedSequential(nn.Sequential, TimestepBlock): + """ + A sequential module that passes timestep embeddings to the children that + support it as an extra input. + """ + + def forward(self, x, emb, context=None): + for layer in self: + if isinstance(layer, TimestepBlock): + x = layer(x, emb) + elif isinstance(layer, SpatialTransformer): + x = layer(x, context) + else: + x = layer(x) + return x + + +class Upsample(nn.Module): + """ + An upsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + upsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + if use_conv: + self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) + + def forward(self, x): + assert x.shape[1] == self.channels + if self.dims == 3: + x = F.interpolate( + x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" + ) + else: + x = F.interpolate(x, scale_factor=2, mode="nearest") + if self.use_conv: + x = self.conv(x) + return x + +class TransposedUpsample(nn.Module): + 'Learned 2x upsampling without padding' + def __init__(self, channels, out_channels=None, ks=5): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + + self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) + + def forward(self,x): + return self.up(x) + + +class Downsample(nn.Module): + """ + A downsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + downsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + stride = 2 if dims != 3 else (1, 2, 2) + if use_conv: + self.op = conv_nd( + dims, self.channels, self.out_channels, 3, stride=stride, padding=padding + ) + else: + assert self.channels == self.out_channels + self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) + + def forward(self, x): + assert x.shape[1] == self.channels + return self.op(x) + + +class ResBlock(TimestepBlock): + """ + A residual block that can optionally change the number of channels. + :param channels: the number of input channels. + :param emb_channels: the number of timestep embedding channels. + :param dropout: the rate of dropout. + :param out_channels: if specified, the number of out channels. + :param use_conv: if True and out_channels is specified, use a spatial + convolution instead of a smaller 1x1 convolution to change the + channels in the skip connection. + :param dims: determines if the signal is 1D, 2D, or 3D. 
+ :param use_checkpoint: if True, use gradient checkpointing on this module. + :param up: if True, use this block for upsampling. + :param down: if True, use this block for downsampling. + """ + + def __init__( + self, + channels, + emb_channels, + dropout, + out_channels=None, + use_conv=False, + use_scale_shift_norm=False, + dims=2, + use_checkpoint=False, + up=False, + down=False, + ): + super().__init__() + self.channels = channels + self.emb_channels = emb_channels + self.dropout = dropout + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_checkpoint = use_checkpoint + self.use_scale_shift_norm = use_scale_shift_norm + + self.in_layers = nn.Sequential( + normalization(channels), + nn.SiLU(), + conv_nd(dims, channels, self.out_channels, 3, padding=1), + ) + + self.updown = up or down + + if up: + self.h_upd = Upsample(channels, False, dims) + self.x_upd = Upsample(channels, False, dims) + elif down: + self.h_upd = Downsample(channels, False, dims) + self.x_upd = Downsample(channels, False, dims) + else: + self.h_upd = self.x_upd = nn.Identity() + + self.emb_layers = nn.Sequential( + nn.SiLU(), + linear( + emb_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, + ), + ) + self.out_layers = nn.Sequential( + normalization(self.out_channels), + nn.SiLU(), + nn.Dropout(p=dropout), + zero_module( + conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) + ), + ) + + if self.out_channels == channels: + self.skip_connection = nn.Identity() + elif use_conv: + self.skip_connection = conv_nd( + dims, channels, self.out_channels, 3, padding=1 + ) + else: + self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) + + def forward(self, x, emb): + """ + Apply the block to a Tensor, conditioned on a timestep embedding. + :param x: an [N x C x ...] Tensor of features. + :param emb: an [N x emb_channels] Tensor of timestep embeddings. + :return: an [N x C x ...] Tensor of outputs. + """ + return checkpoint( + self._forward, (x, emb), self.parameters(), self.use_checkpoint + ) + + + def _forward(self, x, emb): + if self.updown: + in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] + h = in_rest(x) + h = self.h_upd(h) + x = self.x_upd(x) + h = in_conv(h) + else: + h = self.in_layers(x) + emb_out = self.emb_layers(emb).type(h.dtype) + while len(emb_out.shape) < len(h.shape): + emb_out = emb_out[..., None] + if self.use_scale_shift_norm: + out_norm, out_rest = self.out_layers[0], self.out_layers[1:] + scale, shift = th.chunk(emb_out, 2, dim=1) + h = out_norm(h) * (1 + scale) + shift + h = out_rest(h) + else: + h = h + emb_out + h = self.out_layers(h) + return self.skip_connection(x) + h + + +class AttentionBlock(nn.Module): + """ + An attention block that allows spatial positions to attend to each other. + Originally ported from here, but adapted to the N-d case. + https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
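+    In this UNet it is used when use_spatial_transformer is False; with
+    cross-attention conditioning a SpatialTransformer takes its place.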
+ """ + + def __init__( + self, + channels, + num_heads=1, + num_head_channels=-1, + use_checkpoint=False, + use_new_attention_order=False, + ): + super().__init__() + self.channels = channels + if num_head_channels == -1: + self.num_heads = num_heads + else: + assert ( + channels % num_head_channels == 0 + ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" + self.num_heads = channels // num_head_channels + self.use_checkpoint = use_checkpoint + self.norm = normalization(channels) + self.qkv = conv_nd(1, channels, channels * 3, 1) + if use_new_attention_order: + # split qkv before split heads + self.attention = QKVAttention(self.num_heads) + else: + # split heads before split qkv + self.attention = QKVAttentionLegacy(self.num_heads) + + self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) + + def forward(self, x): + return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! + #return pt_checkpoint(self._forward, x) # pytorch + + def _forward(self, x): + b, c, *spatial = x.shape + x = x.reshape(b, c, -1) + qkv = self.qkv(self.norm(x)) + h = self.attention(qkv) + h = self.proj_out(h) + return (x + h).reshape(b, c, *spatial) + + +def count_flops_attn(model, _x, y): + """ + A counter for the `thop` package to count the operations in an + attention operation. + Meant to be used like: + macs, params = thop.profile( + model, + inputs=(inputs, timestamps), + custom_ops={QKVAttention: QKVAttention.count_flops}, + ) + """ + b, c, *spatial = y[0].shape + num_spatial = int(np.prod(spatial)) + # We perform two matmuls with the same number of ops. + # The first computes the weight matrix, the second computes + # the combination of the value vectors. + matmul_ops = 2 * b * (num_spatial ** 2) * c + model.total_ops += th.DoubleTensor([matmul_ops]) + + +class QKVAttentionLegacy(nn.Module): + """ + A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. + """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", q * scale, k * scale + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class QKVAttention(nn.Module): + """ + A module which performs QKV attention and splits in a different order. + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. 
+ """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.chunk(3, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", + (q * scale).view(bs * self.n_heads, ch, length), + (k * scale).view(bs * self.n_heads, ch, length), + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class UNetModel(nn.Module): + """ + The full UNet model with attention and timestep embedding. + :param in_channels: channels in the input Tensor. + :param model_channels: base channel count for the model. + :param out_channels: channels in the output Tensor. + :param num_res_blocks: number of residual blocks per downsample. + :param attention_resolutions: a collection of downsample rates at which + attention will take place. May be a set, list, or tuple. + For example, if this contains 4, then at 4x downsampling, attention + will be used. + :param dropout: the dropout probability. + :param channel_mult: channel multiplier for each level of the UNet. + :param conv_resample: if True, use learned convolutions for upsampling and + downsampling. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param num_classes: if specified (as an int), then this model will be + class-conditional with `num_classes` classes. + :param use_checkpoint: use gradient checkpointing to reduce memory usage. + :param num_heads: the number of attention heads in each attention layer. + :param num_heads_channels: if specified, ignore num_heads and instead use + a fixed channel width per attention head. + :param num_heads_upsample: works with num_heads to set a different number + of heads for upsampling. Deprecated. + :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. + :param resblock_updown: use residual blocks for up/downsampling. + :param use_new_attention_order: use a different attention pattern for potentially + increased efficiency. + """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + num_classes=None, + use_checkpoint=False, + use_fp16=False, + num_heads=-1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + use_spatial_transformer=False, # custom transformer support + transformer_depth=1, # custom transformer support + context_dim=None, # custom transformer support + n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model + legacy=True, + ): + super().__init__() + if use_spatial_transformer: + assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' + + if context_dim is not None: + assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
+ from omegaconf.listconfig import ListConfig + if type(context_dim) == ListConfig: + context_dim = list(context_dim) + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + if num_heads == -1: + assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' + + if num_head_channels == -1: + assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' + + self.image_size = image_size + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + self.num_res_blocks = num_res_blocks + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.num_classes = num_classes + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + self.predict_codebook_ids = n_embed is not None + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + if self.num_classes is not None: + self.label_emb = nn.Embedding(num_classes, time_embed_dim) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for _ in range(num_res_blocks): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + 
num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + + self.output_blocks = nn.ModuleList([]) + for level, mult in list(enumerate(channel_mult))[::-1]: + for i in range(num_res_blocks + 1): + ich = input_block_chans.pop() + layers = [ + ResBlock( + ch + ich, + time_embed_dim, + dropout, + out_channels=model_channels * mult, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = model_channels * mult + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads_upsample, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ) + ) + if level and i == num_res_blocks: + out_ch = ch + layers.append( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + up=True, + ) + if resblock_updown + else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) + ) + ds //= 2 + self.output_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), + ) + if self.predict_codebook_ids: + self.id_predictor = nn.Sequential( + normalization(ch), + conv_nd(dims, model_channels, n_embed, 1), + #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits + ) + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + self.output_blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + self.output_blocks.apply(convert_module_to_f32) + + def forward(self, x, timesteps=None, context=None, y=None,**kwargs): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :param context: conditioning plugged in via crossattn + :param y: an [N] Tensor of labels, if class-conditional. + :return: an [N x C x ...] Tensor of outputs. 
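+
+        Example (editor's sketch; hyperparameters follow the bundled
+        v1-inference config and are illustrative only, not part of the API):
+            model = UNetModel(image_size=32, in_channels=4, out_channels=4,
+                              model_channels=320, num_res_blocks=2,
+                              attention_resolutions=[4, 2, 1],
+                              channel_mult=[1, 2, 4, 4], num_heads=8,
+                              use_spatial_transformer=True,
+                              transformer_depth=1, context_dim=768)
+            x = th.randn(1, 4, 64, 64)         # latent batch
+            t = th.randint(0, 1000, (1,))      # one timestep per sample
+            ctx = th.randn(1, 77, 768)         # text conditioning
+            eps = model(x, t, context=ctx)     # -> [1, 4, 64, 64]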
+ """ + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape == (x.shape[0],) + emb = emb + self.label_emb(y) + + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb, context) + hs.append(h) + h = self.middle_block(h, emb, context) + for module in self.output_blocks: + h = th.cat([h, hs.pop()], dim=1) + h = module(h, emb, context) + h = h.type(x.dtype) + if self.predict_codebook_ids: + return self.id_predictor(h) + else: + return self.out(h) + + +class EncoderUNetModel(nn.Module): + """ + The half UNet model with attention and timestep embedding. + For usage, see UNet. + """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + use_checkpoint=False, + use_fp16=False, + num_heads=1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + pool="adaptive", + *args, + **kwargs + ): + super().__init__() + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + self.num_res_blocks = num_res_blocks + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for _ in range(num_res_blocks): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=num_head_channels, + use_new_attention_order=use_new_attention_order, + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + 
dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=num_head_channels, + use_new_attention_order=use_new_attention_order, + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + self.pool = pool + if pool == "adaptive": + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + nn.AdaptiveAvgPool2d((1, 1)), + zero_module(conv_nd(dims, ch, out_channels, 1)), + nn.Flatten(), + ) + elif pool == "attention": + assert num_head_channels != -1 + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + AttentionPool2d( + (image_size // ds), ch, num_head_channels, out_channels + ), + ) + elif pool == "spatial": + self.out = nn.Sequential( + nn.Linear(self._feature_size, 2048), + nn.ReLU(), + nn.Linear(2048, self.out_channels), + ) + elif pool == "spatial_v2": + self.out = nn.Sequential( + nn.Linear(self._feature_size, 2048), + normalization(2048), + nn.SiLU(), + nn.Linear(2048, self.out_channels), + ) + else: + raise NotImplementedError(f"Unexpected {pool} pooling") + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + + def forward(self, x, timesteps): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :return: an [N x K] Tensor of outputs. + """ + emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) + + results = [] + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb) + if self.pool.startswith("spatial"): + results.append(h.type(x.dtype).mean(dim=(2, 3))) + h = self.middle_block(h, emb) + if self.pool.startswith("spatial"): + results.append(h.type(x.dtype).mean(dim=(2, 3))) + h = th.cat(results, axis=-1) + return self.out(h) + else: + h = h.type(x.dtype) + return self.out(h) + diff --git a/ldm/modules/diffusionmodules/util.py b/ldm/modules/diffusionmodules/util.py new file mode 100644 index 00000000..a952e6c4 --- /dev/null +++ b/ldm/modules/diffusionmodules/util.py @@ -0,0 +1,267 @@ +# adopted from +# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py +# and +# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +# and +# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py +# +# thanks! 
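+#
+# Editor's note -- a minimal sketch (assuming the 1000-step linear schedule
+# from the bundled configs; the values are illustrative) of how the helpers
+# below combine for DDIM sampling:
+#
+#   betas = make_beta_schedule("linear", 1000,
+#                              linear_start=0.00085, linear_end=0.0120)
+#   alphas_cumprod = np.cumprod(1.0 - betas)
+#   ddim_ts = make_ddim_timesteps("uniform", 50, 1000, verbose=False)
+#   sigmas, a_t, a_prev = make_ddim_sampling_parameters(
+#       alphas_cumprod, ddim_ts, eta=0.0, verbose=False)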
+ + +import os +import math +import torch +import torch.nn as nn +import numpy as np +from einops import repeat + +from ldm.util import instantiate_from_config + + +def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if schedule == "linear": + betas = ( + torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 + ) + + elif schedule == "cosine": + timesteps = ( + torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s + ) + alphas = timesteps / (1 + cosine_s) * np.pi / 2 + alphas = torch.cos(alphas).pow(2) + alphas = alphas / alphas[0] + betas = 1 - alphas[1:] / alphas[:-1] + betas = np.clip(betas, a_min=0, a_max=0.999) + + elif schedule == "sqrt_linear": + betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) + elif schedule == "sqrt": + betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 + else: + raise ValueError(f"schedule '{schedule}' unknown.") + return betas.numpy() + + +def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True): + if ddim_discr_method == 'uniform': + c = num_ddpm_timesteps // num_ddim_timesteps + ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) + elif ddim_discr_method == 'quad': + ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int) + else: + raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"') + + # assert ddim_timesteps.shape[0] == num_ddim_timesteps + # add one to get the final alpha values right (the ones from first scale to data during sampling) + steps_out = ddim_timesteps + 1 + if verbose: + print(f'Selected timesteps for ddim sampler: {steps_out}') + return steps_out + + +def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): + # select alphas for computing the variance schedule + alphas = alphacums[ddim_timesteps] + alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) + + # according the the formula provided in https://arxiv.org/abs/2010.02502 + sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) + if verbose: + print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') + print(f'For the chosen value of eta, which is {eta}, ' + f'this results in the following sigma_t schedule for ddim sampler {sigmas}') + return sigmas, alphas, alphas_prev + + +def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, + which defines the cumulative product of (1-beta) over time from t = [0,1]. + :param num_diffusion_timesteps: the number of betas to produce. + :param alpha_bar: a lambda that takes an argument t from 0 to 1 and + produces the cumulative product of (1-beta) up to that + part of the diffusion process. + :param max_beta: the maximum beta to use; use values lower than 1 to + prevent singularities. 
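+    Example (editor's note): the cosine schedule from improved-diffusion is
+        betas = betas_for_alpha_bar(
+            1000, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2)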
+ """ + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + + +def extract_into_tensor(a, t, x_shape): + b, *_ = t.shape + out = a.gather(-1, t) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) + + +def checkpoint(func, inputs, params, flag): + """ + Evaluate a function without caching intermediate activations, allowing for + reduced memory at the expense of extra compute in the backward pass. + :param func: the function to evaluate. + :param inputs: the argument sequence to pass to `func`. + :param params: a sequence of parameters `func` depends on but does not + explicitly take as arguments. + :param flag: if False, disable gradient checkpointing. + """ + if flag: + args = tuple(inputs) + tuple(params) + return CheckpointFunction.apply(func, len(inputs), *args) + else: + return func(*inputs) + + +class CheckpointFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, run_function, length, *args): + ctx.run_function = run_function + ctx.input_tensors = list(args[:length]) + ctx.input_params = list(args[length:]) + + with torch.no_grad(): + output_tensors = ctx.run_function(*ctx.input_tensors) + return output_tensors + + @staticmethod + def backward(ctx, *output_grads): + ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] + with torch.enable_grad(): + # Fixes a bug where the first op in run_function modifies the + # Tensor storage in place, which is not allowed for detach()'d + # Tensors. + shallow_copies = [x.view_as(x) for x in ctx.input_tensors] + output_tensors = ctx.run_function(*shallow_copies) + input_grads = torch.autograd.grad( + output_tensors, + ctx.input_tensors + ctx.input_params, + output_grads, + allow_unused=True, + ) + del ctx.input_tensors + del ctx.input_params + del output_tensors + return (None, None) + input_grads + + +def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): + """ + Create sinusoidal timestep embeddings. + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """ + if not repeat_only: + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=timesteps.device) + args = timesteps[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + else: + embedding = repeat(timesteps, 'b -> b d', d=dim) + return embedding + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def scale_module(module, scale): + """ + Scale the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().mul_(scale) + return module + + +def mean_flat(tensor): + """ + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def normalization(channels): + """ + Make a standard normalization layer. + :param channels: number of input channels. + :return: an nn.Module for normalization. 
+ """ + return GroupNorm32(32, channels) + + +# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. +class SiLU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(x) + + +class GroupNorm32(nn.GroupNorm): + def forward(self, x): + return super().forward(x.float()).type(x.dtype) + +def conv_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D convolution module. + """ + if dims == 1: + return nn.Conv1d(*args, **kwargs) + elif dims == 2: + return nn.Conv2d(*args, **kwargs) + elif dims == 3: + return nn.Conv3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +def linear(*args, **kwargs): + """ + Create a linear module. + """ + return nn.Linear(*args, **kwargs) + + +def avg_pool_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D average pooling module. + """ + if dims == 1: + return nn.AvgPool1d(*args, **kwargs) + elif dims == 2: + return nn.AvgPool2d(*args, **kwargs) + elif dims == 3: + return nn.AvgPool3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +class HybridConditioner(nn.Module): + + def __init__(self, c_concat_config, c_crossattn_config): + super().__init__() + self.concat_conditioner = instantiate_from_config(c_concat_config) + self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) + + def forward(self, c_concat, c_crossattn): + c_concat = self.concat_conditioner(c_concat) + c_crossattn = self.crossattn_conditioner(c_crossattn) + return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/ldm/modules/distributions/__init__.py b/ldm/modules/distributions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldm/modules/distributions/distributions.py b/ldm/modules/distributions/distributions.py new file mode 100644 index 00000000..f2b8ef90 --- /dev/null +++ b/ldm/modules/distributions/distributions.py @@ -0,0 +1,92 @@ +import torch +import numpy as np + + +class AbstractDistribution: + def sample(self): + raise NotImplementedError() + + def mode(self): + raise NotImplementedError() + + +class DiracDistribution(AbstractDistribution): + def __init__(self, value): + self.value = value + + def sample(self): + return self.value + + def mode(self): + return self.value + + +class DiagonalGaussianDistribution(object): + def __init__(self, parameters, deterministic=False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) + + def sample(self): + x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) + return x + + def kl(self, other=None): + if self.deterministic: + return torch.Tensor([0.]) + else: + if other is None: + return 0.5 * torch.sum(torch.pow(self.mean, 2) + + self.var - 1.0 - self.logvar, + dim=[1, 2, 3]) + else: + return 0.5 * torch.sum( + torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var - 1.0 - self.logvar + other.logvar, + dim=[1, 2, 3]) + + def nll(self, sample, dims=[1,2,3]): + if 
self.deterministic: + return torch.Tensor([0.]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum( + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims) + + def mode(self): + return self.mean + + +def normal_kl(mean1, logvar1, mean2, logvar2): + """ + source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 + Compute the KL divergence between two gaussians. + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, torch.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for torch.exp(). + logvar1, logvar2 = [ + x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + torch.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) + ) diff --git a/ldm/modules/ema.py b/ldm/modules/ema.py new file mode 100644 index 00000000..c8c75af4 --- /dev/null +++ b/ldm/modules/ema.py @@ -0,0 +1,76 @@ +import torch +from torch import nn + + +class LitEma(nn.Module): + def __init__(self, model, decay=0.9999, use_num_upates=True): + super().__init__() + if decay < 0.0 or decay > 1.0: + raise ValueError('Decay must be between 0 and 1') + + self.m_name2s_name = {} + self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) + self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates + else torch.tensor(-1,dtype=torch.int)) + + for name, p in model.named_parameters(): + if p.requires_grad: + #remove as '.'-character is not allowed in buffers + s_name = name.replace('.','') + self.m_name2s_name.update({name:s_name}) + self.register_buffer(s_name,p.clone().detach().data) + + self.collected_params = [] + + def forward(self,model): + decay = self.decay + + if self.num_updates >= 0: + self.num_updates += 1 + decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates)) + + one_minus_decay = 1.0 - decay + + with torch.no_grad(): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + + for key in m_param: + if m_param[key].requires_grad: + sname = self.m_name2s_name[key] + shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) + shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) + else: + assert not key in self.m_name2s_name + + def copy_to(self, model): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + for key in m_param: + if m_param[key].requires_grad: + m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) + else: + assert not key in self.m_name2s_name + + def store(self, parameters): + """ + Save the current parameters for restoring later. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + temporarily stored. + """ + self.collected_params = [param.clone() for param in parameters] + + def restore(self, parameters): + """ + Restore the parameters stored with the `store` method. + Useful to validate the model with EMA parameters without affecting the + original optimization process. Store the parameters before the + `copy_to` method. 
After validation (or model saving), use this to + restore the former parameters. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + updated with the stored parameters. + """ + for c_param, param in zip(self.collected_params, parameters): + param.data.copy_(c_param.data) diff --git a/ldm/modules/encoders/__init__.py b/ldm/modules/encoders/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldm/modules/encoders/modules.py b/ldm/modules/encoders/modules.py new file mode 100644 index 00000000..ededbe43 --- /dev/null +++ b/ldm/modules/encoders/modules.py @@ -0,0 +1,234 @@ +import torch +import torch.nn as nn +from functools import partial +import clip +from einops import rearrange, repeat +from transformers import CLIPTokenizer, CLIPTextModel +import kornia + +from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test + + +class AbstractEncoder(nn.Module): + def __init__(self): + super().__init__() + + def encode(self, *args, **kwargs): + raise NotImplementedError + + + +class ClassEmbedder(nn.Module): + def __init__(self, embed_dim, n_classes=1000, key='class'): + super().__init__() + self.key = key + self.embedding = nn.Embedding(n_classes, embed_dim) + + def forward(self, batch, key=None): + if key is None: + key = self.key + # this is for use in crossattn + c = batch[key][:, None] + c = self.embedding(c) + return c + + +class TransformerEmbedder(AbstractEncoder): + """Some transformer encoder layers""" + def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"): + super().__init__() + self.device = device + self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, + attn_layers=Encoder(dim=n_embed, depth=n_layer)) + + def forward(self, tokens): + tokens = tokens.to(self.device) # meh + z = self.transformer(tokens, return_embeddings=True) + return z + + def encode(self, x): + return self(x) + + +class BERTTokenizer(AbstractEncoder): + """ Uses a pretrained BERT tokenizer by huggingface. 
Vocab size: 30522 (?)"""
+    def __init__(self, device="cuda", vq_interface=True, max_length=77):
+        super().__init__()
+        from transformers import BertTokenizerFast  # TODO: add to requirements
+        self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+        self.device = device
+        self.vq_interface = vq_interface
+        self.max_length = max_length
+
+    def forward(self, text):
+        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
+                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
+        tokens = batch_encoding["input_ids"].to(self.device)
+        return tokens
+
+    @torch.no_grad()
+    def encode(self, text):
+        tokens = self(text)
+        if not self.vq_interface:
+            return tokens
+        return None, None, [None, None, tokens]
+
+    def decode(self, text):
+        return text
+
+
+class BERTEmbedder(AbstractEncoder):
+    """Uses the BERT tokenizer and adds some transformer encoder layers"""
+    def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
+                 device="cuda", use_tokenizer=True, embedding_dropout=0.0):
+        super().__init__()
+        self.use_tknz_fn = use_tokenizer
+        if self.use_tknz_fn:
+            self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
+        self.device = device
+        self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
+                                              attn_layers=Encoder(dim=n_embed, depth=n_layer),
+                                              emb_dropout=embedding_dropout)
+
+    def forward(self, text):
+        if self.use_tknz_fn:
+            tokens = self.tknz_fn(text)#.to(self.device)
+        else:
+            tokens = text
+        z = self.transformer(tokens, return_embeddings=True)
+        return z
+
+    def encode(self, text):
+        # output of length 77
+        return self(text)
+
+
+class SpatialRescaler(nn.Module):
+    def __init__(self,
+                 n_stages=1,
+                 method='bilinear',
+                 multiplier=0.5,
+                 in_channels=3,
+                 out_channels=None,
+                 bias=False):
+        super().__init__()
+        self.n_stages = n_stages
+        assert self.n_stages >= 0
+        assert method in ['nearest', 'linear', 'bilinear', 'trilinear', 'bicubic', 'area']
+        self.multiplier = multiplier
+        self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
+        self.remap_output = out_channels is not None
+        if self.remap_output:
+            print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
+            self.channel_mapper = nn.Conv2d(in_channels, out_channels, 1, bias=bias)
+
+    def forward(self, x):
+        for stage in range(self.n_stages):
+            x = self.interpolator(x, scale_factor=self.multiplier)
+
+        if self.remap_output:
+            x = self.channel_mapper(x)
+        return x
+
+    def encode(self, x):
+        return self(x)
+
+
+class FrozenCLIPEmbedder(AbstractEncoder):
+    """Uses the CLIP transformer encoder for text (from Hugging Face)"""
+    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77):
+        super().__init__()
+        self.tokenizer = CLIPTokenizer.from_pretrained(version)
+        self.transformer = CLIPTextModel.from_pretrained(version)
+        self.device = device
+        self.max_length = max_length
+        self.freeze()
+
+    def freeze(self):
+        self.transformer = self.transformer.eval()
+        for param in self.parameters():
+            param.requires_grad = False
+
+    def forward(self, text):
+        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
+                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
+        tokens = batch_encoding["input_ids"].to(self.device)
+        outputs = self.transformer(input_ids=tokens)
+
+        z = outputs.last_hidden_state
+        return z
+
+    def encode(self, text):
+        return self(text)
+
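+
+# Editor's note -- hedged usage sketch (not part of the original file): with
+# the v1-inference config, FrozenCLIPEmbedder is the cond_stage_model and is
+# used roughly as
+#
+#   embedder = FrozenCLIPEmbedder().to("cuda")
+#   z = embedder.encode(["a photograph of an astronaut riding a horse"])
+#   # z.shape == (1, 77, 768), matching the UNet's context_dim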
+ +class FrozenCLIPTextEmbedder(nn.Module): + """ + Uses the CLIP transformer encoder for text. + """ + def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True): + super().__init__() + self.model, _ = clip.load(version, jit=False, device="cpu") + self.device = device + self.max_length = max_length + self.n_repeat = n_repeat + self.normalize = normalize + + def freeze(self): + self.model = self.model.eval() + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + tokens = clip.tokenize(text).to(self.device) + z = self.model.encode_text(tokens) + if self.normalize: + z = z / torch.linalg.norm(z, dim=1, keepdim=True) + return z + + def encode(self, text): + z = self(text) + if z.ndim==2: + z = z[:, None, :] + z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat) + return z + + +class FrozenClipImageEmbedder(nn.Module): + """ + Uses the CLIP image encoder. + """ + def __init__( + self, + model, + jit=False, + device='cuda' if torch.cuda.is_available() else 'cpu', + antialias=False, + ): + super().__init__() + self.model, _ = clip.load(name=model, device=device, jit=jit) + + self.antialias = antialias + + self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) + self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) + + def preprocess(self, x): + # normalize to [0,1] + x = kornia.geometry.resize(x, (224, 224), + interpolation='bicubic',align_corners=True, + antialias=self.antialias) + x = (x + 1.) / 2. + # renormalize according to clip + x = kornia.enhance.normalize(x, self.mean, self.std) + return x + + def forward(self, x): + # x is assumed to be in range [-1,1] + return self.model.encode_image(self.preprocess(x)) + + +if __name__ == "__main__": + from ldm.util import count_params + model = FrozenCLIPEmbedder() + count_params(model, verbose=True) \ No newline at end of file diff --git a/ldm/modules/encoders/xlmr.py b/ldm/modules/encoders/xlmr.py new file mode 100644 index 00000000..beab3fdf --- /dev/null +++ b/ldm/modules/encoders/xlmr.py @@ -0,0 +1,137 @@ +from transformers import BertPreTrainedModel,BertModel,BertConfig +import torch.nn as nn +import torch +from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig +from transformers import XLMRobertaModel,XLMRobertaTokenizer +from typing import Optional + +class BertSeriesConfig(BertConfig): + def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None,project_dim=512, pooler_fn="average",learn_encoder=False,model_type='bert',**kwargs): + + super().__init__(vocab_size, hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, hidden_act, hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings, type_vocab_size, initializer_range, layer_norm_eps, pad_token_id, position_embedding_type, use_cache, classifier_dropout, **kwargs) + self.project_dim = project_dim + self.pooler_fn = pooler_fn + self.learn_encoder = learn_encoder + +class RobertaSeriesConfig(XLMRobertaConfig): + def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2,project_dim=512,pooler_fn='cls',learn_encoder=False, **kwargs): + 
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + self.project_dim = project_dim + self.pooler_fn = pooler_fn + self.learn_encoder = learn_encoder + + +class BertSeriesModelWithTransformation(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + config_class = BertSeriesConfig + + def __init__(self, config=None, **kargs): + # modify initialization for autoloading + if config is None: + config = XLMRobertaConfig() + config.attention_probs_dropout_prob= 0.1 + config.bos_token_id=0 + config.eos_token_id=2 + config.hidden_act='gelu' + config.hidden_dropout_prob=0.1 + config.hidden_size=1024 + config.initializer_range=0.02 + config.intermediate_size=4096 + config.layer_norm_eps=1e-05 + config.max_position_embeddings=514 + + config.num_attention_heads=16 + config.num_hidden_layers=24 + config.output_past=True + config.pad_token_id=1 + config.position_embedding_type= "absolute" + + config.type_vocab_size= 1 + config.use_cache=True + config.vocab_size= 250002 + config.project_dim = 768 + config.learn_encoder = False + super().__init__(config) + self.roberta = XLMRobertaModel(config) + self.transformation = nn.Linear(config.hidden_size,config.project_dim) + self.pre_LN=nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large') + self.pooler = lambda x: x[:,0] + self.post_init() + + def encode(self,c): + device = next(self.parameters()).device + text = self.tokenizer(c, + truncation=True, + max_length=77, + return_length=False, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt") + text["input_ids"] = torch.tensor(text["input_ids"]).to(device) + text["attention_mask"] = torch.tensor( + text['attention_mask']).to(device) + features = self(**text) + return features['projection_state'] + + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + ) : + r""" + """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + + outputs = self.roberta( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=True, + return_dict=return_dict, + ) + + # last module outputs + sequence_output = outputs[0] + + + # project every module + sequence_output_ln = self.pre_LN(sequence_output) + + # pooler + pooler_output = self.pooler(sequence_output_ln) + pooler_output = self.transformation(pooler_output) + projection_state = self.transformation(outputs.last_hidden_state) + + return { + 'pooler_output':pooler_output, + 'last_hidden_state':outputs.last_hidden_state, + 'hidden_states':outputs.hidden_states, + 'attentions':outputs.attentions, + 
'projection_state':projection_state, + 'sequence_out': sequence_output + } + + +class RobertaSeriesModelWithTransformation(BertSeriesModelWithTransformation): + base_model_prefix = 'roberta' + config_class= RobertaSeriesConfig \ No newline at end of file diff --git a/ldm/modules/image_degradation/__init__.py b/ldm/modules/image_degradation/__init__.py new file mode 100644 index 00000000..7836cada --- /dev/null +++ b/ldm/modules/image_degradation/__init__.py @@ -0,0 +1,2 @@ +from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr +from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light diff --git a/ldm/modules/image_degradation/bsrgan.py b/ldm/modules/image_degradation/bsrgan.py new file mode 100644 index 00000000..32ef5616 --- /dev/null +++ b/ldm/modules/image_degradation/bsrgan.py @@ -0,0 +1,730 @@ +# -*- coding: utf-8 -*- +""" +# -------------------------------------------- +# Super-Resolution +# -------------------------------------------- +# +# Kai Zhang (cskaizhang@gmail.com) +# https://github.com/cszn +# From 2019/03--2021/08 +# -------------------------------------------- +""" + +import numpy as np +import cv2 +import torch + +from functools import partial +import random +from scipy import ndimage +import scipy +import scipy.stats as ss +from scipy.interpolate import interp2d +from scipy.linalg import orth +import albumentations + +import ldm.modules.image_degradation.utils_image as util + + +def modcrop_np(img, sf): + ''' + Args: + img: numpy image, WxH or WxHxC + sf: scale factor + Return: + cropped image + ''' + w, h = img.shape[:2] + im = np.copy(img) + return im[:w - w % sf, :h - h % sf, ...] + + +""" +# -------------------------------------------- +# anisotropic Gaussian kernels +# -------------------------------------------- +""" + + +def analytic_kernel(k): + """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" + k_size = k.shape[0] + # Calculate the big kernels size + big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) + # Loop over the small kernel to fill the big one + for r in range(k_size): + for c in range(k_size): + big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k + # Crop the edges of the big kernel to ignore very small values and increase run time of SR + crop = k_size // 2 + cropped_big_k = big_k[crop:-crop, crop:-crop] + # Normalize to 1 + return cropped_big_k / cropped_big_k.sum() + + +def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): + """ generate an anisotropic Gaussian kernel + Args: + ksize : e.g., 15, kernel size + theta : [0, pi], rotation angle range + l1 : [0.1,50], scaling of eigenvalues + l2 : [0.1,l1], scaling of eigenvalues + If l1 = l2, will get an isotropic Gaussian kernel. 
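+        Example (editor's note): anisotropic_Gaussian(15, np.pi / 4, 6, 2)
+        returns a 15x15 kernel elongated along the 45-degree direction.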
+ Returns: + k : kernel + """ + + v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) + V = np.array([[v[0], v[1]], [v[1], -v[0]]]) + D = np.array([[l1, 0], [0, l2]]) + Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) + k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) + + return k + + +def gm_blur_kernel(mean, cov, size=15): + center = size / 2.0 + 0.5 + k = np.zeros([size, size]) + for y in range(size): + for x in range(size): + cy = y - center + 1 + cx = x - center + 1 + k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) + + k = k / np.sum(k) + return k + + +def shift_pixel(x, sf, upper_left=True): + """shift pixel for super-resolution with different scale factors + Args: + x: WxHxC or WxH + sf: scale factor + upper_left: shift direction + """ + h, w = x.shape[:2] + shift = (sf - 1) * 0.5 + xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) + if upper_left: + x1 = xv + shift + y1 = yv + shift + else: + x1 = xv - shift + y1 = yv - shift + + x1 = np.clip(x1, 0, w - 1) + y1 = np.clip(y1, 0, h - 1) + + if x.ndim == 2: + x = interp2d(xv, yv, x)(x1, y1) + if x.ndim == 3: + for i in range(x.shape[-1]): + x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) + + return x + + +def blur(x, k): + ''' + x: image, NxcxHxW + k: kernel, Nx1xhxw + ''' + n, c = x.shape[:2] + p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 + x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') + k = k.repeat(1, c, 1, 1) + k = k.view(-1, 1, k.shape[2], k.shape[3]) + x = x.view(1, -1, x.shape[2], x.shape[3]) + x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) + x = x.view(n, c, x.shape[2], x.shape[3]) + + return x + + +def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): + """" + # modified version of https://github.com/assafshocher/BlindSR_dataset_generator + # Kai Zhang + # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var + # max_var = 2.5 * sf + """ + # Set random eigen-vals (lambdas) and angle (theta) for COV matrix + lambda_1 = min_var + np.random.rand() * (max_var - min_var) + lambda_2 = min_var + np.random.rand() * (max_var - min_var) + theta = np.random.rand() * np.pi # random theta + noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 + + # Set COV matrix using Lambdas and Theta + LAMBDA = np.diag([lambda_1, lambda_2]) + Q = np.array([[np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)]]) + SIGMA = Q @ LAMBDA @ Q.T + INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] + + # Set expectation position (shifting kernel for aligned image) + MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) + MU = MU[None, None, :, None] + + # Create meshgrid for Gaussian + [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) + Z = np.stack([X, Y], 2)[:, :, :, None] + + # Calcualte Gaussian for every pixel of the kernel + ZZ = Z - MU + ZZ_t = ZZ.transpose(0, 1, 3, 2) + raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) + + # shift the kernel so it will be centered + # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) + + # Normalize the kernel and return + # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) + kernel = raw_kernel / np.sum(raw_kernel) + return kernel + + +def fspecial_gaussian(hsize, sigma): + hsize = [hsize, hsize] + siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] + std = 
sigma
+    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
+    arg = -(x * x + y * y) / (2 * std * std)
+    h = np.exp(arg)
+    h[h < np.finfo(float).eps * h.max()] = 0  # np.finfo: scipy.finfo is a deprecated alias
+    sumh = h.sum()
+    if sumh != 0:
+        h = h / sumh
+    return h
+
+
+def fspecial_laplacian(alpha):
+    alpha = max([0, min([alpha, 1])])
+    h1 = alpha / (alpha + 1)
+    h2 = (1 - alpha) / (alpha + 1)
+    h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
+    h = np.array(h)
+    return h
+
+
+def fspecial(filter_type, *args, **kwargs):
+    '''
+    python code from:
+    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
+    '''
+    if filter_type == 'gaussian':
+        return fspecial_gaussian(*args, **kwargs)
+    if filter_type == 'laplacian':
+        return fspecial_laplacian(*args, **kwargs)
+
+
+"""
+# --------------------------------------------
+# degradation models
+# --------------------------------------------
+"""
+
+
+def bicubic_degradation(x, sf=3):
+    '''
+    Args:
+        x: HxWxC image, [0, 1]
+        sf: down-scale factor
+    Return:
+        bicubically downsampled LR image
+    '''
+    x = util.imresize_np(x, scale=1 / sf)
+    return x
+
+
+def srmd_degradation(x, k, sf=3):
+    ''' blur + bicubic downsampling
+    Args:
+        x: HxWxC image, [0, 1]
+        k: hxw, double
+        sf: down-scale factor
+    Return:
+        downsampled LR image
+    Reference:
+        @inproceedings{zhang2018learning,
+          title={Learning a single convolutional super-resolution network for multiple degradations},
+          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
+          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
+          pages={3262--3271},
+          year={2018}
+        }
+    '''
+    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
+    x = bicubic_degradation(x, sf=sf)
+    return x
+
+
+def dpsr_degradation(x, k, sf=3):
+    ''' bicubic downsampling + blur
+    Args:
+        x: HxWxC image, [0, 1]
+        k: hxw, double
+        sf: down-scale factor
+    Return:
+        downsampled LR image
+    Reference:
+        @inproceedings{zhang2019deep,
+          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
+          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
+          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
+          pages={1671--1681},
+          year={2019}
+        }
+    '''
+    x = bicubic_degradation(x, sf=sf)
+    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
+    return x
+
+
+def classical_degradation(x, k, sf=3):
+    ''' blur + downsampling
+    Args:
+        x: HxWxC image, [0, 1]/[0, 255]
+        k: hxw, double
+        sf: down-scale factor
+    Return:
+        downsampled LR image
+    '''
+    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
+    # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
+    st = 0
+    return x[st::sf, st::sf, ...]
+
+
+def add_sharpening(img, weight=0.5, radius=50, threshold=10):
+    """USM sharpening. borrowed from real-ESRGAN
+    Input image: I; Blurry image: B.
+    1. K = I + weight * (I - B)
+    2. Mask = 1 if abs(I - B) > threshold, else: 0
+    3. Blur the mask.
+    4. Out = Mask * K + (1 - Mask) * I
+    Args:
+        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
+        weight (float): Sharp weight. Default: 0.5.
+        radius (float): Kernel size of Gaussian blur. Default: 50.
+ threshold (int): + """ + if radius % 2 == 0: + radius += 1 + blur = cv2.GaussianBlur(img, (radius, radius), 0) + residual = img - blur + mask = np.abs(residual) * 255 > threshold + mask = mask.astype('float32') + soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) + + K = img + weight * residual + K = np.clip(K, 0, 1) + return soft_mask * K + (1 - soft_mask) * img + + +def add_blur(img, sf=4): + wd2 = 4.0 + sf + wd = 2.0 + 0.2 * sf + if random.random() < 0.5: + l1 = wd2 * random.random() + l2 = wd2 * random.random() + k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) + else: + k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random()) + img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') + + return img + + +def add_resize(img, sf=4): + rnum = np.random.rand() + if rnum > 0.8: # up + sf1 = random.uniform(1, 2) + elif rnum < 0.7: # down + sf1 = random.uniform(0.5 / sf, 1) + else: + sf1 = 1.0 + img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + return img + + +# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): +# noise_level = random.randint(noise_level1, noise_level2) +# rnum = np.random.rand() +# if rnum > 0.6: # add color Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) +# elif rnum < 0.4: # add grayscale Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) +# else: # add noise +# L = noise_level2 / 255. +# D = np.diag(np.random.rand(3)) +# U = orth(np.random.rand(3, 3)) +# conv = np.dot(np.dot(np.transpose(U), D), U) +# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) +# img = np.clip(img, 0.0, 1.0) +# return img + +def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + rnum = np.random.rand() + if rnum > 0.6: # add color Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: # add grayscale Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: # add noise + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_speckle_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + img = np.clip(img, 0.0, 1.0) + rnum = random.random() + if rnum > 0.6: + img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: + img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_Poisson_noise(img): + img = np.clip((img * 255.0).round(), 0, 255) / 255. 
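+    # Editor's note: `vals` below is an effective photon count drawn
+    # log-uniformly from [1e2, 1e4]; larger values give weaker shot noise.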
+ vals = 10 ** (2 * random.random() + 2.0) # [2, 4] + if random.random() < 0.5: + img = np.random.poisson(img * vals).astype(np.float32) / vals + else: + img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) + img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. + noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray + img += noise_gray[:, :, np.newaxis] + img = np.clip(img, 0.0, 1.0) + return img + + +def add_JPEG_noise(img): + quality_factor = random.randint(30, 95) + img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) + result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) + img = cv2.imdecode(encimg, 1) + img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) + return img + + +def random_crop(lq, hq, sf=4, lq_patchsize=64): + h, w = lq.shape[:2] + rnd_h = random.randint(0, h - lq_patchsize) + rnd_w = random.randint(0, w - lq_patchsize) + lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] + + rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) + hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] + return lq, hq + + +def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + hq = img.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + img = util.imresize_np(img, 1 / 2, True) + img = np.clip(img, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + img = add_blur(img, sf=sf) + + elif i == 1: + img = add_blur(img, sf=sf) + + elif i == 2: + a, b = img.shape[1], img.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') + img = img[0::sf, 0::sf, ...] 
# nearest downsampling + img = np.clip(img, 0.0, 1.0) + + elif i == 3: + # downsample3 + img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + img = add_JPEG_noise(img) + + elif i == 6: + # add processed camera sensor noise + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf_ori, lq_patchsize) + + return img, hq + + +# todo no isp_model? +def degradation_bsrgan_variant(image, sf=4, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + image = util.uint2single(image) + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = image.shape[:2] + image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = image.shape[:2] + + hq = image.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + image = util.imresize_np(image, 1 / 2, True) + image = np.clip(image, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + image = add_blur(image, sf=sf) + + elif i == 1: + image = add_blur(image, sf=sf) + + elif i == 2: + a, b = image.shape[1], image.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') + image = image[0::sf, 0::sf, ...] 
# nearest downsampling + image = np.clip(image, 0.0, 1.0) + + elif i == 3: + # downsample3 + image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + image = np.clip(image, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + image = add_JPEG_noise(image) + + # elif i == 6: + # # add processed camera sensor noise + # if random.random() < isp_prob and isp_model is not None: + # with torch.no_grad(): + # img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + image = add_JPEG_noise(image) + image = util.single2uint(image) + example = {"image":image} + return example + + +# TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc... +def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None): + """ + This is an extended degradation model by combining + the degradation models of BSRGAN and Real-ESRGAN + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + use_shuffle: the degradation shuffle + use_sharp: sharpening the img + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + if use_sharp: + img = add_sharpening(img) + hq = img.copy() + + if random.random() < shuffle_prob: + shuffle_order = random.sample(range(13), 13) + else: + shuffle_order = list(range(13)) + # local shuffle for noise, JPEG is always the last one + shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) + shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) + + poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 + + for i in shuffle_order: + if i == 0: + img = add_blur(img, sf=sf) + elif i == 1: + img = add_resize(img, sf=sf) + elif i == 2: + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + elif i == 3: + if random.random() < poisson_prob: + img = add_Poisson_noise(img) + elif i == 4: + if random.random() < speckle_prob: + img = add_speckle_noise(img) + elif i == 5: + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + elif i == 6: + img = add_JPEG_noise(img) + elif i == 7: + img = add_blur(img, sf=sf) + elif i == 8: + img = add_resize(img, sf=sf) + elif i == 9: + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + elif i == 10: + if random.random() < poisson_prob: + img = add_Poisson_noise(img) + elif i == 11: + if random.random() < speckle_prob: + img = add_speckle_noise(img) + elif i == 12: + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + else: + print('check the shuffle!') + + # resize to desired size + img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])), + interpolation=random.choice([1, 2, 3])) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf, lq_patchsize) + 
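+    # At this point img is the LQ patch (lq_patchsize x lq_patchsize x C) and
+    # hq is the aligned HQ patch at sf times that size, both in [0, 1].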
+    return img, hq
+
+
+if __name__ == '__main__':
+    print("hey")
+    img = util.imread_uint('utils/test.png', 3)
+    img = img[:448, :448]
+    h = img.shape[0] // 4
+    print("resizing to", h)
+    sf = 4
+    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
+    for i in range(20):
+        print(i)
+        img_hq = img
+        img_lq = deg_fn(img)["image"]
+        img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
+        print(img_lq)
+        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
+        print(img_lq.shape)
+        print("bicubic", img_lq_bicubic.shape)
+        print(img_hq.shape)
+        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+                                interpolation=0)
+        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
+                                        (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+                                        interpolation=0)
+        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
+        util.imsave(img_concat, str(i) + '.png')
+
+
diff --git a/ldm/modules/image_degradation/bsrgan_light.py b/ldm/modules/image_degradation/bsrgan_light.py
new file mode 100644
index 00000000..9e1f8239
--- /dev/null
+++ b/ldm/modules/image_degradation/bsrgan_light.py
@@ -0,0 +1,650 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+import cv2
+import torch
+
+from functools import partial
+import random
+from scipy import ndimage
+import scipy
+import scipy.stats as ss
+from scipy.interpolate import interp2d
+from scipy.linalg import orth
+import albumentations
+
+import ldm.modules.image_degradation.utils_image as util
+
+"""
+# --------------------------------------------
+# Super-Resolution
+# --------------------------------------------
+#
+# Kai Zhang (cskaizhang@gmail.com)
+# https://github.com/cszn
+# From 2019/03--2021/08
+# --------------------------------------------
+"""
+
+
+def modcrop_np(img, sf):
+    '''
+    Args:
+        img: numpy image, WxH or WxHxC
+        sf: scale factor
+    Return:
+        cropped image
+    '''
+    w, h = img.shape[:2]
+    im = np.copy(img)
+    return im[:w - w % sf, :h - h % sf, ...]
+
+
+"""
+# --------------------------------------------
+# anisotropic Gaussian kernels
+# --------------------------------------------
+"""
+
+
+def analytic_kernel(k):
+    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
+    k_size = k.shape[0]
+    # Calculate the big kernel's size
+    big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
+    # Loop over the small kernel to fill the big one
+    for r in range(k_size):
+        for c in range(k_size):
+            big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
+    # Crop the edges of the big kernel to ignore very small values and increase run time of SR
+    crop = k_size // 2
+    cropped_big_k = big_k[crop:-crop, crop:-crop]
+    # Normalize to 1
+    return cropped_big_k / cropped_big_k.sum()
+
+
+def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
+    """ generate an anisotropic Gaussian kernel
+    Args:
+        ksize : e.g., 15, kernel size
+        theta : [0, pi], rotation angle range
+        l1    : [0.1, 50], scaling of eigenvalues
+        l2    : [0.1, l1], scaling of eigenvalues
+        If l1 = l2, will get an isotropic Gaussian kernel.
+    Returns:
+        k     : kernel
+    """
+
+    v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
+    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
+    D = np.array([[l1, 0], [0, l2]])
+    Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
+    k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
+
+    return k
+
+
+def gm_blur_kernel(mean, cov, size=15):
+    center = size / 2.0 + 0.5
+    k = np.zeros([size, size])
+    for y in range(size):
+        for x in range(size):
+            cy = y - center + 1
+            cx = x - center + 1
+            k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
+
+    k = k / np.sum(k)
+    return k
+
+
+def shift_pixel(x, sf, upper_left=True):
+    """shift pixel for super-resolution with different scale factors
+    Args:
+        x: WxHxC or WxH
+        sf: scale factor
+        upper_left: shift direction
+    """
+    h, w = x.shape[:2]
+    shift = (sf - 1) * 0.5
+    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
+    if upper_left:
+        x1 = xv + shift
+        y1 = yv + shift
+    else:
+        x1 = xv - shift
+        y1 = yv - shift
+
+    x1 = np.clip(x1, 0, w - 1)
+    y1 = np.clip(y1, 0, h - 1)
+
+    if x.ndim == 2:
+        x = interp2d(xv, yv, x)(x1, y1)
+    if x.ndim == 3:
+        for i in range(x.shape[-1]):
+            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
+
+    return x
+
+
+def blur(x, k):
+    '''
+    x: image, NxcxHxW
+    k: kernel, Nx1xhxw
+    '''
+    n, c = x.shape[:2]
+    p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
+    x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
+    k = k.repeat(1, c, 1, 1)
+    k = k.view(-1, 1, k.shape[2], k.shape[3])
+    x = x.view(1, -1, x.shape[2], x.shape[3])
+    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
+    x = x.view(n, c, x.shape[2], x.shape[3])
+
+    return x
+
+
+def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
+    """
+    # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
+    # Kai Zhang
+    # min_var = 0.175 * sf  # variance of the gaussian kernel will be sampled between min_var and max_var
+    # max_var = 2.5 * sf
+    """
+    # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
+    lambda_1 = min_var + np.random.rand() * (max_var - min_var)
+    lambda_2 = min_var + np.random.rand() * (max_var - min_var)
+    theta = np.random.rand() * np.pi  # random theta
+    noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
+
+    # Set COV matrix using Lambdas and Theta
+    LAMBDA = np.diag([lambda_1, lambda_2])
+    Q = np.array([[np.cos(theta), -np.sin(theta)],
+                  [np.sin(theta), np.cos(theta)]])
+    SIGMA = Q @ LAMBDA @ Q.T
+    INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
+
+    # Set expectation position (shifting kernel for aligned image)
+    MU = k_size // 2 - 0.5 * (scale_factor - 1)  # - 0.5 * (scale_factor - k_size % 2)
+    MU = MU[None, None, :, None]
+
+    # Create meshgrid for Gaussian
+    [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
+    Z = np.stack([X, Y], 2)[:, :, :, None]
+
+    # Calculate Gaussian for every pixel of the kernel
+    ZZ = Z - MU
+    ZZ_t = ZZ.transpose(0, 1, 3, 2)
+    raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
+
+    # shift the kernel so it will be centered
+    # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
+
+    # Normalize the kernel and return
+    # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
+    kernel = raw_kernel / np.sum(raw_kernel)
+    return kernel
+
+
+def fspecial_gaussian(hsize, sigma):
+    hsize = [hsize, hsize]
+    siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
+    std = sigma
+    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
+    arg = -(x * x + y * y) / (2 * std * std)
+    h = np.exp(arg)
+    h[h < np.finfo(float).eps * h.max()] = 0
+    sumh = h.sum()
+    if sumh != 0:
+        h = h / sumh
+    return h
+
+
+def fspecial_laplacian(alpha):
+    alpha = max([0, min([alpha, 1])])
+    h1 = alpha / (alpha + 1)
+    h2 = (1 - alpha) / (alpha + 1)
+    h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
+    h = np.array(h)
+    return h
+
+
+def fspecial(filter_type, *args, **kwargs):
+    '''
+    python code from:
+    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
+    '''
+    if filter_type == 'gaussian':
+        return fspecial_gaussian(*args, **kwargs)
+    if filter_type == 'laplacian':
+        return fspecial_laplacian(*args, **kwargs)
+
+
+"""
+# --------------------------------------------
+# degradation models
+# --------------------------------------------
+"""
+
+
+def bicubic_degradation(x, sf=3):
+    '''
+    Args:
+        x: HxWxC image, [0, 1]
+        sf: down-scale factor
+    Return:
+        bicubically downsampled LR image
+    '''
+    x = util.imresize_np(x, scale=1 / sf)
+    return x
+
+
+def srmd_degradation(x, k, sf=3):
+    ''' blur + bicubic downsampling
+    Args:
+        x: HxWxC image, [0, 1]
+        k: hxw, double
+        sf: down-scale factor
+    Return:
+        downsampled LR image
+    Reference:
+        @inproceedings{zhang2018learning,
+          title={Learning a single convolutional super-resolution network for multiple degradations},
+          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
+          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
+          pages={3262--3271},
+          year={2018}
+        }
+    '''
+    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
+    x = bicubic_degradation(x, sf=sf)
+    return x
+
+
+def dpsr_degradation(x, k, sf=3):
+    ''' bicubic downsampling + blur
+    Args:
+        x: HxWxC image, [0, 1]
+        k: hxw, double
+        sf: down-scale factor
+    Return:
+        downsampled LR image
+    Reference:
+        @inproceedings{zhang2019deep,
+          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
+          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
+          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
+          pages={1671--1681},
+          year={2019}
+        }
+    '''
+    x = bicubic_degradation(x, sf=sf)
+    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
+    return x
+
+
+def classical_degradation(x, k, sf=3):
+    ''' blur + downsampling
+    Args:
+        x: HxWxC image, [0, 1]/[0, 255]
+        k: hxw, double
+        sf: down-scale factor
+    Return:
+        downsampled LR image
+    '''
+    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
+    # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
+    st = 0
+    return x[st::sf, st::sf, ...]
+
+
+def add_sharpening(img, weight=0.5, radius=50, threshold=10):
+    """USM sharpening, borrowed from Real-ESRGAN.
+    Input image: I; blurry image: B.
+    1. K = I + weight * (I - B)
+    2. Mask = 1 if abs(I - B) > threshold, else 0
+    3. Blur the mask to get a soft mask
+    4. Out = Mask * K + (1 - Mask) * I
+    Args:
+        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
+        weight (float): Sharp weight. Default: 0.5.
+        radius (float): Kernel size of Gaussian blur. Default: 50.
+        threshold (int): Threshold on abs(I - B), scaled to [0, 255], for building the mask. Default: 10.
+    """
+    if radius % 2 == 0:
+        radius += 1
+    blur = cv2.GaussianBlur(img, (radius, radius), 0)
+    residual = img - blur
+    mask = np.abs(residual) * 255 > threshold
+    mask = mask.astype('float32')
+    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
+
+    K = img + weight * residual
+    K = np.clip(K, 0, 1)
+    return soft_mask * K + (1 - soft_mask) * img
+
+
+def add_blur(img, sf=4):
+    wd2 = 4.0 + sf
+    wd = 2.0 + 0.2 * sf
+
+    wd2 = wd2 / 4
+    wd = wd / 4
+
+    if random.random() < 0.5:
+        l1 = wd2 * random.random()
+        l2 = wd2 * random.random()
+        k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
+    else:
+        k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
+    img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
+
+    return img
+
+
+def add_resize(img, sf=4):
+    rnum = np.random.rand()
+    if rnum > 0.8:  # up
+        sf1 = random.uniform(1, 2)
+    elif rnum < 0.7:  # down
+        sf1 = random.uniform(0.5 / sf, 1)
+    else:
+        sf1 = 1.0
+    img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
+    img = np.clip(img, 0.0, 1.0)
+
+    return img
+
+
+# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
+#     noise_level = random.randint(noise_level1, noise_level2)
+#     rnum = np.random.rand()
+#     if rnum > 0.6:  # add color Gaussian noise
+#         img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
+#     elif rnum < 0.4:  # add grayscale Gaussian noise
+#         img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
+#     else:  # add noise
+#         L = noise_level2 / 255.
+#         D = np.diag(np.random.rand(3))
+#         U = orth(np.random.rand(3, 3))
+#         conv = np.dot(np.dot(np.transpose(U), D), U)
+#         img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+#     img = np.clip(img, 0.0, 1.0)
+#     return img
+
+def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
+    noise_level = random.randint(noise_level1, noise_level2)
+    rnum = np.random.rand()
+    if rnum > 0.6:  # add color Gaussian noise
+        img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
+    elif rnum < 0.4:  # add grayscale Gaussian noise
+        img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
+    else:  # add noise with a random 3x3 channel covariance
+        L = noise_level2 / 255.
+        D = np.diag(np.random.rand(3))
+        U = orth(np.random.rand(3, 3))
+        conv = np.dot(np.dot(np.transpose(U), D), U)
+        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+    img = np.clip(img, 0.0, 1.0)
+    return img
+
+
+def add_speckle_noise(img, noise_level1=2, noise_level2=25):
+    noise_level = random.randint(noise_level1, noise_level2)
+    img = np.clip(img, 0.0, 1.0)
+    rnum = random.random()
+    if rnum > 0.6:
+        img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
+    elif rnum < 0.4:
+        img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
+    else:
+        L = noise_level2 / 255.
+        D = np.diag(np.random.rand(3))
+        U = orth(np.random.rand(3, 3))
+        conv = np.dot(np.dot(np.transpose(U), D), U)
+        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+    img = np.clip(img, 0.0, 1.0)
+    return img
+
+
+def add_Poisson_noise(img):
+    img = np.clip((img * 255.0).round(), 0, 255) / 255.
+    vals = 10 ** (2 * random.random() + 2.0)  # exponent sampled in [2, 4]
+    if random.random() < 0.5:
+        img = np.random.poisson(img * vals).astype(np.float32) / vals
+    else:
+        img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
+        img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
+        noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
+        img += noise_gray[:, :, np.newaxis]
+    img = np.clip(img, 0.0, 1.0)
+    return img
+
+
+def add_JPEG_noise(img):
+    quality_factor = random.randint(80, 95)
+    img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
+    result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
+    img = cv2.imdecode(encimg, 1)
+    img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
+    return img
+
+
+def random_crop(lq, hq, sf=4, lq_patchsize=64):
+    h, w = lq.shape[:2]
+    rnd_h = random.randint(0, h - lq_patchsize)
+    rnd_w = random.randint(0, w - lq_patchsize)
+    lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
+
+    rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
+    hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
+    return lq, hq
+
+
+def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
+    """
+    This is the degradation model of BSRGAN from the paper
+    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
+    ----------
+    img: HxWxC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
+    sf: scale factor
+    isp_model: camera ISP model
+    Returns
+    -------
+    img: low-quality patch, size: lq_patchsize x lq_patchsize x C, range: [0, 1]
+    hq: corresponding high-quality patch, size: (lq_patchsize x sf) x (lq_patchsize x sf) x C, range: [0, 1]
+    """
+    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
+    sf_ori = sf
+
+    h1, w1 = img.shape[:2]
+    img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop (height first, width second)
+    h, w = img.shape[:2]
+
+    if h < lq_patchsize * sf or w < lq_patchsize * sf:
+        raise ValueError(f'img size ({h1}X{w1}) is too small!')
+
+    hq = img.copy()
+
+    if sf == 4 and random.random() < scale2_prob:  # downsample1
+        if np.random.rand() < 0.5:
+            img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
+                             interpolation=random.choice([1, 2, 3]))
+        else:
+            img = util.imresize_np(img, 1 / 2, True)
+        img = np.clip(img, 0.0, 1.0)
+        sf = 2
+
+    shuffle_order = random.sample(range(7), 7)
+    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
+    if idx1 > idx2:  # keep downsample3 last
+        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
+
+    for i in shuffle_order:
+
+        if i == 0:
+            img = add_blur(img, sf=sf)
+
+        elif i == 1:
+            img = add_blur(img, sf=sf)
+
+        elif i == 2:
+            a, b = img.shape[1], img.shape[0]
+            # downsample2
+            if random.random() < 0.75:
+                sf1 = random.uniform(1, 2 * sf)
+                img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
+                                 interpolation=random.choice([1, 2, 3]))
+            else:
+                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
+                k_shifted = shift_pixel(k, sf)
+                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
+                img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
+                img = img[0::sf, 0::sf, ...]  # nearest downsampling
+            img = np.clip(img, 0.0, 1.0)
+
+        elif i == 3:
+            # downsample3
+            img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
+            img = np.clip(img, 0.0, 1.0)
+
+        elif i == 4:
+            # add Gaussian noise
+            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
+
+        elif i == 5:
+            # add JPEG noise
+            if random.random() < jpeg_prob:
+                img = add_JPEG_noise(img)
+
+        elif i == 6:
+            # add processed camera sensor noise
+            if random.random() < isp_prob and isp_model is not None:
+                with torch.no_grad():
+                    img, hq = isp_model.forward(img.copy(), hq)
+
+    # add final JPEG compression noise
+    img = add_JPEG_noise(img)
+
+    # random crop
+    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
+
+    return img, hq
+
+
+# todo no isp_model?
+def degradation_bsrgan_variant(image, sf=4, isp_model=None):
+    """
+    This is the degradation model of BSRGAN from the paper
+    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
+    ----------
+    image: HxWxC, uint8, [0, 255]
+    sf: scale factor
+    isp_model: camera ISP model
+    Returns
+    -------
+    example: dict holding the degraded low-quality image (uint8, 1/sf size) under the key "image"
+    """
+    image = util.uint2single(image)
+    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
+    sf_ori = sf
+
+    h1, w1 = image.shape[:2]
+    image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop (height first, width second)
+    h, w = image.shape[:2]
+
+    hq = image.copy()
+
+    if sf == 4 and random.random() < scale2_prob:  # downsample1
+        if np.random.rand() < 0.5:
+            image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
+                               interpolation=random.choice([1, 2, 3]))
+        else:
+            image = util.imresize_np(image, 1 / 2, True)
+        image = np.clip(image, 0.0, 1.0)
+        sf = 2
+
+    shuffle_order = random.sample(range(7), 7)
+    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
+    if idx1 > idx2:  # keep downsample3 last
+        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
+
+    for i in shuffle_order:
+
+        if i == 0:
+            image = add_blur(image, sf=sf)
+
+        # elif i == 1:
+        #     image = add_blur(image, sf=sf)  # the second blur pass is skipped in this light variant
+
+        elif i == 2:
+            a, b = image.shape[1], image.shape[0]
+            # downsample2
+            if random.random() < 0.8:
+                sf1 = random.uniform(1, 2 * sf)
+                image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
+                                   interpolation=random.choice([1, 2, 3]))
+            else:
+                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
+                k_shifted = shift_pixel(k, sf)
+                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
+                image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
+                image = image[0::sf, 0::sf, ...]  # nearest downsampling
+
+            image = np.clip(image, 0.0, 1.0)
+
+        elif i == 3:
+            # downsample3
+            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
+            image = np.clip(image, 0.0, 1.0)
+
+        elif i == 4:
+            # add Gaussian noise
+            image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
+
+        elif i == 5:
+            # add JPEG noise
+            if random.random() < jpeg_prob:
+                image = add_JPEG_noise(image)
+
+        # elif i == 6:
+        #     # add processed camera sensor noise
+        #     if random.random() < isp_prob and isp_model is not None:
+        #         with torch.no_grad():
+        #             img, hq = isp_model.forward(img.copy(), hq)
+
+    # add final JPEG compression noise
+    image = add_JPEG_noise(image)
+    image = util.single2uint(image)
+    example = {"image": image}
+    return example
+
+
+if __name__ == '__main__':
+    print("hey")
+    img = util.imread_uint('utils/test.png', 3)
+    img = img[:448, :448]
+    h = img.shape[0] // 4
+    print("resizing to", h)
+    sf = 4
+    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
+    for i in range(20):
+        print(i)
+        img_hq = img
+        img_lq = deg_fn(img)["image"]
+        img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
+        print(img_lq)
+        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
+        print(img_lq.shape)
+        print("bicubic", img_lq_bicubic.shape)
+        print(img_hq.shape)
+        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+                                interpolation=0)
+        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
+                                        (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+                                        interpolation=0)
+        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
+        util.imsave(img_concat, str(i) + '.png')
diff --git a/ldm/modules/image_degradation/utils/test.png b/ldm/modules/image_degradation/utils/test.png
new file mode 100644
index 0000000000000000000000000000000000000000..4249b43de0f22707758d13c240268a401642f6e6
GIT binary patch
literal 441072
[441072 bytes of base85-encoded PNG data omitted -- the test.png blob carries no reviewable content]
zXMQ__;zM$g2t9fHtp`$nwA1vqBriEjv67at@Nk{e!jKn-GRfpTY$aG1E&s(IyzFj@ zA_@5YXJ`djWkDSZaaIkCE%ysLA%VtqwYrnuf{9D(0Pw{?y#ei*8C$uL%5fbkyn;Y% zw!QW)2nV52?{LT3^8aEImO_b1F<-Ykm!>%`@51#GQ=1tdf~ z*VY1njthWdIQf=Nv?rK@+Vzosaj;pE$)!NDjUboJgB2Z z^Lw%<(;_<@P?9nTmKTaP>g8ct0tqPMP9Z1^`L*BspD`)^Rme(E59?qyYtsjirWO3~#t}L(C=hpoeoYks#3Wn3eOvkidMrP8^f5W58d6lPGiZ8!I zPFwBkc5ySeO8%otDv;{B(W7XN%)p{F5$=wn1`MYwTXJ_8!WAl35#t|WP0q$5dp`Ti zV7!VGdSC4vef%i=wsb{4rY35^HlBS3Z*Mex(pPc7a?t(v!*0G76lG?&+)ohd`E%8Q zsat+*J=aLdx~Amaxz#fRtfOLdNTY3pe1eUYsAClKn3cZ@jQ*qZc^gas<)(2D6LF;= zcEA4OxpyvJ@CZjA$pq$vN=7{;IELj4-2G$5M+>nl4`GYSy(m@VzAkXV>N$A{I$n1F zZvFbqhdTUGzpO7sF5oRA$iyYC?f(5sT+?C8r8yoh_l40`Cte<{=|C5?*N_nrJ#^&D(0;)_w}0v@$Wl)j0<>=~Fi4|;q!-;ZtaN|u-P?yV)ItT#V+jc_1 z;ALnXZengy6?BdNtO&!UYOEDI`K#8{pn}(wksD%^phrap^gnl2nj(|`A|D)Fd@Gk5 z2LTipUpr?2a~oxKI4giE6rV|@1qd|Ukkog^q}@OHGf0hTdT0rfTaRN^nAlxXJu(X} zdo0{n(~AquH5@U7f^D46xFGWic&jGo(7Vz zETk~1ksRoQMS@usbiCYi>_OSRM4Kp1qWKd66pXk2Ua?=XT~K{Rpj5Ve?o*bJPgv=B zT)AeZRoV)Ms1LdTi3h|%mwyLq31WL7?$&|8^+?kaeYXUhFzKLJ*OnmlPGfjS=fukw zJ}^;!kN?lo@3n}IRb`nouaReRw@Y3iFof@s%_P5CY{w60{^UcyHk7U%N+6LgGrq=! zdzdVG!&P&0H?0k-qvA!m+6>AvMlO}C`qyM__E_^3!{zw)#FW*jw>)FfI}P3DwtlF z7f5ujl+o~{Qy}~t$WDGm#fr;_ExGf=<%SH5#SqcU&!(8H)g1rmKMD^_bhKiiA4tX3 zo_XT#atdiV*tk!Ni_FREKhw;vbDsI7wFJy%%Ko@vZQ>EOH>e4(XCbeUy>=@;?AKox?}R|n2Gcwo=aNjzIQz$8 z5X=lxoy$aK!CIkQ!F9yo-vo(>oryW2o>Yv5Lq*hGi?PGa!tb<(5oeT`Vdu^g$EYWp zp@AjGb2=xUjxk3YZHtlU|8+sq0A*$`x9W?GkQ&-%QBsNJqM9*Kz@%vhFJpR<4K#JB zOHy%6BN;EAE9LSzpJOUTcBHU#C>Yi`)qXLB2Dr1Em3A%JniUtCvZ)hSex~4k#I9ee zsx8U3){oC*Wq%Z6`uqoijVCCAdJ6U86%g)(~(u;}1mPN8)h#FJ*GObii@WpXV@qGj9EHYF7vMaMMH-T%^>U*ViC3Izwxu@y~YbSm1g$+wGL z3xm*^^YT7;u5Pu&w*?Ppt4BVP zY5MQ2g=f{tMWK^^t%V0&UGvA^YOfVf0;MU)W6sZKhm#TV-Uc|FyXpmS&h2OE6^xrZ zITz;VrFK=3!{O<%WeTB_VxU${mFZc<8#7M6k2XfQiDj)cbGiHu`W7{{)2_d?Zt1?j z50I;ThUmt}R<7neWjfWal-KmWLJX4OXT13NTerRYycdk(^PlpcZNSatG+`ll*N zD4jj^7{U&7kjo8Er!GWAfz7f5Dfsf)7%Rhbmp>xiS@kQ*4x{P=W5F=#xONMv3>EB; zXDpx3ylt@Ex4Xyud;gkz z(Ru<`97-`hp$3+06t*>X`)@}z9lhMT4ac@qR&K5+%H7_vyjE8niQ+Mes`766V$b!8 zZXBIypCfJ!k%K?!2w?1&vSuNUVrDqUXn60gm|txVVI==n|K14N<-uLGm_14$4`Xby zYN2n{Y`aT{=w%m(CCeVLf6#s4$Iov7&;pn=N?cdyA&D8e_fZM6%E5Ndu3!Fv5h0(l zqF(vjQimwz642BHXNO5hz7VPB$}kXZj%RKnAfz7$Cg03p;E9a|lFvT(<8)(as+Zmp zuam>_Duls|MIy=!1awjvaItc-kYv`_^UkrI2CKQUC6L%4Byr3g`x`vtr}lNJkL&BW zq1PJ;1?Gf|70eswjQ12a$WR{RhIQ+QMlY#6aHhg&0c|vuf#20GKWr`u=<6=QtB3Qq+^L66>Y6?3m0MdzaBu&TZlqqJVCV_&o@>&DJOxp(9iE^ln5`aQ|kweEl0qCy!b zsJbv}st)xdJ?LBXZ@a3rySuvHOg|6JMF6pMXemM?Eur@PfdM&_oX0!u*X8X=6k6RU7>W+L^vJ1elR-&l9394uqFXTNu0D} zkBUR!G8ZgrUPuac?CWx908~b$6^leIVPV?yim$45ie=1UgwE%G)J-C~Z7Dvo*%ze5 z%NZb(wy>EspAt_F?UG3te?e#q1R&#Nsg|5C$zuQ?inx;>J|8YSo zE!q3o*=6gR{k!G_D5bJls z3Lt7HKeUs(32MM(f30x|&Ro6?sX1-Q$Zl=kIxFt;Tb_x*_pVCe4eOI7+j&(gRdY1rt9$eY+dJEsbOVHCP~J8>ttzjj8?f&^g+^^ zX5YaorR&~~#Pl}FG4dphZg1>lH}~${Q)sc$dJ6CB^cN)omQsqAvCjO_I|qGT>3Fap zCOMBwzR-O$5NE)&omSP|9YY_hCl^265~~i%fFf2$HMK8~T&N%0uYb1jqAuW!oEK77_5@678; zSC&<+P4M~I7eHa-K!CctAT3}&m8xZQVmn;6h5Aa4>?LNvBV`K??>(B{bCR+7I7@d0 zUH7XN>mkSG$6oz@eY(1sM>`k;S#x*Kt zA-?{-NcV9Xlp?QA)PVb}mf%6XP$JkPW^FsHM*m+?J8bCA}#R8ypn?xMF zFDa|Qs2-+^Q#O8_tOwUixp#MYXkfW0K`i%6>xS0w%Y{BQTm*II=8(p{>IVg9*1}Z$ zBQzd+i|NR$+fSQc&nbKEewe|SmpLW+san`>^y9i7cKV+iZ&B8*#ys(Lxsu%HEkTNe z6S=XvDVda&M~*Eu`ws`+>$!??NrmrY%H_Vj;nv`O<-V_H{UJq@=dIWN>jC?Lmb3Pb zPKoj%KC`S^xLvS!cWmL#9n7ji8WDu>TAtN zNQ$uuc^h#|&#WM#Ff&TAlt|Q@-WN8|m_YE*jX+#E>IXuYA^7aM&Pt?wp?m#c?lY4& z-0WEuxB*-{yH;`*B3?R{{W(qcvhBArd0QR(iqBcmmD9+ zsLGfqyUfE9_&u*Q1De!&#us1BjN>QAd2Q(47l)4tPm@hwsj%L6j-(6kts7A99^xxy3@IB zd1u%)iR%9q7`oDH%EQ`WP?~m#k-{7bFRKffhmQ$f<7|8+ALq|0}RbbXL?g 
z))-a~0oii--jja^REM6375ny*so~*UtJU{!bG-*n$%BIfZ%|dExn8xgqIDO?Lsy9r z&QlppLX5_K{@3$gKl@4%dcwS@cxT}aRy+P%x9E+)bWE0aDxrPQp3>7`nLbrvo6la` zToAe1gO5`)#yhICy_~=6*!d|zeE03?h&jlb*)xhn)0g9cg8cT9vyS}V#~#bgy6K1L z&HUMPe|@rdy6&A|T{c?Q+EHuak8sT!u3poB#5pLTVT(+?_bGRMJoRj5+!%7xh{!M-#`htckLRuE|qtUBv?XZ_Icn!_E8Tl7Qm*>P+V^zrwf zKqAk#9smzmf+ir+KYnAB5Lh2N=E3MoS^}3x>VwXP!UOzpa z&;r|HWM@gneU1hbEgSR>O)Je+@_mN;;rCNVhunj2<&k}jyEoP~mhge1>iCBps<6+f0bTc z_-gGCQHmwTuH2Org9K0dsE5CjO1diL zz~z@=<0NJ)8uzFEiGec&b-y~i{jAspXN8N>sZ4C_D_PcXQ^<68t8fGXn5Yw}-D%}V`+^XY3jA86Bqp9j zLZ(2zF3w{>iN^S@QVe(j6`8MVk|Vbk7#6qfCH1uePp zRAQA+tIM6KVZ3sx;$(KsKAoW7Szn?J%23jmRK{uKenUJrNVB+j=OUT?^^bz@TB$b- zKR)m2>+93_8b)=w^QJACNB2g2e7hFOqO+Go{Wl&E8EsOfj{m_k?0o;ABy|qdN{F-P zk@JE|DsJD1f>bU3i%?6L2DYQwrB(6lanL^=tSNw=3=OrT7IS;J$NbFGp1Q&@L33jz z3>d6V6skG)9PG~%sb`bygbgI8mOqEs*NMIND#jN95|L*rZ<5(w(}Ei8TM^cT?ec`y z)VMyzwMOcKOA3bj#!WpFGEfOLPRHVGDYZM3R~v47U~+ob=-l{kdPrbqY+V?XZbDK; zgbKdp#-*HxM3=O*2)^#TEqRUI>b3mZ8mpAByNyANd{_MxqX)CkF*1a}hCd?(ZDb8H zD$k8err>gNjmfbcllurEv-`X9Thn7wX9zA`hdH^19p!RYp2o_sY4`oiWoNCQ z;n+?IvtjKm+M`tUoZHOIUMD|Exh}(Fr0G)oqN*sw5CA}eK7tRxa{XRwMW%H4gM;b1GdAs)+{ z0sr}4=gwmepOsc$%Y!TzFywyA~X97RB)db8nQ=SG%4bDsSjzsq-@}-b^@&1a1sT71ys-#y+};I=-VE2bO_2T` zpJrb*)w3DPb)=gEx_NI_L03J9OM+pwx2tPRvhQT8m%tb4m8M^oWhCqF?@PoQ<9W=x zK4XfCo58K-tXdN3ac{l%CPyV9QcF-i7bU6R+_|iE^slXNjOKV2|6%;#f8cHYvpYwv zV`5|3hJYJw?x58Kd1vjK$}D2-YC`^B*NO+8{G1%Vc~Fv3U4DYGDLaQChTyd(Xsn>6 zMWhs6pW{}yaWFv&rS_g|cFji%nRg;5W7S!KXf<>Hb=H-0OFlof(nZgOQ)h>22(}v1 zb9#DuyAc9exYDHBa=ZrF@2uMT_dIjpZT~W@e<8%Bk${lRrEKr5*{Cp2dgmzL#Sg(= zd;9qVzkKoAJI-8JWv>Y?DH%MGVdxH_x9#uI`MywBDEoXghGBC}WuK8}Z@BN?ZT7ln z!cVn-jj@OcKg~V5;J(#tvHkRx-I5VUQ?zsW$e6^=fKjIV$6jrLsGdDv-fD_M=JV@P&RaX-IOH;^}l8AC6LlMKyek| zCS2=^Twp^7T?f+)Q~{10Wcv&*NnCkw`}Wdxs+Rg?*t#~@m>3@9@5Cfe?c(nmmirex zXr7m7axeIqq;vSlC{QH%!|%PlpdLOY98adM-gvFtQzB)1X~~@UZ!!8{Luc>uNxPM{ z;4?s*<|mI_T`sz;J0G34Jk!FdNcZk&{z$&>WY=9#0YV3X+}68pk6t>d7BCj(>HQkc;SrJ5m|qX+;pKD zbpgD)B6<#&c4ESI|E`5;KS>VV)I1wl0@8shmZ}8)vu(J!qhp{7pUdRiw~R@wgFtd^RgC>!H^14s7c-=gC{)e$L-bO8LESfkSFTVUK5QovsRJ7j=h3rUrnphfmLL@#!8^E!?` zdj5=y({`AHo?&U~^#|Kg<%{}O&!#VYXQ&;-|7qYVz>lDc8yN`hbL3k0PYL+3{5NdZ zV3B5=eJ$@|6!(rwU0%6-J9J?iA8!{&4|wJr{dtq0pZZ81>5%L4=Ci1ZNCr{P$Q;tPD}f3wwm(U}t>tx)cgm@#+^&$^H@_(6-pqIk(6#rIGub zPRgj5ytcO16yU!4pt)ntH)eZ`Dqn$LoIuN%p}Mbsvc>pfpb*8McFn)yx7-cgnCSG% zMng=RxT~aM@y}9OYZ**bYG>Y;ePRj3su2`(UbD!gn&^G%@ST4o%X)*MOn=G*4n@%M z^wZy~GL__jJ>M%mGBZbVItjCTxN&C;!3pXVU6SWFP4{msZdtzahcJJ~LV4uYsiWN| zUN+B|HObtU9d%*!!LK)z(LC&l6ikqN6^398%#=4E$x3MmL`iv|KuQ6jg=ym)8&nU9 z72odXuqNDxRu1W_B;114&t{k%molZnOwZf0SB`hM)z7Y5XRz-iJx1eX_M|L=n>rjm z>$NO`HZQvnX{W@o0R5rCrEfht#U^*31-9RTmtv(VEP|LAw%-iAr^bRstEcPg*RKjR zttoRu0-tYqx%a1M&Ay~jo0v_lsevN2Hy=Ds{rz4bOlf)B7R{S@U9WpbaRMqMmV|q( zfEGW+@&;3Ov7DKsXNSfQm=h&0U-s+-_;2q^p4K@E`@0ZoU-M2^&YkBaM;SH3y}^d^ zfaF_OD+B6`39E=8P#yuSL@CwU#P&u@>UIUUs;a7~8*Y*I0 z8xUGRK)L}f%Q0)~pcSW9*h>i36gpGkN8zpbmd=|`kf zrgw=g=*KNi4mKut-(n#1(`Sv+w7`6@tUfPzwuAbme$Fp+Fo3Jde$iJAL{1gJd|;DTfEg#>;JaSS?v7U9#mN41t0Sr zd=|nXNsuFKMBL^s8?qu}yT$UCF?XIJ{NwSVTFc@pE}d3vydR`em~74{B=w1hq9sN^-oA`@Z|d*LL|)S-fdLFL^!H_LCCAC;aZm)%Qlw$QH= zysg8HYYHO;i%i-jdXn4nC^SWEO?czv6DWH2?vv@PEX2sT%s5^-r>NsCnkhbsVyXN$y(8G2guY#-eHW;{*Y)^tAQUVLa=^5xz zX;tGmTh}{KHYXJvA5cM8@3;k9DB-?-{ptln8v?+-|8I znNG&+e55RfS#EgmH-4Ds4Yqah?*PB+s74Y_)Ku1ed_;q|?dwk(B~6FvY4Q6-KRk|w zrhkZj*j8+|2owvfUmt4bK05^GoN5+z*AS>J=Ucv)$lr@X2Tc+w8TiPal9PY_G4m=L zNAr}U=rKfvdPUZ7U?t`UQXZtJSU%vF&lA72Opw|dK$Q~s{`MXHv*TDAl>|;njf!7A z{v;;4X&bhL&!$ra#NH;Zi`_G{2K z3;jXX4=@%w>xs|A6Dn*ykA)uN`^g{Ov6Lz`D)!$pGZE#tKWT75+UpYidoMD 
zNQM;GeGDgtY!s?B-0;E$7A^ni0saE%7ywBK)-=X*>f&2{@k~QbAAWL$X$sCR!>POi zh2@edAC>|R{%uew+nTJ$tN;HKA&_^v_^_Txrfh%NxyX1G;}XfeyQieXEW{w z!qmlvkUN>#Lrr(Y-|A)!l*I|I@cnFjTe92v*IF>ry<&+4FRPPh({`wNN`{M$4iY41 zB=~1co;#|>X6E_>Y=4BtsFVG#HNB>r3N$0e=TkBn7qs7vdhXF*nc<026iZ0X4h3E`oTGIJD9)TOW&aMHB$}RrOE7TMLn!NV+Hq=ExGDckf=JBv@tP9Ir8$F zI_oJ21GOgVDotIv=ONs}R2QmE{9b%d=_%Ha_h&{?2ue|p99e{4 zUN-+fj?O)h>F@vJBMRNN+@&y7bEmmXa#teM+!x9vx4B<($t}6eU2->yn)`HNf5k|KX;s&i(Grg|?AOG&z%$UADk z3xx_J!3v)3jvcE(kh$ zZaf3RQ!LicNSGaoAq=)$N3QEs2c30Y5}G( z(z^H73229&1v=P>f}tXv%m&4fH(woYh)WL9-1yk2vC0!Z?CCCKUm@Y zpEdd4kNtwl)92p5^(rn<3Z6D|KBlSfI#@b=TnMrgfJ({f-`IfZwgh&m7*hipZCE~q z&=r@x+D{fUY3h{2gqkNGrEUcBrS_DC(+~ORN$2dCb5JS(iu4Y? zt0|_*<6wkW8gj{TB-kT3OhSCyw#+aQ&*?viU>d`ND2igwd6mciYfeeD*$v;rMcKu! zn-}DAS$Q;0^5}AfcI45-&y}Cyu=sfU?86y}7wn0VE&j{|!)n&3I~`$6_BBg#CBND2 zs>XO@(ect*ufkDt3-YC!3?NtgXp^;-(!bu_*_pIdn603Q8-f`42oMH+2yO)mKsfi#14M>q`TP~(m`SyEJc@;);Qf47PP zO@vQ@Cq2E|)m4G$C@;ePG@Z>#hg8Cs!r9%e%=$C-*$#g+Z8(qnP}+tGcZvxL7cGH` zBi6-0bfb6-GnYdm$pZdLx%@o*7EV7M?8TewG1IFA!=AxY+5w zrGMPbI#{AZ`{sWk7n^H>rV35UdKhIJ3*}oYG`<-@#=xkOk*Og~tP{^UhG`UVtlst< zdu02F+#VDe z#jB&M9-}g}s2B~Y@q9!l`l%xKB?zCQJ03Fx!Rfo(x4h}c3Ug8~GM+*yl+Kp~wq+}% zI2wGxJVpT>LMrOAC!H#oLMmuue5MV9B*E$MZD4W)HUPmm>~x^MYp;hg9i)YxE!)2S z@tY`w&oMH2C~|#|xACXZ^lE(p{$%uPu14C}8z8XSyawSbOE4U)h1?lYebpqXUiSLlW=){4nlSSW8tq-R1N6 z@&2-GTja)Q^Dqn6R$83QGJwl9WODR~DqTeQ(&VA5o3TUK@PG`43vSV(66Mpw^S@tgwiY%YUU@yhycuIX;D0xYoEe;lxc~ME9(``!ePhQh(Oy9$^0Q_41ZZO2k~Hfw-hR@3%B(!1{|sA zsH>%$($M#BL*+!_S52rmSH9P7_mIb7J#N7JAobOIzuqli#yi72p{{8{vZKl35$PNn zgkZib@o^;TosSNpMU!YDQ5b9j!WR)f9pssVPW4R{KY10F|6lAl=GD&#AsYjK89>Qz zQ-OTq1760dmhh)IK&uNae~s5>Gde#}dH~AF7kd$cWmt6)2ouQM+n;%JWZC}ja`?0n z6~rRo1`YrVezNu5BLOf|MoZTAB9oIk+wJ?4 ztLV=rLpOOqu1Nu%{?zVgd{;4ji|+N@!Kor?CTlxqDNx|1qbF6VeP>P<3%W@IL{2~9 z=RoIV{q%ouArVj+RFB1M(omo!0)_5my(c}8zI?p;#eDFE<5&)UZ6x`GBNXjUkEk85 zH@ceM*9h+g?>V5feZyO`>*N1;d(q;jUkG2{Rmt24g?q#N)okN#9f{=}o z5LQ`&yd2SfZO!!NY`jkLU0bu%9$-ZW)D^OT3o%fp{7Twzx?3~toRROjXDDY#OLJuF zxJUM&fTpy#k)w=w_mxh8lQPVRo6&mMwH)TZ}957vWRl*j?LVhfeX9XOODY+AO9gEP4uW z4>?afh^kJ6-O;z|E#H1?Q7nOa$mRHM{j|?Lf95N=8@d&#FrHDRl5$xN8AB-;d~bSH zrw=${s7T|k&3wN)vhV|uTtaDX+>$ui<5bA%)9pqs$Sb8A1TLMbSkb8Z$|lj_Ym=xl zo42#VM<|-!9I+_ z7`!7p&4&~bNwmk;1FW3#K}?DwEq-_#oRj8*K``G)J9~CRaC^Ts@DXrtw{A@Fmj$i( zTtHXA!9}{C($MByv~-(?U7p1yIf$Y#mY;+JHYO7xjniXTsvkbyn`+@Ne^aC_tt|_+7hT`P0)?sDpf05iFu(7pF&)C=v zpvkwmX|sulcs!yXEX$-0ng~thWbPS7QjxTL02LU-i49QI;!1Z{=&L|}K@9+Szwqg6 zLM{Hj`EUI59hm~+JfzvR{BG0o2OrhaSLvniSa{@(y4Hiy+6IFUo5!J@av-+BlXq&e z4QPm|0Hs~ir8p+&h<6lISSEAkE5($0LVI^1f3#TkN~;DCmzaS14}gQUsmMy{F7u?* z#(0u>?zxaM@|t5xqs0xe)$>M2z(gL0!?`08un22iq(`Vgnc;^KOgLxt#%qV1&jZQA zWz>!K{h&07f4}yN8lzm#@H6Y+XOTnS9Oz01rAhwPXpqRTzUUS$8Sv^ujFiqdmmrN` z{#Kp|`E#++2`UxZ{nI-vW8j**$`c`mO)+1b}nH&sI;xIW~ z&oK~i(W05m3?cK0oy%U@G^ktJS7+SD0dXveC{LLXXMMG_*VAKS0Kyqd-$$066^iW+ zae-=mw4%mbn#*+f>SU;$5&P<+f$8t2?xO)z)o+)m?k9SB+X8;CYo9el^hHn+&2pAo z#qu!&S;^v`&<4qz{)#c=I|v&CUy73F`FsdGa&zpcFXVDd@X8#fuoiRgS)Qa56C=6y zG$doY@u#ogTy2$9fakTLjTc8x$085tHyKX@JFHH+enaV^%RIQQ*o?hKemSEpugmnn zLWm8nIKV3B{`ri}pArZQY~CQQx*HPd))^_nDG)?##}8MB5!~HPtsBNaN2^>S(=~(|5moMJEOF zz_QMirB)nASf=`i zV8uUSP-JjqPoWqIy{hy&&zk}}PK818ppu&p5r}_={lcB^e};)EIF%iO_ji&WGa#!t z?v?V!_+DgnZSA6V@XC3-pav&4f^C|p0^ZaifrRQ@5o)= zqrHYB*d1lxwaBBhM_ZPYwnR0?znujw-ADVAmfr2|`TyfIyg@Mrox$X%0R34l#aA$< zgYRuz^jQ|+nEeC$+IFDo7%KBexTR8^n~!}g16m9vvVgYHyFPgkjMR-%G7`1HBBFqi zyXCQP$xLXTyQrT zgV-4Ln2Zg%d-U2>UT72$J?Jn@-L4P?=9)-Ze|fw z!=Vq2%x3UOq*SR}&r~i$|C8_-@is5KaUWS)CA1Qn)Ad8 z4o%agOw@a3{isst!ASa_F8(6ifN_V}*Tg0J*Sxc^Ps=pzOVTxoF(CTmwX8-_{Yn>K 
zj-avJ_9fkAT$g=N+ACZyI{xY8v`Vd%jOTHo;8bLP5>E@BK<}m!0_&NNwMDYn& zI@_~Tuu->+mGu#mGcV91Z86qO<(!FLlC*=9GTAL0!#3f5I2`0nTb<- zGV44VJSCY4JXqp}I*ZdYF!;Uk@nIKXlY1M)@e7=+0EVo8XvtLA-;KTkW}XhKmZ$5Q>M1S6ZZBMp}u1n zrnUJkO;>Fnr@NjDqch569NUyNXJ_eZ8Ax|Fme(^M16%;rLQp?$?;kvBFkM5E%U5%7 z(2^h-6hL8vs?(b|_t`PIhvE2nSL0~W@~A7VU7q4f8+jx9qHR;Tqp{v|KIAw|@7-QR z?ew&bjLT_J83H+1V0HM%LID1dcx70L+>qVwK7=9e7USuf!KRimqiKXu|KEIk*0#KIOZ-8H8wBSwCAPut`@bZR1 zMY1Cc3pP3E3|n8W`cBO%#@o=?S5S5g@b85#UQ{m}a>4dK^Q3Qqe}{)te5R5;$YU=b z-B4~msGn|$JURqSKahR6vO>dd4T3_Wo+Iyt~9)Sg}r6 zMN^h|#t+Lh4fg;Y17E&4^dbn5ZEa=!&z|px+EOnYGdFp zMCL=i8%dP|_vc9-KYMa9`m;)^5!Ll)#o3F2zlG7~xirg3p{v@i$s&Z)vO+A+{OcDT zV)7&{=R@)tAyZxr9gkrY#CdE?(xdH;kb~f4Qj82^04}glmi4=#ecuS7ce80uQ>-Mg zhC>7=mnH=thw7o#%j{wjxBN$?&iti3VY_-X9z-{Blhi-ODk*!=(|;p*ptH{u8w|q~ zp5|VmNmoE1u7kvvhSJ&&4pxxf*zXN$E~l+5&b)#-jQ|@TEjlXo-+SXfvd&jqay_N1 zB)Ogl^0PnX<8F+8hK@>-V>8RT_R6eJ+}>=s4NFIwAS@xG4p1qGXtA7Z(e~14Yn_oi znM?5<3)Dac=b8e8Yx_~8PXkKsoe%C$2~othYkS<+ce9piiW+oda9QI}CN>wE#mk@7 z^I1{7BoN#(9|EXlv?eqs+dW2M!(?gr!Te^y8#`-9*HqD@a?xZOyHl?scIkE9L0Q>l z>*FMOGA?dQIX)Z1VjMz20qrY-@EV8B&Ii_ORagZnl%dVW*x-k3&dp^S5Hsb6ZYkbg zOq~8ZIabi-xrbR&Ghs;-wG6zRKmX;P_kPL`?&irekiQ5NqdG9YiwV{`cBT59V*ss0 z9`J#>?b!06elnnLZeHG+_c^B7GmDt}H%fnVSfz0^bR-nHu?o>lIT5KGhA&$%o`B{< zK@~cy683NB?y7;~ZOW}a)>CMQ&yA_ ztN>@bzzs;@CgycvEqNF;G{||(RM_SgPzO%6K5m#^)^^38B>#dr`1O(G`1>_kp_Ou$ zJgyUz6GAOP6k!49BF9XOahZLKT}=*^N-YC)R5B8zy@Eu;0#7y(hfBO;PPhMn8;Pi87f}IYu*1RAs8lDCQm z&M{Pmd^i1ogaq26XtC{?KPb&?^AHa=e8{7xSy}Oh8gHAwx`-S!B^th~Ch=jPo*lp& z=Fg8?k0FV1BP!sF6!{2t+((O}OB=gC@q-ma8xXCX#yN=8zw3)a33@R-NRUYqg+uc& zyM(L5s*B;8$Ao`W>%ph9lF)(0J2ItNkki)hsv>$ zTv{U{`B!bpHiiIqN0%tH{ZRAjJWlFyCUy62*=@{1KHzvbxe(8+OrhMbU5_MveJX_y zT(;559yP9HJ(-Eup~eiiDCP(;jtPqh48FLbBi3oSR%)z0@iG03!y@y;gui($ZyB&Y zK22fWYT+xl>s){6@_m#OwzT+B_Jc~{gJ7Jv^Nkx<#;vYf2b_po3rXg*{`d5W_z*m+ zn9pN45Q^uD%7|qpTC>E$GYs+0Pn|sJ_2F|EiDOoGEDnCyZv9w)_YcqKh%duP;>y`G zRr$X(MJXGwh<)`gnrCM!C>0SqZebvql$kL)>891`Cqwcxzie)by*prC&6@JlX&ysV zgk;Ri4>~5Aa;M5G8u5C_O<6OvU2(k}Ye=JN*Gr()A+QVpsgx}*)2m+Kk;Kn~s`7lb zXjECeNzTj=2=t-<|&nGiy#l9-opF>TARtu_=FxCBATD0p)*>xS4h#zehsIedVWnTw{l87F|Bzd?|p zosSxplnXKp6Tf`_T^P85gz;m=$kv4^5D2@q7_vC?kJ+rocYvc;qFm!=fw&Vnv@`Bz zAj{0q2DIHrwe~5-kl6xNX8jfm7I{F_`otZ}3N&$N1=Vl1(K{2AVq5>h)JLl*fm zS+M_Ry0y7IGSI)4_X$2p-s066!#QLb-cQE*Q~D-(zHP~atz#DiN=64Ue-qm`YZUPC({sYozmUs+#w zOT7va^g2sv0upWY7qitgpX%^zlEpDn>$VX!ApDJeF2x!GO!xt6H1z1hS2WO2a59UK zQMN-m)(-KSug}?!ZPN06i<0HTr~BbS82!gFFEgUDd`?ZF=6IIQB|&bGdRU-O>OCh) zxzqI?wtxDs4;UOS>e7{__9Zr#Z(NcmS%|TTyk;R;n18@J`7a4%&x`-C?sHbCY{JQzWDvUT^ zoIlseEOTTh9Nl;>WBVd()QV=i{yw!%2K5HnSJUQ3FY~T~7$}QW(#AGpDJOoPE$=~3Yn%Syxi)O>8_Mqe8(ZU2jig>A+S`lj#tEeR0R5q!`&IvY0^|){mK7@spg`_(58AH+m?v_vnnI#<{C@Z?S+6;-!my= z`=-iziJk7xO;_(?^X|QHX#eCJ^e!zZZA)M%T@lpesFMR-nso|r#)YhiAm_`2BWe16nR};K(EPX z0F=q6_fj==5#uT8CA%eYnSQemA<~YBn{&+@-^8?v{zj%Los7HO53LGwC{vgXpdbR?$ z;uuitKoobx2NtvE+Yl`C)up5=uAJT~w^_net@Iu4wjKmUzF@t}WWxf1*JSnnx<#Ba z{d{3`)_Jh_?o75sxh7&k_vOXjrZglOF+QAb&*UruCq;bmP1NE^6^S;hAV5mwd0@H{ zauArsrT10#8pYOI@2{{Sw56r+l?`pAshqux7E$Kv=O79(>Q~Eln~1;4MQh>20Z3w) z`zFRiMOTdBaa!KeEB>lOLoU`}J_gmObX$jz!5Uwzz>v^kZ7j=~Z* z{Tc^0#LRC7e?7Sp+FE3M8S-o2&hqaSLB>L8>zN$M5qBbMHhyEatB32A7B#S7(2J@h z{XR3}+}-v`40iOUD8VUNo+eQouZK=;w6MSASX~<(+=lUoXPy0PWlu(W@Xy-?A~Q)X^` z!P<>R=99lKfNG zF%J33ZbU`eE(ZHKk2N2PJ9QN-h2>J49_S zt=Sa|wb#hO4Ir7E@JjSgygHT`gP=%t7Yx@IXU2weiJHs~40w()yIxHl;ASeUr5jBL z--lH(UD`AZ9f^3`S2+eCBg~?stop4#dEkc|7_v&J}-m{qEp;>26X_Aqv1Kz)aGG4R971M_Tf4XhLNJxI>mFmlHiV6%KieYSo zQshN~8ULJXLn1=`K#r>cB6R%9$5-;KzNyAS6S5;G4jhyHrET0pc4I4xMVX`O-Mr5z z?^Tk5l;V^Nbm+k;yBB4i8pmAKJ%CY~D}TSwjI-}WiAE{X1geJhh^!E{TmDUY-_qBo 
zm_Div;f9gY{9}^jfltu>7 z66A4pXr1-Q4MxM!oA!v2&7n?JLTq~R7W(C1XBJ(XFzdh*ThMw3 zEQZnooTqS2#`hj*S(LuwdoP?73p;i7>08CpkwnOGH-wmhl}+gBz*SzSXGnuv1i9mv z#@@dB$o)D>PR5RkcWAPUi>Q59vtC@l;P_8hB2Pj%X?wKtXBA<UM@#53umDS-;pz6>KbZlr)F~<-qC>3bZY8C z)I)*Gf#DZwESW?;x#iyxW^pN8Rt^G#;k2_Lxa#74((?3~vkBHF=&gFQ2!-14W|XGO zsluTgXv^2l{z@@mK(QIuH5s+bHlVe>_$u)-VOJu)EaiJ#hx`KDy-Q z@qK)-!1@O_Ce9BuHBf@X!Z z1S*c9237e)Dy4e1Z4F~6`_%P6f7g)4j-RcA{*3jHV)jRsy}ehx$^aj9#*vtn4f&;T z#`NKtDMp5R*v=h;ihjK%@3RdYv;~R&$8@CD>wjxqXLB~XRUHc-11>+?3?La(#_)y+ z@OJT}!}y~-k}C+t578g!s4D3f+v{9tfw-cs(yT#FxaAxw>+!`DPu2m$Q9o1o@EddiWJB3#%;T2D`zFW$jmQO+fq zdmf!De$9K-+DHP+93@Xq*TPIOLFy>{oo@NowI7n#JN6$ucFRK%#sxUF3pP#(i@R(F z2GS(zUghMaW<-69dD&Y_p%HkznPDP#2U($peFK7y=b)1bi-A^>lDRG8Ufe`5;(WD# zG_Mf>)#8%Z4utK9l*fxCFm+{d`FnJwjMBQ0iv+}zEu`e=R1jH0)PDg<<-+br7($;O| zcp$gA=4gu$7vCI5x)sIy4#)cUH7Z~?yuyW}%{Ve!=wADSv~%zD=`or6Zoky!%X0JY zC>2E1F1#7*S4jmz#L+5Ckp0ppKycEtLM`hP9SLE*+D>h}H7v7^E1!xPqD&DjmUjISTFJsOogT9C8h4$PbN zs9tDpTer{eSp;x-0W3o${NG=zlP2;d@=gOYEAv|`hdv)BR~l%I7_7PF?G7$SYoB9a zDK+`~jQ5m+Fo$u=KgcZGp=pq z-^s2}np`w1RZRs7yk#}Ni*a{{f?|990p1CnWMh`M=U(ciD>`13m=p+R@*gOhb_6%$~Agr+abTt;pFiMGm|yZ&P(+D2;8ag+Vr> zQMT2j@agjkLutT^B@Aj zQ2k+MEEMPS!7fFS4weDDj1WAT%k=g;&-_U*Wn+EW|`U_bY5yP^dCU^ zO7DF+vC8!9l%mLMe2U`ry9WG_4>XkV1>pb37*bfFehCsj{cg0y_k4{^i+d7CBY<-* zw|prtCM5wD(0bh6{=}^Ql8851yta2L2o_4Z?tQ;n$2L(XkKP>30HRUOSvIjd#lJ{t zClxOyKqX5)YuJrgR7H7PlWV%U0*0&6_Z99y6-@^Zoj&8Nqw>FW@Z-;* z(aZ>-53j$6P2OaJR{JAv2z7cjbI$|bL>JUlH>zt;gpi9aDzkP=A&g}DCxcz-lK=U* zuiK!Xr3v_}-_NfH$gW8a`Qk4@@6Qke1Fn7 zO}g-Fb^;SW)Jj4D`XzF^;As75NA|FPP81@C;7%e;Fcn(LV+mmthg-s)5OgD)4~P6BC26Ww;yJV>=md8KuF zhM$Jnb!^=?^EIekh+yn*fxs*^ED>Z;lFIAU^;UbY#j~Ax`P1f)pNgR1SCumd!)4Gh zd`^ftE2}i|qR>TJ;Bu2w!Zi*7eAj<^PI)ej@0*)O(}DHnG6ZM|+z&hOeL#}w}|7AjLpTxj~<8P zv7u`<4MDCXhW4Ywj*3lTA|<(8W44EI*ML_`dqgP53COHC}U zX@o=*SPv*-xGgl*2*no*Ng?ZL?sslg5zp^M@YS4&_vQ1z0%okAv#u1whV5`>eLX;R z%4WJHZ1;~S`^WORgW+x4Rh=u19-8tnl#3>uIUD`EJ#F@vEUH|$wJekdq>p4E5mdMDpA2JLMRI;Dvp!$ZzL+-OSL=fa@vWP21Vs4auj3 z4C-g1n;IMKvqP+V?~Vn|egL3NUBf=j+@_Jh!uPL7#xG10xWcZ09gXeH(%zM(n$`G; zd>@^U8CB2b?Y`U4{T*nHspXlSo#A8WmA}bc;a=~k_37hx_$!ioX-Mu$lBYJr>vn1w zTfJ`u$yjxK zp68@3tp)mpN}W9$xkIPy=O{OHG-k!#^G}spp8D!=id7qajbS@x{XTFys7s~oL#yKB z)`!yI`RrdbHLx|1_42;o8k*&xaxpDvTWHTq#pZtG)`PD4MonJJ+Pg+rmcFxq-@q#5P0qf2x>1Vr*`@Zh9Z8H z#V^rw8f5Q(>l9)_eDa(g_@0}y>tV6-GWv`mr4yX$jnkDGY4wN6OHJx8Ohi6GOAvO0 zs4)@92u2|hSrcgMkHCE*)@L%Uxkn1$(f+o)mD=moK*J!i3f_9unLO13Vx2f9-UVjn z>_FRjen_%WdXq7eAe^&8A9nmT`L|#|HfJ*+%F6uOB=fzV*zA^~rDqg1 zigDzbiQo&0zYbRgZpcZwQ&X^L5LFg97=O^Q`yamcZ2=dje{*y5y|LIl)3LnT%Z61( zj>*nNVlOGzmUv*krh9KkEDHUXBp6gwj9BLqCYK*-CiW@UL4e$G!rO3x&y+~0pAbk& z6dl8G1G6L$F~2Z@JU~e~NJVB-7g}nn8fC1jURPdv@ojm#*}j~3;r~6ULuH&OxF5mE z<&HFS?KeW7lt{Wig{Ah=1BFD(To+Cyw%UXfY=O38A=Wi--hk`^Er#M7rDLAdEEFe+ z+zx%fGcmY#xLgTL={PlSQ=LK-|o!DP3FqrQ~@y9fnqjTG9_hSunrJlDu zgs_(&ADxY2IdE5q+Sp-%xiOZd@b&x$sdn={iGiwcI<6s)SqnOm|x$s zCtVSoI5UHexJ!Pl!!!X~8%whhVe1dMP7~D5?-a)K6jS}ovG3(^e1-o1;qx@c6~53) zzE9+zguzv{wQmZ4-*lV~N_EO#c}AHc0V9-m#Lmy_!y$vxjF2LOJ_uA>lhvSi00s0| z8@qMYwUxT7wl7<1hzc8*%yr;D~IULvJ)vzHV ztR)i`87bt5p7Sd?Oqj&RdnIKX{UvqJ>DYK zACm9xS-K2~iV$p)$+Af`xygE31R5gt_ZK&NtfKv5l7l0IUKrLdoRIN-_EYfW zdD+rc)nk8?i?=~c+0Kqi^3x!%!CkdNaGJ*ypZkYTi+*$`)8HUmMH4dbELqnz^Pn~V zdd!VCk_0x;1^}ZG|5ZI*gTAuah0%WS!CL`6-VA2mpZMV9(hT`Q+zs4MTS`?qPH*L$ zr;X7kX9m9e8-rC+B->`lU0{PPirA+VTjd)!T)&>Zak(?*A~cwDH`;^sgYVU&3xN@w zMv;fh-lLc?wIp1$pW_rqtqMbPlxHp2cjc`GJ)~!Va_wenth9Q1#BI`~JC=y>+A=v` z8_fnWs3K&C*b07$3xJ_+PR$7YYvgVJhh2?&BBS8coN29;{nX?8g#)XHgalR1y52d?7M;$!ik0 zU(kD4u;L=iD?TY`?TPKD&gx*fKV_IY{fZyiwmk)IS;FeE{LuC zJ1FR-QYD}7C 
zF4ojwusv>m`o0bRy5c8nYh@RSFXmciDJP!&Mk{ToGOCXk6t{VXS!2e^Mgk$}hBd@!G?tKg&f%BM z*x7>)A&>sQDk@_ADr1fnAf~=d_Gy8m4D5wfDNZdfg*j z-+$?3spB!C+g6Xra*yQ6;2u{LWv)efO>jY4d0FJP z?@8X}?W^CB?%Q%07wt?bP@4D#uJs8Fsaywyag*KhA;_5Fph9M~xw41Wd&OsEa5&i1 z?JdXhitt}CED-UW6b{K`Qa*f1^s8gNla#x&e(?H?CxNfKN$5DYxtUjT6a{wmNLr(D zKWnwnj8(anRhZabK%A zJa31?EOfocAw0MQSDPR)-ShYBpRFilFU-Y4_!^%o9*a^HV!f`~p9Z^=QwwzvDoL}w zXTdFh9?As*#^>1;PPKOXE7!*PQ8<=wrd@%}m6D%)fWq81kW4<5=c224FGR)1lnyNf z>mBw4HS3z^2L_bKoCdzwDK~xFu~mbr${KF9>shkxO-GCU@f%m&mRfupek<~a{l;TS zTE|4;sy7Q@!9-e19Zgd?C}g#DTT3q+2n#DQAVcKcYSY*oyE1jSCf3F5|T@VORz+GSS2WcgoQuXFQr3;GNQuPwC4jCG@?2|nM_Y!hvhr+5GP zo8``T$JCX6FdLpQ=;P-G9-S7c4Yh`enm1<qTXPkuOW{aWa2 zgvMUM(u<1`e}?`ozPKDOA=atsR2aSN3j}9}LzRn>M?!}^3FYQ?v_Lx^)?eF}@dZm& zRg&7F;4#~)dXp0py0HL4>F$qWrlzMOgZGU;ET3)8{9)gIvdC<6S$SIRx(3~cj&U2v zrq@aJ?}i=EvM?MAETi7)xX6q*?s?Bsu*LSDnvr-82#*IeX|$#L?%vWHp_Nr&Fh*Jf z-&Fdi+Y#*=0~=I~P#Th-?Uv`5SQ)B%0eIY+B6b$Km>^>L~rk_tuM8=KAh;;}NILh;j@sLYZ|B2NEa zZO3ta-5cL!N$^^Kiju1BV)V8Mi}@8&9W7!ZXzGyAe)zUK!>+U9bPeY*ST|c#NqLC@ z9KPuKoP{2GTWfhnnY}*mVTS1JS-QpGZ)72z=fS0mnwrAxgOOi9o{G`?eVR#6B;e;T z3gRgR5mJV52x~&J-!gM3K1KRkhsWf86pwm|i}JENS({kk(cOM*K+!^QYc^4*>_7B<1L>3&E&9KW(o2WJHJe!=2VKP1MV5VcBq_m zTrhqylTwnzGx76$7q|G+0bkwRA$^P#sC z#(}RtvsIyLNS;MVdyG(HhzMk}x_46k%xpT^ec(eiaf>r*wJtpT@uGL&DKaUMm-;bKRW?oV>gkD@6 zYmurtuwk+F*BZt2zXzxuMp;=rAi5Uqw>7xHU?ydpu*to9Vh!~BDYI=rqs`z8Ppqit zakvt~3Rx}BbV}Kdq2TAmVYiKc zn!U^XxAwR0=O}r6(C@U5EiK41Xx+;tah%_}Dj+|5 zZ8Eb%P$vElw?u4gZM*q~r~zRSQz^6kIkLdeSV5Yc7A*-fHgo*D-(|17V)b)jJs7Ml z1)ID~U&iw};7T8wjn*uAR@BbhK){3}W&#Z3OMlf#89(Oa8sX`aCr>QyhMf_);+l9X z8V*;s11?Z`#%0E@up%Yvf^vzU`03WiZ%RPQ?jcCIF7yW_m7-kE8D`3qxzBmxdLq%>V~A{ztLi=`^>lU{JGL(dz0-TF)xg{y(=O+sAdOR|-52~l=j zH+y7frtDc{ucB-3nQ$rGYmba;UtIf!-|O@9XMYrV-}meFJkN6;M+0L7e}Z&FRIBdK zkmrv7PTThhxWCNu5ew1NSQ?m(tD{JHR~m78hevQ->y@Y%h@dPuYhyged10`NzpoD!s?jV{IeZu37E{EQMYf($1 zFNe-W!(Tqz5sfbXqfu6uRZxekF89jH;lp!YDqO!n436W6q_z4tyk(i|nHWZ7oEKQb}AyMsD7wys7 z-*#-4CN1P(Q(nZO_XH^;%Vs}N6!px`!w)sOKkuuM6*uThh9_6AH+0mtG&gT~EYz0* zY3+3xny1-!U*IbuMQPPNm3QvHQ$l1x8Dt2vrB9whQS~NuVaz( z-z~)1Bh7#$=mQwjx232_tVdM9pY!SS{qw@&^TmKAF72jQ^Tk&(Io<|8*7MrwEqTUW zDGV_1-Lpp=0jJNhxwsk+z23dDO>M=)je_`NRG&5W}S=(OQ?@N$pt@h|88)$Yh zUkOJ#EUm8cR9u7A3qv4ZlJpz~pdU!PzP@e-atC?G5SH+_Vr2jqanU>uO>nZ2~VTbG$Rboa- zS=J!vLcDbg-qZgzO%HwxCLHQ?VrdI^>@JJ>z4(~Tof#&x9+{v=-=@$}iWY4yH++a< zfQ{OulG0+LWJTy8{e3~|`Buw)zt%S!Y5<1&zoB^NZ#C{@KxS67YTx%lhFSS@<;U&q z?W|}Kc=y3ZeKUVl#9^D9%Yzh57IH0i@Ns(T6Tu~bT)|ee-Fl2+os8$GQbmZ|7kwKG z>-Evskj;I^t*3$HXK~uMal>~Pr&-B!ULgzv3_cfm)mI$eFrZiV{Trhdz3bBQ&(vGp zVPOAvKt?(^_A|reGM^GwH-27|eZ9)T#d~$2TJ@poHga}6)PYVqmxKMUfIwek|1hWi zt*=0iaCUxDu8B}0y+2zv9TPkgejCqbEcVxS_=_NA1g4TVH^KkM`F6-xX!4K{9Ypmv z7pBGZ9W@#eSdI~5{Pjo?ZZQ0oigIY#xOTHc*vbi^Y)EIJOHo;cXLKW$5yG?e_mhb#=7+#0^ms<#p1R6hGWiMuP|FY;+x8 zh6C+b6BPO^%I@k6hle1_H#u`;CE%2;fgS(&#_1Xa0$%Gavvee&@RZfm)I59ktY2Yp zSX@N#s%l|genN&vnRM0S3tLK=y-YfuW6>*q*roaTGs`r@S22)6q|IMxXm;r@7>*BZ zDwtnB8SP6BFKO}zV)}{ywj%e?s$?c16+B%2$6ahOUM-W;4-{(W>b`sYm?ZYQA8k#O z!^6XeclanfWV;g>zi@@&vwe4gR0hDym+;dcRH8%785>FG!P!T|Q{p)$z~85|^q%es zjM@Jk+gWJ9sl~tSf7a)@5cy7bG|C4v>FRK1FLNW#0$fsvenc1l{SK~v(i3RNpISQ@ zY~JXGWyF{D`V@CgsgR@bnwgD5Kv0*|ZX?e%nMZ#F8Ak;zf?Xx1I90nGJ>X!D>uSs) zTYi3VDCoPpTCgCRq?fXvxx(i%!bEj`>mVugjkS4Tx@<`*D_r?B|W0r=z&*y z;sAZSk;B_6*;nSGf6&vTqlOwcq6~|A-3<<&i7)Db;h^EvS?VCBP_#gJTpn0{YTI}C z?)>v4A7mDVfwq_k91i+DL0m+$*UOldkQsLdx7UJq?BAC7=haw$jAlD#GKOC#EH~1&a|GsHq7_ ztv-d>W=~inu^!8ewpVOqtDya~2h*F~?+_g;9(4JWQLVPp^#Xqx9`+4+u;5G`suy)> znf|lHo4f?D)CryTm)Yy66z;b=yMT*h;wP~N{_)Hez7Gl?1)pH=Pn=lymE8>UXAgf? 
z;XF9}W4UpbXdQiFhJEFw#Fpfs7gApXNOC!#(#FAo2WtLf9&BTSue65lJS^sJFIJ8S zWLGtyMpb$UOK9e4gyfiL2$?B9l@?I>o%j(bdKB&}WM+N(orW61Z1e1lA&i0jvtj(IxsOUS-#$EoWo*^#oct~*iB$;S2b_(+a>=+$?Jmsvx2FI!2Bc3ZDP|SOb%o&2+_>&Vvq%_I8m{vFk6*N+ zTW^cj8<;Jc&c1S-u>Tp@9N6E-kg$);xq{*-K4kE!^^~epvpMKZ<~Tnf=0&X1aGhI# z4#%3}UF~y^^J^g&GU^sV-N9tQg3G@NhUW8ADG`BEy&FNiB!QRfGq}d(M#?|3TjtH4 zzZs|?3W|!39vReh0asqWNFt|9rg2RxPtPu7sOt@^_KL0%J_w<3A*!RZwq$QPPM5hLE7& zyGh|7c5#0|xDZplXr{4QKXczD9*Hqx0WY|9AG=cSjv*f+%N}6K$34zv31PH~P#p3P z5VZ&(_+mrrkZzR7l2LI;FoV)DlTzK}rs9;}PdR+Gu@5Z1aL=!0mdjoU@uU3O+;ayz zr(CxbeIFqf*!3EmK$V$=MGs@c$hB>>VkSs!uy=??Bc3}*EYCZ0wi{Bn0+g|XSZ@LP z%cEw(+%GdM*g{sJB!^zKbyh2}hs|t+$-vr|8FJ(*;EpwexG(oBt!$+$!6lj|&?IX7 zbX;J9_3GRA_X*?#;4uvM=j^eWz7A96b)s^yTkc+=Qd2qV2w03e4YfEUy?qgG2??#r za5{A~Dr~!GJ&>1TUqY+nkeTnr&fWcb=&$G=2B){S>N^o*jYs!iHqc*Vlo9czaVj%M z-l#Qv+*vj-{Bk*OMh?zqB{>~5ekWtFKp4H4;mQ`%FrOi@7#Z=lA=7NiXw~Y2<*VP` zg5PB>J~Y(YUrM*Y)h${5jzUluOPf;_dI!iG&f@|OQjgyNg7_=)(UL1fv_bt;{Ww_b zvTYJcS>0aO%=9lyD{H6?Rnnx@(ss}K&?BdpeYF3xI9G=L8GkjOLDf2uQs+gePk?Y+a6W*Db#_iczPmbWM(uzA!@gXe+CQ=;|`7(VX^7R~DOe*sQ+s6niMP9M|->e4X((f(HU7#oEbh z3ViTxQRf&?LrUX;Ji(%s^}gq7rUm$6fsH zO{M|ksOJvr&+E%2RAWezr#KhCmgLDn*}b49N}jt;1_L* zN#y!B7tRpJP*6jxlT!BH*&R)n$&P8*dQ%(8ANE1;%Xc?GpDZU~FXae`mjppSOCNKc z*PIod540Q&e*D}^S@YNC>oOPFdZ0(?aPYk9yd>ZxU}tjnd_V+LU4cHM0XYUgz(kZu zSP5cKlyyN>J;21Ks;YzQ1fU%Ztm7>Lqz0={H{+Ag*G?4kggd$as_^$Ua<_@ntB;u6 z&wi>f82bdmWPp}>H469xZ^1ofPG}y+HD{ipZ7lk72F5M$` zRQC5tDk*W{(NavjZdflDD$oevvpcij!Dw9=3WI9EMj3N1Qw$%2Q=Lu30xZWPS7egx zESH7R;Uy*0&IR!G$`nG8l>1v5C*+7^=v<8V;XZ!WjG9?Iw!xC~u)($E{+PTFHL>v? z#*^@!w^&M5dg?v2i2+bdhsQssc%@yh(X)3D>Uz0gP{y!mhF2pr_Fmekz)$4cReqUQ z$$!Cw2JLDo(l<^j`@7pcLh`CJ*odmpzs*3mOjfeo(Fd!_?&+&nEp*GjR%@cb4fG9O)SFDTnJuC?R6`1Dy=q6?N728(*4<4nlHFuyJSJJmHYr zgvS1;Z$PAA4~{JFGqLBt&GdJe6x`(>#^~1qvjErvAKUUo7)&0GjqzhB_ClHp{oIRJ zvm0@nItu9)+N(y1M{rO)A3GXfeq|$*A7N&el4EdghrDox{QdwhOG7#%$n%jkxR2#TwJ zjf$Dj`?Ohne1w;7zL3&?r9b-c@6PMd4|}=A*I6>079obp)D9FZ^UKPON)eIpk5T{m zWUhn*KR}*=!^XzM;fxoT$k{Qz;)H33)>6m4Z=} z?#P-O0?Tu!lrR1!rms8H>V$048N4*cXFQ`3?JB(CZ>gC5^g^u#5*P%)Y&`9}hYdL1 zPaf%*P#V&1Rj9Qp^UEng8Fr z0c#Djr|+L-H^Re_8{MwTAVns;wA@8Ux+c#RM>=*~ccu}GEQ3GyFMQ_$tu4P-a#h#q ziSl9-<*`5GaH&PU9Su!x(6}m1y5w*9mGjl}e*s|;=ld;(T*tVY8Nohpg_{pS8`>A5 zel~q4Bg{d;bOHROZxD93@m9RP(M7~?l9#u?w=emC1HPum5XjS69#mrR%`IB&U!Rt76Pa<#Z|dOUx3F~+DCG! 
zigG+647GPaUaWXidwP49_7jH9Or@$60pRZvEQsL`1OFWWB-rBF8!YwCP`0Y{I_B^yM9`}9oQF@g_i`QlQ=Zh_EuNDvhFrq z1y0|gebtDs#mlbGG>&rvHeAS2=VvXmhfTcCCadr5&IELp*q2O=jkq9ZHxC-7Wv?p@ zyp+y2p@OiQ3?&CtO&D4sbO_@jJW{Kn-i>BjGF*23@&zMz~SzVQmp z*YVQ7isw*RtQYK|yVxCj1MYkAOUTT@s3t37rq+vt?Ikm{k-5$clB8Dt!X+N}BGSyE zpM||snH`hQFt|b|+qhl<|KEoTJYkS4sit;IU43k`$6MOmf1X+Vhn*&8XckF@tYmPV zxT7(^-Pm%vT2P@)7(Q%1G$l`C>uUSgrx&Q?l_8ey*x8w8&(o7n*=MQQ{-?bKlvd4` z4jJOG4!bp{%#Y!)Rpc6TTMGzWY58R_*jKpA}U{j~Omo{QzpHflf>jISU;<7HrjXJ}5 znVp?=Uu#wk)Ajm?Vj7FlmshrPC&O3-e=xJyK79OwMEBe|(YQPU{-G#^#Xwnk!^Blx z`aVYqRkayg3F=_-y@2D1C`?BO?jQr}y&yAI?#C=ipP&pgcti4my%q#FVxy$deV%UM zt+8Z}c$tET{*D1FI)}10dO~mw#;7d4FD}BH2tY)kEE>8B5!3_~(=%Qca+p^Vm%ITN*#&R-4O z>gwyydb5u%8J=#RsbVGgE+bw1T%-BX(`**L$6d)5Oga%jVkOltCX2Wz0s`c$@4=mr zA4+C7%AHl{=giDC%bqOuS_|j7Y9e1xoMX;ax3gyh?qT1rCZm-bz z|I=mp_o?MPE^Yx_R6yB!{p==q6WKe(T+`7sj_Y}aI#Pq&GFh%0@kXJni|3m)!>F$> zEW6A5?~f*fO4F1iyOl6)rQ=)sdAMp}pbEjSQubXmPNIQmVC$HpA|y}UgvSxMWkZ<$ ztL2s0pOp57UBOLnrbdwc&B^nc8?|6AzO>H!wGgyelsP#^QA zwq8T*BWlUP`9WG;(FJJt7mo!srolaUTW*gDcN zv$f3z<7?Kt8!qlT;;`m*%NySYLiP!?RE+7$cV_>G`S^1P*4H z2>;mOfy-uKSooJx_T&CtQ}eiU#psdaPa0IjO*Rtz&(5#^v zoMHiCRx$2vDa}iX=`q?}T=CVqD77zAcXA-u`@vLgy4KyBBljqW@^C>kOfJb6uAkjW zW>AG0Bp40^b!J_o64(8`*UP%yuht5acX4oK3VlcUK|=f%-z8|^giNSOLi%g#oq$&v zJ?R_>KYzui!(Qwsr_M6bxfDF>^$C9rs9^>+$Yk_D$ci-QjCdN}sN`^Yz@@5CpW61p zP3^kUGE8GB*FIoqEc}x|LEAQZpx>onMtd)-N`fK{8kFOQ2J6}npCU<2n0J=xpdTS~Ub%r0P5on=ruzaOv#?$BFN z$u;EDvCH=%#kA^KzKpGm;U5ASYg0H2HxS{jKW{Ej$Ce;@#aCbA{O{4;5mHUe9k3Deb$$`<{ju( zA2Ls{)*kKjRV&e+F9&QB{RRDLRU(9`-LQJC^ShL1GqZ;t-R%FxzkDA-CU3oK2kV$G z7R#$04K*b3`@1Cf>hN6pzM-yrU+)M`n7yI8GdkjtdAygLRwwrC0>ns500BJwJnYz9 zhAVIS{P5lFqdd^op@MWa@Lq6vzIe9va1sx4PW?c!&IV1kcsYV_{;9vr*YahSkV7&u zS>Djg*E=QIu4eLh4$6Pvr;U|M){~T()cbWhttxyE2BOVuBF+=sVS%A@y>(xRe$eI9 z;R5HDN%9Z$EVkA?MKXk~S=Z^M!vjxOD;dO-p=*mwXJqIZ;)%M?xS^+01 z%D-lk$!mEE-4qeRXzFAOwX*4*h@i`(yz;E=Uk1V*`7F@>Z`rB;EIKnNO;^7xy(a_> z5$iUZTy-}|>biNUp}<~9?iIuxL>Q}N!}~V@t%1=QEc#Vn#5aVKCrq$3uEm1O`>*JQ zrITYW8&_VpW8RKAW3+ak(H?%;*zA>xM~g`q=agaJBRR4BuZ2TRs%R|(LhIxn^FOk6 z##{^h-(ptQ&4+F63mhoyy_&U*$Dpu;b}hqssmyS)Bb3o<4e!WfZ$ByEER}5kAQJuV z;D<$!(j^knPg&Q+0H3M4t|z^wSJijmxf*%BKvxa&Kbe3JT*jV(y}Li@RT*oqGZtegT6gyPCphcT znG^;H`+IWw4g&`(a2?acL^XFzbQ$~QJeiwCp*B7Q=>MLFB<7?OR(E#Vas;YB5~G>J z?1$J){NKy`&(MhyvaF*$bM3jkC_~{#`IhqT6dg!rvT;6XRrcCCzVmj(>z!>sJkQ_C z@-iu1Eiq6rg_@JrXBtgHl}L4Z%5s*MSCE@4Fj?A!@P@~bBe2G|X!P7UI`UKmbxm2; zd#7V@3&LULL_ql%?H9%*X)AdF`AO5(|8O2F7l6jJ)Hk0T&+en}_-EK-8JC$x?|pC1 zi>6*i<))wCKH<4T$IH0nm4;gl30x3`v?~l2x6gRFg|C-@!ntGjdY3YseSvUHgX@Zq z_K2HY_MnCT@zO#^2YCFiTi3P($YwrO|C#siMhk)ApHIs!7vl%>f9Wg_brY{CTCvu; zZVqgrOc&s*w^JEmn!+<%+Y0>a^+yYqJXe z$39BrOs@gOTCc1vB2k+l0qPZ#MeiuYPyepI;o2DU5HsqPnt`g3kwMQ2zA-Z}Ez7a;HYZFCG0c!{jKQ2U z+Y;bnB{F&VV`ABc!jM%Lc8hX(f;Ju)OPl4*Hs9WNIF6z6Qp|q8eR7h&{-?5ZMf5WG zC$C+xFuid?0l_t1pAM`WAX!aJew2tB({k!pMP%gHxlV3+C?1Jz`qGzSKGMP8`Z3gT zTeZAX8hBG3<(X>gfMNCNR@_Z7+-LarPH7uVHOZR7)vZ^sc_Y_)+IGFDJ`lrrJ;4W3?*!z;&4w92 z_w-z(@_F9XXt-(Q9vcE{RURqB_w>m+Tm-JJwC|pXnzAo7Y_9@yi$NmB&ZOJ5vyThH z-m|l|X3rj$0VX!UsM3f9F*gM~E(6t986@^t&87Zq?PS6yz}FuLJ zKI9ytx9tIz=^Yer!l~G3pR^wFRojK-L*Ud{z|H$`(;QaT zw^%tY1GI6nS5|*K(N{j;cqzHB^0^w34A$SNlEC~OCE~-`W4ZDvLR@pnNVBL8@*?_w zER7@}3o!(cYGwjF0~7~xRMbJ442h(4PI?x8k1Fp{1mp_&^cx4`sxM%bJOBdW{>|b$ z*9;5?6=J_@@)Ia82OU0>dA3li$)EI*;ttJs=wZgQ`6SH?y4EZ7elD9q9_2}H@;BpG z+!ajn-w7Eof^Wu$%?j$=Qo#{B$oiuU!jJy^Mp65KE-j?as2l^cg)QOwe+T*AYPZY6 zOHEb`c}&-P4~qhuCU`mwJ%IyUqj$?9DmGdSw883$UmZ6#Ok%&x(T}|PTUnjITTe6k zl1PX=-Cgmuix5xZiGOW#oxi`gw2^4kZ+ImV_h!ClqV;7+RC+oG@w8H~aTINi;;(z2 zGB3Jk`r{@<*MUV9ifx#%F9I%lKv3GpuzbU%k(@!j;(Q*DC(#VswQ*otQrzoPaFt$5 
zC@-S`1?4#k3_-4~STT*7NbKA3btt%jqbN+_I~D6?Qhy-<|TPBG#v zS7M2q3sA7YdJUINPDZF-YNduaV8e_kAUp{smwLTkDeuhg3Z3JdU%wT7D2y6=bU$R^ z-HnX12>Xb)-}V=HWH#lVH}r7_C4a7r3n9mRdS|L4`A)Pj@+x;--$#ZIh7<`hn4GHY zSNamN28?4F-4SXpCS40D-%Sd93gUBqtJ6=P!$Y5xV(%20w)trc8a0*n;4Y7+GpDL( zTH;~WPaeMCaR|5JgjA_6J^?lo?3Lj)imsC4NZPA3M3&E^_S>brZf(z)R@s-xIgzAf zw|ZSXA|W;0@|$_E@Sk?D!0`l%ia7lbOyx4#n6+>JfBd6*YNKhhQWdkl`}-g9!#j1j zzkDB1xA9oU?LTnp=;cd3j zi%CWFY^+kSuX1X3wkhDe%^$PAe$UNaL6Kl);A#BKInnfVdiDex@Nb`?=3(#0J~W+d zGM5^~eBP`2IgxrXmQ5})W6j4qY->4cAr^{U)M~_yYiyQ=EUcyhkiTjgcF;gR3V9W7 zgIljod~Qi;(Kj-;Lj;&8j^bH14op75qgDe$$1%Wnq-4CdfZI(A^0I)+QPm~eO}?cnpIGgkO4}J zGcOCbTg5tIe=o~|ZC(?K8a(JGzbgAZJly;SprRsulQk}y{J>F@533Gp>+e|*BVZEx z1(6Ytc6MhtsM>dNGD+6kMe7+J?Kve$CZXd$hk^IP*cYn)gTtc_NX{U1HhbB6Taf@} zXTIWUuuGZeI@WPV9>&Mq#`*L7Yg5__Kg$dt3?gq_c?1>bMOsR=79?0j@p1XiqC18v z9A*p#;gA>GPs)~-qVP#8H+Y2ZPnr^gkneO#6CBF5}2SFWd`ZvAI-J?5wyJLhUL~t=*##V zp^TV9#2_wE7i+x!G=|$Nz8Z8pl0UUVUf#|3xaYt=^r6~^49Z`RcGwC(f1ZkCwB8*BgYW)`e{aCTm|9mj(YnwydkccVxpA~} zf|w8}e93>-p-J&rWvbj+8R3X=vxe_3`a*d;$8+8+R@x!%y|5o0{TtTfs{Tb4zTVa- z&?!5hFQi71PrAm~WBIyFi{z-J`so141|rjr2s{7jA;v zKRzx38s8RqYqSK9#PhU{TAugb%?2?d0e}Xe4*7@|`0Kku)_wBAQqL&yMJdh%CA^5j zAI4HvepR-u3n{)$m?*vtt?2X4!XBj8CtR6M(e(24e7<};>^td$=lKC2LIJUn-3jo_3A+pk;;XL;+3CdMv5lNv+n33 z*XVHBrUy5)>s~VXvfmm2o3+lwpFUhxPs+ZBmSfCfneMyKHJLEUfPdnWd58!a+N&3c*B*v^O#;TS!A0MV zoV=QhL4sE*&6}3l^iQqsuD!Z}dL_kaO~bbJICQlLRE%5YVR2dFL=u`JRO;0v#vSR> zSm{qDA@G5#Y-cFVcEBK+(!gudg&qs*9laN)jotw?4`i(y92I-~W}LBRn< zUi~hftAOY{dw54dCe9b};=;X6otp9~6K&h?zSlU6`ad&33`Yu&`yC&?#%Wx&ba)a{ z{8NiQ!$8MYrwn2>{T_wY*MSvT4c?8no=>8SVy7T8oQ-UWoDbykL4HaL!JO=ce+%8) z)hnfr(S&zLcSJcc!*cHGs;RKHmOZ=uT~dPdq^eG%`rcp4S|<~+BiUn$57$ewS`kAp z?UGp_ZMwtujcoTyi=m4NBh^;s|GK?NZGRV>{?RNfQB6D@_z>O4V5p-nUt;p_hYeSx znEP1L0nY`~;rEe0jM57uc|TGAlpY(!3Cn#CEQA9?Sn;X0zTcSk-Xo^8{9b`q*g8yN z-TUIhnef{Fs?~b~k9az3mqAHXitUun^&7fC!EM+`+NJKMi2dZS(r0A z5>A?lz+t}yzPPr+EWs@l59UX&i$Mlh0XG3R(^W7!2B6Su&pwqo}B$P^dVi)Zh1SJbm!Q zKZW3xc85ml-A%Z;1(NrL@U=_tU!;L!$!u3co%LyXu~yfVN=$*JRKJ`g4TpStAGNJD zg`>Gt&ax%Li;MJ^1|n=o=@d(LHV+0$>cV6qespM<~j&?M7KZMOM2;M?sXcAVS{IsVvFpdnqszMct@xCh8_a51u^} z*q!gV$n{S)I@^Eue-1(+veDMtU#bC5L!(zN`LwoNF6y-g68ifITJ39f812-A=w5@X zprJ~1hb*l?Ar8m5v$_K_Xx<+$=u2psuNnQbWug!J8mK;8*>CX_m?>$hvIYs3?B*?s zKf2dVa+Wv*`$#*-fM4|gC~dScpqBuXS3e z&bH5yE4a9uJ@`-{$s1P>K#Y=u1Up3z32e?$gt}V3U46@ch_&TMvhGcF`WL`kh@^0l zi?P*fHcv8|7xrEQe+b{mtns;(_hWK1{}LYXMc;sz$zeDZ1VN;5WxoJJ;=8iO(8%v9 z=A^pJa7*Y6ej>WxCthN`vhDisDaskUkoT1)tC(}N&QmmlUfW;x%6q*68RfSAq`IW zuon_(Wo-RS)K0XLCZ)yS{A5@lRn7+7uS~_LCw^TGx=K;k#-Qj~HD}*XYo^JRIOn@$ zq(KvE7HLUk)VzWzIVuw@E{^C0|~^BsPgOT z{`}`g)iLB{f){WmQsy}7J83P~m(Tbpmtj&2NkfMn1BSh7Aq~P=+Jl0X9EXS>!X*)3 z*#TF^GC&z2K^7ZnTF>!F`KZJRep6P5yQIMuw4dKT5HDw@raGB5h3gh;Tv`BX;=&hM zg4KqK2E5$h&c)&@20?Klv1-0hu}7a8g}jUjXwHkvMjGW_hA^Tp)SdJq6I%sqy%Nc!$G75~_)-Be#Jh0DtRowCrmuoV6uJjiv zXkcttKT_SIRNjR;ldBQ+JS2oNe?Fu9*Fa@0ZtAmi@qI)zUINAeOolJmNCuSrNykg8nHT z0&Sxsjb{T$BI4_M{ajNft1h}2`Xc`>Pl)T3+~Xuu%M8`~#R3AiVlz z;?-ov9?NiE*s*q^GE6L1r{(?|`16y#;X5ZwiteYSG$V3s(St#Y?2M46V`@3PJq^*Y zt38nxN9@lWcx1)<(|09s$XCjN84s4;`-TC;s0s zUk}^rl&MQi?41X0(agp0ZBA7RI+!=nXkBUvP`L&4u>+eensrq&D*W>s!mB#Qr$s zuP)l_$UR|hN;!h)hlM##2&A@qEp?p2**AcCugwSX%$W0zj>@g@?Wlm=mNI*4rnAw2 zb&IRE#A!gmJF;XqxSfBrJdo}ZKp+7qeM7_SzumqHRPXX=5((H*rWspK#|aB(K;m1r zGhSxknmO+$9^D{MZ@DX=`J24=*Op2qzYsa1w6-xxjkaKZoe_WNTv1`AQL2qGLT$Rv zz+{y-^w_M&xZd7Q_A-a|>Oy#l@^QE(IJhf+`+4jRfMpfcS-lEu{S|%~6Pm?Lj{l(Z z!rgCCKR9|4ykR$cT)~p%Xt_#THKOlhvdw_kCi(1*tUFR%TSet%8;<>w@zH-U?Z+^3 zTR)5s^Sy`X{eCnqL`CJrP?Q)3LB6^aSnt?@=xsgrKW#oX3b`gLSo zq8_BJHXgyQADK7SA*c1J&EaKJEXAi7R0*k`J<`T|8;yKW2lNDZ0d2$d45z@x6(Njp 
zpVje18HUkX?2sCQkd&GfIsL;W_ARzG0)laq(2C;~k9vw=wi9d=zE^WW=|aG`RsZLa z3{t%6 zjWqwskF9gMP|w&3c|murN^$?=t!VYq%J+W^_W@cCu1juGi=Xr8X+MEh5c9ko{z8#gM*Jhdl=f}zmvA7 z$4xOxL)fhDfd4j8vqaNe^~v}3AFKW?GOBDOB3rHdLup;7|Gi`5+tv#;7?+I4W$~F! znaBvNcWL_gbk>MnyQD_V{E6YMh>L}AWd7>7dNfdMRi=z+u=lf7g60}4mu-Z~y1vfb zhqzye%Q+N=%aOYE9__y#;bwVZ#eDMMye^;}J4d!SmJ#&RFnrs9ytjYWfy}OFZx3%^ zAkNOFQVY;{pfLgayG91hAWIJ_PsK%mxJ3XQp2oW7Gpt7o2<0p&eJ9%>lU98dvI|67 z{K*4&;C8r$`!@(C)JMJ3O@SFM93|aV&+mG@- zu9J>{kV#hiwcw>ENg4Rt6iJS5|3u$ zeGrSN!#Miz)38)Jd>HVWtK6Hw8Bpq(VmzIxZe1?Up;PSYuYOng>H>T8UU`o4MBa}= z(W=O_I*Z1YNdxB68mycsh!;P3>#w%Rb5Awx;{h z6l6)u7M%(6!+$lNKLktLJiy_x@uZY62fPYcv-gMHJgUSc*|mPyc%v z*a5}nY#E$QR9T$LoXvDN>|qX0k6QFZBWs#mUu@-?BY?rD|6lLZS}i5TAKZTvnnAVD zg0qKWW)fQJX6O7H*^NUi8KkjqWNVX8n=x+{6kjbb?4;B)>nq&MCU_}sbWf(Ajxq3r z{Sjv_!f+0fo>?@VuJzR{)RI@TkBcVRYOp>k;UNX9(GdQ;axS&6nBqylkrdN12(@E= z9s(hH;Ad^b2v12~-I?DxoA;NAee`RL$ROfuA?S6wyA=+qwEd&#C$Lp&C7T}B^uPb~ z6xjI#sKWX7*+1V3FEwSOshE!S^pbMoGI|qNsLs~*|`~2SLPJyqq z9H~P&!j=y27-2fDJ_|CZtbUwWW*r@wbG8o`<}SZ&jX9)>QB(VQA5qb+$DzrjHnGM5 zznbu!oR|=zgZyu*b@l{Dv=pY{7WQGL}p?mp1Lp{-Lw;@hLU&W!GlA#}b zcgbg(cOfj-;#w+&*MX6;se9k$G&f*<{>|83p_9Stg0iT&mIhYPYV=qm2!o8xm~BiH9QQ zO(%=P+K1$hWDCFj?d@+Fu`%g)iJ4qsK=-BEp--rCj^+g2YYj%VuJdm4HwO3$N7~DZ zf*5A5vfYr|W!A6!@~UOl18J`X1p&PJtk|B0MF*$DL*R~R=3+!*uQB^>&v-2?3-y^e z_<4I%2F?MO;ls{EJrgR@tO~-EF|D;P$R|Y(y*jL|lv!{Lva|h;23M%&HOpegqu~x^ zKwsqAQXJ6>W$}MnJPS&r0mQ2ei$^1eg(MsUgT@$Qxre zNY$P1q}vy0#zVLKR>I9wG=;G~ys)t;BMqrx5oh{9EYm#)L&EeB16Z~7O?n`moQ!QH zxJ3)fZH(!?9=02eH@<++Z*972*jusU z+C-nhPgqTYmR27}{ToVa=MwjkA8hxo6&3>%a3ii8HZ&*~T2Z z>8UWIq>wpC8rlrlz3aa`S|YRfL5&4u`*kI?xowwi{a+uH!@LaTvhTn(#47z_+w%WbGUP3SV{dRU z&I~1&&1o6^U3>LJ^g_5$#m_Pl*8fCyOU1pok@&GNLXwp^zP&vtofI}%&N8J?5JUX_ zCFp|bdfd-l%5=W#h(zho%IiVpD>2V1UdO&zyFp7C`DghVP3+HjDxKv=A&Z=!L*^n> zG~V+;=BOpA7NvM?3D(={ix3K3trt0sskL&1P!39Y_eXRW=-%r!2y>s(W8N#s;U$S_CsF{PWNYz%1ArU&LjfxT;Sxcesgvb zRWnOIWBdGTtlR$y^2zCn{;TgJ5bhWXezq(n1=02W0jJqGxmP*=Q>#zl)GNt*$m3pD zN>TvWE69sv(EBC{;k;3{ZncItOdSQ%F6Dje!L{#x(l}?*1Y)?VL-##`x!dqT@e5fiUZzSKb%p2>4Od<^HK39q0 z@ubn>k%o~}mL?D(jUEx^)CSg(g7w0hGSqCq@q$UhrQLmu#oU1iel1?h)T~yXILJ$@ zKVvwLw>XIkm=`g9!0RTEb|Ojdw+4q*z5%`%#dDz+;vvCI1qyjh{+CvDc%?U<)G)o} zt4Jpk{6K>r`kES0ocb}+%FqaPM~=J7Z?FtlD)rtvOcK5>Y4j~myPzQUqMP%NrPF%c zgnbF+%#X*u|NHaJRtFIh`awWu^zTnOqAG2ms1}NU{O>Q5lHm3CQ1*dJvxnhl|M_Vghx2w04gQa%^Ny$T|KIpAlcb|4 zp>T{GStn$VI3%-}%9e5L6DK6=cYhy`pTGQx z<9^?-_v^Z@=M}H4mJqofNKt;+^9=kdr@r|nnh|6cck7i<;xxTJJe`3pHvuUMY4ao8 zin|NhSo6H}f!c1*Y2^+S;h_0AlA*7Mh0m6nr{a~>Aml0rT%7C^nODECdo|s02E43I zoNh99yxsplQJq`&Pz*yDfmn&EeDIczVI$`Uef;cc&)Wr-*>YiV6X|QBqPr=1Q zY*q3TwWuy6z$edb$y3cU{6^H-XQn(oasI}7t?t*xYi_MLz}2_PVkdegF91q9usu=3ZWdI6e2EfH&{oPj})MLhIeZ`hb+^+@(9&c3lU8)jic>_{E>M_Yu*K7SG zyTu_aLbQA9s`P&c83|l7a=%xwjpEcFm1kmCFvJNempHqUi%yS9@eRw6u1304+~6 zMM6F7h|MNx65e@2cUeRj7PnR5%JKx-n}PT}s=#jqhOoXvU}{dg$mbd?SHH`V8%rvG zzH9m}%OcUo^DEQ2fHXJ{9&eGpN%f=0%}#mJO2fk5kI}A)TK;izUVx>5t6$g^5 ztE)p9W~YG|qwZ&Ueqjc18;tXcSRm}C)95jxg0jnfc5?Bp%v%g< z8i{~MJr^5h`b^+>1hRNSUso|NzwZ_-gnM3v_Zh7p6w~DE2_oAw6zRk1WYIqJx37le zmW*%IS5+MGYQ$JHI6q}$(wE>H>U;Hy$F{-Sf%B2-JrO~P8Ox%b-kj1i#Z?C@^`zy% z>`?&4qF^jd!^_ef5%_oMTEuhL9mGB_Q?~@O!rEb|VwKLrilHCL-9Rvk_H|H>&(fm3 z+s@uzxenfkD$Q<+*VcX)1*}pee9B$1BRt`6f$BGjejrtVy^MTg4~l&KKJZ!1pSFBS zp6shzfRWw3@kp6t^F3g23Y!WzylVXEg?&U{*i`XH@<}?~?tFx#K|cUXoNM(c8HdG6 z=#koW2D1;~FhAa2)*D{qxq}M4ZShYYagdnaJc{-d0I~A3#%|Xnm_B{>lHpgbYdOZH zVXExbqmxvVh9dMfI{P+=Mn20fOP>4F1kr_sAJ&!X)^5wEXL$1ol#q>1`3@jz zimv*~f2HWcwc1zSP;zz39dx=GVMzcv-{@TiLgkOTiqyRK9iLI$L%v7v@>hQ?EUP!Q z$R{oTJ`Jr`I1DYw1iETm0E{`XXb8_OWjzH>hmgJf;wS&}vXKF@UN9h;-;nzs$Q4kG 
zzFi$xB)z%(Sw~3@c=R0#9{DvcSl@f_S0n5GxCC(|(UH{gP|6?O0cZ)(8YA$E` zjC`yo+A+mu`@ponD+0bwsXx3xZ1GX!#ImgxZqhk7ou{`aS=13Q*+O^T95H}0Z#3-f z;TLxL*t=WS>|Jrjsyt*CTdojek>Xck%kwiopS`XkU!`R6eXAHEWkjmJE^I`)tH8C>ELK%VmNjXsf4=J&1I$p0pwPNI{${mF3m z29BcS;uR6$c?H$$Z+SKx|Gv<%b6o3-Tzvl+e)HjOA+}Zbb=XqSf6oMxy?<)ub6a8y5*gc^k%v30(IU)J~NEi zHGSXeiDfeT3{o9`Fk^DX#xNPVCTZw8>~Pq{orwQiCM0e5S0|I_dzn0oW zilh~`yz*TBR(iBNzXn*9r$&MHk*-$^h*z`ik5ry;J_C!m&K=mLuvGN=cF;pBK{<$k zUjn=N{n_a^J^;gn=D$lBmge?zPU))fhu-DU=HB&ZeOj6WMw2F-w7=c&muY5HU@6zEiljE%lzxy(yXruuzJm#=RYpDg8k@K@w z0Wdl8h}81Z`O+opP``kS50R+u1B3tVU;(qS0$`WVm0F%vNQJ-mN&Y#ulkaeOt*wWB z zes4Xwx~j!i_0K}+@#>Vy>6hiXlhRfI&#s)&7ECIkK;tBQk-i0Bgm(E>T>Em{8oS*B z9hsSz+S7#jxnd-Z%?~MCH8H=Gbk-+PATS%r52<~_v2M#K-LeTs?2Oz=&R18B<;Tol ziDs-ZfPMP*>GE*HZo90HyHPqe9+p{NkC~5>fXLDY3?5{Hb008Vk#SFX(i3GCOPw~j zRy=9q;-&m|QFM`$<>1#&92!ll8k;hpK#mW)acnD>0p1GlXgj$G@)-042wNu~7tX2* zFM3I&3fj3n5X@JEARF1xzAoZ15k1(TPeEF8eafsDv(RP)!0ZM1LBdhV2)p6 z7q`Z9q1>(3Dz@?f^S3P76&f@$E(Ms;fQpMrbDBu%HxyjM@?L$F8QjHwoLmu2(f=QM z$F)LcYHHbC<$X!(wId*0ogv?5d3#d-!_g=H;wa4R*5WF1OE{7;6F@Ez2q7i^K7-jj z?0fE7f#do9Kpc0IblQ~|tv?)%R$r)dsI{ck#Y)#D0iF@4rm=LF)~Kt})U>d9AY0eB zqdXZNNMbBljR9x!SPRY@$l@3UYTx?To+>a^c+y;WG;h|Lv~Dy(=o4ddv8%FtAoqtC zwC<~n4cciH>#31;Yp$$~V>i^^YN;G{37Xf`yzc0`{{F@q@Y2HgB5&E%*Kk56lCPQV zz%7Vzm^7a(nUK2cR#WeCPO4-c4EIN8P~`PU%apoR0LhpxB_4!)WxTYs--|P$oY^fo zjwt~@nQIb(C&Tiji#b!`fc|> znB?Tl8aW*(x|Th0OYMHY&egh`o)!M^z`;O4)GY;CX_o8}yCKez7?r--G&A|@JU?kb z82V14`A+Es0+aLm4aI%tX;0H}VLY}L5Rbu(D=#IlI-^b;RO|`pWZ9I?n8U;9st_yqWjoJX4j$@cYY z1c0n;cvJ3f;hMt+!sEBS_GJcU)Y-YGme;T%-GBNG@=VKLFRHBno+>On@;#Y5Kd3OM z_V7rnAySta7^JT8v#QZP4e5eQ(Hl&VEcHt%vMGm*v2t0`)%QX5eHb958*mbyHs{{K z`S>O<=rCSi1_LDp0kFRDJpj@^fZaJfY3Ke&)wI{LhS*dTwLV`A@#r}Hb-dJl{O84@ zh^f!EDXq{7gAG*R9nes+(W2ZNzH`z}{#H&c^A3acblT2fc}HD-&uhibqf3>nRCmZ* ziM#A^TO@g_J`?d1lO?obUXhb)VNW;P^yY7hlrL{%!gSL8;u@vF8kXy~aa6&4B1V-} zxD}_FZy0qrz(5jCf|$EJ;{IqXAoWo|N5C_Z@l)~Sz=*=jwHinQwXfG}nDImObva|m zt*3M!sIF6w1%&s%ncBxMz~OWzy|X2t=G?8#uME!~oK={LtAtVLSr+hNjcc<_$=!=eaqOV_j0aySEo&13 zq-B#7iwnp{ae6F8(&03gv$G`TF-Ul3iUBR%-LAz)=|oF zHgPT_N{{h2^*^UKoY69Z|5`-0S`LwogyA-k%;{H}>Thg<_6;_^ZVBu%&~kpA2Eo-Nbw6m2fFw4M}sv1-qcL~u_5_stG~9(70TP!PVV=5 z>GxfZE&Uqzt{wV+>4#J;P2?-=B*N|wEVl5hx$rF6`oa&C3^XT!c|U!eK-`yd7g1Y@ zxAYwS`(j>>ZPk%jv8{A_b@Ywsu#wvDFc}`i@*0<1WAOmdjtEunZa-$ysTO>!eIK>xY+Xr#nf(!`ru ziN<^f`=~mhgrfqmJp+suR7f?|b{$+N@e-s5`zU$#mQ69{Z-Yw8tjhkqi^Yqxg>zT5 z5O)B*Q*S!^5A@(n1K|fUQcF-+L+1%h+=zwkDcsxc+&9nleR7QED;72c4^PDlS{}xm zMkS1=FdB2c`+XqHCTQmh)n$Ot$g`#s;w;gc({lj}}{!dNf2NzKRZ58s~*i{A%I*Z#JX z4L1=9DO_(Pzl=yQ!%F^Dn-{9++vM}RT>05IFxHmW!F8d>ZY(4`x>M@|>N*iS5G zrS|P1b;gjRr_imaOL8>Qo3ky-vMfAR>vcGna>$y zL`w?ePrbRv8EVX~CM8hzWum!$#oM`(v1o|N75aLn!I@eIv1;zB!rWc;xND{J!`&44dYBdx!oOAmns0ShCixvG&sth9%+BX3ElM!+#!-%a0|K%DfR(r~ zX+s3kIu{rgcE{cbIBaE;kW)9lw>W|Tn06J`%N{bjb2$3FMFhEUEzDZ`wfX=2T0sBA zG!Mcx8Ojm_QTfAL{O9zvt;*GvEv>uf0Ni&JzC$+l^-n0npbT^UBY4 zk-rPfG9BoOfR}<_*!wq<+bQoj8y%3q4Cv_OZgR0@^Kxqs4Q`|-!hW22fLK4sk_c49va2FvO_L_-jE;dNpjeym zCn9dhn~6B7w)7m)Zxxw#R-q-*)Be(ogsa(ua*oafo>xp?tG4>R+G-S#qHa;mTX8_% z^E@nK^TO>!Lh{ah${T?XV}i}kj34o6@Ygk=M73Uv{d+i(Tv^&>LO~DkXXhnGM@B!W zO(>$QqX`HW)yatj&X89#D(L8kswlO*WR!P>Tk-b$I>%`-&6btUpQUjOkXdt_jfARq zmI`Lb5-9LdEHw6&9HaS`?63xA;|iy3)SGbB_8-*{+oZ!g70LK8JcZN3mqcSmLdb)1 zq+f}*#NkWlfTI>z7XUiI6aWicKHNV%1aJ_?qm3Om><8p-*lIt?X#T98ipCkMQx8T( zCuFbU!N-VP41I^*uXzRU6{YxfMe$@;sM+LA-WTUZ+)<8?pG(d>76S;HD}K_UigAH} z?qn(>cgtJsuj9GnLmv*h?`=;FfO|uk`k9bJSiQs+M5_eJUE*8={*HwXH2zLKcd7wXYB4wW?!;g2y-N6L4WTZ=CiKX! 
zJGjh)aGUB295bM;Bm~>nvHX_lozT`hAC(hT7M%_Gwd4s(YL9ca%%%nl$y2!+%H@2d z5{?91MUO8%t%kFdyPNRyZQu{2&-)B6dP`H)M`0y9clG{^7oPw5dcK_n31g2tJFl71 zQy-<&*6&s)L4M#S{6z7?=QR?7I+xUf zHx8(mO*S>s5k;=}DqD-0!WTP%r87ZJGG_+n$0_6t?N!=W?gT~7=Z`!Bg0(l&9n1hX zZ^PN!oK}an_3J*!#B0yI@(ddC>vHK9SZCpN%TniGxX)KhQ&l2|0OKg|J8{NUy2MV@ z^DW8h@xp(|Yp4P&mzEwsGbN6RfM=B2vcrtpYLJU4Z*0Z`-na}fEOtHm7RUzjSd_$j zRW9UZ9|1>Xj{;^I7gzW;=cQYMU?2hplORw44+^Mk$eajP*T4KQUIn8iLl%N2?172G2SCj>|*yM<6YCg(rK?zsEOO{c0;E&^ThV zg`g68v~#p@>aAS(zpRjf!IolTLO{QGyZuZOEBz;%Pj+QIJVn#SlLlci)5Q5;8|tUd zE|8=&w(l)Ps^3^`pN{slvNL_toz0$TNzqM&FE~Jd<2Df?x1K)ZVTN%aoJX<`15hWX z)hxejTQ&8w#-C|?VU5ppD45D&$a!K+O#%;A@8DQtRkhIDi@uW5J~PsreMU3N%)%xg z|0u83_dcF9v7epLY1bFD|1GQZBYPt0+3k;4*r{qNb{cINa-Ao-OAB{I_Q=s;JEg9p-D@xXH+byfP_9gUC-M0CH#s>ohM9%EV zF>R=06F|Jv17vD|-RE%)h0g&}e@Em6>*^^#(IWX2dj^*84qozltzgRy(75dE9p=)l z)o#=^bkhQNi<8Y=&0G>-Y?<%v}G-t?mVhWhF{Z?n}@zD}Vj%`Y3BY z*z_6b3i~9}2>g9iaVu~zEp(Lif_UH!K;W{(qNdQk6|eP))xrmhsw@Vx++l{Vhd9U- zS8WCJjYON>N{W)Z7Q}I{@ZI(C)&TOpL<@(Z%zgHieDW@NVkH>gVB%KM`fvqcnjToC z_K(+AVm6tgdPI>x4a}XDt}!r+CNl>OruVN`&sx*NbBnphFgh4?lBk z@$w+tc{3zwv-^Sm;1*l~Jx z*6l8XBpsigpMaNxn?p~ARYEQ*thLFcaUV0&j_ystI2c@p#^WsBgQt_Jll8lKz(*Hn zq=w5(Wu>l7kzgkSwcxjO&c%etD7Cfh9CV~QI#%x!f;&z7mMRGH*)yf#h4BZ$n_Uuj z4)5qP$ zlFq-?+}xDbuif5mID8OgZDa=6@%>gt{MdQ?`TX?sNAOF#sWtx%$29Nxosv1Js4oZJ zcq?(7^u~2VF7NX}gK)?lfN{TYx@p>-MhZFyT%tgQ`u{))sQioVTM>tU|M)5+?SM2! z%5s{aB2eX7vb=X3j3>H8bsflmo$6hf2l_w%g*Ko5rn$#_6o8WPu#W#>KC}^9j@vES zTRQ(>!6ar~eFbb)Jp~*fc>}!H0#HDA9ZijcnI5h@P=p`yzdGdNO2^I2$N&$kV=^LU zd=OoS1~l=rI#vb> zA0<&xsoWq{z_b$pb#OG1mdVC!9t2l8Y~b_O(`&kimCrDVOod_Z^T#Oc7vMUuAM!mcTS4 zxI@TeK&jDnd-l@7{p-N%C0C$|*&5V|Z(!Q}JW8|g!9Zd$N8&0NOlyJ4uvEpx>? zBcB_9-qe4Il4{4(757F=~x|A*>nHNS9EN{Fet^6VJpj8X|4oD87VJvSjnE1#1M}pXg z%)Go)Yq8d?qm-7VS3JdKEkFOZZ9ImkF`fJNI)U(0#1<3|;vR!Av<`f=-FmTlWbRJ8wDzGyIM-W#I{^3aah<1KL{9LI^5`ZWS#8SC75C-w#rbod zrSJ~!epty$#~qNR(}ekzbfwA4qq}NeYB|L zz%f@r@nv*8)Qnz}X$%VK2wMH+b6fdYnXdtG)@v0~|4AKohZUhqxS9_#4XYgddTV56 zaFfNXp#3)CNz8j0XAXIC65*Rsp0vrL>cAJKNiI+vY|Ra0JLjk87{>z#r?tK0gMbA2 zrITQJQax2lM>XhROMWVJcABltHgdSdvg9PliW#*J*tQqWcNWxoZ8^Z{Py%Itr*R=X z`1BLzWG;O68m|Tju+<6|pAtRJh}M6vL@cFV{7t<$xHzp2XbPbmm@Z2wh6h)<0;K}D z7c9pB5yAVe^2zoNfDGFMMrH>y?ehQSBSKuq6w~I}mb64i5XG^PTVwic4gkzeIY1Y}PtG<%S>Ls`#*~I%0_Xyd8vmevCBZ=q zx&&o&6UPGo@q*u_))y;)THa<=9dY=wwN%Do03y4>x}~oyK*1T3C{lf2uDMjge0ZYHSrR&IFu=%yJi4pw1hm zqSC${c)C`Jv58J#*XmTbcHqxQH3(v8$=12NQ&M?PUE&7*9XE~X4}UL|i7Ncv?vvMW zTA>p6;6v4n9ot}Na)J2Iisv(b_h4{lDymOMi>dh>EDu6f>xjjfn=g`a6F~*oVp6 zf?_gopRPpe?szwUx=B+v2P|pUes$v5SRSp-2b1SIsZqAy`ntakVUR;J8z-Arb5nP-deJxbpjoS1e*5PrPJnd~3)_5LW>GbJs#(NHq%vE}Jb}p}aSirSx3yx8{f4BcMU%L_e6$?qxXb zH(@>2H<0^0msBz6Nz{L9G+unhv=7}}TEtWrabJU-nOQu0!Lni;VPjPT&aHgU)U>py zf8@ylkJmYv94xf7vqiOi}j7;iYO`@v#jDuRCmFfR9oui!2m9xbLDXB zU^-+$ueA}_s~zsn?fG)`KhpzJQ(0T)cU6=o$7H{mn;Z$!bOPtW(Z+cH|BODl{^WUl z$k{(>P$*?YhBWRBfb@Kg$^sW(>7NTqUSj_8{&^M!u;zsVo63`2Noj?GQEPy}_p%W0 ziY4H|KG0PZclh7Gdnn3keV+eOK!6;Hn461G zU&Bc+RfgjY85hDi#j<={!#X-b4glVz=Q@6rqA2cj3xM9|$VlsvIeXAttqJ(XX-mp= zFnHp%ViFpc;a~>(u1h-Jyj#AQAL64GO}DmN5(tndT0|&*CUt)AL#olh0%1et?CbgH zLQv?buYAD-K&Ab;6A)!)G$Ij~hG-Y?vlT5qG}msn5(9euEC7eJVtcT({#%k1An36{ zaoQfcfqp?#Qu^JpOmZhs#g&hh#`QjnoqL|Lf2pQgArLwq{v)qe7uAtzO{o;ItKJG5 ztD#h$DwT)xeO~+T^T`qBTS89sa;vuNRC}SoRv-)9wIr5l-bgFv5#NbmSYZ~PY`JMY z^-Q}e!;5NIHpQLU8QXH&ZD;_tK~8NQAS}+dI;7A3Q)&LEpQy^l$}+*kqN|W&xH1`p zwwQelm8E55u}SPVqDb7!f^lBYiK=r)%9U~cxb=+yk7U1$w&`X9AUt_I6#t=*0sg)> zAiRHQIhS6tHKK`PL&wjk3yB+0*H2pcOzqf`K+8NXQReh3 z&4;>&eSdCAIrfLeaF9^(zgZ|)l8)^vto)0gRXTM)dndS==&$^K_Ga2xrsn5NW1RTb zw7Jh1Av{8LmSLoQ=IuO{*8o-e#P<2FZR1Ri%jMNz{Na3N(B9g~NS@5tpFHc+{j;M5 
z{Kff+$t~nm|248fYGJUeepgNMXmsqS8tm=({K}P+5j~NWFGz9wCmq%~8+t7+dJP0Q zIO_x04V>WENrS`|?m#))F+;XT)k*l*12rdlroQm2Pwagv@<_(&zq zhr^*FE?HZ8IGrml8w7hGFEbpP&kT>%C9{g^+P^84RTo(v`z(Gn84jt8{hk*6O&vD<5deqTHP0<wk zmJ8Ux0ry19j~PDN&}++cbHIz~>syBspn@rXT!LwMzvRdtZ&}>9**cRg6#3uxH0NA@ zxtAN>&G|ZRu~NH-1G6s2r-@&~59=1Rq2gEABYpAp4%z+zM**1YKIX1+*3}N8Cuet5 z%l)oK0clA)^3LDDeZP8tDv9p}2<<5Fr%bzP6mKc3#v?roQBoatYyULVJack9z``wK zFuoqt*#mVjjX_ptZSrp5-uYqucEXzS?xLNZVX;}$I?h`4v9Hl;47#+8PD4E~hd)8N zUA7SbZIM$jH;XR&92N05RgEbEn6wMOP91yW0~dGn)Z(~k+-|QIR5FZKBtu6iY`Q)P zCBm4M*o}LldTC1bkZ@qBrg zB_mUfS-|OP&!^NFyc2Wodha5Q{Bi|ZG%AZT=8JI{M>FZw7mam4uz3886tm6Y>8Viz zZL7%G<>r7rt6>(CZ105IEbyMNp6$<%BU`=HAk38*UQg|DMR9!NmV-M^fk=G6hDnGz zCz8n*OC1V1!my7^C71lIYYhnD@RFllfaTs|_VGJY&xF9N*akI-u9Ne|7rrh}g+EGP zpL}6IyRGVZku(44d5Mc=guwZeMOHP5p4H&@S#oh&GK*mz@3K9n!u+bE-C^!Nvvezo-9g)fUdxr6U^|5Zx$_avR$)LKnxZKRQ&C7aQ+T^J_HS93{3u5KaXxm&&+MbF?EV%)m@6_ zVT9Z`A^f;Vb|n z6LxVPmayTitgI5&Qcz&@&#N8Szg2BMF`p0TxdQb(bL&FBLl#*v0Gvi=?fMDgT!7{r|GdrFpRf#`1IRQvRv6o_XixQ{D%>w{4^dF4Ses2Cz|_^rf?> zAIxupD31)(C$BB2IO@YP5mN{%k@hRgvP6^pwD$Qhzl+~4W|`#n3uW57eNKFI+fDe-)w9B1VC zXv1F>*m1;Qu)`0&N`hiL$KAQr0o{Q$u<-EOwQG>tDxF(Y-Q@PLh2WFHDV5_LVB9C6 zPRIboD>S^_>edE^aadQ`I+-^(`$MVb!8|}Ug$Ae1028Z~IMgevod9H$K~+Q^n$N z03HDB%XQ>0HdM~J&xbGe$zC5wrT6-Sn~!7BbHreU(%;^A#qfW=flUkKYi-!>sGQEO z2jZ?~I#4VQg*st3tR6Al$7KdtfjjT8I!>E=V5dy*Q@$f%9RK<5Aj3q~`m6no-+~^E zz+Y$g*{)+RGlI|qtKNRU5O#u&k`_AUIBHe>!D}UoFN`&6%(V?~$9;L7;`ptVCt8b) zyh?bNY^4VJfLqJH+#ZzOu%RR=Nyn_(B$9|;$h}e{$6w5w zmB!%w!VpHA6_cKk=$%gW>FjTyNUcuCmUv03Q$PQo%sg>m8o*8x_eHRB{y8UQ%;k2e zGgq8BVAcXw&~Ilt9pZx{Mekd#4dRH=53bk)EF@U*g=JU%}xc1^AJM{oJ>7ON8-Lap)g3TJ}q< zT`B+N_JN%IbH%HvcS>P>k-8A2T*7oO4bQ{WHs7JX1kRDWX(g z#?Y%KOuN!OWp0%LY3P$dNubP74QCiP(bJR245!D$N}oq}J}Eg)s7< zDsW-riu0-DM1IL>0(r{uT9K2p0)}g-)|y8bgejxQiPrw-ne5$wFRX8_ z{$m}X#A_SbezL2OrG+yAE^|}=Ys0+=hydnr1(?G7HKH&mvsUYx<8}KV28K5tfymS$ zSc0OyebUFH2jT$05PkeNRQdf^-dJ3HZD#(Mtp15oX|{$y+_zv;;}WnTK`tcF5xS)) zED1p=0?GKzUhxdJ$7lx zD`Ah`idFIis;Z8NhT58_*dDW9ik{#RS>C;i&r+dzqL@nF?w43E~G_ zzR!=*tB-gjU4gE98U+QL3TC)0Q@3oA|C=qv7%JZLml5Yn7bn$sCwodW!o@hV6Fwvc zfGj<96Ewjf*DUN%n{ER5-*6c?lEzKI#bVKe8v&BIA??5bUzv`$SS(|3H#C0wjG2Iyt?<&l~A?p^y1PAc* zqwaDq1SOrJaCnV71};R^oR`kvw^YYrChM*HiWW4+5mYHl9mHCJuJpNNz}lB4B=k{h z!oPpu1iI3rTl~&RK8i8Nm*t)N7+t;Ta^>{gPz-fJ;geVR_^5Y?tY|0E2YG(!^T-W5 z(cUyfWG7d4c4R}KdGr8}%XPKUpyz89kNot~uT<@C{Mo!`4QOF|f7>#nL9n&flWupe zV2`i;_gI{91!9!Og|MksaFG^G9q%>$Kz4gwsNS->S@m#j)z(oE@KRtPKwoZ*^^ym| ztV|UO(A=neN}5hNgAkq%=r#{K{Mnjche}P<_SoEpyae@u4HKaBn)^RH6|KK!CH7aB zCEV5>|6y_Q=P$cGAhJdrY#N(QvD5hMm+wn3Ez8zZ=)ly5JVUKW?R1*PweyD2D8g^l zHEzH|1b*7Wt~w-F!Y=gVh2vsj77!o8;pSbOu_*9bkeGvX-PWqPg&vfY-{x`O_1Wv= zVBCzVVvp>~Ha9oCO+(v7zuf(2$Tqc#Tk-wVVwKZjTn6P>YDe(Wr7VQDXglHojpIJu z69%U>L*P&9#UOTVxSQV&zHN`spqQJq3Pd!Ob>*P&GLJJAtcT=^Tb;8I`=v^re;VmS zSvY@)x?_!aGzE|lx*o`!_uQM#xmWu_H-ayF$Go9iD1RpP0FxltF*@6Rna_MQ+xADhqF4H^vX*oZP ze1>3aou?gVThXi z+lFh1hI$WY;>Z(S$`%#sZ{CM=h?8>W1Vlf+y zh~G2gwKT&7xsSZ66@cq7+o`}}G#>40?OKjb;=%P3?MLI;(F5qTH1@01&s7;C;iHFt z=-H?S`$#(M{SJWX{C{6)n+}z2|79>AwK4#I9j+D8*}AT#C0CkFsIPQ&6YXm%ylcrC z91ySwX4&?}w?$l>A8$14E~iOSW6Yojy~@vAX`CDZ(Vn6jORpIVYq6X0KZ#G%fg)Fo zwYJO)3Jc*qRG*`skC?jxig*DjsNl{`0)h88@IJkUib)7PIuDU-$~Wkion4s25wrY~ zxhN8S8GKpLYrOP^K)@wwxy+Fg>W)nr&p=e+xKc^xc^tT1$op{l0+!COh2aO{S4M|* z;p%tce;yP9g&p&kql7xI8eamz;{1>*p{Je!M~k-vA}o=o60=$0sR#!hxMbkje*^|O(f8Pu%zTon%HeLZ1(RRqBi z7YdNipblm-mfkjM9H^!9^La|h`Q*Y>RCjzWVmZ=hVzhrCyV!Zt0p-+i~Z2^0whUzy`D zn>AOUA+;^~BJF~}SKzxiQ5!$22ySwf=B!mfwqH*2pso`v#EB+vFCTVsN_P`_h zc>vYF&i*_ltc7ZWcwCFGb1kn30WDXOfjDf?Fq-8U)^gSgVrTt0SyE(cmW42JJT&d= 
[GIT binary patch: 441072 bytes of base85-encoded data for ldm/modules/image_degradation/utils/test.png omitted]
z?GS20TJ}X2r1U!o>?$;Cku8cEtJd!tbqE(-wq7VtYPZ+?F!b~E)eg-JjFn`(IOjs% zc%|@-U{ClvO?a_W?u|6s@lijgo6Ws`lh5Cglj!qVw(VO;#EIMfdOx_AHsjP!ypDIe zXd~_Oj4h-uLN?#ppZJDQJG!i;sPzEt@!^-;=}0KP0ilDQ^{t>S)${)c`C6|5f$&gf zpr;vD`3}2hia}PCAh<7|gzS-!sHRXghjTLay2|P^>Z?2L-Y|m-ky_k2^%!Vuety7` z^#q@xmw2=xy!2FcbE6b(DSd1zm_^cbzJ`A)BmC z4a=nffrrrE6U9}qY~6_av$V;Yru5oqK=rgB!Mk5GctiYGJxOgwZFT3ctiOzGcfPTX zbdjTX;r+vtEdLm$JM+ss1laDu0buN}R?}tV_jW6ByX|r+EI#>WcnI|4pv3+hqKPX3)w4Gxq*YHt%F|jSZ>IL zP+PVTVUB32!q%U((3%oH)YHQw*`(%XP)cTJXFJ*8l z6;OBbD?gEHsYnwMxR&vWt3b_WcMn^~(K?)fJnz0xL4&c_=A&>5zTJA6(tm5h7)klm;;nAfZWFKB*mqiEx=IU!ZOUlq z3@bGIY*@|~8(Dg6sXhAO`SSGLQ!<1VO4>8V4@&nOqXL9EbjmBP2}$6~2L5fWoE0_- zP+%S{ITcmM#F(q_BEdH~B!Kk$S<7-Yra<{UFT=BVQ@a{^DEodp`Bk%&y{1;t?Le_S z=t4mvSrC+p8|!}BjEI>t7SN;{B-rl{!*-V<0FWqRpK2%k$m=3k@M#`Lo-1cjek3N! zq`#q?9D!rx)T95d$c%(AJjP*Z;$JLfv|U^nQww~%Pp{D1z}h1lkw8Sf^>-`K*H_xn z^kyUj|9g>n{M?3~psG+qBbnuGAf-hc(~kW3V>-RD)xSCZ3SPW416(kXYeSbIg_bn& zpDe^<*@K#23;tY}AzE|{KrSX2#YtjxIc0fE8Q(N}YhR5-f8NukhOuIDfw9d?oa7>J zmni;Efy40Z2Z(HC=h-x04sV_H7@-+==jEE!D1FIKk9jy|TJE3YLKU*Ux_zC)6;nrO zgM|rvCQOcr+?hi@gfjLCA>KS@IW%OCnij1HQUA9#c~aT1MT+Jmw$6HTcaOQ*X2}j4 zpeL00wH>pZhTt+B-KSMJqH*SscPV(>;$IeeL^9!k5N4T0cFJRNu@*DC_;pbcqJ`ex zU((G1W^y1AoCAV-O%tq~S?2@^$ljy&=0B%Ov)Bu~>&ze?0`~=*(BDN)^^DORC4y#t zUT!PtN5L6kSLF-5f60uL4qsBvt{|X8{dtyZYYT|ON-jFT3sLc~I57K2T9zn**W-vV z-;jvIKTp&#he(lZfRTCi>UEYA5J4?l+oRNlDcq=z*6%k4WQOIkK|J1be8GyZ%!XD~7#d^cA3WrBq+Yjy50!+NZIwklUF)=!j z;?}36saa2jB$bKc7^shtyWsyxN2kdwut)&wC8V|`BGKYgKdF7XGg{a3xNYE$9vk>7 zLFNys-lTLyVMx5D*LgmfJdzn>`4oJix@hPgp;}mOP#ADM(Nz>$P`=b)$+XfykHNZ(|YTW52b}6?RR`8dG0;=^l+!4;6VQ7dpok6Vp z&CB!4c7G8}Iua!beA^Z}S0t&~!*$F{!5J4fB%D^ zbh4#y{o;e&$wVzl?#3t8>-SQI?K)NIK5YixbrV6MnDGEGI_iEsv;SAXqBQ$hcyj~s z)o}K}(u@_SuxIP}lAnup8Qeu(UF{07wsy%xOPR&P-3<()w)Q$M>vb4y+jDA&CX($J z5kf6k!}6l5>)_Dn3#qjIQKU^)CYzgvrOV1pE=X;8p}z*e8KE9#!+BGD#+`WlY*M?i zftV$O?JK)iuU}d29UX#FAfPQ8i0QHgc)uulyD+vu>oQ~vUm|vv_6?9@(tS|irYjpz z?z3U`QL;mNfKzCL+y#yZa1ueIa4$(B!x!AaV96}FqTN&sP>{&p66^)t>A8yz9#E7I=QBVf21%wZ9a|7KkqjNWz4D30K!XK)pxW`Sn0GJQRMPz=z9IPu3v)OBt)1+49lJp7gIJBnAKb;fnJwXq ziiFeujX2spIn97zDq-F2l%i2r96vP?Z9V|vyWFdh!e|?N-_ra-yMlYehWVP%nhdETeW>O9DzuR^O;-T=Z?Mq*F2-d z)r|NZy0)wGT*X+w_36ePP1LN?jGAOQo65 zRiEgbZbgt_W5|h@ChS4+<(5W~8u6HN;ke+I8@|Yw$Bev1RTHs`uAa66@pr4RlW@3~ zIeVLE27NrjIsO!Uh&EU#S5ZZZx=n7+Xwbi^{YT<1!NLU(gj~8IrajMZL&lqhGToEp zNKHs$nJ+)n0umrkUsAEhe5$tt!#*{M)Hl3w{WHA;s+-BFoOaPXXc}<5rmYvS3J+%M z<|OSRl65XI>`nP?6rdhyG?M(B4R11P%E-pJR`bRol238rklSlS_a|c8Ys|~!XJ7fI zOEyJYzR^F`Wc}TG3T{dJb9^L?c^$qd`}#WH(jWtPbIuqpcFq}~C6~@an!@k_eM7G% zbRjVq@1MK=MH#+Sj~PQC@)~Me zc7RzhXpsEFg2L1ahf7RnRVJMnBVnlcZeDb}PVw$I8rWP-^bbfoq;pnOEN@H_?E2Eq zjyz$Zx$DqIGB?%O(#|d=>3sG2OxurPocu=b(5#J-66FWpm$+>0^TjH1HN5t8&6fa5 z5Kv3Sy%{&QeTDVM<6w&mPo5AbrjTg8XdZ}3MTwaX29dnjGJOx?iuK6vjL`UyL6aFK z@yXTwI@XNs@V(~XVW_F=%#_yYPfE=jH5HDEL2&5ba=RVDOjq+IkZXm1xqI+U$S8oA zDyzjD9v%XK8;JVpL^)T;q5q@+8`6BiV_uZp|5k!EA3slk2a)E zog)j=>j2TxtD2^9QhOso0!jaZ{$fj0a%{owRr?PR3p0K9n!pMV703H)bcB%L_TA`9 zX9-s=kTtW#5|83rd>7pB?vhm~@6V?4YGsg(`Wxz;lRn2q>bSD;YQfi#9$2vxofsWG zhOKvcB<9;#GZ7oyL^o&`=G?mgY(?g(ZY*;=Sy{4~1#uN}_jaTImqg?#Mu(ZT@Ez6w zCv!WYre3Hh1G`>bM{^ZXHvVZ1z2o-G0s{fBBx+8LfInwdAg}Wqz@cQVV zOV_{s;mLu|o$N2FOcQcjoAXHiHlyZZTTj$Xt0Rdl@sYqVa&<8?ZE=uwGdmF&Z% zuFe|rp>abN>dccrH5?<#7no9FN`8{78XTc252*;klM`w3?8T;uX_)3Yw?Ws!X+cl* z(7+3*YTOa6>)+({lzWIihxo$$sjJ)c;jPwx+b-Sf&(6L;k?@TG0Mf9V;b7IdzHT&K z7OMy`_UJf0-e|7vY%|#?YTz8dx%}i*RLVWO4E2`QRzM@mBV4xwzrMKSbLj`@?D^x$ z({=)y;{fM1!We%_|RkiB_ zf-h#M1Dh-Rxy*6ip(pEM%;CA&)_;$VzzpW<;9%ZJifanxC2slXaJN^{o$trOd%?K3 zq#0hFpEAS!V|7hCLke>xA=d8%T@^7sK& 
zh~B52f7^{sc#!t*-`3;8i$9=R_sJ!=r$k<^-9tQWeeWP$AEAeRd{?LYkZc(wGtqu_ zq;cBgG1OPTV*8RpJiWe@n2X0}V{vjwSDJgw?c$)8fi@u_H#Tk**3e|G)LSNcB;*d# zM9Q+sUV7BG+I(a$9on({cT7PaAvnY4IQ(++*j5T8)_!ZXTI5R8ZF2R~{yja7 zTu+5~NBF{G5!rAFQt*Zol4~IB` zeVEKH;LL~sknIqPm+lXebij#z`NqNzZ`B;`9_A0W`kk zpS6e>N6c=wWg?*iUoD)NPBl^g{qpO)qjS8HdJ$Y0pxa_45A8{t{qY_!8X&ZfOCHQ-Q(f;~C0Xb2XyJXAH~Egw6+~fh9AjAOfgjGUgJ%&z zKa*Yg22S22dB)-IBvV2x#4K6Pm`g9))P_@?Xyx0cKSn>MdN=J zlgAX1Xxz8eKvl8Rmgs!K%Fd2n9821RM|WZhH-(uXS4)T>quzI87FqTv&fgM1VO*Tv z$A5xNtNiMCfNsr`@z*8GR>c#)BlEwU-qQV6S*IXlCinSGS&qIbvI;Jp`&lIpirk2N z13c8L(j0l;9U_1G-(P7afnAd&`kMtRDT;s~WSB44nB538<95?5FTJNkk%xBy-ml~B z94ixD--nUNXZL5Go_pl*I1<3TQ9pRRi<$TbgpAQHisORa+L635%ZEM^Mbgnp>ar45D45xV7PF3#5HB$E zt{Q?BCH(~3DSM_5L9ezk`V|~$;LNY8?E)i;Z6YFI?aHX?IsaQ!&NtJx8on)f|8Dnn zd9G*PaDiS}SbVsmZ8APU+wn7V{DT)Ak6(mOA>69lLt4O(*=`*UfY_26j38#klB2u~ z_c3w-yPO1L?DN)Bf6PateiiT&-Y$v43kz=#EA2WL;Dq~}Z1pS1%B&AxOvWyr30N=& zkw;m7ORG^#3q#JFTUq~M=ifbeY!iYCsTzC)fqZ)I04sH9X6^pXuMk8p z`iu6H@ObY(4o}Ed8`$MwF3%qvX!nXieAZ~|8fV;edwYL>zxr$2Ia&FZwD7hR4>Ci$g72c@p8twd95-JMa>L%vb`TJ} zv_6MFX(`?MKi%+N#6c%co^3I1SJ0h;{Jj}SKM3d62L7H0xdocWX5UnYuV23|w$!^I zPxz*SmhNBiGeYx~c`K~xI$BPW^1~z1SZdz*x$ia51SY9pT^HjPmY1EGH)R0Ax_|`L zHWJf`Fi^-2vdj{Rv1jx5+?c}y2;3}~=Bb4pnEJ>Q$g8_{VgHZ*2ocmGIu(G0S$=KT ze%ntLtw;h=!A=ToG(0@KsQB9nz~6i=vx7RVArJf+HZHCAyEPa4TS~=ZI6HTM7_ra_rRPq0(?3P&=q1Oup)MInrQr0?448ZAR|__SB99}ng|jzA)X$2qK5f$ zu`fxUO3g!BaOa6^dO$Dr6gaJ6>aO!ym0FEMwwvWDm06>8uTZYWeCP>&oN28;d%OCY zXlY+Cj@>6do#No{OXC9t>vm;-Aaht)1$X8ty^l$ykGv28_ zXUmXJ?hk%n;$nZG-+Z`Z~DKW6H^6?ExE{anX3urIaFfNVBZWcP&Rn3Q$3(5et>2 z++Ij4Qfl}V8VPZS4h2ss14&*PQCEKM0+Xvd;pL}Hhpo7A)0`ZJno;>?owX5zK4BuE50f<|`pA2^0 zzj~o@BdzuYa-?_iKD{`;TURJQn-$SFIC^Cb;qbYz=GQ zKtKTTP%vU|p?&UT69|mxMpk#@8Z#EXiC9G{-4%i&M{j!o=k6|NJkUG?PNxsh%h70K zi!|CbE;aQo7z${ta0M?D7V(MLW+YnDO8cIE>-gV4PflonQOp(bZy^p;+DG%asrdVe z`o{VLATp9csC@r`7&Bi;16V{EQq+@;wd3RU#ePm|o0{6q11RS0tY@>O)5Nsns$y~i z`r=MNuXLt$jM9=SC>=H(F=lr->Qmo(^XXn8%y)yDzV-kYKmxUEIvz% zcsB$Xz*!Y3(A1EJ_N^K34sT)2KJvw1k?3E1aMVFB-Xm=H) z*!cF@3}XOi;Ec?s>0&SNi#wI$D!*tS4JLk)*4e=S{{Hw0IS38Lb|jWaan>!e6&hVA zM|lEcf>@B8axa|6mfs(+@n0Z^2!={-S6+KtkjNpA!i-AlxOxb|&%48;0@PxmaoxzW zl|>~Nvs$`HU#oBF?pBbPASyegZysy@I%d9h3fjb@vMYzxan2Kkn-x=~nIn7!y$U_8)gXoTDjYPgPl`EERH9bjpkyzWYV}o#e=+y%bd!>ztLGQ4<&uUk* z$;H8F$S*BsZ)R<_-e20|I-xV^Ec1pe_n0RAH6fIn2DR8s((=G+$6?n|+{vGlJ;9S( z8Y4VH_S}&~$mp5X{KlXAD{Q|^j=f(E)9Nv(ReV1*sg8&gY9tDsG5D`%KviA+q&4Gr zbyo=9pG>Ns5iA7DlhSrQW<$aX-gajVoBXWfQYy}8=ucc!Nu4d8rJ8pn~i2)+nVe-CMd+* zHH|&BvC}r||IkLSkVWGG=|06!iB{F{pee&K@M@(6+3Jg7Cxbc_T^>mjQXw#u#?O_N z6(3uaK&!V@5G)o14}_|Q?f+$b@pMc&W?^TibYkc4-_ekipCCL9uWctzTPqHsFriL{+0vExBFI3*S8Uf3{9^N?d+4f{9BDq-$GC`EE$Y zNV66BT$=PTr~fdvKOQnCxC6O;Cdr?4AA^W3pJISK1t7v0-LQ5yo7P>B1Jv25}jb8tU-m`u-XP_6FS?hr^0 z#b5)&9P8RVM`!#QNGg>s%zR2I4?@PnI!=8cRgR+Gub`54Zh6_Ydfm(gam@oPLW{Cu z*455MK|1M`CHfMjY`C{$FmK`)Q`r0kr4Jo3)FOtSG#ayA^OQ$p~4Mk47nH+K!|$ z*TOP6?=iQl^Qni}l5xYJrd3r{4McHb?wI;4&7XqLUE-L1-&HUSk|?#93328O1)%yp zGS?vl6_#!KD82aP*?cw)*-CA53x<^q$PSBKz`Jhj9@bC`MQ@>MGAThd&$B@qx-RF} zCbYKe)AqMn$W}EqH4AD}@ip1EZqd?k7}{5A%zFevhpO=4m*x3{8I!5q^y`EO-4OKT zw3)tik7E4Sv)Px}yjEbGM-VFwRI{^pjrg~DifcWfz1FrXC^)dKz$k_D)-qpSsarl# z-m5FPx5tWor{Df)$&du@fEU`+{+A@k3ShTmRb|Cs)_G=W1{03ZeiJ0!d(N?S+V zu0ZzYlMuN2N+;Cao*&Ta^OVP9O#X&_El6sMInS^J;-kN%*)-l;uJF_||0Hi46C&=( zXT}#=%p@4)CLE@X%V2x|mIHV(ektoyd4Eav!L(T3K2S5)H*-*(u$6c z;TJ_LN$6EMT(ApW@z0pZ=dC}jS*oH;t}w!!!kU~J^(wiV{i zGal2yl%}vtZz2_`+KY7Oe>0S06zI5wv|J?N@LYDkVo3&i-bvU!LT0)S3~I_!dsjAV zP0B!n;Y!p~U+YN3)8=VJcF?Qea(4Vs$bN8~16>nAd2X*%Q>UiQf{URpwvU%VGaAA` zf8PDPC>{K&S^PPDIQ(;4`ro~!<4$y3e*OiUi2IJu|Eql}!zwh)qRl?_V7G-v7Rstd 
zttT14@|xBTny7_d7@R8xmp8I7zG}&c*@pb;K<63F>XLpBVE^lUA}LlwmHFtDJRq=` z4t7rEX`PPNPm)b9qpNXQLIq&Gbg-ba~``h8A-NO(@fd z=*<#~YgW{$CXY5LS*$oFl&BgN`Z_Fq9EqagSeZB$)Lg(!`^k=NYqM&9-WCwzsj9ft0tcIaQS_ z;b(jH_1L#vts&ciCs&{Q4$}Al;Ss3MP6GNDS5Ee7PqsG4ZlQ;+hi|Tdn93@ul%cpc z`MYCY>G!p#eCrryk7?-0(NQmD__av_O8{br9M6rst(SE_%p!u(R~Telw7 zdz1k&Lb=V8u_od)Qv97|R*ugL1W5Nf^}!Q{sY85hS_5XZFRdBD-@q+pR$;3TlJk-& zWs55nq<@?zf8u(>9~!We*PhODXE*UfeukM3mH{qv zEqtRRqNPjy^>Fgt=_VygP%guH<y?Xr#$V2b~y2TJnuQ`du79VS}ttz z#fKF?V@`8dqytEz>;2kfh-IX+>j>QZJyAv52YK?hV@P~5j^AMD8c8lh;&_`>m2?0w zxMDeQB!6tj$4aM(#Yy+-2M1jA2^t9%!yKK^SeF{AauQ0sB;%bj$HfjSRQ!{6ADOZOj=1RAzFz)p z}2gM0=${12e^hg~J+our46)`^|2BH0CBItEF~Kr@nxd)hT;Ny#9l zKJp`4ZHSa0TS@d1FBX?&lDSNeyd)u!Z#bspPeZtDPxK(r)Gun#HAtc` z>T~+Y&_y`J*AQ2kmNJ3J9ACU{CjOQwFpFw_+mQ7+6bCG`)DfSTx@j#ID3HpbntQB z?7YU%v6#{`Z{-MX5n_W9W*L-j_r;VF9vMgNKOT!=?@5*oZWd65cWS~fo-F8F`i-qr zOOgP9gp8o|_JVjFSnSE<$PUW7mk0(d$HP(PQEblyYg~VMPJiRhgUyNGc8F8|(adO<;?y&c7lcp-^cz4o& zO!Wv5!c4s$kI51(;mE8dKV?u62iX+4zd@hh7?NJaybXHguOwiC%u*tpPn#;;B=e&| z4~`@CJl>A#;@av*l+F0hEl#i ze9qqdyL3#3UgS84qoz}*KmDs;z zYVUXdS?(Fes~?m+ zxYCpUL6B@c>0 zh}^=<22ygqhsJgX-z6)3uBn`<+omtd2~ zswpRTNeLA}`l`*upoyv+oXzx-F~Qr%2&{t;d-pca_lJI@l8Y9Lp+?Dra<+P&i*5(={vDPE4kZX7vg~QJ&64skO$OVT zE+;ShtaT?$fi0TnKRK?;?^raQE_;t7(HLr*p7SL*S=h+wGF3jD;zomeB&%R z2N9nFA=)C>%m#v72D7lj^`E;G+NIzuP`-w@A3yM!v@CODFH4owmv_)Vh^)oR=Sul8 zyT<#7DXVJ0HH3tO9$a{)S&1X%{Y*I3hmQ1L%6k<26zd^TP0eTKvzC%J99Zo2&ZX_k zz;P5BEcAjH6ZL83$~P9+!!;lM3W-P6b+O|qPWqb!XztfW!|8ydH#K7L=;_5OtO)dFW-Sl<88Q-W$yLDg ze92FBedo}t^Vqq0F6`teU_h2)45`~_@7&NS{@DYUd&uLOm&Mi*aU#qxw+q$-#s3br z`O&klkGJ|aBKEANcO1`Y+`n|`QkK&zij$=|yNT`O_LFaj{~x*fVmiyq%hJ~yf=#!j ztXq*Tac={Ng+^uPULu@r#$*i#>Scw$+1qsnpP^q2%%xON+wG42KE#m#lQDch^aLcT z_UvfdWZ}%$4~Zai$05rp*Pvpct$ILa^=W*d5-=nSN@XW9Uyb`5M?%hCF`bhY-a}HC zd^!gQ7LT+mfYD#?uot-L>9d1IsQ=%J6(e6Kg)|Z#*4lY5wSqhFHwR}D9<$iiSLy+j z?ghTyNds->6HmM@BMFg1 z%BgZFYI4XqQi&-MGG|7F!Ynz=`Bcd%hsfCwG3PnX`B2Hk7#Uj;l2|N_mUH;MKHs1J zT$jrn_I@3n_kBO^;;l)(^eh|Cn$~H0eR}1yYv(NPwkOk&1KcORl%!{t5?YsEhv&*T z%amkW%~!J*#Fr~-@;ldf#p#lp9Ln-p<_LNBGWCn@a=%_h>#JQh-q`J-QT0VN zsniPAL~>A=Z&x4)Gp_P;H7d8?xAUwl737b=Ii5^aeI;2ctQn0(iSk5_#1|fTp16A< z)__^ck)e2)@LDbU+uuvI?>Qjv%MUlE*R)*_(T|hkd<+FNCkZNXX{$eS&$^*Gv-rg- z09=Ulr#DFNE)UbG;En`3aMr6@blf>^%>G9G)TL-;_?^97$u1B##^t1wRilQ zwPNu&54$&qV(DDV?M1A#UiN(DyqtYozY9`8xT3cYkrz~xXq^hux7@TZo&yK_CI>1b zw^@y3?=t%GWOtfMnlnTd%N{_%JG?PJCiN`F;vJ|d$5UMT4rRfO6PDnHGmOA@8d`B01UY1w)*10L1v?SeZC3nJiAuod?2}6Xh48Wv(yjxoOJnw6 zb)L%T7LCa-8fJTQ{utFx_C-{(u|k#^EXElaJD{bVVcWhv*R`)*+uNE4E3FiG%ySnp zQ5rlr9SyVkXkZHub2122DfE zGSfHpSVlV{;S&8PoH_~U8&naz`WF<{Q!YxQ{fSLXcTZcJW?{eLz2D*~w{Re=9OHb6 zu!ucN?F2R3n`wY~DnoOFb5jkx|7{()|(!9<>dM zV&Zc9EGbW;LDQ#yTwxsW8JP4{Gh=zk8izIe7=nJnVcJOUqvT;g8wj@Zq^ ztq^=VF(Y;ywjXR`Pmd2~SR2C&uodYr7=UJpBC9J()x%%JF{q!Y!U9!>nyCIUjc6PE(38RFFPjqce0r2{}d3I?Ag1 zp<$^n$Cj;Hji4np)~d`e!d{!9w;M71SSco*fAuSA5&lPv5oCiKM@ zQ5T03&T|$)j+9{Ark&+ooN2nJYPMvS-BD_B@zn?H$e~f5YeA7G_H%|ep0SEz?8ngL z6oZva+;`p85M~v=Jk@D8`?B~`p$E6`wpH$&;zw`VBKuI+|K7pCBgN{t=#Uzv&)_du zm>|R|@+?kT2p7_vD#@xNC?DHKsm{1qhG>1yE->a|$;TmLl45#5i`-V!TCwi0@>Gh> z;ouwwDqrLZs4{lk-v@N`J;wewJ#$ZK_z{i0q5Y`ks@=ca^Wq18YqN-FA+PWtmbtO< z=x1Qz*Ya=gOk!hUSBV{ z2iJ!>@k+zLo#9@@1`8^1K3 zA7>#tgqUU?hgtt{aEFyuzjGXS|D;SYsAfj^B3^m3@}DMxdJfZhua|6UE`A-#khhVb zW_+^wbU=sTe6FkGN%R9%wO> zuK>pENjgHllo8vEQr)@5Sd`9cMg?@7R=KB;#h07c`%dHnsDh6z8qvHCPy zycX{~K8s1}Ch(7>AQS3dN-7L>e$r=oz0z)7>!>!ou)lvTtGJVy?m`Mq8nB_p-Zk3$ zx4>+W6XlhcLL%(X(J+!c3jE5|j~{;KyJ2OV8+YOTPDvHi^OT}y)?S2rZt`>UJ9N>S zn##W`yUA^pO*7e{W(G=wa=vGcDq=5M}lAfO8#>TqB4 
z>VE{z@svrKXi#JCA9FD!Jx5JRXF>~aMIQo+Ol)@AF$28ir!?>CxxvboWTQl}jUjY_ipB3qoYk>wKw3;M#+%$0r`R;O ztL8H896X_*F`U(WP+I5#=etnx$!mD8%pUOD)Do0;frt9 zNNpY#7s$w7d=G(V^S{)O1E>6Ym}`9NkLw2uikrN=_*F*4!4X=QvTO^t%eci&*BCzC zSzZY>8i$3SER9Gm8@b+W9QbchsOMi_?Nfj#F^$|MZi-(AfzF^N(*8zZ&)#C5cNp1S zywV2k@=>&`@kXoTsGouzpW(oGQ%qgvAZxV0{_a`+)V@=|l_Y-VOrdY3PYoVo*QB6Y z#3NZZKEJd-pEBDOd63_gKto!3szoUgY|M{xT*#l^`PSuK!z5@*y;-4}hCwHq4*fef zJT>{?%cSHSe6Ssu%nJIMTBe%P--@eb*K;+&j1Ac#shaN-(Tj#iM6tYKh>PB5=+ryG z+J(ojkSGgFha~LUg>}@`>o3H^8_j>VZ%$sY+c{?SrJ>BN5nI|V{_!(hQ}x|TutEKR z7bd50-So`euvVPeUnlk#ZaLxi=jh&~EMjHwB(#=3$O;Zg)~C%eq_QRLCsBSlaOLrt z$+EL_8pAtK*E^gewenpODIL~|_d3CmBkYi{9e{B+O{_IEL`ttdaiFWvU5!7&Z?sZ8?keU zjPNyM!;D!c)n{bfmf?8Kkr7;Q(Yk%{#j>b+X^E0J%vC<_rBaNDQH9b?%b$GC2%!nT z<%b_Nsm(>GE1>@2M;h#P`uFOI--hy0#CADm@k!Q~C2|h5=G&*gQ&M91Add^M#knNA ziQ+n(8La7337r>a(ddUm+fw1OQgM&(EQNvZF*T}c?3dK`{oB8EteXR}CLvq*J&vxs zO0c0g-a7A#@KdP#b@xpRj^ZiL>Xcveo~bPMEF`@YV>GPenm-Jc)Va=$AFs#U;ENx9 z$#RzvUEn13`U_c$*Ff`xdUUIFp;B4);|@h{`4oj$&CJtg#AcF#IXGAjo?zH}vg;8f zF0NTC-Fi#W(e}E=KM!4*u<|ScBu>wAC5wm~ZOm zj>{adJb1$PmwemfqIjJ}`?IA)^EKb#9czkb z4(qzkJwCxFPlx*smVQ7>M&mp(UWo11_`?&iJ1BO=faol`YH5912cs0kS_*?t^;_nfj zx9Q5J#30*6rLvFG&t@L-$0stoS9WmXVN;@v0b#c9$foRL#{AEm%YJJ1%8ID6GnK(m zZATtW1s#0vd(n|6giM&fY)9jJoP?C`183HsI$&)ObMaSTV93L+q_#{u!$%!BidM3V zf|(~k>VFpBXlViI-Yfh$iyrZ7OG{gS{S$qr8DRD1g|*ItEnBP%O1obJsy&IyoDvfG zCg)+RGuAjbb9nf_`;`&2E$o}!;SYA?=F?LDjq<|QeQjwe;@2}QGTk^O{ZZCc*sDey z?5T!ONbwuqy+(@N^YNdy_dMAlE<=}oWe4Y$p>6v?_23(Tpd26MSpB=@-B_u+|@Dm|m5{;xlA|AmOlgM zt$N-%?jS{9?bSqjJ*LgG40Y9HF%&o13U8|}&A3eH$T1t4V3`F_iiKU<%8r#`fbfcDo6>d0vsY6r6TDSUnDXaC3OJL%N zl1DpG-12Mi8XB?i<@TG%q`*h_KWg4n5K@y-R9Za6DRDW)x~(%)s`5wIc@X=Sg5L(# zj?#2rD?%RAq+cc^I!%&QszbVXAM5%=X2FR;h!8$z`%EIu4R^!n1J+k?*&yF#}sS9atVVpp}|CQUrqBwp|Bul}Y7 z-upc}Lf2Q3IjSP?Blpai>sRPzAA`wF-GWx|Si5<%mruCklw1eWDtVs!DLj44{jLQ? z_K~kD%vYNcRX)S7UoW+sd-I2E7P0NeulyGE)zg5BV*J8nh32DdbK|>nk15^gVusQ5 z{@FaAC*KU?>PZ(37Cn2z*gd@L?tEtVsjK+0R(AX6ge?KTC`@%LSPVAwsVQ|ohrJ(| zNJt;XwL#G*;J@+ys`+UVh~GIGb$fFdApkKnfU34yzZFoC@2T{&FD#ocQOs!Yv{3$Kxsk16^M4B zB3aczf##9x*crqh(Ce$6ulSJ7#*dXnk09DO#RGKh@J13bNTiKJ0Gm$m0KzX#P4ECnwZrBTO`tULD|Vh!>`RyO_xGE zFFjAIGElu-EZ6rlCWc!;3SJ@m1aiaXzE!fHdv4@b-~qs<&51j9Ml5xS_pkkD@BwS? 
zy2fm0#2%LO$@BU4Gbv@Uioh4R36gCQAwkup%UXqOad)PRO$`ETT=x_KIOAZe=ihZ; zn0@)S*tz0QnD!%?P(CNxuNT0Tp%F-9n7;MOSAvP2;ekAVp;5HL%7Gze@x9-FtfMPo z)}E?TIVY=h@(CA5E{|2!rK6I-NzV)9M|yZJFYsnUPYyRVJ-m; zB@RJQ%9=#yL(^vPFV}bKR6Y5wrre4)caJw2>u_>7Oj#R2t)l6LGwFGwC|lNVlUjy>s0&>>qKK32{H0Sj2}zXz5PE~lF^v4%f?DK z=PD1~nfDsdpAb1$oPUvSlcScNb7C3H95t#ah7=KHQZOV@Nt4E7R-=zI8BNmbS;S30RCE(lb#krl1sis$)#-nI-`P?=(7UX^ndaBY+|vwmV^Ml&B;$+w|WG z@Jav&I)5BBDErGA-RZe-kTaXzTlX%!9$t_I47JXolTU(ZdCkazrht=h}mRU+&bgs+~wRgrUeA({(@Eek442CC3>j6Two%yZ_jaIbqE>)^dbyaq1yD zrXBz|3{z!G#}Oh=85nQ9`3@Ow)Mti0tBD7TvvC5>UPCv=fLchggDIR~E24Ld#*ml( zZWFWMQg!i}BvTrA@#Z1?_>kzEQoCH^v^q6|$yZlLyld;6{@t!)S*>NwMk{ZsYTh~egYqJx-Mb<7L~Xi+4>#jwo-R0OS`83+<@UMrVGqTEGk;Rs4_ zyv{?3?Y_AQH_ZOj^}QCNTW3$tEf_L(=ieJaMqupT6S;+S*+$3Tx*Y5k=?&P{6)zXW zpJKfd%nNit?jmz3%+IfmVlqGUey2&EC}4j7XB@X#cv#To0+TXwefrMga9%e9gC8FK zZ#hPG03={N;|kBxfTqW9QkEi^SADYG{k}kK@pg>G=Fq-`9;~ju%>XxJ4i7+Oy{z~m zjypUsG?4i?^5E}d|Cz3cy&ZsHy{@eZOzIm#AUjnXxi>XG-@EuN@ekNy|ILZ-ukLML z|F;QlT@a@lu`=N7+}hf@l>FvTFpn(}BM`)3NrFraf(L%4=OF4e1YVA}amk@yPKJ!4 z!*4^et==1?Hn$`wA>D9%@@|{XwbLVo*CG&$&}hOD@y;V9b^?C+S^BSx_y5p(!k8~= zJDQWiYY$xww@6Y#krT~N@H3v_e_)u7(6AE_!-_0oj>ywMf}P9D(LhHlkJU)!woKiZ0{V}V_{)hCeg5)$sc*j?(K;|`}H)+elc4)z#&9FcqU zdmyytVp757S3|J5ZzflwWOc&7^RPOq9i_+4qS2>ZZ1|D`&94t@9cq;Hzvhl~ON8p* z+6>v$hf0^WKKf%v?^4vH%JUg|Ru~$eE5S5wIL2^uT&u-q7kP*w>q<&;1-q`$SwCyN zpgJK>nkhvo$Ry#l*38b@E7-m>;r^CLkDbf&j#T2la&G=cmwT46t->gVR+kq zz&{7|Mh(SP8fU>pI0kGE{!C0!y}Macef3qbP49v^>rtVqgvoDucjqKiqbxqNoqTm~Z8@TEsNV}% zJLg4wfual4tpF=bRyDa5JOq96HlpWVrZlay`l!kX`7?T@j-E}vcd*m%cAElU0Nd>X znU*3As8`78&puiQoHJcvYhdUsa~{-@gm3k{vEg z>mD|^0xzuWK<$cH_jI73bIk{1yvY7QKMQ~C$WzDFo#10h-%}+;`+i>MaDT?g{p}A} zPGQ}cEH_D)xN;A>(r^D|sENw-%2tobW;*N~O)X1seyq7h`7HRfJ)j^9m{vD6GXNRD z;~Mhuz@@(}EgCbscEkl`&jkO9=W(69wT?Xf^ME_gE1~?iw*eUR2*Q?48pemXfrvJA zzN>8kp(bcB;t!OA=6Abp4*f(ybHT;YCPTRO)~lGBAL%d4JtlF;i5}qlLuQ-NUQLF| z6N}Y5f(r;f7D~x+kL71_K*$App7kZ#)(}eC#-h@7oH6nAN>=F%(Q8+)Vlh3t`yI@M zVyz%4sr1&HUDIW0oTDPT78!DY;n$Ak+}SG;-o+ig1~G6KJ()v23^5!)+U<6O`AdkKym6A3PjJ(t*jji?*!6in`9EM$hN9XX#=U`E}C{{E73>)9%iVtXc z{NoxMGo=Dbt~}czHOCU=qtD9Cd03+R>~9(P*|v8d3^VqBgPnOuAPg+|n-kXe)Epk> zlcH}-c(A|Zvl)GP5nCz1^9f9>IO_&9E8Qco^`zHZ-==UA(39Rc`S0+y68ZY)w%3p1 zPx2W+WsUXFZH8OzZQ&xh6vyBBYRvX(Xzma0%0Jay7B^Fe?>b(*Z9Z>9sAK zSd8;5Iv=0pXH5N@#Cu_Tez#{o>)>x8$my-EQ@t@|*0nQQNOJ|=&6F~7MYW)vb;8Kej?^V zZ}%6-FL!+Q3JtpI(`DFZ*=U{hr0s8EmnK6$HC|$K5`%k32H4knP>G1PWgO~R%qJ^( zyxAd$pup!X)u?h@ZB67kU6)jDP~!%sU-RVo_Uc>>r=)lC{8+4;NSS`iPZYI#cX_2s zqjs|SXw66|i3r}f{lt%Qo*&_{#ogJOG=$+SI=1)NORUeJX5K|+Od=|eJr%|!C8@-( zN!yIoGx4zfK5@Gcd3%$0vwxmk3-;QHsw}eh)vuADcI&%a=hw(>WfP%u^O&iXosJHn znO%$ub&@xHmKUFifs>n=$|)|=mG5RP9sqpB4`8|6)=Nm~ccJ1@vxjF1Y6T+?yZD5W z6Un))3d|;w`M~6~n1VpAQ=&0oTR)^ZtU9(gQ?yLxr&r`9pf@SOf!FS(OIB5mrvKUY zH)+!{+MQng(>Fc32`d+Cjrf`G2mLcWTr|teR92)v1!3Ue0)?!`4Lv1n+EJ+xdd+OZ zn`-=zYJMvjP?8iIWq`NP|1tgb>(8Vzy4HgN+V%vo!@0-hB+{#5-T&Ir7kGtI|8B&6 z1hCDmOkcpcLd;k~*qY?jaBJ;`5M#4!5UUWZ7O&h59*vwz1>PL_By}uKEZ7V{! 
zS}7{qkEFSPp=78PoAXlA8V%l1_0>ezhUDc+%r{e_Xm0$4=$8{B?vrf6Fx?1p8 zHf~~GVEdnsUZ$bHmh{?78GK@mYCQHZ5xTsE#l7f`f|d@R5|z?ZgKj@fKZcdo&N`>B zd)yP+cFtbyR*VaxrjanZeYvkS6T@l7cVfpge_972s>x-az$KtXgBS#gxuZCW#wYz( zfYf-f%6l-X%9GY}Eb$UHy7f~|i#eaH)XN*OY}b#AA;JyUw}(z-RXnY^h{&Mr<|{_ii`PvVjEHwIe)b!8IkH}i}y)l%Si*4FlX|K|1myURNZ#4 zO;)<>SGE4jrLD7SeX?u44Z@=+VNYb{?HkWMX`m+ZTUtTr$oQWL_Qy zOggBW+Q<%HK<{s$`X?Q2>~Kg z(39{aO|}bdB(lJ~XZ4}aY06>qz%U&cPE}^M6S7a13i&P&gH;L7f#cpq?*0I@;+19W zgUMb^T5dB+w-HFhWx)kI-HL5d2IOW)aTNE-iVVwjjp4@j@Ra`J5Wc}j)V#i0B3Iry zmMX8}7+_>9Ub+`o{;rksh%FF>_Zx8i7&o8^&=$0=w7qnGF37cLAb9iW0ljZA*m-fy z_eP1GX6Z*f=*i-lNz8)#CL%fJ79YECgQOWtKre3mY2Dlz&l=fEc%p=#yrvOHdpQYs z$gXB$!Z~9192X4GMjL5vwu`}(hI7=e{`Q+)mAO?46^u}bf#%Y=o5aRrbpOes6r561 zjwj#d?p|g<=+#Sx@|}+wF>p_LH_IS4k-%vtzpUTIEI9lQKE2rkfEcbtGq+3PuOmc` zRyyUkDVH&WU)_7&{nfQeOx)N1&GFXGc1!Qcfd0)O`p2!jz3C# zdVJ=o`sKqZsNS!!BadS@~@_|z>IVWAzM zM(!+-$x|)&dLo!u@tAKKyAMdV(KRYTjX>}s_M6`GO#-j>zZ?Sb-d=INRizx!Q|;oM zE!T)NE$#V5EmV9&bEIyNdgQ>!;2?U0tyS&qU~XH&1^?Tg`Mmk)=1*G!RaGV~>NKuv z2dOpFJK`^>%@shxeY48ui5ZLhe_R@SGD-_pX7K<*3T)sAu z`N9GzkN^5B;0mq1h`Q7dW+$|4b7&{FoaqE};jZyfq6DDCaHXGtLMxwq0`T?IrFG=dyF>GorK}cHqL-J-ZTVg0IwEP1}aI34F1mllRU-AWuksz{NkvwS#k6^gtLm39}dHdm$kP70FqCQ~0R6 zzH{(hXFe91`dUHvt+pRS8xxO1{X%!Y_Yr$nPOBUcd;a}*^^dGHTs@QwP0oTY_+Kle zNH4-&5@XV0EYPSb*BuOT+kaOd*Bt;-#y=c7$YptKnGx!^brdZ{BL#h`JQpE=bZ7`t&MI4ap)dQs9BfNFRy*G$uKbE*#vheNC9AXCXlMNO(gZ@w_(kPNE*BX_#=0>3J)B-r103f#3e9E4oP zuLdT5vYiUZSCh$tE3*o!`8dk!qhtZT3IE_!*NW&{0{V$BpoApQOe93 z#zD!kGfG_8@wT`nTJgj3`LUQgW_v~0WRW)=|K!Zw;tZ;ZKd#X~s#CI~JLL{$)q zZ=BRo%&JefF#@rAvEQ6OyBkXqqhRCgG~KgrkqFBMgSf}xHcta<9lLEQ>$Q=2s*}ci zun&BA)X}G~Xts=;ble$T>PGlf3yR?&0dZ!NOs5&Yg6PUG`HIkjIp4M3H6mRP+w6@6 z1$lvi_ad2qLEJTcntgqQMR#s&YjL5EPDvYR179 zo#6@`T2kO>6}vH z={vuttr=|Jm|5TjJb*zM89v_)>ZX7p8-Po8sNq!3Z)J-2W)47xC6?E)ln9OG6Z{^b zRY-OF^w~;padG28&n(@y?@E6N{?;YZ%0@WHwbDOSM#Rtd-@{WH(2V&t;XB8lQh%lF zMR*Splv9)lttIJ+KCGkaNq!21zgw$(F5HrDw_W37oZUMzKPIeRHug!9iUKqZ>Sp-$ zrlj+#nl!FrV@E^VS0Rb`sJE6GrvrpzupXIi$xz?Q+x0)CLK-xRJaX%18DImafu0jA zU2EM;o~WD*Hj3rbzdPR93_>~mrqn4~utCjaa!HXfK~&!rFp&WcLgAu-dXe)3Dc>0! zz4D8QF}ZGa?(yf905ICe;G5~84_Df8G%&10j0aQj@{sbwb(}YhtT~LQ!0j7g5j;4M z@}-!Ohf>S!pworrsokf;KXTO$aZNid;^^Jmi*$fO5zObFmn$kMla*`X8FH*8@)s_`UI zRVjq;7xzurvO6W|Y;omj?z(b+6)(85x##xZx%t9_m08Em*3i8j`Kwo6>hQ&BK6@LJ zp?j|J_LsZD5JN{J1!x~QsF1Qe(z zQ_Xr)8`Gn_j+{czS(Mnq-=#jE4A@-n@K&TTQy4w9@I+}TWlG4b2z~hEUv5%cxA;!8 zAL)n?2FVAp;jo0btB>SCqV6!m4i<T`MFNiWCZ!^^>SE%I+xAMPq_ z1?RKqeD(ZD!Ahq2D0qFjNV#^Xl}*s;-`C4aE%m6L2vO-`*~>XOwLJ+VvL9=nKH4-9 zU`rqV=f%y%b;&|0B+{(M4Rjov0-{bF9d2~;TvTPc(X*yfe|w&>a}Y$Qd}*feS!%n&(HCrtRD)~)o0DYzU48u_ zcly!lOsKa}g@ch~G-6QHr3T$I8{4b|!>w>Yu|F}4kW*!;vX3rx1^@1N6nWXe(jNgrDfRBL>8m_;I}tOfXx5D509kRqe8%9`qz}X^~lYjt_kT5gY7lu zE~ly4$yXq*#J`=Yo#_tWeb6#x46Fj=pd7fYW@&8yEoV>;RgD)Y`sj%NMQq4~^Hnf* z)tj1Dg2uE>kqBfB3oGO$zPFC~ZUF&*(Dr^u_2w5Xa8;jbj!U~Q3%y!cg3p9UQ~N?^ zRcrjL`5)1sA z%B4TRrQx<4l=LON;h5J7`yw!IKj7B4S=2vlu66U6aUID5H)C&_%yC>=Z#?icBzgWL z;<wv< zpBuRX{0j%n$er29y}a4Zu)UC

C;kIRhg~gM!vAOC`Sn4{$JCXdp2~=e39X=Y38A zFV6&940n{oQ-vTtSGIFgs;KMSSN4>CJz_li4~a+;e#ZV|xl&)k<((EiAGZL5JbCx> z_zf4#Mn;IZFG*}3a}*{MG#juo%KPq$uHfsYn@$|>uF$L3*h&!Bb-&pqo0|5MIfdh^ z>u;XT=Dgh3ppZrO*je3Y-gEp4ni{uKynBMP3=v9&rb4QJR}>@Hepl5d;U(Y~FIn8E zv)`+%iH_uO+%o^ygc)^}BBX6c1lpl0RfnaoD#~kTAh=ySZ2R)ayt*nYzcZVq zWFyBtHv_>=)6&IsmSJuAsB}aAC@>-(E*+2PIcf~t7hHalx^ubaH4{=}qdYN*uupvh z4GIp_=goj7pt;!eRk4g&L8%<2^NAv-Rg+Rozt~+dK5ntSm{%LQ_99S8A!U=duqO;W ztG&6t$0tYGX{U?2eL(NguRy(7kh9p|noogZu^#!B9MxZ^XVg%{jQ*uG__1r3X(Sr( zg%ox%htlAxF{&yBabPvgnqLz%SCC%UJ4iX$7e83tofXmB+`PBHP>pYj%Ub#;lv6j_m^73m1BGQo?D?QgmewfgSJEZ%`*czPwj zSSkC*qO|2fb1^t>vxFP)8FPUNN>@RIU zntFoWDNdS|0&;i%HW(4%tzQ;$s~`fp`nvV@!SGwI$%v{n=@xqk$vJW{~WPC8d0q2S^7u04=O?L@AU8S^3X0cFzVD zB^V`^`{455n5fRIZh1|s2bSDbR7)5<*|nVBfp&17-Z64XMBgBvZ(GGg6 zbkUUl^DaXTa-`2dWLL--p#dw<-V~^bpalm|q;j!%Ky2l~Tj>S#T<6y7A-LyzPg+6l zn{==3?vX-kh31&SFxAp((|&MP&F-`9AA-8kzdrhz*KGM~>Az~^{UB|y4D8w=?|9^7 z_3W*5FuHfAz;X^Z)Rb0Wg`mi2hZNYn+uwKUMMWnAIzTTePcI+!KkdH^Pz%e-UdWc| z!=mH-kDq+%lIqbpE(`RaxX-F6Aj`9UdQTYRv(FSGs`y`k1lck**iC?J}l!>gMojS*Np=v@wVBOeveL!;&RH&U7`12 zwJ|AUdPnYpo5G?-I7pQB@)mJcddq@lIzyh-cU0M-DZ{r%KnwEkXWSrxP(u+$I-tco zKMFS_dqN>U!sjs~PN@-pdo868(`ySq4z+>zpXUWbuEz+BctVqDzt$O9j zdP3|3n*HN1%=%2#i90A_=_P9!J-Vpn<09FTW|aRIEHx4dTq=_RDwBI<&Kf;(7M#aFf$aiZ2(bH#~|KkZD`Urohz zd-xwiAfFifl140h0Uq1T)W7D0$R{YovhShj{IO&MxxVboVAiO#hy>O3wuk5pN4{ z5>3>CifvgTB*q6;&}{9+WC}>8n!hcu(W^Xq!U=Lacz?cUpMJ2>vsVkuXc~cm5Jg^% zj&6FcdJyO^Cj`-+s>u+x=X+zxOPvbDEJeW63EL&s>dn8jP;l&08(LpmBKPbOBM-p0 zp{74neflxl>0@~>+uKVjhpxEX>+TL+`%bWBRfbeB_J&c^fB@TDI(J`x%S^Zh;eHJ- z2gaWA>tdD79tClbH^-KPkWJ041}xF?e$M9dl`{Sp70Vmu!F9>cI##M771P|$uytd~ z@qBu0!qR3aQTRM1X!=vUm%69q*MI)kzu;IPCCibL6@?*ymSdWt+5hkz6ycV}@%l_X z+!ygcCztk|WeNuxOl|HXsM)f5BwGk??6*Xpm~08Q$jiTK&>3!>iBQ7cew|p1+D&;Q zlBVagg}g)(VfEs8@=5>lpPl_M~}uT zy;kd(j*4Ncw*U~$uV0s>sU27=z{Z~2-+zIXABR(0m(#x5y!qNEq}ZO9cL?HsSKQnYbw2d}9BzT<3iqQBK7>WziK0^zJN^gcohK6eVijld@0JqF} zH|>1VRGeCLno)(Da7^uN_}<1!-t;2_&3qFflr7fdCYaT_)@(0NTa%k`NVX@~6^liQ z`*1^Wv|96x_tAgSykgNT80Mb)`p*;Mg0EISh)8=%>8^@uRU^+#^BSF?R}=2Byls$+ z9$!DLLPLx*FD1Lw{ObB|vKctLCm&tchSh=S^!wu}#0A!@m2HaPH$ihlC26Mz;+>1D z>c&p|hho~a7U*I16%EI+X}l9wy4jdug&yX>lCpjQc2%DMk0q zr3UBzrz@?3t(n-{8%|nhmhsLngoOqkT^+VwK{EIyN>xYv^08}6b8Ef*DMani*}v1j zr|xx!?k*;K>9c%*!y^D=?%yhC4*TcUdFo#0nfCnc{f+rHoz zkSy{mDCJU#5-yorlu?zW0pSjGYAgkf*>`g3!4p~k{HnT$zeA3T-rY-nDad;kf`4qt zDcF+Ynsl@Vz(4eg(T0dmxueZV^|C^aG-6N%nV3}zYNwu*mASaxlwui`t(uA}1Njl- zrardsD=R4Ib0m*sG}ggIDDegQyqm|v6UHQ}5qaOs23$28TJMJGvjAAf;uvVN|wA%6LoP!-|&wBVKc((a+ zJ#u|X^7zt|JWqfr`6L^Jg4Tyg8L{Uef#-w%=-3hdY%ITgg3W`1#V-`+2xrg@4So#lj-szJUpt?)d!p_T z6X&P?@KPM02{~M3Wj2%uzF2&AT(d>rwNKb+U}y{tj`;*(RN78AH7gK9C@U+otaWp! 
z&A$r;_=t?tneFk=xC=sfcJB|^R&4h7c3h0RtQyw6Mh#%S=PxJ7v}JMS9cfAU2>h|q z4^Bp5UfldrLrgQWDte+9lI+Sg!~b4;q-@?CVePzVCk#7G$fY6@l&7=XoO*IKI&vT72v-)?^GZ{Lw> zjBmX3LkQm-+SZrk(-rl@yE_A9kfzMY?G=rle-kahFtWeD z+v7-ZYdPwg2wE_2j|=zhMNl_q4nv#<@IU0-Y{{bLSAMax4C_j|vb>YO7mKXWPjLx4 zGwQMV>@DQOd$tSjmx<=}or_*5lIm8ix@l85u!A7eEWh4bc8JU^yyuF{W;GY z6Av~QUI+n<32;&%elr+lKDttjXE&-#6X$J^O4!Jd5cu0*HzgRD_k%CG`>W_!{v&UY zH-_JV-aJ>`=FSmf;<0$Y=5Q)dUe|lyBK%|tq6!(@&%tOSEO_hB0+TjRJ_`Y0)vsDf z^JMaI$P6gSI2M~e);VO#dy8%L9j^l~Yc5S0AVzP_wa@ZMUM>ay<)9pJR5PU=m&8l{ zMG5783wO_x>4U{!eMI|#d%JADONDJ}|qX*b_rh@Z5jLtGs*?JDaZIJg)>x191V*QN!m*;FC*HyLR0 zH|KM`{*R+`k7xRQ|M-Y}9*IrF=}KmA*JJj&kh`@XK%>-oHYt&07f0?9QY_VdCB4U^}Z1F^y; zu!R^6=J&7{rRRZx>#CIqVH9kVdV`hC}cHqS0@r8s!VX^RU20 zDA|Rq&V;X%7#l!H%7`!_KiQhv>V>0b8a~Y{7L<9i!V+7r@P4GfopDa2IMJSls#v|O zE3X3lq3-VybT)7Ek4&fIPp;fr%ccrpPeTgpd$fsokqn%^nE}yfU4%{EOjP$W9=bM@t>+^?- zou+Kl-s)Nt*MfGoktStst^ICNwfO;dGwR7zNpI1pN^MQy-@VDck!ejHp5JSh-3j|s zj|Bo~KI4m(B48aeS9tcHZJ_*Y?sLF?%`UZgH%JnVr3t z!B70ec+>Y~Uv)aD1}ZD!)_BjlrdbGt=eeZ5H|^HbM#qS;G{x;=@4w_Yfxm6F8!~vf z>aBk5+-r4uwM(oRlvwB7wCH{Z)mQRo!9IrDNMJYHJ2)ud+m(0C0TD62*@}0uu`&-7 zm5$+ks*_UDHpwF{YtMX^>ct&5id%%LO&Mp-edHc!m=AEsU6OBidzY-4mHY zE;mpCS4QfXU4KL=Lmkp^%ny3h3&4P2pwmgHbWF^9Ie|+|$gxRODUjZ@RYaOVo`i^^ zKM3jAQw2pw)5F9ZWyI-h5Za`B?3M_$+sd{=@xp0N4+f0yS--X@O`hY9c9#I3Y}Mys zOmya%R!Df2XSM;n$^c<)h2-Q8d&LHkvEn=av+|+aXgIWaZ^h&2UuP(eTvhZR;TB4q zahAnKz4M7i7O2tZgYGV(&buE$0NR4o-no6>ztJ9{>r3;p@%#>4%Y*;6f9ch>wA>s0 zv(m9L#|Vx|UZ1ONPvm;X%J-RMS|+Kr%v{|KH^l(~~5)r_PTQTT+mn@6ni7UGZh)q9+xrKWrz@ z{o6}jp8`m&jYw4-sy3p9gE>Bd zq~`c}Sh`qV$)d!{JSD&Uqrl7`zu}uMwCYn^r+ijl7SB22>W_Rc(kvR>MC5-T$DYvE zFHAtk5BiU)eAVcEV~Fswop$|f*%Ww7ya1(7u1h#`T1b)CS~=1dK)C}4Z?<4zXRV^KbPq(6~khcq&IC+j7nq^!fvX3{C1S@5|eFq+N=wt%sQRykZ6eN z?0?rU+CEu?_Pr;xn4Q!o=*b9@0Yq9QLyQGxtbP}}@%k?XH~)k;M785-MEI7a@$<)oL0 zop=;EKaxsyvfjDnQz9un&(dT*QZ<|kVpLA7% zlZaer=b|@U$8iDECh&}y2NP%)j6QByNbk*^)+N0sGOT9-QfpSXrA15;KVO72k1J3v z`dJPFa-qBX-2vA&n#!l#PabUwR1jj!zgh$KPtkO9%&I*!S?Y7=zPap*n2M2_+>^5P9~`bVLlr zFGh;d_kn=C{|s}I&5!E3hCqPilsc~oNw6Wg+`@58+@&|-Y)A9b!HhkTaMv+-t!+?> zDL59;4GP^FXlc5ml^VMLCQh91x=mK6{Z5|&4z~rxe7Cy?bkI=TdVybX(z&U(`$Y}Q z{U(#OD#<@Hd<^&uN(WkE#p%D(^JKh&-z9=6Fmic(d|WLqxmTQ|Tp&C;a#_%}-P(k- z^rCb|i)Dj6+A{;%3SW9kz7|Q}6{u54c(QN20Kn|lV96xYbX^;8t*tZl+!7|#zsPKq zGXneXi#%-rOPO#Uidq~TZ7@Wy$ggC*FBYb#R>BmP&pvSiWq{}q@F{Z1tUGsum01S` zvJKH8t{a~A^T4N!Gd?#~)P81udFetmwJ53F)yUuxp|G!4vv_3mz43cP+0?Qye2{p| zBj?I06_#sUfG^PC5Qf2yq?~}s0G>(lx8bhK(JIuQqIW#crXClj6?4H{4_xHitIB1s zv$4m9=|}aIw3=Nt=iz~HX1I<1Hu?zlJ_a$`Q=0_5BBG2GGas+#Ox^NwT9Aw zlS0IgY8Rb?-chtcxA@#eNNZOWo8>Z+Wzidx@~QT*#5WwG zTjNBx&^_lk`xXpy%@OJ;vloa?sx75ql?M{AXh zF0Mx^V~KHd&_(}S1s~l8UAJ(>->E0uh_hMIP?$r-S-W-Zte59TCAVVswaxN|*NxrY zJ+Qg8{PXu1_;K?hi`7Y8`zqj_9S0kG(yIi`*)c-@N z(*d$fGh>$H5u4aT*|#n5sc|!|8-9)o12suctqi(4wTG7-QZo}h%>$M5~_=KAxUYt?DB;hCC}fWcmsZfUOfJK?wdaM#?b*N3~+$tJ~P* zkz#opYisgy;6nh3N1Huy7=wzDFhEnUr#<;0axbvy{V^7lWFE6lgJ2~LW*Sqlo`N0u zaA(~4sz){S&ZH>erU=ipE5W=~3=;ND9i^Q?r}dwzNj&vt=voe!fY_~Tbe{nzjn zfRUPQNa=5v7JdGv6o8RE;UzK0iKWn{M#*?{r3=#4r_f=fo1K_e9CrD~%ucIqoqO9Y zn6rd8Y8I^6@0hAog(-mPn zM7&w$Ul`L{aYyYiHl~|1#oDNt__7H>XXlX=3w^9kgiX&*&?_(VMGi>VphRnP&llEP z{lATlmO9ef$V~kZ&eQzI4JR~A@NjXZt){(Yj|ukbLdYAnhUe+*e4K(PuBaM9*-o0r zQ`@7o_w6r%vD&zIM_8)8M_Xm|w4kCrAL0R5)ESrELnF^p?SD3G;kQwr^3}D14!^4$ zCb#^C-6{Hx;gE{A`|rs$3cFVH#SqD}C3^aqfrz28$^7hX|4|I9iI=;Dum;LB4@jUk&1 z=rz56vzP0~-AA8-o(~nncr3{dc^avEMzaEnmf75Xf_zXRpYAf2L6sNw((FOp|0pg`~C0A$g4ANqB22%Uw*@u zCN?w|A0215Y!P1(;YGV;QXXqodlGB0uDyQc{0 z)?nw+TOK}$*I&za5yED`*s~pyd~AJoEK@Qn+}Ml&Q^iJ*4S64sakiStbFN{No>0QrAyGxNFTV@K~-;Qc7cOIWx 
zfgoJ&SyM()kck)`!)F>_t1VCo+mdiHW~RxG=T!`c7VHiq-Jm$8Y@k(d*>v+AqhwtS zxXgwwon2|i6if#dF`w&n$&_%s^)SG>nV7{DmSAIczQrrN8^2b61NAsn8%46}uv{vx zM2JdWp>K%rIc|K7l-LM9A*w*?-FY0JDk0j6G@+;!{+f{#PVVtGuf_o!zk8DdizIen{t2)<+3sOjjS`Mw2$4ExOii zfwju}4(==*F4PhCqJFR%W15of{sZ|%>MK7J_cKusnPzuKeO@x@Hdsaa*skw*BM|fw z#|ONE&#ogYec%C^77h+m_f1UZMv5Eq-PoLSn5B>Nen^y3$}u~p@bJr27{2|=b?yK7 z!s`rVo}@4oGpTw}w;$0$=;`m?TO^^(1V4Fq=b71GbR1;fbvk}xFZ+O$dey#7&-Lb) zIx4y+K>hr+{WSpH+?eJ(1CsVwvC7gV< z{##sFwd7tU#ZR!X-Z+1fU#D<$aj{!~atcL;IZ(o@-K;Lt`?#823~ZA8UonIlv9x@u zgEc=)D>JUQ9Pdf1idX;~KrdV0y@US#STRa6;ZDiTt~=qDQDo(lCB^%Atm4 zjhzvS&8v1hwRbGKq!$jE2#lTmOdY{I$seu+T{7zK_O?|D?1Nhxado0GxUDJ#l-%-$ ztNN!u zXemSW44|@b)uAE3TDM5A*8*y?%s@&-l}hsC@3{#Z`~B*dx+nIku`fsjC=HzM`Cuc+{A_E3JNKR0b@V4rFug+jv-J(c9m6x~rF)AA zXhcU~fdEy&k%p}C_5E@F3P7T~PUU*c0P4U0=T>Am%` z6DQ6kEgcR-4k8`E2FzMKLER%TAKE(bLmI}^G}q|A9#BIw0qYD$0f2BsbP}qOW9Wy&%bt57z zJ}c7IWdRB`5zU)&8P04I1^FW6mD*_pt+C|~Nd-NS>q_svsr3I^1a-bCl&?KCALV`h zSYstCWF)&Lg3Eo9i_JU;H3CtL5h-Rl;EIdAW)o~V+aQhv|E{ds@wfL_oG zV(@kcE57%tWFoZL9_gj)(uHPTR6OU!J@+TS6!R+wkuL+#&k8v@NIoB;7V3A+B-x5H zvf2M&Wxl56h*YB}N2#C1+#XE8bhAW~{Y>;J?|UVQ!LZ*_xhiup34}QpjnM72(ay+$ zs^Nhwk&>O6$piAald4H|?r}5AW=A)DWx1|d;%x2(5R@&aMcCY#w}DCBa%b--?{FHI zO+&r9CeuVby-j}xcQ1k+U^inYEeh_=f|kM=m{mCOo}S^ODA0P;3fifarQ~0jY7H_^ zU$}I;!YV=2+x*ioI`PKPR>qazbQjD_R27Di;g$55Xro+!n+9_N$8#p?p@0r>}3WT76yv^0MgZ#k6rF^sn1!{m_G3eVy!AV>2amX3rq1i&qpO#4FAX zbBc7`6&yqhWyZ?E>N_y@aIPmSI5;>XUo4Lq@>Nw#jUfzgN+?SHy6ntoyoBXVQXnCX z(hS8R+}i@3L+Xt5bDd?w(tTee4^~LlEwg>nY0LkfXf!o9$IN^WpL&rs?{EDJIe-y( zjUxYmHYyDJ*b=ISi}rgwwK$pK#xw0z%z_RlCu)p7UaY>+<65l~^Cw6oV!DJsHH30z zh(eiBi!gk*bo+Uv6r|D(hx;h=#{2}D zEQnG`i=`U=)yue!^4;2U8||l5N>@~!{P2mG%?JS71K6AN3lPzZu*-QGom8vUM(3m} z*s3OrtY&`;w1oLIx_n6D9z;1NTA!u@v=Zxo{8H&R6&&dMLMc7ct$8-i=KuQAad)PS zrO=?REThg3JK4~*<(_&2_1(QXEd<0K6D=X~(LNX9>qyuelH%eseJKJfww4Ekpfl&- zcsjb6cuz<@)K<;_5nv02g=TNXRMphffZi|R8B8)yfjZoyU`#>I+dNSdbWE)tsa1w` z0R!mn+4}!IdZ5Vpte0R9C96(PPb5}Qg?z@<=_yXHqT$4V!HPAtxHkDpw-+et`{q?c zGDVFqhqLrWAM+Osxg;D36Os>7hdn7@VsgUI<`m`|dS58{87ZL1fmrOYeHMi_wBc)t zeC0Yj0_Scdx0&<%PaDe~eZt7(UzSD`nE#OZz*JDH;sz0g7I6yluG;kv^Xsrcc($nA zyaYWcgs7L#0}_(-!dWi1CmHFGFqTiLkh}+yqGe(7!x$V9F%x=p6tus5?dls=xaY-) zL3P^Gd>3w5yH&EDv92;-=wUP%l=xsjYXt`!uGd@(IU4uWYE?XD`T6+em^SH!=U8-M zg?J^^I-07Eec_u%q=}_ercX52?Q}QXtLZ-%T^PjItpF^*6i`{-3!RfR`mW{;fG-@` z9-$N~E7M)xQz+}jxy?B{UorG>GK-wbw0T!S%DP6Th4u8yY35~jJG~d9i$2d-W7)DmQJKW zgFQuc9%<($EoJO=tKO^VV*J_O-#5U!Q3P6Tn3yEj-+t39UR4v-i7{Mz7Bljgn$UZi zpH9F(DtC*yV_r=aVC-vjy-XIM`^zHkI@S|?$wIp7z7*s}8gPJ@a;GHpAO{L4E}<1i zpvkp4JrfyvFnPobQQbRmt_TeAKUZMQK8ul(_jS7jK{&yHw)}}EB0$NA(w4S2?(VCV z;ccv1vvs6e1~NXjld4AFSGNLQzt1mRRka;md3qNn7+FRIW1BQpkFLS~kW11ga&oU< zo4~z&1NYJ<_l?+A=H@{Db!)W+UXBBh&R)d}D#5higTwKLn$?JP5SReJ@Y-$ivu}OK z<3E4rBTlw`uWC}rC74l=BR#5nO5JRc>b;i}Hly{)70fujNxyy(Qo<3lY5m1Lrt}|LG<{FV*db zOXhn)lo`>Pz@5$cb)Z58{Y@=S;RYYsh1a2QBSNZc0FEZCRV4jDstufKUKb^~F}Z`3ROs>51=s5{f0^VOh>su3JaTHl{pY@D1^^=RRE zTCqI&bE1jU{j6iPh25zc-_XP9Q!PjTG-JN*(*L9|INzT55<;eA3f2E%y8xSJo{jy9oL|!)>gtSZ+ zBq;lyd;=Ye7yE_2Z|u@C=`(IQ(XhVrGh92I`h&RakRIva7bmy$WVB?YdfA}Rjj|gL zWZ3E6sRb_9Ir0}iY?0LyZIR@ZhP{Sy_pROi-R+H>(+^j5Mm<1!RAi!2M!m}I&DSIbgKGh%Zy z7^|~)YXZ@#y1KZ^JCNGwf-|b(jvFT)8oRaKR zMpXu`MyOMYD=oYlo3ZtvM44>$5%IH*hdWoG69!AK0Q(+*NzRxDk@nYZ^6fm_gm1^O z%TEEI7440vauI?x8;rcdTZ=!xA@>9G2WSKNB=Rnb0&rXKJ%b_6>*SPpuuljy%6iLI z_W)H64%6(G>s7nVX%Z!_%sh;e=L1qaAAd3}dRv(G=;>Z|<(|0vkTn4G|z%GbK7bIPLT(0RA)QerOf$fu2i-x&(% zzA`Y2`L`EtThzR9FD7GF)4P_=cIU}^65yl$L1MvX(gjOAwt}oy5WOgg#upzJPK80$ zWb@_k4LEn3zZ#A{`~sN@ddmu_<@oFL&hb7JdUpF@VC87Za&kM4^FR8F>vu}o976V= zgzO?CnE~9HcZ-$6 
z9*L(y|6o@931!XR7{(xUe2B`r9N<-ZGFy(Xuvfo9`sU~du3%Z}=|{oW{9(uS;JeWDKksi3a_5As{E@WS)PM;QDq|IjS<@!=Ek| zkg?m|Ui{f!#IzF>rG0&UFet)P8BH4*58bm!GH8ja?=3f8p^ zRnOwBl|?~hSvv2@ICvW<8x3+dB&2@JkvPjY_HR6-AtQfr+dIf;hqBYQ+CpkPXtMLC9c_{XD9mWOiy~EE=3fI%;G>=L z{CdgRFKWiAn;(AZF4ujk!{hS@#wRA2rx?AAouF#!@zoWPm2hA)PVl~^kpDfLgegcF z=BWF`3dlMGretx_LK9~;5KN$5b#51CvULj3szD_d`-3Fp$mLxaHn#`(`iZQ= zMe>j#-ARGsG>aZY_~)_5)ii?pw;44mI<0zfZ$jv<>nnP$*KP6++SR&HXi$O$OYpI~ zmI@lkvm;kyKdFl%^kO|^#YcO@wwos(*PK0XBU8nr8q-#JYewE~udVW!T(~w+5hChbZ+IG8&y301M~gmJkVw{yIR^Rtv|)a_ z*@foxEp@a9ZqDg~(qov@<1+Q0WY|^N*oEdszt%_LVyt%eE5hb5omx$x*(iyJKP z9(?!vS8P0YzbWWF>ay`v9IB6HxD?vOnRmu|>UAXIj={b>#~Z+0`4c(L`Db`?e17{` zlr2XT50&S{wrikSt20{;7Y`>7+6L+~Qr2Iwv3RDgD@s6ad4>3E#6DCJZJpm%4BFe> zUu@g=7I=?~!*wece1CG-|6(V)WxF_p5!cye4>-pKDE+9Lh+ow% zA}gyIxX2k7Yv=ja$ADcR*4QjE?@}LHySEj3v??{~BGg#L1;0!>?Tl@_z!gl1YAuI!SJT-tV~lIgtFHziV_ zUvlhiKU1X@=53G`*GFqZC|6chf=7MM;ktp16ZlQ?K516xduwo4QPEb*H^d(Xuh0S5 zPJWcQ3Z~vFY20uCtAMVQ_X+)5X;NkR8fi57ZSHv&H3C{W@Hn*+q9F}zkR8@F<15JV z^X!SGeEQ~CJnPLTicrOI~kxdf@tZ1)o}auZVG0=Ij^ z-*|Gry?3*ivHi^yEHPuU*g4Jm9-ouH+=|{@t%!Ouo5KQ4sQL)ytnk#UOYrN`o2mW_ zXNcbukbL&pVBgaCzf-sC=fOpQd9WXey z+MDrM>MEA3!oL+Izw0$&+GOU0*s=heHkb5cbNs2>kfN@<5 zq#|lF6_lEcD7f64P^ak4a09$}+4T9v#Xo;MV|o)f0iu}TCA(pC2B73N8YchB9sW&= z+)E3fkTib^l&22lel8Xj)TO#wi3i8QhuO0k61}XVDxS6l@x->p=&P?bg5UOHk2+GH zP7fV=D7lZ1mwuV5|NTd!=|OOtoKPI+`yksEdttKN>n!6)ewk+@SMm>8H#2H~5xTeX(Vun#`#%O(@w!^j(fFzVUyE9SRbO6`!+XGIF7S_9{|*S#r&9)z z+N2#%jf`aRqN^{9FF(_ar8v_@A_%yb*z_D_qSVgj=A56}Y&<-=eQn)IU`McAKwVqd5PE4-*<@>x7c7lU_e<3Y^No zrE*R*mh#?oH->&8Op?EgS=+df=Nsok8$RbhT2^#)-|sqx2E9c2_^V%0i4KTFHi90C z2W9uCdqg%*7k?dpIj@9V-RlxpWZdrK1>?>fbu5?#UR26Dg~7nerkLAsT&|i&r;I^Q zyPTv1q!YyWr9`$<-nWQ62rzFJ`IYQ>TpJE6Of|@*KB-bxlE3HuH;gJ58ToQ%?t1=Wapi_i3li)4wDkQb+_=#Bn738tlRKFiCWJc$?kztJ#o>H$ffY2mqSn=e zot+(;i^kzT|FymOOCB`ptj1j(C?>5QI;4(mQldhK^b}j%4U#vvS`Aty1#LVy`8A%L zQ{d9KEb$E%Att9p*Ul(SzT^+{8fSe5m3<;^+2J*+>Z~U+7o{h9`Qas-B?e{Y&!)I+ zg#yW%6Ou0V!s6GrIqQqOxPpY*q@JI^9Q5q8HzCSxK0({Q!AGA8*5x_XQCiwr`82iyMqS!8Fz%3M3# z24YFm`lhji*|btx_2jZU67k=Edd7n6h6p+Z^2Lbqb8<4ulkgJnUQ^>@9pMlZu+L+7 zhc)QTkLJlor>8Ur+B~bt;IM2{o@tuLXLT_T8GC{A%EK31gxz>Hq>YxW%fUS{(M9t& z9A?@CWQ|IAgX$@<@b~M&$zH-(C+uIfm?A?elSQ2c(TELn^)${EATSRTBc(vh0onhq z0!Vgm2_0o8A9svl`y4i$T9J^zBEY0e@hi2yA|!vyFb2`_cHO;VZ1Sho3=3$X*RGu= zROp*Ul@}DsAJdO{Wu5K|Znmk01k|J05!}7#m`#xz`Z~|?lE1p@7I39yfAVcVLni-90Y~&*7E33=g-5#b<{-cmavI905t+ zs;L1j=#}M$v-GK*MTOD21YTBwxWzR^lM4$z_l2ET|C%(9Apuz7;6N^9i1Yj4Mw*d_ zFq92)v`Cu_-WFi;6#nLMvGsPcIXaMcxXee?R)71deO=-s}F1F2plbUdwo}P@ng`z3#;1PAit1+ zV1JwWGDYl{@U4(HbxAqhk5RWrYlYg96-N|}%nbmm?5jMEP<#=QSNqMmRowZ*lFmm} zIJR(EgVEy;x(~WUWj|nn%3GIb4g<)c`Vb0E#>nbQB~n43yP{W7u2(VU1>cHHFCd_& z8B*8S{<~q~2?x#BI`X==hKOm85luxb9*_~pW5iYZa<1eH&bkl5z9Se~wHwF=7?KU< zm2kA`w0@Gn|HeB3!BGg-)h3UG7_l6G>vZMG;4L~>oPwHpc0&EwW956BPk=T3zj$S( z%r3nCs50QF=u?ml2X=R=OkQW%dF!-;J7_+4B;C>x;di#tt(B~P?BZQUHEkIg2T$dA zR&9N^{eFzxfE-smS}`}}_ig6Avblo1sMjL^oJR6MGtvbT|3VSK;g#?F#eYz$O) zx_3$5y8JgKIi)^ z=ztekC+Mf!42(flrhm`1ui~QTSl+l8v5%ZuV^!t5&I7j{-&i6py&L$&klM_dIa17f z#QW<{Vv6;&%)I%`m*m7VquOxvVY*aXz^)=EF#6re$T{@7cpD`dZ*!)s@x-caxAg7i z-KG8pU*B<3`0*sQ*a6cyn`hV#m+Y`f;#5;48CrVEB z=~*55%Tm!1AQ4V^y)OS5KE5k12#!F=82&>p_CkY^vzJB7<&(=%kap4uw+qT7HAcEm z>UGgpk_O~f9!Xs`gae=LqL;gRmpJz%9>{}${ATlY8OaaeZA@O9?23!M@dW|+Na(!N z&h|8Ay*Il2r0S9N3Jfex{p4>K;gy=)am9lRp*_5nj?0|yR`-b2%T33Tl|<*$2?`YCs>f5e z(Vr7P^;w~1q=W=tQ2Y;^TS9#oLXra}qx54832(jMjy$~JHkzL<(ZBJ-jW%dOy`pAQ zbUmAqYBXt0|Iwo$VVjNbbFP@i+%t)E5+23!<5%Jk`E>fj{sn%BE-4Z$U`71<^Z`}k zQEPc$oBSs?#{B%#iZN-g=^m+v&vH4K`>VC(DM-taubmz7ez_0-su3kp4KF3d(ENzUKfx&e|j6fArTf@%fr 
z5}0iJ0Q?0ood#y%6i_%>^@q^glCns#e6<0bY)4?JM`V5H@a%{7WpUr{0sx`*l8;Gy z=#G5Ua4Zs&+#&8e5NnHf&!W2+^WCRxnRib-}P_)OscK9t(8d<%z!=`f4l_PMB7qGe?} z`9{{C_$?AlKcNH6VUmBAxe6fu#LqJRg=mdJI{Fp^a`DDV~T3X_B1<`&81;k zHH{kvA_lH~jPiEC;udAgO~4l3{I(O+Oj1yz3%MZIm|?Fw|JSX@=k}R`SyFO0kX~(% znS_FRDe^VSFAxCceJMUg4TkF*Pc+htgw4HU9-vc||775i>03`UOuSwxAgqbr4Rz=2^p?2npUurMecG~f>b!$mv&d`6;O=0x3=aAX?b^f6DTq}$xS`JGVC>Rg z^oT}WZm4#h`GAh(CwKlRGg%l$RCB9(k`O-eb$%?y3w%G;g$ z&V;pZn|_qeUpjq`5z25I!b^?x4N>OYfH4=v^`0*TE73J4EYjI$rkss~qjf|kg6)fW z>8*_pK|_@HScesRnNNsI|K|@J51z18ELi3EWP0T(Z+AhNnAsP1qWHDZ7tYV2KeLPG z#>z7@-^vd;=pP5OmHvOEaQh4#`831;=BN|5_BUvEV|tEKaOvXWF860a($40bX1}@~ zr0jvdK#{yyl5`b^pt^9xXxq_dTjcA)p3pw3ASM9;c&HbnNU z^C_DdRs_LD(#sO0Ku<@HJ(v9LxIb(APc)h(e@oe1Z#aez7mRVCJ$+?}s7feNGI7i< z4Xww2epD(|Tqm1+|MU*&okFn-y_<~xDi5b_!uRM znv4ScLzN2Bh{EE-GmK#k=adG!&uTZ0>@q6zeVZhksF1Errme46R)fDtAgMn_>{&h; z^Rq}-iGC0}Go!emG{|Bs4X4&h*HPI~3eJ&$~^`{LcJ+ZD;^9wCdIB_-aUN;nYcUn65W`5@?9(!4BEy{w!zQfb6U zE+!$Yg#xQ!e9@DuM4UE@ei*NdSy&-e5}sGmsgIb;@qt0?d1BQH`Mu9Q(Y8xcYCILJa-2<# z=_UmVq*gBY#nwb6rq5>0F?Tv>Ow*g2o1SXJnE2H6B!Bft(gc8tuFbpA9&;&^kz~QI zJiYgo_1R_9F52XMNWN3~`~~9&D!G$=Zuf=Xo%YZP^HN97mVJU&G;)pe?QhN-41c z3Xm@e(Ki@E@TSKL!KSv{WdBueQL3auN2_K!*^EM8>lioNP4@z_{10y%y#}&jOfbNU2YW~2d7oMd^qqSk1A)g`}&@$8+Nkp5`aIU zRvY2jQ~WXWXm9B;3)MJNf{o$v5B_$US@#S9pN|#Q=mTsEnRGz@01UR_(~s7dx(aAgvSh4EwJ*R~yY##@56IwzZqj}61AgLgPu zGU@6`m`z@a6i*IM;pbKY7yh`6p+i#t)77&kNnQ5R?A*$y5^k_XHe8;vXF>^nEfpC=T<=e(~D z?c{lXmb966b$WQG{fm{9o~vQ%OoWN@G5z zZrJ37T`~v8@NfBI%kKX;I`2TLzyFV4vzw8TgoY7)x;EL#yh>zd-i+*VZCPcN zy(*jQiqs|COXwOIx6rk(bh*k6$++nzbd&7g`TqLPUm5ql?>Vp6^Z9t_mO4)d>^_w_ zljJ*zCc55aE^B4v6R&7-DpNr`Zc^SZJ7 ztYS*n=s&Bs%YvwCYhAK^ivE9-&sRqe9d6DEdu*w|FfR~`%}aSaHMbDo4rb3u*fnT+ zT^*hh`50k}_Yip_mMfNhmz(M&|Esx_4zHNCaoxC(clmbb z`6Z$lXUWw2t}1!0<+~F;eBO>mEC%qm_}){yhBmnq%XSS>uFu6SJmtiIih}n;^BD5j zH?JRJPlC++ZP4DwHwMAi9>j|IEP;i{`FAIBJcZHl3+agkH zf8SC{iYRFKeCOWxOsq44b#TObMe|A<5*3LCt-Yzji2Y$_U` zq(y4Q8X98kg{z_87k+jn9F6UzsYiJlC$hEs8BA|GEi5nFfVCShm%W~VvS(xetclr! 
zU%eYU5#4ia@<9!pPVJGt?@qJ)V6ayQtVaBEmSRlLBlI11w24c<9abCfj8)VcMab`J zzN8bD3_N~^Xl3{L8L8PGk3Y5Wof@2&$YC$Bh$dfVg)H%lv|jwgKZR%yOMM8rqeRPQ zoms!`3xUy;&GMt3%?F{og=;W@iyDNYl0Y|)s|8x)rZ@SLJ6rvzl%2{q;^$USQ)99^ zpQ`;t&;M%c#I;o~DVrH)-_Qd~>A9X5hoa@>WiN9j%u63AKoB_K(5;+mQQ|BMqP=8Hsf)4*&N6!dgW*|lQCv~2Bds1 zbCKs(#O19RGi!(kEs@2mQv1UU98O70^)%$Arad##uiNh=PO@9fKo0X`$7|46SyDpz z0wVR~O^ne={9*{KX76-12*=Jnm|;=O|6HqpI4;lysA*#T`{-T_4eq*lkq*?M<>nRVb-ORbGFEV$&z`tFkdos25p|z8_TD|0lVdp@-Jhclyj?DiZ85PL zd ztK*NwB?29l)cddCL(znAJ5qm=UuS2p`73CxfN};?d?{J z5d(QMz}gleu|WFUpD49OwMi(B14woq9y|on_VzxgNK}2gGlY>q^4x+Iri0Tfl({nh z5!VVjj9t&>Jx3Msls8fiDyE0S7brh^u2Y5aHsV$}Ep$90+&c53d=n)v&wr$&AOQDS zjoP|4X?t;*Un8sFR(xFCRlw}|ERe4?^yAHkEA*CPxVGz9Yax}2K;g6zaRVQ*m1EOw&|CUzZfmn%YrKIpuMs-;~FwYh^XngvL zHcaGl&t?jNOBP@0WauPUzR4D9fi}K6liTA9)b-BODmjQc{kKCM-EE=t=>>nNVP+&j#J+l4|br|TM|3-$9~ zI>1d*3OyyvWdD0BZ6GUC%XZD#Eg2r^%q8F~=9J?=zjlc}M&&)uY+q@*nst6?xA5C( z+J6#G)+&_x?sN@g7G6n8@_C(-Rd-i<&*Uy=y`68>9LM)Cs>3#xofl(r~Vwt-c;z5 zH?2Ekz-j_23L_u>Axs`EE^It}*yE@FR*RO?&C$ngz3e%4USGsALRd2d!>6#w!N;`P z!sXE|;naK29{YrKxjDx9hvS_s0jC|6bL+8FU!5`$mPZj8S0!l?_76aG0bXTp2^-6lw!w@eMdFD4u@V|Rhr*^62F z()#ZHF_!zm@aU(hF{o4qaEKvi{Zk9fMMMfo4nYTMQCQFK>YTq_@cTkI+ z;F5AMp~u;MirkC+TnFCd?U#S6tp#b_s>zpX7H_Cr`iP2oB%T~dXd(ga$_cXKBZ-f# zvym;KbG`k>pH!VQq~!SOcoUJ#!2Ecb5hD}^LPw*enj} z`Crln33GIKXhU=QuejbkQxg8A@7$H(a@P5~&Wq)B-N@mazTG^5RIQp7;={LdS0rq@ z&Z<&@cJfYha(c&7NuJV04ECCLM4_ z3(?^rtGxHpH$dW`qhxfLx68ZZfs~#vmJejkc$9t6q;(-nwl>nFRWUCkn08YbQnS_m zkY|wTA6*^|qqZ!uwvo@I^6&k(F<0!4W(@m&cX~V`A*+c~{Xi61M_**9m>EhZ!KLv_ zV9e}fD*rjjM^JAe&xn;3-GUcnvJ9JadeVaC40n$3PZYB>f;QQRJ3PK{&YKHG8rEY^ z&MCUx@Zael+^N&XnH-79-rciFGn}BWIr;A7(54I0mqyw268(f@w2DVwi+Nvrhu~y(kc-rx?By?aF+nj;a@u3wUlwG8 zTEgRZTPhCHj{b?xB`~Hyu;LT8P$o&=APd?>^5(s3;E?;?g@gC*v)ji`LX@Gn%SH#h z(L3W2^oQd=KSfyMIHz|P?>>`NI5WMqU0gqTZHb{k*uN_PeoJz@Us{Ov8Q7CI+bO79 zCL`Jww8D)pZ)2A95Yq9t8G0e+R{xMs(wv?q(#u(Ru}z~9IT{y|(oZLpdw5*>GN zY>+>5+hxwMtyrz0NK^48bg_^jRIj8XpslPS5Gb;L@BaarOKe2&oOVDP)ml0n7$BX~ z(v^thO_fI%_&^kVJMhX0vhD%-=w3{mL2f7YHZ2Zp_6I`IMd)diyxdDEX^c>k=U{H| z!}+z=K&rn%thg5zdZ{kU?6CUNdlAfJvnaVn6E@CcLhR7{456*`WXy=f?`F`9Pi8J$ZTMmI|3cU&r(QhE1eRlN+@iHoO;$*akUB+YVek|zBu z^o4`F3FEadBOfS$tP=+`M`VHR5xmsi9rGAfu`oO*zu(4+P@F3U z#f))Yj~FVWg))4h%hoNu(!0N-*UKYzC+s}DsI47K%U!HSB2p657oC38OeZNNUEM@oZ>9piZZgi(JgNw&9J(%d2f&(g6*!%tQPXf zRvoS9aCuepU)7<{d4SUg@Krvs-Lq>As+l1-den)$mhvgnRmyx5N(q{o z)A(v;OWc>OS&M|fmqlkn(P@64{v1@(cY_#vy8GX;)Y$-iiuOe&_DjarVffMPWXWsr zL8m8I%dI(T^5k7!H#ogqXtzgiNRPBPbQBL7I{I8rbn?}An)g|5^o$$$`g?{icTOnT zRIY>4Z2h)tMLf)l`_Jta7|FwY)s*ymIJmxKc-8H-JC~TupC*wRK1&A^&`^5=?qkZ)%vil_T4AMP&SO03)`S9A= z>fo|gEm4n2^$YJ=ON4J`^6|N66HCae?Xq=tG87>KH5`YrQL?1xg{cwI7PLH5YU_db zNs)OKGxLt)|M9I-8z!p)Ze=S?f7~vA9#unL-CD+12V^^ksZlr*&l^I0xkQi(H)<+t zok0`Qv8b#pj@k;I5~~ivzk=2cP7-eOMC^w+xLnX`l&Hex13YaPTI%AdG`_ zMgs0j^(GJi6uykGTkkq*x(h6Ai<^2x2PCR`id+mmcVDdg2>!tQ*yJM5prv`t&F3G*Ig*#q6r^sYoumIV##V2sHA&M?+$X!KpiWfy>Sr7t zQ{d$o?d?6c2I4`WSh%S|lu9xtIS%>q$veR-k8|^{6y%5_1D8?O%~OEV7ilu_K>Q;_ z6`Auhr4;<=#YQDL&l+Y^FM^-;GKY`yO?67I0P{)19w>WVQLk(Z9Q)z9qhr3>9* z;xjw{L}oD}hMjVi_AoeCiG8)s{Uu&vNTmLSW15LXo)vR|$n^tGv0U*ay~@L8TaFrY z2t!KvuCvXbJ!Wg=%g`2*>1)!Zt?Rzwq}~9mRr99n(K1hCjawU?0NTO{M^Oa%BIwwT z##0MglIalVKF>w?k0I!{82O8Jc2#e}Ozo_6$ndGn+pqNwU}Hq<^}{i~{jp;9kh z_O?%6EF}^;%yBQ?9Gyr1^CtE{>m$^`3R-Kc?^5nAqVJ4!tX3jc3&L=+Z&jCtd~lXd-+FC_(pAh(w)B_y*OtZ8ol8d0w*(Wvo`dEq zCB96Qg-lu_VP+d^eMwCe$1>}gKXKFu;H)|iRvi0Q(vKHe!9;{v&zCw@eJs(*pX68S zGxqqB=JYTHAt9OMncbi&VQm_SvQv1fpr8Qs*I_V6IO+qF?7N-WNIH7r?H9n<_X?W% z8`-9mI>>&;KpKw>p4a{oyvt3iYB~)XUCSF)IwU_4|JuawXnJRlA1crOLJSojT58n} 
ztn>9MAGJZTvwHnDyBIUAJRPzzyY{OrxaVng(+mD^RSGbE-t8R2kmxoZoKZGy;+>9+ANTA+`fBZ?;x#j3cGop=kh7FTFj0WQx>pJe(HLrZF!K==17ulhCBrasAHvLJ|-@^r+n*HBlmV&n&S!zv= zDRG_z^-TcA(*%qA>Vo#)Z@AA~U{02OsdmOd?-a`gz1l)9=Tm;rJJ{vn5b-DJ#>s9u zDo&3DB*xy}wU2#_G=d=JPvXl(?5=i06Q78$Qv`xL?uOxfm|H?ATGx%2t$!PsY&SIX zi5vd71gMy7rEg{y-sf~@p3a3@*{Y@58PHmu#GV@CY;lla?ULoV2XADII@~t3y7I|O zGwF>W)OYIh>5`(s!s$s>F`D~?vfaYR?b$J~8l9cf{W3o{3zPk`3MlK3MO`a+`V;}Q zbWayhT|q8pSs~#s-StSy>fo?b_e4$c9`ztb<7nexyKTE{FRil`JZ8fiPF-7ZcVsnT zW34}ch|wQ5$1A?}hA8|zU2DvKe0d)3{nhJEoc`U_Eq$hyLdQIXqqrEd%DG#R$sj3a zYt!Yd?kA;=c8@!4NSb5h5&?I9wN#t%OU*rGlQh57RUaiD{>7=Co}Rhl4AXQ6Bn(0k zGB6}UN~LN@>Xa0;yYz9UuI&}(DlMAO=AW?tE)g7%t@PfQ z5-=k#A1ajUrok+P1o6~#JB5GPPz@)X>NXE({R%?H`#K6_A5!q*$!3JBX{{T!e^w^Kqs3-IP3ahDTbz4kVhtf=9W6s07m zdiv$Y#>PsG?%t~C(VsP*`2-7i$C8_ne}gj>AN7vk%S|t6CbDx4|6aOAQjF*z8k@aI zuCY-1k5z?I)nN5P{_F;+TZf=nvbRU|a(q;C0n+)ETe!Qz|7W-L3C&_#K_4!$mF9a- zV|}}}DJ2ZWn`C(Uv*QxOrwUbBdI}K8X*~SJDGrRU*nc!9o2(aG>RQfWB>U2a7Qxj# z`z$(trK(DYcMe2KGHxWB$shnlysscj)|NA! zhX?ohcK)p1xFy)VZAR5V;_$}b_c?4N+xuTXzh2HC0QWrWXlierR$3pV28G&8 zUx-NIeUQ#O(;7}ew$%|oyh;~;e!V`lUS;?4Pkayp>*}p|9t^Hw*94XPFEEKqdaOws ze{hAfSAH)}Ek0+?qF2Xm=Q>IX(frFay=!jxg4b{AqP1-Uv*5%V^PJ-sYppRqgzHo~ z4La$@A2Ljr4F6FHdu4>9v;?rF8jC0VXUO0$JO_d{eMkEhNB@90P{CsSx<^?f*x*+j zkrQ^aDiXR!6&U|QpXvzZx86REGxgIdzqxVFe)#REm=iYj8IwVZNquePEdB#lxo})$ zN8$IXE%XgZx?L7aU%(MriY_T~@wCKM@eeKMZ3RAtKv(FiGY;c5Tp#5_gDmC!P_hRY$iZ?L@dG(O` zyfKHC*#K0vm40h$sE7xjQZy3VLaB|j6+edsdGO_evg(eG?zp|bpiB{W^=!v)jj_j! zMeUr<* z%YxZ@FSBI7Ye4>Cz5W(K*S5&?&3IK5r#S6!n&w1(O>*SsoLHE^J5O(URxY8#TvpnF z;MewAfpsHC9YQscopR`&?B(bVk}@f0-&)GE$(ObgMUbyu(6=%%OT5|;*%4+5N=J3z z)?%?{RyyAP0mSJ+IAlW1Qu#n72f)(~Mn=f?>jZ=TgZKgPOZ0oD>aSFiD1PBukr(B> zs!w@5qC6_}21C0z4|sw1=CoKOOwq2cz)NK|JrXqCirf9xh^9n#o=er&`v~yVppxo? zGt(SrWFqJZ-Z}evEP`CZdJ{s<&8(32Y7vb%=Ek(_H?tia!sWtdP9yG+HV5vi6yEFmhXl~ek###XF&&h@soL+5Z>C*!1tz8 zPn#M}GLHrnNH?4FNY&G5g*pOE6T+{1HD44-Bl!FT+bPq7*9vwf2-sW+sC}gD&onCX5?6xF!MmFh=;U*QAM^m zBtKmGJpfnuF$p?pWROs|7wj62Qk)|?Y}t2uUIWz-K$rx#@FGMPX`2N-MYL6QopCHAn`v z?H!zloEXTDl)4hU$gjHaZTE&-1GzxOyk#Be0{#AAu5CC9_JbCNt+=BO ziuTj)&>_tdKmgR$_ZrqTC&wahxA0CVk7MkZlul2mL6-6c{BFXwK#?nolNpFxz{SCg zT&MZQZn)znW3Z6gikwlSo%Q&bGOEQ{JovJVTGr~voeNQ5D1*Sm_y~WEpSk9P%6mon zFYJQ4j=k_I)vtg5#9waIoy)Hx3$j%3XC0IVkdns~p}Cv!s+=;)0Xb~&?)$Kd7mM#q zW%-|Ls56L28d9x@H)7}%uemr`x^o4nl^w(@FsDr$4iDX*|5s`1gL4`N<@q~ses2k- zmw`fnjkd=tqAXC%2EnRg1m2HMWAiy5wy~=!Ul8`$I{!vw3gXZm1b^w^=6pfzYx5`* zKjL}L;zO5_@&1^{K#{0gP?+Zxs5Ms1)M|&)piR0sn|OP!Xx$_SCmd1JqN0cgfxc8H zX$VK3ijTpU`7itbo6sKpC?<4t-v>H`wJZ4y%a3^@9cw$k#`>t+3*$gt^v_SiH`~L* zlJ}gx;H%92A;hU}e#hud;l7lDzCzlbcArjP+R+M?u&_Rv|DT9p*k?lWj_LAyn(mT8 zYN#C0HX|qoBZv-)Kyi%7YZr^jkF5=4XB)>K z5~C%<2Mfzc7iR^cu8oHu&phY!8D3pn%x?l9*I$Q+V_o|i<}DDFKR^IH8xMMWPE^fgO* zyrO{RXJ-Qd|7lc|5s}aCn;e&0&O83@Nlbf!FN(DHwM&#}RYJX}k`wIYOKqKOzL{Sz z^VbQE zkQ$imET%n%CMTCsW(nQ*8y?x2Ds|(JI{?m9jym>tWrZSAW_{C-Qcr+r`V54+H`L_INy(YMreN*94X0>M zSKa)txQaPs-IO?|1B9mTI?eP_oiIbP+E zC1M{cjhQKVQo4zjIPCAUE=h9H7#d#T%(Erz__*{xtGhO=l$p^f#)-3;}a9!yPh>*qUAZLMP7e{CU+T|01@ymBa*Q9 zqrTCA?BJ0#Ih^hJKl;<*@8b2&26c86q%r(p_ULeNgudRYzU)J2zt7nEHBH$0ymsLC zG!gl^TE{4D3A?#PbjjCmX7JdLN17&f$ z_O2A>d-tW3S>bSGrPED#Yv652kO6tSyy@>om`3?y7_HYJof*uZ8|M<@&XYh&AX;Ru z>v`My9RI{ZNIHm7%K%4#O~YHS(gClh2GGC_5|$J^uaf*J`H88UAgeae*(N%Jekg!wVX!25LJ?)l%(CK1$z zt#|5Xrk9kYc<;&^Ux2^3Aqvz9=a~R8fN5gyEBdk0iN!&#GP8vH;ijoN@&7=Xv7g!V zXFP$fz82IX-V2%+3ukZr>;%0)kvvhI*n2u?TyWWkqP&!jupz00=-p3IXMDU+M7L;o zzdBDawddpmHz5-_&!5_6qswDuZNwDDD%d@e*U?c76&ga0Q#c*gtjM3r8}xT?Ht2ft z+)Qg=JEi)D62@BQG=(cHE&xxM;1w6<8`6AV0`I>fYVg7G2naU}Poi72HuBh6!sImX 
zivrA7E45r{B_tY$U(sgL!D^g#sy>Z@fo=*EZPIG&DT2jD`yZqx%OIAh7 zr5`W1vt>k?AzZd}>r?Jh9`N>py3x{x8kpC);CphGRZRxA-)}%zp&`!5KfcPI?aG$# zO|*`H35`m0?Cf8T*7ARqm8roiBOxYz`$iG3%G3m6`Auf^9YyEf@a`NG{SWqoeuvkSsPu8BZz6 z_g5S$c{}wGsqV~HCU!=T@!lg{tsj8o4xmPFuV3+2TTpb^P`(?p)!4Py$#)-d;c0ybGv!JUrWk z-Kj>bxSmpx4G4nkC(=|NE!MuMbAR9zX7c{NF4|m789iPo426podSU}R1ru3?_{z6$ z`{=(2eFQDVQ0fXhdkalFI%nGgFXxy#Ri!e&Z5A@s(V(p}y;z$OdfrrxyzNce0k7}S z>YxT<&~NC|MFr%Y zyOg(fv{>@%KtOE|A|%Ae#K+!~g#j^1>p8w0vIhE?o9~Pun}aGdE!na4O8316@7~nUj1pPZjm+#;hB`cceH(Y^yxa<5FQYnJDnG7og*`=$o zSS1pIRT4Q#w+58F6$B2?P;Lvn?nw;Qs36rfZjyF*ZS35!;>u1)k|T?kIq>-jTUncj z!knZ4^u3h=V!LTcPIqLcE-3=??C=iYg}CX%BmOCclN^(9OvbF$eiF*!z+Gw^RBUYk z$Fp6bwU*6-&{RtJu%0KkKaErTR~LQH1>fkIJ}6NYE>A0H5iEU(w_7;vkv-R7r)|*^ z+IW*avDXV_UlkcPruE@;N5lI|l|$*tP$E`|UltL-^m1!n501}ll@zPmZV06I1T#s| zY)WZv>7jh>AB^ehHhuL?sdnrhf!{t55+S@*6U)ub6ZDRvUvkpRqmMR5`2Crj9rC6tH@dtvP=S`mNLn4r~NEz)W8 zl{!BF8iK$6K8raTqo$>`0VYp4OXqA5;^LiVBH|8TUHS_S1Vlhka_c0->BUNM3Z3(d zg;WB3rmynU_34P0VRD#in@pQKkSW4fuh2bpLhe^UA}I+s$JV+@_zzdqh?8!~E$`+> z#7y5^vMtc>M_q?GIi`E2SlMw)z4N+)tqbq|q{6EACwOEk(+Ct9hlC)P3Qd6)jAzWd zGrUJX4C9M!E%r*1+=I&nKu@yWE7WbeUq#)AO%nx$shjCDuY;jWyO(K zDUzff1wO62@T3!c0PCxJG(uKU0I$@wpNeA)?3S=`2yNXuTQ;_Zu=?iWG8{_eh4xF4 z$Y;TI{~CT($V92oCIbs4N^$qaYCXl`4_Z(11{;!MP~Q0VLPT4c8DZ6Su-x?2Ga~If zl9Z)N*}H=^RKfr58pSt9oZ$N=@VLeU2jH(~49Km{Ck<)~JAQ6=_N?;`X?c{-2DD)) zQbh7jI#hy9Z&&R_0aGpY=eN8H*QX@f^UZkL^2V;#SiUOp3K+~>9J3(Xd<;wIOkn)Q z=>*p-BBz5BexD|!kL>R;KB$ewxBXY;hg!5QS%?-XaU*ZTs$)=lA9udWC0>^2RBpL= za#wBA+SJ5!IApa?s%p)<{>+rJLNC_#3gqm=Hi{p!%T1+XZ9qja)A>iavfiD@YXx=D zYo|~Q8F^HjPf^+5d!9GBzoJNB{rO7>a^k~m|MNpb-g?b z@@dEaI&68Y(<;t~OkcZvx$%4(e;F#pAgl%YB=#`iYu-vc?YW zB*u%^pZQO#vZOMA&YW1TJEkOXFlQBKzrpL2ALEQ~2u)6qrbx!NwWctj5)0Qpl25A#91#H&Jj7d76ziX5<&9 zTkT%%a0leurbfbc--x=dT$_4%U{{MBCi5}4=jq1XrF0(+sn2?weuu;TFW!+|-v7N1 z>V|1k#l4nlDpzIyquPEl)^=TtaR8!Q#kB&FP ze+4kM*PnH>eyRMy!)86ioZ+9+WgYOQ2Ns%gIyKxj-s!E;3L;Ei^(6Fi4Vt*1&231! z(f2AvSV*4vVnj$J?lt0Q#6Wn^t+hwZ>%vP;(Yc0ynU#n9oNIC+K5hq)Qe5+l{_@Q z=)>&`POY7|E%|{zfA*KqR^5(|5WC0Nalr=j5oTws<)4>ce_9>0vq=Ulr;!*Tk#)d1 z8Rg(!E->%~TYtqz;ZBbRSi}nu*sak!Ior=Z2Sv)tW!?=u9q@`sh@q;*0r+#*w&(Y| z+8kNp&)@9?lGOtLZ7dkd#}MZ4i$X2QxoM&3VyWVv<;Umpl_;Nv70oZ_$+0~exXjXh z|LCONCCqDFl#Ag9$!X?vY41;e%E~(T)`Cti@)!HQn)|7yz8ALfQ{JY)hfOPohZHTt)He+>QtD&F7b(^r=tbrF{DWAL8ox+V zg!9RVK1HD43b%2%-gFU(^jc2}Z(ZNZO0eVch2-)wi`T}Op0TjG{JGW4B*w8w;5n*z z=!w!bG_tcbDF`q{9m8OK8ob{lp>=82Jf%_K8l$iJK#Fu_1xf*JdLUBh4=g+U*w1C+X}C;ZLm%`fZx)_N<+{Kn{Bwo;vuF zY%cGWN4hfGJ-JZd<|4rY#oKfz)JP$}3PLDO`PUSho9&sQCSM;I#Nf43__+D%0xSgl zSp-uWWJ}}8$3)Q)o_?qz;6#^gS!q&g`IRmkFv#oZ#%r)X>(A*?fCxR{e*(d7r@jPZ zER9?|A|2vSO0G{Um!S*Uyw8*hD69VchC`%7BX+Bx`z2N;Tn_KvZBvpbZUW1pJ?noF z!{&+Oe5dZxN$|+*eebAXS<|vqkemw*yQ#VCBrTTvta@#?C+gsEGc@)psqCQ0)Fft; z#Pk(t;S&xTk5u3PJNlP)^r!9U*E*`f!cWP%ds6uXW3#sUhS+5rZd=I!E3VT|l@&}i zd!8b3FHUVxH2G?sjwfb6F#YhYZsfr!#;p)WOQuU8JDlQwm=iS@vtD@4_N->HtP|eW z*laz$eqgUT)Ojg0dgpSosX5;JV9_52W~ihZA)lN)R>)e1V;^JcZtST?=`ZoTO!_yJ zt&P$quTpJH+-TOC9w<4vf$AChC90?|BAs!r3}$jYz-#g$=)V># z48Mgv3^tOV6al#M;d|HpUPs2lCLmp4K<$~uij(*LI<>Vng5M3-I;^=$;zmu1@uIF( z=zy!MojiwJ<|KNg;_we{djTw?)#qak;r9ZiQ*LFBT&%Hr>>+rZyCA>~G= zI`9P!fo?w@&&~f#@e(?=re)=F4uVbR$hJ3nsq~8dfp1r1v&cP%E0X-GYf~td4;V^b z%QYX^;q$OS8aAkn#?ZuMLrl@7pF36AJaXPQy%ER0LlA}5ldwf1ibQGnwSLH;i_Hr; zS&{bq@H*MKj*_An?~YHwynf*9kNPG03mqY70o&Y{MHeLyj@{=p3cf+h4JFd-M zNK{SrQ_b_I0eG%V?s4{9G+rk;uZ5#gr{V!~Di@0f&R^&^=?W60Av-%;h}q=3THuM! 
[GIT binary patch payload for ldm/modules/image_degradation/utils/test.png (Bin 0 -> 441072 bytes): base85-encoded image data omitted — not human-readable]
z`nYeY!st^!z`Aip?p(Zm9&~`M1=n}-qKdu9(Dc@Y4dxG6hMfjGST{O4qEf5@6K2&!RT>@_i}A8iqtng&Ry{(tCT#wY0Xw!!&yn8WScm)O1b zoN~wV6$A|Xut3eEU4j#@HAD!dpJ%)0 z1$yYN$`CEy%Sc|kr;*2Zq^&8;i2&n=65hQ5;X67J938b4CEg_RQ(g)Qyx{-Do_C_R zb48VB9bAQrS&xK7U4LtZFS$Y?=KG+BoU<{r!T|?3Rf8R0HOLjn(ORZ^@1Vxg15bj* z3z91-Jr>Min~Muge&YdjT%weAMELGbWP*SoI+sqaQlTzy&%bYMGcRCzbg@4T=~d(fLdB}7$lay4qW_Dio)Gh;5!(Lc&};3 zW`O5JDpo8J+AU#?eT8*-=Kv7tI84i`>J9ng+eg9Jem>)Mt3pO2!Vq;)YThCf_ZbUt zc>nDvCaw+aG`=AY+o>rkQ@%8uqI(51lWtE_d~=ZtF^V?$(W0 z1KCXEZ3ebOT)@Q^cVBqjd%y%~%mS(N+8oqTs5{9O2~G^LlvK+T#gAd%7Z;h8oS=Uh zjlC>rf`r@qUidlkY1f{x=NawG9q4I=4!m7)Sa@Lc>U78ia5Q#qx;W0708zSe?wIAC zK1&|H4lmjT=(RJ7Fzmdu^N`!2?yE2vijI>K+Q;&t z^gqIgy9zjC^y&4Z2Oq%QWcVY07qhMy%JKsrgv;@kC*$$MBYp4gU+&376_ahLr1$6l z+H<@>MyS%crIgbncp1r`Hi)Z>^;u%6B%!CMOHrHIew6S1@zXN_v8VVlwD`Qf1Z&^k zSPW?RHa>p-?61b#YS*!b$-_U>10^sAM0NiXU&FpPeuY0HxxyQpHnXWHG~m8Wug&u} zpL$aHT!?h-v7vO{47q}fpp%uR$&_EZFF74}{9qXS+f2lA8#t`yJui8BwyZwIB?|DQ zdui=s*JT5PL+e80EI!1qW^Oa8mH5Q@gmz@OCLl0OQ|2}|o32UB-M&^q^cs=;uEHGL zh;bpaqd|!pEp2f#s(Lt}O@e~#N{Qv3fOUa%$5K34IYkQbyBbfcq#O0vmgBLq7~2G| zTo1OSj#pM4sixP68TR#W_JR}kIcnn-JtgO{fLxXg@aRrI>VUoYJTGmZ9PHQ+bsv(r zv9iV@D42Po&VxcS zUWgmmVvG@b7{k%J$RVF|Zo&ZIcNovJ^TOkEqbTk!y$m0cla8;oe+&Wy_cK$uiyFru zK#)NK_lZy`O%^raa7kLdt5M6Mv8sXtlal%3j&N%}{0}oM;l9H^5{R_~^g(RH4Khj( zcRCjEmdmM~yPxuFI$neyJM`Zc5BQqyg>NYJyLCe;@UdCa zTg?;AA&l=?bp=HDIrx$6zB=FQOS-~O4R{hhZK_J&N(qpN_}w-%(QHFS$y0vD_GJ?1 zh=csqj|-ff`H$#8(|aGmR1K8RGQ%eD*f}a{u(8jB0hcC(FAcfVx)V?YqKrD5Xvs^6 zf$>@jN-OsSk5(v!JI#jC<}^KOr-B;(w$E-(Y^f`9W`h23s1PayY5y3=f=@fGdDN9?5ZJ^mzOI-{L>VF z4LLg;UUG)VK>GBLzB}BI-QTObYyG=5!@jP>I5Di3i@D%=oVpu&CX<)P3VZAHrir5Z zzW3?w2&9-PltN^M^-YIi$8$`JwRbk(N-1Ulma5v)I1EQ;Kojf*OugEUV@QotgB9`9 z-RpOoMq9(U=8@J*%h3-SEWVvIm#4wE~k)yvcsZUr+dzXaL#cvCm z9+iBZ**ZG$?F~LsOY179JZC5jBhS$6=Q;>d%>@yZFCz}lWLWtZ#{f!zc!%8Vj=RfV zenIJ)@eGcruG1T-A4d;6zO53+ls<1sr7sH)D z^F-pq>c351+owQj}BHJ^TDHfirfVkco*70fc024YxA13U)0J` zX5yEkn;V+4*>e#`yBKHm9>kqE@Q*@Qte2+qe8Dr(Vs`;#8ac3SwCSP_alDpVl8L2Q zE4+jIHZ9J2RMxr>DVZ23>2<1T{!L>d&Swl@TxLKds{tazs9Kv0NFrQ?;cUi&Y5sh^ zNONz3W7@mHa#*jelp{se?b|)6SihtOAeu~!fi~HH9kr9eCp{W1nS0H%3($)mndjm( z96?q-QX5(@pY2DSb=Z#_plo zHk+*oGQ)`|3OP47cfQ{oYl^%+q{>cpJxf68x715%NB(6#g#DV&F~eX_n!|8CT&4I) zOXL3nZB4C~ST@=F-?N^*@PXA!Zl~zY;dz$$$A=*wBqT@X%kKhLAlzFmE!A;|gd4p8 zuGflE#Pj`(M$`6>c*i4_t;9g09OBi)QW7e*TEkih5&Cv;^Xm4NU;XIj=0${iON9V( z))1lPQ#ew0S-xX)G9pp~bl)^WtYlA6hDvQp%}tPY15GnmgvhR?;oBQ;5TKu_e1x2e zi{N>G2&p&4i^_Pz5NR@(o>}M1X(+D|!$qu)sf{lD3vw8& zE;E$$yW2Ii&0HGCd&iF_QG&<7H1pudz&S>0pa;1aF57tGHJ@KbScz_e{j7k=z)weZ zfOWpbtDAb5cbn!DW|T8JC8%7IlAEySx6PQ$CYmBY_=n;1O;b6=#5xnpiFb|sx03)~ z#|t%mUYL|h+Eb{947_*m-~*7Y-QH!W8=^Ftcy$s{1RV(qah8bi7Sj$o;CIBQH z;e0aNNBPjR!N#Q~+3zol21%c10+19)!`Ts^ik3jG(&nVZLG%m^)Am`Vk)wh2`E$x@ z;Ry=hx>RK?xjn z{5`wAa(7*7*T3mlbN$eR=e)%#otgIqnS!ou@&t2?)n;8_YaN>ugL@AwDMmvoc7eN`6jJLE(J#>;ATWN1E8zL+B$aG9LTQW2`3$xjN&d1!7 zis<50q4a zuKt$XAJ0XuK?vV|ts*HP%hce2BO0G)>Jd>LeCBn!yBiCmF4dQu6~JFB?ka|3dAuvG zzU%6`%CtM=ISLzeKfwb!aYI5JN0}^JNZR3%58HPFcQ>@wg3xBMjI3_4Kp^+~G{8tG zndf4*8DSu7jJMAwi`{ri*8Vn;SIn4+m?9lCm6(>O4pE@v^a{S%vP<43czVYcE9{F(0N&!KE6;&>_C1DCq2_<_9%iMYl@=uqECy@$DGd4gTO10h4`-q zLMJH%)LEpo4h&RXUDfVC!sad3oN?ezrr{|0t2}`=xCKAjs-gpDxvw=Lj)Vfaf z9dRDt-T1qu2Q@&*h~j-8CHPJ_zm4>is642bD$0*YcvJpZ97y+I%_Y+i$`*Y*nDtl= zuGk4RSDzlY+vl_2*_~{Ok#RBph|8ybg(ByHxfkG~us7(LffpWTq5>5*Xr8@1AhaGQ zj=Z8TQ}7OMlz@8`_0x^0pe_}`e61DKHO;LW@jp>O@)1w4!Tbr!>H?atG^H0#F$Xfk zd8>N67I{I$Lzf5qsMEekiq$deCU2%4a~LfGL3A+Gd1C$p^hsQ==tIZ98X2&_G`0GU z1_REg7gON1RnPY-`LqM&HKn<)!g57 zg@;k^NWe-NAGH-hl8M7Vw`ejxZ6QzIHhxXK8@!*X(L53r*bF9Vk8Gq1bX(*|}KC 
zdCX-9f8_p=B%4;|pUO+ezPLcQ*u0CSh5tbCmvcnfVLK~BUxUXE$qCzGYlpon`czYk zQ$W6~0e2U^m;O@xGc7plA5e_}AXVDhBLlN!g7`$_=9q}~R*Gm|Yed*;A7^y*B4^7v zdSeAdMV~zDD;ZBgW9%}nVEscjbsBEk3c~0g?!2A2Q$G}(CZN#uzS)p;T&Cq2!ea0xx7%4=UPsaYR64b^wlrwx(1Ar1Pvd0$ zhdc_eI3Xp4lxD7z13LrZET4*FZn#7?!~HoMZ>QUMD$+8Mxt`dgn)C>j+t23C7{>rm zTYoibcBP$D=Zh>n#6=LA-};3#okPw9X}Qs_`99_^=SpX|JAU)dt{bpF$Iwg$jRwGN zIpzet&ksCD+h>YNrScUXmqj7}G3Dj(8o9e0LvM3UJt^P-GqlzP+9vPcvi$m<7$5AR zb0ie@`ODLukdtY3`d*iBkVW6iM#X%x7eu-_NL$0wzC;dzxPfvTyqbAHclY1E>xi}W z2$1KX2q2OG)`gU^3X*QoyIOAJchoRYiWSr)m|5YI&@l4)NpT@8plR%H(s$xeMfPeg z3@2g0Qm1CZUU~bMrFOzu!ROrMskbEfWyoS!^kLyv4IOARS%3gV>b;RYQ`s=;a2}y& zvQg1zBhB^bus@1~^%%sZ*j@LoI0torQB?QbZQC1SXghC{XsO?$8lrE2pfIP!EmNz8 z|K?l>ZyEy=Q0)cO*^`rzFK;R_`Q7B%8%Fo&aM!0e9KBhD+0eSQs&)_aE?!3_3DH3+ zGItyW&Ol!`C`>*yE?L8KAzUK|q`w+yK5^nh zB_4m$j@tR=SFbyyM*T0|6ScN;4`cA#N$!yDOZDJ3iF=}V`Id^q0ytQKLztVrgCk_i zg!{O`m9ePm86pKfh+koP^paBh&I46?zCN0JjQwjSAS^swBMc`Q2zJcMLsyO!0u}|L~!~|t)mMepc?f#mJY0ljXnTNkpu)EGzFu6 zRgWH)kbCM2BP#_X_Iead$=Z2}obOcsQ(8BBQFN~?r?+81s8ov;TGH)7rVtv6-~tH# z`$9BQa({$5ucPwNgYIj?N zxxrAM6-Q5J-hG}SsI<1WT)66loFP{pzYdP!o_hf5oiH8`6LT@fT10*t94-}I`#q(b z<3B)+`1&u_a0$~Qg7)k3C*dMM>elb=y{iI}{l)VdM;|(QAB^0ZV>^S5i;9Ua3m{$> zg6rjvqv{KDR7zGCN0wX`3j!mwOR$_E4vl zzyZ_idHG~qiHknry#Bf>S0Lr4e402vU;d5mOrEE(9%PyQ!;QZvI`nDe&_Hrq(_~9K zuv^E1!t7{(2CW?&{C$A=-0BjKl1cR{(03W@r_%l^m}7Hl+v2ffpq0!^R{~s17*&FZ z%_swjz~I`#P9owVcP2n9C%_nI2pW>8;Z0cNO61i;-4e(s<=v7l zo|K4R2=>XMCNBk;LinDZbY;mfkvcs2hk8O<9R}SgL<3~fXI#MhThO+R8x?G#Dk!#m zfiIIivkwZ=w;hE>py4IT+lHjqKb_L(i{{xO8o~B zdV;0`5FJwsIcbWf(H66*TgvSe1KjUWsvku?` z4vwO?6p{h67R4X4+b1&7H3?iAmu`e{H+DSg4N+&nmjLGH$Nj#ez!OZ#Y^l#+tb?+% zuG7cOx{6t>JJq|hE2EoD7kMQtVXB3a2v!IK^eU0&t_0kaMv#v3^#ne17DpQzQ| zS=-|Zt|%gr+8w0YJA{;AN2#}(&h)v(oqzLo*YhrnE-e2BSf|~YWrp&RQk~>h*Xpkx zaGYs|ze7gny?*|s&8V+`$;+7!92YdrbSGLs{;GQTR-svOm$w+e8sqFhjQ+P^>CJNS zZ+Gf(NTxIiJMTcB6N}ybJ2maz3>2=}*$V`9#p=MrMy3{n?1aFxgvr?Jo^rE=C;D&+ zvfT>=8PL>5uIj6M5h}5d@AH0LJxxdEU?Ygh)5e4!z?3??Ra}}D>KdIueYP~V`@Aq4 zc*oF(I>K_z*q|7qgUD!8!b5->?!hMBlmx z;z5qSP*b1n*@bp6%ii(3b3zJ$cs9gZ+{x=dUwsUFUS7>o312ee(aR?VD3sTtz#2sl z*^Q)-T0|t23y%xV-J>PR#`O1JyP1b8RcKtRMOuOFwE+>emVz6X$*h+>THZUp3WH4fje2>z@m?bYH0+^}^p7o<& zEUW=7@YfTg(cppmG13CvfWHvyU9nvsy;*M+w@Ky!;Uv;$6%Y8{j#JhH+lEbVSnKAm zz=j%t#Cg%eJu%1X(A+cJ3o9KYav#TG5!_HV=CaxF=k`Yj1L! 
zt9eOVyDL+!G+SxBuCI~ycI!`>m9^OOGLvWKFKRuA3JeXC=|&o;KdT+NT?g)uwDdn= zPNv0fst->i+F#lIU(6ss1@j5yyx&GHDb@AmJ8damv8v%;BQ7_{l27&_IsgoOPinwf zlGP)nlfXm_MXlkVrwzsmv5r-ULpm}~36NsEg$qdj)Y=0mdru#m;z=@YP^H2O%Xuht z<<<`}k^+TMNM6F8^7{Sg2k}iUC(UJC_~4ZGU`yQZalnC^UJh07NEvU9l;ZEN4@6;TF0^pOD+cvLV(;3oQ|A+`j^%wTlJ$4KM^j)&=i?Ms$`4if+iX2WEq zMRcP>Lh1oht|k1C>kE=~d0H*AX=SA>zRoM%eSK}+-?1EZj%{H0VOSSQw&<~)$RgU7 z)$4%$3;FlxE{+yBz0<=J4&`^YqOc1+9NlaRLYnL3=q?jqg24U&Y6lcIUJM z^m{^wg4=-uV)ge7V;TfvlobCBb+}e$a{7%q7U6m={}cVMO#~cBea0$g6fNb>U{7%=MXpvT_!9WSXr_R%dZC}uGX&- z+vb7-I4^)c(jXm@5(O`^DV}c7M{lJ%(in3QvAF;4wD?Fl3g09FRcOLo=T2MrP)K`Ef?=^D#oKW(%udNe|M_PO zwsuVT&_G~l)Xvsgg6+l~SnRdKzAMVo#$;5gte@h71RXsPi@A83^kP^>croX}AZWn<&ol>&f{u_m)St5!c3olvr z0uNRqDj5*>BrSyj++t~y>g9&zRsjJQCFfoQW1=yMIN9`H&1L0O8t31p5a5m}$uc)U zu8#Bn*RQ1B7x-T0r}ec*wlOf|%|3G3tg8ayPOp+;Je%$AI0}d%G)#c#rxE^l=jns^ z+Cg5OA-wNst9v zp#^Eh*|-(Z7-Bw940#8rneJzbtSrOgLR3ty>$cwiUH0=KMmWkTos@bZynbTU(U~S~s-HOoY zXAv_S#5c1q9Z40s55)>VBqKf`ZMx1MA;VHFXN%cg$Dd8g&os*$b&Ce5*xM*&h6LN) zM&8Ww&`0d+GPJ-3m_~0+0pa@^=e9_3``llODn{^dGo?s{I=oO9nU@jPND;9F$J%LkrQg}^xUJw7g6!E%!RxJJqqKmO@N+L$ zgz{$_o%&pC6^-I}bZV1D+JA%VwCG7-Wd1-Y=XmE-q}Bmk*1?200fY>{%%KNo;$GUF zV~9m<|6XRX`zM+=sXp0tUbF5z`LT}&Rrpg;_Sk18C4#%&t(X}yxTnOz z95|I>GE~y9=2hklk))Hf+329~-}EvU^idy09UZ)THX?!Y1NXU#$>t)@mftzkq_L!n z9k&1HP3X$TA$A1V-Ayg+Xm$ObSvSHO8l<`I?H~tY4WX^0enRP_z?QmaSzwv%S{}SD zNS54aF) z+8_ziy1Slnt@6Z%3;bb{$bBUM#3?Lt7@OA(jSA15eFPE3;C&eq#l&I!a<%f=!}6$3 zShGc`-*vVdG7MfvX3>rsI+J=W?+9E}HV$f;k$c~y)A?=I9&oJx0&T35BHiTH>-=zrYJI5v=S z#;vBuhv$!q2`Sb4=@ogj`_i6*qP^g8Bay(S24uAZTNY1Z=`?f=QJGgyc8wE=}=owWr<|f>Afg&3_SZpp+%T#z=VYSv{ zLQ8S@LFDa5AH;x6(bD2#BkdsOi~1o9g54YUb+K;y{4Gimj$+kgMOD1D1lXN)Y^}}Zf9Ah$I3|ym`kUn>PY;$>Bk0IH87u+>Z zB(HESi@Q|4?4eJU>2L@(JZu0W$h2L-1ep|)AXpHtD3iF4cf?*s=&^z@J?57;P(p~> z!TOMTz^NgTKJNolXAD+$I8#im?Vi3@!ev?Mv?8MaNsORE==~Q-C9P{TP6mDkXCN)% zj+JZ6gryp(nw&=H2)GcN+p1Z?{EZ?( zf@Oa!SWS)im@&2P$_~phW70{H##S%;?(A-?_?jZVO6waLJZjShk=Z22As-5q1cL-( zddmV?N3R_#3(pmc{Y%k^wB9TB0ewWC;A9=OTYtB(&H;bh_TsBQ)-JY5n|#0L9rj|N zY;jTKy{9PN=B;bCzMi#RmCxvnotGq^G%bKhz$)J(=V~BR`9*4$P_AW(S{BHt4IfCb ztqzvCKcDH(1aK07QAk%}#@!vX#DM0-k6?`_y#mxh=M#BUFKHqMK^CewiV;k~VS|C) zC*~tA5nso&qmVuUssRebs19N(IDaCVZLE-V2CUv;0kzC~@e;aF<_P>W4EGA6 zgMD4~V6?9684UB%JT*W=xE=WELI~pale^rB-AxecDUbpz`>er+H#8qCK$dM@tK9iF z(|EhEilJ+)9M`iPaSn6DKIe2O_N&u;`0ndMYD&r9xB z=W}mGgzuc<^BYaZ$x<-#{buosPs}p7asE9Gw~W=2xs7ieZt=fA-VaJ513|I!l1+xI zM=VlWAco{!EdgrR8b+5a!Ce9#*n^xASIb^lir(GC+g0FkCGhQ;fD>JY*zuaN^MB>8 z2PV^B;)-Cgfy4N|sdaTHh4IZ+sVMIBlX|s#J_}t>q%!aVD5_Oas$)oDt$xQm6P?%J z)xNwQBu~*PK>owD8^VKUo%lKHQzNY|iis@5mV}$mkS4P(62|X&Ix-CfFUznU^q$lG z!=tpm*gO!EErKFk6o_0poF1xL2e(9q-RFIbI3d#2S&4x%!&q=+XK<4{F(;WV`?^fc zf}V-9_mXtINdn4k`_vm6XPSMLmDvig#-qM!C_4BpdG5OASz~^KZuk4H3*I${Q!`SF z#66VD%N#Rm#Upn9Kr?mq<$snJlJ3-H(^Uw#-l1lwUnFz=zf8_JXk_Rl%if2HO3Z+L zG^=ThMtx?+BDvlq53%0elhS=D^@_USeT^h)38qQPO;&BJ@{%Rbw}lYwm|D^u<7u+? 
zaVD!7_MT7#ejZ!X63%8T=glP26LLzOW$(|VkjPIGe#&}%-IVPIEru-(4Zfp$QA4iW zs!#vme@{2}BLmyw@=34BpWU;-@;syS)c238K7cZgE7fo=NiHNm!)4*HF$M^Z1rSMz zk1To3mCrm&cfBQ@E*87z9d)iKmEm2%bvUK~_q>9{hlEVj=1e+#$f`+^@cb`SehMnj z>1_ZOQ3|P(mSEt#)g*z^$OyplHP($Stn%M@?!o_#kN_-VR#2QEmC%rU^E4%2T#<0T zXN|ffbZ~R~g4@WQdh^89ouHngEI#-_96@|bC8z&tDz|L2$#+8OY=6ew-G&IXWNX9@ zzJ4wwJ6Eh)a^q*a<~fj6X*Qk@GO+dvh<8H{Zk2wR6da*=BvzT=0HI^5az5 zj!B|(!BJ;P*`o73PCIoF-Is_OCi3YB9TP&J`EuIH>_^o08AF%NcKL ze2?k=M%)EGbZoLEh%uATvr}-qC$-jCbKto%HIx-dAUJiuN6xV3&hT}Wsv!wtpF_46 z`u6j{i{~O*)sGZ?ZnTIE&_^T8Pac$EG}`(g4<&g?AXqA$q|$HBKEOt&)!Y~z75)bS zdq0T3ly_F%neYQpRr{!WWEh&dNPZ!=7qgE#6514r{)PM9i&71Bf2XO}(t=+HAHT|c zGno4E1*Kf4-<~p0dSEW7m~?H1k_adYC_3IQQrH@r5@0_VM!tGZy5JoeZ$66`#cK37 z)>+$!F9AOHcKzGUQE*b)Z!Ya_Mn{K5{o1q%4*E{^!Q)3vxgBvoH-RPD726g^ zIyn2QoZGg?yf8cJXWD11mB$5ZYzGN^kUHZk%UF$5#t!t?kOPsG-D@mmYo2(bDT8J<9>P8P5+kjxB_Yn)J*86=_{5oUQH#(Mxn4NftZorhYtmpvltR zPm8ZZ%x#uf>svi`yOGvJ?hm%}w(-Q;L6PeFP9oMu6oxMDo3Vn5My7lY@>RQif%w?n z?m&P1$e0p6({;cQBQfl^F%~SRUdedW=aGVAa>XOX9LzCa&jerrJ{o;$=MUpv1s;n( z#jCZ3J-4>37#A9aD5)5;Bhf}7Hzq$ zxHoQ6HAfFR9@0X3u~$$Kp*PwA`vvlC=gi@5P22zD=-dOD`u{(EXgDr$hBD<7o5Bzq z(rCHNr7V{anJ6rTR7mcZ)k9Lmq4IqY`zF)fn2Z-}{7Nfqo%@qB z@^%&N!GKL!UMMZE`5h!H;77k_Ab2Ty#51G2>0(pVsJd?H87vsUnJD-?P8N*`JP7E$bu) zvVc6?Hm|9~(%nwKzEwZ6SY(o5U|P14bGjN^&@iiCMc-XmXY2K%C2DzkWHInITVpv= zKcyT`Ut(R?qG+A!G$^dIyLEhkWJ>}&?7(7{*27k6+U(S#fQZ1kU^%91P}hBF@`bq% zfmphoQ6#eKw1PnEh4X5lym0Zr8?lp+Hc*sV?$RnP247-%3}BM=#rNZYWH%&=Hr+dN zt!fz_hNFlW-(tq#@t1lwfU2w`nR0(Jv(ApJ%ISJM!@&3Wro>aZ$IIDu&aYAKaCi&k z%MGZ{d;1D8U>7kj6R~8VQ}xb#HD^#q_3k9CS1*7|x{r-xF2|>&*w_dK)T_LI>W@;i zdzW_WUE0Xc@-jH}hop)vpAyMn+FNIdK9POOdZJ&>mF=s=(1CGw1wrnv8I6nZ_V94S;X|6A zF1ViVCEtQ3rnEFPay1$nBAa!GrRKTdD-Vjn>J3-|*es=gUNm zo>!>L8(!acy6s63x=)9~gCf%HoU3}V{j{DMIqTALTG^)yRV%)8jn3EyqhIn<;x+ru z*{M2XNuSQi2)Q>MABQenKAdKPu zGwpS`E^B)6DzTl3f}IRr>gc%gYww&tih8o`Ozz$84B92-IQL~_uF4KpgL=ANKl#SO zV)W^7@3rqkZ~>Fg%jpWm-yA`fzUM!7&A&R?*>Sew(-16*4x^k_zo}hZtRb8H4xWtn zj%>C_Yn5Ephl5XBX?8AE#dITM?Bv$8HF?H_#e-dA!4$hh7{*(5DiOcWehFP9Ejy8E z3qCqDeDQGN2^o0LeUBByh4US&Y`D);aoE}Wzv&MWQN4WfeclR2s4@jBmHW)bY}+6~ zauIGI&1rKU`@r?6XI)&1W{;QveVl``gCEj{9Cx`={;A&C!|bM%d;`ASaG2;WjoXRB z$f%PJL_fg)yB4EJe+-}R?a6dWtbo8C(=I2GvvVf0aHu8`&;J0gL{5Oz1>TgMS20j$ z*&fJBYb?^TI>8kHb4#%8rl`GfY?buoY>|t)dr31hS4BoKUf8KiO}zCCyAb#$BmYRMZq(X10Oq=~^r*K?#dLnW7nVlFJD*i8k1FJJVJG-r>nR=0!AE-3eiUa0rlzmC z1gin278J#+vW3gUGfkM?9MfGxs#WwuKTAEv+lJ!E*~v8-J;^L?)uVRQT8dbL+i z+HXs#pt;uHgBuBNu_qoVH0JbAitY63EeY5<*n6S*!INZ@rBpciKY(}S4k-#@h<|H{*$1I6X( znx$6Me7bZmza?B8|3-w5@9OCBGFnpLsnSPBVoS+1*7&b)#yFSqn9CaTF!ak2wYu+XnZ+DDXKN<1^FOB*wx9=wJQ3b9Kl$TW`^=5m!Socq~Z4#`4DXVa6(lvt@W{cBkUZs!wWZ#Z2 z3>J?ysM@hiRHV0qv{jJGQwMOu)%D_RA-bEZL$K=+@7zQEMO^>kRTF+?kDtBoV89`& zKzG#1B)~-@dmx-GgcmD>^kM-z!Ov@WPN^@cU36dkb%hUacCzCX&`TJ=6V zjGW@B41q%RyZ;a}n7%%qZf+|YVTmhN@6k5@zIOGCyu>CU4ieN-+S&}Prvpl1I;o|*4chtcCQ6%aEEZ^o=I2lxnG_0+Q@_Z2x`K9b zIUDt7O(tpT+pj+p(J#^`>P8;vR@{8q{7BUgrc0?7qhy|sL$Lv#GV^H`Nh^ca*Bc=9d;vh*eE%r^fAEt%@0w`))hRzZYWTCJc^q>CdEo#bm~CWkTUkCAepJ)XvMgn5qBq&?%`w9ni!-?i zIuM#%2G4;$KAn61U!4$`1~jj*k0k(cK=IP~p&RIgJP#UpQ#q5P{CNIF`{B1QVjloT zU@T~l{V7~{$EzJr0!WOqp0r2j(1PQ}i3%8cq@|TP9TK!rcJ-j}fwr`y9gr~RHD)JY zAWZ+N(q`IP%NEmv@@`eCiu&c&a)iz~c)EU^dAd(=;eYzAL+DWQt@`O_W4V3adcGM` zSW!vpDOJz6M|q5(JmEXIGRR0Hl(O4I9$Qj|abDBc6yeatMW`=T1Z;v&@L0wmVjgD> zAQEjIWMEjbIk@$=%_jX6%AMoXe=5Q4&aDS~ArecJ0~46!l~Ck4nVE1MLM2#sB!kj`%bn~ZknU9>ZJW1@)Y#c;#9q8$h zbZJ|I}-cMQq$nSgD#Ii(oqVr8=Iq)HUc-u8Vj>>!Zes^==O47jYpc( zJyTOt#fM7ftlxz}@aFDkqx<(`^P3lS)e_Ti1@*Vl&b;n&)gBQbRDWM*fMo%^{NSnI zO~jEV6gHw^o>-M#TveGGeX%mDj~4{Qo{}_mnyxSA9Fg}(wX^PL)(C@zDC6Jn%!Kd& 
zbUuu$QqNJTy4h7AlzGz6Dr3qlk0WtoaEPL9k>&URb`h1=5DS?~rj7Nwx$QH`!;eJa zKz4VHM&kR8D+VDcDG_>Bg)EoAXC_P+k$%FSc>u#oBWh-Uz8Li6ng)^hYifV)Vwl4R ztKvFYb77AJdicI0^hbc<)ekb-hp!LKVs?vX+0h3yl#pk>M}7m` z7T6Il!{Tr`je}=S;JTBVvwD&4AabG+ccvomwIn#f*!X&gz2Ob}7Z3`)eHIt{)YVXs}Q)g6M! zOX@c4Zx+^r*(k*cX;X&66>j_Y(@ATxDN%;L#gb>)6gB0UT+s)y5Fdao0RY~?zV9n5 zyXaj<3@zK%J|No}*Wib@pC4&NgJ|Zk#~Nr=tte2(7B4Gp|4TYN+dDls95SK9XglQ) zEV4_BCaZ>8`+QA4-aWGf-$RFN9F9@vaAf+)JYH7Ud*%I_ODL0!Vl~lY0L2exh}Jrv zFQ_ETzgoMx@0hUBDf3bUId96r;gHo1m(A?uL}8clChkNQpCdtk~vm9zH$7zg4wa4I+X)9@oJgYOWHb$f2R zHS7FXh{>1#v0v&Atej_1`+qz#NyDH)(7Nx0bZb(vqhg>@S)biThy zoQl=yA&K?tpy_-GpgR}E*&lkj>V#qlb}CZ0Z|RG3h_B7u^Bm4=6gB;co+;;@GCr%& zG8AQB{3lfZxm%7`tFy*2d@ygfdnn<*dHH;Vk(n)%vdzT-HF8d+u?rQ0r`+f(HCk;E-?43 z@1y=qq7}b@UyO9Dw}R=s9F5IqXw{|yM;Q0h6*{9LVzyyTjG>EbRqNkxy|W)RIqsh> zX-02WRImS0ziXk8k<=p^iy;AKrV=)w?9j&`LG2V`OiLv-$*`6Bn8OK@Lj-y{0inM8VF(NxZSdJAB!#QDhmG! z7dwfkXk>tGlaslV$8ll4U5W_g`OMJECJT1u1?qW*5l@{zFF%h8{Sp#&Ir$b6Az!D3 zum^3Oo2j(o?8^GkTbfU0$XWd}I#_I}DN+0N%b6QO3L0L!q2If|I%clM0aW_X(_-edC)yO|DL5Y(qhj(&%yO3Sdzi`7qxGGj%9vw#A-&461tKX{&LsXC(SE!=ebdyy+g2z{m7-*lm_4<4IW`-?;X6h?~8K z>M}5(V+CLAodJVbi_nA_0PD381A_mkK4qOu9e%v6{;v)gRp z!F>FqSr0n?@A>WWSzm+v(%B`Pn16(>xf!6vMw+7Am!DhB%a=KygkuOqdcw&eT$qMR z4}y-P;lyv$p0%*R9zf^>AIkq2#zD6SrddbG7rf_CL=~H+@l zAC~78ruI?kOa&l!6=?r(_4i8}s$W!tCr$d^e2>iUMu!+$X$STqgSdo>s06l<-+!D0 zyWKbT-erIahyn@W2eHDk94!5$BdnjzE@_=tAaRcXm{?gV({`F=rI1pp-sDF1r7}eK zbwKFH@HcT|we~XuiX_NQeJbH8({b0mDi28uqiAXA_4-A6X#s%G@hqVSD&^ z4JTraLkv)JQCIPT5d7NtJfH}G?sNro=o2S5dYt?5snr0vVis6TI`4@_O=&#yc{_+I zte0RbnhFm%C#g>`cA4GAi;otJqZbfxBm(@mQX$imjK=gF?kzbYU-!rLNc6wZJ}?n* zIeRH)Q~BIBZ+kxGPGEPUh)4Uw`_g6-aL6T`-#x+IF3tUYZqur~;g@;&%7C1yz^1c( zyc4RSW2@gMAQihY8}-&%sfW{d`bnwcFJbwAzsIF$RC*O`hAck7EAUYTQiIQu z?|y&8-ay?@L>NPL^Be4l)n)quls{t^=0v`4)!h(lkc^#c5(kc}OrtL~oS~ID`tCVK zPs=={QWg1|Cd$io-1F@jx_0Qu<Pf)W;Xue0HE zZZS*A5XpeA;!BYRIojqnyr8k*%yFAk9Du$$Sb9d--08|4g9c|m`N;lLaF8l;>)&Ub zSW1e7a8^LTlS4z6b`GAx$VK#3SgLNJdV(is3eX?IDhY`7K_FF98qJEc@JmLXt%Aui z7YCDS#wna@8zWz@T+vXO0*ts3{>&h8zE~tb5^b5b`4ScKN#5E&A|$4|h#v?`A zHSulKu?FXvc8mg(21-s2IjdzVFiD_7BFKhls~JHR2F4mw(Z~>oX zl5kUj3om<^c79gi!9-$T5MY#-<4*Z_Wlo++B!W2ID zpH;UUg)WnRLt4C+j}PXLEy-_9oI88=>^~TjdpmxI)D+=I;Xbr+&%n>A9`CVA-s&~%m_QBOi%80O}J=~(-OLUr3w+oI)7T~uP>^%r}DxOFH!fj zg3;!F2MZZ)n8L}-i@LL)FK)rq3$ab6LMG|}l<&rnN&pp^_leZyFnfnJ9T?i@a`x<@ zm7l-1rmyOs)EJHV8~VH@e0drklP-=l7|MwYc^^f9ik0=F^DxPJDhzgrqo}a#1!V^A z#++040`FT`vxSO-hgabL%I9KMrUIY5H8&&@nP!xMU|VVYdfndBHf~0j){CPh zY4>^#OuW%g!m$(h!IotX1Sk|ENGf;REhrF442SHGT6jGd5gcs0D>y?GRS3}~7ye%P zxU%_HqQ9tLQ_jg@j*v6&?Z$i8pdq(oZC{>6!o`cXb5(qlHTq0uBvygsB5{#B_+IBu42!B2Vk)qdU=As)+R6^zF<*yAV&GpE9e6d+;D7)QrPF{Qt=No%mb#sy? zg;?d#-OvfL?!c?Hz=2r32_pZeorR+A&p*GJ*52FP^gQO%e6jG*I>`6;b0$m23gXZF z7BeoDvGDp;<|Q4&bD16t?Bl<5rqJt7m2Z+^oH+h z7Ui0Nqy_&7)gG-rQLY)ruvkk7O?QihE(~W#28KO}TKndiw+q%w#jO5{vX#D9(W_%U z7PWd~@KNQD@^bNwHUgQW47THb+o{r1AvqvbCXBM%KjPQMKBLTAfzOMII{ZH>3vM@s zz@G4L9Wv)47iZ6H`u7J-o80+hqzxA?-(6ET=s_EPx;Zkx^-?7c6h?a--%NAM&0(5%*?ogxr}?~z{-t6T zUWNnw`W*^A7UU;I&0wgYdJLu4PooHu$`t-Adn%zM3RCEtlH-rv}|I%<486qvEw z=b?Txv)9|q7BOG^egdabwWO}rEr~v#%tlaYm9=(QfR9HDx(y9FC%2_~{Tvz!*z^8L zuF9Jk?Z2n~#e-7cN@hZdZuFcIv~+Sm^JgD>W6C!yD)^3YYv$$tK(0_MK5Bg-nlu}W zlGxZ>e5|NKLV1W$IPi_m+{)e-Tw?4Bu?Qx~df35|7e>hjr58Y=NA?RI67%H8E zB1k#7y3>{z=PrKip|r^!&Am-Ev_qsQOVRv(G^#E|qoh(l9=Z4aLm>;*K{}TbWn2TeRv;)1v!5$ebSk^68Jn0uP!56eCmDL89x_2QGx4M_>zo$%o7vevz|2_&nxW&g3(v%B`P$F@6j^xj(!=F!p7%wdpJ7+@*}! 
zxup-+7~1p;;QiUoXgM5RAx=vw$;|Z@t+q0M^2zKk`CndUEtD$5?WGf{-Sl^oG zu}6smNd+jkLnfvKK0@A`3_ern{mQSI)%0Ii`w0d#LVj;Xk{N>QY=u3N+tj*us_1=B zC4M^6;07uP-XnG=eTm2c4h{}MMl_*74j+t})6ztE(-nY1J9uY^ian+OuIiZ^^c5-$ zvX-URS*uJGl-Ib>QB3B{ApBe9dYFNiCsG;8%So%`>_Yk=u6q zViPbcz$xPZui3wEf}rnR-YC~u=HEA%08#tj)L80RrDxV!*&mHrgt@E)p8ZT9$~^rG zhNNKHy1lhgu6m9b5xxHHU*Gn>CTfIr49L?dg-3fe<&aQ@5y{qjjE=G z27E6!qN)Q%rq;sO*j5v7BZM#>Oy^mb1%`IQeO+_EqZU|%W>0r~7pf)tPxGUSx-hcC z-l|Ufh<%Od6$dK9!NHaW(P=@9`x5R3eu*`)Anb`~2Y|v49~v-lTr&D?DYYSjI49mB z!3;1)aZM>qfd^g3`oO-&sey8F?t6}L%_t7clLznKzAntwi&uawLHD5xdRDJr8{yCh zn;Y-VMd7bW5UD>e48P#te`21WAVH)=436B#vv2Gvp%@hJcgR`BP+3x>(g6L2jnZqb z3^e_e(0OJ;WB2kGR8_qo(B*>w6d0t0KUh9QiBFoZe!jIGmel22?^Ja{mB9WCS618T{Q`e0Wy`-H3u$IYD8z(qViL4qITWtX|!;H%A-E zDk8k$Ap}+XvSY=8zQ2{1Za3Y~eX5d3(U!`3*j?kJO1|GKWmkQ;-Yg7;9P$<$|INs_3hS^^57u3`9GXZB;!t0%soHvZRQ5(4jF24>81?EkenX9ZoCrRg8BLl+1ZsPgy?Xil0LU`Hv zvHI6HGQaJI;C^t1C)c@Zr8!HR4FDW?b_@UGo5(k{{~nJu_*dV3IuwTjA7mv>5M&E+ zuUrx=x|>z-Bu35mFBe=t*0c8(zq$0{WA`Zsv$~9?7N*ymom}Sa-^+dY z?&-X<3_m?V%UrkvA!UAuUDhRvfj;Z@EY)S55L<$pSM%^R1soS*;9t$*ey$2J-onEa z9493Ob~d@|R@Y(-_i;HCMu6&$j_>^oxpw?`NrqixI4Mk0%PO4-uR_Ddv|GQ7hC zN$rSyYvsb7wBxEXv=ChH^gS-lA4epJNe;a`yk|JjuG zM>jNWhPLSmLHazB*sEIsdpZMj2G0|FmgL+gsaMhm$$Ii0meXpcpjCaTJF5}(h`>F+E7UiIUGODEQ@ns!y+OLnuI zb!UYGn2v{Kv!4sT@+%^mwN=fj(sYQ#jH{?!qXq4QH$E}M|N5@%XMU|i88-Y^$dJsq z3n%>SNZ@PwTk|A!&7OjztK2Zovmi(N9UiDSV{!+|oh3yxRf1lbQLb>%3=>0flRiMv zOc+4LMZYlIXVVGs8>@6ARIWm37$r=%b-wEf@)_N)zceiFWQ2=@9(~+P3mC3P|1D6@ z`OLuoSzqZnU48bZ+rWqUkS3V3_Gr~W+Vk#|o;M4>Tk-q)*Q@&OV2yeyjN&>xd&vFY z75x#zFQRPl?3tr@P6{vRuvo9hrgzNZeDqq5VOdYNUdxLYo3KC~=8mt=L=yJ;sZ8m~ zWR3@*R-0Pzdyqd|a2z*URejV)J)Tj*%sboZHLoVP+kryzD&GkjDQFpkjm?drTT6O{ zuw3z9e9Ga&9DR*XN%#6H3hpB)h3d@5U2%^N`|@)U-gRQY#CcB-y7L65)$rOuKw<7i z{h(d<_>Hc1vC`GY-x=5*N&dE4t`&^^ zlxJh6k>~=N6DF3|B3~HZ1dYH1ShEFJF8TS;bP)}bn9BmJNxyLRd{~Bq8C!O7aGykJ zXU=fbh7e%u`f;?WpOQB;*p&K_A9GwbH%@r@=i0Kjl|2dm+(+ZVSP3NBDWjOjz2Ki5 zCh5laun2w{^=_OZTT8-s%C9sbp8URjQw06_bRCBTIqGCf?Vw7P3Wt=?(dhy@h#l@HF3jW`Uapq=*FN!AuMDMlJiI5rf;wXvfjPhrpvi}G zN+%U#6-pjCG0I+wQAzLY6NT9Ty-_vk;&tHz>Kj*rgn;hESD%ekjKM#y4P$eRyg;_( zYdyj|OZ43dy|8)wb+Q`d!QeC2`Ry8EuwtTV>Nx3{Asl=Ey*@oNmI^77^}4mZbA#G+ ztX5YgdO8Zq)wmRg47xqS4S1<1Q)mVX%%`UnX7y_5=9FZ~i*IypQuDki8F*gRGFHWK1A{)O5KiI^I?N2SyWH-2Hu=Ncw)|O9M}P zf`sk33Bv7x8ub%nM|eNiuBMSOi0l$cl9OUZ(fCRP-Hhh)-o)Ao4L4OPLd{N ziSneUAwq0qH2tN;kW%d`xA5!Z%=c*el-DiLV4wAb&fKaH2lZfoUu`o4oN`&H^J9Z& z{Fooi_sa9bDLpc14hd^qX&k}n-tNB?gX(-u*vmR?XMx3=)9x{}G@KG1%sSH**eFv7 zU76!m@9bPv8t`@q09r)hQR$o>!T-a;(#rDT>M{E9Mr(UV?zEcf!&?2FEcaL+aRwZD zcA9N^>Iu_v3Esi|gjtf9C5|LaLtq{H*^PuY=r&2b{0#M zbbiBf!_hF_(N~#n#GJ#y_F*wFA~~N}{Gnkin#U4$EGZ#!G9|O&>E<5lRc_okD>y7e zb9Zge9OnpJ8Hd-LpH}9c@5KOc{qXbb$IA+ghQkd&EF7acQw7_7dU=}Fu#xQg$36|i zh{d0)FE)5Dw&PmV1zcjvG z=VXBS4nPQu&Z!jN$7o|+NK8`q9cIusKAMm;yrN-K6QVw zC479b?Cqo-Vy`A=5Fly(s44C*fM#bFg(Q3TJ@T?rB;@vbIbc!BvVF`ac2@EB!>c^{ z-y*tB@Y+&%7HG5WS$`L5ClGaL~*c zdx#t2@7>}pm+WlM$f+?$gz+Z#YqJwArNiRDYlE`9c=si~Q^C2rm%!SA#_lBF7{c5i zAycqKOo1efrVGlmmJ`q9O=Z4yJ8EQ8alkTbbr5c$>X7$RA&$c#H9m;Nd*0il#5uX! 
zvtNg4tG6@$=znpxYP`zy^hCBIh(u2itoluHufVwPdQ_`g*dSKs$9Z_lJ^>w`6CkKy z=e#4Kr-poZ1R}zbh_@0Zm_@iDPYoAu5K-~*HVD_P2-yBVE35p4nvbajl2JVbB+!mJ+Ly43UI&O*@pUFmhH~|I z@a0-1w#V0?qFSRtffMlk3c4T!-EPs{)D%zYUr_H^`Z<-vn;`sgl~DxP0^Tqe4aVZw zDHU(P%XuUuT*z2vNZ!W3ULx|RzP!jiMpvM>>&IM-@U#5565nQPZO>@U>LXC!Va`s= zoYtJvq3GQbn;VO18+cS!eo2m~pjVye&--rkB0Q2fdh}OiFqZ-(U~g(W?h!g^3cdV3 zThrY5<1H)wW^x@AdWsJqHpH(~wS z_{uvt_8ZbPqIfeENy9zLmZe>Y`GiDuH-=to5d<6$^gFkK%6eAzg2>m2Uz+aM@d2Hv z`i#K~x1Iz=#}KUwihsj_G{W=+RMm>tjc(W=o=%*E?Qp_E+b-z8P(4 z`uyLJd#cG;XF_?&^G}z`?c7jao>_N;xz8K~O^tkR!@Dz68Gev!oM1cf!OS)m5|3}& zS~SMK*3pL&PUNz)*ibR^c(~DV^L_DaLHu) z2=Q;JwxBuGVq;tl-w_+lkN$TGd+`pp1o)cnyVgIdHuC(v&+2dOudmfa?ciJwN>L0@ z8uUj1(X?=gW`EnDURAEUAq6)XQ7$@ah#%TW;3qsLbmH?tVf8 zK;vZ#Q{z)!qIAhu!an||bLzX6myzEru+ zbs-uauOWk+)FjYuj2)aB^$@fmwcZf`5}vFgG0s!Z2OJyOvNBYhb9sP%ZXc4KwwtvC z73dD>?{G^{Ab2wdfmqao5NkCaYeL_l7GJERnb@pirzzxaDg0#XR*w*$DahrmcpW{a z63*jXJH;`Lg;!@@P1CGDQHH<&iv>EmP{WrrYb+r~onzCcIKPqXA{3fF=mgXyOY&Y$$uYklF1-Evgy%U{&j)S9A$*OSHynhxao~&dk(&7`pLL(v|%s<$2)8{)s10KPOo5`-1ESj zruZ^@E<=HN@>lnTIhLcK@pz?eKLYbI0R!q4b9W>pK<6{x%#F0ItiXfm=Pgkmh7>Fn z+AR~lKU}`;R&ZD3O%NCKp`G|zd|c()po8ZVmxBPT{Cph|54D3@WR8yrd$*Z6J`uRB zaCL6&^U9R>i+{zz*W^k=Tb= z^7!5a=TK{J);o-ZJ$;z1y76~w#Kun9OEQ%dcd3~j>s5s{lKA788wRBr^WUfF($lERoEBqr{2 zko1N1j-I)Wj=7P|n2nqldbz{W4JIf%Ms`hRua`kMXq(VlzSM|K!kp89vlOnIi>opJ z7JezqPk7)(kwiFNuLD;cj5wb_w+tEdtLX2Hr`$DA9cFvqeuttdCIpgq*Zvr&;DOhD zP{(iH%UM~j@Pltz*H3y?H8sVY|4C%*fcYRzi0|Jn4l<`|vY%jffEWBr0_wP5@~lUX zOnkZYLJ?f(8><`1O2y?5D46=a`nu>=_l;3~I%g$(7vJ|z_vfvlYoB@QYVX_CC|Jqu zF4WvDo3P6eO;^`+?9Gn@XD7`rCBq9P64Nd#RJDKtSHsYameRovx735+Qd3e5KT>Q0 zq8g!0ND8rXXzwb=?#@^hPsF|8nkZx5L0AxMcBfaSwy_-L*%Msrv8^oK#SgFZM_U_4HmfEYe+Z@{yhbzk8uG_KW*6+K44B>5QDr3QW zNm`7rNcQxVtr^iMG6{~_Q-$66(+?qyK$GG104=MLhZY#1A!h}?4ZcPl$+K?SG`@`q zC6EtUY4N9e>P>wS>XKDUYA!E7eEIC|Fh0`vp+6F1|DyK@#loLDG2+_hzqMte=CzH@ z&CQnj=oaTox@xbU)lY{KttO<=?U@2VSeLa=uZw9TN1J(^G17}>`c8N6btCQ~>Fb2x3B%l@~0+58p? 
z4^B#bPMT4Wmhl0{zg{F`&25Mo<1KhW>SSf@pe(hj_O*NVCpQKDGUj`-&rW>hY`8(b zT2=k~Hk^|m8i7iO4Q@~FogVIpG=q^B4e=(hOkzx}*oZ+<>Mkf^s1{G@U=LE^`>^@U z+82FqVNX~Mdfe3!M9Megb=zZwZ)k|OO|o{pyPgWNxTxBD>A~jU)6vS=62#m2Kb_;Y z36J-N)3DUK@+2b68a!|SaUJ=FpsaIm;)BFd(*5%6B2x@vPC^;s?(mVTXAZKDgS#W& zXdC0iUmgy==~g#-b@O>vZ+Uq7z0cwG1+Z(T%05ubKjhcY6M1Kh8jcQ!-2#iU9rkX7r2Ud5`0zt}8z~YGpoZ!UJot-d_li z#xy87`L&30oM=y0jnBJG-aO>E=g3S^Wl}9=^V`~8yC1*)3|baG(Vb}S>TZ0b#m))q z*Qu}H<8=Du7imh!)6IvWcLsL8PISKcNqx6W->2b*dGXS=C$nD5R(JZUA61{8y~%2v z2noqJ*10#;=ZM=slRnl`CXx%8D{c43TH;aq zel2)pd*_I?-$+V2v)r#KTzBJJ1-uY=Dn={ONFtUp*CCD%G5?JG4%7SPYUWNM0qYC> z!%zIYtycbZB$bq{#@l}k-e_BlyeK?TTl_K;1xxsR+`h3qrR_=G#GF2_oE2wKLu+55 zwxB3_canqaF6U&|;rdkx?uI)GrFdCa6Inu@?Tinca-|K| z72wA7px0~daUO4}k2|T~w_Dm>rXU!WU}67%t1NOSdE)W#eANJJw?b^|tgL4y9)T=G zP$jRgzW%lo?d>uf3#E~LuzEa{yXA#}H}tMbTY6AIJqc(Waov8Y5@)PUTqI(sS|w;x?4I{ zNq>Fm6LBYcCw(-Y`92Qd2Tj1tRAQfZZ5E_wjMGyXdRlL;2$=lt>F{{&{5s$t%q*OV z%q~K3xBpJqASOT(JrFg#5))Pg^N+{*NpYUn++qZ5MJo0#3?{E0~wXc=%BHM(aKsP`D#*UMa5id9B66zR; zM!b#KDVB(Kg5Qdj-8GM9)q8Ho+mG$EQ0fGQ2zxwfAD#V*E%TBJDph(wBt{v=ST4e4 z=av4VW#YN2Y4{IzO|aPdRci@Al>zw10thzK_;?LwMzi3C_!VN_BaLc!ltZv~jqsB0 z^H$>u07iZmAf@W*xu{J6^S@h*U;k`r2psZGqTKOzaJ34&ID8ut#Qbcm#Xp@J8Xg`l zP=A_}MAWRj3lFX5gN#|~RjC(U#xZ@_KG(ntTPbdVkwsA7QbPdbvjIB@ zJ54%Q!dnjZ@bit-FRU+N`V!;gb2GXH_WqYPaV021+geglPGOvJ`Ke5P)Z$ur(>3ns z*cdhTmibKKe5*V_2HM;r zW4WaKTR0zIS74s-*IfJlRPQyfmH?$lc&lV5PPSsbWAwujLW=80m-E~(qt~fmty|o^3Yo`Jf=;u(-XU7@U(d&6^6# z;5pn97GJzIwG|o@qo2zhG;`4HRY%z1YB-FEOmB)3{T*kI5bYS`PBzQfjC6g%K~aS# z@}5M>9iu-!R6g4{5NNx5puEx9_d1y5ocs(m-)@iUO6E)fX^2wu$-+s!2!r zC@mnG74u6nlFzkegxv91P|5P>yxx5l2Y2|D_t|>wlFzvi`DY>WN8``V=1o6Bme}Ns zb1wRC`v`JFVw9ymLu%$O97AYie_p~F=&;kqK%P=Wf*LJE3z(LhQgyiXom`b7#I#sx z02esstP`vdj+x1I(7+m8(aevplI?-B3k3?C9F3eT0g6+2`0=0AMbN^2P#ACP_C7-m zF(1C?dAP0=;rZTD+V5J(0o`kDJs6KJLMsDP6va%0s3G6Bn^$jG)*bJy{dcvvB8wf0 z6xG2ykAIcuPK&!t7S^y04G0NYYC#WfwthkvqlxfmyIc;RVOI(^=86|@LB`okjLkEX%?uPFssvNBqPUae@y@tFJEJVEo+k-#@ z9#_Aar#h}gEXFsG%>hc9c22fjtNFCS<39`(OJ1D#_h>DEje~{+i3*kLz`T@m>J76c zh=E&n)5jp zI-a-(Q0N{2sCm<&LZTfAYy^QB<1;f$?ZKd^a`^ENt89R!8DFi}wE3e`7y2~ttN_Wpg_ zrkaVyDxRP7KMoB4dY4k@4>n-)N{l^4sRMCtO_@@)e-^d@I{9V;;v+oMUzqgDjD-)5noinbzp|Z4E3T!4n1D3eBxa0i^ z6BPq!ezC|P>F~{OY&?U_23in|r+PKm|5ut;TX&Q>O^V@luEJi`lgz&v58V(Y7#148 zG-%HlVRs^$3rTf)a+E$C2X&KSG0cP*!EsDdiuV59$;R3B|3u5|l^;N0@zO8|f>&!4 z;a>R?^4Ou||8^^5pFiMHBY(C2;#c)bt2E7luUR9?3b`YtWcEu>75zQJLr%dZ?~hE! z@$&Bck=NtH0>NOp;*Z?;QD;hPy@&?Pu$c*>itfdM;145+MVt|k{0QMar%e3Ylk$@o ziAcFEHJh7j{^GQ3gE2RuA_?KDBHH}xx}PIn?3fXAFSGK?`NQ;iJ^P71)-%sgdoY?V zD9>N@H58)qb_^1N-T9)yz-+Hu)4F9LVC}#Nakzn?AC+5`8+5occg>L0`?OYQVB@FP z={osZG0{c+aSv$QXAfT|ei}3r-LU+$BV4Mj{mrj(GKzuA-dr@Dx*$JNJJd@$MB>bK zC)PcKG?LJ-Ug8avw*d1^6VzHSU_N0(M zg70Fh8mOs=c;R!A_7h^HMsdXZ6$8a>e!FjCOzs{$S1R6r*y0R)>EH@&hBzR*{Ae>C zR~SK%F-777`@YJ#E$yonzJ!)qA6kREaa8#2a6$_G*RzYgI@piDebFq;x@x1kVwqt zEiB3m`f&_yL70KLwhmrghef+kET?%6^S|h|*+!4k6OU8pwwyBv0taGbb#>l$;)mBGFFp~S-?I0@fJ0um zF#Fp7=gc2exHL>9^&}F(SK$8lCx2j_?cABzC7M*l^$BVKmdK zN0hZ;tjx?dB`-8Au*py7cx&RG&b#fPi7yNBO|vS>Bu6#)^fFhfv3KUhJ_ilYCTn@u zIThtkUcW7cGrv7*vKV3W8-AX-B->EBIXg>ka?fF^byRvFLaT)}u~rJVHlE~5ZIJeB z(3<3pHS5XEX{9XFu3wDYpKcl(DDsrtt zwP20IJI@E=ko8xvC*LS}r<37_Gho83qF-}$d$JZxG>1Hz`;JdHPft!|x2^%R#uas6 z5@^#f$OJ@)ol~ZRpMj8?w$A0yvc3yiC}iSNW$GRo5P{rhoZE-;;ZM);Tqbvn0w@+N zpO5M@7fi{ejlM_Wj6{aX&tPDvv4x2U7FtG#s@MS>f+HkvRUa=oCxAp74-{@k``^V! 
zNrh--2fVFVd@?*OeN;iToxOSVfi+p+a@MM38g7g76biqg+*>N z!w+P%_t(kvLJK`$3^ltf`N<2xz~6DQ4UiIHwHB3uL>dUU%3K7Zk-EL$;#ZF)`K35& zHrISafCN8)`~9bq-BPt0ZQwE_$WI7jt23l`xOiB^zV&kRZ^(U%+YoR`>9x1Rr|RU6 zIPv#^qf7xHTjOKJuV>(X2%(<&2wgAaq&-K=h>C*2W0Yt~Jgz;X+=m_1-IfU_59L!C zaAxJ)DFq}7d=CtTgFSWtJ#*8NvP`T5FJVhPuNfO?t%}=def;kcB1j0Ftj@AA*nOAO z6R2tZL~S14T=JC%^wKAs*WqeYPlpeMM=sf%LE#~iMA=m&uD>#uO^#xDlmGgn`hU`q zF$`J}YLyrGiv%~mg^(mKYrrD^=9t`j;HV0$TBd|?FGRH6+AvCkW%l}r1S-Hv26z9R zqd^+J2A=J65Ta}pE0mvs!J#5L<*w8F$>(sdtUgUVET~8ow*nvEhUK1Zz4$P_!*@wv zC=fhMHXcV$tgoDeWzunj;=A|wFdbguo1`ItZp_`46A}V9-;VulkM_Nz&7QE+XqBU% zO!J{C3F=m+X6a_&v(Y|>)_H9G8{SP`l*AW~jb|+OwCt>}gtdJWPrDqbtamB$zX~0l zJ0_EXuU`C6UkjZFy{C3F4ttz-31Ov~6g&byt9(O94F`V(r@`P@9!GA9AQ_t*HoE{u z!)YY9ko3U(u7W8NQNr z8P>3>@XgP(|HhynRHuAmaqEN1(#ZcG(2^LOVl-I0_$RK0g&ew@#FaRUG`Ql%_FMa6 zbPJn06mk=SiKFG|)u2^vBc+#`)HQs&a^c-_#V&R7#t5#K?!|w>9~5#G2KL;fogiDG zuI~k6gwxC6mbsw)yQ7%ha^=$ZFBeK+&+4tU3onyDKlV9Y`&-f0+AI?1l3ld>^WQfU z2^+rFe|F(}Cx_hO2aBbBvm%3WQy$hHGHmQUbDe|Be}KOR?0J6JpnYec9a9>YI`-kS{?2$*Xf+3gf*ljOv<|y&t)`z2RN21u<@VyutYFP04$Y2h`9h{{f5Sm*s4tRo1OZLB9g!Y=oJ+ zOkcxuiF1zDJuv0g!?nGD4)a?gqN4Th2|q&Ke|^$OKvt%Xm#qd1T`pwAe3A5l{`{Iq z7G#Sn5%i{ptW{BiiXTpXw#O$j9e!&a-r%9lu z{yJ0$d>>o{WnjO#l{qVoh}}Eiqi?UCX>D)c@){@oc6>c$k~Y;f9exrWe#CV8Q}lEe z_=yr)X2TCokDX{~x<5YFgP%ip-ht@51~Boc!Yy1a4O@zn6uKb4d`LIS+gRbeXZ zhRBfdcOmk|o!2`>!9aJ-R{#aW#5SFLZqRLe}3PcyWt2L4Jazc-jhaIPnq`; zCy#w)7lI97fE@oB2d*+h(R47l1{X6PJwCjYcw3qg5&_{sAs~q7_a?3v4*m4h9mh=u z9d7h)mb?S77XK#y8lT4au)g}bA!|VJ&MZ7 z*3O^(O{!?e(Gj=_9R(;K?;R&nj8x*wsKMn!E9>SalRNgjfa*%ew%LlrxyfWL*pJmO zFNR=X&JHEcbwpZlLeddpeC7W%gDxG}DpI6y#f}w{gcF_HFS^M~5F^$oKDhP&=<+H! zlq9`Wyp;V$`|2P}07e7b6waFj!E_JnDs0ps!8VmY7zy9fUW*u9c|XT@qrl77b! z{)*;Q6wbuN%G%Pc@d1SVq|wac3eOqHhnio6EBhRsAA$oAT1_n-??o_}XlL5F=WtcL zPI+JkFBFIx5XHW0UAjh)O^FalF#P1Wt(#NBax4%Px-bym(*t78OOY z$g^rlEe$c+wT%t$_urRx4io)>d}V&+Kc?!cs;R7aape(c1UT>SEe&NAR6JRE(&iJG zn)uv#CJa$p9#n5W9CyP9q^g+uo|E*iDhQ=6^lonh(24vKDEzMc*#QF1O9E+G7t}tu zkN8P|n{@Av{RMklLJ)U}_C1|z`lUv}b*2gqyb!;FosaNV70$$r(VDkqk1nd|-aBlaoLk`o%cqRk2v(v?Ji>#5{rz{h&A`ZUdusS_ zVjrsZAuO!kNrH$va>mk#e3h6 za+JfEYsdW8ZerjwhLkrdzEot2%Q zXr`7Nu0D|2v`^AGfMd2OJBs*?o(m$eJR?5TRzeWRYG(8BV6xNe_bUNUcB7A5c2{>3 z72Zijz*O)2d$c)d^V5IzWIp^<B5ZqEl87|8duSXOIwEv+pv&^EuWV3;ioVvoO!A9H1Ey?2}c(G})@l2)={yy!c@vP*o)w$CFi*Z0Nj9 z@R~-rFD);J&NM&*TyZ8enq3u>L`Y0hc7^mofGnt)C@ao|?oYC%r>EzIp8R17-vQ!KABn76e|&^eH>(73B%*~(zxK_uUQod(fJA{_8qUZrs+E>4 z{Q|WQI9bAC!P;UD0+Gowv6>2IRjQ7XBAm%-OGMkO(F& z2U=cQ?yrx&SZaFunPs0`O9RgEdJ>|* zDP%p8**6QmJ6f+n>Li`*|9{XjGhjF10JZXvxlEZ|uk+s-y-;Y3&AnO)B!K{Lzk4_t z)P%hiC(MRXjbJJTr>W=S-qLweeiu;LLmXYbb{!KI-1nlZR##S68OYgVS-bYqCGIVs^IO)~Z~NaiH3qO^k5$5eZ4MZlR#mw4OUtRimTg@{ zx=X&$bzk4zm`;;&4RJYOeFK~86n;DvE}@&^(_ZB=EaC9oUe5K;pjWnj%j~1zenDzW z8eDvfkG9j>;PMNfcBMvmN`WA|6RFt_QE8x^ETTc$Rf-TCl#uc*onYqGfD0gaA-p)O zf#gs~^2VR_s^w;9L*m!EU+kp@H%(0cPpIOROXv~PsVp+Z-PhF|PfhCB7d;JavIsxf zcRK9Z-|aY^Gmk*VK|)$Pc*U+nnhj;Ybm5qM+T?aFY>r#RTIqqM`zfX4NJS-lohQOG zcys-~`FrFd0^J+VBE_6xVjxitXU=}D;_lu`68tSpz{w%jD}36UKm8mYH5s30nW7SG zG%kAgeZroQ&)y;<6jlJw6W`jKNUWRErivldVrXtdRdpw=Ajs~=?O%=k0_Ahyn0__I zCeX&MoYJ{KeuJmG_g)(>MhAaDLEVv2L>O$XFqD20JcAz4o|^})w8ohW#Sb&JoR-Bp zg~=py2g%ZjPdQzWfJj$sXV-@aReWz|60??@+}^Qgz=z$HR3r4CuVX6XL# z?Mc`^)kc`DJPT9=mA}g}U5vi%`lb0@tazg)iHUr?B4&=5#+7aWYs z*EzE{S3E<62MemszvDxJa>m2JgTk~F#hiQ2^HjGx^J!+pB9uR7R`boQT_1D5kGRMhgUG8l8&%fERo3nNEgXs zy2o+pqTsP!mj+{oy)vakWjIEP>kH-M{scB;REGuahGjpT_$*BYYj)f@TCX+bOCZi* z>{3xKrvY>5KAYm5U9x^~;8Oby9$WtiOxTbtuRv@>O51&_X_*04KA8;PIGhjnQREtI z&J_KV0cW!uZ-;Upto+W7qWaC}0HrBm+|l3<`|t@?1I2ZL5A4fBee zI=sZ&s{m9F0F`Gyv4hmJ7U1K*(zpDgOk5hj+?TbR`2Q9|+Bn{D 
zsiIcPk$>jPV0yK+J!6iCGOS)A+=(mc5Pbs}624W9I4WZqj*nvGAzu zH*fuJaz|G=nyhd1X7xVun_hjg`Xt&8nIZh=Zshu+?;|`IlbH}q42|SmWd*RW%uJY3 zb-+(Xsf=e9d*z6wL#-)uCeq{X8$5L8W}OVhjsES_|&rIfsgXV`#vq37Mk@Wf$c;{P9xo2t89{zhZ)X-Y_=J?>GW! zY$znu=bW5U2jsgn-Ow|!Lj0_#jEA;KJUW6}f*0<3ao9#dP@Z!gp@%8Ppvz!~&)&s* z!PZvRO$QcTf|>0<;%xEK)cE&5$wRjzK7j#7dO6@Slx51Vui(cJA9uN*#J}%2=w-ud z<@g{93o7awmftBYO+O+@nz%FegLlWbqNMthe;fAo7vL;pjLAZ^jfxTY@1&b($|-T( zJM7Wd?@=3!TL#w-uVFbV82K3#-YR)mZ!PE4;?psc2TacLx0E#O>TT@t>T8?(6=enE zTCh1uJxutu#J;TA9?5Sb%BM>Q-|BrtM)m-yKJu;b-Iyd(Eg}P53^I!Ok=WhWqsC`> z5mS1gbBQhiucd79G_XKt)4a$#ssm2k+;ica(d|`PgXQN3KJ11y&NvsBz?-Wa2$6`* zf>9!1!*V_QguqstV$)sZY3al+%mBU(1bgu7Py>i5{ z;J*f0DNqO`BwZv}Vm>fk2pLm_dB3Wbxr)Dow2{;ip4^; zFpIEfackSu^sMbzF=VM%lW?_&8H@!A=>{PG9v7jF8;8p_M=tLz!&(Dd+rH(Ev{`%^ zkaN)82p@1(gxO6meek0?Eb`oz5K1BA4FHC;g-a}icKC0(L%>F^;Min4%=Ba z?Br+WNJD*DI;~N$i?iH`V`bFkr>0l7>}Pg+HugEy?g%Mt%}VN`4>rA2wvO}N?^)f2 zdO^Wf%%`bMzLBYK<>&g=(hUb3_`+jf$vE!sQ>s(bR6=%VXNRJh8XFJE{bzJFnr5w4 zp*+w{Ju6Yt%(9l}bk0Q)V%X{1;;OP{!JClxq!MnTumiRCJNu=OJD7h_j?>sxUd^|{ z<;~73E33(YW>Gz*#9eS5Uch^v$~ zg&fkZib$N0WaPy_j3VwYP18WkWQJG@s;;)rAA>WN9=?-42Hk~_0PtfItc|smnTbH@ z$=-+Hz~h6)@Pkszn8U2ZV`Lk^>nOm;Y&PwIC%;}BAB3!^;WFMR4i45>R;OnT$%~4U zD+xweR>{HFnl7+*B!e7Y*VutQ-EDDbnfyUmK%A3@aU{qgw29HcBVCptVa2DJANd`Q} zzoB<-T%=sfmz3%R2uj+RH$EfM!9Di1T)P< z=L0A!s~+K}LyZ%H@nh0-pxJnD3I(x;pfw_t$NVNtmMC0PBbz-iWR!F2SuvC>55NfF zOs5s{+JBEc3yNWec48^uhKP*hfkGX0VSB8^#x8YTYAYYEO8PwyDV{+ZTBu#5K}^?!Qi2pQXtfP2t;TM1Kfj&fDWcyu zvH#t*SoOnoDA?hb`s^Mjs$AXR!MWnB4Q zU{HJlNY+8*jftLKy!=85RAM^eTBm{Dzx8#`mFR-1%>B>u=hc1_e%B5(|KK5v`yR)@ zZ^aH^6@1UAK_RwQkWrK_5`*{RuJF0Sqai0TfN(%-k&(NJhr;Bpz)l{JBNiWKFE-Ym z^qXDx9UlPLjCEi65sHL}EPiDJWVEhEYhUd_T;8YUt%!d*EbqEd)V7PW$0fbzaJU0w zX1^EiIzj;(Z?cLH@lTvU!0z7K=(ZK~nU5B_A5lGEsPmHf>521N)@4l-vkAV$zCzO* zWbi{Xyt2c!)yG#{ka|;+Mc3JUs4AQXi3d|lD6>}OGTBQep>S9K;b*~P5X1*m zW!Hc2xl_|%4#Nm1Je=j3najB^|39+gIN>Rq zEB?yL%H|>amAzvT6L`L_#`VUan&BYjlhzZbC~kvFOh*~Ym&_-Ko!yj>w{a$Y`y0W zOk;PtG<@-rVd}Qnmj5mjOcf^7bz8t|$B?#J z2rNP>q>Y(}jc39cTdDP~h%noC9N00>%gQ$BDB@js0ro~XwmH)=Fu(qFQ6wKNTu9*c zCsLM)*3{o2aKD}rHX|uSig0r!fH<4_{L@IITMLisyK%E2A5>O$hAM2S7vPM+3VwY* z#6N$n$nApR>mu1UzX|h1R9u~2FKy&b=#$=i9MRr(`j-Y||HDI%kIu4HxE^{TXd-DM zYbtm#a63k?Ad0bJ?mZ`*XMsB3FF{8F^Pg=0xnLoOn2h_w43YP*-ijWh_g@*_1tU5c znXbTBK=<&HF=vD_A-M+=6G*a!*lkJE#XuP0VE+dNL3>XY=V!2fo8#?yar={*`C}t| z(!|mbr5b%n`QuE>OqD{wHFjP+3p`}G9Z`+*yNen+C56XB91W9K}bq0$B z1g|{d-tlK@vEyXmhe33}xE>d4$9VuozAtSJO&6Zl{!wSLk zkZ#X^^?n3TV#XvCMa#1`T-sfYCO*D^i9xEe7FU$JM8O+K0i~O*V%8w(>OK>+D(9Fr zr1h|yV^G$>Fn&T35a9s-IOK0%`|5*L|LEIJl~VEneTc^4<{xKbBI9rccK9Ku+J{4e zRBw*UsE4~BB#5oyyEK>pKfiuC3JMa@(vVeTNR)MjC!1Wd)%E^Uf(qpgFd3IOH{ja)wLPJ~2bHmggSF z!|e&d_JE@Hv@z9?1y_h980wd%fTb#!9*`8QxS1?;ZkcetINwc>Ht}OAh=%zYR>_nn zF;jeJNQN_2+}(mNyVri6tnWKgpEWjfr+%Ah^eB)$hssy0vrw5=YW1CkYiYlx6!uqq z^7(yDjnV%5)1KMclq*&m>%aa<{jzKNU|~7S=Zt?4*{=MXw`?D9+=cT|$>c!%%#W1_N(}Tn$)K>frZkh=X3JCHX*6>Q=6M|&G zS)#gvn%t-&+APFqDm}R1-JvXEllw0y7%f!&$cT4)(es~1 zOHi1ciQZI$TIZ%>JyEekal4HVW@BmX9fdcT^m6i3;Yz=x$j|zKKjQfu7N^P@mDRK2 z*koO4I&yF1d6IKx>cqDN&zDCevGif4<2&@t53xi<9>as1)7|HV?Llk5qedXgOd~3g*g2g6S zeXHU+U-RzPFT@EJIZ4c80sWeYpkeBNQ$PN1H+;|e??Q`=N1{pntjKNhdP}3(B--7@ z>ucWcQIoz<#Qn-zi8UG)OMTJy{b8hSX4(GM{=`4W7fAiM$Y_IXKJCmeCO#GVhB}E6 zQlv9?!~jbmIDtL4ErN8sy%h#pyC{9QHw|BI4w33CmR92UyV?oKTcc0xdyb`F)o{H#^yeZ~LeAc{3 z81EL+jTz@~NakZFezKrff+5sle>5|lgt}F=1Lf;koFNcQ2099kwg6S|C{(dzEU&BH z3<*vvw7t0ZqxCjSJwo5)*2hu59JGM;FTQin8YdUu)?r{m#fMe&T!!5UCR#GQ%N{ZO zTQzQ2+4@IYDb7%r0Y#h!@FqS60C#uFaiu00if0Xrb^BQj+64q+X<(vP*eDuIj*3t* z#-8dSCeH-cf13iAAfwXmGxekl*)0ku#C5!)N6$>?D@*;>27}&D)e*MGhP*$;$1%4;O7yl96(gr7kWEf`y*Ys*D 
zgk!0LRIziE+WpCeg@@8RoySKM?$aZRN_cxXhu$OY%$u&vw9aXgdW|7L?=H5Cl~bvM znky~E?3Zolwz(oN;mX0-^`bj* zlA(b?-}B0fgsbO6QgDhF5Mm8x(n5oZqGFLyd5)S&0uNP^%*+(o&Sqs*ZIipId}=vW31{IUIIf2@#E>hN5Bo|>jr`65)dZHM6nd$+ zE_oW;;bP{T*%o@(z$SYxyu`kqo@1+-FR*+AQyk6RUpPZLh~zYsrlYmh*h0qYuFx@u zgzBE|DXq=fBoZQyLD>WQ($89miScN?w3Pf#bO0*hh8G4kkVl814}LHB%JaiGcUv0V zxw*NNG@K1Wc8@&-YI_gR3U1P@T}kwMywX&q3pY5t2bz+D?u#>?(TJf}6+Fus0SmSr zi7uz_REI`9?^KUL;zoO_CmkSdn{#KN`C~-6ICK{@IDFU@C?JZz1f5shbNx%Uq*F%# z?nh&1OSPbo!-Wl)+TYg8-3D5%{w5}Jl+nPP;`+@MP9Ay)<_??|0)aw^d)X=YLRm(2 zA_Cg(s_?GJuw*$X0$G@05DRAvfS@@wY;wv<+y@KrzXCc`j7`XG62xpN@ud4gx0wFh ze&Z1w19K3q?Z>f5$8=#YBY5d1{gkwmYdGyHPd3uIr~FmQ+Sw1TL>%o*)>Gd5D&|>! zW;25nG*?!y&#(HJWN5?e#zO4Gvz;xn!tYB2jFmBIuC4t96cD(+d$5*Bc2h zAh=9}W+q2vUrI83+V9vdTz(Z-SxYkKTI16OXMZ<&14C0iJ=Q9!)(vgJNsHCpXk=`2 z!wU-gcL&`?tmZvf{{|0?hx#4)!m!LavwgJ(357haY^l16lWn;giTtJZrYIyZ?At1n zO)!;ck4cub%k~~dMdHRM*1tB~!X+7&)SK#TTit_^@$9athR@+TH#}}dB#C2SJ-ElW zXdnm_*V=wa)yR=n<;eGEVIVpSNIYT~(xtN`?^yUF0v(Nk zp=@-j-{H31zHju8AFXu*0tS);7}t>bhHMRk?rRYR?Ej~=9t2cSbuRwd-(Lq$J`4kN zEI|BZvN}vE?^Ye*^x#nCXg^v7+-=Rv(+AT_s-7d;4>y_?gCa&lU4~JIP1$5 zfYDZA)SJ-s`aEpL2+=RCZT*C=H+Ow!Jo5$eOY$W>{-Eki=Usk=-ibvwNe&dW(ku{Q z*Z)DX^-2kFj#|W|SZZg!#@pC!X3-|Ya9R$%;ZVliJx>7#VFtsUhHee7s8T3!;TdNaF z1qp>?57EG)dtlcgmu}idkNH&Vje6;zi)4T??2o=%{uzT_q+@{CtG&hR7d~t-SA`Y9 z@hmZ3F>sWzYv|5jdwl(vp-^F}1V_LR#39Fr=bhMSxIGR^!%b(foOo}Rd=e0X6}dNQ zaFK;pg)8w%b@lbaWJA*K^Ca(ji`B%wcJ4aRGQz{V5s*am!Th0VxKw;-&HxeuNmA2Z zdd4zmaXKiP=*Zr7x+Wi+B5JeJNX?j0qxf##V?0V$+bIsIApOk+bN5MMMGH<3 zTftb#K`pWuI92zof6k=+WT*R!kKsAQ*0M3j*vae#;Pe~5HNZk?mla(1;PPCF%uys4RR#g@>x3;wg(0Hyxrd|El6B6}Kr|3} z`y#@ODUufozbeev`5BoFJ*5YzKO-IHZoLUHt)8Sl?c`!->=g@ks#~YJ?wS~5>BLK0 zw+cC%=QJQPMtt5}RZpUnvIB&9jA6(LzZ_Tj;i0IB2>=`IcAH;{QbU~8%vGmn#$DVi zaV}cAce!_zw=|UixSlc6v4#y{*qr(^g=2#EY9nf%BCJgnAfwC6m;2xdG9PDi8k{vj z%L{AxtCHkywJbdtlbl6$t-KIn{WpI`aIjykIcc{f}?kEonx6l|-CPoXgr8&t9T6I&-9{d8=F&`4isaxx**{1J?Vr`jcF$ zMy!ij{?&r#6;+|H)*llt_s?HB%g=~?;BGeYP1ZlaI3d0sCJT!o7&56(RcHICb;37* zHRVlxXW-UGtJ^;gyBHV_wvH6Q=@dh7B%qy8ia17sNIBwBpaJ15%3S*ex`6}%jA5F> zl(9P;?v=dw{)zPfMM=Gh20eo!#os+|h!f5%FG|T_;x|CoRIrdb^r(KCgI-ibbgn&^ zThl<44xdE8Xw&3GLsl9NMe{l!Ft*LO^@rmPL$9BgreY92pRH%VYY2Zz@iP2vr>kiu z15~bZUoD&+Qe`aAs@m;nB$ zp!+j_3n6#OsK1O)9ueeSuSGezO;Yldl{$oHs%G;N{3ht^?i2Pyq#x)yIp*^INNvcd z*$Mc%ymQ!N;Oc;Xfw*@)r9ikaW>@b)z25w850VZIi9kj`nPc97IFUAvb(EHtT?64` z?!R&Jq_^WG@*!;f$>Y@(ZXw5XHlH6>EH5U;-Yz!(2?*HR@?kkX*vaw`c~aeQPc;1C zm?_#_DQAch=Y9*tr&g!Qdw&18we+P}{-j2U4v!yX@UK^T=h!!K~;I&Ynto<^>f z^L{&X2gcA7V|XKqh9^j0%y3w+kccgv?a3ln=!R6!Hnp zRkKeTbQo5Z@`d$k_IqE{KWUkbQ?td>L~wP=_WzRo2n7{9V7NK&erN}IldD@Y9r#Z% zpGM%bEFc8}ZrNO~GcYj^WA^?`b>ySU32?o6hVpooYl$RsGke2~o=lxzy_f09N@GW% z@V6xwE~44gl=H@z3x6?-j*F{MSIXk22chzpSLk=Q>V7j)0-2%j4K0ND7i{Cb=a@0S z#0DlkG4=6`LZGOY-PI}c!5sJ2owoWh8lW)K>{7*b(AGgW% zu;gsd1yL)r-(zyIJRd7;rX^qn{PiwUI@@2D+{zE}E~3-i%YwgF}3l=Sp6mqx1IOk6v6#X8p> z@*I$6r}CqUs6nohU9xy{`1=U^>->Yar~fv%Yx1)ias@z-+~f;kGNmpP{y9m}&F3Q@ zcg)-yU}R~v_D~5wP|O($+hq#Rb9yzQUvAg<03_?qLPih^sOJ#wi!)@e$z6BOM^iG@ zS_>J5M;igBYipKf;R)Tc`+Hs?feeL5`{DcP|1Sr?YO|J$ynJjN^8pbH=jK}pVGCl{rF&Ux-#14A}j&Tiz-$togP3>68z{Z|h!RZj> zhD*Df`>g_^aA52+l}=k1ShvMje_21G_e?H(Y*`X+ssSlXl-=PrHQ;2p*O*V31&={jw>GvkRP*H zeX8)zIJ-Clo{M(%osxv8t+tvD1O_yy-w}$JI>XF*2@=sedShNPrv@PY`bwkOZq<8D zTd2IoJJl`@ZEu6o6aqE5==*hb7i=qir&j@_>?$?KhcFgbK3G0DxE{cH+s-IQ`f%1O z{QL6m^ka8PaKbOBTpw1TyllJ@fMZb0K|I2`Ob^HSUKC$i< z%#QfnTe5*D>8{6KPd`zgGj`75Yi?%6Cj}H1ZaUz0v2^(fZxJrS7D7Xd@0*TjpyDU7 z77wwCIiFKL=+?UUSJ>p{#R)4nj#kui$$os^uJlM-QY59ZT9mNF)HiU|z|0a_QNWNb|pdF1ikR@*|><7XNgm06_9ybC(^L?T!q*gdfzt0Yl+i&Z{r^3&w z`Xk4m^wf}~M@D^0mefz?OGuFwV2um&oZD&p8#SlUSs6vTAzRGzt 
zu!DvPovV&CNs6!xC)Jz9b+s2&^E8UD&aLK5Z7v@DJBXsUW2HlvYHc&^`h)Sz=8h7-@A1a>+{}FH&W!skCt1*&>jmz* zb~0ssxm=So+5mCg+1hOOz;UAPhPD<#p8E1S=L-XU@?!4-ci2%kCmk%e?2_*=mYn0U zqhRL?kGdGS9poc>jau>^+`dp6Dc^+cKL&rW_8`=2<_0(VpgB#+_rtkjG8+S zMv2~FiLuBelyxYK0Fu5)M%ljXOEDM>mbH|)Ke6KLLcbJ&2H3yf$|E)3<(w)U@QK%X zG5wEf+rxqH_~P(E6&08$zy^RUxb|_|ehom8#h@`_#q|Vm%YO>5-7gT_;7g#{ik`LW zP|zb5%2dxia^5=llg#|JUT@0(a5k)EUgYuj3smmY4f0Rlk2oIhaW`^AUM* z>CYdN%-?zw7~HsGVhV!iZ9LjSfr4=C0b|t8zzjYijRu}(-x~r(-!jkvKtN+K0BYSJKCF$Cac@8Cidyv zEa#*6qO%@pvs`Bm2~Z|o=O=7WtKWe=gR_K452$;z`42mEGb5u*=xxK_XN<6&Cq7S4 zFFpQAxQsP2Ap$9SBtu(AID8}AyuBh>kP!irPK_Q_r+KHxr{`_W6UJ+V$%1GLpV%Cd z8Y1=EN0jmTpB4E8OibHhv!G)1gX~($^;gwe-)d2e7|8H{f|R!B5FS{?E`nDFNXJ7= zBC(Y3O#&|%1Tr8HcqHLIiXKIWaqeUCtVD*S3CG z=>5GxZ=Q)nvr6N@`ag=!J)Y_RkK%JFQ({VtkRegZCS@2g_vMmnDEHiwTkdzU5OWDp z3?YCft6KA*i_uXE1xK*EqRS=R`!49zrFKU~KD zz+sC^rN}&3_?Lf6cB^qioc_GX^LTq$9KT3+VLv&JX%{u}MtqibRr3qQOk z{4n+ZHLW)ABU0>~c%u$F+Th6rro2&jtGwGHZ*J*8?{uA3f(ke)dCF$iTWDOt@(Y8h zQjg)L4WkR;B1@Ws!F_tPDv{K z|8m^&6V1m>@@n)6*7Wis8awiMfp^6FMRTxIO7ItXk}VeB{&;abyU(XPFWMi2qR^>^ zDLt$UR$S`uUvjIcc@8TNju@UF>&*0gqM3e%*u6^>xc4zo`FZ7@M!J|QCJZ1~h zp0oC4&{?b_u@jNQF){f_?+eov6D=|K^DJbjn7Q#BMUS=7VohgdwbnIbNjQ$7DCJr# z=K9s%gS7dDC8yH3SGYA@Z=woC@AUh%1E2dvqKdqVi1#Tm9>+5sbwI^c$IDJfAm5ma zBiqPkYrObG$yEEFGNV7VhilXxsyV$;vF2dHgh8zEncmvNA;*V%X}a0twSn6U05TCA z1u?m43GbACkmkf|nqS+{PZ_4!H$8`|S*t+xaHHO<)x?2?a?2Ln_h|ig8ShlNyav~b zE-jJd=Cmewiqxbjbm*jHmM9*$+2U4QnI>V=5k=sGw-8|yw>;K zmLxN8y?eMN7?_{mTQ&FpfIsJhswGudVoQC9Ij7ykkHKGKegT2rO>ZQM>ir*;9dD&& zuUT3Y*}HQpMpv^R7rTDHWQ%~{^$D+PEZtg3+mBDS-BqF15Z-nurRw}QC9n8Ezo1lm zpQp!Jvtyp|;MClmsF=BPHuTOEfqF0PFJ)44(b z)uay7N%~@PF#neoG7SuzX{u#-chc`iq;f7#l?o+A(UAx8&tchpHiS-sTppi0lwxCu zUYSKRkk}%YnQp(;E|-vq5l6I6hVIR3D|Wnd(ZyS-y-78z4sVULe&$&dmUz~Mu9e(T zdx;fni(ZEM(%R$RHB~&20nrEu9i;jW?yD#(tgEYDa^Qs_7Fv3xE!qmRb=2jEgPz5P zzfCGy0bB|?cMo{hsE1PP>sj2#0;go*)BGA@%n1Lisu94>h(usDu4#(RrzoFp$|F4& zU?BpjgHK2O%I$m(!D!fKt9S5KMkug88hOu`=u#N-d*rh1xgDtckR&sr&c(2wa=HNj zt8pbi5ds0Kg}0FrNH133{$*DD`X!KqzePY0VCsRZl7B`;G41^IPNg0VuKENv{*$@0eQwH73^3kkD+zNIEwoLSSVu9lj7|kU6;XNrWits&$D%I3EEaqxAFeXE1(y~L2vyV6K zp8TUZ+-o1QX@e)ZOc3LBi;rv+LW7;r<)yph6V^4r(r~p@8?GL`zJ2V4amDkZDXi$l zq2a#1PemnFSPd~1_zS%h2xPfnV%5joR|2$5jty?(-+9Q)>KTsq17q9AvP)Pe*cA** z6{cKgq|-(RkU@|8S`I_%Cho_)yRFY^E?wQ!L$g_A0?z!<-Id{`$MWJa8zUqAUKq{U z3OC)pk!OKueKiClPBZ@nSiLSs1);J}6O~zFF}tTH_QPH`4ngzj4k7w9_HN{-}Ye=(5QNh;lIVdmASaD!@}_15yKPLWs6TV4A--X+vpO2 zHtsSaaG!Shl~j;;#!IW!wTvcg5cgIwyH~bA{%bVC3aKMGa_dNd=(!Hc;7x_d5-aC~ z(9>;?A0-L==GR#{QpkAxppJHOa6)NmXb8+fNAfRP;Ms{+|&ZIAwK9qugwQEusn z*DpUP3c^Z!5KcO}cr`TMRuLx#QsOKE|Lziu86i>Z1uRk48ls=bO3`7vR2B+%`E*m7 zKHQn4&hrHpe49rFnybVD?-=?}P2TE&RUo1CYv$5h-OawN0(juZq_xt8!Bp;p74#YD ziHiWE9*0L46XI8MM&8%|-l;>?pl1+7PnYr2))@W-=KSF?DV^Ew?x^}Qnt((ML9!h^ zFkHgLz|MmWN4YjwINiX%V|1$Bl+|Pdo;v1v*1)?E%7y|pB zel$muT)8cauEE@i@`~oVVB-5q(eydEL)OyzWKTTDv zS(oCv(i3R{)8zCH*Nl9pR))1dOJ0~v9cWJdDojOxTr|F8Fbce8KN;Ur8_;Q_ZHro0 zmlYKh*XTPe?fAEQl9V0jRQ$7;5c$r3GPAV`_29ndAl}g&bc|=-hKbq1)F97m|1Q@X zR7!^FE+YY#@*rNh)TszO3jPbY{C>TW?NcV8A=LM5iz+3N&UG7Iv$P zU2OXTE_HMe4VkS7C_O(T-Mu`)d$E}Ll#fbg#hl3-j*lRWVBb#D?u5n3kJT!-@g4*ROV8`v#+!IEAw?gYQlFxfO61@CEl-`bZu~TT&Z9C zwf9suyI<^Vvg4!7Cm*B4UtXo;mI42)&c#6!MedO8GZv2~zhEkGO&EngY;ufBGGdVy zXcxCKw~zzrmJAXXekf;UyzCr3Ni4ae6`HB54jQEF{yPlv9T2;NE2`DZMMG^NUvr9# zGwjZv&a*Lmz_%r(eJ^^ycI6#-qDnyGamW4lgF&Gd*&u(vnsGK_Wqu2cQ=ej=g~Rn5uu0@GO;pDFARx=Biu3knY*T`Y4cFDZJd9QpI?Z-(>OxA ziY6#KO(*gg!`H3%v+n<6%3A2jfP_S!cpL++zBXBZfXYRB-XLM=_k~@_$UuT~bZxQ; z_=RRt0*W6xr}fQGbO7g$2hR&@m?|c|Ln^)(Gcap%Pw)12z|pgxMAng)&(N6!`%bvc zO}<`rTb;2EN*>uB!=vp`6&uTK_dzC$HEO}B$rALj%p^=TyD@DvS`=!YS7ld&Ac)jF 
zVX@38vSa@1-T3onqcw*%1vG!-RD!2Uv>^|c5!85OzA$`~h>l?oCwk_`rChT|fhw4v zCt$=O7uS4W7+H~aN;S3Z8|q<@j&74-fS~zz3Mn2{rpn=5X?bV_ch>4?+#c}1%h zOpt3jxTd%y)YQO)o+dI(W(!3@1^>N1bqWDWBG=^cjMgmd^w2NXJq%^9YsmtPO}nH3 zs8SEZoOSDmPsYsot9u@%nQRybzWg7`Oii{af&_#L9!7doCwuQg@A{JAb-RORE`8Db z_l5xUsFJvKEj|y;yea6sY+t?uo4FO9P^GAW+UjVNgDP>HhJ#y~qJxzXt577Wk}P@y z0Wsx_>%M{oiJ~V8*+xbazVZ+#-Fe2G#7!Wnny<*CA65q<8gG-H7rDAxTB>h+D3;47 z4)t<(h%$?vniOIpND_vmZs5-oy9tuePtyE;D`5<+(k@Nou5@20OIr&Ig7<7l)0QEJ z|G7Lho?8`wcBH(b#LXSaA4d`sra0!dD zf!Bw$;x(LpHQyrRJ^VaScZJq#KEc9-HN#LTuh6WS-UPj9&7?2p!z@@n-PhheFx;6o zhO94|9H)sM?Qj)D8=Ow~#n$BY{D$U-<)huhUjyAq9vd*RFlR)KaHmhtr+GgIo*R061SI)mQZJlrb0 z{Fm8!avu{{C3=SYf*G;sS0EgjRiHXx~JFArzlv|hc=$!zgsl|C7R_xv1ccD4Qf7gzWW^F1h%6%FN zhaP9;zo^|grcuDp{t?G1yJ{zA%E)T*_|j9H=%xLcy*)AQVT-NcsY3L*2u0l3kD7Dz zjO-39e$*7Ku_u%}2Q+|>uMWv|iyG+rMnF;y{~oOAcuPbn^hvVKJQ+XS+$(co!uJT+ zxj|-Mfly2Y+y}O0W8zY0XZl!klUCSr7<#gC85mog*>bBjI`H{Rj*mF~8A}|~Cj(dz zN&0kaD>^ktgUs^KJTEFS3T_MSKraw%IJ->s;;%l1r`N9iDib~9J45cP`Kh^rx$-Y# z*NvTC3HFV8YfTYWAyQaG$}I z(`-R0DyFY2EG-CEWpW-jjmazAc=4I7Lv#+KJG*M{eKKz_m6e3Qc%`MuT&s{0_A2aD zYuo-0%y0R>J_j93P?HV=eE<{WJ<@+oxW7u@3lgIAimLH2UI4`HrcN}U$(`#c9(9{)QiU?3R6Nkp zlMlI6UopBq0Lh`=O0Rm8Z=77#L=)KT!%hoaMYIJt+w%TOUp0d#@ zTQ@D#2`jI%$eRG_5v*RJHiO1=kOiR2r}{bg1Nnu~mcEqM`jC1OFbP?)9zRPJXY3K$ z+-ls@J^6Rn!GCk3Lz;2;Xvb35s3SVqLP6$ihK?1F38eIH6?0Am1j2@4T)h5o<`GL}E7YCL{zlIuB(kgI zldi5rs~kkB)fDC)a+7yL=!7^ z1uqDVBfA*Xm>(Q&{&~j_{E!%k6eB^WCIie(!z&Z>u+qkk%ma#1< zT^@c`BlU}*c17*YsS;wsRCH`%Hxoh&z9FnD=0I1d_XQ6RM>e)R+|Go3mzj2``s(+n zizq~3$G=ewA6lw#&KM3t=_W@D{A9C%zo(ISXtxOw9*Rq#=Ub-`7J!OAI$mswqu3A~ zz_#32#d?1hf;Q?8X^{T|b{9IEZ*aCe(SfSIPRxM5L#8B15M|bo#jYfD8;tI14|@ei zG3yMcAboBcii*8vbeTXGXJ5{K@h1+zd$fA>vbsEY?M(ms1xTOGm*-|oxQk&MZkCoU z%}s65e|(s1Z*)l!1jE@23~?7DG4WUz5Kf}!#Ul_bh@ZRiLZSl(*u+mhix;m*NvJ^u zq0c|5UC)2L7|8CyFNtpwfiGzO^gaALV+vJ8V9HY6ET-P8b|D_?ec?{dof6OHet{}f zHn5&M7(QM~(~Xmox^X<85K5qz>~BZ1URavN5z21@to7@<2%lVMXG)D^$o<-)ab$d% zgankbt1Hnrug}^6xXuo=rA<5Vi@2q0G#auyQO@3JvxpQa{d4z(b{Vo+KJ~jXfVQx_ z8N)O)7>i6wRD@cO-}W9izfpFR^DJsBJ9N)ZHdCw6z;^Y?fBSveC&CR2&lEZ_&tTkC zP%}B>B;%Dyvyy6fr}S7}wmYD>ARxrwb$`+iLac)^dGxj;&-k}O*?xWyf6obC?`)%^ z(i{|O_WF2~U@exYh9L%LiPVRlXIR9+C~T9x?w z6ogi7L&rx~^2&U6h{L2MZt(yWp z-}M0dEmdQTuc*K^{pGgHxP`@J(7%y#*vvrj$WhwB!O={`RN$Hk%$>;$0oqa~jwVi) zmiMJwj&CEj0L^1$Ln>Q4^Y$-i>fA=ftuA}hR4&!?SMruHng(k`{qMju8r=>f()7DF z6K|1F2q?RbvMs;_|8`An=xAejcCZY-mH?hE4OgVJ1)*o-ReNK_%UDvnh4k{^u{5^; zNr7(Dq3v8(9%sIGYIxbH!H1M6B`Ym4Tc`lryX{t4%i9xueF33p2n2kKBzsO?6i|fV zcL;V^5LZxTulURB?|PO8#SN@i3TY?f}Yi>Rj$h@gbs6#aI8fiKgU!+^2{QwyH*HTS(~ z5oMkDMH7C?Aq9Fxg(^;YHpn zy99yg!P!Z1w{76ayeoQ9nc_OF{ntIBa#ON%VlgChYZEfWrr z$$h=Ig3&()P1l+a{t6X{<7NezBbB&|^S}oPWo{P;4frKSS%@`7U2F1fMdRPqkWDW| zDy7>2(YM;{=G8oQheN{R)i+2iogibf5)MD)Cq+Mb50PF_mhmg|(zjNCaIAV`CGMV{&xN`V?)5l6ZD5n@@V-;;K$|T?WmLa&?C!(BA4-nr42-Rd+gp*!+4oIZ=)N%kMHm$ zB>%i8Rsz<-(Jga-kN(`pzG!qfovk(8BF-V%_cBvAKCyC5!mVtyuX`e-Wg^RXTzNb; zw)h^taR{ez4hb4nH^4W2ulBN`4JS$%~U*i0%?_XLpuP)p(cPor52_<9T8@3(Q<}TU9{XF(egC) z9%QI<YKTbz{}*g3^V6PgsE8@b1prVeXG?~we+~jV(Ko&)OMv_ zL^y~xbM9P(ao_M?tLLui7q)6Y!V&MqaXJ<344tTsvu^z%p_`K_{%>Eaj{0;{+5&cW z`irH@Ennz;o+x4M&RhJo;g?P|HP0q~DtC3OxA&_L)mHN`>GXQ1Vpd30<6A8e zySFch4AA9a&8?;p9_;0Q)OjA=ty%R*Rqi@73g`Znh~r5xX|k z+^n7ZZjC1f!8j@Bn_7Y7wCX~1^_noz8l{<4bZm{DlL62nfynIu+EN;@ z@blTVbz26XY&5LJod(stIR=K%8%PStfhiz~c{u zAk&AB&y{XZa+k(^A{}^69+mu}Z_O*GfLeKaO)`SZ@!BnsYsIW71 zZR=lO`8^6@c83MsFcDfKzTh--zJVtL(6(HZT!}BE;J?w_H^w-zQ|uLF2o>b^t(SfX zy#H>-qAwN!TOcY!xk3L4^m&SYU~!xbwc@3S_V`1l6n*H0uxR5hhRf^GrwFj6l8$Hy zG**0p=wp7eM>B`5H;T*H^5IkT#ZJ3?n;%i-?M`50a~Xaj#BJw2jEJsxh308E<&T6h 
zzn9XeYP|HNP&fPk=3ca#wSMZAHU|=8JJif}`ageG-aKJ#H4ol%*fQf{a7!v%Y4$(K zx*v)B!Ku8qU%NVfXV&mqX&i#!>k{nm-zoD+EjA|)PojFV4O+Kc-nS2y>#NKFSsPt0 z(CL?(oVzkUgIN_M_`#=|xlegC2brqVb+i&a9)bNivT;Rrt}Ww1IjCL^M7_lie}&r$ zzIJK&YbXuS6pwUVGwRoXK2RkdOj|ITkTM^KUr9pqUIfcvAw@~_6zVbECEcq^Fc`M< zlYSA90z;?ZpvO@Op3s$C>(o#tO*V5jZG|a8h1l5hQed3w?2yl^vUcKLtqjygUlIez z0ZqQCEox-IM8~4PwFF-kMYR0y6U@$J#0j$-^Q#CHJh;N&X z;_8eBD@0G7a5{K|0EDQ-)QGYu5`eNlo)jIUKYKW0v)|v~R#sMyq*sfTVs}Bz*lUvOw z%lsw+D0~^aQ;k&OKmnytBKyIBF1$nZOtI}1AV|uq!3FgRIBF0K|7LK=@rSR==!1FD z3__?hbfYrJ|8wY>hv1zS^2YtD_c)E$`#kwKz)2xw zx5;pSY2(HDKg2e;DtiaiNersI0e`O5K{PYdII`;VMRn#c}Z5 zHJ0Do^Rqk4z5X9+K-I%xVd%d67yQPcGF;Ml!O3D_G%d|#^!HS->vGq_2xL6PG06X7 zWD1JsRHF>JaQM@b9Fw`swMr=jjA1@JEZ<2(%;t>}HN9Y^zF;NLu7m-0f#ucOzVS80 zi=yKCm2qo0R4KMINH$Y&F^)S)7~~rPXgoG{&kw^Y%Gwd8XZ)al;z@7`;CzY7Sm7(B z$}=Wq(3uiyb}wQ2YS+cGv$96VuexX@3Fi%QKz?r9{8vBqbO!)58>RxMW!U(tOnI+* zG@6#?*TnA4?eAY3NSfcAkGgz}eyi1T^3VJ&)s!E_lCj_=`;>^naSz4TuCgpjl)23c z50QIw*xFlVeKA=`#4nQ-UN}dd7ktjXDiDNCTy`TClck#f3-&%(z7yXpxc`nR#fq^) zF96oj*GJoMhsUYqRaSI)nXdtV{Apm<)f;`1|GKJ622=|B!5Q_2Yga~mTE>C(+@j}J z2b=oVg2p* z3qu2jc-f+s-*j1j9&-a`WS#Ag7fZ=VXfB|(+KmkshahopiprW^(`9?~mR1>a^Sf)^ zH-7-l8Lg{>DE4abMn+qezBUvY@FeYw0~nr}5I&|-EBWyw&N^vuOyw(H_5w9VpX*ID zP>1UsY*>~Q5B_zel~a<4UO2m+83Tldk-Ob5AYAYhK6*!c6f`D7tauBdZAGQ|u+|!n z9`#N%6#Ma94h#L9{NR%?5=w0t-J67dNgoYk!xA)&YElL}tEYLD7dr>#?>>z;QfBW` zs*vN|_niFxU)jn*4imqyX}j1`#%XK{m#Wzu8J=%6>P0dmJZ(ir5MjM=UF_z<6XUtD z)or`1eAD#nLc0yxsVay(rB>vOWW~wG+DR&6xpef&V*EW4Y^zKw&tBo2%-X@s%!X59 zd*B1#i2LuBjc&1+5`(Y;344z*xk|#~xl132-`K-i$B^%)norjE_xq=gCPI@JI{)mS z_q$;iW%36B>#zbB8nA~ndy<3pHbGmiJ~4l&voUD*?`_3KS2>5EhtecD4J(d95<2{) zv21*v=$Xc3!Yfgh?^4{q6y_TL%7$%>*8xX>e_ly>`d?b1E-BqZ?~(aU0{e&uZS#q6ku0xRCs?TevNG{MYj-q|HZBr{cISnwMOU@Ty{rDXT;Kv+wyg|W zFSiv49a8YB4GHwD`S-UWfh)ET9TOg=7oM^I*%Snl42*6U z+8RA5O`%6P#yS1UhU-p-(v-v=-p>5A;3rGeF&eLSGS;GUVT%mROH%<{^7A-fy{qF3 zMi}7SVqe1dg@M6jsZjUss0)ZLy579WHAFJz$60SET1i&V5R?A1BQ`vGy^Xpt>B=Oi0JSGjah?jjDIw-^U zj7BFJqQz9yxZ_YFL{m0`XV)9ri53$tBf==6~$0lY@f#QmE{u@M=4YH0~qAgsY_sw!A zt)BvDpg}W%7sh66%DMuQsdoP0K0XZS$kfI=JoFm=irf_OxkuQaj-|fi6C3VlunPRI zJ%H7vf_!Eua5`ByKKFxYpSU&xuis-07ueDxUdu0y7@wF~+BoIM%Luw*)?V=KMOT*Z z$*`4-7|TVnUom0rSgIH%qjgbOAfIgej6ZSiqA=%s>2rdw{i9B=k|+gDo3%eP?uO(q z9vWhkC8@+u<15#@J{#gj!(R8IBZuqX-r4!f8%8+p=Kai}wyW;$h!$KH8zEjDG3`k|S(A z@m6Ai^7v`Oj=4dmj$SJRE<8-PAX!-ZSo>Bq`*WysUUq6Dwxzntgh W{vqoXvb{`%VS%WMs+AypQUKtQ7_{2@A^ytA-f zyyW%DPA2cs7N44ec1N>IZ84YpR%|Cht4rJbXlA!XA|{FUt}G+x@_Q*lg#Zf_&N@C~ zUFR~M%8ucW>kwGZH!Wk+cea&+==O%wrV!ntFa5GQ#bY{z6Z(Mn$s1!FwXN^%owY!W zA_;N6pvAuZblzr>-4|j7SVd69>7mCMCK97{qOj9>$;a~}BT_o&KMqZ#c~7ufE0&BZ z@yaB;KHvJ*dhxlZk|GUr7WB(N0G0LaoyqdLH`CkCY~v()Z46U6gb=E{?vd70UXw33 zgv*Qn+U?CD_;kB8Q=jQTDsnGF+JI7HjpR!h76&e5?TI;6I>^&>Gf?5AS@|Z?t8U!Z zc2kc|Z0P{;{&~%l8Z!XWsYY=D;Vz9!kK>|hR|nu$Q-8lbDyWvkH3w`jcSx`0iI%OH zu>_BLb!kZhB}I1_Z|?KhE9$DMB5sh1ry)nkBki0m@Z zi4c7T!7C3|p9}wK##PAvg(e&`6-=|2x(1_%xAnz#Zg<+jBuCiBxA*;i^RqGKSKldc zI%rOdNSuriATV9UFIO9o5m7y0-U(?jP@|)E=b`K#`(nf&6puU$md$*xPkJt@llSJ1 zOWh+kKn&$z!5Vw>;SsHKhrjY+xSqb^;?5v)?MQa$znKQh;A9bx;c7yqly)4~5%TFy z)Y!H*0C<=}=xh&JXlHYw)>@+T5Gz9VWy_G`d1}Y8UvThI{{fr$wD_4^v_9~8u&bCk zubHQJ9ZVemrCrtf>WInGcF^P4H7|VaT3@yrReO1-@ItuXos2pEGmTu>Xr&+8foR@+ zB^#}l@OFm3!t10!NYm~wI2)vW-Ff#6Wb;9{;I2C~)vzMGM8t># z7i|fjII(z`Fy7>BBU5x3nnj}grn;SnEGY1^9sWU<>54#VwIhG1U2@*=cZ!7MOF_|W zg4nN?luHF`0l1H-ogf=M9CO|v131PH1`6b3p@NLuv8eWll#W5==mCV<%J02tPm<~N zOd64Wt?|WVU;^y@n}BJ$uwU@}+ig7;}Dh1Ans=kWN+|An%?u&gOoi@Dm z$M3hy6_Gs2flA?*y54rxG0BTNBR5_|3z%HK({R*$Nf?2S_fO^Eugagi<> zA1K~VJD}F{O*RHzexb+Psn=S~VLl$XwqjtLIZU>Wj?v>*eWl)GTDU){)Cxa`rU5_` 
z(HGh-{xVMh3XiAfcV}(9T<=q3c5+CQ7~TK_eXfSVnD21qw>}Tpsqdm|;)pM%fgopE zI6Pz0RAQWQ)JB(mKZXV~^mXpz=zi;gJwnrQdANwn4#(bSfYuNWcI@)b%3J$Gt z{-GOstk;KNeH(>e4}*oXpP>*{hn}Jw^qxK4pSRzmNmwo)*;fUGl;K+&=d`tPq36`9uXX z(cLi%TLxB1Kl$$wuop}1PD*530&pkoW=wL%N}n)m`q7=(j)bWe z28cWc!xr(@{87`d1m0`bd8h;S3kd%~cd zn)bl%l{2NNNRJf=huhW?O|NkQgnD}@x|`*?Gia?F?yGbO&|;8xTnUB@&{f_VMZpsa zRkPUiHN-M%0|NrSqzG$1^Y8#1hu$B-a@FSMm&+x%;Lnlq-N_#Z8|Hl?Iz;ANSJAIpAWK*3&5ECy%u4GL+6YeRh|pWF~@8{b}DTH>}n zJU%*DdK!bl5aSQ}NpP;np@1s$wcR5DMEI`{wFAs;F5?fP@(g?W;+WSh?%5%}Km#D61=O*7Vi!U1sKm((#gZip zYBDi>l|vs06vu{f`MYqERc-UG&H_Au`T;%DHyIQrA@ zJSG_pu|jwtQ%b7-HD`h$R&%~&;;)Y#YX2|Acd{eoK5l9F;~lafzO<%qzW0f?D5{Id zZN`89%Zn7jV3>G<=694;ugIvS1qT1K+u z4;>-z;~ET%G~;r*TAvXLssQag`Rh*^MW;_=f8rvdZ1bMhjPVJ)uN$RhtUMIMr^a0Q zWVBpeo+Kp`5uAs%C!R&nlMo0JH{_8b11B;XsRoWh@(K!}$D7=?LBaots;l;hwGBRr zMRvjBdN=zs9z5WpZ9f|!_je~ukrvV-I1VTwdz+g;M(jZH#*etxrRlTd`fR-Xy;0c% z?DTAGx{QLVcB?3{GC$4lyzFGsPz zfc$V#XFh|@XH?VM+TSkrb+fqY#7J~59_C!tEnJviV!z-O?9Cl%ohmZ9D9G-z2)cxQYIaSZ2pMAw zenY5y%F(SkE^Q6h61gzV7a7ft?Qm~?(-h+Qy1=T%{6=;Q5=(Eh+)-q?n<+U-lOoZC zulIq+lpT2{XVgs0PITTWJx)ysuL*N^xS{m<(*Ds7l4WAMvqa;9z(L>m-r<5x)#NQ4 zyA9AkqoUI0pEQ{c6jZUTO={cI>|t^Z+R1nUoA+w`SDbL%F6pQJP9}UWt2wbUkBu4y zqOD2Hg{rB4cN+?+OocN~LtYj9_g!Dpzvw!XIQG8NMfX%;=C=0_%ne3t65&cr}do-UmVD~mbJ@2dI7icGbyCY<;aDKxpUFRz4zhEQ2mGmg! z5gGw7n8+C-M>JfB(K)}-GZDLXwB)tF)hCexe&eD};N341bJOVczW|bUHxv;@9K!Xe zy`1oQIO6?^>XG8dR8;>YJ@S+ito3LH6zdfiodRQ8L)rLhf;Id+w%WUwC=<>q$i}V$ zyX4%&1gBK-U&!&~J`+gItb*Ex3*e#tYXTTcX9OX!O6L-v8oP+e_0qTk_$B8|6rJtH zgJS4AxO2W3B6dtl!t>*gi6e9K+pu=Dj#wuIQdr8%2ttu85rcAP+0REj%^>-+C0>YF zJfKt5fX=6E2&{)TaaNL8Z6IW80(&^sCEqjE3Rsfik!{C6w%g^R#cI2r$)36hrf_O+vxuICe&q4_D>!Je{N&^A4e z%u_lrh=chQTSCGBFcg18sR7G91Y-5(k%hRco23$C=+q z8J2G3;v=8Jnone^e?V}c0mpymv6q@Yn^=Xf)JHhGWO@<=q4v`cIkK(CRdP&p^BQk z0Zi%j{lik4l;X}S;qG-4E@Wrtr*FA9J}Jc@W73B;uLXPfYra}5%H!u@QBnk&JtLS* zNQ-L1Xc2{bW=;^th735XOj`#y%$hRR_Z=w5WCiM8^XtcYK+8Ko68F>HQ zgPysQI8*0lfg;5|!@MOmpB&r581*$*6j5)@n+_K2eE-c9W>5Wo6#8%dxt<~4?)cFV zme*mduW!>G9TyXHvi{pU^Nws^;pl^MON+n3Jh~#u0)*v)wO_Ta37nsEdkE@geMS1J zrpHky>vhQ+bAG?eEwZjwZTPnA`+0jth=v4!@%V0V!Ox?p&=Y6po|fYc_2^mg1W1kt ztXQcmRf3#8=GBEBQRB`lWw4W(#r#^`X_z268qI-^aeMYzS(4_1WKJu! 
[GIT binary patch for ldm/modules/image_degradation/utils/test.png: 441072 bytes of base85 literal data omitted]
literal 0
HcmV?d00001
zJlQ;{!@FVI;H?DC6+J-tNgHo2K%LMskUovQLq)(L+Q6^O3hzbbel&Oo=mPnc&V5%Q bzm8AbuVoE9qaMlyfL|Cr6WvPfd(r;^ifYg6 literal 0 HcmV?d00001 diff --git a/ldm/modules/image_degradation/utils_image.py b/ldm/modules/image_degradation/utils_image.py new file mode 100644 index 00000000..0175f155 --- /dev/null +++ b/ldm/modules/image_degradation/utils_image.py @@ -0,0 +1,916 @@ +import os +import math +import random +import numpy as np +import torch +import cv2 +from torchvision.utils import make_grid +from datetime import datetime +#import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py + + +os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" + + +''' +# -------------------------------------------- +# Kai Zhang (github: https://github.com/cszn) +# 03/Mar/2019 +# -------------------------------------------- +# https://github.com/twhui/SRGAN-pyTorch +# https://github.com/xinntao/BasicSR +# -------------------------------------------- +''' + + +IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif'] + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) + + +def get_timestamp(): + return datetime.now().strftime('%y%m%d-%H%M%S') + + +def imshow(x, title=None, cbar=False, figsize=None): + plt.figure(figsize=figsize) + plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray') + if title: + plt.title(title) + if cbar: + plt.colorbar() + plt.show() + + +def surf(Z, cmap='rainbow', figsize=None): + plt.figure(figsize=figsize) + ax3 = plt.axes(projection='3d') + + w, h = Z.shape[:2] + xx = np.arange(0,w,1) + yy = np.arange(0,h,1) + X, Y = np.meshgrid(xx, yy) + ax3.plot_surface(X,Y,Z,cmap=cmap) + #ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap) + plt.show() + + +''' +# -------------------------------------------- +# get image pathes +# -------------------------------------------- +''' + + +def get_image_paths(dataroot): + paths = None # return None if dataroot is None + if dataroot is not None: + paths = sorted(_get_paths_from_images(dataroot)) + return paths + + +def _get_paths_from_images(path): + assert os.path.isdir(path), '{:s} is not a valid directory'.format(path) + images = [] + for dirpath, _, fnames in sorted(os.walk(path)): + for fname in sorted(fnames): + if is_image_file(fname): + img_path = os.path.join(dirpath, fname) + images.append(img_path) + assert images, '{:s} has no valid image file'.format(path) + return images + + +''' +# -------------------------------------------- +# split large images into small images +# -------------------------------------------- +''' + + +def patches_from_image(img, p_size=512, p_overlap=64, p_max=800): + w, h = img.shape[:2] + patches = [] + if w > p_max and h > p_max: + w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=np.int)) + h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=np.int)) + w1.append(w-p_size) + h1.append(h-p_size) +# print(w1) +# print(h1) + for i in w1: + for j in h1: + patches.append(img[i:i+p_size, j:j+p_size,:]) + else: + patches.append(img) + + return patches + + +def imssave(imgs, img_path): + """ + imgs: list, N images of size WxHxC + """ + img_name, ext = os.path.splitext(os.path.basename(img_path)) + + for i, img in enumerate(imgs): + if img.ndim == 3: + img = img[:, :, [2, 1, 0]] + new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png') + cv2.imwrite(new_path, img) + + +def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, 
+    """
+    Split the large images under original_dataroot into small overlapping patches of size
+    (p_size)x(p_size) and save them into target_dataroot; only images larger than
+    (p_max)x(p_max) are split.
+    Args:
+        original_dataroot: directory holding the source images
+        target_dataroot: directory the patches are written to
+        p_size: size of the small images
+        p_overlap: overlap between adjacent patches; the patch size used in training is a good choice
+        p_max: images smaller than (p_max)x(p_max) are kept unchanged
+    """
+    paths = get_image_paths(original_dataroot)
+    for img_path in paths:
+        # img_name, ext = os.path.splitext(os.path.basename(img_path))
+        img = imread_uint(img_path, n_channels=n_channels)
+        patches = patches_from_image(img, p_size, p_overlap, p_max)
+        imssave(patches, os.path.join(target_dataroot, os.path.basename(img_path)))
+        #if original_dataroot == target_dataroot:
+        #del img_path
+
+'''
+# --------------------------------------------
+# makedir
+# --------------------------------------------
+'''
+
+
+def mkdir(path):
+    if not os.path.exists(path):
+        os.makedirs(path)
+
+
+def mkdirs(paths):
+    if isinstance(paths, str):
+        mkdir(paths)
+    else:
+        for path in paths:
+            mkdir(path)
+
+
+def mkdir_and_rename(path):
+    if os.path.exists(path):
+        new_name = path + '_archived_' + get_timestamp()
+        print('Path already exists. Renaming it to [{:s}]'.format(new_name))
+        os.rename(path, new_name)
+    os.makedirs(path)
+
+
+'''
+# --------------------------------------------
+# read image from path
+# opencv is fast, but reads a BGR numpy image
+# --------------------------------------------
+'''
+
+
+# --------------------------------------------
+# get uint8 image of size HxWxn_channels (RGB)
+# --------------------------------------------
+def imread_uint(path, n_channels=3):
+    # input: path
+    # output: HxWx3 (RGB or GGG), or HxWx1 (G)
+    if n_channels == 1:
+        img = cv2.imread(path, 0)  # cv2.IMREAD_GRAYSCALE
+        img = np.expand_dims(img, axis=2)  # HxWx1
+    elif n_channels == 3:
+        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or G
+        if img.ndim == 2:
+            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # GGG
+        else:
+            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB
+    return img
+
+
+# --------------------------------------------
+# matlab's imwrite
+# --------------------------------------------
+def imsave(img, img_path):
+    img = np.squeeze(img)
+    if img.ndim == 3:
+        img = img[:, :, [2, 1, 0]]
+    cv2.imwrite(img_path, img)
+
+
+def imwrite(img, img_path):
+    # identical behaviour to imsave(); kept as an alias for backwards compatibility
+    imsave(img, img_path)
+
+
+# --------------------------------------------
+# get single image of size HxWxn_channels (BGR)
+# --------------------------------------------
+def read_img(path):
+    # read image by cv2
+    # return: Numpy float32, HWC, BGR, [0,1]
+    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # cv2.IMREAD_GRAYSCALE
+    img = img.astype(np.float32) / 255.
+    if img.ndim == 2:
+        img = np.expand_dims(img, axis=2)
+    # some images have 4 channels
+    if img.shape[2] > 3:
+        img = img[:, :, :3]
+    return img
+
+
+'''
+# --------------------------------------------
+# image format conversion
+# --------------------------------------------
+# numpy(single) <--->  numpy(uint)
+# numpy(single) <--->  tensor
+# numpy(uint)   <--->  tensor
+# --------------------------------------------
+'''
+
+
+# --------------------------------------------
+# numpy(single) [0, 1] <--->  numpy(uint)
+# --------------------------------------------
+
+
+def uint2single(img):
+    return np.float32(img/255.)
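+
+# A tiny round-trip sketch for the converters below (illustration only, made-up values):
+#   x = np.array([[0, 128, 255]], dtype=np.uint8)
+#   uint2single(x)               # float32, approx [0., 0.502, 1.]
+#   single2uint(uint2single(x))  # back to uint8: [0, 128, 255]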
+
+
+def single2uint(img):
+    return np.uint8((img.clip(0, 1)*255.).round())
+
+
+def uint162single(img):
+    return np.float32(img/65535.)
+
+
+def single2uint16(img):
+    return np.uint16((img.clip(0, 1)*65535.).round())
+
+
+# --------------------------------------------
+# numpy(uint) (HxWxC or HxW) <--->  tensor
+# --------------------------------------------
+
+
+# convert uint to 4-dimensional torch tensor
+def uint2tensor4(img):
+    if img.ndim == 2:
+        img = np.expand_dims(img, axis=2)
+    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0)
+
+
+# convert uint to 3-dimensional torch tensor
+def uint2tensor3(img):
+    if img.ndim == 2:
+        img = np.expand_dims(img, axis=2)
+    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.)
+
+
+# convert 2/3/4-dimensional torch tensor to uint
+def tensor2uint(img):
+    img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
+    if img.ndim == 3:
+        img = np.transpose(img, (1, 2, 0))
+    return np.uint8((img*255.0).round())
+
+
+# --------------------------------------------
+# numpy(single) (HxWxC) <--->  tensor
+# --------------------------------------------
+
+
+# convert single (HxWxC) to 3-dimensional torch tensor
+def single2tensor3(img):
+    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float()
+
+
+# convert single (HxWxC) to 4-dimensional torch tensor
+def single2tensor4(img):
+    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0)
+
+
+# convert torch tensor to single
+def tensor2single(img):
+    img = img.data.squeeze().float().cpu().numpy()
+    if img.ndim == 3:
+        img = np.transpose(img, (1, 2, 0))
+
+    return img
+
+
+# convert torch tensor to single
+def tensor2single3(img):
+    img = img.data.squeeze().float().cpu().numpy()
+    if img.ndim == 3:
+        img = np.transpose(img, (1, 2, 0))
+    elif img.ndim == 2:
+        img = np.expand_dims(img, axis=2)
+    return img
+
+
+def single2tensor5(img):
+    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0)
+
+
+def single32tensor5(img):
+    return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0)
+
+
+def single42tensor4(img):
+    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float()
+
+
+# from skimage.io import imread, imsave
+def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
+    '''
+    Converts a torch Tensor into an image Numpy array of BGR channel order
+    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
+    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
+    '''
+    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)  # squeeze first, then clamp
+    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])  # to range [0,1]
+    n_dim = tensor.dim()
+    if n_dim == 4:
+        n_img = len(tensor)
+        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
+        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
+    elif n_dim == 3:
+        img_np = tensor.numpy()
+        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
+    elif n_dim == 2:
+        img_np = tensor.numpy()
+    else:
+        raise TypeError(
+            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
+    if out_type == np.uint8:
+        img_np = (img_np * 255.0).round()
+        # Important. Unlike matlab, numpy.uint8() WILL NOT round by default.
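+        # e.g. np.uint8(250.9) truncates to 250, hence the explicit .round() above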
+    return img_np.astype(out_type)
+
+
+'''
+# --------------------------------------------
+# Augmentation, flip and/or rotate
+# --------------------------------------------
+# The following two are enough.
+# (1) augment_img: numpy image of WxHxC or WxH
+# (2) augment_img_tensor4: tensor image 1xCxWxH
+# --------------------------------------------
+'''
+
+
+def augment_img(img, mode=0):
+    '''Kai Zhang (github: https://github.com/cszn)
+    '''
+    if mode == 0:
+        return img
+    elif mode == 1:
+        return np.flipud(np.rot90(img))
+    elif mode == 2:
+        return np.flipud(img)
+    elif mode == 3:
+        return np.rot90(img, k=3)
+    elif mode == 4:
+        return np.flipud(np.rot90(img, k=2))
+    elif mode == 5:
+        return np.rot90(img)
+    elif mode == 6:
+        return np.rot90(img, k=2)
+    elif mode == 7:
+        return np.flipud(np.rot90(img, k=3))
+
+
+def augment_img_tensor4(img, mode=0):
+    '''Kai Zhang (github: https://github.com/cszn)
+    '''
+    if mode == 0:
+        return img
+    elif mode == 1:
+        return img.rot90(1, [2, 3]).flip([2])
+    elif mode == 2:
+        return img.flip([2])
+    elif mode == 3:
+        return img.rot90(3, [2, 3])
+    elif mode == 4:
+        return img.rot90(2, [2, 3]).flip([2])
+    elif mode == 5:
+        return img.rot90(1, [2, 3])
+    elif mode == 6:
+        return img.rot90(2, [2, 3])
+    elif mode == 7:
+        return img.rot90(3, [2, 3]).flip([2])
+
+
+def augment_img_tensor(img, mode=0):
+    '''Kai Zhang (github: https://github.com/cszn)
+    '''
+    img_size = img.size()
+    img_np = img.data.cpu().numpy()
+    if len(img_size) == 3:
+        img_np = np.transpose(img_np, (1, 2, 0))
+    elif len(img_size) == 4:
+        img_np = np.transpose(img_np, (2, 3, 1, 0))
+    img_np = augment_img(img_np, mode=mode)
+    img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
+    if len(img_size) == 3:
+        img_tensor = img_tensor.permute(2, 0, 1)
+    elif len(img_size) == 4:
+        img_tensor = img_tensor.permute(3, 2, 0, 1)
+
+    return img_tensor.type_as(img)
+
+
+def augment_img_np3(img, mode=0):
+    if mode == 0:
+        return img
+    elif mode == 1:
+        return img.transpose(1, 0, 2)
+    elif mode == 2:
+        return img[::-1, :, :]
+    elif mode == 3:
+        img = img[::-1, :, :]
+        img = img.transpose(1, 0, 2)
+        return img
+    elif mode == 4:
+        return img[:, ::-1, :]
+    elif mode == 5:
+        img = img[:, ::-1, :]
+        img = img.transpose(1, 0, 2)
+        return img
+    elif mode == 6:
+        img = img[:, ::-1, :]
+        img = img[::-1, :, :]
+        return img
+    elif mode == 7:
+        img = img[:, ::-1, :]
+        img = img[::-1, :, :]
+        img = img.transpose(1, 0, 2)
+        return img
+
+
+def augment_imgs(img_list, hflip=True, rot=True):
+    # horizontal flip and/or rotate
+    hflip = hflip and random.random() < 0.5
+    vflip = rot and random.random() < 0.5
+    rot90 = rot and random.random() < 0.5
+
+    def _augment(img):
+        if hflip:
+            img = img[:, ::-1, :]
+        if vflip:
+            img = img[::-1, :, :]
+        if rot90:
+            img = img.transpose(1, 0, 2)
+        return img
+
+    return [_augment(img) for img in img_list]
+
+
+'''
+# --------------------------------------------
+# modcrop and shave
+# --------------------------------------------
+'''
+
+
+def modcrop(img_in, scale):
+    # img_in: Numpy, HWC or HW
+    img = np.copy(img_in)
+    if img.ndim == 2:
+        H, W = img.shape
+        H_r, W_r = H % scale, W % scale
+        img = img[:H - H_r, :W - W_r]
+    elif img.ndim == 3:
+        H, W, C = img.shape
+        H_r, W_r = H % scale, W % scale
+        img = img[:H - H_r, :W - W_r, :]
+    else:
+        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
+    return img
+
+
+def shave(img_in, border=0):
+    # img_in: Numpy, HWC or HW
+    img = np.copy(img_in)
+    h, w = img.shape[:2]
+    img = img[border:h-border, border:w-border]
+    return img
+
+
+'''
+# --------------------------------------------
+# image processing routines on numpy images
+# channel_convert(in_c, tar_type, img_list):
+# rgb2ycbcr(img, only_y=True):
+# bgr2ycbcr(img, only_y=True):
+# ycbcr2rgb(img):
+# --------------------------------------------
+'''
+
+
+def rgb2ycbcr(img, only_y=True):
+    '''same as matlab rgb2ycbcr
+    only_y: only return Y channel
+    Input:
+        uint8, [0, 255]
+        float, [0, 1]
+    '''
+    in_img_type = img.dtype
+    img = img.astype(np.float32)  # assign the converted copy; the bare astype() call had no effect
+    if in_img_type != np.uint8:
+        img *= 255.
+    # convert
+    if only_y:
+        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
+    else:
+        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
+                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
+    if in_img_type == np.uint8:
+        rlt = rlt.round()
+    else:
+        rlt /= 255.
+    return rlt.astype(in_img_type)
+
+
+def ycbcr2rgb(img):
+    '''same as matlab ycbcr2rgb
+    Input:
+        uint8, [0, 255]
+        float, [0, 1]
+    '''
+    in_img_type = img.dtype
+    img = img.astype(np.float32)  # same fix as in rgb2ycbcr()
+    if in_img_type != np.uint8:
+        img *= 255.
+    # convert
+    rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
+                          [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
+    if in_img_type == np.uint8:
+        rlt = rlt.round()
+    else:
+        rlt /= 255.
+    return rlt.astype(in_img_type)
+
+
+def bgr2ycbcr(img, only_y=True):
+    '''bgr version of rgb2ycbcr
+    only_y: only return Y channel
+    Input:
+        uint8, [0, 255]
+        float, [0, 1]
+    '''
+    in_img_type = img.dtype
+    img = img.astype(np.float32)  # same fix as in rgb2ycbcr()
+    if in_img_type != np.uint8:
+        img *= 255.
+    # convert
+    if only_y:
+        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
+    else:
+        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
+                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
+    if in_img_type == np.uint8:
+        rlt = rlt.round()
+    else:
+        rlt /= 255.
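+    # (the fixed matrices used in the three converters above are the ITU-R BT.601
+    # coefficients that MATLAB's rgb2ycbcr / ycbcr2rgb also use)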
+    return rlt.astype(in_img_type)
+
+
+def channel_convert(in_c, tar_type, img_list):
+    # conversion among BGR, gray and y
+    if in_c == 3 and tar_type == 'gray':  # BGR to gray
+        gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
+        return [np.expand_dims(img, axis=2) for img in gray_list]
+    elif in_c == 3 and tar_type == 'y':  # BGR to y
+        y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
+        return [np.expand_dims(img, axis=2) for img in y_list]
+    elif in_c == 1 and tar_type == 'RGB':  # gray/y to 3 channels (all channels equal)
+        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
+    else:
+        return img_list
+
+
+'''
+# --------------------------------------------
+# metric, PSNR and SSIM
+# --------------------------------------------
+'''
+
+
+# --------------------------------------------
+# PSNR
+# --------------------------------------------
+def calculate_psnr(img1, img2, border=0):
+    # img1 and img2 have range [0, 255]
+    #img1 = img1.squeeze()
+    #img2 = img2.squeeze()
+    if not img1.shape == img2.shape:
+        raise ValueError('Input images must have the same dimensions.')
+    h, w = img1.shape[:2]
+    img1 = img1[border:h-border, border:w-border]
+    img2 = img2[border:h-border, border:w-border]
+
+    img1 = img1.astype(np.float64)
+    img2 = img2.astype(np.float64)
+    mse = np.mean((img1 - img2)**2)
+    if mse == 0:
+        return float('inf')
+    return 20 * math.log10(255.0 / math.sqrt(mse))
+
+
+# --------------------------------------------
+# SSIM
+# --------------------------------------------
+def calculate_ssim(img1, img2, border=0):
+    '''calculate SSIM
+    the same output as MATLAB's
+    img1, img2: [0, 255]
+    '''
+    #img1 = img1.squeeze()
+    #img2 = img2.squeeze()
+    if not img1.shape == img2.shape:
+        raise ValueError('Input images must have the same dimensions.')
+    h, w = img1.shape[:2]
+    img1 = img1[border:h-border, border:w-border]
+    img2 = img2[border:h-border, border:w-border]
+
+    if img1.ndim == 2:
+        return ssim(img1, img2)
+    elif img1.ndim == 3:
+        if img1.shape[2] == 3:
+            ssims = []
+            for i in range(3):
+                ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
+            return np.array(ssims).mean()
+        elif img1.shape[2] == 1:
+            return ssim(np.squeeze(img1), np.squeeze(img2))
+    else:
+        raise ValueError('Wrong input image dimensions.')
+
+
+def ssim(img1, img2):
+    C1 = (0.01 * 255)**2
+    C2 = (0.03 * 255)**2
+
+    img1 = img1.astype(np.float64)
+    img2 = img2.astype(np.float64)
+    kernel = cv2.getGaussianKernel(11, 1.5)
+    window = np.outer(kernel, kernel.transpose())
+
+    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
+    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
+    mu1_sq = mu1**2
+    mu2_sq = mu2**2
+    mu1_mu2 = mu1 * mu2
+    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
+    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
+    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
+
+    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
+                                                            (sigma1_sq + sigma2_sq + C2))
+    return ssim_map.mean()
+
+
+'''
+# --------------------------------------------
+# matlab's bicubic imresize (numpy and torch) [0, 1]
+# --------------------------------------------
+'''
+
+
+# matlab 'imresize' function, now only supports 'bicubic'
+def cubic(x):
+    absx = torch.abs(x)
+    absx2 = absx**2
+    absx3 = absx**3
+    return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
+        (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
+
+
+def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width,
antialiasing): + if (scale < 1) and (antialiasing): + # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width + kernel_width = kernel_width / scale + + # Output-space coordinates + x = torch.linspace(1, out_length, out_length) + + # Input-space coordinates. Calculate the inverse mapping such that 0.5 + # in output space maps to 0.5 in input space, and 0.5+scale in output + # space maps to 1.5 in input space. + u = x / scale + 0.5 * (1 - 1 / scale) + + # What is the left-most pixel that can be involved in the computation? + left = torch.floor(u - kernel_width / 2) + + # What is the maximum number of pixels that can be involved in the + # computation? Note: it's OK to use an extra pixel here; if the + # corresponding weights are all zero, it will be eliminated at the end + # of this function. + P = math.ceil(kernel_width) + 2 + + # The indices of the input pixels involved in computing the k-th output + # pixel are in row k of the indices matrix. + indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view( + 1, P).expand(out_length, P) + + # The weights used to compute the k-th output pixel are in row k of the + # weights matrix. + distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices + # apply cubic kernel + if (scale < 1) and (antialiasing): + weights = scale * cubic(distance_to_center * scale) + else: + weights = cubic(distance_to_center) + # Normalize the weights matrix so that each row sums to 1. + weights_sum = torch.sum(weights, 1).view(out_length, 1) + weights = weights / weights_sum.expand(out_length, P) + + # If a column in weights is all zero, get rid of it. only consider the first and last column. + weights_zero_tmp = torch.sum((weights == 0), 0) + if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6): + indices = indices.narrow(1, 1, P - 2) + weights = weights.narrow(1, 1, P - 2) + if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6): + indices = indices.narrow(1, 0, P - 2) + weights = weights.narrow(1, 0, P - 2) + weights = weights.contiguous() + indices = indices.contiguous() + sym_len_s = -indices.min() + 1 + sym_len_e = indices.max() - in_length + indices = indices + sym_len_s - 1 + return weights, indices, int(sym_len_s), int(sym_len_e) + + +# -------------------------------------------- +# imresize for tensor image [0, 1] +# -------------------------------------------- +def imresize(img, scale, antialiasing=True): + # Now the scale should be the same for H and W + # input: img: pytorch tensor, CHW or HW [0,1] + # output: CHW or HW [0,1] w/o round + need_squeeze = True if img.dim() == 2 else False + if need_squeeze: + img.unsqueeze_(0) + in_C, in_H, in_W = img.size() + out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) + kernel_width = 4 + kernel = 'cubic' + + # Return the desired dimension order for performing the resize. The + # strategy is to perform the resize first along the dimension with the + # smallest scale factor. + # Now we do not support this. 
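+    # What follows is a separable bicubic resample: calculate_weights_indices()
+    # returns, per output row/column, the contributing input indices and their
+    # cubic weights, and the tensor is first padded by symmetric reflection
+    # (sym_len_Hs/He, sym_len_Ws/We) so border pixels get a full support window.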
+ + # get weights and indices + weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( + in_H, out_H, scale, kernel, kernel_width, antialiasing) + weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( + in_W, out_W, scale, kernel, kernel_width, antialiasing) + # process H dimension + # symmetric copying + img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W) + img_aug.narrow(1, sym_len_Hs, in_H).copy_(img) + + sym_patch = img[:, :sym_len_Hs, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv) + + sym_patch = img[:, -sym_len_He:, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) + + out_1 = torch.FloatTensor(in_C, out_H, in_W) + kernel_width = weights_H.size(1) + for i in range(out_H): + idx = int(indices_H[i][0]) + for j in range(out_C): + out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i]) + + # process W dimension + # symmetric copying + out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We) + out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1) + + sym_patch = out_1[:, :, :sym_len_Ws] + inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(2, inv_idx) + out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv) + + sym_patch = out_1[:, :, -sym_len_We:] + inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(2, inv_idx) + out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) + + out_2 = torch.FloatTensor(in_C, out_H, out_W) + kernel_width = weights_W.size(1) + for i in range(out_W): + idx = int(indices_W[i][0]) + for j in range(out_C): + out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i]) + if need_squeeze: + out_2.squeeze_() + return out_2 + + +# -------------------------------------------- +# imresize for numpy image [0, 1] +# -------------------------------------------- +def imresize_np(img, scale, antialiasing=True): + # Now the scale should be the same for H and W + # input: img: Numpy, HWC or HW [0,1] + # output: HWC or HW [0,1] w/o round + img = torch.from_numpy(img) + need_squeeze = True if img.dim() == 2 else False + if need_squeeze: + img.unsqueeze_(2) + + in_H, in_W, in_C = img.size() + out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) + kernel_width = 4 + kernel = 'cubic' + + # Return the desired dimension order for performing the resize. The + # strategy is to perform the resize first along the dimension with the + # smallest scale factor. + # Now we do not support this. 
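+    # Same separable scheme as imresize() above, but for numpy HWC input: the
+    # array is wrapped in a torch tensor, resampled along H then W, and the
+    # result is converted back to numpy at the end.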
+ + # get weights and indices + weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( + in_H, out_H, scale, kernel, kernel_width, antialiasing) + weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( + in_W, out_W, scale, kernel, kernel_width, antialiasing) + # process H dimension + # symmetric copying + img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C) + img_aug.narrow(0, sym_len_Hs, in_H).copy_(img) + + sym_patch = img[:sym_len_Hs, :, :] + inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(0, inv_idx) + img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv) + + sym_patch = img[-sym_len_He:, :, :] + inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(0, inv_idx) + img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) + + out_1 = torch.FloatTensor(out_H, in_W, in_C) + kernel_width = weights_H.size(1) + for i in range(out_H): + idx = int(indices_H[i][0]) + for j in range(out_C): + out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i]) + + # process W dimension + # symmetric copying + out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C) + out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1) + + sym_patch = out_1[:, :sym_len_Ws, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv) + + sym_patch = out_1[:, -sym_len_We:, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) + + out_2 = torch.FloatTensor(out_H, out_W, in_C) + kernel_width = weights_W.size(1) + for i in range(out_W): + idx = int(indices_W[i][0]) + for j in range(out_C): + out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i]) + if need_squeeze: + out_2.squeeze_() + + return out_2.numpy() + + +if __name__ == '__main__': + print('---') +# img = imread_uint('test.bmp', 3) +# img = uint2single(img) +# img_bicubic = imresize_np(img, 1/4) \ No newline at end of file diff --git a/ldm/modules/losses/__init__.py b/ldm/modules/losses/__init__.py new file mode 100644 index 00000000..876d7c5b --- /dev/null +++ b/ldm/modules/losses/__init__.py @@ -0,0 +1 @@ +from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator \ No newline at end of file diff --git a/ldm/modules/losses/contperceptual.py b/ldm/modules/losses/contperceptual.py new file mode 100644 index 00000000..672c1e32 --- /dev/null +++ b/ldm/modules/losses/contperceptual.py @@ -0,0 +1,111 @@ +import torch +import torch.nn as nn + +from taming.modules.losses.vqperceptual import * # TODO: taming dependency yes/no? 
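+
+# The wildcard import above is expected to supply everything this file pulls in
+# from taming-transformers: LPIPS, NLayerDiscriminator, weights_init,
+# hinge_d_loss, vanilla_d_loss and adopt_weight.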
+ + +class LPIPSWithDiscriminator(nn.Module): + def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, + disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, + perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, + disc_loss="hinge"): + + super().__init__() + assert disc_loss in ["hinge", "vanilla"] + self.kl_weight = kl_weight + self.pixel_weight = pixelloss_weight + self.perceptual_loss = LPIPS().eval() + self.perceptual_weight = perceptual_weight + # output log variance + self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init) + + self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, + n_layers=disc_num_layers, + use_actnorm=use_actnorm + ).apply(weights_init) + self.discriminator_iter_start = disc_start + self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss + self.disc_factor = disc_factor + self.discriminator_weight = disc_weight + self.disc_conditional = disc_conditional + + def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): + if last_layer is not None: + nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] + else: + nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] + + d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) + d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() + d_weight = d_weight * self.discriminator_weight + return d_weight + + def forward(self, inputs, reconstructions, posteriors, optimizer_idx, + global_step, last_layer=None, cond=None, split="train", + weights=None): + rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) + if self.perceptual_weight > 0: + p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) + rec_loss = rec_loss + self.perceptual_weight * p_loss + + nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar + weighted_nll_loss = nll_loss + if weights is not None: + weighted_nll_loss = weights*nll_loss + weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0] + nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] + kl_loss = posteriors.kl() + kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] + + # now the GAN part + if optimizer_idx == 0: + # generator update + if cond is None: + assert not self.disc_conditional + logits_fake = self.discriminator(reconstructions.contiguous()) + else: + assert self.disc_conditional + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) + g_loss = -torch.mean(logits_fake) + + if self.disc_factor > 0.0: + try: + d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) + except RuntimeError: + assert not self.training + d_weight = torch.tensor(0.0) + else: + d_weight = torch.tensor(0.0) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss + + log = {"{}/total_loss".format(split): loss.clone().detach().mean(), "{}/logvar".format(split): self.logvar.detach(), + "{}/kl_loss".format(split): kl_loss.detach().mean(), "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): 
torch.tensor(disc_factor),
+               "{}/g_loss".format(split): g_loss.detach().mean(),
+               }
+        return loss, log
+
+        if optimizer_idx == 1:
+            # second pass for discriminator update
+            if cond is None:
+                logits_real = self.discriminator(inputs.contiguous().detach())
+                logits_fake = self.discriminator(reconstructions.contiguous().detach())
+            else:
+                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
+                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
+
+            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
+            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
+
+            log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
+                   "{}/logits_real".format(split): logits_real.detach().mean(),
+                   "{}/logits_fake".format(split): logits_fake.detach().mean()
+                   }
+            return d_loss, log
+
diff --git a/ldm/modules/losses/vqperceptual.py b/ldm/modules/losses/vqperceptual.py
new file mode 100644
index 00000000..f6998176
--- /dev/null
+++ b/ldm/modules/losses/vqperceptual.py
@@ -0,0 +1,167 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+from einops import repeat
+
+from taming.modules.discriminator.model import NLayerDiscriminator, weights_init
+from taming.modules.losses.lpips import LPIPS
+from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss
+
+
+def exists(val):
+    # needed by VQLPIPSWithDiscriminator.forward() below; it is not supplied
+    # by any of the imports above
+    return val is not None
+
+
+def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights):
+    assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0]
+    loss_real = torch.mean(F.relu(1. - logits_real), dim=[1,2,3])
+    loss_fake = torch.mean(F.relu(1. + logits_fake), dim=[1,2,3])
+    loss_real = (weights * loss_real).sum() / weights.sum()
+    loss_fake = (weights * loss_fake).sum() / weights.sum()
+    d_loss = 0.5 * (loss_real + loss_fake)
+    return d_loss
+
+
+def adopt_weight(weight, global_step, threshold=0, value=0.):
+    if global_step < threshold:
+        weight = value
+    return weight
+
+
+def measure_perplexity(predicted_indices, n_embed):
+    # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py
+    # eval cluster perplexity.
when perplexity == num_embeddings then all clusters are used exactly equally + encodings = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed) + avg_probs = encodings.mean(0) + perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp() + cluster_use = torch.sum(avg_probs > 0) + return perplexity, cluster_use + +def l1(x, y): + return torch.abs(x-y) + + +def l2(x, y): + return torch.pow((x-y), 2) + + +class VQLPIPSWithDiscriminator(nn.Module): + def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0, + disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, + perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, + disc_ndf=64, disc_loss="hinge", n_classes=None, perceptual_loss="lpips", + pixel_loss="l1"): + super().__init__() + assert disc_loss in ["hinge", "vanilla"] + assert perceptual_loss in ["lpips", "clips", "dists"] + assert pixel_loss in ["l1", "l2"] + self.codebook_weight = codebook_weight + self.pixel_weight = pixelloss_weight + if perceptual_loss == "lpips": + print(f"{self.__class__.__name__}: Running with LPIPS.") + self.perceptual_loss = LPIPS().eval() + else: + raise ValueError(f"Unknown perceptual loss: >> {perceptual_loss} <<") + self.perceptual_weight = perceptual_weight + + if pixel_loss == "l1": + self.pixel_loss = l1 + else: + self.pixel_loss = l2 + + self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, + n_layers=disc_num_layers, + use_actnorm=use_actnorm, + ndf=disc_ndf + ).apply(weights_init) + self.discriminator_iter_start = disc_start + if disc_loss == "hinge": + self.disc_loss = hinge_d_loss + elif disc_loss == "vanilla": + self.disc_loss = vanilla_d_loss + else: + raise ValueError(f"Unknown GAN loss '{disc_loss}'.") + print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.") + self.disc_factor = disc_factor + self.discriminator_weight = disc_weight + self.disc_conditional = disc_conditional + self.n_classes = n_classes + + def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): + if last_layer is not None: + nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] + else: + nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] + + d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) + d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() + d_weight = d_weight * self.discriminator_weight + return d_weight + + def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, + global_step, last_layer=None, cond=None, split="train", predicted_indices=None): + if not exists(codebook_loss): + codebook_loss = torch.tensor([0.]).to(inputs.device) + #rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) + rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous()) + if self.perceptual_weight > 0: + p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) + rec_loss = rec_loss + self.perceptual_weight * p_loss + else: + p_loss = torch.tensor([0.0]) + + nll_loss = rec_loss + #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] + nll_loss = torch.mean(nll_loss) + + # now the GAN part + if optimizer_idx == 0: + # generator update + if cond is None: + assert not self.disc_conditional + logits_fake = self.discriminator(reconstructions.contiguous()) + else: + assert self.disc_conditional + 
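+                # conditional D: the conditioning is concatenated with the
+                # reconstruction along the channel axis before scoring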
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) + g_loss = -torch.mean(logits_fake) + + try: + d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) + except RuntimeError: + assert not self.training + d_weight = torch.tensor(0.0) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean() + + log = {"{}/total_loss".format(split): loss.clone().detach().mean(), + "{}/quant_loss".format(split): codebook_loss.detach().mean(), + "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/p_loss".format(split): p_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): torch.tensor(disc_factor), + "{}/g_loss".format(split): g_loss.detach().mean(), + } + if predicted_indices is not None: + assert self.n_classes is not None + with torch.no_grad(): + perplexity, cluster_usage = measure_perplexity(predicted_indices, self.n_classes) + log[f"{split}/perplexity"] = perplexity + log[f"{split}/cluster_usage"] = cluster_usage + return loss, log + + if optimizer_idx == 1: + # second pass for discriminator update + if cond is None: + logits_real = self.discriminator(inputs.contiguous().detach()) + logits_fake = self.discriminator(reconstructions.contiguous().detach()) + else: + logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) + + log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), + "{}/logits_real".format(split): logits_real.detach().mean(), + "{}/logits_fake".format(split): logits_fake.detach().mean() + } + return d_loss, log diff --git a/ldm/modules/x_transformer.py b/ldm/modules/x_transformer.py new file mode 100644 index 00000000..5fc15bf9 --- /dev/null +++ b/ldm/modules/x_transformer.py @@ -0,0 +1,641 @@ +"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers""" +import torch +from torch import nn, einsum +import torch.nn.functional as F +from functools import partial +from inspect import isfunction +from collections import namedtuple +from einops import rearrange, repeat, reduce + +# constants + +DEFAULT_DIM_HEAD = 64 + +Intermediates = namedtuple('Intermediates', [ + 'pre_softmax_attn', + 'post_softmax_attn' +]) + +LayerIntermediates = namedtuple('Intermediates', [ + 'hiddens', + 'attn_intermediates' +]) + + +class AbsolutePositionalEmbedding(nn.Module): + def __init__(self, dim, max_seq_len): + super().__init__() + self.emb = nn.Embedding(max_seq_len, dim) + self.init_() + + def init_(self): + nn.init.normal_(self.emb.weight, std=0.02) + + def forward(self, x): + n = torch.arange(x.shape[1], device=x.device) + return self.emb(n)[None, :, :] + + +class FixedPositionalEmbedding(nn.Module): + def __init__(self, dim): + super().__init__() + inv_freq = 1. 
/ (10000 ** (torch.arange(0, dim, 2).float() / dim)) + self.register_buffer('inv_freq', inv_freq) + + def forward(self, x, seq_dim=1, offset=0): + t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset + sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) + emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) + return emb[None, :, :] + + +# helpers + +def exists(val): + return val is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def always(val): + def inner(*args, **kwargs): + return val + return inner + + +def not_equals(val): + def inner(x): + return x != val + return inner + + +def equals(val): + def inner(x): + return x == val + return inner + + +def max_neg_value(tensor): + return -torch.finfo(tensor.dtype).max + + +# keyword argument helpers + +def pick_and_pop(keys, d): + values = list(map(lambda key: d.pop(key), keys)) + return dict(zip(keys, values)) + + +def group_dict_by_key(cond, d): + return_val = [dict(), dict()] + for key in d.keys(): + match = bool(cond(key)) + ind = int(not match) + return_val[ind][key] = d[key] + return (*return_val,) + + +def string_begins_with(prefix, str): + return str.startswith(prefix) + + +def group_by_key_prefix(prefix, d): + return group_dict_by_key(partial(string_begins_with, prefix), d) + + +def groupby_prefix_and_trim(prefix, d): + kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) + kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) + return kwargs_without_prefix, kwargs + + +# classes +class Scale(nn.Module): + def __init__(self, value, fn): + super().__init__() + self.value = value + self.fn = fn + + def forward(self, x, **kwargs): + x, *rest = self.fn(x, **kwargs) + return (x * self.value, *rest) + + +class Rezero(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + self.g = nn.Parameter(torch.zeros(1)) + + def forward(self, x, **kwargs): + x, *rest = self.fn(x, **kwargs) + return (x * self.g, *rest) + + +class ScaleNorm(nn.Module): + def __init__(self, dim, eps=1e-5): + super().__init__() + self.scale = dim ** -0.5 + self.eps = eps + self.g = nn.Parameter(torch.ones(1)) + + def forward(self, x): + norm = torch.norm(x, dim=-1, keepdim=True) * self.scale + return x / norm.clamp(min=self.eps) * self.g + + +class RMSNorm(nn.Module): + def __init__(self, dim, eps=1e-8): + super().__init__() + self.scale = dim ** -0.5 + self.eps = eps + self.g = nn.Parameter(torch.ones(dim)) + + def forward(self, x): + norm = torch.norm(x, dim=-1, keepdim=True) * self.scale + return x / norm.clamp(min=self.eps) * self.g + + +class Residual(nn.Module): + def forward(self, x, residual): + return x + residual + + +class GRUGating(nn.Module): + def __init__(self, dim): + super().__init__() + self.gru = nn.GRUCell(dim, dim) + + def forward(self, x, residual): + gated_output = self.gru( + rearrange(x, 'b n d -> (b n) d'), + rearrange(residual, 'b n d -> (b n) d') + ) + + return gated_output.reshape_as(x) + + +# feedforward + +class GEGLU(nn.Module): + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out * 2) + + def forward(self, x): + x, gate = self.proj(x).chunk(2, dim=-1) + return x * F.gelu(gate) + + +class FeedForward(nn.Module): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): + super().__init__() + inner_dim = int(dim * mult) + dim_out = default(dim_out, dim) + 
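+        # either a plain Linear+GELU projection or a GEGLU gate (x * GELU(gate));
+        # both map dim to inner_dim, GEGLU just learns twice the projection width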
project_in = nn.Sequential( + nn.Linear(dim, inner_dim), + nn.GELU() + ) if not glu else GEGLU(dim, inner_dim) + + self.net = nn.Sequential( + project_in, + nn.Dropout(dropout), + nn.Linear(inner_dim, dim_out) + ) + + def forward(self, x): + return self.net(x) + + +# attention. +class Attention(nn.Module): + def __init__( + self, + dim, + dim_head=DEFAULT_DIM_HEAD, + heads=8, + causal=False, + mask=None, + talking_heads=False, + sparse_topk=None, + use_entmax15=False, + num_mem_kv=0, + dropout=0., + on_attn=False + ): + super().__init__() + if use_entmax15: + raise NotImplementedError("Check out entmax activation instead of softmax activation!") + self.scale = dim_head ** -0.5 + self.heads = heads + self.causal = causal + self.mask = mask + + inner_dim = dim_head * heads + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_k = nn.Linear(dim, inner_dim, bias=False) + self.to_v = nn.Linear(dim, inner_dim, bias=False) + self.dropout = nn.Dropout(dropout) + + # talking heads + self.talking_heads = talking_heads + if talking_heads: + self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) + self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) + + # explicit topk sparse attention + self.sparse_topk = sparse_topk + + # entmax + #self.attn_fn = entmax15 if use_entmax15 else F.softmax + self.attn_fn = F.softmax + + # add memory key / values + self.num_mem_kv = num_mem_kv + if num_mem_kv > 0: + self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) + self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) + + # attention on attention + self.attn_on_attn = on_attn + self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim) + + def forward( + self, + x, + context=None, + mask=None, + context_mask=None, + rel_pos=None, + sinusoidal_emb=None, + prev_attn=None, + mem=None + ): + b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device + kv_input = default(context, x) + + q_input = x + k_input = kv_input + v_input = kv_input + + if exists(mem): + k_input = torch.cat((mem, k_input), dim=-2) + v_input = torch.cat((mem, v_input), dim=-2) + + if exists(sinusoidal_emb): + # in shortformer, the query would start at a position offset depending on the past cached memory + offset = k_input.shape[-2] - q_input.shape[-2] + q_input = q_input + sinusoidal_emb(q_input, offset=offset) + k_input = k_input + sinusoidal_emb(k_input) + + q = self.to_q(q_input) + k = self.to_k(k_input) + v = self.to_v(v_input) + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) + + input_mask = None + if any(map(exists, (mask, context_mask))): + q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) + k_mask = q_mask if not exists(context) else context_mask + k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) + q_mask = rearrange(q_mask, 'b i -> b () i ()') + k_mask = rearrange(k_mask, 'b j -> b () () j') + input_mask = q_mask * k_mask + + if self.num_mem_kv > 0: + mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) + k = torch.cat((mem_k, k), dim=-2) + v = torch.cat((mem_v, v), dim=-2) + if exists(input_mask): + input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) + + dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale + mask_value = max_neg_value(dots) + + if exists(prev_attn): + dots = dots + prev_attn + + pre_softmax_attn = dots + + if talking_heads: + dots = 
einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() + + if exists(rel_pos): + dots = rel_pos(dots) + + if exists(input_mask): + dots.masked_fill_(~input_mask, mask_value) + del input_mask + + if self.causal: + i, j = dots.shape[-2:] + r = torch.arange(i, device=device) + mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') + mask = F.pad(mask, (j - i, 0), value=False) + dots.masked_fill_(mask, mask_value) + del mask + + if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: + top, _ = dots.topk(self.sparse_topk, dim=-1) + vk = top[..., -1].unsqueeze(-1).expand_as(dots) + mask = dots < vk + dots.masked_fill_(mask, mask_value) + del mask + + attn = self.attn_fn(dots, dim=-1) + post_softmax_attn = attn + + attn = self.dropout(attn) + + if talking_heads: + attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() + + out = einsum('b h i j, b h j d -> b h i d', attn, v) + out = rearrange(out, 'b h n d -> b n (h d)') + + intermediates = Intermediates( + pre_softmax_attn=pre_softmax_attn, + post_softmax_attn=post_softmax_attn + ) + + return self.to_out(out), intermediates + + +class AttentionLayers(nn.Module): + def __init__( + self, + dim, + depth, + heads=8, + causal=False, + cross_attend=False, + only_cross=False, + use_scalenorm=False, + use_rmsnorm=False, + use_rezero=False, + rel_pos_num_buckets=32, + rel_pos_max_distance=128, + position_infused_attn=False, + custom_layers=None, + sandwich_coef=None, + par_ratio=None, + residual_attn=False, + cross_residual_attn=False, + macaron=False, + pre_norm=True, + gate_residual=False, + **kwargs + ): + super().__init__() + ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) + attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) + + dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) + + self.dim = dim + self.depth = depth + self.layers = nn.ModuleList([]) + + self.has_pos_emb = position_infused_attn + self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None + self.rotary_pos_emb = always(None) + + assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' + self.rel_pos = None + + self.pre_norm = pre_norm + + self.residual_attn = residual_attn + self.cross_residual_attn = cross_residual_attn + + norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm + norm_class = RMSNorm if use_rmsnorm else norm_class + norm_fn = partial(norm_class, dim) + + norm_fn = nn.Identity if use_rezero else norm_fn + branch_fn = Rezero if use_rezero else None + + if cross_attend and not only_cross: + default_block = ('a', 'c', 'f') + elif cross_attend and only_cross: + default_block = ('c', 'f') + else: + default_block = ('a', 'f') + + if macaron: + default_block = ('f',) + default_block + + if exists(custom_layers): + layer_types = custom_layers + elif exists(par_ratio): + par_depth = depth * len(default_block) + assert 1 < par_ratio <= par_depth, 'par ratio out of range' + default_block = tuple(filter(not_equals('f'), default_block)) + par_attn = par_depth // par_ratio + depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper + par_width = (depth_cut + depth_cut // par_attn) // par_attn + assert len(default_block) <= par_width, 'default block is too large for par_ratio' + par_block = default_block + ('f',) * (par_width - len(default_block)) + par_head = par_block * par_attn + layer_types = par_head + ('f',) * (par_depth - 
len(par_head)) + elif exists(sandwich_coef): + assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' + layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef + else: + layer_types = default_block * depth + + self.layer_types = layer_types + self.num_attn_layers = len(list(filter(equals('a'), layer_types))) + + for layer_type in self.layer_types: + if layer_type == 'a': + layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) + elif layer_type == 'c': + layer = Attention(dim, heads=heads, **attn_kwargs) + elif layer_type == 'f': + layer = FeedForward(dim, **ff_kwargs) + layer = layer if not macaron else Scale(0.5, layer) + else: + raise Exception(f'invalid layer type {layer_type}') + + if isinstance(layer, Attention) and exists(branch_fn): + layer = branch_fn(layer) + + if gate_residual: + residual_fn = GRUGating(dim) + else: + residual_fn = Residual() + + self.layers.append(nn.ModuleList([ + norm_fn(), + layer, + residual_fn + ])) + + def forward( + self, + x, + context=None, + mask=None, + context_mask=None, + mems=None, + return_hiddens=False + ): + hiddens = [] + intermediates = [] + prev_attn = None + prev_cross_attn = None + + mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers + + for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): + is_last = ind == (len(self.layers) - 1) + + if layer_type == 'a': + hiddens.append(x) + layer_mem = mems.pop(0) + + residual = x + + if self.pre_norm: + x = norm(x) + + if layer_type == 'a': + out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos, + prev_attn=prev_attn, mem=layer_mem) + elif layer_type == 'c': + out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn) + elif layer_type == 'f': + out = block(x) + + x = residual_fn(out, residual) + + if layer_type in ('a', 'c'): + intermediates.append(inter) + + if layer_type == 'a' and self.residual_attn: + prev_attn = inter.pre_softmax_attn + elif layer_type == 'c' and self.cross_residual_attn: + prev_cross_attn = inter.pre_softmax_attn + + if not self.pre_norm and not is_last: + x = norm(x) + + if return_hiddens: + intermediates = LayerIntermediates( + hiddens=hiddens, + attn_intermediates=intermediates + ) + + return x, intermediates + + return x + + +class Encoder(AttentionLayers): + def __init__(self, **kwargs): + assert 'causal' not in kwargs, 'cannot set causality on encoder' + super().__init__(causal=False, **kwargs) + + + +class TransformerWrapper(nn.Module): + def __init__( + self, + *, + num_tokens, + max_seq_len, + attn_layers, + emb_dim=None, + max_mem_len=0., + emb_dropout=0., + num_memory_tokens=None, + tie_embedding=False, + use_pos_emb=True + ): + super().__init__() + assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' + + dim = attn_layers.dim + emb_dim = default(emb_dim, dim) + + self.max_seq_len = max_seq_len + self.max_mem_len = max_mem_len + self.num_tokens = num_tokens + + self.token_emb = nn.Embedding(num_tokens, emb_dim) + self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( + use_pos_emb and not attn_layers.has_pos_emb) else always(0) + self.emb_dropout = nn.Dropout(emb_dropout) + + self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() + self.attn_layers = attn_layers + self.norm = nn.LayerNorm(dim) + + self.init_() + + self.to_logits 
+        self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()
+
+        # memory tokens (like [cls]) from Memory Transformers paper
+        num_memory_tokens = default(num_memory_tokens, 0)
+        self.num_memory_tokens = num_memory_tokens
+        if num_memory_tokens > 0:
+            self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
+
+            # let funnel encoder know number of memory tokens, if specified
+            if hasattr(attn_layers, 'num_memory_tokens'):
+                attn_layers.num_memory_tokens = num_memory_tokens
+
+    def init_(self):
+        nn.init.normal_(self.token_emb.weight, std=0.02)
+
+    def forward(
+            self,
+            x,
+            return_embeddings=False,
+            mask=None,
+            return_mems=False,
+            return_attn=False,
+            mems=None,
+            **kwargs
+    ):
+        b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens
+        x = self.token_emb(x)
+        x += self.pos_emb(x)
+        x = self.emb_dropout(x)
+
+        x = self.project_emb(x)
+
+        if num_mem > 0:
+            mem = repeat(self.memory_tokens, 'n d -> b n d', b=b)
+            x = torch.cat((mem, x), dim=1)
+
+            # auto-handle masking after appending memory tokens
+            if exists(mask):
+                mask = F.pad(mask, (num_mem, 0), value=True)
+
+        x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs)
+        x = self.norm(x)
+
+        mem, x = x[:, :num_mem], x[:, num_mem:]
+
+        out = self.to_logits(x) if not return_embeddings else x
+
+        if return_mems:
+            hiddens = intermediates.hiddens
+            new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens
+            new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
+            return out, new_mems
+
+        if return_attn:
+            attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
+            return out, attn_maps
+
+        return out
+
diff --git a/ldm/util.py b/ldm/util.py
new file mode 100644
index 00000000..8ba38853
--- /dev/null
+++ b/ldm/util.py
@@ -0,0 +1,223 @@
+import importlib
+
+import torch
+import numpy as np
+from collections import abc
+from einops import rearrange
+from functools import partial
+
+import multiprocessing as mp
+from threading import Thread
+from queue import Queue
+
+from inspect import isfunction
+from PIL import Image, ImageDraw, ImageFont
+
+
+def log_txt_as_img(wh, xc, size=10):
+    # wh a tuple of (width, height)
+    # xc a list of captions to plot
+    b = len(xc)
+    txts = list()
+    for bi in range(b):
+        txt = Image.new("RGB", wh, color="white")
+        draw = ImageDraw.Draw(txt)
+        font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)
+        nc = int(40 * (wh[0] / 256))
+        lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))
+
+        try:
+            draw.text((0, 0), lines, fill="black", font=font)
+        except UnicodeEncodeError:
+            print("Can't encode string for logging. Skipping.")
+
+        txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
+        txts.append(txt)
+    txts = np.stack(txts)
+    txts = torch.tensor(txts)
+    return txts
+
+
+def ismap(x):
+    if not isinstance(x, torch.Tensor):
+        return False
+    return (len(x.shape) == 4) and (x.shape[1] > 3)
+
+
+def isimage(x):
+    if not isinstance(x, torch.Tensor):
+        return False
+    return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)
+
+
+def exists(x):
+    return x is not None
+
+
+def default(val, d):
+    if exists(val):
+        return val
+    return d() if isfunction(d) else d
+
+
+def mean_flat(tensor):
+    """
+    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
+    Take the mean over all non-batch dimensions.
+    """
+    return tensor.mean(dim=list(range(1, len(tensor.shape))))
+
+
+def count_params(model, verbose=False):
+    total_params = sum(p.numel() for p in model.parameters())
+    if verbose:
+        print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.")
+    return total_params
+
+
+def instantiate_from_config(config):
+    if "target" not in config:
+        if config == '__is_first_stage__':
+            return None
+        elif config == "__is_unconditional__":
+            return None
+        raise KeyError("Expected key `target` to instantiate.")
+    return get_obj_from_str(config["target"])(**config.get("params", dict()))
+
+
+def get_obj_from_str(string, reload=False):
+    module, cls = string.rsplit(".", 1)
+    if reload:
+        module_imp = importlib.import_module(module)
+        importlib.reload(module_imp)
+    return getattr(importlib.import_module(module, package=None), cls)
+
+
+def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False):
+    # create dummy dataset instance
+
+    # run prefetching
+    if idx_to_fn:
+        res = func(data, worker_id=idx)
+    else:
+        res = func(data)
+    Q.put([idx, res])
+    Q.put("Done")
+
+
+def parallel_data_prefetch(
+        func: callable, data, n_proc, target_data_type="ndarray", cpu_intensive=True, use_worker_id=False
+):
+    # if target_data_type not in ["ndarray", "list"]:
+    #     raise ValueError(
+    #         "Data, which is passed to parallel_data_prefetch has to be either of type list or ndarray."
+    #     )
+    if isinstance(data, np.ndarray) and target_data_type == "list":
+        raise ValueError("list expected but function got ndarray.")
+    elif isinstance(data, abc.Iterable):
+        if isinstance(data, dict):
+            print(
+                'WARNING: "data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.'
+            )
+            data = list(data.values())
+        if target_data_type == "ndarray":
+            data = np.asarray(data)
+        else:
+            data = list(data)
+    else:
+        raise TypeError(
+            f"The data that shall be processed in parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}."
+        )
+
+    if cpu_intensive:
+        Q = mp.Queue(1000)
+        proc = mp.Process
+    else:
+        Q = Queue(1000)
+        proc = Thread
+    # spawn processes
+    if target_data_type == "ndarray":
+        arguments = [
+            [func, Q, part, i, use_worker_id]
+            for i, part in enumerate(np.array_split(data, n_proc))
+        ]
+    else:
+        step = (
+            int(len(data) / n_proc + 1)
+            if len(data) % n_proc != 0
+            else int(len(data) / n_proc)
+        )
+        arguments = [
+            [func, Q, part, i, use_worker_id]
+            for i, part in enumerate(
+                [data[i: i + step] for i in range(0, len(data), step)]
+            )
+        ]
+    processes = []
+    for i in range(n_proc):
+        p = proc(target=_do_parallel_data_prefetch, args=arguments[i])
+        processes += [p]
+
+    # start processes
+    print("Start prefetching...")
+    import time
+
+    start = time.time()
+    gather_res = [[] for _ in range(n_proc)]
+    try:
+        for p in processes:
+            p.start()
+
+        k = 0
+        while k < n_proc:
+            # get result
+            res = Q.get()
+            if res == "Done":
+                k += 1
+            else:
+                gather_res[res[0]] = res[1]
+
+    except Exception as e:
+        print("Exception: ", e)
+        for p in processes:
+            p.terminate()
+
+        raise e
+    finally:
+        for p in processes:
+            p.join()
+        print(f"Prefetching complete. [{time.time() - start} sec.]")
+
+    if target_data_type == 'ndarray':
+        if not isinstance(gather_res[0], np.ndarray):
+            return np.concatenate([np.asarray(r) for r in gather_res], axis=0)
+
+        # order outputs
+        return np.concatenate(gather_res, axis=0)
+    elif target_data_type == 'list':
+        out = []
+        for r in gather_res:
+            out.extend(r)
+        return out
+    else:
+        return gather_res
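+
+
+# Usage sketches (illustrative comments only; nothing below runs as part of
+# the webui). instantiate_from_config builds an object from a
+# {"target", "params"} dict, which is how the yaml configs in this patch
+# wire up their sub-modules:
+#
+#   cfg = {"target": "torch.nn.Linear", "params": {"in_features": 4, "out_features": 2}}
+#   layer = instantiate_from_config(cfg)  # equivalent to torch.nn.Linear(4, 2)
+#
+# parallel_data_prefetch splits data across n_proc workers and reassembles
+# their results in order. square_chunk is a hypothetical worker function;
+# with cpu_intensive=True the workers are processes, so on spawn-based
+# platforms the call must run under an `if __name__ == "__main__":` guard.
+#
+#   def square_chunk(chunk):
+#       return [x * x for x in chunk]
+#
+#   squares = parallel_data_prefetch(square_chunk, list(range(1000)),
+#                                    n_proc=4, target_data_type="list")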
diff --git a/modules/devices.py b/modules/devices.py
index 67165bf6..f30b6ebc 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -36,8 +36,8 @@ def get_optimal_device():
         else:
             return torch.device("cuda")
 
-    if has_mps():
-        return torch.device("mps")
+    # if has_mps():
+    #     return torch.device("mps")
 
     return cpu
 
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index eaedac13..26280fe4 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -70,14 +70,19 @@ class StableDiffusionModelHijack:
     embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir)
 
     def hijack(self, m):
-        model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
+
+        if shared.text_model_name == "XLMR-Large":
+            model_embeddings = m.cond_stage_model.roberta.embeddings
+            model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self)
+        else:
+            model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
+            model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
 
-        model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
         m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
 
         self.clip = m.cond_stage_model
 
-        apply_optimizations()
+        # apply_optimizations()
 
     def flatten(el):
         flattened = [flatten(children) for children in el.children()]
@@ -125,8 +130,11 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
         self.tokenizer = wrapped.tokenizer
         self.token_mults = {}
 
-        self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0]
-
+        try:
+            self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0]
+        except IndexError:
+            self.comma_token = None
+
         tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k]
         for text, ident in tokens_with_parens:
             mult = 1.0
@@ -298,6 +306,9 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
         return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
 
     def forward(self, text):
+        if shared.text_model_name == "XLMR-Large":
+            return self.wrapped.encode(text)
+
         use_old = opts.use_old_emphasis_implementation
         if use_old:
             batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text)
diff --git a/modules/shared.py b/modules/shared.py
index c93ae2a3..9941d2f4 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -21,7 +21,7 @@ from modules.paths import models_path, script_path, sd_path
 sd_model_file = os.path.join(script_path, 'model.ckpt')
 default_sd_model_file = sd_model_file
 parser = argparse.ArgumentParser()
-parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
+parser.add_argument("--config", type=str, default="configs/altdiffusion/ad-inference.yaml", help="path to config which constructs model",)
 parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
 parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
 parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
@@ -106,6 +106,10 @@ restricted_opts = {
     "outdir_txt2img_grids",
     "outdir_save",
 }
+from omegaconf import OmegaConf
+config = OmegaConf.load(cmd_opts.config)
+# e.g. "XLMR-Large" when an AltDiffusion config is loaded
+text_model_name = config.model.params.cond_stage_config.params.name
 
 cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
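
Note on the final shared.py hunk: reading
config.model.params.cond_stage_config.params.name raises an exception for
stock Stable Diffusion configs, whose cond_stage_config (e.g.
FrozenCLIPEmbedder in v1-inference.yaml) has no params.name entry. A guarded
lookup is one possible hardening, sketched below; OmegaConf.select is real
omegaconf API (its default= argument requires omegaconf >= 2.1), and the rest
mirrors the hunk above:

    from omegaconf import OmegaConf

    config = OmegaConf.load(cmd_opts.config)
    # Returns the default instead of raising when any key on the path is missing.
    text_model_name = OmegaConf.select(
        config, "model.params.cond_stage_config.params.name", default=None
    )

With that fallback, the shared.text_model_name == "XLMR-Large" checks in
sd_hijack.py simply take the regular CLIP path for ordinary v1 configs.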