From 0efc3bf780928f15dc654b9b21692b5d544fd8a2 Mon Sep 17 00:00:00 2001
From: tesseractcat <tesseractcats@gmail.com>
Date: Wed, 24 Aug 2022 17:04:30 -0400
Subject: [PATCH 01/58] Add bare bones web UI

---
 scripts/dream_web.py      |  62 ++++++++++++++++
 scripts/static/index.html | 148 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 210 insertions(+)
 create mode 100644 scripts/dream_web.py
 create mode 100644 scripts/static/index.html

diff --git a/scripts/dream_web.py b/scripts/dream_web.py
new file mode 100644
index 0000000000..3b10922c68
--- /dev/null
+++ b/scripts/dream_web.py
@@ -0,0 +1,62 @@
+import json
+import os
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+print("Loading model...")
+from ldm.simplet2i import T2I
+model = T2I()
+
+class DreamServer(BaseHTTPRequestHandler):
+    def do_GET(self):
+        self.send_response(200)
+        if self.path == "/":
+            self.send_header("Content-type", "text/html")
+            self.end_headers()
+            with open("./scripts/static/index.html", "rb") as content:
+                self.wfile.write(content.read())
+        else:
+            self.send_header("Content-type", "image/png")
+            self.end_headers()
+            with open("." + os.path.normpath("/" + self.path), "rb") as content:
+                self.wfile.write(content.read())
+
+    def do_POST(self):
+        self.send_response(200)
+        self.send_header("Content-type", "application/json")
+        self.end_headers()
+
+        content_length = int(self.headers['Content-Length'])
+        post_data = json.loads(self.rfile.read(content_length))
+        prompt = post_data['prompt']
+        batch = int(post_data['batch'])
+        steps = int(post_data['steps'])
+        width = int(post_data['width'])
+        height = int(post_data['height'])
+        cfgscale = float(post_data['cfgscale'])
+        seed = None if int(post_data['seed']) == -1 else int(post_data['seed'])
+
+        print(f"Request to generate with data: {post_data}")
+        outputs = model.txt2img(prompt,
+                                batch_size = batch,
+                                cfg_scale = cfgscale,
+                                width = width,
+                                height = height,
+                                seed = seed,
+                                steps = steps)
+        print(f"Prompt generated with output: {outputs}")
+
+        outputs = [x + [prompt] for x in outputs] # Append prompt to each output
+        result = {'outputs': outputs}
+        self.wfile.write(bytes(json.dumps(result), "utf-8"))
+
+if __name__ == "__main__":
+    dream_server = HTTPServer(("0.0.0.0", 9090), DreamServer)
+    print("Started Stable Diffusion dream server!")
+
+    try:
+        dream_server.serve_forever()
+    except KeyboardInterrupt:
+        pass
+
+    dream_server.server_close()
+
diff --git a/scripts/static/index.html b/scripts/static/index.html
new file mode 100644
index 0000000000..326dc50392
--- /dev/null
+++ b/scripts/static/index.html
@@ -0,0 +1,148 @@
+<html>
+  <head>
+    <title>Stable Diffusion WebUI</title>
+    <link rel="icon" href="data:,">
+    <style>
+      * {
+          font-family: 'Arial';
+      }
+      #header {
+          text-decoration: dotted underline;
+      }
+      #search {
+          margin-top: 20vh;
+          text-align: center;
+      }
+      fieldset {
+          border: none;
+      }
+      #prompt {
+          width:500px;
+          border-radius: 20px 0px 0px 20px;
+          padding: 5px 10px 5px 10px;
+          border: 1px solid black;
+          outline: none;
+      }
+      #submit {
+          border-radius: 0px 20px 20px 0px;
+          padding: 5px 10px 5px 10px;
+          border: 1px solid black;
+      }
+      #results {
+          text-align: center;
+          padding-left: 20vw;
+          padding-right: 20vw;
+          padding-top: 10px;
+      }
+      img {
+          height:30vh;
+          border-radius:5px;
+          margin:10px;
+      }
+      input[type="number"] {
+          width:60px;
+      }
+    </style>
+    <script>
+      function append_output(output) {
+          let output_node = document.createElement("img");
+          output_node.src = output[0];
+
+          let alt_text = output[1].toString() + " | " + output[2];
+          output_node.alt = alt_text;
+          output_node.title = alt_text;
+
+          document.querySelector("#results").prepend(output_node);
+      }
+
+      function append_outputs(outputs) {
+          for (const output of outputs) {
+              append_output(output);
+          }
+      }
+
+      function save_fields(form) {
+          for (const [k, v] of new FormData(form)) {
+              localStorage.setItem(k, v);
+          }
+      }
+      function load_fields(form) {
+          for (const [k, v] of new FormData(form)) {
+              const item = localStorage.getItem(k);
+              if (item != null) {
+                  form.querySelector(`*[name=${k}]`).value = item;
+              }
+          }
+      }
+      
+      window.onload = () => {
+          document.querySelector("#generate_form").addEventListener('submit', (e) => {
+              e.preventDefault();
+              const form = e.target;
+
+              // Post as JSON
+              fetch(form.action, {
+                  method: form.method,
+                  body: JSON.stringify(Object.fromEntries(new FormData(form))),
+              }).then((result) => {
+                  result.json().then((data) => {
+                      // Re-enable form, remove no-results-message
+                      form.querySelector('fieldset').removeAttribute('disabled');
+                      document.querySelector("#prompt").value = '';
+                      save_fields(form);
+
+                      if (data.outputs.length != 0) {
+                          document.querySelector("#no_results_message")?.remove();
+                          append_outputs(data.outputs);
+                      } else {
+                          alert("Error occurred while generating.");
+                      }
+                  });
+              });
+
+              // Disable form
+              form.querySelector('fieldset').setAttribute('disabled','');
+              document.querySelector("#prompt").value = 'Generating...';
+          });
+          document.querySelector("#generate_form").addEventListener('change', (e) => {
+              save_fields(e.target.form);
+          });
+          load_fields(document.querySelector("#generate_form"));
+      };
+    </script>
+  </head>
+  <body>
+    <div id="search">
+      <h2 id="header">Stable Diffusion</h2>
+
+      <form id="generate_form" method="post" action="#">
+        <fieldset>
+          <input type="text" id="prompt" name="prompt">
+          <input type="submit" id="submit" value="Generate">
+        </fieldset>
+        <fieldset>
+          <label for="batch">Batch Size:</label>
+          <input value="1" type="number" id="batch" name="batch">
+          <label for="steps">Steps:</label>
+          <input value="50" type="number" id="steps" name="steps">
+          <label for="cfgscale">Cfg Scale:</label>
+          <input value="7.5" type="number" id="cfgscale" name="cfgscale" step="any">
+          <span>&bull;</span>
+          <label title="Set to multiple of 64" for="width">Width:</label>
+          <input value="512" type="number" id="width" name="width">
+          <label title="Set to multiple of 64" for="height">Height:</label>
+          <input value="512" type="number" id="height" name="height">
+          <span>&bull;</span>
+          <label title="Set to -1 for random seed" for="seed">Seed:</label>
+          <input value="-1" type="number" id="seed" name="seed">
+        </fieldset>
+      </form>
+    </div>
+    <hr style="width: 200px">
+    <div id="results">
+      <div id="no_results_message">
+        <i><p>No results...</p></i>
+      </div>
+    </div>
+  </body>
+</html>

From b1600d4ca314174d0933c44c2e7a7b1e9813ea23 Mon Sep 17 00:00:00 2001
From: tesseractcat <tesseractcats@gmail.com>
Date: Wed, 24 Aug 2022 17:26:22 -0400
Subject: [PATCH 02/58] Update seed on click

---
 scripts/static/index.html | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/scripts/static/index.html b/scripts/static/index.html
index 326dc50392..21d49c9427 100644
--- a/scripts/static/index.html
+++ b/scripts/static/index.html
@@ -17,7 +17,7 @@
           border: none;
       }
       #prompt {
-          width:500px;
+          width: 500px;
           border-radius: 20px 0px 0px 20px;
           padding: 5px 10px 5px 10px;
           border: 1px solid black;
@@ -35,12 +35,16 @@
           padding-top: 10px;
       }
       img {
-          height:30vh;
-          border-radius:5px;
-          margin:10px;
+          cursor: pointer;
+          height: 30vh;
+          border-radius: 5px;
+          margin: 10px;
       }
       input[type="number"] {
-          width:60px;
+          width: 60px;
+      }
+      #seed {
+          width: 150px;
       }
     </style>
     <script>
@@ -51,6 +55,10 @@
           let alt_text = output[1].toString() + " | " + output[2];
           output_node.alt = alt_text;
           output_node.title = alt_text;
+          // Update seed on click
+          output_node.addEventListener('click', () => {
+              document.querySelector("#seed").value = output[1];
+          });
 
           document.querySelector("#results").prepend(output_node);
       }

From df9f088eb42b9ce0f682ff321a3c39c8ec9ed49c Mon Sep 17 00:00:00 2001
From: tesseractcat <tesseractcats@gmail.com>
Date: Wed, 24 Aug 2022 17:28:59 -0400
Subject: [PATCH 03/58] Preserve prompt across generations

---
 scripts/static/index.html | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/scripts/static/index.html b/scripts/static/index.html
index 21d49c9427..d5b27e83ef 100644
--- a/scripts/static/index.html
+++ b/scripts/static/index.html
@@ -87,6 +87,7 @@
           document.querySelector("#generate_form").addEventListener('submit', (e) => {
               e.preventDefault();
               const form = e.target;
+              const prompt = document.querySelector("#prompt").value;
 
               // Post as JSON
               fetch(form.action, {
@@ -96,8 +97,7 @@
                   result.json().then((data) => {
                       // Re-enable form, remove no-results-message
                       form.querySelector('fieldset').removeAttribute('disabled');
-                      document.querySelector("#prompt").value = '';
-                      save_fields(form);
+                      document.querySelector("#prompt").value = prompt;
 
                       if (data.outputs.length != 0) {
                           document.querySelector("#no_results_message")?.remove();
@@ -110,7 +110,7 @@
 
               // Disable form
               form.querySelector('fieldset').setAttribute('disabled','');
-              document.querySelector("#prompt").value = 'Generating...';
+              document.querySelector("#prompt").value = `Generating: "${prompt}"`;
           });
           document.querySelector("#generate_form").addEventListener('change', (e) => {
               save_fields(e.target.form);

From 91330878503ad1140fb2b2fbd523f01804a3c212 Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Wed, 24 Aug 2022 17:52:34 -0400
Subject: [PATCH 04/58] first draft at big refactoring; will be broken

---
 ldm/dream_util.py | 143 +++++++++++++++++++++++
 ldm/simplet2i.py  | 288 ++++++++++++++--------------------------------
 scripts/dream.py  | 130 ++-------------------
 3 files changed, 238 insertions(+), 323 deletions(-)
 create mode 100644 ldm/dream_util.py

diff --git a/ldm/dream_util.py b/ldm/dream_util.py
new file mode 100644
index 0000000000..1526223cd8
--- /dev/null
+++ b/ldm/dream_util.py
@@ -0,0 +1,143 @@
+'''Utilities for dealing with PNG images and their path names'''
+import os, re
+import atexit
+from PIL import Image,PngImagePlugin
+
+# ---------------readline utilities---------------------
+try:
+    import readline
+    readline_available = True
+except:
+    readline_available = False
+
+class Completer():
+    def __init__(self,options):
+        self.options = sorted(options)
+        return
+
+    def complete(self,text,state):
+        buffer = readline.get_line_buffer()
+
+        if text.startswith(('-I','--init_img')):
+            return self._path_completions(text,state,('.png'))
+
+        if buffer.strip().endswith('cd') or text.startswith(('.','/')):
+            return self._path_completions(text,state,())
+
+        response = None
+        if state == 0:
+            # This is the first time for this text, so build a match list.
+            if text:
+                self.matches = [s 
+                                for s in self.options
+                                if s and s.startswith(text)]
+            else:
+                self.matches = self.options[:]
+
+        # Return the state'th item from the match list,
+        # if we have that many.
+        try:
+            response = self.matches[state]
+        except IndexError:
+            response = None
+        return response
+
+    def _path_completions(self,text,state,extensions):
+        # get the path so far
+        if text.startswith('-I'):
+            path = text.replace('-I','',1).lstrip()
+        elif text.startswith('--init_img='):
+            path = text.replace('--init_img=','',1).lstrip()
+        else:
+            path = text
+
+        matches  = list()
+
+        path = os.path.expanduser(path)
+        if len(path)==0:
+            matches.append(text+'./')
+        else:
+            dir  = os.path.dirname(path)
+            dir_list = os.listdir(dir)
+            for n in dir_list:
+                if n.startswith('.') and len(n)>1:
+                    continue
+                full_path = os.path.join(dir,n)
+                if full_path.startswith(path):
+                    if os.path.isdir(full_path):
+                        matches.append(os.path.join(os.path.dirname(text),n)+'/')
+                    elif n.endswith(extensions):
+                        matches.append(os.path.join(os.path.dirname(text),n))
+
+        try:
+            response = matches[state]
+        except IndexError:
+            response = None
+        return response
+
+if readline_available:
+    readline.set_completer(Completer(['cd','pwd',
+                                      '--steps','-s','--seed','-S','--iterations','-n','--batch_size','-b',
+                                      '--width','-W','--height','-H','--cfg_scale','-C','--grid','-g',
+                                      '--individual','-i','--init_img','-I','--strength','-f','-v','--variants']).complete)
+    readline.set_completer_delims(" ")
+    readline.parse_and_bind('tab: complete')
+
+    histfile = os.path.join(os.path.expanduser('~'),".dream_history")
+    try:
+        readline.read_history_file(histfile)
+        readline.set_history_length(1000)
+    except FileNotFoundError:
+        pass
+    atexit.register(readline.write_history_file,histfile)
+
+# -------------------image generation utils-----
+class PngWriter:
+
+    def __init__(self,opt):
+        self.opt           = opt
+        self.filepath      = None
+        self.files_written = []
+
+    def write_image(self,image,seed):
+        self.filepath = self.unique_filename(self.opt,seed,self.filepath) # will increment name in some sensible way
+        try:
+            image.save(self.filepath)
+        except IOError as e:
+            print(e)
+        self.files_written.append([self.filepath,seed])
+
+    def unique_filename(self,opt,seed,previouspath):
+        revision = 1
+
+        if previouspath is None:
+            # sort reverse alphabetically until we find max+1
+            dirlist   = sorted(os.listdir(opt.outdir),reverse=True)
+            # find the first filename that matches our pattern or return 000000.0.png
+            filename   = next((f for f in dirlist if re.match(r'^(\d+)\..*\.png',f)),'0000000.0.png')
+            basecount  = int(filename.split('.',1)[0])
+            basecount += 1
+            if opt.batch_size > 1:
+                filename = f'{basecount:06}.{seed}.01.png'
+            else:
+                filename = f'{basecount:06}.{seed}.png'
+            return os.path.join(opt.outdir,filename)
+
+        else:
+            basename = os.path.basename(previouspath)
+            x = re.match(r'^(\d+)\..*\.png',basename)
+            if not x:
+                return self.unique_filename(opt,seed,None)
+
+            basecount = int(x.groups()[0])
+            series = 0 
+            finished = False
+            while not finished:
+                series += 1
+                filename = f'{basecount:06}.{seed}.png'
+                if opt.batch_size > 1 or os.path.exists(os.path.join(opt.outdir,filename)):
+                    filename = f'{basecount:06}.{seed}.{series:02}.png'
+                finished = not os.path.exists(os.path.join(opt.outdir,filename))
+            return os.path.join(opt.outdir,filename)
+
+
diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 4737d90ba7..8e8b077922 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -23,7 +23,6 @@ t2i = T2I(outdir      = <path>        // outputs/txt2img-samples
           width       = <integer>     // image width, multiple of 64 (512)
           height      = <integer>     // image height, multiple of 64 (512)
           cfg_scale   = <float>       // unconditional guidance scale (7.5)
-          fixed_code  = <boolean>     // False
           )
 
 # do the slow model initialization
@@ -79,7 +78,6 @@ class T2I:
     """T2I class
     Attributes
     ----------
-    outdir
     model
     config
     iterations
@@ -87,12 +85,9 @@ class T2I:
     steps
     seed
     sampler_name
-    grid
-    individual
     width
     height
     cfg_scale
-    fixed_code
     latent_channels
     downsampling_factor
     precision
@@ -102,11 +97,8 @@ class T2I:
 The vast majority of these arguments default to reasonable values.
 """
     def __init__(self,
-                 outdir="outputs/txt2img-samples",
                  batch_size=1,
                  iterations = 1,
-                 width=512,
-                 height=512,
                  grid=False,
                  individual=None, # redundant
                  steps=50,
@@ -118,7 +110,6 @@ The vast majority of these arguments default to reasonable values.
                  latent_channels=4,
                  downsampling_factor=8,
                  ddim_eta=0.0,  # deterministic
-                 fixed_code=False,
                  precision='autocast',
                  full_precision=False,
                  strength=0.75, # default in scripts/img2img.py
@@ -126,7 +117,6 @@ The vast majority of these arguments default to reasonable values.
                  latent_diffusion_weights=False,  # just to keep track of this parameter when regenerating prompt
                  device='cuda'
     ):
-        self.outdir     = outdir
         self.batch_size      = batch_size
         self.iterations = iterations
         self.width      = width
@@ -137,7 +127,6 @@ The vast majority of these arguments default to reasonable values.
         self.weights    = weights
         self.config     = config
         self.sampler_name  = sampler_name
-        self.fixed_code    = fixed_code
         self.latent_channels     = latent_channels
         self.downsampling_factor = downsampling_factor
         self.ddim_eta            = ddim_eta
@@ -154,16 +143,25 @@ The vast majority of these arguments default to reasonable values.
         else:
             self.seed = seed
 
-    @torch.no_grad()
-    def txt2img(self,prompt,outdir=None,batch_size=None,iterations=None,
-                steps=None,seed=None,grid=None,individual=None,width=None,height=None,
-                cfg_scale=None,ddim_eta=None,strength=None,embedding_path=None,init_img=None,
-                skip_normalize=False,variants=None):    # note the "variants" option is an unused hack caused by how options are passed
-        """
-        Generate an image from the prompt, writing iteration images into the outdir
-        The output is a list of lists in the format: [[filename1,seed1], [filename2,seed2],...]
-        """
-        outdir     = outdir     or self.outdir
+    def generate(self,
+                 # these are common
+                 prompt,
+                 batch_size=None,
+                 iterations=None,
+                 steps=None,
+                 seed=None,
+                 cfg_scale=None,
+                 ddim_eta=None,
+                 skip_normalize=False,
+                 image_callback=None,
+                 # these are specific to txt2img
+                 width=None,
+                 height=None,
+                 # these are specific to img2img
+                 init_img=None,
+                 strength=None,
+                 variants=None):
+        '''ldm.generate() is the common entry point for txt2img() and img2img()'''
         steps      = steps      or self.steps
         seed       = seed       or self.seed
         width      = width      or self.width
@@ -172,41 +170,57 @@ The vast majority of these arguments default to reasonable values.
         ddim_eta   = ddim_eta   or self.ddim_eta
         batch_size = batch_size or self.batch_size
         iterations = iterations or self.iterations
-        strength   = strength   or self.strength     # not actually used here, but preserved for code refactoring
-        embedding_path = embedding_path or self.embedding_path
+        strength   = strength   or self.strength
 
         model = self.load_model()  # will instantiate the model or return it from cache
-
-        assert strength<1.0 and strength>=0.0, "strength (-f) must be >=0.0 and <1.0"
         assert cfg_scale>1.0, "CFG_Scale (-C) must be >1.0"
+        assert 0. <= strength <= 1., 'can only work with strength in [0.0, 1.0]'
 
-        # grid and individual are mutually exclusive, with individual taking priority.
-        # not necessary, but needed for compatability with dream bot
-        if (grid is None):
-            grid = self.grid
-        if individual:
-            grid = False
-        
         data = [batch_size * [prompt]]
+        scope = autocast if self.precision=="autocast" else nullcontext
+        if grid:
+            callback = self.image2png
+        else:
+            callback = None
 
-        # make directories and establish names for the output files
-        os.makedirs(outdir, exist_ok=True)
+        tic    = time.time()
+        if init_img:
+            assert os.path.exists(init_img),f'{init_img}: File not found'
+            results = self._img2img(prompt,
+                                    data=data,precision_scope=scope,
+                                    batch_size=batch_size,iterations=iterations,
+                                    steps=steps,seed=seed,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
+                                    skip_normalize=skip_normalize,
+                                    init_img=init_img,strength=strength,variants=variants,
+                                    callback=image_callback)
+        else:
+            results = self._txt2img(prompt,
+                                    data=data,precision_scope=scope,
+                                    batch_size=batch_size,iterations=iterations,
+                                    steps=steps,seed=seed,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
+                                    skip_normalize=skip_normalize,
+                                    width=width,height=height,
+                                    callback=image_callback)
+        toc  = time.time()
+        print(f'{len(results)} images generated in',"%4.2fs"% (toc-tic))
+        return results
+            
+    @torch.no_grad()
+    def _txt2img(self,prompt,
+                 data,precision_scope,
+                 batch_size,iterations,
+                 steps,seed,cfg_scale,ddim_eta,
+                 skip_normalize,
+                 width,height,
+                 callback=callback):    # the callback is called each time a new Image is generated
+        """
+        Generate an image from the prompt, writing iteration images into the outdir
+        The output is a list of lists in the format: [[image1,seed1], [image2,seed2],...]
+        """
 
-        start_code = None
-        if self.fixed_code:
-            start_code = torch.randn([batch_size,
-                                      self.latent_channels,
-                                      height // self.downsampling_factor,
-                                      width  // self.downsampling_factor],
-                                     device=self.device)
-
-        precision_scope = autocast if self.precision=="autocast" else nullcontext
         sampler         = self.sampler
         images = list()
-        seeds  = list()
-        filename = None
         image_count = 0
-        tic    = time.time()
 
         # Gawd. Too many levels of indent here. Need to refactor into smaller routines!
         try:
@@ -239,38 +253,24 @@ The vast majority of these arguments default to reasonable values.
 
                         shape = [self.latent_channels, height // self.downsampling_factor, width // self.downsampling_factor]
                         samples_ddim, _ = sampler.sample(S=steps,
-                                                            conditioning=c,
-                                                            batch_size=batch_size,
-                                                            shape=shape,
-                                                            verbose=False,
-                                                            unconditional_guidance_scale=cfg_scale,
-                                                            unconditional_conditioning=uc,
-                                                            eta=ddim_eta,
-                                                            x_T=start_code)
+                                                         conditioning=c,
+                                                         batch_size=batch_size,
+                                                         shape=shape,
+                                                         verbose=False,
+                                                         unconditional_guidance_scale=cfg_scale,
+                                                         unconditional_conditioning=uc,
+                                                         eta=ddim_eta)
 
                         x_samples_ddim = model.decode_first_stage(samples_ddim)
                         x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
-
-                        if not grid:
-                            for x_sample in x_samples_ddim:
-                                x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
-                                filename = self._unique_filename(outdir,previousname=filename,
-                                                                    seed=seed,isbatch=(batch_size>1))
-                                assert not os.path.exists(filename)
-                                Image.fromarray(x_sample.astype(np.uint8)).save(filename)
-                                images.append([filename,seed])
-                        else:
-                            all_samples.append(x_samples_ddim)
-                            seeds.append(seed)
-
-                    image_count += 1
+                        for x_sample in x_samples_ddim:
+                            x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
+                            image = Image.fromarray(x_sample.astype(np.uint8))
+                            images.append([image,seed])
+                            if callback is not None:
+                                callback(image,seed)
+                                
                     seed = self._new_seed()
-                if grid:
-                    images = self._make_grid(samples=all_samples,
-                                                seeds=seeds,
-                                                batch_size=batch_size,
-                                                iterations=iterations,
-                                                outdir=outdir)
         except KeyboardInterrupt:
             print('*interrupted*')
             print('Partial results will be returned; if --grid was requested, nothing will be returned.')
@@ -279,48 +279,20 @@ The vast majority of these arguments default to reasonable values.
 
         toc = time.time()
         print(f'{image_count} images generated in',"%4.2fs"% (toc-tic))
-
         return images
         
-    # There is lots of shared code between this and txt2img and should be refactored.
     @torch.no_grad()
-    def img2img(self,prompt,outdir=None,init_img=None,batch_size=None,iterations=None,
-                steps=None,seed=None,grid=None,individual=None,width=None,height=None,
-                cfg_scale=None,ddim_eta=None,strength=None,embedding_path=None,
-                skip_normalize=False,variants=None):   # note the "variants" option is an unused hack caused by how options are passed
+    def _img2img(self,prompt,
+                 data,precision_scope,
+                 batch_size,iterations,
+                 steps,seed,cfg_scale,ddim_eta,
+                 skip_normalize,
+                 init_img,strength,variants,
+                 callback):
         """
         Generate an image from the prompt and the initial image, writing iteration images into the outdir
-        The output is a list of lists in the format: [[filename1,seed1], [filename2,seed2],...]
+        The output is a list of lists in the format: [[image,seed1], [image,seed2],...]
         """
-        outdir     = outdir     or self.outdir
-        steps      = steps      or self.steps
-        seed       = seed       or self.seed
-        cfg_scale  = cfg_scale  or self.cfg_scale
-        ddim_eta   = ddim_eta   or self.ddim_eta
-        batch_size = batch_size or self.batch_size
-        iterations = iterations or self.iterations
-        strength   = strength   or self.strength
-        embedding_path = embedding_path or self.embedding_path
-
-        assert strength<1.0 and strength>=0.0, "strength (-f) must be >=0.0 and <1.0"
-        assert cfg_scale>1.0, "CFG_Scale (-C) must be >1.0"
-
-        if init_img is None:
-            print("no init_img provided!")
-            return []
-
-        model = self.load_model()  # will instantiate the model or return it from cache
-
-        precision_scope = autocast if self.precision=="autocast" else nullcontext
-
-        # grid and individual are mutually exclusive, with individual taking priority.
-        # not necessary, but needed for compatability with dream bot
-        if (grid is None):
-            grid = self.grid
-        if individual:
-            grid = False
-        
-        data = [batch_size * [prompt]]
 
         # PLMS sampler not supported yet, so ignore previous sampler
         if self.sampler_name!='ddim':
@@ -329,33 +301,18 @@ The vast majority of these arguments default to reasonable values.
         else:
             sampler = self.sampler
 
-        # make directories and establish names for the output files
-        os.makedirs(outdir, exist_ok=True)
-
-        assert os.path.isfile(init_img)
         init_image = self._load_img(init_img).to(self.device)
         init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
         with precision_scope(self.device.type):
             init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image))  # move to latent space
 
         sampler.make_schedule(ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False)
-
-        try:
-            assert 0. <= strength <= 1., 'can only work with strength in [0.0, 1.0]'
-        except AssertionError:
-            print(f"strength must be between 0.0 and 1.0, but received value {strength}")
-            return []
         
         t_enc = int(strength * steps)
         print(f"target t_enc is {t_enc} steps")
 
         images = list()
-        seeds  = list()
-        filename = None
-        image_count = 0 # actual number of iterations performed
-        tic    = time.time()
 
-        # Gawd. Too many levels of indent here. Need to refactor into smaller routines!
         try:
             with precision_scope(self.device.type), model.ema_scope():
                 all_samples = list()
@@ -393,25 +350,13 @@ The vast majority of these arguments default to reasonable values.
                         x_samples = model.decode_first_stage(samples)
                         x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
 
-                        if not grid:
-                            for x_sample in x_samples:
-                                x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
-                                filename = self._unique_filename(outdir,previousname=filename,
-                                                                    seed=seed,isbatch=(batch_size>1))
-                                assert not os.path.exists(filename)
-                                Image.fromarray(x_sample.astype(np.uint8)).save(filename)
-                                images.append([filename,seed])
-                        else:
-                            all_samples.append(x_samples)
-                            seeds.append(seed)
-                    image_count +=1
+                        for x_sample in x_samples:
+                            x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
+                            image = Image.fromarray(x_sample.astype(np.uint8))
+                            images.append([image,seed])
+                            if callback is not None:
+                                callback(image,seed)
                     seed = self._new_seed()
-                if grid:
-                    images = self._make_grid(samples=all_samples,
-                                                seeds=seeds,
-                                                batch_size=batch_size,
-                                                iterations=iterations,
-                                                outdir=outdir)
 
         except KeyboardInterrupt:
             print('*interrupted*')
@@ -419,26 +364,6 @@ The vast majority of these arguments default to reasonable values.
         except RuntimeError as e:
             print("Oops! A runtime error has occurred. If this is unexpected, please copy-and-paste this stack trace and post it as an Issue to http://github.com/lstein/stable-diffusion")
             traceback.print_exc()
-
-        toc = time.time()
-        print(f'{image_count} images generated in',"%4.2fs"% (toc-tic))
-
-        return images
-
-    def _make_grid(self,samples,seeds,batch_size,iterations,outdir):
-        images = list()
-        n_rows = batch_size if batch_size>1 else int(math.sqrt(batch_size * iterations))
-        # save as grid
-        grid = torch.stack(samples, 0)
-        grid = rearrange(grid, 'n b c h w -> (n b) c h w')
-        grid = make_grid(grid, nrow=n_rows)
-
-        # to image
-        grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
-        filename = self._unique_filename(outdir,seed=seeds[0],grid_count=batch_size*iterations)
-        Image.fromarray(grid.astype(np.uint8)).save(filename)
-        for s in seeds:
-            images.append([filename,s])
         return images
 
     def _new_seed(self):
@@ -513,43 +438,6 @@ The vast majority of these arguments default to reasonable values.
         image = torch.from_numpy(image)
         return 2.*image - 1.
 
-    def _unique_filename(self,outdir,previousname=None,seed=0,isbatch=False,grid_count=None):
-        revision = 1
-
-        if previousname is None:
-            # sort reverse alphabetically until we find max+1
-            dirlist   = sorted(os.listdir(outdir),reverse=True)
-            # find the first filename that matches our pattern or return 000000.0.png
-            filename   = next((f for f in dirlist if re.match('^(\d+)\..*\.png',f)),'0000000.0.png')
-            basecount  = int(filename.split('.',1)[0])
-            basecount += 1
-            if grid_count is not None:
-                grid_label = f'grid#1-{grid_count}'
-                filename = f'{basecount:06}.{seed}.{grid_label}.png'
-            elif isbatch:
-                filename = f'{basecount:06}.{seed}.01.png'
-            else:
-                filename = f'{basecount:06}.{seed}.png'
-            
-            return os.path.join(outdir,filename)
-
-        else:
-            previousname = os.path.basename(previousname)
-            x = re.match('^(\d+)\..*\.png',previousname)
-            if not x:
-                return self._unique_filename(outdir,previousname,seed)
-
-            basecount = int(x.groups()[0])
-            series = 0 
-            finished = False
-            while not finished:
-                series += 1
-                filename = f'{basecount:06}.{seed}.png'
-                if isbatch or os.path.exists(os.path.join(outdir,filename)):
-                    filename = f'{basecount:06}.{seed}.{series:02}.png'
-                finished = not os.path.exists(os.path.join(outdir,filename))
-            return os.path.join(outdir,filename)
-
     def _split_weighted_subprompts(text):
         """
         grabs all text up to the first occurrence of ':' 
diff --git a/scripts/dream.py b/scripts/dream.py
index dc5fad5bac..e0714dbcbd 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -8,13 +8,7 @@ import os
 import sys
 import copy
 from PIL import Image,PngImagePlugin
-
-# readline unavailable on windows systems
-try:
-    import readline
-    readline_available = True
-except:
-    readline_available = False
+from ldm.dream_util import Completer,PngWriter
 
 debugging = False
 
@@ -131,13 +125,13 @@ def main_loop(t2i,parser,log,infile):
         if elements[0]=='cd' and len(elements)>1:
             if os.path.exists(elements[1]):
                 print(f"setting image output directory to {elements[1]}")
-                t2i.outdir=elements[1]
+                opt.outdir=elements[1]
             else:
                 print(f"directory {elements[1]} does not exist")
             continue
 
         if elements[0]=='pwd':
-            print(f"current output directory is {t2i.outdir}")
+            print(f"current output directory is {opt.outdir}")
             continue
         
         if elements[0].startswith('!dream'): # in case a stored prompt still contains the !dream command
@@ -167,47 +161,19 @@ def main_loop(t2i,parser,log,infile):
             continue
 
         try:
-            if opt.init_img is None:
-                results = t2i.txt2img(**vars(opt))
-            else:
-                assert os.path.exists(opt.init_img),f"No file found at {opt.init_img}. On Linux systems, pressing <tab> after -I will autocomplete a list of possible image files."
-                if None not in (opt.width,opt.height):
-                    print('Warning: width and height options are ignored when modifying an init image')
-                results = t2i.img2img(**vars(opt))
+            file_writer  = PngWriter(opt)
+            opt.callback = file_writer(write_image)
+            run_generator(**vars(opt))
+            results      = file_writer.files_written
         except AssertionError as e:
             print(e)
             continue
 
-
-        allVariantResults = []
-        if opt.variants is not None:
-            print(f"Generating {opt.variants} variant(s)...")
-            newopt = copy.deepcopy(opt)
-            newopt.variants = None
-            for r in results:
-                newopt.init_img = r[0]
-                print(f"\t generating variant for {newopt.init_img}")
-                for j in range(0, opt.variants):
-                    try:
-                        variantResults = t2i.img2img(**vars(newopt))
-                        allVariantResults.append([newopt,variantResults])
-                    except AssertionError as e:
-                        print(e)
-                        continue
-            print(f"{opt.variants} Variants generated!")
-
         print("Outputs:")
         write_log_message(t2i,opt,results,log)
-            
-        if allVariantResults:
-            print("Variant outputs:")
-            for vr in allVariantResults:
-                write_log_message(t2i,vr[0],vr[1],log)
-            
 
     print("goodbye!")
 
-
 def write_log_message(t2i,opt,results,logfile):
     ''' logs the name of the output image, its prompt and seed to the terminal, log file, and a Dream text chunk in the PNG metadata '''
     switches = _reconstruct_switches(t2i,opt)
@@ -339,89 +305,7 @@ def create_cmd_parser():
     parser.add_argument('-x','--skip_normalize',action='store_true',help="skip subprompt weight normalization")
     return parser
 
-if readline_available:
-    def setup_readline():
-        readline.set_completer(Completer(['cd','pwd',
-                                          '--steps','-s','--seed','-S','--iterations','-n','--batch_size','-b',
-                                          '--width','-W','--height','-H','--cfg_scale','-C','--grid','-g',
-                                          '--individual','-i','--init_img','-I','--strength','-f','-v','--variants']).complete)
-        readline.set_completer_delims(" ")
-        readline.parse_and_bind('tab: complete')
-        load_history()
 
-    def load_history():
-        histfile = os.path.join(os.path.expanduser('~'),".dream_history")
-        try:
-            readline.read_history_file(histfile)
-            readline.set_history_length(1000)
-        except FileNotFoundError:
-            pass
-        atexit.register(readline.write_history_file,histfile)
-
-    class Completer():
-        def __init__(self,options):
-            self.options = sorted(options)
-            return
-
-        def complete(self,text,state):
-            buffer = readline.get_line_buffer()
-            
-            if text.startswith(('-I','--init_img')):
-                return self._path_completions(text,state,('.png'))
-
-            if buffer.strip().endswith('cd') or text.startswith(('.','/')):
-                return self._path_completions(text,state,())
-
-            response = None
-            if state == 0:
-                # This is the first time for this text, so build a match list.
-                if text:
-                    self.matches = [s 
-                                    for s in self.options
-                                    if s and s.startswith(text)]
-                else:
-                    self.matches = self.options[:]
-
-            # Return the state'th item from the match list,
-            # if we have that many.
-            try:
-                response = self.matches[state]
-            except IndexError:
-                response = None
-            return response
-
-        def _path_completions(self,text,state,extensions):
-            # get the path so far
-            if text.startswith('-I'):
-                path = text.replace('-I','',1).lstrip()
-            elif text.startswith('--init_img='):
-                path = text.replace('--init_img=','',1).lstrip()
-            else:
-                path = text
-
-            matches  = list()
-
-            path = os.path.expanduser(path)
-            if len(path)==0:
-                matches.append(text+'./')
-            else:
-                dir  = os.path.dirname(path)
-                dir_list = os.listdir(dir)
-                for n in dir_list:
-                    if n.startswith('.') and len(n)>1:
-                        continue
-                    full_path = os.path.join(dir,n)
-                    if full_path.startswith(path):
-                        if os.path.isdir(full_path):
-                            matches.append(os.path.join(os.path.dirname(text),n)+'/')
-                        elif n.endswith(extensions):
-                            matches.append(os.path.join(os.path.dirname(text),n))
-
-            try:
-                response = matches[state]
-            except IndexError:
-                response = None
-            return response
 
 if __name__ == "__main__":
     main()

From b12955c9631ceec70154690eef072b99b9c71c32 Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Wed, 24 Aug 2022 17:57:44 -0400
Subject: [PATCH 05/58] remove unneeded imports from dream.py

---
 scripts/dream.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/scripts/dream.py b/scripts/dream.py
index e0714dbcbd..6ff7802fa2 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -3,11 +3,9 @@
 
 import argparse
 import shlex
-import atexit
 import os
 import sys
 import copy
-from PIL import Image,PngImagePlugin
 from ldm.dream_util import Completer,PngWriter
 
 debugging = False

From 0a7fe6f2d9ba03b7da1eef2317476ce4f9de42ca Mon Sep 17 00:00:00 2001
From: tesseractcat <tesseractcats@gmail.com>
Date: Wed, 24 Aug 2022 18:19:50 -0400
Subject: [PATCH 06/58] Switch to ThreadingHTTPServer

---
 scripts/dream_web.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/dream_web.py b/scripts/dream_web.py
index 3b10922c68..7c9155d926 100644
--- a/scripts/dream_web.py
+++ b/scripts/dream_web.py
@@ -1,6 +1,6 @@
 import json
 import os
-from http.server import BaseHTTPRequestHandler, HTTPServer
+from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
 
 print("Loading model...")
 from ldm.simplet2i import T2I
@@ -50,7 +50,7 @@ class DreamServer(BaseHTTPRequestHandler):
         self.wfile.write(bytes(json.dumps(result), "utf-8"))
 
 if __name__ == "__main__":
-    dream_server = HTTPServer(("0.0.0.0", 9090), DreamServer)
+    dream_server = ThreadingHTTPServer(("0.0.0.0", 9090), DreamServer)
     print("Started Stable Diffusion dream server!")
 
     try:

From b978536385613ab2c4bc4660633337e730020d75 Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Wed, 24 Aug 2022 19:47:59 -0400
Subject: [PATCH 07/58] code is reorganized and mostly functional. Grid needs
 to be brought back online, as well as naming of img2img variants (currently
 the variants get written but not logged)

---
 ldm/dream_util.py | 64 ++++++++++++++++++++++++++--------
 ldm/simplet2i.py  | 76 +++++++++++++++++++---------------------
 scripts/dream.py  | 89 +++++++++--------------------------------------
 3 files changed, 101 insertions(+), 128 deletions(-)

diff --git a/ldm/dream_util.py b/ldm/dream_util.py
index 1526223cd8..ceab2940b1 100644
--- a/ldm/dream_util.py
+++ b/ldm/dream_util.py
@@ -1,6 +1,7 @@
 '''Utilities for dealing with PNG images and their path names'''
 import os
 import atexit
+import re
 from PIL import Image,PngImagePlugin
 
 # ---------------readline utilities---------------------
@@ -94,40 +95,43 @@ if readline_available:
 # -------------------image generation utils-----
 class PngWriter:
 
-    def __init__(self,opt):
-        self.opt           = opt
-        self.filepath      = None
-        self.files_written = []
+    def __init__(self,outdir,opt,prompt):
+        self.outdir           = outdir
+        self.opt              = opt
+        self.prompt           = prompt
+        self.filepath         = None
+        self.files_written    = []
 
     def write_image(self,image,seed):
-        self.filepath = self.unique_filename(self,opt,seed,self.filepath) # will increment name in some sensible way
+        self.filepath = self.unique_filename(seed,self.filepath) # will increment name in some sensible way
         try:
-            image.save(self.filename)
+            prompt = f'{self.prompt} -S{seed}'
+            self.save_image_and_prompt_to_png(image,prompt,self.filepath)
         except IOError as e:
             print(e)
         self.files_written.append([self.filepath,seed])
 
-    def unique_filename(self,opt,seed,previouspath):
+    def unique_filename(self,seed,previouspath):
         revision = 1
 
         if previouspath is None:
             # sort reverse alphabetically until we find max+1
-            dirlist   = sorted(os.listdir(outdir),reverse=True)
+            dirlist   = sorted(os.listdir(self.outdir),reverse=True)
             # find the first filename that matches our pattern or return 000000.0.png
             filename   = next((f for f in dirlist if re.match('^(\d+)\..*\.png',f)),'0000000.0.png')
             basecount  = int(filename.split('.',1)[0])
             basecount += 1
-            if opt.batch_size > 1:
+            if self.opt.batch_size > 1:
                 filename = f'{basecount:06}.{seed}.01.png'
             else:
                 filename = f'{basecount:06}.{seed}.png'
-            return os.path.join(outdir,filename)
+            return os.path.join(self.outdir,filename)
 
         else:
             basename = os.path.basename(previouspath)
             x = re.match('^(\d+)\..*\.png',basename)
             if not x:
-                return self.unique_filename(opt,seed,previouspath)
+                return self.unique_filename(seed,previouspath)
 
             basecount = int(x.groups()[0])
             series = 0 
@@ -135,9 +139,41 @@ class PngWriter:
             while not finished:
                 series += 1
                 filename = f'{basecount:06}.{seed}.png'
-                if isbatch or os.path.exists(os.path.join(outdir,filename)):
+                if self.opt.batch_size>1 or os.path.exists(os.path.join(self.outdir,filename)):
                     filename = f'{basecount:06}.{seed}.{series:02}.png'
-                finished = not os.path.exists(os.path.join(outdir,filename))
-            return os.path.join(outdir,filename)
+                finished = not os.path.exists(os.path.join(self.outdir,filename))
+            return os.path.join(self.outdir,filename)
 
+    def save_image_and_prompt_to_png(self,image,prompt,path):
+        info = PngImagePlugin.PngInfo()
+        info.add_text("Dream",prompt)
+        image.save(path,"PNG",pnginfo=info)
+    
+class PromptFormatter():
+    def __init__(self,t2i,opt):
+        self.t2i = t2i
+        self.opt = opt
+
+    def normalize_prompt(self):
+        '''Normalize the prompt and switches'''
+        t2i      = self.t2i
+        opt      = self.opt
+
+        switches = list()
+        switches.append(f'"{opt.prompt}"')
+        switches.append(f'-s{opt.steps        or t2i.steps}')
+        switches.append(f'-b{opt.batch_size   or t2i.batch_size}')
+        switches.append(f'-W{opt.width        or t2i.width}')
+        switches.append(f'-H{opt.height       or t2i.height}')
+        switches.append(f'-C{opt.cfg_scale    or t2i.cfg_scale}')
+        switches.append(f'-m{t2i.sampler_name}')
+        if opt.variants:
+            switches.append(f'-v{opt.variants}')
+        if opt.init_img:
+            switches.append(f'-I{opt.init_img}')
+        if opt.strength and opt.init_img is not None:
+            switches.append(f'-f{opt.strength or t2i.strength}')
+        if t2i.full_precision:
+            switches.append('-F')
+        return ' '.join(switches)
 
diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 8e8b077922..3b5aaeb696 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -99,13 +99,13 @@ The vast majority of these arguments default to reasonable values.
     def __init__(self,
                  batch_size=1,
                  iterations = 1,
-                 grid=False,
-                 individual=None, # redundant
                  steps=50,
                  seed=None,
                  cfg_scale=7.5,
                  weights="models/ldm/stable-diffusion-v1/model.ckpt",
                  config = "configs/stable-diffusion/v1-inference.yaml",
+                 width=512,
+                 height=512,
                  sampler_name="klms",
                  latent_channels=4,
                  downsampling_factor=8,
@@ -121,7 +121,6 @@ The vast majority of these arguments default to reasonable values.
         self.iterations = iterations
         self.width      = width
         self.height     = height
-        self.grid       = grid
         self.steps      = steps
         self.cfg_scale  = cfg_scale
         self.weights    = weights
@@ -143,25 +142,26 @@ The vast majority of these arguments default to reasonable values.
         else:
             self.seed = seed
 
-    def generate(self,
-                 # these are common
-                 prompt,
-                 batch_size=None,
-                 iterations=None,
-                 steps=None,
-                 seed=None,
-                 cfg_scale=None,
-                 ddim_eta=None,
-                 skip_normalize=False,
-                 image_callback=None,
-                 # these are specific to txt2img
-                 width=None,
-                 height=None,
-                 # these are specific to img2img
-                 init_img=None,
-                 strength=None,
-                 variants=None):
-        '''ldm.generate() is the common entry point for txt2img() and img2img()'''
+    def prompt2image(self,
+                     # these are common
+                     prompt,
+                     batch_size=None,
+                     iterations=None,
+                     steps=None,
+                     seed=None,
+                     cfg_scale=None,
+                     ddim_eta=None,
+                     skip_normalize=False,
+                     image_callback=None,
+                     # these are specific to txt2img
+                     width=None,
+                     height=None,
+                     # these are specific to img2img
+                     init_img=None,
+                     strength=None,
+                     variants=None,
+                     **args):   # eat up additional cruft
+        '''ldm.prompt2image() is the common entry point for txt2img() and img2img()'''
         steps      = steps      or self.steps
         seed       = seed       or self.seed
         width      = width      or self.width
@@ -178,10 +178,6 @@ The vast majority of these arguments default to reasonable values.
 
         data = [batch_size * [prompt]]
         scope = autocast if self.precision=="autocast" else nullcontext
-        if grid:
-            callback = self.image2png
-        else:
-            callback = None
 
         tic    = time.time()
         if init_img:
@@ -212,7 +208,7 @@ The vast majority of these arguments default to reasonable values.
                  steps,seed,cfg_scale,ddim_eta,
                  skip_normalize,
                  width,height,
-                 callback=callback):    # the callback is called each time a new Image is generated
+                 callback):    # the callback is called each time a new Image is generated
         """
         Generate an image from the prompt, writing iteration images into the outdir
         The output is a list of lists in the format: [[image1,seed1], [image2,seed2],...]
@@ -224,14 +220,14 @@ The vast majority of these arguments default to reasonable values.
 
         # Gawd. Too many levels of indent here. Need to refactor into smaller routines!
         try:
-            with precision_scope(self.device.type), model.ema_scope():
+            with precision_scope(self.device.type), self.model.ema_scope():
                 all_samples = list()
                 for n in trange(iterations, desc="Sampling"):
                     seed_everything(seed)
                     for prompts in tqdm(data, desc="data", dynamic_ncols=True):
                         uc = None
                         if cfg_scale != 1.0:
-                            uc = model.get_learned_conditioning(batch_size * [""])
+                            uc = self.model.get_learned_conditioning(batch_size * [""])
                         if isinstance(prompts, tuple):
                             prompts = list(prompts)
 
@@ -247,9 +243,9 @@ The vast majority of these arguments default to reasonable values.
                                 weight = weights[i]
                                 if not skip_normalize:
                                     weight = weight / totalWeight
-                                c = torch.add(c,model.get_learned_conditioning(subprompts[i]), alpha=weight)
+                                c = torch.add(c,self.model.get_learned_conditioning(subprompts[i]), alpha=weight)
                         else: # just standard 1 prompt
-                            c = model.get_learned_conditioning(prompts)
+                            c = self.model.get_learned_conditioning(prompts)
 
                         shape = [self.latent_channels, height // self.downsampling_factor, width // self.downsampling_factor]
                         samples_ddim, _ = sampler.sample(S=steps,
@@ -261,7 +257,7 @@ The vast majority of these arguments default to reasonable values.
                                                          unconditional_conditioning=uc,
                                                          eta=ddim_eta)
 
-                        x_samples_ddim = model.decode_first_stage(samples_ddim)
+                        x_samples_ddim = self.model.decode_first_stage(samples_ddim)
                         x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
                         for x_sample in x_samples_ddim:
                             x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
@@ -277,8 +273,6 @@ The vast majority of these arguments default to reasonable values.
         except RuntimeError as e:
             print(str(e))
 
-        toc = time.time()
-        print(f'{image_count} images generated in',"%4.2fs"% (toc-tic))
         return images
         
     @torch.no_grad()
@@ -297,14 +291,14 @@ The vast majority of these arguments default to reasonable values.
         # PLMS sampler not supported yet, so ignore previous sampler
         if self.sampler_name!='ddim':
             print(f"sampler '{self.sampler_name}' is not yet supported. Using DDM sampler")
-            sampler = DDIMSampler(model, device=self.device)
+            sampler = DDIMSampler(self.model, device=self.device)
         else:
             sampler = self.sampler
 
         init_image = self._load_img(init_img).to(self.device)
         init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
         with precision_scope(self.device.type):
-            init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image))  # move to latent space
+            init_latent = self.model.get_first_stage_encoding(self.model.encode_first_stage(init_image))  # move to latent space
 
         sampler.make_schedule(ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False)
         
@@ -314,14 +308,14 @@ The vast majority of these arguments default to reasonable values.
         images = list()
 
         try:
-            with precision_scope(self.device.type), model.ema_scope():
+            with precision_scope(self.device.type), self.model.ema_scope():
                 all_samples = list()
                 for n in trange(iterations, desc="Sampling"):
                     seed_everything(seed)
                     for prompts in tqdm(data, desc="data", dynamic_ncols=True):
                         uc = None
                         if cfg_scale != 1.0:
-                            uc = model.get_learned_conditioning(batch_size * [""])
+                            uc = self.model.get_learned_conditioning(batch_size * [""])
                         if isinstance(prompts, tuple):
                             prompts = list(prompts)
 
@@ -337,9 +331,9 @@ The vast majority of these arguments default to reasonable values.
                                 weight = weights[i]
                                 if not skip_normalize:
                                     weight = weight / totalWeight
-                                c = torch.add(c,model.get_learned_conditioning(subprompts[i]), alpha=weight)
+                                c = torch.add(c,self.model.get_learned_conditioning(subprompts[i]), alpha=weight)
                         else: # just standard 1 prompt
-                            c = model.get_learned_conditioning(prompts)
+                            c = self.model.get_learned_conditioning(prompts)
 
                         # encode (scaled latent)
                         z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc]*batch_size).to(self.device))
@@ -347,7 +341,7 @@ The vast majority of these arguments default to reasonable values.
                         samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=cfg_scale,
                                                     unconditional_conditioning=uc,)
 
-                        x_samples = model.decode_first_stage(samples)
+                        x_samples = self.model.decode_first_stage(samples)
                         x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
 
                         for x_sample in x_samples:
diff --git a/scripts/dream.py b/scripts/dream.py
index 6ff7802fa2..ab01e8db01 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -6,7 +6,7 @@ import shlex
 import os
 import sys
 import copy
-from ldm.dream_util import Completer,PngWriter
+from ldm.dream_util import Completer,PngWriter,PromptFormatter
 
 debugging = False
 
@@ -27,10 +27,6 @@ def main():
         config  = "configs/stable-diffusion/v1-inference.yaml"
         weights = "models/ldm/stable-diffusion-v1/model.ckpt"
 
-    # command line history will be stored in a file called "~/.dream_history"
-    if readline_available:
-        setup_readline()
-
     print("* Initializing, be patient...\n")
     sys.path.append('.')
     from pytorch_lightning import logging
@@ -46,8 +42,6 @@ def main():
     # the user input loop
     t2i = T2I(width=width,
               height=height,
-              batch_size=opt.batch_size,
-              outdir=opt.outdir,
               sampler_name=opt.sampler_name,
               weights=weights,
               full_precision=opt.full_precision,
@@ -79,13 +73,13 @@ def main():
     log_path   = os.path.join(opt.outdir,'dream_log.txt')
     with open(log_path,'a') as log:
         cmd_parser = create_cmd_parser()
-        main_loop(t2i,cmd_parser,log,infile)
+        main_loop(t2i,opt.outdir,cmd_parser,log,infile)
         log.close()
     if infile:
         infile.close()
 
 
-def main_loop(t2i,parser,log,infile):
+def main_loop(t2i,outdir,parser,log,infile):
     ''' prompt/read/execute loop '''
     done = False
     
@@ -123,13 +117,13 @@ def main_loop(t2i,parser,log,infile):
         if elements[0]=='cd' and len(elements)>1:
             if os.path.exists(elements[1]):
                 print(f"setting image output directory to {elements[1]}")
-                opt.outdir=elements[1]
+                outdir=elements[1]
             else:
                 print(f"directory {elements[1]} does not exist")
             continue
 
         if elements[0]=='pwd':
-            print(f"current output directory is {opt.outdir}")
+            print(f"current output directory is {outdir}")
             continue
         
         if elements[0].startswith('!dream'): # in case a stored prompt still contains the !dream command
@@ -158,88 +152,41 @@ def main_loop(t2i,parser,log,infile):
             print("Try again with a prompt!")
             continue
 
+        normalized_prompt      = PromptFormatter(t2i,opt).normalize_prompt()
         try:
-            file_writer  = PngWriter(opt)
-            opt.callback = file_writer(write_image)
-            run_generator(**vars(opt))
+            file_writer        = PngWriter(outdir,opt,normalized_prompt)
+            callback           = file_writer.write_image
+
+            t2i.prompt2image(image_callback=callback,
+                             **vars(opt))
             results      = file_writer.files_written
+
         except AssertionError as e:
             print(e)
             continue
 
         print("Outputs:")
-        write_log_message(t2i,opt,results,log)
+        write_log_message(t2i,normalized_prompt,results,log)
 
     print("goodbye!")
 
-def write_log_message(t2i,opt,results,logfile):
+def write_log_message(t2i,prompt,results,logfile):
     ''' logs the name of the output image, its prompt and seed to the terminal, log file, and a Dream text chunk in the PNG metadata '''
-    switches = _reconstruct_switches(t2i,opt)
-    prompt_str = ' '.join(switches)
-
-    # when multiple images are produced in batch, then we keep track of where each starts
     last_seed  = None
     img_num    = 1
-    batch_size = opt.batch_size or t2i.batch_size
     seenit     = {}
 
     seeds = [a[1] for a in results]
-    if batch_size > 1:
-        seeds = f"(seeds for each batch row: {seeds})"
-    else:
-        seeds = f"(seeds for individual images: {seeds})"
+    seeds = f"(seeds for individual images: {seeds})"
 
     for r in results:
         seed = r[1]
-        log_message = (f'{r[0]}: {prompt_str} -S{seed}')
+        log_message = (f'{r[0]}: {prompt} -S{seed}')
 
-        if batch_size > 1:
-            if seed != last_seed:
-                img_num = 1
-                log_message += f' # (batch image {img_num} of {batch_size})'
-            else:
-                img_num += 1
-                log_message += f' # (batch image {img_num} of {batch_size})'
-            last_seed = seed
         print(log_message)
         logfile.write(log_message+"\n")
         logfile.flush()
-        if r[0] not in seenit:
-            seenit[r[0]] = True
-            try:
-                if opt.grid:
-                    _write_prompt_to_png(r[0],f'{prompt_str} -g -S{seed} {seeds}')
-                else:
-                    _write_prompt_to_png(r[0],f'{prompt_str} -S{seed}')
-            except FileNotFoundError:
-                print(f"Could not open file '{r[0]}' for reading")
 
-def _reconstruct_switches(t2i,opt):
-    '''Normalize the prompt and switches'''
-    switches = list()
-    switches.append(f'"{opt.prompt}"')
-    switches.append(f'-s{opt.steps        or t2i.steps}')
-    switches.append(f'-b{opt.batch_size   or t2i.batch_size}')
-    switches.append(f'-W{opt.width        or t2i.width}')
-    switches.append(f'-H{opt.height       or t2i.height}')
-    switches.append(f'-C{opt.cfg_scale    or t2i.cfg_scale}')
-    switches.append(f'-m{t2i.sampler_name}')
-    if opt.variants:
-        switches.append(f'-v{opt.variants}')
-    if opt.init_img:
-        switches.append(f'-I{opt.init_img}')
-    if opt.strength and opt.init_img is not None:
-        switches.append(f'-f{opt.strength or t2i.strength}')
-    if t2i.full_precision:
-        switches.append('-F')
-    return switches
-
-def _write_prompt_to_png(path,prompt):
-    info = PngImagePlugin.PngInfo()
-    info.add_text("Dream",prompt)
-    im = Image.open(path)
-    im.save(path,"PNG",pnginfo=info)
-    
 def create_argv_parser():
     parser = argparse.ArgumentParser(description="Parse script's command line args")
     parser.add_argument("--laion400m",
@@ -260,10 +207,6 @@ def create_argv_parser():
                         dest='full_precision',
                         action='store_true',
                         help="use slower full precision math for calculations")
-    parser.add_argument('-b','--batch_size',
-                        type=int,
-                        default=1,
-                        help="number of images to produce per iteration (faster, but doesn't generate individual seeds")
     parser.add_argument('--sampler','-m',
                         dest="sampler_name",
                         choices=['ddim', 'k_dpm_2_a', 'k_dpm_2', 'k_euler_a', 'k_euler', 'k_heun', 'k_lms', 'plms'],

From 269fcf92d9f2dfe699eb1950176eeebbc1df6620 Mon Sep 17 00:00:00 2001
From: tesseractcat <tesseractcats@gmail.com>
Date: Wed, 24 Aug 2022 21:38:47 -0400
Subject: [PATCH 08/58] Reapply prompt config on image click

---
 scripts/dream_web.py      |  2 +-
 scripts/static/index.html | 17 +++++++++++++++--
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/scripts/dream_web.py b/scripts/dream_web.py
index 7c9155d926..a363dd32ad 100644
--- a/scripts/dream_web.py
+++ b/scripts/dream_web.py
@@ -45,7 +45,7 @@ class DreamServer(BaseHTTPRequestHandler):
                                 steps = steps);
         print(f"Prompt generated with output: {outputs}")
 
-        outputs = [x + [prompt] for x in outputs] # Append prompt to each output
+        outputs = [x + [post_data] for x in outputs] # Append config to each output
         result = {'outputs': outputs}
         self.wfile.write(bytes(json.dumps(result), "utf-8"))
 
diff --git a/scripts/static/index.html b/scripts/static/index.html
index d5b27e83ef..6d8697846d 100644
--- a/scripts/static/index.html
+++ b/scripts/static/index.html
@@ -52,12 +52,20 @@
           let output_node = document.createElement("img");
           output_node.src = output[0];
 
-          let alt_text = output[1].toString() + " | " + output[2];
+          let output_config = output[2];
+          let alt_text = output[1].toString() + " | " + output_config.prompt;
           output_node.alt = alt_text;
           output_node.title = alt_text;
-          // Update seed on click
+
+          // Reload image config
           output_node.addEventListener('click', () => {
+              let form = document.querySelector("#generate_form");
+              for (const [k, v] of new FormData(form)) {
+                  form.querySelector(`*[name=${k}]`).value = output_config[k];
+              }
               document.querySelector("#seed").value = output[1];
+
+              save_fields(document.querySelector("#generate_form"));
           });
 
           document.querySelector("#results").prepend(output_node);
@@ -115,6 +123,10 @@
           document.querySelector("#generate_form").addEventListener('change', (e) => {
               save_fields(e.target.form);
           });
+          document.querySelector("#reset").addEventListener('click', (e) => {
+              document.querySelector("#seed").value = -1;
+              save_fields(e.target.form);
+          });
           load_fields(document.querySelector("#generate_form"));
       };
     </script>
@@ -143,6 +155,7 @@
           <span>&bull;</span>
           <label title="Set to -1 for random seed" for="seed">Seed:</label>
           <input value="-1" type="number" id="seed" name="seed">
+          <button type="button" id="reset">&olarr;</button>
         </fieldset>
       </form>
     </div>

From ab131cb55e0b806cbbb451c63d4b0ab562c4296b Mon Sep 17 00:00:00 2001
From: tesseractcat <tesseractcats@gmail.com>
Date: Wed, 24 Aug 2022 23:03:02 -0400
Subject: [PATCH 09/58] Add img2img support, fix naming conventions

---
 scripts/dream_web.py      |  39 +++++++++---
 scripts/static/index.html | 123 +++++++++++++++++++++++---------------
 2 files changed, 105 insertions(+), 57 deletions(-)

diff --git a/scripts/dream_web.py b/scripts/dream_web.py
index a363dd32ad..f56f67f7d5 100644
--- a/scripts/dream_web.py
+++ b/scripts/dream_web.py
@@ -1,4 +1,5 @@
 import json
+import base64
 import os
 from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
 
@@ -28,6 +29,7 @@ class DreamServer(BaseHTTPRequestHandler):
         content_length = int(self.headers['Content-Length'])
         post_data = json.loads(self.rfile.read(content_length))
         prompt = post_data['prompt']
+        initimg = post_data['initimg']
         batch = int(post_data['batch'])
         steps = int(post_data['steps'])
         width = int(post_data['width'])
@@ -35,16 +37,37 @@ class DreamServer(BaseHTTPRequestHandler):
         cfgscale = float(post_data['cfgscale'])
         seed = None if int(post_data['seed']) == -1 else int(post_data['seed'])
 
-        print(f"Request to generate with data: {post_data}")
-        outputs = model.txt2img(prompt,
-                                batch_size = batch,
-                                cfg_scale = cfgscale,
-                                width = width,
-                                height = height,
-                                seed = seed,
-                                steps = steps);
+        print(f"Request to generate with prompt: {prompt}")
+
+        outputs = []
+        if initimg is None:
+            # Run txt2img
+            outputs = model.txt2img(prompt,
+                                    batch_size = batch,
+                                    cfg_scale = cfgscale,
+                                    width = width,
+                                    height = height,
+                                    seed = seed,
+                                    steps = steps)
+        else:
+            # Decode initimg as base64 to temp file
+            with open("./img2img-tmp.png", "wb") as f:
+                initimg = initimg.split(",")[1] # Ignore mime type
+                f.write(base64.b64decode(initimg))
+
+                # Run img2img
+                outputs = model.img2img(prompt,
+                                        init_img = "./img2img-tmp.png",
+                                        batch_size = batch,
+                                        cfg_scale = cfgscale,
+                                        seed = seed,
+                                        steps = steps)
+            # Remove the temp file
+            os.remove("./img2img-tmp.png")
+
         print(f"Prompt generated with output: {outputs}")
 
+        post_data['initimg'] = '' # Don't send init image back
         outputs = [x + [post_data] for x in outputs] # Append config to each output
         result = {'outputs': outputs}
         self.wfile.write(bytes(json.dumps(result), "utf-8"))
diff --git a/scripts/static/index.html b/scripts/static/index.html
index 6d8697846d..663a32ed12 100644
--- a/scripts/static/index.html
+++ b/scripts/static/index.html
@@ -40,6 +40,9 @@
           border-radius: 5px;
           margin: 10px;
       }
+      #generate-config {
+          line-height:2em;
+      }
       input[type="number"] {
           width: 60px;
       }
@@ -48,41 +51,52 @@
       }
     </style>
     <script>
-      function append_output(output) {
-          let output_node = document.createElement("img");
-          output_node.src = output[0];
+      function toBase64(file) {
+          return new Promise((resolve, reject) => {
+              const r = new FileReader();
+              r.readAsDataURL(file);
+              r.onload = () => resolve(r.result);
+              r.onerror = (error) => reject(error);
+          });
+      }
 
-          let output_config = output[2];
-          let alt_text = output[1].toString() + " | " + output_config.prompt;
-          output_node.alt = alt_text;
-          output_node.title = alt_text;
+      function appendOutput(output) {
+          let outputNode = document.createElement("img");
+          outputNode.src = output[0];
+
+          let outputConfig = output[2];
+          let altText = output[1].toString() + " | " + outputConfig.prompt;
+          outputNode.alt = altText;
+          outputNode.title = altText;
 
           // Reload image config
-          output_node.addEventListener('click', () => {
-              let form = document.querySelector("#generate_form");
+          outputNode.addEventListener('click', () => {
+              let form = document.querySelector("#generate-form");
               for (const [k, v] of new FormData(form)) {
-                  form.querySelector(`*[name=${k}]`).value = output_config[k];
+                  form.querySelector(`*[name=${k}]`).value = outputConfig[k];
               }
               document.querySelector("#seed").value = output[1];
 
-              save_fields(document.querySelector("#generate_form"));
+              saveFields(document.querySelector("#generate-form"));
           });
 
-          document.querySelector("#results").prepend(output_node);
+          document.querySelector("#results").prepend(outputNode);
       }
 
-      function append_outputs(outputs) {
+      function appendOutputs(outputs) {
           for (const output of outputs) {
-              append_output(output);
+              appendOutput(output);
           }
       }
 
-      function save_fields(form) {
+      function saveFields(form) {
           for (const [k, v] of new FormData(form)) {
-              localStorage.setItem(k, v);
+              if (typeof v !== 'object') { // Don't save 'file' type
+                  localStorage.setItem(k, v);
+              }
           }
       }
-      function load_fields(form) {
+      function loadFields(form) {
           for (const [k, v] of new FormData(form)) {
               const item = localStorage.getItem(k);
               if (item != null) {
@@ -90,44 +104,53 @@
               }
           }
       }
+
+      async function generateSubmit(form) {
+          const prompt = document.querySelector("#prompt").value;
+
+          // Convert file data to base64
+          let formData = Object.fromEntries(new FormData(form));
+          formData.initimg = formData.initimg.name !== '' ? await toBase64(formData.initimg) : null;
+
+          // Post as JSON
+          fetch(form.action, {
+              method: form.method,
+              body: JSON.stringify(formData),
+          }).then(async (result) => {
+              let data = await result.json();
+
+              // Re-enable form, remove no-results-message
+              form.querySelector('fieldset').removeAttribute('disabled');
+              document.querySelector("#prompt").value = prompt;
+
+              if (data.outputs.length != 0) {
+                  document.querySelector("#no-results-message")?.remove();
+                  appendOutputs(data.outputs);
+              } else {
+                  alert("Error occurred while generating.");
+              }
+          });
+
+          // Disable form while generating
+          form.querySelector('fieldset').setAttribute('disabled','');
+          document.querySelector("#prompt").value = `Generating: "${prompt}"`;
+      }
       
       window.onload = () => {
-          document.querySelector("#generate_form").addEventListener('submit', (e) => {
+          document.querySelector("#generate-form").addEventListener('submit', (e) => {
               e.preventDefault();
               const form = e.target;
-              const prompt = document.querySelector("#prompt").value;
 
-              // Post as JSON
-              fetch(form.action, {
-                  method: form.method,
-                  body: JSON.stringify(Object.fromEntries(new FormData(form))),
-              }).then((result) => {
-                  result.json().then((data) => {
-                      // Re-enable form, remove no-results-message
-                      form.querySelector('fieldset').removeAttribute('disabled');
-                      document.querySelector("#prompt").value = prompt;
-
-                      if (data.outputs.length != 0) {
-                          document.querySelector("#no_results_message")?.remove();
-                          append_outputs(data.outputs);
-                      } else {
-                          alert("Error occurred while generating.");
-                      }
-                  });
-              });
-
-              // Disable form
-              form.querySelector('fieldset').setAttribute('disabled','');
-              document.querySelector("#prompt").value = `Generating: "${prompt}"`;
+              generateSubmit(form);
           });
-          document.querySelector("#generate_form").addEventListener('change', (e) => {
-              save_fields(e.target.form);
+          document.querySelector("#generate-form").addEventListener('change', (e) => {
+              saveFields(e.target.form);
           });
           document.querySelector("#reset").addEventListener('click', (e) => {
               document.querySelector("#seed").value = -1;
-              save_fields(e.target.form);
+              saveFields(e.target.form);
           });
-          load_fields(document.querySelector("#generate_form"));
+          loadFields(document.querySelector("#generate-form"));
       };
     </script>
   </head>
@@ -135,12 +158,12 @@
     <div id="search">
       <h2 id="header">Stable Diffusion</h2>
 
-      <form id="generate_form" method="post" action="#">
+      <form id="generate-form" method="post" action="#">
         <fieldset>
           <input type="text" id="prompt" name="prompt">
           <input type="submit" id="submit" value="Generate">
         </fieldset>
-        <fieldset>
+        <fieldset id="generate-config">
           <label for="batch">Batch Size:</label>
           <input value="1" type="number" id="batch" name="batch">
           <label for="steps">Steps:</label>
@@ -152,7 +175,9 @@
           <input value="512" type="number" id="width" name="width">
           <label title="Set to multiple of 64" for="height">Height:</label>
           <input value="512" type="number" id="height" name="height">
-          <span>&bull;</span>
+          <br>
+          <label title="Upload an image to use img2img" for="initimg">Img2Img Init:</label>
+          <input type="file" id="initimg" name="initimg" accept=".jpg, .jpeg, .png">
           <label title="Set to -1 for random seed" for="seed">Seed:</label>
           <input value="-1" type="number" id="seed" name="seed">
           <button type="button" id="reset">&olarr;</button>
@@ -161,7 +186,7 @@
     </div>
     <hr style="width: 200px">
     <div id="results">
-      <div id="no_results_message">
+      <div id="no-results-message">
         <i><p>No results...</p></i>
       </div>
     </div>

From 51b9a1d8d3622a9e6a0866241a863bb16afecf09 Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Wed, 24 Aug 2022 23:55:31 -0400
Subject: [PATCH 10/58] Update readme.md

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index a9b03362e2..74deb52d83 100644
--- a/README.md
+++ b/README.md
@@ -172,8 +172,8 @@ repository and associated paper for details and limitations.
 
 * v1.09 (24 August 2022)
    * A new -v option allows you to generate multiple variants of an initial image
-     in img2img mode. (kudos to Oceanswave)
-   * Added ability to personalize text to image generation (kudos to nicolai256)
+     in img2img mode. (kudos to @Oceanswave. [See this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
+   * Added ability to personalize text to image generation (kudos to @Oceanswave and @nicolai256)
    * Enabled all of the samplers from k_diffusion
    
 * v1.08 (24 August 2022)

From 29727f3e12fe918cab2adfa947bf990c399136f5 Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Wed, 24 Aug 2022 23:59:37 -0400
Subject: [PATCH 11/58] Fix contributor credit links and attribution in README

---
 README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 74deb52d83..a14d3dc18c 100644
--- a/README.md
+++ b/README.md
@@ -172,8 +172,8 @@ repository and associated paper for details and limitations.
 
 * v1.09 (24 August 2022)
    * A new -v option allows you to generate multiple variants of an initial image
-     in img2img mode. (kudos to @Oceanswave. [See this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
-   * Added ability to personalize text to image generation (kudos to @Oceanswave and @nicolai256)
+     in img2img mode. (kudos to [Oceanswave](https://github.com/Oceanswave). [See this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
+   * Added ability to personalize text to image generation (kudos to [Oceanswave](https://github.com/Oceanswave) and [nicolai256](https://github.com/nicolai256))
    * Enabled all of the samplers from k_diffusion
    
 * v1.08 (24 August 2022)
@@ -457,8 +457,8 @@ to send me an email if you use and like the script.
 
 *Contributions by:* 
 [Peter Kowalczyk](https://github.com/slix), [Henry Harrison](https://github.com/hwharrison),
-[xraxra](https://github.com/xraxra), [bmaltais](https://github.com/bmaltais), [Sean McLellan] (https://github.com/Oceanswave],
-[nicolai256](https://github.com/nicolai256], [Benjamin Warner](https://github.com/warner-benjamin),
+[xraxra](https://github.com/xraxra), [bmaltais](https://github.com/bmaltais), [Sean McLellan] (https://github.com/Oceanswave),
+[nicolai256](https://github.com/nicolai256), [Benjamin Warner](https://github.com/warner-benjamin),
 and [tildebyte](https://github.com/tildebyte)
 
 Original portions of the software are Copyright (c) 2020 Lincoln D. Stein (https://github.com/lstein)

From c521ac08eea928c99d2122904a8d0ea1822922af Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Thu, 25 Aug 2022 00:00:39 -0400
Subject: [PATCH 12/58] Fix malformed Markdown link for Sean McLellan in README

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index a14d3dc18c..a3347bdb4d 100644
--- a/README.md
+++ b/README.md
@@ -457,7 +457,7 @@ to send me an email if you use and like the script.
 
 *Contributions by:* 
 [Peter Kowalczyk](https://github.com/slix), [Henry Harrison](https://github.com/hwharrison),
-[xraxra](https://github.com/xraxra), [bmaltais](https://github.com/bmaltais), [Sean McLellan] (https://github.com/Oceanswave),
+[xraxra](https://github.com/xraxra), [bmaltais](https://github.com/bmaltais), [Sean McLellan](https://github.com/Oceanswave),
 [nicolai256](https://github.com/nicolai256), [Benjamin Warner](https://github.com/warner-benjamin),
 and [tildebyte](https://github.com/tildebyte)
 

From 0b4459b7074060314aaafe2c142370d77ac56a46 Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 00:42:37 -0400
Subject: [PATCH 13/58] mostly back to full functionality; just missing grid
 generation code

---
 ldm/dream_util.py | 171 ++++++++++++++++++++++----------------------
 ldm/simplet2i.py  | 175 ++++++++++++++++++++++++++++++++--------------
 scripts/dream.py  |  37 +++++++++-
 3 files changed, 245 insertions(+), 138 deletions(-)

diff --git a/ldm/dream_util.py b/ldm/dream_util.py
index ceab2940b1..a1d0d3204b 100644
--- a/ldm/dream_util.py
+++ b/ldm/dream_util.py
@@ -4,6 +4,92 @@ import atexit
 import re
 from PIL import Image,PngImagePlugin
 
+# -------------------image generation utils-----
+class PngWriter:
+
+    def __init__(self,outdir,prompt=None,batch_size=1):
+        self.outdir           = outdir
+        self.batch_size       = batch_size
+        self.prompt           = prompt
+        self.filepath         = None
+        self.files_written    = []
+        os.makedirs(outdir, exist_ok=True)
+
+    def write_image(self,image,seed):
+        self.filepath = self.unique_filename(seed,self.filepath) # will increment name in some sensible way
+        try:
+            prompt = f'{self.prompt} -S{seed}'
+            self.save_image_and_prompt_to_png(image,prompt,self.filepath)
+        except IOError as e:
+            print(e)
+        self.files_written.append([self.filepath,seed])
+
+    def unique_filename(self,seed,previouspath):
+        revision = 1
+
+        if previouspath is None:
+            # sort reverse alphabetically until we find max+1
+            dirlist   = sorted(os.listdir(self.outdir),reverse=True)
+            # find the first filename that matches our pattern or return 000000.0.png
+            filename   = next((f for f in dirlist if re.match('^(\d+)\..*\.png',f)),'0000000.0.png')
+            basecount  = int(filename.split('.',1)[0])
+            basecount += 1
+            if self.batch_size > 1:
+                filename = f'{basecount:06}.{seed}.01.png'
+            else:
+                filename = f'{basecount:06}.{seed}.png'
+            return os.path.join(self.outdir,filename)
+
+        else:
+            basename = os.path.basename(previouspath)
+            x = re.match('^(\d+)\..*\.png',basename)
+            if not x:
+                return self.unique_filename(seed,previouspath)
+
+            basecount = int(x.groups()[0])
+            series = 0 
+            finished = False
+            while not finished:
+                series += 1
+                filename = f'{basecount:06}.{seed}.png'
+                if self.batch_size>1 or os.path.exists(os.path.join(self.outdir,filename)):
+                    filename = f'{basecount:06}.{seed}.{series:02}.png'
+                finished = not os.path.exists(os.path.join(self.outdir,filename))
+            return os.path.join(self.outdir,filename)
+
+    def save_image_and_prompt_to_png(self,image,prompt,path):
+        info = PngImagePlugin.PngInfo()
+        info.add_text("Dream",prompt)
+        image.save(path,"PNG",pnginfo=info)
+    
+class PromptFormatter():
+    def __init__(self,t2i,opt):
+        self.t2i = t2i
+        self.opt = opt
+
+    def normalize_prompt(self):
+        '''Normalize the prompt and switches'''
+        t2i      = self.t2i
+        opt      = self.opt
+
+        switches = list()
+        switches.append(f'"{opt.prompt}"')
+        switches.append(f'-s{opt.steps        or t2i.steps}')
+        switches.append(f'-b{opt.batch_size   or t2i.batch_size}')
+        switches.append(f'-W{opt.width        or t2i.width}')
+        switches.append(f'-H{opt.height       or t2i.height}')
+        switches.append(f'-C{opt.cfg_scale    or t2i.cfg_scale}')
+        switches.append(f'-m{t2i.sampler_name}')
+        if opt.variants:
+            switches.append(f'-v{opt.variants}')
+        if opt.init_img:
+            switches.append(f'-I{opt.init_img}')
+        if opt.strength and opt.init_img is not None:
+            switches.append(f'-f{opt.strength or t2i.strength}')
+        if t2i.full_precision:
+            switches.append('-F')
+        return ' '.join(switches)
+
 # ---------------readline utilities---------------------
 try:
     import readline
@@ -92,88 +178,3 @@ if readline_available:
         pass
     atexit.register(readline.write_history_file,histfile)
 
-# -------------------image generation utils-----
-class PngWriter:
-
-    def __init__(self,outdir,opt,prompt):
-        self.outdir           = outdir
-        self.opt              = opt
-        self.prompt           = prompt
-        self.filepath         = None
-        self.files_written    = []
-
-    def write_image(self,image,seed):
-        self.filepath = self.unique_filename(seed,self.filepath) # will increment name in some sensible way
-        try:
-            prompt = f'{self.prompt} -S{seed}'
-            self.save_image_and_prompt_to_png(image,prompt,self.filepath)
-        except IOError as e:
-            print(e)
-        self.files_written.append([self.filepath,seed])
-
-    def unique_filename(self,seed,previouspath):
-        revision = 1
-
-        if previouspath is None:
-            # sort reverse alphabetically until we find max+1
-            dirlist   = sorted(os.listdir(self.outdir),reverse=True)
-            # find the first filename that matches our pattern or return 000000.0.png
-            filename   = next((f for f in dirlist if re.match('^(\d+)\..*\.png',f)),'0000000.0.png')
-            basecount  = int(filename.split('.',1)[0])
-            basecount += 1
-            if self.opt.batch_size > 1:
-                filename = f'{basecount:06}.{seed}.01.png'
-            else:
-                filename = f'{basecount:06}.{seed}.png'
-            return os.path.join(self.outdir,filename)
-
-        else:
-            basename = os.path.basename(previouspath)
-            x = re.match('^(\d+)\..*\.png',basename)
-            if not x:
-                return self.unique_filename(seed,previouspath)
-
-            basecount = int(x.groups()[0])
-            series = 0 
-            finished = False
-            while not finished:
-                series += 1
-                filename = f'{basecount:06}.{seed}.png'
-                if self.opt.batch_size>1 or os.path.exists(os.path.join(self.outdir,filename)):
-                    filename = f'{basecount:06}.{seed}.{series:02}.png'
-                finished = not os.path.exists(os.path.join(self.outdir,filename))
-            return os.path.join(self.outdir,filename)
-
-    def save_image_and_prompt_to_png(self,image,prompt,path):
-        info = PngImagePlugin.PngInfo()
-        info.add_text("Dream",prompt)
-        image.save(path,"PNG",pnginfo=info)
-    
-class PromptFormatter():
-    def __init__(self,t2i,opt):
-        self.t2i = t2i
-        self.opt = opt
-
-    def normalize_prompt(self):
-        '''Normalize the prompt and switches'''
-        t2i      = self.t2i
-        opt      = self.opt
-
-        switches = list()
-        switches.append(f'"{opt.prompt}"')
-        switches.append(f'-s{opt.steps        or t2i.steps}')
-        switches.append(f'-b{opt.batch_size   or t2i.batch_size}')
-        switches.append(f'-W{opt.width        or t2i.width}')
-        switches.append(f'-H{opt.height       or t2i.height}')
-        switches.append(f'-C{opt.cfg_scale    or t2i.cfg_scale}')
-        switches.append(f'-m{t2i.sampler_name}')
-        if opt.variants:
-            switches.append(f'-v{opt.variants}')
-        if opt.init_img:
-            switches.append(f'-I{opt.init_img}')
-        if opt.strength and opt.init_img is not None:
-            switches.append(f'-f{opt.strength or t2i.strength}')
-        if t2i.full_precision:
-            switches.append('-F')
-        return ' '.join(switches)
-
diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 3b5aaeb696..91b73def43 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -4,52 +4,6 @@
 # Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
 # Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors
 
-
-"""Simplified text to image API for stable diffusion/latent diffusion
-
-Example Usage:
-
-from ldm.simplet2i import T2I
-# Create an object with default values
-t2i = T2I(outdir      = <path>        // outputs/txt2img-samples
-          model       = <path>        // models/ldm/stable-diffusion-v1/model.ckpt
-          config      = <path>        // default="configs/stable-diffusion/v1-inference.yaml
-          iterations  = <integer>     // how many times to run the sampling (1)
-          batch_size       = <integer>     // how many images to generate per sampling (1)
-          steps       = <integer>     // 50
-          seed        = <integer>     // current system time
-          sampler_name= ['ddim', 'k_dpm_2_a', 'k_dpm_2', 'k_euler_a', 'k_euler', 'k_heun', 'k_lms', 'plms']  // k_lms
-          grid        = <boolean>     // false
-          width       = <integer>     // image width, multiple of 64 (512)
-          height      = <integer>     // image height, multiple of 64 (512)
-          cfg_scale   = <float>       // unconditional guidance scale (7.5)
-          )
-
-# do the slow model initialization
-t2i.load_model()
-
-# Do the fast inference & image generation. Any options passed here 
-# override the default values assigned during class initialization
-# Will call load_model() if the model was not previously loaded.
-# The method returns a list of images. Each row of the list is a sub-list of [filename,seed]
-results = t2i.txt2img(prompt = "an astronaut riding a horse"
-                      outdir = "./outputs/txt2img-samples)
-            )
-
-for row in results:
-    print(f'filename={row[0]}')
-    print(f'seed    ={row[1]}')
-
-# Same thing, but using an initial image.
-results = t2i.img2img(prompt   = "an astronaut riding a horse"
-                      outdir   = "./outputs/img2img-samples"
-                      init_img = "./sketches/horse+rider.png")
-                 
-for row in results:
-    print(f'filename={row[0]}')
-    print(f'seed    ={row[1]}')
-"""
-
 import torch
 import numpy as np
 import random
@@ -64,6 +18,7 @@ from torchvision.utils import make_grid
 from pytorch_lightning import seed_everything
 from torch import autocast
 from contextlib import contextmanager, nullcontext
+import transformers
 import time
 import math
 import re
@@ -73,6 +28,69 @@ from ldm.util import instantiate_from_config
 from ldm.models.diffusion.ddim     import DDIMSampler
 from ldm.models.diffusion.plms     import PLMSSampler
 from ldm.models.diffusion.ksampler import KSampler
+from ldm.dream_util                import PngWriter
+
+"""Simplified text to image API for stable diffusion/latent diffusion
+
+Example Usage:
+
+from ldm.simplet2i import T2I
+
+# Create an object with default values
+t2i = T2I(model       = <path>        // models/ldm/stable-diffusion-v1/model.ckpt
+          config      = <path>        // configs/stable-diffusion/v1-inference.yaml
+          iterations  = <integer>     // how many times to run the sampling (1)
+          batch_size  = <integer>     // how many images to generate per sampling (1)
+          steps       = <integer>     // 50
+          seed        = <integer>     // current system time
+          sampler_name= ['ddim', 'k_dpm_2_a', 'k_dpm_2', 'k_euler_a', 'k_euler', 'k_heun', 'k_lms', 'plms']  // k_lms
+          grid        = <boolean>     // false
+          width       = <integer>     // image width, multiple of 64 (512)
+          height      = <integer>     // image height, multiple of 64 (512)
+          cfg_scale   = <float>       // unconditional guidance scale (7.5)
+          )
+
+# do the slow model initialization
+t2i.load_model()
+
+# Do the fast inference & image generation. Any options passed here 
+# override the default values assigned during class initialization
+# Will call load_model() if the model was not previously loaded and so
+# may be slow at first.
+# The method returns a list of images. Each row of the list is a sub-list of [filename,seed]
+results = t2i.prompt2png(prompt     = "an astronaut riding a horse",
+                         outdir     = "./outputs/samples",
+                         iterations = 3)
+
+for row in results:
+    print(f'filename={row[0]}')
+    print(f'seed    ={row[1]}')
+
+# Same thing, but using an initial image.
+results = t2i.prompt2png(prompt   = "an astronaut riding a horse",
+                         outdir   = "./outputs/",
+                         iterations = 3,
+                         init_img = "./sketches/horse+rider.png")
+                 
+for row in results:
+    print(f'filename={row[0]}')
+    print(f'seed    ={row[1]}')
+
+# Same thing, but we return a series of Image objects, which lets you manipulate them,
+# combine them, and save them under arbitrary names
+
+results = t2i.prompt2image(prompt   = "an astronaut riding a horse",
+                           outdir   = "./outputs/")
+for row in results:
+    im   = row[0]
+    seed = row[1]
+    im.save(f'./outputs/samples/an_astronaut_riding_a_horse-{seed}.png')
+    im.thumbnail((100, 100)); im.save('./outputs/samples/astronaut_thumb.jpg')
+
+Note that the old txt2img() and img2img() calls are deprecated but will
+still work.
+"""
+
 
 class T2I:
     """T2I class
@@ -141,7 +159,30 @@ The vast majority of these arguments default to reasonable values.
             self.seed = self._new_seed()
         else:
             self.seed = seed
+        transformers.logging.set_verbosity_error()
 
+    def prompt2png(self,prompt,outdir,**kwargs):
+        '''
+        Takes a prompt and an output directory, writes out the requested number
+        of PNG files, and returns an array of [[filename,seed],[filename,seed]...]
+        Optional named arguments are the same as those passed to T2I and prompt2image()
+        '''
+        results      = self.prompt2image(prompt,**kwargs)
+        pngwriter    = PngWriter(outdir,prompt,kwargs.get('batch_size',self.batch_size))
+        for r in results:
+            metadata_str = f'prompt2png("{prompt}" {kwargs} seed={r[1]}'   # gets written into the PNG
+            pngwriter.write_image(r[0],r[1])
+        return pngwriter.files_written
+
+    def txt2img(self,prompt,**kwargs):
+        outdir = kwargs.get('outdir','outputs/img-samples')
+        return self.prompt2png(prompt,outdir,**kwargs)
+
+    def img2img(self,prompt,**kwargs):
+        outdir = kwargs.get('outdir','outputs/img-samples')
+        assert 'init_img' in kwargs,'call to img2img() must include the init_img argument'
+        return self.prompt2png(prompt,outdir,**kwargs)
+        
     def prompt2image(self,
                      # these are common
                      prompt,
@@ -161,7 +202,34 @@ The vast majority of these arguments default to reasonable values.
                      strength=None,
                      variants=None,
                      **args):   # eat up additional cruft
-        '''ldm.prompt2image() is the common entry point for txt2img() and img2img()'''
+        '''
+        ldm.prompt2image() is the common entry point for txt2img() and img2img()
+        It takes the following arguments:
+           prompt                          // prompt string (no default)
+           iterations                      // iterations (1); image count=iterations x batch_size
+           batch_size                      // images per iteration (1)
+           steps                           // refinement steps per iteration
+           seed                            // seed for random number generator
+           width                           // width of image, in multiples of 64 (512)
+           height                          // height of image, in multiples of 64 (512)
+           cfg_scale                       // how strongly the prompt influences the image (7.5) (must be >1)
+           init_img                        // path to an initial image - its dimensions override width and height
+           strength                        // strength for noising/unnoising init_img. 0.0 preserves image exactly, 1.0 replaces it completely
+           ddim_eta                        // image randomness (eta=0.0 means the same seed always produces the same image)
+           variants                        // if >0, the 1st generated image will be passed back to img2img to generate the requested number of variants
+           callback                        // a function or method that will be called each time an image is generated
+
+        To use the callback, define a function or method that receives two arguments, an Image object
+        and the seed. You can then do whatever you like with the image, including converting it to 
+        different formats and manipulating it. For example:
+
+            def process_image(image,seed):
+                image.save(f'images/{seed}.png')
+
+        The callback used by the prompt2png() can be found in ldm/dream_util.py. It contains code
+        to create the requested output directory, select a unique informative name for each image, and
+        write the prompt into the PNG metadata.
+        '''
         steps      = steps      or self.steps
         seed       = seed       or self.seed
         width      = width      or self.width
@@ -175,6 +243,12 @@ The vast majority of these arguments default to reasonable values.
         model = self.load_model()  # will instantiate the model or return it from cache
         assert cfg_scale>1.0, "CFG_Scale (-C) must be >1.0"
         assert 0. <= strength <= 1., 'can only work with strength in [0.0, 1.0]'
+        w = int(width/64)  * 64
+        h = int(height/64) * 64
+        if h != height or w != width:
+            print(f'Height and width must be multiples of 64. Resizing to {h}x{w}')
+            height = h
+            width  = w
 
         data = [batch_size * [prompt]]
         scope = autocast if self.precision=="autocast" else nullcontext
@@ -303,8 +377,7 @@ The vast majority of these arguments default to reasonable values.
         sampler.make_schedule(ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False)
         
         t_enc = int(strength * steps)
-        print(f"target t_enc is {t_enc} steps")
-
+        # print(f"target t_enc is {t_enc} steps")
         images = list()
 
         try:
@@ -408,8 +481,8 @@ The vast majority of these arguments default to reasonable values.
     def _load_model_from_config(self, config, ckpt):
         print(f"Loading model from {ckpt}")
         pl_sd = torch.load(ckpt, map_location="cpu")
-        if "global_step" in pl_sd:
-            print(f"Global Step: {pl_sd['global_step']}")
+#        if "global_step" in pl_sd:
+#            print(f"Global Step: {pl_sd['global_step']}")
         sd = pl_sd["state_dict"]
         model = instantiate_from_config(config.model)
         m, u = model.load_state_dict(sd, strict=False)
diff --git a/scripts/dream.py b/scripts/dream.py
index ab01e8db01..10acccbfc3 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -153,25 +153,58 @@ def main_loop(t2i,outdir,parser,log,infile):
             continue
 
         normalized_prompt      = PromptFormatter(t2i,opt).normalize_prompt()
+        variants               = None
+
         try:
-            file_writer        = PngWriter(outdir,opt,normalized_prompt)
+            file_writer        = PngWriter(outdir,normalized_prompt,opt.batch_size)
             callback           = file_writer.write_image
 
             t2i.prompt2image(image_callback=callback,
                              **vars(opt))
             results      = file_writer.files_written
 
+            if None not in (opt.variants,opt.init_img):
+                variants = generate_variants(t2i,outdir,opt,results)
+
         except AssertionError as e:
             print(e)
             continue
 
         print("Outputs:")
         write_log_message(t2i,normalized_prompt,results,log)
+        if variants is not None:
+            print('Variants:')
+            for vr in variants:
+                write_log_message(t2i,vr[0],vr[1],log)
 
     print("goodbye!")
 
+def generate_variants(t2i,outdir,opt,previous_gens):
+    variants = []
+    print(f"Generating {opt.variants} variant(s)...")
+    newopt = copy.deepcopy(opt)
+    newopt.iterations = 1
+    newopt.variants   = None
+    for r in previous_gens:
+        newopt.init_img = r[0]
+        prompt            = PromptFormatter(t2i,newopt).normalize_prompt()
+        print(f"] generating variant for {newopt.init_img}")
+        for j in range(0,opt.variants):
+            try:
+                file_writer        = PngWriter(outdir,prompt,newopt.batch_size)
+                callback           = file_writer.write_image
+                t2i.prompt2image(image_callback=callback,**vars(newopt))
+                results           = file_writer.files_written
+                variants.append([prompt,results])
+            except AssertionError as e:
+                print(e)
+                continue
+    print(f'{opt.variants} variants generated')
+    return variants
+                
+
 def write_log_message(t2i,prompt,results,logfile):
-    ''' logs the name of the output image, its prompt and seed to the terminal, log file, and a Dream text chunk in the PNG metadata '''
+    ''' logs the name of the output image, its prompt and seed to the terminal, log file, and a Dream text chunk in the PNG metadata'''
     last_seed  = None
     img_num    = 1
     seenit     = {}

From 72a9d7533091aa2af66f78d81ed5bf52b3e8d731 Mon Sep 17 00:00:00 2001
From: tesseractcat <tesseractcats@gmail.com>
Date: Thu, 25 Aug 2022 01:25:22 -0400
Subject: [PATCH 14/58] 404 on missing file

---
 scripts/dream_web.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/scripts/dream_web.py b/scripts/dream_web.py
index f56f67f7d5..41bc9a9029 100644
--- a/scripts/dream_web.py
+++ b/scripts/dream_web.py
@@ -9,17 +9,20 @@ model = T2I()
 
 class DreamServer(BaseHTTPRequestHandler):
     def do_GET(self):
-        self.send_response(200)
         if self.path == "/":
+            self.send_response(200)
             self.send_header("Content-type", "text/html")
             self.end_headers()
             with open("./scripts/static/index.html", "rb") as content:
                 self.wfile.write(content.read())
-        else:
+        elif os.path.exists("." + self.path):
+            self.send_response(200)
             self.send_header("Content-type", "image/png")
             self.end_headers()
             with open("." + self.path, "rb") as content:
                 self.wfile.write(content.read())
+        else:
+            self.send_response(404)
 
     def do_POST(self):
         self.send_response(200)

From eb58276a2c11f1cbdba1eb7133d4d9404118b7fd Mon Sep 17 00:00:00 2001
From: JigenD <jidaisuke@comcast.net>
Date: Thu, 25 Aug 2022 08:34:51 -0400
Subject: [PATCH 15/58] fix VRAM utilization

---
 ldm/simplet2i.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 4737d90ba7..b5dc775c73 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -494,6 +494,7 @@ The vast majority of these arguments default to reasonable values.
         sd = pl_sd["state_dict"]
         model = instantiate_from_config(config.model)
         m, u = model.load_state_dict(sd, strict=False)
+        model.cuda()
         model.eval()
         if self.full_precision:
             print('Using slower but more accurate full-precision math (--full_precision)')

From 49247b4aa4f65f98a903b8ad47932d35b940e1fb Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 09:41:12 -0400
Subject: [PATCH 16/58] fix performance regression; closes issue #42

---
 ldm/simplet2i.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 91b73def43..a3b4ecfcc7 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -486,6 +486,7 @@ The vast majority of these arguments default to reasonable values.
         sd = pl_sd["state_dict"]
         model = instantiate_from_config(config.model)
         m, u = model.load_state_dict(sd, strict=False)
+        model.cuda() # fixes performance issue
         model.eval()
         if self.full_precision:
             print('Using slower but more accurate full-precision math (--full_precision)')

From 26dc05e0e07e1fb93c8988832bf99cdb08f9d195 Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 09:47:27 -0400
Subject: [PATCH 17/58] document --from_file flag, closes issue #82

---
 README.md | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/README.md b/README.md
index 22c6447248..28a610db8c 100644
--- a/README.md
+++ b/README.md
@@ -88,6 +88,26 @@ You may also pass a -v<count> option to generate count variants on the original
 passing the first generated image back into img2img the requested number of times. It generates interesting
 variants.
 
+## Reading Prompts from a File
+
+You can automate dream.py by providing a text file with the prompts
+you want to run, one line per prompt. The text file must be composed
+with a text editor (e.g. Notepad) and not a word processor. Each line
+should look like what you would type at the dream> prompt:
+
+~~~~
+a beautiful sunny day in the park, children playing -n4 -C10
+stormy weather on a mountain top, goats grazing     -s100
+innovative packaging for a squid's dinner           -S137038382
+~~~~
+
+Then pass this file's name to dream.py when you invoke it:
+
+~~~~
+(ldm) ~/stable-diffusion$ python3 scripts/dream.py --from_file="path/to/prompts.txt"
+~~~~
+
+
 ## Weighted Prompts
 
 You may weight different sections of the prompt to tell the sampler to attach different levels of

From 1c8ecacddf4d8c8125ce8edbda5f7047c9cc6de8 Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 10:43:05 -0400
Subject: [PATCH 18/58] remove src directory, which is gumming up conda
 installs; addresses issue #77

---
 .gitmodules             | 13 -------------
 src/clip                |  1 -
 src/k-diffusion         |  1 -
 src/taming-transformers |  1 -
 4 files changed, 16 deletions(-)
 delete mode 160000 src/clip
 delete mode 160000 src/k-diffusion
 delete mode 160000 src/taming-transformers

diff --git a/.gitmodules b/.gitmodules
index c8f0748436..e69de29bb2 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,13 +0,0 @@
-[submodule "taming-transformers"]
-	   path = src/taming-transformers
-	   url  = https://github.com/CompVis/taming-transformers.git
-	   ignore = dirty
-[submodule "clip"]
-	   path = src/clip
-	   url  = https://github.com/openai/CLIP.git
-	   ignore = dirty
-[submodule "k-diffusion"]
-	   path = src/k-diffusion
-	   url  = https://github.com/lstein/k-diffusion.git
-	   ignore = dirty
-	   
\ No newline at end of file
diff --git a/src/clip b/src/clip
deleted file mode 160000
index d50d76daa6..0000000000
--- a/src/clip
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit d50d76daa670286dd6cacf3bcd80b5e4823fc8e1
diff --git a/src/k-diffusion b/src/k-diffusion
deleted file mode 160000
index ef1bf07627..0000000000
--- a/src/k-diffusion
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit ef1bf07627c9a10ba9137e68a0206b844544a7d9
diff --git a/src/taming-transformers b/src/taming-transformers
deleted file mode 160000
index 24268930bf..0000000000
--- a/src/taming-transformers
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 24268930bf1dce879235a7fddd0b2355b84d7ea6

From e82c5eba18bc2f2b0e9588b5a9227e19ed1e04a3 Mon Sep 17 00:00:00 2001
From: JigenD <jidaisuke@comcast.net>
Date: Thu, 25 Aug 2022 12:18:35 -0400
Subject: [PATCH 19/58] PR revision: replace cuda call with dynamic type

---
 ldm/simplet2i.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index b5dc775c73..c67d02bb6c 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -494,7 +494,7 @@ The vast majority of these arguments default to reasonable values.
         sd = pl_sd["state_dict"]
         model = instantiate_from_config(config.model)
         m, u = model.load_state_dict(sd, strict=False)
-        model.cuda()
+        model.to(self.device)
         model.eval()
         if self.full_precision:
             print('Using slower but more accurate full-precision math (--full_precision)')

From 39b55ae016c52c82de5158170ecc9205affff7ef Mon Sep 17 00:00:00 2001
From: BlueAmulet <43395286+BlueAmulet@users.noreply.github.com>
Date: Thu, 25 Aug 2022 11:04:57 -0600
Subject: [PATCH 20/58] Remove accelerate library

This library is not required to use k-diffusion
Make k-diffusion wrapper closer to the other samplers
---
 environment.yaml                 |  1 -
 ldm/models/diffusion/ksampler.py | 13 ++++---------
 ldm/simplet2i.py                 | 12 ++++++------
 requirements.txt                 |  1 -
 scripts/orig_scripts/txt2img.py  | 12 +++---------
 5 files changed, 13 insertions(+), 26 deletions(-)

diff --git a/environment.yaml b/environment.yaml
index b554cfc035..7d5b4fe9e3 100644
--- a/environment.yaml
+++ b/environment.yaml
@@ -10,7 +10,6 @@ dependencies:
   - torchvision=0.12.0
   - numpy=1.19.2
   - pip:
-    - accelerate==0.12.0
     - albumentations==0.4.3
     - opencv-python==4.1.2.30
     - pudb==2019.2
diff --git a/ldm/models/diffusion/ksampler.py b/ldm/models/diffusion/ksampler.py
index c48e533410..cea77dac1a 100644
--- a/ldm/models/diffusion/ksampler.py
+++ b/ldm/models/diffusion/ksampler.py
@@ -2,7 +2,6 @@
 import k_diffusion as K
 import torch
 import torch.nn as nn
-import accelerate
 
 class CFGDenoiser(nn.Module):
     def __init__(self, model):
@@ -17,12 +16,11 @@ class CFGDenoiser(nn.Module):
         return uncond + (cond - uncond) * cond_scale
 
 class KSampler(object):
-    def __init__(self,model,schedule="lms", **kwargs):
+    def __init__(self, model, schedule="lms", device="cuda", **kwargs):
         super().__init__()
-        self.model        = K.external.CompVisDenoiser(model)
-        self.accelerator  = accelerate.Accelerator()
-        self.device       = self.accelerator.device
+        self.model = K.external.CompVisDenoiser(model)
         self.schedule = schedule
+        self.device = device
 
         def forward(self, x, sigma, uncond, cond, cond_scale):
             x_in = torch.cat([x] * 2)
@@ -67,8 +65,5 @@ class KSampler(object):
             x = torch.randn([batch_size, *shape], device=self.device) * sigmas[0] # for GPU draw
         model_wrap_cfg = CFGDenoiser(self.model)
         extra_args = {'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': unconditional_guidance_scale}
-        return (K.sampling.__dict__[f'sample_{self.schedule}'](model_wrap_cfg, x, sigmas, extra_args=extra_args, disable=not self.accelerator.is_main_process),
+        return (K.sampling.__dict__[f'sample_{self.schedule}'](model_wrap_cfg, x, sigmas, extra_args=extra_args),
                 None)
-
-    def gather(samples_ddim):
-        return self.accelerator.gather(samples_ddim)
diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 4737d90ba7..7e5d5d6e7c 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -467,17 +467,17 @@ The vast majority of these arguments default to reasonable values.
             elif self.sampler_name == 'ddim':
                 self.sampler = DDIMSampler(self.model, device=self.device)
             elif self.sampler_name == 'k_dpm_2_a':
-                self.sampler = KSampler(self.model,'dpm_2_ancestral')
+                self.sampler = KSampler(self.model, 'dpm_2_ancestral', device=self.device)
             elif self.sampler_name == 'k_dpm_2':
-                self.sampler = KSampler(self.model,'dpm_2')
+                self.sampler = KSampler(self.model, 'dpm_2', device=self.device)
             elif self.sampler_name == 'k_euler_a':
-                self.sampler = KSampler(self.model,'euler_ancestral')
+                self.sampler = KSampler(self.model, 'euler_ancestral', device=self.device)
             elif self.sampler_name == 'k_euler':
-                self.sampler = KSampler(self.model,'euler')
+                self.sampler = KSampler(self.model, 'euler', device=self.device)
             elif self.sampler_name == 'k_heun':
-                self.sampler = KSampler(self.model,'heun')
+                self.sampler = KSampler(self.model, 'heun', device=self.device)
             elif self.sampler_name == 'k_lms':
-                self.sampler = KSampler(self.model,'lms')
+                self.sampler = KSampler(self.model, 'lms', device=self.device)
             else:
                 msg = f'unsupported sampler {self.sampler_name}, defaulting to plms'
                 self.sampler = PLMSSampler(self.model, device=self.device)
diff --git a/requirements.txt b/requirements.txt
index 30b2251a1c..a94a4a5382 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,3 @@
-accelerate==0.12.0
 albumentations==0.4.3
 einops==0.3.0
 huggingface-hub==0.8.1
diff --git a/scripts/orig_scripts/txt2img.py b/scripts/orig_scripts/txt2img.py
index 42d5e83496..1edc531309 100644
--- a/scripts/orig_scripts/txt2img.py
+++ b/scripts/orig_scripts/txt2img.py
@@ -12,7 +12,6 @@ from pytorch_lightning import seed_everything
 from torch import autocast
 from contextlib import contextmanager, nullcontext
 
-import accelerate
 import k_diffusion as K
 import torch.nn as nn
 
@@ -201,8 +200,6 @@ def main():
 
     #for klms
     model_wrap = K.external.CompVisDenoiser(model)
-    accelerator = accelerate.Accelerator()
-    device = accelerator.device
     class CFGDenoiser(nn.Module):
         def __init__(self, model):
             super().__init__()
@@ -251,8 +248,8 @@ def main():
             with model.ema_scope():
                 tic = time.time()
                 all_samples = list()
-                for n in trange(opt.n_iter, desc="Sampling", disable =not accelerator.is_main_process):
-                    for prompts in tqdm(data, desc="data", disable =not accelerator.is_main_process):
+                for n in trange(opt.n_iter, desc="Sampling"):
+                    for prompts in tqdm(data, desc="data"):
                         uc = None
                         if opt.scale != 1.0:
                             uc = model.get_learned_conditioning(batch_size * [""])
@@ -279,13 +276,10 @@ def main():
                                 x = torch.randn([opt.n_samples, *shape], device=device) * sigmas[0] # for GPU draw
                             model_wrap_cfg = CFGDenoiser(model_wrap)
                             extra_args = {'cond': c, 'uncond': uc, 'cond_scale': opt.scale}
-                            samples_ddim = K.sampling.sample_lms(model_wrap_cfg, x, sigmas, extra_args=extra_args, disable=not accelerator.is_main_process)
+                            samples_ddim = K.sampling.sample_lms(model_wrap_cfg, x, sigmas, extra_args=extra_args)
                         
                         x_samples_ddim = model.decode_first_stage(samples_ddim)
                         x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
-                        
-                        if opt.klms:
-                            x_sample = accelerator.gather(x_samples_ddim)
 
                         if not opt.skip_save:
                             for x_sample in x_samples_ddim:

From b7735b3788239f0b0157133efb82884319ef96a9 Mon Sep 17 00:00:00 2001
From: BlueAmulet <43395286+BlueAmulet@users.noreply.github.com>
Date: Thu, 25 Aug 2022 11:13:12 -0600
Subject: [PATCH 21/58] Fix attribution

---
 ldm/models/diffusion/ksampler.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldm/models/diffusion/ksampler.py b/ldm/models/diffusion/ksampler.py
index cea77dac1a..62912d1a07 100644
--- a/ldm/models/diffusion/ksampler.py
+++ b/ldm/models/diffusion/ksampler.py
@@ -1,4 +1,4 @@
-'''wrapper around part of Karen Crownson's k-duffsion library, making it call compatible with other Samplers'''
+'''wrapper around part of Katherine Crowson's k-diffusion library, making it call compatible with other Samplers'''
 import k_diffusion as K
 import torch
 import torch.nn as nn

From b95908daec46203db8a7a4b594f849d5f05e7052 Mon Sep 17 00:00:00 2001
From: tesseractcat <tesseractcats@gmail.com>
Date: Thu, 25 Aug 2022 14:15:08 -0400
Subject: [PATCH 22/58] Move style and script to individual files

---
 scripts/dream_web.py      |  15 ++--
 scripts/static/index.css  |  49 ++++++++++++
 scripts/static/index.html | 153 +-------------------------------------
 scripts/static/index.js   | 101 +++++++++++++++++++++++++
 4 files changed, 162 insertions(+), 156 deletions(-)
 create mode 100644 scripts/static/index.css
 create mode 100644 scripts/static/index.js

diff --git a/scripts/dream_web.py b/scripts/dream_web.py
index 41bc9a9029..b429d749d9 100644
--- a/scripts/dream_web.py
+++ b/scripts/dream_web.py
@@ -1,5 +1,6 @@
 import json
 import base64
+import mimetypes
 import os
 from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
 
@@ -16,11 +17,15 @@ class DreamServer(BaseHTTPRequestHandler):
             with open("./scripts/static/index.html", "rb") as content:
                 self.wfile.write(content.read())
         elif os.path.exists("." + self.path):
-            self.send_response(200)
-            self.send_header("Content-type", "image/png")
-            self.end_headers()
-            with open("." + self.path, "rb") as content:
-                self.wfile.write(content.read())
+            mime_type = mimetypes.guess_type(self.path)[0]
+            if mime_type is not None:
+                self.send_response(200)
+                self.send_header("Content-type", mime_type)
+                self.end_headers()
+                with open("." + self.path, "rb") as content:
+                    self.wfile.write(content.read())
+            else:
+                self.send_response(404)
         else:
             self.send_response(404)
 
diff --git a/scripts/static/index.css b/scripts/static/index.css
new file mode 100644
index 0000000000..d3674525f6
--- /dev/null
+++ b/scripts/static/index.css
@@ -0,0 +1,49 @@
+* {
+    font-family: 'Arial';
+}
+#header {
+    text-decoration: dotted underline;
+}
+#search {
+    margin-top: 20vh;
+    text-align: center;
+}
+fieldset {
+    border: none;
+}
+#prompt {
+    width: 500px;
+    border-radius: 20px 0px 0px 20px;
+    padding: 5px 10px 5px 10px;
+    border: 1px solid black;
+    outline: none;
+}
+#submit {
+    border-radius: 0px 20px 20px 0px;
+    padding: 5px 10px 5px 10px;
+    border: 1px solid black;
+}
+#results {
+    text-align: center;
+    padding-left: 20vw;
+    padding-right: 20vw;
+    padding-top: 10px;
+}
+img {
+    cursor: pointer;
+    height: 30vh;
+    border-radius: 5px;
+    margin: 10px;
+}
+#generate-config {
+    line-height:2em;
+}
+input[type="number"] {
+    width: 60px;
+}
+#seed {
+    width: 150px;
+}
+hr {
+    width: 200px;
+}
diff --git a/scripts/static/index.html b/scripts/static/index.html
index 663a32ed12..2e035fc9df 100644
--- a/scripts/static/index.html
+++ b/scripts/static/index.html
@@ -2,157 +2,9 @@
   <head>
     <title>Stable Diffusion WebUI</title>
     <link rel="icon" href="data:,">
-    <style>
-      * {
-          font-family: 'Arial';
-      }
-      #header {
-          text-decoration: dotted underline;
-      }
-      #search {
-          margin-top: 20vh;
-          text-align: center;
-      }
-      fieldset {
-          border: none;
-      }
-      #prompt {
-          width: 500px;
-          border-radius: 20px 0px 0px 20px;
-          padding: 5px 10px 5px 10px;
-          border: 1px solid black;
-          outline: none;
-      }
-      #submit {
-          border-radius: 0px 20px 20px 0px;
-          padding: 5px 10px 5px 10px;
-          border: 1px solid black;
-      }
-      #results {
-          text-align: center;
-          padding-left: 20vw;
-          padding-right: 20vw;
-          padding-top: 10px;
-      }
-      img {
-          cursor: pointer;
-          height: 30vh;
-          border-radius: 5px;
-          margin: 10px;
-      }
-      #generate-config {
-          line-height:2em;
-      }
-      input[type="number"] {
-          width: 60px;
-      }
-      #seed {
-          width: 150px;
-      }
-    </style>
-    <script>
-      function toBase64(file) {
-          return new Promise((resolve, reject) => {
-              const r = new FileReader();
-              r.readAsDataURL(file);
-              r.onload = () => resolve(r.result);
-              r.onerror = (error) => reject(error);
-          });
-      }
 
-      function appendOutput(output) {
-          let outputNode = document.createElement("img");
-          outputNode.src = output[0];
-
-          let outputConfig = output[2];
-          let altText = output[1].toString() + " | " + outputConfig.prompt;
-          outputNode.alt = altText;
-          outputNode.title = altText;
-
-          // Reload image config
-          outputNode.addEventListener('click', () => {
-              let form = document.querySelector("#generate-form");
-              for (const [k, v] of new FormData(form)) {
-                  form.querySelector(`*[name=${k}]`).value = outputConfig[k];
-              }
-              document.querySelector("#seed").value = output[1];
-
-              saveFields(document.querySelector("#generate-form"));
-          });
-
-          document.querySelector("#results").prepend(outputNode);
-      }
-
-      function appendOutputs(outputs) {
-          for (const output of outputs) {
-              appendOutput(output);
-          }
-      }
-
-      function saveFields(form) {
-          for (const [k, v] of new FormData(form)) {
-              if (typeof v !== 'object') { // Don't save 'file' type
-                  localStorage.setItem(k, v);
-              }
-          }
-      }
-      function loadFields(form) {
-          for (const [k, v] of new FormData(form)) {
-              const item = localStorage.getItem(k);
-              if (item != null) {
-                  form.querySelector(`*[name=${k}]`).value = item;
-              }
-          }
-      }
-
-      async function generateSubmit(form) {
-          const prompt = document.querySelector("#prompt").value;
-
-          // Convert file data to base64
-          let formData = Object.fromEntries(new FormData(form));
-          formData.initimg = formData.initimg.name !== '' ? await toBase64(formData.initimg) : null;
-
-          // Post as JSON
-          fetch(form.action, {
-              method: form.method,
-              body: JSON.stringify(formData),
-          }).then(async (result) => {
-              let data = await result.json();
-
-              // Re-enable form, remove no-results-message
-              form.querySelector('fieldset').removeAttribute('disabled');
-              document.querySelector("#prompt").value = prompt;
-
-              if (data.outputs.length != 0) {
-                  document.querySelector("#no-results-message")?.remove();
-                  appendOutputs(data.outputs);
-              } else {
-                  alert("Error occurred while generating.");
-              }
-          });
-
-          // Disable form while generating
-          form.querySelector('fieldset').setAttribute('disabled','');
-          document.querySelector("#prompt").value = `Generating: "${prompt}"`;
-      }
-      
-      window.onload = () => {
-          document.querySelector("#generate-form").addEventListener('submit', (e) => {
-              e.preventDefault();
-              const form = e.target;
-
-              generateSubmit(form);
-          });
-          document.querySelector("#generate-form").addEventListener('change', (e) => {
-              saveFields(e.target.form);
-          });
-          document.querySelector("#reset").addEventListener('click', (e) => {
-              document.querySelector("#seed").value = -1;
-              saveFields(e.target.form);
-          });
-          loadFields(document.querySelector("#generate-form"));
-      };
-    </script>
+    <link rel="stylesheet" href="scripts/static/index.css">
+    <script src="scripts/static/index.js"></script>
   </head>
   <body>
     <div id="search">
@@ -184,7 +36,6 @@
         </fieldset>
       </form>
     </div>
-    <hr style="width: 200px">
     <div id="results">
       <div id="no-results-message">
         <i><p>No results...</p></i>
diff --git a/scripts/static/index.js b/scripts/static/index.js
new file mode 100644
index 0000000000..3b99deecf4
--- /dev/null
+++ b/scripts/static/index.js
@@ -0,0 +1,101 @@
+function toBase64(file) {
+    return new Promise((resolve, reject) => {
+        const r = new FileReader();
+        r.readAsDataURL(file);
+        r.onload = () => resolve(r.result);
+        r.onerror = (error) => reject(error);
+    });
+}
+
+function appendOutput(output) {
+    let outputNode = document.createElement("img");
+    outputNode.src = output[0];
+
+    let outputConfig = output[2];
+    let altText = output[1].toString() + " | " + outputConfig.prompt;
+    outputNode.alt = altText;
+    outputNode.title = altText;
+
+    // Reload image config
+    outputNode.addEventListener('click', () => {
+        let form = document.querySelector("#generate-form");
+        for (const [k, v] of new FormData(form)) {
+            form.querySelector(`*[name=${k}]`).value = outputConfig[k];
+        }
+        document.querySelector("#seed").value = output[1];
+
+        saveFields(document.querySelector("#generate-form"));
+    });
+
+    document.querySelector("#results").prepend(outputNode);
+}
+
+function appendOutputs(outputs) {
+    for (const output of outputs) {
+        appendOutput(output);
+    }
+}
+
+function saveFields(form) {
+    for (const [k, v] of new FormData(form)) {
+        if (typeof v !== 'object') { // Don't save 'file' type
+            localStorage.setItem(k, v);
+        }
+    }
+}
+function loadFields(form) {
+    for (const [k, v] of new FormData(form)) {
+        const item = localStorage.getItem(k);
+        if (item != null) {
+            form.querySelector(`*[name=${k}]`).value = item;
+        }
+    }
+}
+
+async function generateSubmit(form) {
+    const prompt = document.querySelector("#prompt").value;
+
+    // Convert file data to base64
+    let formData = Object.fromEntries(new FormData(form));
+    formData.initimg = formData.initimg.name !== '' ? await toBase64(formData.initimg) : null;
+
+    // Post as JSON
+    fetch(form.action, {
+        method: form.method,
+        body: JSON.stringify(formData),
+    }).then(async (result) => {
+        let data = await result.json();
+
+        // Re-enable form, remove no-results-message
+        form.querySelector('fieldset').removeAttribute('disabled');
+        document.querySelector("#prompt").value = prompt;
+
+        if (data.outputs.length != 0) {
+            document.querySelector("#no-results-message")?.remove();
+            appendOutputs(data.outputs);
+        } else {
+            alert("Error occurred while generating.");
+        }
+    });
+
+    // Disable form while generating
+    form.querySelector('fieldset').setAttribute('disabled','');
+    document.querySelector("#prompt").value = `Generating: "${prompt}"`;
+}
+
+window.onload = () => {
+    document.querySelector("#generate-form").addEventListener('submit', (e) => {
+        e.preventDefault();
+        const form = e.target;
+
+        generateSubmit(form);
+    });
+    document.querySelector("#generate-form").addEventListener('change', (e) => {
+        saveFields(e.target.form);
+    });
+    document.querySelector("#reset").addEventListener('click', (e) => {
+        document.querySelector("#seed").value = -1;
+        saveFields(e.target.form);
+    });
+    loadFields(document.querySelector("#generate-form"));
+};

From d3a802db69b19f3a8362dd296feafed7a664604d Mon Sep 17 00:00:00 2001
From: tesseractcat <tesseractcats@gmail.com>
Date: Thu, 25 Aug 2022 14:18:29 -0400
Subject: [PATCH 23/58] Fix horizontal divider

---
 scripts/static/index.html | 1 +
 1 file changed, 1 insertion(+)

diff --git a/scripts/static/index.html b/scripts/static/index.html
index 2e035fc9df..d670dc64f5 100644
--- a/scripts/static/index.html
+++ b/scripts/static/index.html
@@ -36,6 +36,7 @@
         </fieldset>
       </form>
     </div>
+    <hr>
     <div id="results">
       <div id="no-results-message">
         <i><p>No results...</p></i>

From 2ad73246f94000624c06a9f8950c4c22a1ee6617 Mon Sep 17 00:00:00 2001
From: tesseractcat <tesseractcats@gmail.com>
Date: Thu, 25 Aug 2022 14:27:33 -0400
Subject: [PATCH 24/58] Normalize working directory

---
 scripts/dream_web.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/scripts/dream_web.py b/scripts/dream_web.py
index b429d749d9..4622f457c5 100644
--- a/scripts/dream_web.py
+++ b/scripts/dream_web.py
@@ -81,6 +81,12 @@ class DreamServer(BaseHTTPRequestHandler):
         self.wfile.write(bytes(json.dumps(result), "utf-8"))
 
 if __name__ == "__main__":
+    # Change working directory to the stable-diffusion directory
+    os.chdir(
+        os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..'))
+    )
+
+    # Start server
     dream_server = ThreadingHTTPServer(("0.0.0.0", 9090), DreamServer)
     print("Started Stable Diffusion dream server!")
 

From 91966e9ffa01a256ede7b0b49902587f9d41fcf7 Mon Sep 17 00:00:00 2001
From: tesseractcat <tesseractcats@gmail.com>
Date: Thu, 25 Aug 2022 15:01:08 -0400
Subject: [PATCH 25/58] Fix appearance on mobile

---
 scripts/static/index.css  | 20 ++++++++++++++++----
 scripts/static/index.html |  5 +++--
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/scripts/static/index.css b/scripts/static/index.css
index d3674525f6..ed840a056a 100644
--- a/scripts/static/index.css
+++ b/scripts/static/index.css
@@ -6,16 +6,25 @@
 }
 #search {
     margin-top: 20vh;
+    margin-left: auto;
+    margin-right: auto;
+    max-width: 800px;
+
     text-align: center;
 }
 fieldset {
     border: none;
 }
+#fieldset-search {
+    display: flex;
+}
 #prompt {
-    width: 500px;
+    flex-grow: 1;
+
     border-radius: 20px 0px 0px 20px;
     padding: 5px 10px 5px 10px;
     border: 1px solid black;
+    border-right: none;
     outline: none;
 }
 #submit {
@@ -25,8 +34,8 @@ fieldset {
 }
 #results {
     text-align: center;
-    padding-left: 20vw;
-    padding-right: 20vw;
+    max-width: 1000px;
+    margin: auto;
     padding-top: 10px;
 }
 img {
@@ -35,7 +44,7 @@ img {
     border-radius: 5px;
     margin: 10px;
 }
-#generate-config {
+#fieldset-config {
     line-height:2em;
 }
 input[type="number"] {
@@ -47,3 +56,6 @@ input[type="number"] {
 hr {
     width: 200px;
 }
+label {
+    white-space: nowrap;
+}
diff --git a/scripts/static/index.html b/scripts/static/index.html
index d670dc64f5..55f338688a 100644
--- a/scripts/static/index.html
+++ b/scripts/static/index.html
@@ -2,6 +2,7 @@
   <head>
     <title>Stable Diffusion WebUI</title>
     <link rel="icon" href="data:,">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
 
     <link rel="stylesheet" href="scripts/static/index.css">
     <script src="scripts/static/index.js"></script>
@@ -11,11 +12,11 @@
       <h2 id="header">Stable Diffusion</h2>
 
       <form id="generate-form" method="post" action="#">
-        <fieldset>
+        <fieldset id="fieldset-search">
           <input type="text" id="prompt" name="prompt">
           <input type="submit" id="submit" value="Generate">
         </fieldset>
-        <fieldset id="generate-config">
+        <fieldset id="fieldset-config">
           <label for="batch">Batch Size:</label>
           <input value="1" type="number" id="batch" name="batch">
           <label for="steps">Steps:</label>

From 2ada3288e71dbd6b99574413ffd47b8e96e02084 Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 15:03:40 -0400
Subject: [PATCH 26/58] Small cleanups. - Quenched tokenizer warnings during
 model initialization. - Changed "batch" to "iterations" for generating
 multiple images in   order to conserve vram. - Updated README. - Moved static
 folder from under scripts to top level. Can store other   static content
 there in future. - Added screenshot of web server in action (to static
 folder).

---
 README.md                             | 22 +++++++++++++++++++++-
 scripts/dream_web.py                  | 21 +++++++++++++++------
 src/k-diffusion                       |  2 +-
 {scripts/static => static}/index.html |  9 +++++----
 4 files changed, 42 insertions(+), 12 deletions(-)
 rename {scripts/static => static}/index.html (91%)

diff --git a/README.md b/README.md
index a3347bdb4d..15d4b48983 100644
--- a/README.md
+++ b/README.md
@@ -88,6 +88,23 @@ You may also pass a -v<count> option to generate count variants on the original
 passing the first generated image back into img2img the requested number of times. It generates interesting
 variants.
 
+## Barebones Web Server
+
+As of version 1.10, this distribution comes with a bare bones web server (see screenshot). To use it,
+run the command:
+
+~~~~
+(ldm) ~/stable-diffusion$ python3 scripts/dream_web.py
+~~~~
+
+You can then connect to the server by pointing your web browser at
+http://localhost:9090, or to the network name or IP address of the server.
+
+Kudos to [Tesseract Cat](https://github.com/TesseractCat) for
+contributing this code.
+
+![Dream Web Server](static/dream_web_server.png)
+
 ## Weighted Prompts
 
 You may weight different sections of the prompt to tell the sampler to attach different levels of
@@ -171,6 +188,7 @@ repository and associated paper for details and limitations.
 ## Changes
 
 * v1.09 (24 August 2022)
+   * A barebone web server for interactive online generation of txt2img and img2img.
    * A new -v option allows you to generate multiple variants of an initial image
      in img2img mode. (kudos to [Oceanswave](https://github.com/Oceanswave). [See this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
    * Added ability to personalize text to image generation (kudos to [Oceanswave](https://github.com/Oceanswave) and [nicolai256](https://github.com/nicolai256))
@@ -459,7 +477,9 @@ to send me an email if you use and like the script.
 [Peter Kowalczyk](https://github.com/slix), [Henry Harrison](https://github.com/hwharrison),
 [xraxra](https://github.com/xraxra), [bmaltais](https://github.com/bmaltais), [Sean McLellan](https://github.com/Oceanswave),
 [nicolai256](https://github.com/nicolai256), [Benjamin Warner](https://github.com/warner-benjamin),
-and [tildebyte](https://github.com/tildebyte)
+[tildebyte](https://github.com/tildebyte),
+and [Tesseract Cat](https://github.com/TesseractCat)
+
 
 Original portions of the software are Copyright (c) 2020 Lincoln D. Stein (https://github.com/lstein)
 
diff --git a/scripts/dream_web.py b/scripts/dream_web.py
index 41bc9a9029..378845e89b 100644
--- a/scripts/dream_web.py
+++ b/scripts/dream_web.py
@@ -1,11 +1,20 @@
 import json
 import base64
 import os
+from pytorch_lightning import logging
 from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
 
 print("Loading model...")
 from ldm.simplet2i import T2I
-model = T2I()
+model = T2I(sampler_name='k_lms')
+
+# to get rid of annoying warning messages from pytorch
+import transformers
+transformers.logging.set_verbosity_error()
+logging.getLogger("pytorch_lightning").setLevel(logging.ERROR)
+
+print("Initializing model, be patient...")
+model.load_model()
 
 class DreamServer(BaseHTTPRequestHandler):
     def do_GET(self):
@@ -13,7 +22,7 @@ class DreamServer(BaseHTTPRequestHandler):
             self.send_response(200)
             self.send_header("Content-type", "text/html")
             self.end_headers()
-            with open("./scripts/static/index.html", "rb") as content:
+            with open("./static/index.html", "rb") as content:
                 self.wfile.write(content.read())
         elif os.path.exists("." + self.path):
             self.send_response(200)
@@ -33,7 +42,7 @@ class DreamServer(BaseHTTPRequestHandler):
         post_data = json.loads(self.rfile.read(content_length))
         prompt = post_data['prompt']
         initimg = post_data['initimg']
-        batch = int(post_data['batch'])
+        iterations = int(post_data['iterations'])
         steps = int(post_data['steps'])
         width = int(post_data['width'])
         height = int(post_data['height'])
@@ -46,7 +55,7 @@ class DreamServer(BaseHTTPRequestHandler):
         if initimg is None:
             # Run txt2img
             outputs = model.txt2img(prompt,
-                                    batch_size = batch,
+                                    iterations=iterations,
                                     cfg_scale = cfgscale,
                                     width = width,
                                     height = height,
@@ -61,7 +70,7 @@ class DreamServer(BaseHTTPRequestHandler):
                 # Run img2img
                 outputs = model.img2img(prompt,
                                         init_img = "./img2img-tmp.png",
-                                        batch_size = batch,
+                                        iterations = iterations,
                                         cfg_scale = cfgscale,
                                         seed = seed,
                                         steps = steps)
@@ -77,7 +86,7 @@ class DreamServer(BaseHTTPRequestHandler):
 
 if __name__ == "__main__":
     dream_server = ThreadingHTTPServer(("0.0.0.0", 9090), DreamServer)
-    print("Started Stable Diffusion dream server!")
+    print("\n\n* Started Stable Diffusion dream server! Point your browser at http://localhost:9090 or use the host's DNS name or IP address. *")
 
     try:
         dream_server.serve_forever()
diff --git a/src/k-diffusion b/src/k-diffusion
index ef1bf07627..db57990687 160000
--- a/src/k-diffusion
+++ b/src/k-diffusion
@@ -1 +1 @@
-Subproject commit ef1bf07627c9a10ba9137e68a0206b844544a7d9
+Subproject commit db5799068749bf3a6d5845120ed32df16b7d883b
diff --git a/scripts/static/index.html b/static/index.html
similarity index 91%
rename from scripts/static/index.html
rename to static/index.html
index 663a32ed12..a96405aafb 100644
--- a/scripts/static/index.html
+++ b/static/index.html
@@ -1,6 +1,6 @@
 <html>
   <head>
-    <title>Stable Diffusion WebUI</title>
+    <title>Stable Diffusion Dream Server</title>
     <link rel="icon" href="data:,">
     <style>
       * {
@@ -156,7 +156,7 @@
   </head>
   <body>
     <div id="search">
-      <h2 id="header">Stable Diffusion</h2>
+      <h2 id="header">Stable Diffusion Dream Server</h2>
 
       <form id="generate-form" method="post" action="#">
         <fieldset>
@@ -164,8 +164,8 @@
           <input type="submit" id="submit" value="Generate">
         </fieldset>
         <fieldset id="generate-config">
-          <label for="batch">Batch Size:</label>
-          <input value="1" type="number" id="batch" name="batch">
+          <label for="iterations">Images to generate:</label>
+          <input value="1" type="number" id="iterations" name="iterations">
           <label for="steps">Steps:</label>
           <input value="50" type="number" id="steps" name="steps">
           <label for="cfgscale">Cfg Scale:</label>
@@ -183,6 +183,7 @@
           <button type="button" id="reset">&olarr;</button>
         </fieldset>
       </form>
+      <div id="about">For news and support for this web service, visit our <a href="http://github.com/lstein/stable-diffusion">GitHub site</a></div>
     </div>
     <hr style="width: 200px">
     <div id="results">

From b64c902354319360b0deffae2f5d3918874dfc2f Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 15:06:10 -0400
Subject: [PATCH 27/58] added missing image

---
 static/dream_web_server.png | Bin 0 -> 548302 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 static/dream_web_server.png

diff --git a/static/dream_web_server.png b/static/dream_web_server.png
new file mode 100644
index 0000000000000000000000000000000000000000..c8ec9756c59720052a4382ef4f2cdcc445129787
GIT binary patch
literal 548302
zcmeFY1#=uRv#`5n#+aF6#~d>=+cB*<W@d<)nVDi{W@d~bW@cuJnHj$Id(Npkx9Z+s
zaKGBxogIzT8mXljbwBDb1vv>McszIj0DvSVDGCGtAWA@AZdhngO5}x&9_RpRDkLie
z0My1Hyy`=N&Pfa<fwBOAJ0$?%8wdbAgOYrY005`&0KhRQKX(cMfNhu2rpODr0Bs;6
zAqx2T{N!{NCxBAm>?AcDKu1tc(D%|(coOsn#!*UE4CVk97o8mewVxjrl!R)iFXs#Z
zK!GGfYy$vb7@)LJb(*|(P;CGyQ6Xj5mD6=M*B^`cFqapxLxkw5iZTPh)Fo!Px@k!T
ziiQ3(^)F@pK!4RQ7}FN%X6AXC!ZN~|mT_ir3KXw&aALxsV`7@Rg*ZK~)bZOJQ^J^$
zE*;#)<jtNNGm8t?;hMu_=S7CYWN2vU9~{F-rUB58N-dsBCgAhm=PBYA;P-!$nFiF^
zdcbJ@J5#NNAB6J%<e3MDDfzFmOao<5$O^dsBME^ZhxxxVm1A)V8vl1DReC$$k^h!o
z@$-LG(D>*7R`6e){r}QMS;}K}p<-mAqGX{)2{kOuujQ0uRu$Uv6v9zG<+)YI9lYl5
zwuIvS{X{k9lzvO<h`%6cZ((_K-zT%Nxg%ncDcd?ir>Re?Y3Niwb+W$=QtZ7jFgsdo
z>4pE&QBiO~(0554GEN=QoCTlro+#>1wKLh7w@Psx-Y{3-AJx3qt-3$i>WHOd@X<CR
zOxyX!p*%j#_Afayp|0g9MPla?D~;__dlosZnSLkDbNeqDm@=JZ$?piU!afEp)D;++
z+vhw8`uH&<N=oykmFKKOOK<f410H`GGcgNH3_>HtP*bHyE%#v`&}Px<_9^BtALGNA
z29{=xmFMB~B5T8^NsNa{Tzwkwaf56J9M}|eWkg0ASqn}zLTuHZ4epv6FU=M0n4HG!
zh9s<l+y2r~WNCmWt95>zGs~G66O=ZA2HuO2TTFX8;&o$`_YMf@|Fly~2g@c|JC3^#
zT7&Z64oT)9H~D~AM*zz-sSqEk2}b3pVc0yJOekUE1@9$kr3VKLX^m(5;AMf2d$BQ3
zd?nk}B12yYG4Q69r&Di&zwuzNI4)2T&@e?~+d!CQe-9)3^$N<!SVVGT0FTZdWVoMW
zywvs!o*;1KG!)dd1lM`nQ5-`?obhv)Z9X3kW=B2(%F4Cv^+hOlX0R3)!cv%BREHyG
zRzRK7qW&#U=60Pa{N9tM0;&E@lUi3uzvwpdWK&ztX&2r|<gO&wL-qXtpIQ_-?Z9r7
zL+TPC4>kj|P+8C_<YW8qn&}^36W#VLj-%mj#Ct0hq4<5O3hZ#$kdRdxUq+bYdvdc8
z_P`r-hf1o+cbBA$j>Hjo+fXBN{1`afX~0e-*sG7{RB5Rr#Bm^t!S@)lE|1OuskpID
zCv@>m66D(k?0~J2>I=iQ!z#|!sj9MsaU8>Hg3o6GBPtU5!fPyX23}_^;AKaE8dVUC
zhGP5-qIoh#2*URrJG$hH66$H<!<Dn>g_rU?ZmB&I9~LS1Z4~%Lsl2Vw(TfsV-2)b2
z>)p={=gN=v9q}UA-icyAPjE7EslY$Gz78(>QWq1c`x*LTpC+!sfavw`)i?14P&ZTC
zvx$<)a~|XqF>-$NHNgS)ggtA#+NWB6BYf}TS=gZE$B;%KlB4+61`L3e^G*bEhhH5G
z<y_1-vi2}MTgW)FQ&4$mXwJne&Bc)sBLmzQgW;x2^k@w=uu47TXi4(15fEFC^u>rQ
z^f>D8^J<&eqz{;yA)08O`(+s0QXnPaFPY-rcFT>MN&`vbCObk_LVXX${IS7Tdp5~c
zR+KJn%-Ga0(xef+SyQs!`^^&9BnK7ACvzfC51v2LltJor!OG{5F5P0GTrZ9g)>7@c
zu+gg_mt9WRo9%7xL}U#AlBu2J3(9qSwCVhh#%-^=dH=krKT0=_F3Z8u-QBPg6F}9m
z_pJVSuqymz`1luFnjlk5viRq-R1_9=nmV;wSb+-kz7d`O(hunJ=<~}rGt77!?E20+
zN-1D5nMs(BpF2sCM$fb%tG_p=|CoXM_U=H+UUJM^uM$96QiBaD3mGdm{4eUL)%H$1
z?62gkom!Uo9^+#R3Q>7~GRBm)FEK&7r!$KxeWtNd_<bKfzTD<;9fWiw><n62ct(_-
z5rV`VOG95*xT)&>cXR&1uKp_Qf3BAM8J5$`yR|d|4y-+YQV+=j-53OLE(nVuBg+-g
zj7)B&MWxP$O2VGdJK#mui~LcpDHIe~xVv37Raop~`WSdFVd6IW?E#(n8XIeCsjpMd
z4jy2j2}$>ad?NrS_9#Ccs{O*Rh2RsV+jCPZ>7{TkHU+Sh{7W{6Q-jydc%0+Y)$$Lg
zAM;_&b6|1hMnX-MzuQEh{<fqC$8a71nzjhL$d&kr;Eq0(RKVLt`>M1z+9wU13e7Ef
z>3rv-r1sVjaPB#JeJ<2Uqq*Jkghb<(vm<?qJ+BQ%vY_BQGK74C1<cjh&3k;Pc+8J1
zx(|ox9F+p**UQaajFoQhVRNSX+)_<jg;pct0^g)(r*tR+dbj1hpuN~^Z@&zY>cp&#
z?k)-ru&+h42bsP=0b9O9*y+CP;ZbGXf!zlz>DgTtj8m%$`Psw6+iX5wxm-T{L@hZ$
z&qzAiB<4z%Ql-I-q-f&rS2njsR%)ko6I+IyM@^^%+>dCt_rq}rXW-5tp(IdCy9Q5o
zI(47Nv8*KP`PukbDy~`j=QXle46CHWY#2tmnqqy-tS8SK)7=ww^s8oG%5{gxmC)A$
zToN3qlG#@;s1s_mLT`cCJ@K!}zkj`iVOXYY_WoOr744b$o8fY|Mtz$Rj2^%pUohkh
zWsl6@$dIJ|;eyTif$v(KcWYm1mV<t1*z;WTn=GWONWka0nruEhJ1#;`;lhQFrcQHz
zQTuMN`$;ss^8s;IsfSEIvexu-NwS0>_T*LV&rybCHDm}x4YHNaQ7$Y>WV=v!t`!YO
zahu&Tv1JXSq5v8I$qLIuL+jun^=ht*rq-zw(~^En6FbgsHl(*JJsJnd;zO4dp~p+o
z4H;YDR7QjqIYkV{$#@0aHl>PPL30dzg(-3QcArW4ls#{sAs83JELr-axWB|@NGNYr
zCqEZMBa5Vy49w#GSDLM5?7_6=O@Ob#Y`Q}xFae#K-yo3xd-a+0&2Id+D?PHWx*b~2
zhskX67&$cLZF|W+^n-91ZPb3~F<+H-&gn6@+nnA_*`68l9sACHW8`eCiFzkJzb*n@
zXJ0XyFz~Tu<A`JCk~<lu6!9onynpV#P8MWp4fQAsBli_gze0iS&6eu-{PBD*wV!{w
zhA9y(bAq@XU9d0MN(g?MhjC`;T$A(_#9Bb!n;G^CKB8&39t3+focr?|oSj0k4JcK5
zwHX`JSk)0(Y4&>G{+#)K_tj1EcJ-Xq5aMf=T;bd!=Cc#P{cYXrS+7pviO12N)S;E<
zX*^T@{cZK=1`zA%i`|+cJ=9}~H=fNEPJlRBES^vOQ*7TZBu`?IR^c7=${V)TfvjB3
z6dv&~uGUSxnXciNY-@=N0HD)Z4HZW4Ek9Y4%<o2EtaO@O8H<XFE>d@xBE7ktbn8Am
zu}I6eT2L59@@amqHg)KhO}}*{>Oe@*s$Js{i*9QwN&Z%HysOAY@*ife|2A!xI*3he
zKZ1IVw<D$_(qZ;$5x~%Vy1r*y+6{N{(%1bwi3p+W`xCouSXm5ZcJmhd_$xJ&;M<9e
z{V2jxr1iHg$)2p=b1~L856k?Io?Ii9;9wTHS;u(mNe3{_h4+s{DY+M7EO!M?Wy0z7
zXE#)^3Wl2t-J5)M3NiNgzu>cXB;HvLl`fNiC1x4cbfSaL?U87DWm+6b#U&%y09z-v
zT=7;F{-C{?LUnC@TkCtc<Q#WlW@yrqQHlfn_Rf|Ke{G}8y=o61t!|2Z@p7rs`=02P
z;g96b7I8~6*q!e4YH_QbM4V#!*C)eg0ui?>bO*_wua6N$&(f|7Q%+u64St-bAjqE`
zFh?o4X<7cfT=QCa^d_?ZO_VI}HhZYJK<5{^-yR{~u&FanB~QFYgIZJk=R^d!d!0gi
z5st|u>7$r$v;TW`+0*~|Lnm9`9mlYu?24r%vKBj{mj1Q*-S07!`m%<d@w)a|m~2NC
z%5Vi0hP4hiJofD)$!9eW>rxXd;3&mRbWoK1_>BE|G7?W@VTkji@c902r&)0n%~tkQ
zM-NCU)jfSVmOe+R7WQRucH~F6yDm8TO_oIYVdz9tOzyPV^~avJyKs>Q|M_W`x)JZZ
zFqX*oA>VImO7{-UDv~tjIWO`7{WYbq%PlD8scnZBk(pcFbm-eM0ZbpA-m%GSY<d$K
z{ni5MAJ>h3vs<ejTVz-wl)bBDwol3+o2%^6nhiI6`C{eYiT@H@mrmuiuE2yc)(gSO
z7qtH8xBc)~mEL<xZh48NJ@z1PN;4FL8zM2bCSg;m2yiE4<SM(kqZw}sFgS~KhIRfi
z0aF9-ECh}|jcj_OXRkHo<i|+EGw<2$WS6$EpMUbRJH0zss!})$J>FUJ<8|J)(;oCE
zoW8NtD>7U&sV66f4*lt|tNF$%qE-csJ7`erEd}u8(dez4r0dt7_IXvFkZ_RqY;iS;
z<Nijx1GU(}EE6MEA>;(SSo06;cw2ciD7mSZM**33Z2wu15s~Ar6)}idrQww|^o8o&
zQ-crO2FS?VfOD)ZW9Ommodwn0U@kdV!AsF@gjA;s^JqlW+SteAS))0!4(hd{FH~DX
zr1!EE!#623Ol?BFd#PxcFf%h0oCNgZTW%Xu2CG|L9=<!h2a0@T%FoL4-zpmk6)xx!
z4gCQBH@o(#bg;vMhunt$dfH$H8D$?3^#k1eWa5%Q06||fnxwu1&G}rT;6NV!YmT;N
zdL9c7f~rD8Me4`(Tw$aE#3SCB)}3ESbVaGq{?I(hi8*%g&q5UNC`d_}6u+ilR4(7(
zyHn$Gt0lNf)mQoo^fJpigavw*0+a*n!CB2UQJIG!s^AeMZz$W*!$__-D&$d&e?N*z
zlskumCtwZJ;hpHt)dxjwCqou74<TYrj9~um_B`h4y@;rk>?#v=pFtL7a3}T_;TBRP
z(VO2II9EWteMgmQgA|CmsWn-hu%kaW{hb!&E1!(_wBKc@rW05uymsIB^wNlbn)#7F
zKFP`#$)x)C(-!WWqFAW9OhXTx%HD}}6+$vz(Fv3{Ly^+qb)_QOq}$Y;sHCp-&LGsH
z@(H_pnIPT0Cx^v?|8`2arC046WX{E|PZQ*}O$4WC4ZAb=#P~6tNJwu!Qv0Lpg=Wnw
z4a{QVS3$;X@nS(~$5CYQywu>|(1>>&qJ>7pjX5)qS+x){yA%Yv)dz{C>2_n=ldiHq
zwtMhJe*+Kl+)*KCy5}%9`+iUg$8D*)4s@!d7AM?wvuVm8(~j;Wp-;64O;5v2ndMO1
z@{wUGTdZ415%-Nd;rOebi_Wy-D~N!%Hi9GHBioso>qJMvp$7WzYvp7dONK9WQ_)M1
zQ}1Q+ZgX%T#xY4rs+7ff>p_9HaSZA1b${7@WWU$dp5}dedQ`Un)2O8#+Tt3c@2<*N
z#tmT=d`VD$anjYFbMCLvcYd<uUA_H2pgzCzX~r7RY>WRiI79yU-Htk~oAI%+X$l-+
zgH%nZK^4NUw$R1V)SpWmRnU<Yvyf(vPv9_2tNgW$-&#%Mo*?g%(^`D-tDK6VT>$u<
z1=jlK4+&3Xzmh<Kc@spp@b38%t`?|Yi`Pp2Hrm|D7cL6fNm{bgyXoa%tXyBK45nX1
z`is`Z+-I<?tCxKJI}KtO`pNA2>H14XmzqnJKlY&k^Ij^`#z)9=cwW_+Ezy^rqs0`C
zaMPM!?t1DvShUiUYbxSejy#I|`M+13g>JDF&b4NogxW2hvmE!y#}?44kuO3?c9aRb
z|D=gmijV7Pak+c3&tGa1@$EPf4YY~y&o<uhpvjPIt$4dN&xm1DB#Qtu+{&db`H&kR
zKZnm*t|#}waf*u0-!`<2T`|od8o()+{?Drt=eR|Fn`a6tx{{*Am`hw>tp*mCj_$V8
zTzNt#H<CQ$wo_h~mr38q!D}nGH9E4}VPs>dPq{#g`ZntTeWq%!k~XKXP%Je)PV?u#
z2QMB%++$PCrl&eN5~~f1x7a`8Z%hNTr>&<yeNZx?=FfyDkv9b*=4g#=g*JvYS81DM
z7isTT2kk~_&uKMLJz64Vah7{2Kc)?l7_y}3S9r>}A{rb%_Alx)g%#<+=f=hz(3*5N
zZ`R#QY~N$2?BhoD6Pi7n(ElxtTv-(#K%&PSo+$}p6L-HJDua>i+MBSsdTRs}fXsrk
z0|0HRZyH2W@@vrVDIBce%&yiIqA^o%es&zdMOGUhFK6WAn_6s*L$!@-gD*#jn~+V7
z>+eRCNTz#&PfwMHzGMMPY~2kkkYPh?V$+i8B6D9-5nEd>;Toyedm801vB`<f``fBE
zn=*7yIi*ZNcj^Sat7c~FlV-ok0VABODqQnAO2XCbaXysI$h_tv89QSNyA5KooV5a$
z6bi@NO@zyr+Wp0JsM&gJ22U?ay_FXs*k#x2Qb@40NJ3b=rO7P~O)%%#fGwquFm_FT
z@hH;Km{m5;7OLDof!w&3WN5_<CY=@ap-%?<;8ah&hNt!g7>~@tz8w!%k50_$yv>n0
zqf1ysZ@1biA65v1ZQTeA@wJo$({6+HO-RtUj4!VIO06^n6?iZx-Q&-USZOf>rb?$Y
zO_@ayTS>{kwZ1l4hthJEP6x529PCEGDM6s!KB=B{*~{Q!WjKK=3J}uU$*C9_9nX^g
z0rw%X*_1Le;_uz<%<g4$^<ny=Y#v+d#<Xxa8TU1)GT>fn-PDI)dYj3}x2hSWp%#UT
z)#{T@4wu2B9;o)-@;on)v*$4A(8e&blT&L$>~iStQm33S*yF1^4Z*I+_KpFHn-X<@
ztA>J(r1(yp!}B5RNt?kzzs*x8lD{Zo7+M2IaWlv6EEmI+BS*;8%9pA$E0P|KGg-F-
zWJsSbgqhr^9O*}BzncfaH6U`v?0sj(I(AfGoRj{nGIH2<R8^dBcNY=jWCqmbg*lw;
z3Ra*`6W51Hi;0Ksk0Y(<4tD$q-#C5A&<<$9Anp2R{oYQJN5}6W2Ql=p?~YvY%wRaf
z-;K^9;UBivBhz6nx*fH}TJcJLmrC+`xBj_FM&eC25D|n5$QWTh2Q)JP1GVQi5;t{-
z0h<RJmlqqb#|6J(-*;>!>A~)d&vu?T*1%)2-k^G;4*;P;;-FX8#yOZ61lUwbtg{}J
z|HE;T*Er@mp~df1j1XMn9@{fJQEg~D7OeNTU^0gyMglt65eQ3EtS+kV3>{)@aGzCX
zj;C9*cr>OB{9kS`uU&ZN)FF5ESP;IcNJZY*VAbJrzJ^u^-48G%YqmVh4t}dAMZJBm
z-2Tu!f-PP+C&`}SCA%wGXNzwhWK*!ddI*D9f@9QYid-FJkcx6F+iB_HP7#|DOLs{M
zbMj&<)&P|FTEkIl$Kz#!Ys$&mF~AU&H}uE+8=5mE{*a2nraw_^wOybkmF3V^1HpV@
z@ezHBtkyW)=140ly);wpvj|g9esJG{O$FDv&B(iEu)8M7{v&i?aWO{+Lh_=6<mZ|u
zwd_3;AM{L%^@&m18(M(!c4_GEhOXM{Zk&oy2boIzSV75{Lr>+jq9`cSp6x&X+Ke_#
z0;T1&B$36TFYq;|=iWp$PN0uwXFbWFV4Ugrz3#Nk-*f%D)zptK!Z)}?dqDPch5$Jn
z*W|LAU1mJSGvOTYBa*^Mz#OyC1ap0i<%Lk0EN`xQsM7nFIr@<Igj_g5sn6tPGhuza
z02N0B^EE|N9w{x7_hM7xrqI02jpa4XKMoeBlZGq$pW=l{D41om^hHXZT;2h7e=c&8
zu1v`u3bwpb#W;ZC-bgfre_@;{!W4&J%$7{izT&+vn9l*;F5<DiIpANfriq`3W6=%b
zc_k~FXzbgn%LV!1H%!^0qim#TXR-JmO^)wv=GwH5)T;96hVvFvE3;w>5RaCj1X;j{
z#Z;-8wFB|$u592!jx~xyK+bz;lq=A}M13q{t9cs(xT{9uI)d7=tl2A^cS4K`DVavI
zH8YA0rrS@#b@#m&0Vzr(KtELdfytAbbxsb}kj8JVi-o&64zM!;yMY@q^@}g3=DVLB
z#h>04O#bYFtr%UC-X3FUGJYaRF)uJw3bVNdsAw!-al=bP(mVRCt7TisS8sUUQ+iS~
zNZ7l5yDRwqwm?ou7lw8i#_R@`gA3lE{tMII;$*(PyGHYP^`$?5E%Ah#i}gMZ*Rw|`
zSp7>Oubs~?)6o2#BpAr|Uw$d@Uf5v&Z4X`V1P6mSB~3OPUFz-m@;>@txOwPBntViz
zJhwJ0uS3B(CggdoFFHX86(hG$Q(_C=Bbi*>#BiC0=YM249Ga@|p4;NzhbT%cBYvA#
zx?(L23F~%e+0t|>UsdVPOhZ@A@A~7<>pq<1iy_Q`i%<L}a$N6O1cYSt8#GuSx6t$)
zfj<Bi9pT%WCtA%ltlU`&0(RdfH(@s%oIn>$HFDAv5j!m>Vn*<#S6C>46;igmYHxSM
zdHu7!qXY0y!8Ke(hpGr#mLu};3BTV*36Wim4WyI~|BtUVX8!=q+q0-0lyGX^x(MCu
zLBOZ6NeM8bzURJPJ1fmE@{#9C&4D6!uPZPEI38Ib%UQk)_kXa>>98FNjp1pK-A+GT
z2Int--5X2q@#?2L7H^R6Rwc~i({~FQC;8Hz;3kXJeX#hNrh9W@dXokkC+H9V3+z0a
znt*)RfxCk-|0RH}+Z(wW*RX=-3RU-i>Y4}t99jRLY~w#LNd5ofpZ`Y}>9~n1&r9ij
z_8P5{sw&oIb4_xVq6(F7?cv$;-O1zKqHe9vhj#|I6TDir&fnAJ`dYi~-ZDA7Py7aO
zyx1Uoxj&=5_HMRWx7+&TS0<<PyG*;CJ&oNOo`;9$^LhX@l5hg0Y#FykBp(~uPPP6L
z?aoZ03@idd-}V0J_Q?qX3JMA*4^Q972s~L_Nm;_+r=kesB$ch?b0sPKr3=fB_f)9K
zdVkZZ*}XpgI$dwaPG`67Es#q1Zn;3$U^blxDzo%um$d8jca7(heS_^L5wH~F;e5Ss
zrO|SGcQ*iw{->SYCk(8`4hBgqEc_abOc;LA^@0p)Q&4Iu?n<+5%>HPyn$tCohrwFF
zh>klL7}!#?EwT6GHVjB!S6M>eXK`jb6pO!^Tn^B)MX~|u>G(+u+CS%q^($sTwiaWf
zuP-12bgkX_Y$po{8kDC{Jzy65yxy-keZfcpnVEzzrKdrEMLd^fET69Tm%4ofV2VFG
z-s9_Bi6TTghpo7<FgVJCF(ci%kTGMSOfo~R*UP=6wDgXCCn(<%La>KI_E;*5=~S^i
zaR$E^o~o*<N+!VoO(2BwJur>iiKYNZEi5ct#!dYxwwwf6AzRDN?mH2$>mNQ))qP+z
z9{;quy$NO0SbMrZ-Vs<>>dWMiKQ&1c3Nc6J^6~D)YPo>u^YO-k$6=%O*|N1qT;=<j
zC(tkeb)?d`ztUH9)eBK^%wjSJG24`T&FC@diuCVd{_K9;*dN>fg6v8J5H;B*zh7}t
zq0Y&HXD=0}bBsUxeLuOzoe6*WzH8YsPwZQ=I{969e|)!IQU=%fS^0O;dq#aIo(YmU
z1g)!>cIhj>4Wam4(v5tQ+$U2!xD1={Mh?1;yaAd|)^@tbdt#L-nQM~A+3hY<HqYJf
zn7eJ9$vG`Qus9k|a-?kEQJ!l46r@b(nW^Flh7#Xi5}3F14quh1Jy?v8H&!YtD#GFS
z<n(^KTP&NvG{o%v?3L5aZsM%0ES1bi|HM^Pyt*&Atty$ZBMvV+%+U;<`N!ybr-3;E
z$+cyZv+EUGcVDW`Q{cqr_bWX263`rMkUUS+oP@LdRzpzhU)UfCr6nSDZ=`L-9WE_B
zYraBFUVFqJ7E0e-6Reg~jTCQl37MkR8{wkU+?;7bb$mY)GwuvL8~feJf5gXzMJbZJ
zv#6A+pwk(tgnyX3dRD!~#tJz|q~ZjR;GWn(Ky3R0Ay2Ep=W*ZP<@Hj=tr@8T3sZ7h
z=x{to4Vp#CK0zK-sBE<`R(q;7@tQ*K2cn{x^xHGBPFN?!mdU@_>itKT_9cN3LtKG^
zL5SYn|FFj&4`k<XYw%nytOpx5i1jx&Ex&I^y~FZA2_{6RXuNHe)c8g9LWlFu*1o^y
zhZkQQtJ#$cAVzqn!EE|v**V3EY(J5rJ8pVgM$`M*N^n(RM1NAKY20TW(ofZ2ypZ+|
zEceW~LpvXU4=wrS_#xMr8$+b4vfJTY;qJUIFl2upMV~-$;wx#sCo}S8tJq_=^K|&;
zf4@do>&liMoNqIUg2prnlhsM6H{ZrmWX<K@A}`)k-;7}OldIR(TBkY{UP?y1M@1j1
zTvfHkV)KiirepSghM=9AOyo!9X$PiMU#;!dsv@!mEq)7PTF{*gbiQ2m{tzH1%nf1u
z*(Zz@0H>7Jd$nE&drBj5GmW3i(70n3TW6KQUO8^0&?3J`oZR6)GXl*HPrKB?g{L3#
zF+OQHQuoWw))Xq;nf<fM_qgKlZlUfP@Qq5Vt%p%rl3fp1jhLRc)o13YpR7b1MCW!^
z#t_L5-oqDs4D{W9-EJiyDO;%Z=6^sx;pe0MI#wKZ5^cpM?K{=aFK=h?9*v9FRVXCr
z7G^fw2+q4xsNimbn&rhJztU$G4&m-S9HcE=blcYnbY1R~e$eo4C4L3iT-fxTZ65R$
z-h@bgqiv!2r!_&FaNC%&unML+PElhffPqGJeXK_9HpWAojO-t!VhTwpmU<S!z5T_S
zFKJY7x=R4urxoa_BTfBa5TgWEn5`y+teSGE0n3Z2dWra!Yv!SnBj=5r{QOqCm0o&z
zdWNh9gAcN6fx6Cx^KZiY^;<7)P}i-D`TqOGJX8yt6QXmPI<S0M!6k9gm(&uBo+fKr
zt}cmvWxp|v?5SMo*Rp-yDSp_3hxn?%rXLsD=6g@wU*k1lV_L2PoMzA;omGya^-Ui=
zySgLUi?&+PornOepGffh%t~)|0-8Pg4Xt4pUN9MTI-h|%2=`X}WaH%OP8X`Fc$p8^
zepD*7-S%>$sn%@H9{N6WwwYx}R$^?-+s^8@nyusA7hiW3#uVod;AYX@X-|U*J*pjN
zb(WP-PKeXH+wsnevER)}Yil=xe)iYAH>#7%AbA+pYKT8WkO$!#DOhf>RNly<5?|Kq
zIPS?$&#LUa`*0_0IrA59RUUV%nnLcT7n-l0x5U)z9q~5s*kRW*GNvtU=&q|h3oAUf
zyfhtONWc-Emvh$(aMqyg93}@{N5_%x<?d^mPNJEziDztR%}Jp>YY;yAeZqCTu(kym
z>>idDQ<g4})|+1RdeuVgn!&}Uaz$W3Z{{*^%kQd4sD_LwgE7R(j~k#xQmOOV%3lVh
z__<hQ<@y^k=?OBjHj%4~hP;(GwYa(z<AeaKQoloH>2b2W3>gfr7XHrnbN-PLeo3$e
zELQJi@8#R-cHr52xulq=qaHL{bB5d%SKFDVmCPr|6U9VR#441y&4ZRh_{LT2|F8X>
z&<z%7kIXNnAu0+&BMtRvPAJ_!KAFMUc&Wz`*O{ryy@GE7)BI6;WTI#yCST-_f@oH!
z6C`a?or0<JA|sB&Q1)@h5Q9x~F(+vA2h5LXu0~`|X;BRffl4N}OC7O)T%w5%nR!$V
zWQM~E;lngVQ+XkAVX5Tf0=J-jMTuzIL;YjHMduwdqMzVtXihHq0fJ?yEte`tcdn{T
ziz5<`9Yh)*LchImj@w$w<vS~+y3vw=PMSHW4ksIp6YmZm_6)UUbdgV>W6cl65~W42
zX7QBP7rHRqm@Y`X#rSki<l~ydO=wS=rHstk(iXI5iTd_pIZC5vLESsO@MV@?rtvN?
zbjF83E{UEJSP}k>)|pwO>s&T2*=hA@*TLRWPXsK42y<-r?rd*SRVaT!iy;k5I~DqL
zI*C;KBbpbFbLKU{_^?@Wxq*#T=|QZm%{63a(^9DDT!y+ckQ;qtHgTn+T}e~IzDnu#
z$YgHJr~95W(fJi|raC-ZgrSm``nEtk``!H<!@Kg7JDb2rraEe)tUO;M5$_He>B*7U
z_}@Y~p8*7>8q%m2g8;usk6h!>m>1aJ&6!M?FKNItH2mwyee@gGHy(SI-@xLS`+qZP
z15X||64&MyBJXXxCEVWwPET5u9`}XasPoq%2#R~9W$GEaLEZ-7HQ(EH@R<tOKi#-=
z63X_NXtUWQhp=9cG~mfo@%3n9L7Oh{Y45N;Z{O5a&gkO@zuTdp#{0UsHx@HqqoSIQ
zkGGf&I;}gXNye`Y1Ip!%)#q!JBhPpA7A3SOefN37)~n;Pd$V)l;?`4{lN=8UzFy>?
zR%NZR)=X<Wra)K)z6bCTcl~4&J3eA8vzHN_I^X^HK|=KQ&~&aemr6{Bw&AVgWIkG=
zjic_HyCa_k-=Sm=^P4p~TSKbcRhxU<-HQR<etTrRL#VPMU#T32HBavPfz7q8h1dl#
zX!gj(y;&-qxHt`7ZSJyeE{C4-3#dg0<g3ll(*S^_%KI-79w9m$#G<r}$+c^l7|(eg
zgceT|SZx)+xZ?$`XLHvt7To$g#%0O{&*rG9DJ#<rl|JUl0?mo;^Ue9b0WPCj4~t>U
z%$8h{T~ex+i=AzL1FUr<7bOTQi-Q9B9OLxEJ}=s!iX0vN_~!4!XU-pbE$~*B;yd!+
zrx-l+X=N=JW1p*^%h!zU4krjfYjEeFk*wOX0lt#cp!{hwX;1n5>8<U)5X|RwrN5^E
zE!~z31dxE(-o|7;3$1bAIy**D5UM(B31-Om%tZy@X!O|jsbqX~@C1e%ksLzq7%=4A
zJZjj*f%XhOwU{3{@nKDy(#CcDgh#|TFP06crN^iov(t9NZgGZ%``2_N&=PgeVlMya
z);O|R1=#9#&)i<`&b^PaTt!P>BQm7q76Cu&;@vn7F$vkJx#-e=o+Q)%5IAD)$G0a3
zh%>u(&ZEdm58GERu5bU-LICLLTSG)Ff;KJHGXMl<r2UFMj4V7D+q|O^Wi+=oh1;{l
zP+S~`q$CCeg4or?H{co5Q>MWT#VtoA&Dh}l0@AU%E6_*}qjE2;74P+btaU*=cs%u%
zhc&ShntTLXz8v=Y$&uK+Ksn;F32Dj$hC-ft-+GYyvjFHWHiMlTU$)BehiMwpAq5Uu
zPuKH(%48iaiTyiFb|)Gt)!Et}Nvj)mTtZx2ta0<Y=rO9bLzwIRpXUEDkI1WG7uwxi
ziO+rslLTVxN*H+C#)K(7TbzW(A?hRq=L}StQ5+;)Jn~8FfM1voZnsiGgE>?FEh=u{
zW;9noX)SIdu^5yx+Q81i=_X(j!sLS)<xl2`Nrh^DZhLE27fCD)gf~7^L>->?QFmGA
zQg+v2eR0-XwcNOzC6tg_td4Uuhj~RHzGfcj9Qs95K`{8&`HvZAN>cr6Bqk2~n1-gR
zxauX&mTyGJ&M&Uw+csUTCk)lN>vGn5u+Wa!XccnD{rNhowV!L^18myz)o4AZ<I`;p
z_^250>g!5mH{R2-zniURPgf()4mMKwm0j%cZv!`LOdhQ*wZi(+BY4!%o6rBy$CzK%
z&v#*QPxxl9qggFGA%GaD!B&)0D0jixnfz)S6t+iXu*>$qatned%;G@Pc$-&w6Djuw
zdY?EZp4*(DKP6~Cj>#^6fib`T)Gzjh>P&E${5p7b=<e~4t+~UtU9$s7j%4UZo_u%{
zeFfTI#nF)Q+|SMLuPa~fn-*%C8iN}v`HE~u!KyBL@4qzRoGgAh0O-xZLTnHL6knYQ
z=I7aVi`Ku_MA+CwxU-v)kM~EVfAi2L7q03fK}<fO^khDZu$9XY4NSHnXjF0OFn5Ac
zOB8z%BPM)>YClHq-}4)9u${YzW(vS3*B{KXM*+hM6Lo#O+`eJ-7|0S#9Y-{rk0OEn
z+C>EmZn0Q}L`LHhTh>|w)stN%+zS<TFDIWFO;^WoP_)gRXn|G`-xkcXuu-tDOabGP
z(~%zaM!eKRqyAxo1*Y@&#}7r5=g9tV3KZgydnRBOZ0DiJLIzbIk~F0-wLN*1=(q)L
zjNuwYcgC@MS${Lz1<3<$%P%4jk^)5GrN&Rb9z52P(BlZEz&<<T2z{g;LoRsX`QJ$;
z#~D)qK)!j4<DSaPaWk%Kla`7R$#8e*(UCN^;76k?^GaI=dWo8Rzvp|WIZGQ5O#*~b
z`)2ScY*a=K+#yl^V=jXU+e5A@{|Nfb!K8%(lsv{4DpUpg4v{0YrbWD^$mhxy_d6x2
z*O+Fn@c?s-kwoz4CP_`4Bq=GTGKynJnF67rF%@}3H742heR6tGMMl<2x(m(z<2*nV
z1$kGgJD^1=pX!y);<r{?;hfu3q;Bo(Ekd9V3v#X@GwCnFC@2|!5Pb5#rrp<~)J{#o
zvzi>rG5M`3VF~s<NtldUS7)yIx2xH4o`5p|Qi@5Uqr#riGXtLXrURX%(l+`!?`M?0
z{$*s|+#F=y3r&fGO!-S2=}gc_aVUJE?ml%~&5lx_H6OT26#&PW^ajX&ij-8MB=ODh
z+!(XH@Vd$RcvX5&YXw60FV<UKt44`oxj(vZ$CPt~fLg=ZM1c0LUj1f{+y;k8AHyQp
zKfy;NZ(?%qP!Y33)zD>6q)va{@Hgf^J*J3h_c7vYZ#{Oatxz66V+KA>eZeQeIO)XK
z25iyro~S#p3%2A~Tz;fr2XFNl3s7anT|x=>?#5Q23w;M?O}^L?Qu%>I8kf8A=J@R*
z+;_17%-{s><f>aS<`P!A;)>8WA%^wOdb`5mtDl`MkLW?RAs+9@S#Ww=Q!FGLq9$%a
z$Nq?VGpX<5*f`QcryqoXr{5*vV9o>^SR~Q5i97oWX2NT`iw()cwf7H4_LRMEY5^+?
z7DnLTCcnIh27=rzYC_vAs%`{1b=Y#h{7g4I$l_qf)7V7Z-MN1@e3Rh7$rM}S^f^6i
z#cyA*^gw1yhtBDt`b)2c!h93L>G(p_;0FnC;+~nS7Mg|x)Byqnr5yyy12xkIVmno_
zy-YI)7tjXenY@A|e<;M93wmMuo|^#OG*yJt0Fo3d`ybPcF{{!gQO2>DEiRQXVdhw%
z0ST4U-qT$e_}mRo(2q{x#f*MYDEs%q!yxB7l-(U|OY{1xksAw}Uu=1ACJexlPgHvU
z4t-R}NI6S)1D&LF6US#=K?Gu5)~pI17rUO+TJZ<pa3@4#8(hKNi7>F6iRd+drWyt_
zu{ny=np#W;st#o>M&(+}&clT_Z^c?D=m4oq&l?O$*`klT(!Wi?H@&(Ga%LrSyV=Kf
zT8AGX#WKq2Jz)Q66B#hb$j3BuA;Wo^lDavYR6WMc7&Q7QW{2AiJM}H@@`?oURuevi
z+<{g>vM^)RIMJ!~1T;~}N*P7YKy7x&ZO5~Wbz_16?cjit$KBUSx-MuX<D1IM;2BaT
z|InoU{WE9~h)W?3GSq%zE{QyiRM)Yt^7~FMqM*jdLUv<Ot{mF&w2|Envun~wdErJ{
zd5}C0QSBf3iNYO*H?5cMwiEr^UJutEKclyuBd$Z%d}#cW<k5$=P;7IxYRiL|pWaI9
zet~J$M459|!Wf?UINWQ^ExcYtj)7;?-vLt%_a5uA24<V{=MEV})yx4100r+D^L!5B
z_gQ|56Zg9>MXY~A%N`Z`oI9sAnUT|*--DRGICjQ5jp^&a0Qdr>WY_Jh=YvE~Bbj)-
z{|1_1{xHKmAp*d!SqrQ@F;RLy0EAZW^&&g4Vja+X&b<`Yv=$ET@Op;YXuVfhP|j%G
z2ECuIjajW%8tiFVRK;9F89n|jEiZ2$P3P}xa;ibiYoi@E##?q9cRAtcTfX~>=QFsA
z*q%bhfK}R=sc-_%A*Kq*Eeud@;rQU9;t-i5UN}7zIdH`m;QISB=c+9sL+h0lECD#H
zy@q-$1Ui+g$bdVL)8V3-fI5vzQdeKrqi6rFeyKx@11!cKX2H5sVdNTO>u!Kpz5cnx
zc_&)y>yGMKzqN~e3m)<tOH+^~6%(>e^uok-YI^itxy#N)vE8Ziv5(%&53tr!n~zcN
zo@Fhh@W|sT$ql;pocdEv1E&NJ5<?_1@7DW%b0@+nVyyOVvAHJ+kjbpc4SOabK_XI6
zPK8evS0v*y5G-lM@etUY;LKOQ9;aR`6Mb+CUm;)-_3m}%sEI{VZv)Y~_z-%{9n<Qq
zdth6vMH8p%^^;9<1tX-KaDA$@WjhONg}ct)ZDWrCub|Rhz^5*`E}D4W0#Pho-0o!$
zX6)(0A@ZkdgPFWKwUZ76ra=*B-{H&6${i%G6$Vxbl3)S?tT^Pjw#s`y%W)#TdiROe
zv{#}tL$YGWepSBl-d6<QseA$V&8UIqZnNT_io{x>-kHVZ&)NU6JVt@JtPoJE<vT{j
zdM|j|JaHiBmf^eT%S2TTD?{fwcJU=sx#Sb(3VMG5XyeXFsS2gR+ltp;ImDt?FG7?;
z)f#gHaKV5`q`1(Rc4*~?>~+T~;TU}!S8}{<9>>|^JsLEYMHoMx@NB)R0*Aj&l0K|U
zPxN^V|7P4R8HESr2cKoyFnMHqkBZuvKGtPjmwZ^)q@NFjj1a1~tz&Ozc`M#JVLT)L
zVUDY_f!kbI&5a+>32scF-&t5R=vHa4if4|o7W}KyPX$4G>gvby@|<UR@K#~`3=3eF
z&|I#E>!sk$t7#$onV$xN`&M<VeoD(VN!1!Am}_ABzmLAOr`fBsuTZCzkU9DSh<3aY
z#A<|dvVym;K2@2zU2tJEEma4^Uy0H*d)t1|17hsHK2x2x>OdUBtbntY!tu714`>4@
zwPErv#;w(#(rsK-&XKG>jKHy8Ro^$kxV>LFPft%rI(ZG}!9M+pvz;C;{R2riA%s!w
ze|&oMO0ZXY26`~hXPtYl623=bR`xu`jmD{(nU(f+WM6fT+dR@3#SVLrbu4&1a<$+?
zDk40NX9jU^-Ua2SQ*}^T1qOvsSsWfS3|YVQ)i1L@Hr56CSg{kB*!H=cIrmKp@$lsB
z?ybinUrq6us=VK3(w=eA8B#$qco54j-1}+SK!h~=1Q@6bz_xxxRQ=NE`w@<Gl>LmX
zg~?uHei$uJEY~x+P2RW}e)whUY8Wrls|B)Zyh3l68^YW+T0A*zg=AZ*Wn4X!f3Uc>
zD3e@^&xEw{K@Ilb8+O-SY9Nvi{%wXDh3$Yg>g^pyXqSDk3XepK&n4AbuoI0pryUe*
zr+~y*MzH!~H<uAJ7q4?ekDB7mwjO9=RW|jk3h^YA9=3_AH!xt>6!}iQSoV)G^*z6@
zXmAn^5(V~goS-T1LF@RgQVDEn%fk6-g$;J*J^ePw@t_9<2m~3@$~+RB2Ud#=V&mGU
z=t`(o4%WzA^fdm)toXV7=j4#!mUstTN@I|5cIz=L6Sd~}3#$eP{CoErUHp6e@oP&Q
zzCPPB%3U6erKyv(F;w$V+vCwmsoi(*J-*=lmtnV>X2PgD9|&9~1SW2>(xt89eupQi
z;}W3qc}FC1D}nh;Git339TB~A@DIuWgX>={5~4lbZn+&=jGb$v!1Eg#`P}=m(3PsA
z`Adn%HqPDw6F?K|6VB*n{cN%&oKrVv@Kc4^Qzi>Rqy8EiQGlhv`gcpmpRO33Z{#Ku
z>Ol#A8>He%5txdX*<GovwwC6<^ekVH7!=V@c&%ZBwZH#mnbyN0TZjpwtVnkjUsFAY
z?Ow+0(#A{b(#drYvwOiiIpp~B+yk#pU7<JsM4n-Nh7{8QCPJq9*kK%U@(+lJuk<|W
zKOFKXTx-gA-~A8aLj126Kg0$~m%r%-UDt+(ekUimw=5uc(TdLgiEvg+Yny)xU&zNL
zKmuV#2jXXx*c64KsAIE#egtjD0=wwe&aavcOkbji(Wm_@r8&DJi__?9Kcfdv#JYN4
z<pt=8J}i9?XWhVyjc5Rsp`cM192zYXry+wo&vM_!Fm8VHL5}SQhr+?u1ALX`#s@^l
zA|0~fhMjn`cOP0BA5pGS<uCm!1-j7gkOR<Uyh)XxYMiC88pidX8d97J^!T$eOmr^?
zQG1<48|-AEaQ=F+Tn}SoV>8*ghU<Y&MphOsgdNGpvRJJv0itVm7aM#jX-O^%DEBnO
z>MLMp$d9`#jJqRF0UvoD^VjJY3-XsQEevG6FltulUsdZOtG5?^F?vo~hv+)JSN>=V
z46WVl`8AJi=k}M#1UQHo^2Wii)a+w&rOVn$B1(o9-Y$k2CaO@XtdiM*-5UUdGF73$
z^7{6$89QwBiJ(TJJQS8?ihu16xE^M@TrQhtt2hDG8*}=fNs;u3OYo81E+F&RwFOtJ
zsu;~{?#uR@jnXWgwzz7EVT!BCoG1(WiaYIlsBO-~E=>owxo0nekmZ(VulAF54Q2|Y
z2r}xhC(0Kb>(xZ{^hiEo9U8fK4v39`s#YyDy8L<j6@4=kT&!LK6(6qV_LZpoZB^N+
zm$Q2^Worwmx=aqT9B*dg@}KT}m}?i1{~v73{)Zrw8>RvDyw^2S9rg-{seO_|5L0U_
zOXyV(0zu{bnV(&rT&h)CVIX&EzBm#Q-DmJ(=Qr0-+kHL|(gUd?h1ZR_=tnB>lauv6
zR%LcJfX@7h_}p_q#LyLlDpjaJ`yD_WFxCC_ktrAnAKJym1=KEth)-UJfPf$ZLWTe1
z!~HM9$aMeX6Cr|x*VNaGsH>}kMAS;}0)BVi_V)IEH=m(sX=^ihe|>6kxzJIe0)?)D
z#Fx5WZh?Wg-zOIQk;V!K0*95y6ioM`ASziM=(6DkQ!JkyEL|wP-sZ&Qerr~t+f9_r
zpbah%3}WygCJKc{N>2XC9|!h^6Q)6mU2Ati@p-+1DU!<s;Z4KSuqz6w)897_n?8oC
zP1c{_QZ9=(tX!?yU?PJ{y~R#ng(~XvP5^B#7ttWKV7hf{HrsI8Z3(JSRZEBcHTa~8
z&d$!DwH6FC5?<gy6uz2M9gc^^{|?u@{NEt|{~xiQk!9Rqk+sLRpk7qT3_LWr+W8AQ
znnuTbEk~^4s3zh+U}WTPs%Qi0pGmch%i*AdIqG#ZYcZM6r`kd%yHTx_PPeebo~y&%
z&1?Spg=YWGxKG6Me@Di4%_W8&seECBt`7bj*-J7c4iDHVTibwgXz>04gHU7DPgI8{
zaiQuoqYh<Eq!spOo1rF7iWDn?)Y=5%p%$U${WRZy)H)qbtrd`4^~GO~w+C5;s>b^q
zwS|U`1T%sJU53<h^GjEcW?ZgYIPU0jfVn*rH$A`Z*ZpVdd>PmigUGbEB=Y(xiL~_u
zQWmUy-MR7JY!aJ1j6Bu)TE=pMLAfVB-oK68H<obOhXlfUv|#rL@+wZ|9hb~qZeJ)(
z-q4)dQ&1e(d(?j7JCph(`2-YW_trWiKoT$<UVG9l(?)`3lfh@bl^$fLKozQjG|o;`
zhI?W%>gKX9*Vl@Joe<4J5wR;)h{7C%LI%=g=&EA@Cw<vWmCZ21*XN`ya;grXMJx{F
z;sy)qn>@@T!_T0j6sORXd6f7)+pUwG@IRd&$4M$=&#Sg}9ao6`1nLpyO~2QSCp1J6
zFQTuex$M&G@qS4SOY*ls>67D=7}sLAb>ATPggr8&Hu`GDuz%-H@`ZHjiJ7@S3&O*D
zwe=x`_au&UAb<;*bFFO0*Xp<4ev#rdd1(Ch!Qa~H_KTI!Hz!n{sbPP1-3mmT%q}OY
z$|eX6<uL_7TNA7n<w(xNvG!Clqb-Cp<0DyGL65=6<XC7pOYc8!hm&?!FR9PZ3kzTj
z+IT{{S7>qsTqsM8+uR!KnePPNV5^te7peqN1d*=_Ioa5r15YFcx>S&z*oYSiGF!kK
z<T)Dhp*Gh)eL9LyURkS=B_bx5>*xV`lRuwqc^vIRBL0cEicyg%^5GE0moX6B7ZA8R
ziYH7q?_OS6J>Jd{TDAoS2Ke;k>Y`{#OA8vD>$U8!duj7V_K$*ZAle7{w%OtCs?`$O
zd)(~Aa`#wf%qne^Ko1W_gRzx!J4EtnuLV5Q3@--=w&I*`ztniUqnv&J3$1GRqKD0F
zdnUH@-D9Z0AXL}IUw+Rmz}AgTQsa)FmvApOTnsgRcx#Ib;bamYF=+HZoUbPCUlW%b
z8_;0GFQ|Zu?0vzO%6j$(h(C&0PzK*RQMp1|iKcBY4&!Q~?ir(iCWNQ~lhK(+FXOA(
zRVM;mg8sZa1NytHsGba(MdVtEY&|6*lS9ZKMTTkQb<1^J9pW=#ar4e_`LAzy{gG(7
z2S-to8UkO=;Soar8mMz3i!E*YAl(;=u2JQ0tw^Wyr^?Dddbvnv4`%SCEc`&6yvz*6
znISkpUq>a^?$Xkfa_p0_Zv59#txbQH&=ecJ9{pk}5L8}^!$($SCeQ*hN_W>A+*lHN
zkH@EBSA9gXvpfWPRx6wfLm*A6h><nKg6TVB>vE=f{Xsq%vV5jO2-K{^$dqAg>8gZ%
z&Vw&T1}!1yqH`GQt>%;geW8=Fu>u`Gf(ndxw>Bog4QgR`K)sAp3t(#NLu?DIArdcP
z$}tdmX2WhGsc8nUg9b`ga`z2xdCq3&_`%u058(}wSZZI^W2^Lr|CTIc!N~TqLCrW|
zauEKrk}tXOLak-b-qUWQ37I)GpsV}fTarW%9+Q`0Ek^pFG6fi-DME^$e8i;w&@@2J
zu!y~%#c*w!Z6Zcb&$^6YEul4TfcUFOtQlrbwD6zU)h6G*P^i#$gBn4xRRDKH4TBTt
z`GoKqW`VhhY{YO<CR^T9SG3+Ox^lyFIw?Q+ZWmWl<MP4v=FyBo&eX`p9djidk{%Lg
zw~%8Mf4?I-K4EtOY);u;#w)16Ddt-+0e)L6Hk>yP7=)9yumyxhDod)f;zToBYvx_P
zqW*Hbt7o<r8yU32h=3;$NqFvqNAytv-5nhA!S~~*ugr*x^Lv>rK*>VjkhTPN2W1!C
zNZSGy(T?UsQ^B5Lsi<f*_hqE=?)}3X3G*-EDQe}Nv?`&oZ)l=H6NjMaSvGYF!e0o_
zz0zwq-d91Y-E9y;I`=D<k`TBA9oR=Fc!s~lPQN;5_IfOke1>q6k$qBHdfAe=-URR_
zouY~iI_an*F)VRrpyOT9mWWH3{kejHf<&3|xl~;0N~IBDijcjK_#6MK)G+o8HJ>qK
z(0#Q$TPBu*eYA7x94bCDlLN&E5_Mu$oOCT|=Wt<VhDGXebAC9HIH(vO|GLyL?!QF5
z52Ppc#k|G0CX3mJN3tN7YAK`_y1b=CM4(9l&~t8ePmd=5iiZeuTt^Oz+J{Iy%Ay^3
z`HQXe91U$fcgZC!Hf$`@zqwf9dEQYv`rUW!7-7zR0&RpQw;)UUIqaLWBfWp2|8r;i
zZ{P*mbZ0%rMmPU_!zgVzUARv*R4S`op)b<P?N{+mS>z)0V{A8#cC__hKu5b`((JQ>
ze#6p&k?F$PELtcl+1$BEN9ibCA^wHNlGG1x>#jOwj1L_I=DJL=D``w)V>CrjfZVy2
z=)f5ja4#1TsKY5V?MFD=0V<q5{q=d<2A=3Z?83H7(Q0`ti?4l;=6+FrZ(dah9)YXV
zr=;xUC4rW{`X8FMqyYu#OLG$N_G)bB1X$}-$2;iB@ucZbF-|aSdM5?kM>Q_2BF#>s
zyj?M~ej9nVMj!smzSqnh24qQ0^tOV5dZ?6VYojkz{v`9Y#E3<t#Al<`F~$cM+oWl>
zjK!pznfY{NBhqhKo^8*-J+U)W5SpK3)Oyf(mfxEN%I!99Rvz~$KN&tfB0}-tpRD#X
z8?Ez--ke!XpCq`%0}JyiGOimj#dz0b-|2<S4&=bfzkIt>9ez=M=Z&Nrvl=>F<&@^n
z{BfllLlQ)iPm14oXqtK^cyDvvj4||d<FJBM<Rd|J2USPVmAErIL;cp!w1Klcp7-eS
ze5=xiG(=ne=rX+F1ojon!Jr^`Y0C18Gk-w->+SC(wfPIpZ=O=$DtQt$MzGGO+In(8
zfr~(Qo#oeOEH*3FG=p=3V!igCb9}G%8&nv4ZCLY;It6?o2!!nw5G<0Uk|l$$nQew|
zAY@%jfk5sj{wDmlnWOC_q@xnyF1^zaUA`E2U7a2Kf7pAgpt!<+T@!Z=!QDe}ch}$!
z!QEX$aEIXT?tvg7xHK-oo!|s_r_p9s{`>5yT{ClT&(&O|Qq@g2AFGyp@B2KzNcc*~
zZeF`r^*cxDmyY>0FFe})R8^iC^U)1U7<d)mP`mb%8)d0fAPBdrZTujj(QyIf^`kWt
z9^Y7xLSAyyJjUlh=kiU*tr`#T+*O+2LjrjNX~;F$n=nP{3|obkRzyXp5F4~0O+Uw*
zOG$<~PU5{lo<mO4&1zC<d<AtH7MlX|;K;xj*1}T&s_xGg+MlFnRX`;IEp0Ud{AXtc
zLsM2~+H7Og;Wptn5(Ck<cCCfqb6yR{(XjIwdalqPEAOMwpCWc9-LA33CSc-^b;j(a
zz$Zr5RdQKHOkX6)p7FT9?<f~#ME_T7L84A+*|vV%^BV~h-7fd91Xgt$a5l{`=vyj6
zj@qkGEK$}!rg+vUml@6tqsy#b3a^Y@f$1^gyrrd5%~1!XE3;+w0)T4@&y5Wk>UG@`
znF)D`lp4U_{tV5@i((IUJ1W`;H1q`gTtuHXa&f|*nIIokFmt)795V+p`KKUb?_4S2
z4BXa{Ibm9cy6-47!jG_wp<6oO6)$n%I;nlCZ$a5y6eNiN9dJ*G;>FQg#4H(&{sJJi
z{H^FdKcb3)n!ja;h|#tyUr~n2Q9?sg6m{nsL?{++Eg-L;y65+K{xldiATC@y?5GiY
zmB)1d*Y(h=nB8C^<js?7h<I-IOn;YPnR>#9mjS9-ZmA3gnk~%d<u@&#<~xDED(?f+
zow@#3$J<4>i@J}vzD%BbFWGO9YscM&djlU~lLDt#w_mulBTwe&kKVR&Fk)=)M*24L
z{5CL44pZzxUu{>jR?N|z&(BpIrsas<%Fa=nf35^eh$~+sYFn&Xa7h!_UM~Se>c=ul
z;xVwLWEQW_8(A@wZ)C_>Z14bB53h4b)0}pXn-OBDj<n)VF2g~j)i)uU#8S_`)buEo
zO0Z1zwt_ACqX9xxgDQ4CRzBSj6F8h3yYe0lMGL5%z_mlARoeKLbOg*F>R&!#3yMo{
z3!cDi|4!|%lN|k(dZ$&9!XO+Y_vp{c!HiXCrS^2*1b^~aP@q<^bi1dqAdjjFe;DtB
z9OwgW*H+sU0d42Al6*~~V3<RhZvbHp`nC(cB1G}5RVWEyfV`9ehv;LeUPitI^hd*r
zfxDSho9X+i;;-p?I{EY%s>TLF1K>3CkaA{6icw#N`OpD5H2v_h0BjA0(o}fQ4Hqs-
z)T$h-K2GQRPf18rQw1)en$v5Dg#b`B#yvf`fh+-$vA8_l3wx>f!9}0w_Y-g9@LbPC
zH?Ops(Se+H=W^7crxjM{X>oWaG2k-45$yzA5GtS()ESEU{%$O*>z73-fG1Zd*WrKB
zpHT~50jnpum1TP7Yd?9~HLHo4em9m$l%u3W=fZ@_6%0<5jhksxAP)V3<Z`w`=s8JG
z{YW0VV{xE)*#;DxqA&<yhOOR3#WfB0Ufp=SR4`aAS1AvT?mSe~Qm^ISPIdJz<FT;Z
zxxRie>owKIO>J;P>8@<V8ryI(QFUVFBwX8n4VkZ&r*YDcE@aQ6m|TwEa&=N&zLg!Q
zk{ztmJ~wA6EXy+wexhw@mOeZ6C^drK6&9M=h7iSqbE6W;y{PtnQx|rl8wluXx4&>I
zlgY;IhlL#p1I@}1t>^s91R;3zpz)ZhR%V@^p%FG=Hyf5xVYRL*I~b@Jozb+hIqlS6
zn_eZ@0hb7=6=1XVWbigaU{I+R^yRq;@^g>j%4cKGGeto}S$|@51|gC4ph|FV({qig
zV4>9Y=-9~F76NZ{?<$_**2+F~Jh@<G1?AZ)xc<+PB%CKp_j@2!NW6J|_eEfYGG!F7
zg#1P2z482euk<v`@0PaQN?SDGrL)nG-kg9z4RPTSvrhVQ$=W$1?R)lbskt%fDb!%M
z){JNjsw0iMPGkOjduNM(u?_NyS0%^|O^&ZsP4(%^DJ;`CfLX$<+U4E~z~f5G6;t^U
z5SE*Bg>>wQUTwvp%YQH1qxV`OmfVbyh@Zm(eQnfz`IB=hklx&ygr&(d(%z@;L@Ua!
zgUUo-=r0b+>+3zckm4O-?PXK*n&ar4Xu9<7_rgxbzP3s=7e`Cz&z!`q@rGexoin4w
zcrI*w0q~<+9KTUn;;xMtv0FId<2qWmk@HuqV++Lnaiinp-l#76tZ7(WA7h<mdhEB!
z6Q>^0ZCj9};DF2a^(u8-bJ2T6RHHnaJK87wY(Z7~pDtV@-COPo+U`k(qanx5L_zP#
z7z1Tr`nr39S+_P7pl!|dzl^_?-5$uIuY*Dbsi4*8LYz^{?<nZdH3x+&?&fFfq=#VS
znI?SgDglMeFY8`w;Zk$noy}o+3nHN@jj8zo6KUE-71AiMHwa;qo4)AxELr3(#j3is
zfJQYL<-w5N7X{Swshf_Ihl}QSzkeHTvG}3CT}J4>=TSGtG{P;%b0K7dzR9SS^^>Pd
zjgHyQ$B8)-KZ$wy1s8-u>fWHK?qVoD9ne+;J?(_?7o%q2c0bU3eT~xPS8C7SiRdUk
zwMSzAJ7(_rXPiQk2JBJWBRNPn<K)Dh#1_P%^4vODu?#-MSiCZl2(0zQT4%&1qn*gj
z9&+qvcQyQ-Y0sANH-+s9MnM*3RO}~-rv-(m{7Zq!z?}qE2%@1NY?PA1H?fb)7}JuK
z7Yk5>%HiYn>)%(Qdn<mr-VJ#QU9qlJ@=)^q#IqVFa=_BWr*S7lzS!76+DL)kThNqh
zdka^QQIs4%`*vDsH=4uE;=;n^Xs30JFS#hZy~u?ciIETIL@ps=*Z9I(35T!a(dkPN
zZ5=#bDwX?Vj_`bP5boXs@z|GJJVdqL03O;b>bIhE@X5>E^e;xbTkp_|4B4arZrJF8
z7RgyfOt|x@+G#5eo-Xu@%l3y4X2+=5J*wyo?^&E&PShTzh~81Zx>f>*A&ldwhG=>?
z;iqjr6D}cyNty8@#SbdZ!N&LA1P}Mu-~0))s-@Oy#4{^UdN9Q&r+6k#%Z3{tesX-k
zZh2*coRLOPx13$GrV=&v#>G)Yn82;g*g$3O)m3%>Kq!1(RH{UbrjV)d-l?~<*iMuQ
znlCQ8HwF-UtFT^283GqUT-Os(@vRp%f`v<=_|9j6-xhk9cORPMD89D^B7Po(t5p$x
zwPa5b``8?O@nrL$kKcYsfSmYQLeQnMz9uwkJq<px5DU(uWjs*d^O;CK2A4*iAvPyg
z=AqWdh1sJSRYrZ};~dOxu5{)bW+L+wc_$I{NYwz3ky!RPTBXun{RhoR8kR<Eu3P)l
zjr@@LHP=DKP7R28ewjM{+JV1P@K;7#UZNYFxFJ8A#H?TN+vME|wi{F<Q#C=duAUue
zArutP=*Dnf&ZT2xBoE*hG<R^mq^dc)RQss^?UCAcLlrj->v5XoJWMUi#MKu|uHrhy
z;J_QRbp9?p%E+7^X9-N*_xXg4LXOg|ZdxNF@5MJX)i3=u7{B=Q;W~+qOI^3=_YmJv
zU<U=xe}o$R#enOGDAh#C?i-u!k4MS=%Q-1XK2cZr>M(K~ebdbQ85zl5<C+5_C7n{}
zE$uxECoH?eql*K7HahFn&!_xq#K+VN?EJrxg`_$Zt~qg9<eh5QeM6*Mi)B3BLIDH}
zh$?@-80tK%9*Hy_qVQJpM~S+T#mhn>*W!(FPuC~u#i#<%l4M}H3o+F%s*}R4N$THA
z{^dd4c6wUE>k^ePnPR=#h)+SffXMJS7fSVos9kQZlTiu=2I<5c<cC}9i{N%GYBw|c
z;7K)Kel8Z3q+ITC-uM#c9Vgj$#&F4~qSA8A);IKQYMIBs{5aMMQ!+wOjRrs^WhIf*
zyWc3%3GPcjG&m{2Tt7R)F^=Sj43*7QA@DZJ)12}Lzg%d;)3>fz@APdFjr_n>-F<EP
z6-42LhKda3`SBgk#T+<&u_bsob`R+gf>fcL?%q8Noa^Tmhog7khp)?NX?IM8>G|G!
ztNH02u_iPtyRfNIy*dfM^3>p}>Sl^nva&u78J7gp;U~(MK7ln?8v>RLw?rw%Ow^LZ
zS$0WD)Kt<ini`6GAiL!-rAo{nz1_b$aTcpsn{6uk>NfohNt;lWG8pgqp2Tzc(C)?w
zt>a5r(68YX(~Dj4jZ`M<t6JPJ-t}Kth#R>(0C~%^OgCn@?(>l?HU}5;C;BiExjuH4
z!7v%fP(P%osa8%>l_hi1{l;ysEcqSwbNbngIwZUa{n%Je^1FD%oN__<vfh=%TWR=M
zDYDBmD;YVcqjL-GFI1sGY$1b2!(~U_V8Y4=6@9fZ1Xm3pm$LlYARLiBRy7g?r>!&r
z-HROG%+HsSYQxs?9l^D+y??LE7$|O^1*N0KlQL1Z*MyaqnG#b(Y9>lpnTF7pNZl@`
z*sRtC&ahgc%@(){A=^gyOu;8IMl2r=A}K>dnbAM|;72Z#5C3)8=E87SZWv@=$HFF@
z66N`fu_KPi+q%DAPb}D5X76805+&8=iY?&f&Deb^r9|Ouiy)8}%N5$5ndL#ipt5#m
z=z1IzE!S?tr$?>e>r9bKud7XEC2y>#L>zo~wb|*>%b*%<?^w2ytgd<A01pLJP~1Mi
zXGJ&~IpLYmh#BOwQfv8QbGdMt3BE*8+w=2Tsvsg()_4Gy+a~9*HQ}TvzORH?88{6A
z?U-iSaUuQYw3Foq!A~HT%k$0myu9O*m+|f+o$24gq&2WS|ERw)g;&D+RrnU2PeEUz
zZbtsH&l=&wNr-aIn;!BP_&W~;5dUTo_?;VuabcW<z#D^8Gih}L2S`}*YGPChbVF7W
zI1^hBKFm(82bK4_z;&*_k5+gAoNgo>oScb2ZRuX9>-zUe16y(NlL$r2ZZjf1_+B<^
zBOfq_sV#ttrNNlgN^e@`;u6^;j&_F7sp?-i*;NA)2m=W+K4((aPk3>2Q0R1{pxwH>
z^vYw$%C>~novKDwY?$OhqSqOA7+DHi*E*pehb_+{c~=edoYKwKpBZd_YL8xvMbof{
z2db@%oV_v0j&M26k4XWPhR+%8@DS!b0uVc{>qRreCXW%<^=PJO5wbG#De9DWhS_UA
zO(}_rMBlw!SnN?u&ll9#FH<^#7C&6_YI@k6zXULsN1QJ$)`ig`8RUO!5B1-+Liy0*
z4_9Z}{kZyX<ti;ZT*UX`GWMYgC4m0j1Kb5GSVdOuDXzB%r1N)<{MS@+RRXij00i&w
zj{<aSXZVa&4%Sl_&xfVbA7Ym>(EYN$oy$`sEl*HM#+X62r<$z*_#!^WOX1{;wirkV
zrJ}<dRtgH9Dhb(E|I=KI1Wk6q8a7<gL~y}FBrbX?)tB_Xltf-^oEQ4?_K{2{uiQM_
zm)TK}vx#;5ZwNc&#!!wwE2P!Z9Q$FtAkot!+=qV^S`uT&x&$1TmU#>!`gGt#apT7E
zH$67Y-q#zBba5R;=NI}&iYHohg;8XRe`v_XK{>VQgf`7O6cp||k@M()Ic@4Qkst%y
z<ZiSfs}#s(K(s8he0zIQf5{gq$IggmLqk-wIZxoGRW52^g^_u>%=eRHIaG^jKPBjW
z^dtFeNNNnB+@E`f7ae292Wj2y`*MS->M#05v01_08;w}*($6*i`{xPR5K%@nf-Tkk
zcEZiro@e}xp~WVRt;=Kt*1h&cqVJme>S$`ha&Qup<j7HiSrKU59|J_JNo5B<U_OMA
zE{JryisGup@cg|vC!@VRCssFmoxfV1@uB{^FIX}@NXS_CJ-;kkSYhyOJa%1{{x0TU
zS-$*l$XrZCptMtnqNv8|gz)<{5c(&$V4C!JuH#1k(%&MI+pjq(6b;2!`seRneRHWc
zz3AGcD0=%Mp5PtI<~{pw8<*bwgEne?6FLQSlck`L-!ZyfqLg|ba6NWtlRiYeeEpmf
zE;T*rQRPyhR&YjbiL9|Y*AHxKym`J;q@7>W=bbZLr2PglVdK}$PN6`us0ChUe9Fis
zXos!B@Y>)^jGq;qX0}OnBk+_Jz;N(xFhD5TKrcJRgeGx%Jn2cvP<r%xr@8vLD^+pL
zCWx*81N{>g+)tOw(#KU+jYVV8q=Y8^g|F)=Ky!blU^4~yAM!g?H;6$CWJ2=2KrJEz
zlj8N5N2jUJ%QLl4);H7%j%uBKrZ0aGZ6S>8F&CurvDu%D3~%o=>JwAOdTif!Pnn?f
z|2?+I8a5F0wXRU{o^1y|CHM7(!D*oW1b>!~_8B`oZt@~td9oH|OI*yGSr84)D%Yaj
zEo^<MdIzE`P)?_*Vl9&Sv1WUG?Ecs&Zv|}Uy^&3f7I%j+>9x&1q|qBk5&9|ouBO}z
z#(mFLgEQ)R5I!)QBvc-u_QD++jxzOEL36UXxUxpHMdNZHV7E{(fGsU?4cKrHF{pd}
z^zk)0l>{imH93|#g7n(v9`@*O5^O)8i4C-M!3SDJL}`NRD1Z&jkv>fdq<23(uFAH~
zGSIIzM|rmDSVw=<_M-iaSmm61%SABUl<cKGlQS}wNPfbzSobB8&YxHHyq&jtonIg&
z07PF&X1{^n!{b1RXp{N_eaImOZJ=<70n@}M;QN%Va($CG5H~6}BkE+=?~p6Hrz;6N
zku@_xeN~)Fvt@K+QS&e{cC)cleit3aL?Xmsa<lhR@FN=7gA&J*!}`^zi5em{6epuG
zE_tRRxYv~dwl=fUaI7UPgN!SFI`F)o+NEZH$RaHMS=SUEU{wxmr3nouN2P+4=^~$2
z96Lc7A3a5?fdvP#q|+qH$(d1BDDU)tU}xP#v`XyL^j(9(@)(|Gd7zn@xEPU~1_3{R
z&4X$%&0|v=J)XH50z}9vvdVZ>n()qsbN#vBAWSlzcf}QNgkRH$9zKrQClSwOFa>ID
zW$oSJIxx0wbbd7|bF2K>sja5pH=N#u^sfl`QC;L5DuxH3$@XYCSga{%&v6u=@6jwa
zL<TXde^?pa-f|A*V>&Ssn5)3H(8i{nO6SESX>nrQw3v3$LG?DaQ=hI-BXIz1;WKoH
zL^}v;s=Dj&$U0V-d`+8eC?CN|fq;<^C{rC_0qp3Y_7{J(1!N!TxhW%up)Rcaq`Tc!
z(<!nR+}MVS;MI3S@&o@f!=<A73IRQg6ZjSZ<Zs3IluH99qs~^~-|?#ki?rUm{e}MF
zm%YK^bS>}yDMAK*t^XxR{y%I{4*3U<v)^Bjj*m+zDx%dIw5czA@-gLh-XJkDGODky
zuRJbgyZ;R|E|j(CgS({gSd9#JyAC|_cI}aUv7@7+z6)T-He`(bv>Y>WbVQex9saKT
z8<mltpYUKZuYbNuqZ*)_yVUo`vosvUdUY*vbt>|gJd^8aPo@8}ZYt{lC(g&`FA*Of
zzXl5w99XOV`-i&*{WenWpqJEM#S4}#=<jw*19Bde+8;DnF@uTC=V%36h9lz(tjRwM
znPV;g*^jR~s%n0p3c7#RnYUv@(66TG0=`)^%BQjNzEx?AtSka5bM{gIWK=f#76vNN
zcxM*TKTsoD?4;@WnGN9SF#!>pFBK7>p+!%a7NsocBx>+A!{#o6R*pdH-Ky8n%4?NR
zxy=mSGmeeyI%l{W$L*Tg$}`l;WBO;93@wE>oY@I|6DRnEbaGEa^{5L|jDHLLZofF;
zz=3A1?wAYEUROJPNv0XfALw#KdfTR2fy?b{?^#>{x3@~j*~dcX>?FQ3L5#$|WWss>
zx?M5iTPA+pU@kWCT^ScpG9<fWoh%wj+*Ca`oWokbKHsyrXl3@eAB)rEO+@6l|M;%V
zV9uH<i+L`Qf(iu#*9%8Sr<F%Fl+OY@NYq8lp>FFgA)|T?QQ8D?TUePPY3FT&u2AL~
zHccdS_yO0l?l1Ch_5E+hOaGn5A_J!5kk#D<xo96myyX0-2jZYVVp1duL(Y&N=w5VU
zdLS}88_ns|#9Uw@OxN5Hn(3H=TSDOn%-ESW2;!nNypx&(P}XqYMGdNdcnXN(3pKSp
z4prDXDORZ7Q4Kp43aK~ZRtsEt`)&CC@mX0ftH8NR3!q26KZ<reQFhBq-F}CilH+y?
z+{C29p@++Zak-%XlCvOFF?djLSTFQ+qXVZxfMjj2Fk-=>yW%-~iCOG&N}l8uOkB;r
zX2(XAR(znT_AwWhME@B1IepRQEr7|%i7>p}A2d(^h0*C8je)8x^lG^Oufu>Hn<pwI
zUxbp**2sPu0;wF}nWD1G<Ss?8*~0aNN7kUR!FlNL%0YM&#r43WBpu(z$$PyWi_qfy
zJiWFA)h0b<+`C6TP;olw7rLeC??7AbW3y`A@C%+qhDppISoVMmQ2XL+kb0gio+w`;
zEk|+n-%#+9vUB?Olm~kevzTX4iyOMUF}UlCY5tKV0PdYjHJJaIXOCZa)8rm3kVy78
zfZy_ikToTp==#S}CSpV|fHB)z!x94~Tw~vjt(8KF+2sH1rP+q|oxo(H>SHbILw|;?
zYR<zD%?24y9N}hel?T-Xqy1cj7v)TC3*fV8(x=#}@67=#Qvf~b<&o0F-!)tNpp`-r
z1qF@IkET;AX-Js5JWXMMe}RdadHQCIQ!5WRcm*(2|5W$Np;6Mq+Do=bKpq)BIOh#M
zIR%>jLBsq4XB@s1;jYS{Vj`Ymm?=z;V%xqjn~oNwGOyI??*msgY=*t;NBttYg1!(4
z14j*oMbV(yBVkrwm)j`E_kd4BYEbl{2&~CQMX(+_NH0o6?V>{WbGF6YW6}yP{xmpf
zz$ps2Vt>3N=F2w@$ud4BEA46`gowD;x6J%=u}G^+ky|ijf2(vKsx;SxjJDo@NuTgL
z7xP2qJxN`it*M_vp`;8xcK$*O$O({jv8J9U?127Fe1-HA)`TY1929W4CZTsEdHvyz
z5e=Lfx|tI{_!~A*qN?*B-QWATqH{?^6k?8<Zw+5X^SdYbY<T8+&iui=ik=AL0mC{l
z|J$NU_a29C;h#L*%<DT1BAI|^EHf`qM4{wS)RjF(3oFacVIOb456iN`WGkc4*Tw#V
zF_~V$FR;lh8-UO^_G0^l9k#(LrQuxnqdI0lXwh3hq<H9Uf6xNEbFcg9lq$MjeArx-
z+{?D3+|E$(#V_M)*v4P9)LS@rtZUs$630hfx+`axVSZPl1OcQ}YPUm)8}XG72~IYN
zx1;ZCI#z%6bYw9Ona$TtVEIWQmvP`$=Yn_g8jPvXgXp$lzc7!6Z`Nv-<||8It8}?I
zMPOd%Lyf-JGRa8uN=_FkR;E@(*#x3(GlK15yKDN9$V7;bHb4|R=_tRK8?YRo@Mbc8
z6~LpvAuV+UiVrc<as>Dy7T3vAnhb;BQ~W&9Uql6AtJ=1?CBj?5h&!m+vCC+<nt@Ic
z6>ji-=5NfLN|t+(!v5ZvFJavDcia!PdguhmY-jhBUdH1C8$Hchc~bv%{4vs?rW=W0
zHgZG5F@8aJ4V5Yj+0&=Iw6TN9T<P(RVzeu#+sQ=`n?$ZzbwMttmcSC2C391N7utV?
z0;OktXuFUNEmaxXj)~>H>RsziOid=5MY_yo$6>jH9{JkWVa;ii8n@kLLbF>QL_v;l
z!4igQt&xN*)@6-ucF}6)wXE3sd5qFKD{6$+FnC-Rm#jvG2>QHHG+fplr$-JYgS}*C
zPmM!4mm!-|oGREv@e7lj31yNP6qGCL_~!e#`XY2jA0{W0x#B^-RY5d+ysstx{d!P}
zCbfrgK_lF{Jlp7*#4Mz5>=7Va#FO;JFXjUoe2Gi_U-x+7629V0Do?t2xeev)WSFT0
zI9C$|N-6bI==kz=2=sT~n7~0i5E7uE5-tz)0@2)>BD+$APxIPM6%Q%b=Kb%7SD?_`
zJR=&bIdePW#4eo!aW$ec8m#(kYC03+b_y_ia@@^b-Jb>PNgL1t@r>?gEFIolKD2jj
zqaE<(UPXW%9NHdegrnks0u;rul%n+VpLckp0x@@Uz?m!!-Qm4<y^p&`?+mn$W)9ng
zIr}?uI%<zfYQKQakg(6`E=IbFbyaSx5)ps;WlUj-!_f_1(y65lhah{vZuEbdE;w}W
z)t>YePtKsKt=|2+CPm@=v8>SNE$(gjSFHZr``tef4pMxwGE+)zr7g;LJX?OqNqRD6
zpTJWa0C1N5^@kNcm!yZtI*9!5^U0~U?{P?Av@z7bFt0|qICIkYox(0@zdM8b6o87=
zuwxLs(VdE`P(N((p|5+X$=OkuJy4qMtMRva1Z59owauX|w<)X|%wtqU1^)L=bJ$l`
z6hbTS;yrZ=9UFOHp*ic|S9m(ANLY42125<B#Bz`NW7v&ObOs-no$N3sd*@;Ou?4zF
z2OcjzVt4T)^Js)_&B*0gXJj=_GkP+cO~M?zSGAV8gzMv+&4zed^KrF@HD)wIw}*K$
z#`k$PI~h_ltck29gl(N;LD}5@=s`(|M!K?xg$^pr4D&=>?5l$h3Kca)MVH*NShv1r
zM<UW720?NQFD6PAqO!4J#*u<${lyEC^jEQ)egORdp?7Z3L0LF3X31FFhArb<Y1!oW
zx3=+ATrSvN#vhiwc!~H?Q^@@yX;+>5cH#YEqZ)n!szW16b=}XrOCT;>iXXJ0jtZ|>
zoY7TKi89X)k^4-uEK6d7zVwHzXB=Pd-6XD56sr>J3t1!lUdd%-f5?jE%xc`U#-lr+
zrzd41v;C|Op0M}1KuSbvf{sJ@48<`!dsSG3LxT1*2;+5o@jdQ6@Y-Al^o_(!u^@!c
z*#}&y+9^@6vNVSW>m6bg-zS=d5gzWPP6XPQ{#4YTI9P|#=k~Rwh?;FpOkuU(u?DeF
z`>nveSiRDE74Us4p%3VX85QUxZN=k-7FxMsAM*3WlnfLJFNv0Q^|16dc(8y8N!W)v
z{(%H}pxQ9F5>(FTi%tRgqdmLlT<J$3C18yAhDk56nJkEtC<%RoH5T~W<I9ParB>-6
zl}>=uhKHlSBt4}9-s-W=m%b^!8D1)3y9=Y10KdeSfXI8S5>PXF+203heoePPue|dJ
zQ}GQOya<T?W!9YX(*};tm>}v9IF&0Y$exCgge9O$<+e|U%x)tk<iIYq=VAY1Bs>}B
zVGYReK*n}P@6Dzwpk`;rt*Y2zXME;FLwPc8=zmtni|g?v?8nYa$FLkSQhWeQ#u`+f
z5QZT526Uj+-7(04vV*lIW!ax&2r9?qz5C1*|E!}FF&xQ(665D9d0qV#NYveeQ)ywZ
z?k`BOlTyFU|FRfB7f1m6NhT(v_Q;`Kg?@495P(rzHA{MU#!zg1CKP$i3jDgAmEslg
zmYt$%Bpke~W!zIIOZ@>S-DCv{UJv}D2rdhMH%iY+-4aT?MuTvo=Mik5gUA<fN>a!l
zyqnT=DdW8aBV3SNTU+m+8rx6b*yMO$mTj}WWixGs>T+7ygCn~4)fqEnLlk4mdB1;Q
zzL1yW0Bx3qCrn8pl=UPGFOjz`tP#J_bf!skB`;yHU1Nn+VSd<q%f(CO_Ci%5r$SV}
z`0CDiYh4SPu?^io#xH_z{d9<s9_WR;OZ456xoGMFrxBG0Sv&H<k<QF-t8VZPXdyUl
zO(fZ<wj<MBle+xy#`B+}ES-7TK43ysq$C+^qGWsSY@bs^YB9=wfrEsgq5oFYsCqP2
zF+Ns)VfDtM!>FnYoLYt(O6Fx7T|$WV#n^J%Ynjw-8U%DjWk}&hIM)%!c#<jbN-Pwa
zcm@jU#_Aq_3~0^DJtCLD{FzvdINPsdON}2=?<Oj+ix7^SjxMT$1VVaw!BDCIZ25bg
z;QgBSnS6$A*_zJIIkh|2*B>ODg7Y?Uy#c~AfOWBoGuJWbB+EPaq@(^I6<wxtK^&hz
ze%fA>m5cu;A|e@-Echh?QFx>8(rP8=6HZPGdwD<n>83Xs7EzJfI^)A;`Wc_swsB#A
zH=I;J9J^cJ3~7gJ#Q{xh!(w`M^o~VrgK)g@4&$HoboY0@UZwp$w;mo`^iXs*=YoXm
z9Ny-uZS=MwN`;w^!W;he1Q4}wm>;<~QL6#ahB8V)5<32S{eIgJ6GSt{XWi4Lv#7@|
zVLIibYEnYrrfaE`va*4ZC^{T=7tl(rx}qD(JlcVsE#N`uI<AnL^Tz{|1NO!(ZE~nm
z7te->4;sjxpO2g<#8dPmu^U~<XF15l5BZ2smXw2G^TZ6s;XTMiQCZ#rH<skQ=v}qL
zR04I8ZRs%mGAp)VX9rYiMot5r>mTbkRJwe9<^~iJI)jw_)HqdY{iaVK6}G+S8vtH>
zGD259)f^cxi@Jdvdx>3EIT&=gk|uC@7cN?C+az{D+#3kYs@vqIEyO+b^ERy%P@DdQ
z*zS=_Ppea!8bM*88%0o5^f_wdae?8Z(7kPG#Ox$i4xCrJVze$Ssf%OaEq*YW%vcNV
zqDpMd!S(p#&V$wKccgIOE)*rdu3(IRe}#=u(TzugI@Y(V1k;Q0n^7mv%_hD0>*VH~
zg!jDuQC**c$&yDkn1d@Kvs=><&qLknTe`$_j*>Q|o~d;4I)6j=e8M2%@xh-N=12@M
zL3V)f;z{-?r)8C^-}L8q%O(`EGkjTh7WE~M_g{hEFNe82Omfl$oHY2rLoj3Xzd+N5
z3;sEBzGWdCPm|JAn>_ZLeFVH9n6lN{GO{Z@i9~$lLTRA`c5fa3l8Yy;XN?9Y!#RCt
z){Z8^hqQmGDfo>H+WSI+Ho2#>w%7WO$t1rd;>n5U98wbYz>R#{lJYJSq>XdjVJCJN
zyrpDZ0`4+q3$zf_T><xo)nRPYk!_!g$_p*;V_H{qZiCjby@uX@_D)M1uR+Q3Zgd5l
zJ?&?IfZH8JJ~_L}H=jN?E-#55Uh^Wo-GoZU2umNt-yKaet=(DTZ404NZipW}X(Rtu
zEvWv7Sa<s3$jGSM)r9<+2$v(~MRUaS!vd8YEdrt}7RBzS5^7>L(b^&L-g-b<l2hbh
z)anCISeVN8nF(|ZZv@q024nl(&xSWxLI0H%P$Zl>a0Gxbi^Q<R#hmb#8%u+q+^pDf
zz;Z<dV9J+MvYq)XE}Q?6T+$MbMg&%9BtfJ_Gm(INbKsE6+PZfNa5!hDe_Pl<zgcwv
z%uJ5WdWPM*>0+c{vJDU$M)kom?qf0zmL%H(MRkvq5>X{tG({%L$l5KQP{1#l!z{%Q
ztQHdfw}w#?5j}4$4vt^rEH88UFi<pX$c23Z-|LYoh$ZW-?puSlV|vb4ur&U1AVP%S
zhVr`(D7bMYJwRJhe%a3-g?Q2=<gKyy-(W3b(tE4b*a_d+m87=`4k<Q=4j={9(8JI-
z5108Pw=yA>l~>Kb)K{o%qY2)<UFs(dJI^n7-FT0jZuT^`?U>ZqU5$7S$y4cyZ``uS
z=P8FHtb6d%{U@$;Xx(E?Z!m-N3*hLF>@dgLx;>5HAJzX{BxCh)7J{&LEn}L$787Pa
zWeXX2%m3R9r*nIh8rOEhvu)+P8G^4n@hJ=#5p)9<84Q~JHn&EiuD(-?BDPU~D45l%
zUyV(awG{Gb&tdUeGDIzgU-~`iF@c>%`yEu!rx=k;yhX%0s5PZm=wNd?ll%qS&bw?h
z&xjvAqpa;hW#hk1uJjCVa|6)qBM9)HPOnn%Er$~t9KOh+q39;mW$)dEErpK1P-3ii
zBZ}dl?rj9?t;5YerPY+&(IzgQqk&$1w>LX$!ejn`1W0X<mkMo+dhbxmR{x(^b-h40
z+|Q-s;)|M_Wr4odLo3GTbr@f&<AR8Ow?5j3Gi7mIu*98mUVJPu_jG#&8jOX@sR{Co
zHZ2!3E`sPpnfSgXHw_*g6o2o62r=mONAQ|n5$1b$t&xY0X0PIQ68#TM<U3;W99&-v
zTVfGLKMg0LFUcQJo8IuO@%duqe{*hfUIWEPf)jdsS>VUwC&P>PlpOfIQ4bfzz>l9x
zUma0pPxRz4Ah>ULd`NZsE823Zo$wbJoNV64C|g#j(txZ?#~L;qfifl*s;v*t6T?aZ
z`F-Y;n1fV(eY5y$nXTLc6kdCuoS;9R*~wMCPz&Kq1!off9GNyJ^cJP&(m4#ZV<KKF
zt}85V{e*C=Ah?KJI~7$34>@SMd7g{bnUz-5KesoZW7I@3(3&<vDIY1uS%Q724O)>b
zcldr93J$UzKRvYq{o#!!8sNK(dTQ~9@^N<lH#ueCA8kk3kqTdmTVnK#Kuk(g;}FP&
zDhdbVYpkhSvXSJBh980PxN&^x7?58!<IU}phnZZ(20|%mV!jJbype_Zt6WYgiB56C
zCgJCYeK|HE@AL~hVloFkr~iiCV_$&e;l!8%jO_$zK<_Qt>|4rUn(j0u#9trsXYV;;
z3NZ3Ect%q`M?d}Cf#}_S7wk^j)8<S*{|Ai2W4It9`1Iip(Sd&k#eZ!THqbB5bc5V%
zO6kmczizdp?NJhnh!1=b_tjU&Oex|VXbeAN^El!)TWDTC8kpPP9<LI}F8&xGZ_kRp
zpbJdsIjX)K)*#HG?b+)u`*QLk1b5L@cc#Yf6*3&8&G(Cv<E?_4gGb7&qm8~Z?_(Qr
z4D{vK9n2boZsR5Da`Bb#;O(17NU^Ym&$BUZJTNGW>nJLF8P;DyCH=|$y{1e%r*>S>
zsvvPv(etaQkD$VQb(MH}1G*3bPyhBp`zy36`7;6}U@Ye4J*IB}O}|JV)3Cj+Kqn%d
zUpKVtb8@1a5V9+=1beSJOfgx9`H+;O>JD1~&(}@e=qgBclE*m$2P-|b)2Li)v$yQ@
zwu9I_O!r)uD)<2EA^bverAg5uH(GX}RoP}o#PB)xX9G?rV#`2RrV4iJJ<=>oIi=~J
zRU|<w1}r<v7cO`sbI`m<fdPh5wU(;Uf(SS_fRc1H#<#=^EG@@>hyH}j@g?CXc;2$W
zu3&Cli^_BcpKptcNW>A!r^8E?d@Tg7$;tfpS(G44#19D!0>NHyO^pFoWSKtnvv&5)
zn&BoEvO`mp|B=j!?i7nO6JS5<7h1EzZ(ZZb<Ty$kf>*57xosJ-I@NkEXUMQtn5Bx<
zvkEqD=b0~^8HOL?s-k>b&?(4KjWehpytY|a+>aKG!fGL~XHV_DKV$#%Xr}lp(3^BI
zzAl>Ee6{yy6&lCbInq-<c2Q0rdtuE9jXCQFU}v}HJqtM`Hfj}uM7R^g){7$SCy6Cu
zFMLWb3TFq?>W4I%)syC_vE5xe{o8X5J0O7drC_m<e}azs<1xp}ejpm}ppY)#7c)IM
zHjBom?Pu#Jw5s>{8g#{Zn=O~;_%T)XQ&gvM3UV9WxPhzwNSa<jNfW-y@9Br}6+nYE
z8_*9o<k;iJ&I$RFqLQa*D!*75&+$H+_35aMyqM5^1r^2J9<Mx_F1&N*f`fJ{FrV;+
z)@A#P9tIoKK>L+7et_wD7)r*h-5m<=wy~cHDu`bXT{m78lQWZmS;nGAi0RTF*0c=@
zVjqTKj%O0mdCd!Rwz(cuMj4z5*++3?b6i1?Jn#>`S2^i`Z!|(eA&w%L!et{X^&d0c
zn0g4f=lUX816K%5)L}l;uG*TV?M4nvnlWx7J0=7;?FG;@6kEgfK!~%!x4a)K|K0()
z=X&_q9sE%T4YdV)>F&ad#HWAkB(DTcB)r_fhnmv9DP)JCXqEsCVqe<BAT_pZcfdm~
z-5<1x*>e8$9Hb!eA(?1NpN2j9rcfBaFcb&mBW@;+sUDyta%uG}oEZ+(q6f*MuK&Cq
z)y3C=6(?to&>5~88OJp_+KNU#*<k}U@^q$tc@5K@Fo5&tC#!8_k)+lY-`)==Hw)dS
zoN*cgzND_FFHwk<m#n4}&N3$2z-$670mo6X*R>$j@vrq|B5!hYC_bTZ2ffZaY90gP
z$R6c)4|W6fe(o><vAHn(P~*#C<W7&&^(DAmGC>F&%L(g2mfb5}8tl*3OgfL|*<J5{
z8!#|!=0+8O$Sd-YzW1_8)l7xGJ^NGUlto0(BX1N8Qnb1=LjljHBJYDCp%j?isB(^5
z?1934U(>C_!q5U3c_`dvC=4uj3b8F*t{$<8HykyrvYCF5VEW?G9P;_#LJq)4ymi)8
z)$#Dz8P*VlAtMtiKs0Pj!1$>a;rKa7su(mj0YK4?-5vw1_}V=?{F^U!^L93H$k!<;
zP3Sl^GF#(_2d=y8OV#bat|ziM5#0|ae;iwMUWz=Qa{gD`*1ODT31FIj8x0lTTLtcf
z>QeleO<9S^{*LJ~{BP4k7vBwG&X$0X0ttkS(Xk{KI#|MkcMXtNiW>hlLURP2g00kl
z*lcXW@4uAYv1xs#5T2SPIEpXY-eHEmaN<_58K$BpI&$7Z;G|IW4T*vB4y2bu;)ixc
zX*wF!&h6;Lg8tv^w&?#A^evDB&}^DaeaI5N{`Q@;7QK|htfv4KPPvXJ2k;rd2l>+d
zO9fA>>^D03lT%Wv1qfMx?e1D|IZqny0AnaPz8Wok>Q1l6$5u%mAn-i~iZj?aIH8O>
zwVIylM!FL@ypb6h_)JVp|NO#TZvPTEz#Pq*^q20#|NYMY$?XDb^IxB9{{PQk{U0Xt
z|H(Z6pBPcQTuo(-S<a0kPV;el>MKvOTeUzEE8%~Jwc3!ig1&30?{TzDgjE{^sV*Ul
z)90Uc&sTpfO7|dWwv*SB2+dUc&&a~#?<|=@{|(;O9*3pMax;k*hFIz<-KfHC52>Mx
z_&t;5o@pjFtBc$D;;$o!^kjhEkrA@^+(WlFb;l&g{|-E-9&gs@b(-8U6NR{s8p_p>
zQ`B}Y1+U3wo)t=LvXW)EhhMi{y@C=yd`!d`T2>gY{EpK@;M+|Us~ZuQvikm2%mE(J
z30s%e0@&B~8ok(A^!8WKm54rc2K8PIK|ffY@bT%1UcBPd^_Z^7*Z7^n6n#WJ57$x}
zt2=^%ll~P^V`+I^C_Y|Er@JZ&%iir-EkG|2CR%cCet6I(cA`fB?q*og!y;P`h}{hI
z2~&ZYe>n3qd=coNQ-q4jCNN+0GuJ)5R<R~8oEjj~^$?52U{zgZIdTd?DxzCCp7o<X
zHug}ejT5CdDpUkwf24x5*e<iMa%Gp3tD(7d8<tjXAKX0{wpohDW%fcVhOrsaN-dd0
z&#R!wuD{4&orejxQxL&5U>nq-4ZnA*Oy8sT3|Zo<_`OGYz?3{C1ofmN-mYLHdUPtO
zsGl6O(lt@f+=qmYE^>0AkJ_~XiiAGljw01ZG>U_gvnzVjedX|05gGb3%(@}QpTa|T
zIq`;(=JN6J=zf}G=sO6%ZqF=M*!?(k(@eq{9td!w$~ZneDay`AceJZCN$$kT)XDVG
zubWuXg-<yTI?z~S!t9x~hLcOLg;2UU0JDD;QNj-*qn=ky+5mNt2q8SM3l4y<BMx8t
zE;=+;OucPUxAkmB@~!4v<fP4RW7?|%IUOktPFAQ6KLph@>WVfNcm)ob$pI{{Y>!Oo
zIx8*cQ~QhjAXao){a5o3AzlOyZ?_VBi=KVO{>VtgmY9rH1pgS-oMvo7g#PJy#EhPn
z5)!ip0=%<lYL_uwi(q-?lkqT>tyL|IDj=B$h5>Jo#3gpM`yV*;!(|cVbwjFq1c2nV
z^3SV+#o=$C5JfcbbY4{UEUY#<T-1ccKN<BqrxR^H9z-6`T&!Q*2*G5FD?1+>!)2#g
zEQl~lI@<9m%>`^*DAW6n0X_EG7cw7eOU?|>aMv-ysOT(Yoh+ym%)eMO)fht53$3sI
zqEU*uSRUV15gPyBD6HLy+VAfSmmc*&nl(xZIh*P<Ya^nwdzIF|lfkU-3P0(`*gB;H
zV^0(HJR_#OD?EG{ggQ0;KWJT@Z-Qn3eatQ%2VI?%WxS+9F<=CzHby8n!rW?*XG+}k
z{9oj-2{jJIBI!Z>EAKZ4Ll0pLKr>C5lI{}wQ9bat0LbIcT91=msNb8)OwK)1;c2HN
z$g6U};oV=9JWfId!JbkGTMfu?{8G0}d_C#+)>apkYs8+;<V%L!9Zn}PFz~r%t1XD>
zjDOZ@yQ)~ykF%L#WQ^?Jst%ZqtboIeAJh!9?|fQw8j-v21SodLsU1iiF_P9U`S8?-
z_>&uc!P;@>gylt?So@)Sy9z%X9XoH|R8Am*@EfW7iYg6G**S(z6HpN<^th)zKN+s^
z)uJk^xkOzQ#GFe!nX=Sw&K8;z<3=zs2CCT$52Yu4oF`5T?LfG&3+P7tbn+afM%<b8
zOzt(_fq2mTQEs(z#xWKl(kbJotrAA_cz2|e*GD=1@6!Y4EHaa$z3_(qbC_4Jm`Sh;
zhmA2Vrc1(6dK-fW?Kk@V5~2J?-ZS6hP~Ht1dJvcNZZMj}b}>b<-`kCBC_$9O0faBT
z+u-<MmkxVm#Tl2j+%lQNku^NQNvVnGtNJeY8P!aQxP(4F5k~;wAU&-{`}fvYQegTj
zokfvL#TBDU^2f*zjIZ?9wzd*?^cST}0P0)(0M<+V1gnFxoG5XmGsnPw?C8dprV1Jv
zTXEFQNpwGKdzlzgu~)oKcl$2tRqaG91q@QPu;9=8+ZdG$IG)qfc*D;XN^Yx<QlD1e
zUEA48{5T+C5CM4y(2=7CXFs!)Jj@-X=Kp6XDfP`ib+DQ-9ZN_mlc68Otty=*b`r<E
zp&J$rm&KXkt5<di5r&@pm2FmPz*j_g%m!*7|LGIx=Wm-}fE29m7R!TaR=*DSkO#SB
zE_Q(tvUqPxM--`kL(*QD;M^d`^1%y#H`y!x1A}h3Ho<+$=1-ooGvu%MU!3gW4}fl#
zH<H5$2_tH{DW9gc0~X-cm_8-6D(tJD_ven8$Vi)RLuL;5Jn55oi$o6UG&9MsDFzPG
zJ1zD%l`q}+S#qSKz_=D^x>D6+Ep*Fue&yc0xXQ_8^is{L>xAf{nw%9!kcdx(R~8A?
zUIOfhgoCt^@p;KS8f_VUEhRu3eV=h;DBDj3<>R&lYH6x)V6P@hiW;M=XGv65U~24!
zX=_%;3x*rbFLoRzQK-EEI5yB`q)RezkPN6!`DM%B92)YxxKv)2`BGW@H92We>f6NF
zsj+dgzCU9!uY1{6=lAc`Z|nW6IF|NvHfsS?(qz9W_QG-AlP)KXpR}o860Tnd`bhx`
zuzR*P8t4f_TS2Sgw@i_YiZEPRd{<F(Ge(j34da|^w=pmE3sairFkW&fMadQ5<^$gW
zShiQ)r@u4$pR_7p0AduNf;FgHKL0v5;V7eXEDAa=*POG3rae%7yf!Wud@npJWqffH
zA9$*dlB(ea1$ccwNr`LDS2k0_7tE&>Eq~h(Emd;<0?wYVpQ8GvG_!c{P}wrtt0}YN
zE%Z?jaTY|4DX#e1-J$qVOZ)26hhhF?vVEx`C3<y(_};Jo(Lk47byN=AR8~5moOnf+
z_LC*G=hEIjjQf9WOv@G#l9KxTHWU2Wjw5gfnp9JSP6-ct&&EdkS#TcmN}^s(hW{wo
zbjgy{(x%eDFD3Ohvybg9C&1Cs18L-eDD3|@SNkR3A`HB`?}#NXGe>E7a(B9l6w{{l
z4Dc>{DI*^RC|2NoA6eTblRh0QV_b4OddZKxO`jum%YP=v#<mRb&KUpEbTnN_rlwmb
zI6u#t<5WqO5)t)bhZ0?9t?0gl+v9RA&|@z)q7R{ZWNnX<v{Pr}X@H=561ago)ha?&
zH4gM9uR6C>)7}<c7yfKk7#o|TQmWgJ`kDt|vs8&U2P+*H%Pn!pb?9ZD-)HZ=6Jhj1
zq(VJ-8w9Q{GBqY9V}9{Mcu}2n7-Rlca4)W}S*e}EAuo`%i+fZ1MT7{Bj9#RGtK!a>
z<i}@N$e){22TEGh@hoTBi=@F-?Jk+gtf?M38PeaYk4l+xjWrrX9u!}54d8&`&FC<L
zr#oDrAD;qx>hySn#_fsWK1j6_&#41*VE>e2t*Qo@p5_8Gf-%bzIbuG(|Hs?LrbcvN
z0K9GVIl=1{Ha|h|X^0AuRWcP~C_wzG31C~kKH+_ia<qZChDv+zJ{F5t%Y9nuQDkc_
zuJZkcY(RIx)W#z8AEWr~-sJ;YxB&4<g8fF)13<msz{FlJHk#cVI|$5)Qsaxu1N13T
zZfW3pT5JvxT&|Q<%cT0>24L!@ll)t(!T7CZ+T9JC-C=i7$a7591?HRG=F1JI8@qey
z7Uv4ZZwUuLqK-D3@`E|W3i90aJWW~>Vq9uaG1AR_ouC$eqLOuhe>A1h@Bdyohh@r%
zHq|&F(T2bQawYs2hg^C%wl^NgUl~9qv%h~1r1^FBtC880?e_z0ViUkA94~zGGjPT+
zk52s>^KCa6!t61LUm|oM=L0iuc4BSXzE!snyCy*{M5nZ9KD);GFn3lh79^i=G!Mg_
zB`8{I&X2I_5EyBq_1l;hM)Q+1I$`?t<DeZ!%ObPJfceU)KG&tT@p!o?iwIqD2=qra
zmT+)~q*BHHZe5^VaQ67>Mg?iaD(EROqNQ8XoHxCZ0~mtD4Ln6HGw#>b`HBJ7lGbVC
zGj`&l2p|o0rdZm5uPsMl{FDn|ouVYdDe0oAKgye^NoXw3Cg6Uo2=uzJH`x0B<1O2u
zCiBF8mkb@Q4Z6(ILDw1zmP|fc(7aLiG|B4B3*|@c8bD?FVg)WBpO*;s9!;9k7(pkx
zlSIps73+KJAYpCI*3lCUPDTzd!$&>rA#uzHm6kD*=w)Xl%0PeL#l5_sjT~3xEphE?
zh-y<AnI%w$Wej&*UAkk9qe;wui;T=>f)bO_6Cj&!3O7d$)yfv=?VT;%{o6o>lz@ki
zEmAI89_}R8S4BKcFj{zMh8cr0AwmrRwbV>#8$j2f*vm^m{U)M|@Y9y&)}so8gI=@P
zM2zjo2>h&aevjkLq)QXGuGBnC{C%A2jUtM=rz9SE&W9}<<7%pC#$(85ngVW#4&_?-
z<A|*-BzT4-zkO|p?_RaF^sCM=cAg+XsRmUrAStz#l1@(pUrI$j+p}%BQ%M9YN2f9-
z1U>P%9GQH6p%DWFo<AOGNkod3tpE1*Cwg3mMaH1V)00LZ1PL%;R}gpf|3FW4z2<{d
zognejhu#7v(7`g1T5^?njD<zg5*9;JG%uP^C4C<V`^Vy%Cyo8bG;lt0_6onaQz8#*
zUApS|H<@B55aH^MOFcw2){FJ)Y15;7NXfR-iju^xig8ahFk8bI;Zt4FC6`DnbBD(r
z^$EpaFWYI;O#E*`e;n4J$^1`tY~)10Onr{Hd!7={V-(xyh5z=1Tc;xSIF)q$Vamq>
z2C5C79J{a)bEYGYC?pS3rvh(1<M^iu14Akc?{Rgl#jPc5A}`i-nxi;P34Q+YLiFYH
zX`(*1z&(dV*KXv4sP7x8_o`MN`0wWD#T8%BD_5TTJmbfpL@9iVP^eY0SoT5TXmf`H
zBm8809Kq<eXq=Xu0j~hpdJQ%w_JN^@WiA{I$3!?eIf~%0V>Qv&PESxr4fnyrEBn5I
zJvn8p+<-Lx;>{yTUI}>JGOcZhWRGy_;hdcCW)Xz620|D+z!ka^x*_OhcDrH5_I2&O
z(Xk5XoVq!Eb<tR=TPf=21sM7!$w+68+-q3SM5Jf{^ODHlDQ?g$hxRU<eGbOwHP@f!
zPF}OdXqgiWbYf|PxL!gL7sL;sGDLUxxP=+vmmviNG=#GP(!e*BKT(MM9%$M|F5FW>
z0q9HX7&|AmdpG!oqwbSmM07+Rt!f{45riI)YkeP%I(NfY$~-4AL6P>GkAbd($G(qc
zpl5^0bx^@r;FgZnS7P$ulfd{Tzp23e?Sofe5jT^tD2mm<zH0pL`On91%o3_QKeRG0
z+}-%vLrJ?pdoPGnJ4+(h*a0U^3TyX^Zz(zk%?WArU10?eCCwFQ4QBP`d)SDp))eO1
z1goc|AeWz#YsRPjU&i4G>W;5OuSuQ-y?R{MVjHnUO`L+?gkyC+SDV&fM_oFbA~KT6
z=$GuvCV1uvA9M2`?HqK!pcXv2cfL7q^r__CVL3lrZw}&G*VSKqC*NxkX@nfsvF1-M
zu&xbOi1?uI9HSb5M|*AxeEBm(501tIul1a&hYg;u66Sn+7Yxo#9X%yH9}>Py#HTDh
zGD2?p!^{!+GhvAvQ_?_tWd29U0pWKy1o?Hbd8qVtYcZf5PocYQ@Kv`8v>We5;1o9#
z=<kBT>j!Xvqv!Pp`?uXq(ej%&`Q<Blqnk7Lmpk9hF-nt{E72W)Vx7x&9m5A3*8M%_
zM=o+mxN{`v(fegUfz@ibN@agf^z9&l>{Jv%XwiDNVT(~jcB8==lf<8mV@@rWziuk<
zr9@ifH8RHU2@~<<@GWHPa?NN8)kxQGv_N4hWUmggvwSsz=+VmtHDo*#m3Ifmf4B@h
zbp7*``gUN=%XjkG-fs*u?s34}?i1c(Wc}Bj#70&*b8i-1%i%%&m%pKxJxLE*>ya3w
z0z&LFM^0Xcmr4;E-4*V4H=x{a7d(ud!8SXa6+KNL=iwbeZL|N2y|;>rD{2-7lOVy}
z-JRgB!KH!V5S#`YCqQru79hC06Wrb1gS$I4?u|na-+%8~|NYlI%{<M+Je;-8L)EU@
z`_$gM>{K}hlLjh2_&0UMXEz^W`e$6!L2bJ`63Vh~*&h4pqc7Jc{Q^iQ-KpO<i~eIq
z-2>m^dOxKzYPKkN;AKVkG16zkV_$50yy}I%-NQR3eFG}=GBf#V)45AOxqmM9x)EO*
z@uBH;|M}tNeDMn0b<6Z+g5K@aWWoBu`Z(w{_${Bmq^umROG49l^1TH}m2mVT6sde2
zVzTjg0?o*NtkJI94Pd)e)E0iy8{9o(dtA13dGBLl8%dZdDunl5ao{o*ZP)!q3R|a~
zhTiK1L3;Y?n5nkfN$v8gs{{UtD3r{|tN){&)mAHTANsmY%dGPH)%VTKFD4r_mV5FR
z!3Pb`KA<-O@`G=niOFIL6ps7hSs~Bh#p~tI=+5J>lUqEK47uK9Dq;^I`fe8$!Y}gM
zrF;hRVg{=__2hfw<PTu|$KkEP3uC{x)CisDI4|xuy}+(6msjB=@X5b@m8@$)L+7Ka
z%aDsloG$MJm-Z8N(9OD-8zg_@IgzwO)<Ed&f_(SUPx{v5(XAsz-S;%J{bK6gal&ik
zpU<Qxc9rPGzxK5Y@G;+upmx{x;`UNSJM;{i#`pGef8|rOHbD=aW{id?st86`y+jBr
zP!*7IcqU9fuWGmK!cEqccL006$eA9uXRp&ql0}95?Dqm`*RC|PN1%d((O7HIeB@Vw
ztR}GK-pe1>ek?G<zocuUJn-#yk3*e%tDrlf7q3G}#L#E$+ScFu9y%>Qqq-h7p|p$D
zO{a&`xsoB!s{FDX*TX3j_~(|LTIV0xU17u^(tTj|_TVaMvgq!gAwF@R(b<-Ew-~Pl
zH>WFU{3gFU$l--(2rhaGy)y(EuldNqHzeAd>iQO6AHI8F(noOjL~LwT)Dv6C$lWDv
zyLD9Q!UKQNJaOyI_lq9{I?ncbsGhRnw41M`JeZHk+0`3G7;@7HeUsG}dRv`$BM)=_
z;?8Nmi@fH3KZbtM`rC&5<@)jEs$=oG65YF|B&jk3YHA227MrvIqhF@7*?)q0dez7r
zk74_<YG-sUzXoX`*3;D8>IOR)d!1u_neg}O7zEj;Brb^zS7eI;+P6%<T>Y3ZR{`BW
z@#sGXrzQ`t>wCRodL|q6;2JT8A{Y$}e0LYr7OvPyI;pbLH1@xOEIQ>2gcOPGm!Xq&
zmR|V5q8UJyH#RjCpHIEc(XSPuPtVQn5ahMnsr;@Y=o-HB{Irexg&&lGPybT5at*m$
zIGUNQCD>;3#`pcuCv1Os{JtRwCe*HWhLA1%30L_J2D98QJ3e*ZrIw;Q^`X{n4}P_O
zeepja6Tvca{4YuD5l+?adQv-on|gj2_S%@-%(zA}ym^sDC!1(FB|J6k`W-#?rE2vJ
zM(oGD%_C&e2Nw(_nDRT?+`qtCyo^QnO}O{l;@`YpS!|r!(=A^gm3_O-KjZ3p#xi{S
z4eg(7|IrH;^;*@wimU@aCT7R>NRa=gmf^Z{_FlCkDee;V`t)1TuX!(V?&X-cXboyP
zKm7cu2_2fOywBm|SA|~7$|cXYJ;5eLHp2X{13r!~ZFk!sA{3m21BtAFW$b-76Nrh3
z&zf(Azz>fXO}6W}&Cq|EP3hoOrz3NC4}MO8Nynbx$7u5who!dY_ruQ{CI|VO(9gpk
zSXs|Nu>s8!hi%Zt!_fO4rbP3(>U&ur-styAGWti{c|QhyWlI@|x;UWswWKG1!xtN4
z>$<Naalgc9@_Au>*=sTytn(`%-F18nHw<`8-`uuy6Ai`}lg-?F<Eq+Ab!(4o@-08`
zU~5$~VxVun<MR!@c=8haw<`81jVA2=Yqj#3+V_N%ukK}xx#dBX<T^b2@u%zNy9B(*
zX75hr!g%jBK|hK;E`zU+>w;nU)`xQ|hGZHIJeF-d24)&w4klljE}*9!Yj-1V*M-@S
zK(w>-ql`Y;K2pDK;;v_&_P24r5hXX$ohbvp>o@2_o41)0$7Gi+<{!|HM|Bn>_xshK
zW`K@S7w8+l2l$GuVkX1snfz+E>yE?T>k{L$=(U5#u3u~7U+q_M#gSWP+x8o<$85G=
zl4J<lk9EJ-p_iwjq=&`}AJpexZ;G4UCu}Y8(1%&Ee{G^}DU#?LqtAYjp~nW|?0)pu
zKJDx|W>2g!WvwR(mm67kD9B;Adj;66b0F-yHPjXU*9}S~c9ktQ;4c}%|6^V6VI&)R
zVe9)(<@)`9$lsF>n~!g@TU%{V+A805bZ(o=Zm`&xp5HUfS_||@_`aO&rqG9N`#9WT
z`>gAp1AfxI@=iSMrumvMT>p8s>t7L@FNbzfsWwzvY*0l6a`SRR{;a$4R^laayY<oD
zb4Uz~JJ}xK0IgVsNH=`!>OBzhOXKUf5BH4rz2Eb?)vU@MdhqrU(0?(g+She!kLT;y
zk3aRBM|XlGhtWK<%{1I3@wNLm`QFy8-G;q?3%(alzgizeZNmqG$!yIHh&tQOvEK`r
zo|d%}Hk@sLe$)P8*x^n1Lf;2b?4eX7`mJ>u^iaXqo~8}W-`u6n7R%Xutn_;ql$J5)
zZ0G6Z&W)<6i|A&^?)yK+ljO!{s#(iJ*Ab^)#{(H+PwalT^zU;XbbBLo@cdUqaH;de
zOzvOz{BqF6wB*qj1NME6F?y|17kv;b^1f7|dl^)5xIWd6c$_*XgYG?(k0)~7;{SrA
zMf*|x081L3B@=(?n4@w7xP!qTg8Imbt`4I6YWEB9f(+0j9+x+ZTW)fPeaMNg{y{~Y
zMw@Qk-}1G-QD?tZP;TA>sW%`c-LL4<=MQs1n;vdvlkp<>!ha$S&wCcmaLJ$ax88XA
z{`z$dHQ7aY?24s1^_|61-)nkb@-@1TG}&J>FTR%_4sn3>?szxKejZ*zvB*wR2r+d#
z?)dg0Y#nlUthax=YE`~??Gx|YWjn}HwqJz&VN;EKow=C3!c}=r&K5ZuTI{?Mc2rSB
z%A=y1U%&43gLGW4-6a3Lc%Bj4Me<mVhE}dM3*Hn``%zRGin~8maZ}6D_!{~iEeBV>
zAU$U0b5(UfMXnXa9&B6OjZ`jujxO4y3~zy5tc#VtEigVh8~1|_vCdjgGn+n9!r%};
zJ$rr9kp}27VXcFS^p}^^Xgb`@*Xv#KG!MOYMc*?IL)X{-p|tnrJeOT@+D47qPp_S)
zKF7>^pp7fc$A;vkctdAS8K+%6D7wd^Iq07Pd2SaKSoogj`UaX$%FNjc=^8o7_G>J^
zeXe!7!@c(DdQyG7OwU?lqEvw@`0ku`3WM>XY8!XaT?St^7USP~MO#jv{qjY?1@=!|
z7w*|FGj1YE7bl&Hdu?y~vMQY+_kQ1ek3MI!f=|!o-y36ltpIg>P817tJSy5xFl);`
zLwCqW_uC*`eovCoPHE8^S4v>t6Fn%Khexa7t55Q9xbI7oa;r+TRi?=3z1Xp1)&q=(
z$NOqN?Us0l1)Ln(dw|7!G`|PR`R%W<>E1;DCWaohd=lDwek<1ml6Ks<)P6D&TsR!2
z-&=g^2~Trrx!A$~hYoE^J%Ju&Cx(M7WB59tKG!#Xt0GTKi<8>kot<OOf^CD0WVtvm
z!zc84lk4{)dfEBGK2FPJn=}{cnNc^16Gmi7d|mybh0h|Mo15`ISuz4mJ>`F@Ld)Nj
zYJtqJ2SQyJ+G0$f%VdQ@V!a=fCS!b%eRMr%qDO_IeN)+>NRy$%h>w?Do{6F%%+Gsy
z4zKba&qdJic2AQK?@g~+N1s&i;Xe~d9q@5~(P6mmiwGq%I5S2)yOvJdXEEtzRF7Fe
zI3Lfu_MPNf1FP6mId-knbgB1hSIH^|ml!b_#LFJaXTt@wkTP?oa(LW!5rE3mo9)N(
zJTCb0$ErXd3p3odf483%hiXuJ^u?<Tm%T~S`aP-e?&;!Z$0X8MjB5J%u`E`-vC`jm
z>%6y%CB81F&&HS#qGVRljlX!glE2A{*YuV~>j0@&w(1z&v&H%7c9_tJJsa3Ky~Qgd
zfU{$YI^7k0cRQ|X45t2L25g&1?Un6k{yhB8uA@gPMZb<$@ucN@A<y2A7jxUr?>a~_
z+WG^6@)_Rtv^_$=J{{=Ik|At?tG@naeveceP}$ArheoXb!iTElV#^=@XQcmJvg?ks
zt73zrihk|L%Ki>MS9Ij<_kODO59uTq4+C(9_pbws&+Px%;$8k<#Q#%t{J*gt*?c41
z-s$Wh26R1L?kqlIyaP>bpZt#6S^Y1d|DuLQnuT$w;r)J(|2OV`Uiz><8T?xLzkz+4
zd*|qX0A2=(<NrrB{hxsJ|7EEEPe5VHDY)YQ1955_@qcjp-y^CZ{QPMBAMXD*1$T*V
zzYg@A|1~FJQTKm(8`lugpm&z1n)l)q-8XKs?jfQP{|n`^>A$p*scmA|WPK;Z_h;n4
zR_4FhdrLNvlIq;}{2!J2|DYlA|CY7?ruV<c`~S|W{r{}}525^jBx_qIVL}dF(L(3V
z*^qqVcIY~}8DvUCNB!5Y8j3@50N|op4u@R^s!c^8jzaJi0|lO@Yxa;F^OXdJ>Zz3h
zp2ieIybI7zaR>_LH}gFGpcwwoHi}O8#~!7v17eBH7;9E9a_7EK^DFYAGc+?B=2Z(Y
zHbm>G^N7%I98oa&+2&T*+97o?4dHang{Uag6?6A0=<1R`Jv~HMcSG7lv4iYU;{%X&
zFn|MZ)hMs4IkYLO_D~g_?-1fToqoc#dB5g(6SC}h9E{?63beW>yUXxJQWV0!6bZ+q
z1bl+%5I97a6!Pp|@#@)>0eE?r4234#o0Ub@WsWhmtZj>D8-BWT=wLHs7NLk_X-zoK
zCnh^@#83Y1TI#z)0&L$lXG)jD!3^?uG#zy+;2=nlsHEE3J6~?8+PCgvG?57Sk4@GI
ztW(*l90?8ma86`jw$PByXplqj1XWNf)Au8M4Z^UA2-5*pdurEk?ED=JGtTV6i*oY7
z2O0bj?Fhz2TXUyIHtApZF>Lmo8jI1pXJ_V<bXBw)Jcva4=u&)|S$D0a|LultYrD<C
zgk1*~({_wmwJJeprd7hBpq_6o#nsMfrj%!IliQ=bzP-E#f!8O9V&XEUj6e(`Ap9H<
z-h8s~{5ZV2IkB@Q>^r^tL{<ZjFS06WvRV1=#|+F`_P;<!fFr=aYUmjVb<9dk6gU8T
z&ACU3{)~X^rVAfgseqSW7rDTSp08wcTjfIu7_{G%iZtT?jNB#gM+O}wSmB1MsShzy
zE(8(O;G7CwkYqj1IBeJA>p$`|aG?9%d7|k<9ks~YKO=sufd@iFxhG?J2u`Xej5B&X
zqMgxBNCtHxA_-7B87@1>%)3Gw$ZT5`DNQp5V#H>#PSU<9A-E_NmkrW{5q&Vlkw5?x
zsncQm+#SnHBk;J8BDhe@;ntwd&59Q@NsxxQP{2~k_}ELqhQ)?johoW-qevQCT^Lr!
z@{<BH=>Hf?vA>tOT-4nr?L1XY61faN725rj1+il@HNRjau&G&~f-i4)s;p1Ad8${&
zpqjmksen+<LR@+mc;~Ro;k0%Bw0!inC)+#$cF@DKH+JKSl;0DSofuSG7tKy1e!?P*
zZhSoaEIK$1lkI;+H2*V*qxYw99*b~zI1IcqZ3tTQ!_v^XalKvqx(fjy9+6k3dk<W-
zY%;T{x(HwM=*Q7IL=g657$?H974gkI+-)nuw^5$le8!DXev%PB3_gq<>Br_^0%L5l
z!Ig%0ig6x==|}lR33P1a*RLaQug@(f3+c;lqoN8Ij)SFkTmcO-izOd{C<{aZ==#^0
z+BLg#b<O+E*}HaKQ2SEGpq#r!!XF#J3KM5wdZ29dRR&l%y3IkX`}LljW-zbi^Z5RV
zoBpj46($g6!^`$N_TxT$Ac0kuWXBp8v6rvi4c0pO%^TyRgZzoGpVyPF^S3zLGPyQG
zcASnlMK&*_&DVRwA!fsyKUF1>k6X_hlSVHiLJz&;d|a8zv$cPM2$tHs@V(qFx<6u3
zClc7VL&E1c*!lQmO?*1d&dinZ2UG6aExy=3Yv1{-kQZgI-t~SD$F*9>u`;xrQMb4~
zAm{+m`8hmE0WI%2f$T>+#5O;D(r(?Ju2bC#$IChJ#>Fsa&s`Uc`^VawUN^jL2U8B*
z#z6!ygH)KjGQ@x~j%351m__I7k&!(8-B%4tWc=<J`l%DF*?i-5anPlgA)@j+cN@ov
zo$?;ta5ubUKMRzFgRc2FqQJLI<@L0?=kKfWSx2d#rOg&vuqAMg6<!H!P;Z>0!oLWn
z(~I3$Em@D<ZNuEUo)zhA1b?lT>X02VlNXVuqoyHXQ1}72^a(g$8Fk*3Ufr2H#Iu>d
z)<iDGhyH1T;P+>8v)xNdsUWA^p`D{5RhEmwEP&$%5FuiLmoQU@776F(CIR_|z=&?u
znnTL=dm!E5&-20#1cDuRIg)71#r(AvWO(OqaG|(`m8q3vf6?H%n(}qiF=E+i%SmNI
z3AB6HsO*PILT`?aRBU(*v@uh!3IM<mnko<AH)!ea>94(JxKCa^U$FB(XPnDZ$>EgB
z8#@Ds7Z!N?Dlj|t&6PVIs3w~CMvT;nZy>#VcE6sk(ET#lystz(Uq3oGnoVVc=I^Ra
zyN_oJ(@74gtSC88bPfm2X<7hO0;Nh-k*%pj6a{={)TH@?jdTS*;+gFPa5loSvXW#(
z_G))v!fg%{z$C$!mP?5z=OpYQbklN}8tbJxS|rT!!FlTscAmyGa5R`T@Ufvczw-{A
zkB`8dq^MyoX22Wv_nU{`yYcWTbRacSG*s~KF6$r!_Tni*Nh3s!7cqBd+Y5cmQ*p^u
z+<|>^w9+7>)`5-L3-55;<8=+Aq!2fLI>?%cz}~jb!!&Cgn;0k1<3MH=(8Nbrkxdby
z6>ALxhL!B!Wyk~({zGQ-z+&^*P8_=+q0q{?m76S$-(lUE8F18!lvF^V5yQZSJrVKY
z+d^py5dibs{hh=|t8~2QujZ&s#ZjlxBCugZmP}FRFs-gEsFpZQ1i;ZC6wW}5AxtLa
z@UwH6vENBu5QDCmY=+1!7ITJDRayd`9Xc_`5^C!;atot+7Y=xDWO7&-6nHFEDFB7#
zWMVxt8@nv0D+BH>a@#1mS6|da(_Yuxy^1KLlY=mBEV6wo@rxi*D5`#Dkd4oAFqU5<
zf>-MHSj*7YdW;;<JuYXPw*QH4B;ri$Du%)X;5e5L>eX4D_jPUU?!$O7&Iqh?wFS$i
z^f&9+wbh6eO;B_HL=4Iv;Q|l9__{2fI-hs>EjY=z470x}HHEn-krrxbQUGjvxv?aq
z6TWAwmJN~iC4|^_L<Tl(493FTUsw2DJa^IY9^72`(YyKV1V$#ZYh|lb5hR~U`pU@r
z4@AtGvM%V-e3;<wNo$D8W8q=MqBi|B*s}LP@WV<HIm^-%DcWq%zF{*m=PTgJFhVH|
zx8av5%Ysb6h`%xb@lVdADsXD9Gt`p*prWV0_)didFPw)ner(nz&)@ayEiH0U=E!U|
z)O4ctQlgyAQdlJ&TEaW@fkQ?npHlX`E&190^%L>^x^xSVsV>S5-qWBdus^hu`MbgD
zAc85Yre>W0DVIz!_#kDqDLNCJHB{;R#n|!QP~TySe}&4wR{Pp+^GXeE&66nm>C))i
zqWaFy0L)2C;a<VqQ%LdgFJ@AUo$3ZFQq@$EXxo#UE|pHUanqO#hBU`=yJsRv<%~ih
z45jp6qv*D8448Qc97GZP<ySbc-g6x%ds~$vD8PtwU(LVws(?RLe?>SWce+z~`H0~V
zc`!<BKJyy{*O7mDPtI_r?Wm2${4qgY*iFD!*Vg#ed&8H#Rcw{ykRZJ>mfO|-E@OCd
zklJ_QtqoOK<p~r;7dny}VsU{VgM+a5OREVlHx1Cdt<LwOkG=|w+~n<_{TiJ8nzod}
z2VFJVjfQTar+w%&P+>gc!Ja$PwH7D02~6abPuTQP+p=0g&F=tM9JB$jxruBj6@YCt
zpMA#9s-r^*L0tk>;P#8LN??3rf%k=N-MNz>qpbJ%tIFp53(HxT)T?HC2OpQK%E~T>
zK#3S}^UNtZMI>uKLHiXfm`5N2{!hRFVYnlijoKmqd*^367$ciiig{XMhZ)*xt#}n)
zsNb6qsiYxvLC5xHY$bIIT5#Jt#-j?IEOf?EC2x2oi^nP|iNRtnW=0_ckxtNLYpFe$
z`%^rv$)>A^M0ULQwnj)>>VFSiFmrHVl_Kqma8`u60Kzz7+)Xi#*`*a^N#RO!M5`Pr
zFiQ%*B?q;B!mk<p`)o0j7RK0<dX`X$nW`L%(F03kxeY^w72ien)1{@jvo^~-hycN~
zT1QGMo-L?;<A%ug%&Bqr@hgVaV&q7dVs_<STeqBoAh9Zxr+`8aHjHP1s9kf@@@Ck?
zzWLccO5~P3x$E|?5%&9$+s|kI>*<l${yh%hkh@fa4QAc&^c<kVD_h>h85>xR)GOd=
z70*~kxcOwuTYZaRM*V99-O*QEtv;<iiDB75QBwzqFz9rgh0I^m*yHLF(UK*399l;G
z`d4&657gY?-vGyvQg8B^CvK1i%sE8NSe1@a6KY)kxg?Ck=}tEpV_@on?Q$y6$jqKS
zNBfJhgk=W$*Sh_s7mqYJP7?(L^#DR_Ni!>R{M~7S_t4d*d#FL1vN`v%46hcW7XA_2
zq{(xVv``KK&Qew%zVUOGDN(ggG<m|(V_Yk!Cf)gA`-7eiFFIOHu%MhFO9#X0iLT)d
z>t^r%qzB!DI1NSMr`emQUtV8HLhV=^2zq4JtEr4_zUpg)6fj)E{LAt$I>jAZ{q$SU
z+ukRV=QkjAiVu$YMu<?$zXUcTY>Q<g2_@ri+8itjC47$kom2(y;eo26G$r0KVjl#^
za=tv!YnrV-DHPJ@<`>IPv=cc5ADU}n<2?N0jSfWh`PGY+?~ioe@%rV9SI?|~ph*8u
zAU4?cyM+a>HfOy02$@bL1#zfHV53~bkwA(^$43d`(7kTaLbIp}<RtV;=lfhcx)Vt}
zc%RW;^)cLUTE?iFKirokIm8Z5eLx&xQQJR<EoTBtbIRe3LsEJ<V~5SW0GcW~Mk4(=
z4kdjt{wLIqWA5A*KF)zh48}$N-y#tqI2@S7sxX~XMT_Zu=)&el45Ob;klP{S9(E$`
zzdm<fdQ7-qtCMLH-)w;^!IjZ|a`BQewbDn+4(&{7*0_Vl8Rm3q^M9D&cQc;!O9ZVe
z2KXzQS1iM{L<p(5l$#r*!0bIHwt$gp_Zbe+TBaOj3s0rH-vMu{f{GExhXZ>tX&9je
zG{khvFb3pW;(#1SUJjzA@8eFZdq1A?3NnoLO4!^nRobo<JOc&B{V=%+6r$L~BYG{H
zd;BLido#0}qmD%GYlrUnT|9!FN6EAf8l>>4w74<F3u;zd7pQsR9GET0fGCbU)sI^u
zB(GDb-W>K^%j!-CiQ0}=$d!ffs?|MQO_thXtn(Ma>~*iVj~9N=7wl+5i1VQYtoc^b
z4(*YS#*R9+yC~p*IDZX@mYzdhl><ts(mj20%c?>lt*B|0d{&eVbxBI#QA%7H715sj
zzrVxJ>h%&B21W}OT<q8+;Aq-2a^4R$%bhFMCL2T`1uouCq8<yFHgFu6fr?%lwiCK?
z69RdOxij1C<1XA50wnayxi1DKSnC>3W2bfV39+C6<ym+ny9j~6RGP28%*N&QBLL@X
z#dIbzuAY0qC%uU6d*}`M<(^{p7_;H)gO-}uL+Aa?kl$rU9rA9$EYGqQeL9tDwG4y7
zSItFMOcX4xT!`eg7hw0W>4d6lVhr|#%9D);l&1+{Yw7$nSb!6ciAA0_?nt19%c4yP
z!t+r$mqQZ<>5A_mx%n>45dKY{p&=~iu3G$K*d}F>4<_M1K+;xs$w0M@)H-ai1hJPq
zOli1QSP_s5c+B8kU2{xl6g9wu0K_Ro9a%~_OB#+h&k#59(#}Rrb)dzfhZmcLgAAJ5
z&DRELjRv8|S5xkqVqS(Fx#yq;&zR+n4_GI^NegUaPLmIe8_mPyl(TeuzLnXfM#X-k
z&EK6*B7~_KTs|c|c|yiBo$D`Z4#)%7Y*1sxC@i+dh8iR0`EyZWFxwhn(_x3g?>gOy
zhRnrb-80stFDpN?RAlKT<aDb2!Cz5090B}s%~Y@v>*q$)_OakWWTLa&nM}E5v2-z-
z;leciR%abK$ojVk0j|@?X$WL*Y;te^rTOK0XnxSU<4TDzXX7DE1@i=98oM8Z{nnFK
z4s~ai3Xs}nE0SNIN;M+0P>?N~M>DEA@tqdPWHp^eLKDYaZ4GNmhrVwugG52AW7`5F
zRviSvP7yTD$B3*J%&2}FX!{t-Fh269AOs0D35!%f&XPASC6XNjs8rvZ<pcV52mg>a
z>3+trurM}$HvM^}fB{yu2rV#dMmQvOq8!~a(j|+4swOI;NcP9Xw~#;89rFD@2u!$F
zwwAWnS<ZEG_%{0>BIdb0fr`Y@&7p++qx&CCh>8~%5%O7g%!0x(nSYb<fF=dr&fPjA
z>atuJ<)qK3Do-gzBpK#HiLfyiXAFiX>J_6jgDf+$v|%0dE}TVNo7dfEi&ltguDbiy
z+B+wPVVO>J56A1ghR}YmQq#qAY<MLl;W2!6o{R6c<Tn92$G5H&GG^+%MmNCK;1w*G
zN$3#Q`JS=|_6Qec_%h}1mf6nTn-uc!|E>9rkUx#nul9u>wKf8tVZL#DFxn>#)cznq
z(sE;1x~)O#fE_l`sQKBlZbCI}T#HDd+2XvGYnE<bsqTmynl8#!=wfPWmV1{$%%qGX
zqQFP@^rxiZTXDQXME}>3JG1m>w2hE=@6*2^>+jeynCi(Wc&s}Lzv+MXTZTFIE;Jf5
zED8FiWI78q?>cAyMNu!sVE@VxX=2|wdA(td=f<jv_LT4{x`5kH0gxPU=d|D?<REjX
z;V4gypaan3at0r-U+jC4fj>YtRQ#xF5LGH$>k}Goxm0B(^K#n!{#eGLQhmm#S)OoW
zGOBt7SgjR!DV4J5VggXUkBn*1qj11w4t|fA{Zr2I(RPBj$hagow{VfTUX9tmm_)ZC
z&C*`iD$neDnW)hMA>j6@?vy3mI8$rb^;9e#zw@|l+3#FHp{LS@oh+H*pHxHs5IX5W
z9r+9Uz3(aZDQd5}%Ic)?eMuSsvj;%8%y5)A9@afrlTjjf5M&&|X!05P=4Nb??FamC
zN1Q01({S$xpKQ~i_{m-Q%{M3bB9?%ge$NO3_s9D;ii^kBO%P|+R!x5iB|v1_tPrUr
z1{+dZBqiNApcFALY`WZH8(7=U7n33L`l~%^g@S`ls1Tf7mL{Nu0T_$wXw;Nb$RR2x
z(^mgF%1Z^ul9ZHZo7pKw?oij&5Y^N@f?JWGQ8RLqWxgyj19Fuq+4#c6Z*jNLD8q=l
zEB-1XaMXPBWdG7{aaOoF*01x}gu4HQZTuFw!>{nsKz+`};Q2RP_vzv|!D?zzh}dJG
zn9mx_T$lS$b|Z-M#$0Jfa!!jNf3i^0jq1YIDPz{hQ)+7+)-u`&wN8Y~!)_clLTNr$
zL)BrSJ~PY#WZB6z6cf08&=yvaIkp-5f?=f;w~<Y<gtVtnL8j8il;)L2mV4lQV`Jgb
zd&I_db4PN^GihL0DDX87ZTNe5WFaHBB|8iESjyc`ixf_-h}7i|JYBqETmprGbe7{d
zn<NFio6|I)m0hm7aB{2$ff}`IECm$Oh#9Mjm^i!dsW^&A%Ib-8v~(GvbVcPuYS_-#
zJpwB<5`e}r9-CRB&veH980G;S0rzBflumN&!v%C&E`3-MD2)t|8{5>EZ?fEY>bLBF
zvyyFC0~)h+!oxoPP7k`Ox$UG=EbffKj0o!|&@-AoBoC)tu#=difo>A4E+NQ!0n`QT
z?>uJ*d+2a0XIkUsT6w<$LkS!^4bmJ7*V$#J6@DW0W0aJoeVd=t9Kf$^D@6X*h56$Z
z(zN>`j4Vs)%$)W32#2!z3obb`VDM~4dbRfNinn4yV@9?oVMH-C6&>+R0x6~?Psxh7
z)@!szi-h=b{HNs}{b&g5keT&iszlwmMQY#udRU?aGb*pKjmW~OlnxBPBtjlq5l@OB
zujV{S=$X)&@F&{82<TPh7AB4YxLyeXY58aI*SVZtv~yOd^Fb{wq;t7462@L$iC;@T
zmZe6N9Lr?@QYy(oH@Sg0fn?@Y{SdDQHLo6|i=#)f#(DyE)#xEWRF?5gt`}h@e?<6d
zishm1P)YFiMaNxYhR4xgz~4bV_E!&yNs5hM6)s%z%i`<(Gmzx=k?#nYSMZ3+jBFg{
z^+?E_W<9LVuKekcd2HdAl6;LuW6la)5yy-~b*&|Vrv9s4T<I?3p>y0P<eRzKVY3P<
z_@%WB4AZDcmpX}rQJ_R*1vTA5xtMo=f!943iNlYB*{ojh+kwex!61Jis&V_*e)Z@=
zTOfNl+O9&Ll>RLN<Kx`t%I(r*qk(t(`Ja1jg$=)ES31CH4%NUSqh#=7x(h6}h-@0x
zDT?H;J)EK~Ew#jT)fzJv=U1W&5?Skig9u3zI$O8MD%*|Gq&b3(gS|6kykr~38&%(k
zL*{VF*rkotbQ_~9fB@;LrU<U2b>!Y$$ubhL#tGfzjPc7QyyrW%n)kWr80wn%)`X-o
z7u${x9?;P%m)G2Nn=C{*R5x^v-z~%(@vALjQ^o2K)9669l(r`H$hW-Smc@MZ?Z#Fl
zKhNX?N7icfBiMOW-Atd{a3ZL2k>ctwtO7z0T9ES)b1=d*r?%MO(&9Lem%mtfy|4r;
z!3+`5+66s5P7nmR=l~rTFqITTY-+C-xX(V>Igaw`ZY-656q&~eE`CHQcMh5pvzqEG
zQJ|{%k>U4P=RV@r8I?!EP8Jyofd#Tk$IjCJ*x8oUf39@9ncDPD`*(M~Z|*fYC^sdR
zTX<B#$33);2LNRl%(luqVHt1<WR6I3!9Q&{u2hnquC|0nK5Mm{<NF16s0d{a6bT2H
z)@OM1NcXoM;BG#{ygXh&zhEt-Dkzc8=W<Dvv!>ZB3F|Rp&UubLepmSCn!}^YI&*_u
zLM}jR@5$?&CDA1bqipc``xZMRMO4twE;(KARlx_PZQ<EaTDD=0)M-(Wk_=tjW=fzf
z2(tN}K|D`peWIUE!p-F}-3SPF#=9`sE&q24QOHi#vm&~)8fz5({Nbn?XzdoOF+Uj|
z;Hso3S!HAY?JbS>!=#ITJT{WWfykqq9bD5g-s~%`RlIk0+tI<$eCLNX&zo_sGGQM-
zb#tmSo*8}F^=G9#Rr=K!l2dOn&<yq&Tq-L*_}HvgQ%QQd@#O0YjNDMX)!vclS77|g
z!S>j>T#tQxfsI1hcxu{R;_(*PD?4!RVA|fuZsl|ATFBarJ+^ea2idCJ#DtVfYT6nd
zM_g5MAaCwb6VKp|_c5xHmxCl{3ZgYncY#32i_rMz2Hbf%f(bE2&13J!G((M5G9Tqy
z%RDyi!t@nGq~f3GF&X@*JjNKH1VmFiQ-Qsrp@t#3)}&v#bl=18@(Z#j4TV6tf2`;M
ziSjm>yBMjz_FOYU`4s`ax9&Ef>7M!~HO}EW^suG%Lx_~WE&OU+y7B(PFTwT;^;;Hz
zI%XA0IS8;#&ZKVimr{+@mnovwHsaj21}zBkM|8tdS{In?5V$)#>`2NIK8(wITc9ZN
zQq3&QGVE$lgp0$`S@Wcdqrf9#l%?^P6wBZ`l2m{6=5D6=rb*KN?ju_NZ!NR3N`>Jp
z=;{f6U|V!h@ONMCosvmCQ|g!%HzQ1zAhK!c?|pz<yee*d+$|??@7f~P5PM$NE)eG0
zZ>C9NS7sUEBNCVa5dB99-rV&7If`5$qE7jl6llr(C1qh*JX39a$;PPkjz}jUEW_hY
zJ!}SK&pvbz`-})lP$S{6oGR0;Nfoc>m}vPdnMNVU%#4R9VW5Hl!K>7lDJ239$q`^5
zQ=06BPH=JrYsqH};RI`WW3!GY({4zV7i#or9#?1{U*Sy%!b^{wBW&K2`i0E0`4II)
z<ub$=eq~|Zq1F5n7Y~f*04P&1&aSSBB*T_wXsH5m&d_lre_~!GnKy+aH+|W93qf<*
z@ef(LrsW`7SkM;O&8S;8r%Knuok$)VM=k#Is{#rB^<!Ty_2BnlGbcRJ;;EP#49O|7
zstU}ZbCJRKu3X(Xy>GLV+x-S^Sdy6;qs2RjYTuNYemi0}wMRH-SyDG*DI7eUbUiR=
zcU&`(J@kr1F#nZDqDU=F%IvbpoX{@=tSb?JPkJ4FDdT0EPQYHtkoIk!sVa3GZ@n7J
ztV!Rn?`y%&m?*l?2luK^nBQ0O*thL|`~i%>ju`KthRcT#m5_8uM?-k}0|t72on4#!
zP8^uTZ#IlJb>!P0#R}o$<M%aAoCgl+8t8$VJLQX!{$3Q+)ky(hK?;A7;Lh$t=`3>$
zzz3H@=rxrtKFdqDCFY=F%7SG=iJ1DH&i>+#!@MXLCK;MWAE{$OL|*{_nwmNWHC7lU
zcgF>tw*TC9F-%Mf(T4^8jBx&onD3#37!`Jb)A65>;se;yd5`Gm?2xXNe$d3#PQ`IT
zzbi#fZ{g!TG%otE;R{2KG~&~ChtRkpr<gyZz$anA#8>cCdR!Pe{S$hN3r|XRKY9dD
z^-*y42+WpCP9rF=#!HQl_M!ZP!6@odNyL@E2v1c=Ck-A<#m$2)k7T(p+^$u3b^52o
zchimP<~*&5fBiVQ(c9aCUkkh-GrZlwzsl`;B8SB+)oECEx#0PRH0FL@apSJW9((Gi
zPO4&`(I10#QMjxUv8Pbq$rq&2Gl84Hva?7d8(*y3dhxWLclQCNz*tP+Mi+rWeSD?h
z&&(*N`j@sAPHY^xH=$*{U9}mWnyA|N%*AornOU9in?^gH3@gQ0y6y~Cn-P@PbF-lr
z8d?2c@l5VqxF>X~e87I10k)d;0y~9MZrs(BGt;(>Ro~N}1{T3d7=`g8yY@g0j1lg|
zatPy$W9T7~gxxnl$Hf9&VceIfRK9&qpkA<pwK!wSG!MU?Y@uT_=>FH8bG^!Bs)l^J
z&j%;&^v0~lr({>!ku&tTCrgmTY^s`@tF*GGcbW^km(_}tjPgcieSyU%gK(ve2KF{7
z%Z>c1NcbO5!UL{D!Yyg58oZ!@DY{%!qFw-9lOA`C6{u{qL@p;$qb$$z(5qa@mLT>w
zWN|G(KhxE&dNa$RGe}F1=lcF=VTPvrT0k)&A!RP87*EUMMZY^`X~skP{<}VaN<jcv
z7*<2@X}ZW|{}~5#TIe?N3Aqzpx=RUG2WBCV9HVCG2)2u-z;M&Jwn16slEVM#!SQKg
z#wbNHuWu*5Vl?5f9u!vWEAtH-OWnM)hCVV1a17HBW<^ya&%ft_4V?P0*-9s4*&N4f
zkUUrb_S&G6ly>Un;C*6PD0BVeLU$yY^W3P~SfpZKAwLrM9>nJl5h}Td5W{RHd_11`
zD`1NvSDw2uC^%%QqN4&YYh|1hal2$Y9QkCw5ozu!Usx)Krx0^cWM<9*SU<RxCbyX7
zFXJ5upcBA-@-WR|d5&mesbo3AkD6Y1H(UmEWD5p)ZWHV6bmaZR3iK^C;AHW!*O_<z
z3AmP@%Ki~(k9?1t^u8DyM$ZTBsarL6VD1hV*%KS-MT^4!+0ftF7oq{M&smG_!cyaE
zk@QPpmjwS(@7tRDpw(axAAF<nk;w)Tf`BMl49gValJuAAvKHTHWI$Uael$uW%(+vR
zK`x<QinUh8imrr{2b(gcuKy7>Z9|}(2$$pgG7C!2iI;MVC3!M=ONtknXn#;z2|*OO
z$V5reIq0G?GlCDj)^`sUi%9}wFkO8xzal}}+4|Qv!FQ)71-wH-?{P#Q_OkF3#=Mo#
zasneL!|P+nMN2rRrOdQcK6)7@lDe8wk*%FwZFOys3R-bl(9G?_+~=xYT{dsBd+%*!
zrZ9!{c%1GreSeucZbS<!yk*|Vm)7aMet-F{bdG>!t1(QC!f@T8X_H{xET_@8%yLvd
zLXj5==P_(Idk(V7ujXO1CFepS;m*NY0!8ZWb#kaUn6?y4M0)NlNxgapqj78-zQ5*G
z3{=WVgIU#VK;5%_t9eJX+L=L12-#r>gYw_|;B8*yQ#XcC4`@iuSr^h4mizWfbv2Z@
z!r_%v;i7B(7;4Pl-Tq;@bbn;MJiI*(4bmAxW53NSshQD*OckFWoM6NwF%^L5ul}WF
z(F4==(Ds^()&q8Jt7T~lVWUIg={Sl-*yS~-WDd_Z#^>3*Xh8;-IHuo@??<mcQT=X3
zat|Zv1lx;V8g2aP05fQ3`@)Ai!ytuyGiyy!OfX490^qqTBzIPXisZ2nruRn}gBF}z
z>u4%W(mjVU5O{=djwTPX+RnGmRn`Y^wMD|c93ELn1Ogv3nf0#{K#s3=*}Z9@5SjVL
z%vs1HyHi?#8g_F`=WGFur6^XsT5iCMbL79Lm`AOY3`y%K5t{@ZTThbsz?6W$jd2!;
zM|E{ma_U$AKAw-o?oCcwRkVMy)Vc%Y@XZcNxTdK)X{!imv!@_xLG;?|+`$%evUbjq
zM6RmOk&(s5@xMpCjIjxp>6G~*TPDA<aeb02P!glXOv~D*^Q4`<<cPC`)CqjBD!pl0
zN}hk!WRZ(sjAjTEGnBF5ZJqg$SJJ%9yG4QBg*ss^K08QONGLPq)bQ7*a5#K|`<}Dv
z&$8lAr=M$v?z7&?u9azq!SSDT*S%J9y5uAN?uBmb)*CAS5nLTL_R_Ur{y?|cD`mw~
z;^yRPH&(BoqTdreE}N3dW~&BqJ;<Unk_XI@ZX7yiR`I-7qSQ~fhuM}H5aEJN8$AX%
zwjesGdnqq`$f?M&j-w;}oA2`_Eq-z+C2G|gqMu=78{Y661Fk5tfB?~5f0x|&ScBC*
zS>et_DSvHkg&3$uYiC1BDPyV$tQhiA;o$3@XI~DrB^4Y#*w$O(@0>UYql{OTPCz=T
zs69)<KRnYlySg<W?IGFUp{mTtZ>mmzi>kW}7umZ_JGzq8=*!n}s3w!p7!=0{q!bEN
zU8XLyULaWZr6re*@2d+Vm^VSt{LWT5Wme2Va1tXGZl49!)swE{;X$;bp=(rAmMWZ-
zI}tk>HvWDW=ZFTJuj1t}r8SmzGfIx1+cbVCMERBsrsCM6vmwBNQ)Cu$-|7iTbTXFv
zBB5q1jcNx_+DMf#S36b%&M#?cf_BJfZwuq32P{ymktDFuI}uUJLv)H*S_bZ)=Nh{c
zeg5=TcS3$?y_x!(OPrt8ju`Uh6VRwCs815^RO49_3*5gb_9WasBL`Ik0*>wQ!1vaW
zIR<jWy1(2BaqS-`(I&^2ElxZcztU+yFfRUB;Je9Cz`KN|(>)2APyfPT-?<d|JS*7n
zqSfySOZP2A&I(m=!fC(*blrZ@oHiOA+Y9{ni4k=vr8Y>nhV{5*6_6vZ+(l!)dHSuu
z+JC9u|M&%k3N5|Z)Kuhrj&{QL=~4J~qK)7EBr_<_Sgp_JURb;VL>_;XP;o{{Oe|Tb
z`AxxDL=|p1Wq#x5EcQwLu^r1aJ_4Q%+#oJ}lx>{@dvs=CaHdcSiBJMbVM>6!fz96w
z4k8tX5bT#V;Xwgws=rrrJl@;0+53#iBB^cX?coHP`t%OJdI5gQPNxk4T<V+u0tmw-
ztO2f0MAJ{VZcc^(U$h`W*v20$8C~)0KbGd<qBd%V7oJr6g}bNoc>pB1s}D>0;Dh3_
zVRz0v?b{My0oq4(X<L(btGHk@UpwccxL=eL&FvrgKc_@)hS~ly55~U(-n&&H6|Qs-
zn7Kp^5iZn!^&|GG5BRK9^J%4D9H}{t=Bw1a7wFKgPPBvr?}T)rmF{6oLr{$!Sq5f)
zj=75Go0SxGKV416mfFICX`a$!ME%!7Rlem|ZQ@0(u-na&Cft1M@P%VJa*ojHg4s@b
zgW)Fc=YK8t{vpd;q;*qljxvi~8XvJ?vR{XX9xpFmIq)I)0vESjKMTI4eyY4hAqJ%%
z8+LX!I>y9NC&4hywh~aageV~t|4?!D9XPD33iGuhF~f$_`@ZwDKN#Nt1%0hAlDs49
zpP>_+jcgR>do{M&3zjdUXP?dmy@}CeRox34w0Z0}<WLYONmXX3+4ID>h=LBRJ(Wq#
z1+)7}bkc6g(vI`YXbt2gqGc?1|1CX1&wwioLwws7XbS4liFe=kDMXC~*3lE9$iDl>
z(=qUjr%lSEI9-2;587UV4>-LIw{$UxP`mVw9U7>Y-->rvpr<8-v^CsM6hbH?Nk>Dn
zN)jJa0FsDyljk&D6OCXVJ>9Xs*x}KAu0*qR?4{I}_%W4tTlQjc5y>MH{I-)cs*K&O
zJ1tUKS=PuWnyHKCM1(094{RrVvTYO>u>6-D16;Y&9?vOk|DE=ny69jXXNUYzpmd@t
zy@fAkG{NcGhx5oHb%~Dq+D(*ddx?|!8oV!FJ#H5E_beAz1AzfKjHcCHaz6{v`=-!V
z9ggcW&&p<Yl|khiogdSBCLQ5>GEpH@5)!s4r}+}MrncM{C416Ikzy_8*G74y`sbgO
z2&L>A7HSM9y;GHw1c6Cr<=@dkB7?0s)dy=jiUSj!k~cw1YZeC-3v#6F8-HH~FqDk#
z7nRGMQP8uumcZx0-AnkP=nCi4VW#{KdVL(66|ueF<~_?NbX`w%%uMWKH$doh?aty=
zAPhZqZE9DeA7p*gc)KQB?fC7#62rgHjD{XheN0iVK~`+VU&Y!zp0kL{KJxC0(DI7>
zSj5UUow!B1&~~fvBiI2+m8riV%zWlh!a(z`0Gw+OIYYysWXiE;cqzk0nYLG*!t0R`
zCtP>WL(j}p1G=irx%gL=oA~&&mCh;7iX@pi*Kdk*%zltSDuQ?LWUJ)z+Z?zcg&NJ-
zWU7}q77(s9$SH8+p=gIStCWT}O#HL$=PDUjgX^-f@lWxv0ro@4p_lB}@pG<;gCw@K
zuSekGA`Uz8#+tLp%kBs#^Eb3`-j;hjx9aNpctbx##jR~1It4858Hx)q==-*R1aG%h
zt48gT%!aLml7Zdqjn!Rp4pR+mLf57Rvt1?UVyxjLl?#n=7*^;!jtw=IMnxm|)AIq6
z;njN8Z;z0aP-cY!5F)RT3^$=#&rv;UI;r2PR(|!0kg>XU#9rwKPu4&RGMeTCmt1mP
zK`73bj%y@QmwO+8O-D~(11ec1%zXVI!O#y4AR{7OUJy#p5)mR}jLKkFJsu*@Mltn8
zUOr8=&nd%p!pHpaI4(ACruW3LDEtmB1)-ngsD!uhws!V2%$^VaTA$tnQg%9G9NSwx
z)GRFHTx)6EW547^bBf-q;uoB_h$t$0QvhygA86>coVpaw85O>Y(SX#0*ee*v5<~*w
z2GvZhF$jTuOw5o^HX+d%Ows(&ky%GS0mY=9VmWA0f&%b`Xc)nAUpEO4ko$ge_O^;U
zRCe3tz0&7!{i?$%tzC&BDW=ohA<*o=`RN=cVEIhs9!*4Iwi`pzdwN3`Pr-<gO3D>e
z28*RSuv%Z3>1>6TH6d&FwuF<D=d4tB5J+cg*S9xy(hacCL8qdlwE?2?hnF~`x7qRb
zWWc6e6%x71V#u3nr9H&PWJ)PU;3^-aAn9L@fgJZ0I}>-y-cwiOkahG^WCJkr1SwC3
zqMgT~?rd(*dHR+D{hf$J+^4{R+BvXJRlH*A9!ZXcCl|rVuuoP+$hww;69KKv4;>le
zvRiz-chUOj?X;KwXM+%=(egcL$$2iH5uELc_U~1o^C4`t`N1Q3cY$|%2{X%c+1wVO
zE#UM5TX`uzug+*uvk=j-*Z+%BwR&~H)FOp1a@uZ=%*tx*2;dY~Vh4)*O_eqD-71m4
zgMQg*_4k+hG79?Bna>A~a`kn%kyj&lG>!?&c@Ca-fUmNJG%FcP3O^lf)VgX@YiJJ}
zswcLPH~C-dr1yC@0Q1$UOb7CsZL+t>6G5tW(jW_Oxo53erv5^4DlErf$$rXMZf3sh
zF7Cul0wD3!0gY=O@1Mgbx*#pmXU75Ct#mATB?;3)dOkRRPhrXuaN!o!GrLulJxprj
zino&*h4whWS!p%9=$UI(NLWaFX$F4U%4fCxGssPPr6CUHV&S&_cFo&?r(~wG&4Lu{
z&iAOJo7&0I?IHSu>=lPAl9yI<1veQ9lkqAUi#;xIjbiB+T^&g3q?WHJtS+^NjaS6M
z!?l{hd`>{fxm_MpU>!OA&MKT<d-M8K-F$|h#iJlI_0hm<{F*v%mSE;FRO#7h&WE>{
zsnXiblG*=8i#cc{YKbN-sY=;GTfTJk@Gg-D5w<k+%AY;MbfC7gbz-^=&2g_2SWZq{
z%Ry2nNW#G*LE5b)UYxp+BuJV}k+DRkzT)C~roZ!Gk!{eHr2GA-<|aXUqWRYLDcMlY
z#XX()?P1dWa`NpL$nT`dX=OHh#L_E#I`E#oiMyM165I4+?hKfFf+&Tzhmnd%CqGeX
zSENJ?jTpJ%v(@@astf%x@7}TJ?8MZ$v<zn%BRzsTM4;jK-bp&#sVPtlsGiy&uRluJ
z-!JGmxU)0n{Dd0dF#4gv$g>bWQ&$(+QO(g5LIFBk8NnyxFF>Y5XI4qI|3Lh++nf0p
zj=GExTp?wLC`iVs)t!#dJ}xd#^jv@|<@*p9J_TH+N1$V6!6q9wvSws#3Y{-T+?7IK
zwQ~}IMQPBHj@ob0@BGgZ$QUTn4!-~kx<*F8t%I3DB7v~l)WEqyd}=Sia&}2gqix!P
zi9`(I#DobDc5Tcz_Q^xK?g2A2H;={3x>tRfVGbKbJ)=g0fde?*JJ05c#th0**k@{I
z4h{ZlJ0YCLD{qWCZJjS>M$e<^Z*eFi;1ZLlSwILgXF&$Q$voIX$t@N*`*Y*50s%SQ
zCc%U<#}E)YL?br#KVaF+9HLI&ty{Vfy9^owZQ+e=sxpWavq>~;Smp<b@*xH|$S|&_
z%N5j85@`adpUQg=n?D3@g_GIXl$|o(mrS#uqWu$2>T?qt!)5bDbK889E>QMwvwBuY
zJct9`??+TrY8VuqP4T;YT7f!b#+pSd6&m#>#96HsCIrM3w?iiWcxbCC_-ml)qxWDP
zBj#f;B!dZAP*M=SE$VRG+Nlgwmdep)%?N0b`=-p4PP+!s372mbDEqjo9u@LZ(M=jA
zxV~4UV#wE;4PanBx%%Cdn#51cZ3R(d`Cg-PCoWBT+qn<$rkf+A3>$i&(J@h%RxjyT
za8El;u-tS9Xi9v27N-?~S=fsrM%B=B%-8n-ga{E!r)LxlyDG$!m}W%%RG3!}D+xE`
zzI4;7ZY;pM6mq@HbMS(rZw!$_JY8xCv1D;E;L6nSKEU?tgT4ed)f74Dx7YW-eT@!o
zA$ygm>s$DH)9@wI)H3kN8&wIbp1tLh5>b_Louz>?)^VKy8s1#bWe4{Jy~tgSGMZ(n
zpjU&o43+%`W?6Ss4^ki(iyk_O`R1F*?eU)qJ$vwRS!cYB*j@74xfUUj3-3>~I!V(w
zAg_3N`b8bx1-)G`4{5dFuaWKOdgZS1^60_M33-gy3whn=L1mjHMlAKN>Oh(RdZo+s
zjTn+;z|kV8RM}tF0)xk18@s%r#|nvargegs|AO|+jiv_&X2ZjKyPh_m5G11*J)STr
z1O277X4kmMPYhW`(v9-HmM;2BGeL*wgkf5qzs527!YD)-Jvf)H-Y<0dmoB(n@z%i#
zO9d-&<_o1|TvJrP;8vhzFPHF}&-u7px;~suoqEfOaKWZcgP=g?;{{>x;#&5*ZDawm
zYE10=EkApiKl`e{N8JQJeLD1bAE~xA7NMG{FvXHjH!l)zmR`cnbH~Uhl%2q&ZInNv
zK0Kmb`uE?#6j#*-$*qECRSbwPf1CJAlQj3&4sKCnZY|a(ZsaP$S<}e>3#UL>zr`7+
zJ&XcPA1Q6($2Q5Jwz>*}siP+5Vh%`>-7wnjV}eOP&$MlqW~jQ<o{(*~c|_qtwJ>53
z2$4F}L<xjiG#E1m@g;+~6`WLfD!f<q+-R)!f}(RsB?Vj<i>UL9vcVi;wap|NM|Et7
zOe2SGxsOAKj&c0x2FH)BvNaqtjZg(i^j3j75<;XR&EifypizWr2y7oY!NT%9o8y}0
z)d6ekb8L@3!`YjM*m?PrdCpJ&2wV58^6~%nE<W{v5Ayu)c@3vt@qCWn{7L$257OVe
zi|Mdd0l&<EO#4FA24D5l65}8yaX;(4Ub|^Z*oR2;E(MSdtZNeFhRjnk&3)Bdkdqg)
z{q@NXNo>=ol6adl?&@sB(Aqv-d)_!~j3+T0zftf_8KC;YYcEMHgu22=&FbbhJ9nOf
zdmvI9b>Iv-lo#?wsk5MBW-^E*?T5S|HAtHkdYIkFZ8B_)s8`-hj1EIr<<uISN&R8A
z$@mui9_O8RCU@O=fWnrlIvYnc<f)XAge=hB_6q4kqfUh@3YM03AjJS1BO9yhjHY9D
zET5v;)fk!#T9XOQadb+bYNj(A?SLzaWXdg7HP%^EnP!_rCBP|jAqK5-NizQ?;AVLE
zb(|36gr3uCk`0m4T25P@=ZbH8A)%<Sz5wsR8LXM1thuCCYy{_$*h{JiXqH?#{`obr
z3DnZPmDDTjU7~3TNXkprHKr^mcNRSR%4hJY+wUhDsGSZ>HfcnW7%<-8LIokuj*K%m
z+bV4k&3Xl(HjXF(XA8s#TU#5<Z7)!iB|FYIm-Pd;;|w}7h$(_L+*&ad)-aBi&8;n#
zmzQvs=CiSm!*|}#^2!QptD8!f(VeDEnWz_#)@$my&dy|h4S7amTGz}LADYEMj5;pD
z$8Dr=)Ltk`Az;~JnH75+u{G8eVCJ!PB!)4r49v}YdJ8=Y7pYATA1!`+N~na&jIegX
zj@@Ulv3eA&rR<j!PWu`qRC+v4oi2xKd{pCgUZ}AS6AU2)qNp0K)XAtz{kYp0XJ?ra
zoHUX{w8>GosYEBrY$=<nH0rpt^k8KksZfn|In!<1UutxPgph)(biYKB_*dF=X*bIl
zViI3SYUM)HPozOi4pMV~VA7_Ts?sx}TM_Mnlw8peod>!#o${lv|1mblW26pLb=7Rt
z+oNsX`PM(=Gw=QYXSo3v?LUp(a|0HZ2lTzgTZ8vr6>-z`an3fg290&85|I)<F7-||
zv}8MT&D)+f5uxuL<E=H^U<qr}sDKC}Wpmv^s4${y#%6(!B2wZgm-gT$Ta32HPzUA;
z!{pPqaMjCS$^Apa+Gp-p0i$Rz-XV3Z*}-HdW7Dg4a&;)hU@1{Jd%tMtz%f$=PZEwH
zu^QU@+djiIYfyC)a=@zeeF@TzGfGOmKuZ2u#9D#`0wIU^O#!v}zP7Pc?@x1I^lwdw
zkeqtKXJ$rn)~qqO!Z*~#Hf1u~%RN({nyG%~_cuSx0Zk4J@+SzXI-IMqW=0pzB@!Px
zW4Ko!Jx7>E7?hZz2fdP*g-ndaTh}OP=cAJtkf`IYvf?%&>iDF`8PZfOlfOXg(B>p;
zs~I3OYR|dGCntYa4k~j#t@R*U>NX}7i1HpG&3b7Y?WAdcGA8*Wjia3t@mWkQl6!S+
z4rXW4B@GeGp8SJc*=;Jnv{i5|MoH6Zl?90M{ciTVTn^4QIww!8COtKZ$?r4GUT4T-
zm-HM$4I#L;+H8#Ovqo7MJ_VR2_XwvXi?zC6=1b?&z4T5KAl_NLwYVaU>oVYtCr<#g
zNeECvN~Re@G_~5gSsj6teKt1J;T_%>T<IBDhxG;JT%U5VKsh%@Z@y1CKM$iV;?{8%
zmIsJSqrxP#(i61)p|Y}KY9C3X4Z}9$)op4iIC|rKJbd2~9yoM@<HHeEgg}9bQwTy`
zPgRjOqH4fVa4`~0>QEEGP=j%tIJU;R-C%8Qj`Q7l9PI~=zW*-v-uW<>T>UKO_MOcu
ze&G!q`s+{fhrj%5JoD=3@ci%pLDug(KzQ&Ny|Tbat!-RQ(miPz2?$=Vt2G*Urac*w
zsAkN)-N`1W{YP@~$+QAea#C`Q)Q+cqrkrHd((@Q&7KBboo|MY4Dj;eIsPJi4qSgw%
zwGMB;@j}#Z$^dn)-^WC&4~K;sOFgvM-X?n%`aHZjrmmuj3PIzzwPd+CBPfwK%dC={
zUYjatIlGzOrSFeTu9loD42CA7O-zCT5x^ooGTPdraGv$e5eCivN2zhtAZ6pMHdMBp
zk4d9el-_PR^V~}b*0Q*?h@BLy9y>~?D+*%>VS<<`QEU=6X<Vn~+BKgENf|Iw+A?Lf
z4RzSo4M7YpMxvXgS?P7kL7UVSz5cuk6q%OVt2sLD0=B@SR9yXSS2NhXLUa}4Jke&c
zo7}OVMp;-xG*%s1$pJvh{w2@Zq)5%CD!1foE+DBd*;>dGLQF#sgQ~3Aclnbz=L5UB
z<>(p_OD!XuDX?KoIk!mR2jCYN4~JCKahe%9Gx9($Dp(Dom3C7zF%<(>Fx}W<IvF$E
zUSt26XL9(CFEZ~{={8ms#(5QUGJ=hUeo?YD+F)URp0em`=1+8!Ts^YNlb`nt){fl_
zwjdJ(vr&yEVDMm~(qyeq4q$Amst~cXJr<2qoY!~$|FlUttJLV8^@La<Magt(*}Z6p
zwJPb^IK|e6+6W8Fa}4I@ff`%aI5F7DtID6PiB1?zw^&|T;-UKw(Q`00o^o-SFq#mj
zUKK{2V4O=;L*39r449t96&@EoRb92S!(v2gf!aOpHpW@?_;Yot365;8qNdHy<xIM3
zM2f6cK?ZAVGnm`jr0SQPM9EI6DBUd6QiDXK4ym7}rG3uawHZ!F`dK7Z*3?3Ul+A5s
zWX|OMb&iSwQ|YH<T9Gj+nWNuej0IQlte3tB-zympw~2Ml=EfE=!uD{JKl~5>kvl$e
z15ev=CTGoi_Vf#u=1b<h!vVf5C<+y$FsX8E?M!tp&2cqiH928S5|}dWF(eO4lGCYc
z*b?iS;l>GUX|cX+1d$R49Fc&N5+ZmL2vvkwQ^!hsQpbV0Vvb3e5JJS(0&bhN_k5D?
zea+wD(1AbX?n9fJY{U^=HvF3SBU;1?CI$L44It@~&xqHcmH#}!LDzPx>PZSN8s{e`
z2FZb_G6-^InP@OE2lrax(J#)`MN*M72LM@YDI3L^l_V{%VpfG!myT=fjQ(3yopaFA
zvKu0W^El^DYDFa5AJg{HHCL(mj<yQcydKgxo}Bg3eKhBBoBEldO<ty78USbLEzL8(
zYlY=gPhov^$oh#9wk)7mQiVtzBMzNSo=0(rq#kZ-h)ye~mYl5l8Y*l`yB3;ipyc2`
zN2Ru59?V>bHql^6`*QQ4G`++zCDjs>A_!O$)2Q^sJTfF9K>e_oH4UO6Z@o7ALN@K!
zv}}lm#W7Rt&c0V0j4~sDVK64PG;hgC65aPyK{|b>Roi7#A}>$#)h*aI3=E%ah&48i
zc@dn`K7VgL&N`ekcwgvRb%x2b=HUmAQB9}Kb+d^E?-Kpedyhj$hT2@s&5uw@m^7pO
z8bPzr_>mG)&DE$4v#5%%_l}|{@IHwS`2z1eWm!@bJ<48>qAV!N5?2)X(l_VhyvI6&
z>p7fPZ4z;sD7QxS3(S0h-?tZ+lBzcK&X}w)MNIwJsme%%IxvkeUfaM92E;KuaKr7~
zecSyUUEN@89K%tda8Ok>A~jV#rLI$%$2eu;gg`~g>7jkO#;U-kD-3v8n{M%$TR+Fb
zeSOY8^BmTXt#kaLkMfk~U%>9mpTh1dFX9K*UdM0##-DT7t%vxYpLjjf+F~}2QaYNX
zPLmZ&!ciE(dY|aKn&?k5bV`3uZCa{ErJ079sL@&hwXFeb64SFjG$~==$*dleh1$~m
zQ=R$hQvpmhRCMmE9;=Rp)91@3`~8g<o_>=CsLn*ZhdQV-tnrRmdA6rR_U>Nf*w&b6
zA|cdG=3Og+rg7f$+$7B)l4QEGAO?WbidQq^RGYe0Jri}d1g+{dnL|rJMHdJTV;x05
zFg$#efACY^&s+X~zmM1jj43d+O-<Umu$z`0k(PRq0+D<~v4lOl_p?=P<7~lnyv}fY
z6`v}LF^G<rvEX8ztb01XI3s5dG?6wb`yK=nE2fhPg(;O5W-JohhH+9B9YE?@uYGDR
z2O&-;SP6Jz@y1cdK+nS&%L_c^xz8cm0KU-6WivNF^)jXYG^|x+U~3yHv7~?rpB$_h
z(lwGKij<q4bb}69YzW%xkH=S4la;mJ;&+ui{qm=9+h2VGu|lW>?<|Ek?Ax`B8_Z*E
zkM-4(V<(QJnVUKP?6exAySE0zQB;k%77gIsh~e5QbE_xVbILwy?+8Y_LZuE=66uLx
z2#C`|pYwC9ZXBmKI1P^l2QKw7AH3^cmR6S7+#V5U=Az}^2V>IQN)u9|!!>=-CW}ht
z{%Vm#XUSE0k~5t~GZ}~T1Il`csSNac6y=B*N385zV&lLTE<jaJi9sml3KmzE@YXX8
zsoGvgB2#K2A$sBjdfw3sQ>-cQu4cjn?=8ikkMwo(oJyq?ivnti$T$*XO;HwjUl5{T
zqQw^lVl8eO3DtDQDSI4kjLki~SxhK7z)sbxqisqC7;l@5e6B!QYg1sFtEowRB}>-F
z*ogXI456;6Lq)7Z8Z#4`WLbV(oA*kq;)}E&M~pfA)hb!)4{cb$DQ70>S<z3nGfWT-
zs7(k=E2zdJE`QDo*`7{uSgL4QKej<#1>W(Nw{qKGU(W@-Mb4g|V|h_nSm;waN8vr*
z6}Y0n7OD3y1yfo{u&GZDrCwR9eL{^2v7>1xFiG%3b5Koa<%%9<e~IbVkio7wOzFw9
zL4|-x)c_Mg>Hs~x9>t`lt|q95qcps#o>EUL%A-RT&fLrPM{eeo-}VCj>z{p$6XU67
zy)l?rrEvpE%OJ5*#F~a~miLoX?4*sPS{kxGhlv_yrI{3BY_6Qn^oG#}j(H(Ss=j9S
zkI;TkliqCXsyacj)=?2zyroI{=Zd+_fvM3vMhXg=B5xMlvD&AZgI=2|JX$ewP49EA
z$To@v(kh(hOj$l|eSX{2nfAR#q6Obe>qF@Y+FRyzHmff@#wQ28&<YXx+Vm_%S#s`0
z7c-tzeD=oAvwmWoUQsY821u+4^;DS&b)~A!DM=J1GZIoYFn(<+sJZ>K32w+jRZ5A>
z9V^ZKH@OvvnZCi86m3&aXWp!H;xXTF1S_Fw<%pSPdlC&`=Df2ivT2>?rg}M3{nq>0
zR$HyLO#qjbeT_|X!>#t>o3zJUo95>_m;QL1PeGV-IBzM+g0k>9=hLiRk4wR5ujeU>
zQrS2~p*Gjsr06d>x=qP$Wi?m>RTZeKnyRiiaZFWb>(P|bmsHaV6Kb5b4NFfXsBoM!
zkc3*Dvu*F7RY4+aRRP@lg0d_qy`wBjio)Sj@L3d=qR`_lN>8sWDF&qmqD6u8I)7E$
z8&X2vq(nuG7%E_zs;~l;L8{zBU{ui3SR6(Jc5yhD)xIr)OUXgKt0o0*gg~gKN^c*n
z(cib9)w>RH&$V}O=)q$g-W*fYqY44*9QAa{G*mjEp|*{dL`3I%wn+|yn4msE5WP1+
znIGO-thbD&6As;b4|B^49D1PU(18<NHLBSAq_a5f1<&U1-g}HUzx^Ye_;+vShko)$
zal7_YtscQr;}V@)tb@#`GFnbTJ*oC#6W6dMj3jz`%93TahNP;yhy$r-M)S3F`KC`b
zvD1u6XWH1$)a#xFZX9X0b4vDErFDx@+P<?EZ*p((Hx|Ar1Jt=eK@2rgL-0;}M+t<9
zrEtP&D@z<$JwZJIY!V)61a?eXz&bllvPJ?Vr7-;;$$dt7u}kav=_YJabGvIA%Z-3f
zj-K(B!UxVeb(yoTx{9~{`tMP@KGc?8VRe*?RQeFN9%9O#N~#p9hz!P1Tj8v;pNw|}
zw(enKWOIF$;c%N?udm}RNc6iH(j-0)HmVA2%2Y}UOf{n(6RlE%WeGJBrW8(lG9)Yf
znwSiHz+nm07^&zLOG<$=5aW~(r+5oK$-o;7b0+YD7d)HA-Ma{-G(l)HQz};ztkGGf
zGkvF$dis>EoNu4#F=LW=PI8DOB^<=)L=uxSW)g```}}OIQ2o!EOD?~V#SdS@ma$Z>
zqy~_{;!>Z5xp|O2MQz#I-ePMw)y+5cFB7y{X)Ks%2sUD^;G)$yIaCZchAfRISR?E{
zWiP|S52mV?Bh-R*+V@~V4OkGW?V5T#rCglXbBrpmu(`U<c~3o`;XQW)rB*I%qLW0B
zwBJ~38!ca&>XRB!kdzR}RhFFjP1nwuG$OE|a1o@3u>;BiC?~q6=XWF1Aq0!mI*fc}
z*D`ax9yRK;ngG@WgqkRZ@fe1ifu)r_3^#`u8yX6;Ei6nX2r!M6(u=B^NTJs^^<Rc6
zVkq!ZQ%fKwRb_zag|9_%!=^!XBB}S&v<kk?Xu`oHin6h8W<-EZPMSHXGNTp5T-DO&
zD%7>Edkhi+v95@r(!erGLq$@*P%F3jPY!uv^Gs)Uhcp3aeukoihMA!RUq_-AXs>rn
zn<a)ABF3oWcm6Xkr7TL;hwJoRk7x{=YZE^A$!oajW7n{MZi#c2ce7);$6{gW8&9wB
z*rLGn2ii~NQ_omgV6ZsvX!>HU_5)%}61B+H$3z28f?|z2t%<1@*<k8Oe`$rI2X3c0
zWj}@QHN);D1jHI#2x;s9lo-Nv3`Gw&?J*rsAk_Ftz!a7`7$$Xu<ExZspThZj&*FPt
z{hhq+oqxr~_L|P$wN6KyiHe;hU2PueR^)xdq$*er7_w@tB=uU-eQoMsv8^WNs=Xl(
ziq8tqB<D{v^lg7y7D<zt0$cibsw}mo(rgBDFKQCP$!!z4YGr5HGnti<)JszUnrqsv
z5Kg9PH^!x{-kn7B&ByYkPMJZ79#&4>&GO1Fj-NQeczcX19ML&USwK+`3ro>2=`YM{
zRXs5;!gxwm)jasXAxyty&;B!6Tv}%J@Np)q8_1wyurSZu(lV2(qMD91K^OG86viv%
z-6Up1%t7np3!hjQV!);Qm^%x^7{vSbnu_Gvc3>_2Udh~`-&PY%s`4bZP;N1>DWREv
zZ*#zwY?Ddzyfeknrk-)<5(~sytvn`yH|Kqt+iCFLrK+TUrt=yMWkEP=bta@~@}Mnk
zJg-4(eR_Rb*CINKD?~yFRH0@voiLtEm{eo7$07M5LLF)*^_0nE%ye24>l&$JqpBV1
zNC*(>npj0b4B8fwS`WNQ<ItjIxh$CX;E4=ZhfJy|+oK5!{gQ?G1-$ht_~wMN^pr)9
zvT&)|=_t#BqVPEHD2jB=yrXa$H0#*w)TU)!Qz_SL88K#pHB;R)1f>%rSTZXi1w&ap
z%eF~=5!+BzB_;T5N=~^Pe33JJo@i`CjnyPxVhC1aM(dj_UhypM|KRo9`k~Kq^zbTc
z+uMv{U{cqNCsRh%m@u6tvr`a_LsE5C>PX!rbW^g!YGU8GHc=r4s>BwGE+P&#k00ag
zQ_rB<E_m-Bewqu<yPq>p-^tErUBpXH)Li?aYk2>Eek-s2Xa5W-7m#E3&h#@IWu?Uw
zDEibtZ*nV5vWWTLS+uWd#Yo&7m)i0OG^4I!^8a%JIxDFgFcMPHCdCI?jM90)Yhfse
zKHx!Ug*P?Xt;y!Z$NXBTJ${|>O&Oprow7(JL91VY)P~vz#Pq1EDW~mSVZE9%EDS1n
zQ@3;(BTeDZh<1*~6a;FeH7P*mjC2;;A&(YFzfX>84sJ~vq#mQeTZ6HIeJk@EJaRW5
z`0xQtF#vIR;~`W8CfeMNnu#oEVbgczxr-9DIwsC=+NtL=scU-ulDe)KZEg_b7MOWr
ztpTrQQBwuQ=9!pju4+o*XTcdHU|d0@#u`suZ?`i~(QH>tPK(XeOsSB%#`v`9#TxBh
zonnopz$A*O!HH%6a>3KS<(b5OY#dLqts^CQ4q~SICTBh(>1R1XYnz!<*(FR2>Lz)R
zg-1-&B&?L07(}!0G0j#ru;Z-#?BBJ*J*%rkEYms=>j~?ttE?={Yp~=!Md6cBfi!Be
zrcL|ny^=I)B2mr8VM-ZBjJGx!t*^27)U){Fp$G6Hh^Yyp8AmVb0AwMGe!s`s#wv3Q
z3%GP{B91D;k$WFv=gJO_4u>aa^L%nbbJc}5E3Vng*ksC0<`^~t71?0t@|t!ddJFS&
z114)*?A+C(G&P~F@v=;))fw%Tmbt|qOY;MeDp9YLQbjaGvDj+Dw4N~MYj!UzuytYs
zD?P*oO6PH<2OkKdiT3&g4WtmQDo5`qG7U7-lX8VJ3eiNYv&`tJK5odf2J;A7Y*L1_
zDUkABfA7$B?Mz}CDUFO&U26JML6sJ5)Z|wcu`vg*%6<r8s?Jm$61_MDp(!wsAZ@ZC
zrde;AO>1d>B$1G8*K+jUv^b>a-{dN~X?l{ZfN?q=pbix;eZ{NT*xp1$@TF(ExlOEw
z{N*2hfF1Ka&Kc}xM{mHuS_(Ygmg>M31>Re1;fMmh*Vn#ts})m`q^g>FPMaXrS|vze
z6P-6Zw2n+I&3%SZ&`$2M&&ILC>^b{f4PdOr)T#OuwF+%wBsx7FscLN5qgVE*rW0)8
znGT1VAgUu#ER3ee<_Vthtcy9mVR-ik-p#Zc5^Qz=wQnq-VrMf=mu-nRJyOj)YEl(Z
z+GJ=ONamzzdSk9=rGVC&c8}6?7mz7X5)$RIrE?16a&?WyR=Sy>vQ1WHx^ecuOcR)D
zi<E7ut=6gC&iZp;8U@%~Im;(*8WF;V^xWE~RIt%;IH{!XS}L~&F&ae7EiTZT8&Dq;
zswjk7SsxCo6uzRs&&@HvbBBts3^mzhY(W`>0m5jb;-S&~j5jBgwq&%riW!bzG-f*2
zqv-c3${yBQrFn)rIi$L0M$y1R<neDKSQwN|qP3}3CWoHXW1jocn*P!{V69<kVUFd+
zd5lE0F{yItoW&WdeYhzY$TNX+py`WLsr0HOp4E+WOOOUlH1{?qz+zf^ng|HgQZt=S
zwD&Ls>KF)hO;uG)r<GPJ>zcZ*wcR5LQRRvhsl_3NTmaDZ?u@~u(I6Ty>+$<kfweZb
z@p!Gg7LNI%N8tt(eNSOMW#K4%nJTZ!h;ga)!dXM%omLnd4T02WJ*~03mes#=o}yRa
zOP8vo?Ht;Z1I$F-O$joe==#<LoC9mqEOVPlB8hpGv?7ejv|-c!?Phd{XK3su??Iw-
zS{r*{TJ3<GxVMb}k|xERONnUfTGC$(7$4LZ^>Zv=bt$*}m;b>-pSp{UwN1vu3K?6%
zWJ;(e#8~TnbA|SviV(ttn5w*z`)UnIoK5$(DhBFFO$?HyN!7=7mePO|q3ri~_|7|7
znww*}U-IyYO^TZ>OCSCmuHWM+7d?s5=qP{jYyS_g`bR&9*?kVOb{~19q82`a2IAIh
zyN5{>TdURNrm~)<AT(@9W7@qPNiAh*-^>N6#4c#YQ8g?QM<ZO>*xW=*NlZYK7;0FT
zv=<!42{uRy+WDGHBlvpbn=(M%vunWC`nJx53o3~_u|hS~#_Zv6or})e%Yj2{Y)!0Y
zLTMLkvuI^zQ%!WMoGESSN6a|;sdu#wDiq{>nFIr99KSs3!;9m-2k+(nhY#ZYfDpA$
z(b|F#N}{Ij2&u2grM|Hycb)?ttHL)scAiGRKhOG+qb%-PU^3idI696cVx>;cXiPzv
z_L!*PifQ(}%!ju|4T(s^DVZOFFbxxYGAvc7DU4CMnmjfm8F7h$($8Z>vr|==B4LWP
zrDkVQr?5}WmCw9_ou}_3xU|uy;nphTlQuwA8_@gE^q_@i#$m1wWp!p!2G!;)eNtCw
zggzWi&`JufV>GJ9M{<nj92Y<7T<*K>Gt^1#O^j!CV~zfa0j`*`IvP@kDZZ(mYcG;V
zG}trR1LGt#^fIu7N*%A^+7{cp#?0?rW-2v<to|HA)8nH<<aK>IYZy%?D(&w2O54?_
zf%VO8o^;kJY;3GDF{P@^Mx|jlnaWh{&!RF76UF4sztQBUk(3vmi)cWzd?w-p?DU?E
z`wy~w>JlquiMNi)ku}UD&?_9i=b2v_5Nsq)BF01CYL#bFK{cLISKI8KD<~G`>6M;f
z`iL89wors#;i<|<k@{oBRs;#yV2~p9f|-bMUMo#jgBhtb!)dGvSXA=(TXvf&`FSiy
z78x;C+Hq8=XVN&WLTZLwpBJOj&h6aPrrH#dpe&3SjV7O>Zj{OjQGq1MA`GVO&&#a<
zlB=d9Md8%D8XJ|?_S-ZLvJGXpiJ+3ONzP{&V^UwC!xzHJnfqAYwZuaYJPcN_-cV1*
ztUdHF#dyrVqQ_}-19p1PLRsju>K(pU;&K(WRHldR50qwxX)3DLDNS2jG`!NSY<9HU
zUdWupGNiw4DkO<Icyl{<bNJ8$ET45c{c?vgGy2KV7*|+{geceuu4k!TKtfIM4m&7_
zqayVQYonc3>}$gI2Jz@&UUc<EeCDQ4bKisaHEYp^T9qHEX(Gv#{A*&>ymzGg)%2y3
zBfJ!jo)pO?hDk~=nA~q~(tO~2LmepB&^F8wlZ5ng;Fs!{`dv{SWM`V8GmS$Pt*WfI
z1G7`0Yulg^i8Bh8G`1$MoxGmZ@olRW`J-|*C0AORaqybaAt~vmfwETXBA&58Ef(AB
zD~;EAd|Bdq159agy&iK*OR3hpMTn7TE2L7IyzvfSl=xmnVLg)=F<}a0O9&O!_7HqQ
zZ@!P|^%=}B5{Xok2~q{94OBV@^6^H$ERy6_ejZZ-u9>YaIIlhV`8r8Tj(G2}-Vr0z
zQIvfVH3$qTG1K(6M^#o0S!^w~<DoQwMT0R{;Js0*ZK|TW)W2y{aOl0m+x)jll~=74
z_FRcA`uLuiq59g!zT{*~4(hDaHR-KuS|W^@mDB=jq@lZdOR5Ufytau2$m^<2%vhWG
z4Os=)q+VK^6z_8aFEe8VzvxrDMpkD}dun<fmDZp7>7Dl4t1zzBK*OlWpDQ$Jnfpzh
zYlF<3P{3*5r?J{Lqzq7(9={wcX`v$bQJUO0N@D5}wRn(!N7E^h9zfUh?qbroGhZgE
zud>NPtTA)D*gmwzb#MA@j^BNR?ad*ZlfY;)rXE+ss#c-3C<F;iL!cru4XVyNsgrto
zs_?p`x8SlCgVCe`rLGHWl-8ZasFoKND4gfS<|d=@gwqCn&N%CIM$?Me8!&%LkENA4
zh8v6A{h2TD&R_rce9xQy8L{7k`WQJ)EjH7c9b#jfa7#snY@hPX=f>vcOmwnmR&1lm
zVRB(hvc0yAeajY3Qt}+5i9G9EYU5}%R$BX-)`6IYZDmlJ?>8F0DFf8);g}tJme^Py
z5sYRULli>o5DKgl!epB#pS_d&j%;#t7^uY(C5y#qh0`Qslf`s&)?}V@l>51oEO|^-
zGm8ofWW!<%1RY1Blb0k*iE-%Ip_KXbI8nM_#3E8bn$@a~Xe1BWjwz3+V_O2zsA$mH
z=Uz%M5eXH>7KG`D(N;x_jw;r8QR;y;+9)nopvodkS=lkBfHxU&5v(mz|7j#n$C_z(
zU_zzM<F3HQkW?vwP~!{~XrGLaj!;i<R$D|1qZPEm8Fuv?SG?fa)I~&!bPcRQT$Y?n
zG(JmnT;g)lz^Chzqb!<=56l?gEWV+<SBOEKtDsr6Y`8%%)?>qjAP}A5lBZt8hpxMk
z%0#@4j6-B|IDv-_QTR1R(-C1>;n50Ckcg9*k^<>E$EeksnK1zoi<t!K&21*z6>|#(
zuIM4vM0-bK)Sj?DO^{%n!9?)Zv$nCyuEG8$D6$cPDY*ZRdpYx>v$^;FL%No&l7Mnm
z4Qmmjw3T43R+}=tR3&<oP)b$|6qQ>yv)8vKILmy$2jdZwdk-;~8&D5xCZh?Z7-DJh
zbB<{>CQQcISZHsL1Y#&Lj9KXSSlB;D6rqj|aMUqU^m^2_)w^o~)pSH%M?zEzmZ2sF
zLkucRBPI!tB_|?U?ZZ-DjU@(E-h8}aooz^#4Ry}hJnl@_o=aI$F&d;w3R3eZ4MQ{5
zN$2ruPZLI&9ocV}%y_7SDu*T;VqC6@w+_Dc_^Loei1|9u^sD6<w5i&fT=ht1Nwlh-
znIRe%X&_`pd-2Ca{=tv@D68umOsk4s&l6){G#+x`?%S9jIZoNLgZUEXdY&Mb(mBdP
z`>3q<NKp_a(py@>8ISQcRhlj0Ky0Dw)j7yXsYWzMUz01Wvq37%j;XWscdbxv4moz;
zgPifqQ!qsd&SQKD8{5d%Hg2E_jp99a!BS5qnDG!Jk-`^^CPtjp#8@HI2%~Knu3~oX
z;C0{go&4sTe~&O)OV3&s8L`SjX{ht*F=`mBCbfR#Ds`*KI_s}A0j*}N5+g?%=1$Z2
zPy3oMllVxS(8Ta*1H$ZY<uSNzKcb!eux2iD^PJhnz&6gf$pTxU9ify3aB|yA<McH_
za9dq&g4$Fe)=H<Dxoo-@h&Q<Tl5%+kJHHHb0~XFY3+v0&zX*lVz@-YcR>h6ZFpSnF
z_46tv95XFGB;{>M6-li!=gJ%c^>{+4CLn>q!aNJhE5y2HJRBlc>86i2)>b!@4M{<i
zH4XhUSCegRm`tWkf&pXb6@@BGM?+ytiqa*v21%$YRYRLfXcjX{#LnatlkUOE2?c#m
zlO&p1ce$|NR^s(x)L<~*b<_C!8Yty{Xh{{q%=pPSKBlT1Niarcm?y2MoI!t|@+ySb
z_UmTKD6u`hc4VqeldAMHTqDwE+ha{6BA4t?``!82b4!vYit-#%Yn`^LBwDmJsz;#T
zqu16Muh*gx)3fc2>fC>x_7nZisKIEnfuw*Yi=m0&d;y}qW*&Kn2quJ7K!_^rh%zlI
z0<PD?7X^OT4$MNI=q$loV(qcLd9M5Ick%B3_BX8TU1T`gWV$xeo_$;6OT*MVrZF;%
zk<HPB+8RPk)xV-g-}e(`Hr?YqekUb*vr2K&D8d_EXTH!jr#PN~hjVxA<=AkY+g6Vw
z_rd8q7Fk?~%nyW}yXQFnwXdZ2fB$oi-26qZ{U3kAb6)!z!s6MudQGd&1{Ky!<7V_x
z$~~bfl9Xxj+P*WhQJP#h$;TWM^DegHfvAdkl8kQ>B<d-I)N2pq(KRvc+15AD0AxP_
z2zefB7ION=!Z&4r+Uw6#&|^3j=I6nMh{yyJg^C_hDdluyb&Jz?&$F;l^U(1z<Ed-Z
zx(p>`+F(+$)1WZ-m8o->+ZnamZ}P0&L=jF7fz2DR(qbjOx2`J;MG~RN*_aw5$q`3I
zZA|9C=U7Xi&a<;bDEjlPtn4DxY0OPkQ%y!prdx;!m?Q|~veSglGE`v$gAte9h1d?1
zH<GS}(=irdLaax437Ul>RgBVuXfOn+h*D7&0|fARLf9t6DLv~ct<D@13<WjMc<Phb
zed<19kp{julJ7L0lUTspRNc(w=$KS)ojU(b_BA;dxoDbhMMFbk#%D7sykt`#qZKcw
znL}@os%G!Gr?J%QF&b~H{JTu4gD~70;u#UCly;5Bx>Bn)G0$9WGs5K2Luq7z6~nZu
z=#NK?w<d&LQ<heCFg@~+jw4EaHaZ?6w3`t^;T+@5HDvc*jMHR66fn^+npo=XEe1u+
z*2oehtu>Q2@2J#JwXcDg`iL>Dl4tA8CQa%fshXeHU+Qx-Lg5Rhhc^j+sB0za$eBoF
zx;|}aD>hoZDUyRUV$Z2_%n#HViq7J^C)AE$Dz(|s;)u-kN{&gT*P*WS-Bo6U(XmA~
z^?!w^Yd7-@NL0`|M0PFje%x)0`mMSDH2o|Kz&K_EzS0cOyhih4jJc;o1+TJ)7qim0
zn5K^ilm<0B)3m8@CVG+<>=V5-)1VEhS5;YpA*L)_;*w=5X0Cpv?NzCpIYP=x*9bW6
z<2&_9PiEi#(>QqKAtI4r45R53F_D+O=DWE2e_qSdG_us6)9QbKqSvEHf;RL@Hn!H;
zdFGio+tcHA25}yWLf3Vs>}z>JpK+Hdv^FJM8ZWmwACt#e7z4eU{&+WYn<Lf^A7nb*
zqBw28Iw-zJi0DYDDo~Xkw>YnqJ_*DPkKNkB_AJ4udS$N)jJAeg9CaNjH-?z)4bC}l
zAD3TtF`vBd(}+y<xSUI)WhLz+(l8nF*bT{i*hXzq??-k{;pA#$4mO)adK!7AOD8Aw
zXvF<;z^mpiIX<SXp4&81D9^ezR+F=_jVzP@C~dMLIg!!K`p7vED6PGnt(@kb!dxLv
z{mq)lm8Qa#=Wxz6tj+}Gy4QwY$-=37SlqLZ`IQ|^5f*ptVqs;4axh>r8Z%y7gUL42
z@wN*2r4P!!;3U-ApAVQiP}xXbM`BD24FOjN@Tu3bu&VHDEp-fRtglh_dMxzk=#_mY
z^(YPKd3>?|w0)@xmV^GpxX5ELOuOqf3d=StMw_N8Xd23?GN)82mZnnE?w;xYQWnc%
zQV(=yETpkMX#zkqr#Mq0^F5b#y*0GP*lI~+mpeZTO{&9WYHo^l)7TW7=&F)wnmMpc
zlylbvgbmv@Ro+Z$|Bz<gnsiLAQB=20FzoYedXuZTG~<*UxuxjjbGMmZJHzU5Df)G3
zzIC2|txC1po?w!Ak~KatJsa*;63a>ZLn9=WDA~=u_OyME?V1sRdOFpFE)eoKs%VIH
zg^WgejlESGv$u$i49+|ayR-v(J!-UPG}c0|81S)Q_zgb)*}EB<ZH^pT=Sdfz$^ZGQ
zKh2IyPG>p}9J=9)eENgeaPuv9P;U+~eM@1jwk$P*m02AjnX)vJM{28MwQ?o3ESu{W
zMQ5lQqf;X!R2*MlV_|-tV1#>)uCdhXQy-qPv9ZS5{YTk4c!V>r_#%6+dKu5Z;^$bq
z{bqXOf@0S$WV+VgtD?Y!kd~QANl2}(T1#W#V)OK+JtwPpD7BBwlxplTGXZEWeB>kn
zF=<z1gO_{zV@k$)uqrZTZPe?MM3S;t*f$!!DFf7F53NJ5M{nL@ZK(w_1ZyTB6RMyN
zPi=(l?J+x-=2_}3aNy7e>!VtmxJ}h4=mHG|LNkrA$%<%CmwIt>lJvj6EhfPWL)I|R
z(Jet7Wq%jb@kWxU*J1G?1cFSkQYQx|4f~y$Q>jXunxzrzIQ@+C5exOG#$u_f3DfDA
zX&fR>tBz7@wbh!O8Op|9lIm@8?j&b<k|8DKx?`27T)AqKeCQa|=S?#jwU?tvS=z|B
zo?wkC>p7B;hqVkGT>ir6!eWs&O45ds92DntbGD7A-AK!mt0=VeM(x>}rDc<<89B8Z
zo7+f}wbg%1?oE}NEc0fZp}%{ceS3Cu=z)6?171`NDk4l{Xl7<bBGyHvO_QVJHh|Ee
zS=Wq3D_>RA5kiyg5u@Q2J5Slm!9x#|x&*01oe!2~)|oNQEU#%brF1=Ph=@zhOKmuK
z`-42=WtVaNHFq%zK$=S@sc~Ne8kGK*10f6|nU0u(l$a~2DcDJ?RFXrh6ULKU7V>pT
zLC-`TAuAf7i5QHJ_|mdtSC9T&33a9GFqTSpb%ODFxyl%b2y@GQPL+LZZPrxN2|;2K
zK~m~5sqz<MO&zp<DTa_{I%)zahMJwH?R?yAObBh&F8A1p&5wP`s_P9;HXu3qK`YyN
z46i2NZ8P&z>bjYcY#P3!)%`AjIGbiFx3H;4CC!?``jq6TW7DrKwTkJ~=^ANYmj>Cn
zcm_#ur>XEs+PqZ-F-oN9_4$!E{uJYS%5Y<y$)v^>J*+L*v-dQ1oV~!!fAk*qo_!WQ
z?~uuuqTi=CU*L>j`<~Uc<1C$i8h$XJY)w*FEu4y1WTiuw{-mJP6lsG_J-teUb7^zW
zRpK@aO|QoLU6_qkR*Ew?^!Zyk^W3ut&SA<v#`kbVNj$WQ2~lS+FVCs)Tj_PZjYkyA
zi>lUH3$BX9y2j&SG{vp1Ba6$t?AxBlO`rKJ!_hb~U9uyau4Fvfkwmgsl}SQPx$4^J
z?$S_$Xa1I%AaEwsY$?Mzs7w@4V{MKHn~J5$YcG9*NfNB$AQ*MjGfh23HAotJ)v#4^
ze<>;O%vnW4L(MhKDk4!HZF4<lMV^`fI+v<vXzTe@jL~Ps^$SdIfSH?PYPFrl^$Og=
zJd4XaaKjPP(FhYNRa!-qNnnz?Fsf=wowOU!<W~$43A#7uJz8f0M-BXAjfm4=C<^s-
z$i{Hg?DbMV{uNZc{4?`UB_)-eYm>E^R+_G>Mb+JNlEB&-CFLAYnzk~OdslNcGC%We
zWiIausV~vxByyW5Q32Ha9Vu~_@2P2J!pup5Tv`)NK$m)9bI~bjFSz9TWC5o<>L(M$
z_5Dd)$l0bcX&cko_UNXZcUvh%n%SY4)Ek<fS?vjJ4hCo1u~s&XgwfA+O=>TX3DbV-
zm}tg=^U_piwFv7m#PmFvhLND(-O`SYEi&O0(0=Dwrv$M~X~uJTk1q;r*~gYWe1AZ%
z-@}zXOi_S$SUmB>5rzkD$C(;eB(-D_YHKia^Tcv4EhG!mNLeg!{eSvB?z-<V2ktq>
z`A>ZcKky4b!YLPDL?s>>ALYJlKghfP;C(#s&?>{KrV4@Rj3$ip2*lV7ze`>ls8RA5
zufCTn3)Pcos*zbfM44P8jcTzbvc0j+Qoql(cYN{Kan4*>qMQc8I3jVKYWxMZj%;x1
z^R8moQ!YnF6Y9-ll*K&MBM^)A1%`36&S}QT*ycXu#HZ?_%-C(4_U0^1m02Ju$<P=s
z)UIdoNwq7YO3GQ7N(`VypnSf0oRaE)TJQ#2v_gyDNcg4<Q0uZs=?i*Gea!ZhP(^xk
z1DtV9!@anYYC5F~k<GPjmUk?3!Rbrf{qPzmhA~YinaxPkJZz1KjO1J;DxQ(4l#=_z
zP$yc;Ipn#8Ce=sxopvsF-*E#LAw)Rs%nMjqIfZ-gxQVqB$J9B@bkQ91CK{{Biha(o
z|I~8{(K4BgvBpwQYw9XA)wn!vAm-wu)vB-!S#3<3N@S*xxh$oRBpLmwZc5H2yWRFZ
zX&Y08iIKvW7$T)Bslu2L1HIIXYonx1K)C3f{p>&YRHUa5pl*`tu}hP=l*WMMYL6Q3
ztmtVQhe&Md3wVSd*bME?RxkDPG@ogusm_`x;0g#Du*VY4I(;WMK5(znXRKA~kkf$A
z=8A9X@o~AAfVO(u&_~kM%<}v(3L$8vdpeo2Fu#j5u@um;B6(y{>s**7D?A*Y!1qqk
ztt*;lti&jk9QxuxR#y7Np-DZv8bnJPf1xFR*HlCe4zVHiQKc+mlQAR(*EVUp>NXQ{
z6<Z88SLI^5p6L3B25dBV<FOWs9>#iRS^xlm07*naRLsvCiX!6rW$Gc#?vF!!L1keo
zVCo8Qgryxl==G?oKGCYivWZ43yD@47FVut(sp~*BoiZ6s7*D4}JXH+L@0fesZOlUl
zHzAEd$WEYU#<jmBQ|4UHH9c0|XBBF}c$b(0MjaTh_aa2C<`@jVoXMJ|YJ(MZinNb9
zRaRyybHRXwq%ml9?p>5fG?98*YjP|`LP)gUhW?zYda2@KZBQ)<V;b=j34$$z<0lTW
z_q4M({k(HIcKjG3!p@yL82CQ#{jJ~Q;ytI(16&m7^_ZLMt8%3;*_=#Ri-G;8>;vPp
zB>{t!1yXqMo=gc8lllUD>eX?!30%b|TB%nr)+Fk`RFwv@#t_9pub^CB#x3mT(ESfE
zJ#m8Kw9|<$*^iYu%w!5D)-hE;tj8`ENUy*<!}z}YaMn|-EE1*@im4~o`V56CV7Eq?
z;U@d{?Bc2yJe~Lb&yQ*w1TOWVr{2}<2W8=^T)CE<07|wtvqu^Ss7)KqqG5+fPJT8~
ziaJ@6=CI~~FbB%1g4B%qAXhxKPgO<!0-3to*w9q@X+)o9jw<_2O<^D%MG6OJDrzaD
z&bB=D+M29DY=qeCb<(h1ViI=~0}h9AJ_YrLn5djMhcEk>QH3$CnaL?BDPxR|1(BfR
zbjS%iqvM5Cq$hxA1-X`h3x-(htYurZj$=%LVvOk64KZJnF*7aYuhj~e;pM15k$!1%
z^EGLWrNGNXNxkpsGi|$)a<w!E|4p?tQGJ=w$IC<C4T&agBF&7`+yk4B&+47dwy8Y_
zeT_@3l1A{Sq1BounV&_qoH41VI#(-oUU;^*F6}LiI8o|dRjM+NIEKVxGg>XyDSK(}
zw{2x~_WiSgdQKLb=6TX2cw4<rd%P8#^_Zk!tQ9erjxUcD)8wJ*XJ{Z()irepglS+h
zsR(r-Rsr8H*|~o|^E>z826Ghsd2Cr?N*$xrw30YS_TTcTIAvg@ky;Bqig|~vbMTn_
zS8Fl8Nb{1X5K4Tpz~|rb4nF$P>lq!N^27iBja>4g7f{;;CUwPR;~qZtJAcey|HWt6
z9!=O7*VHM|AA{B%WAagBN{q$ythK&{2CKcYuf>_9PVdq+j@dW05Nw{rDo6pxt0C5T
z4vvQO%04TFSLOS#rIQ(^fyr>f=^MvbdFt~h_Mgl2@Pia$F;X?k(T(D`wZu?20#~g@
z35{LVgo|(drrd)n)+Qk{m+6v;fs~&z9pBSDe@)ApQc<jdV4eECE@Hh0A5*oq&|unm
z6%zag!#8Dsy0mjI&h}W?vp{r_!w($9^@r43LxK(9r!W$X6{?X?S2b%#*IC)K%#+So
z;lM-d9NrocB+VyGl{WH5DKn~wsgb^|<mJ-7X=G&crk!IX?E^6M<_pd^_YzJVeSnLf
z{0xpfe2|Ck{1kC}Oi2p-#9$>fN%Yt}Eb7Db2P+i)KHe78)2TlAF;Y(gp|%u-U{k=Q
zN_<JN7fFKFN}sTzbWRls$YLJpOC%8DRNqUQTaq~PF$K|52b_&0eP5lPHLjb%;ia(@
zDG>Iy;%U!&27YM)3ZLi`DS>XYAwaL1O9DALvuMqX<0DNrS6`n8N#+Zb`HA)<r>}7W
zX<wS_;W;a9tR^7NI`2%%Yi=MI&D@5n!ctI~S~pcmmGKZ8waMoAAPHM2#ir>SvnEk)
z0-+vLPiqGAB^a9mQVqPKM66g%FeJ4tF@kfR@o30$Iv)|2M9;Otea~GF^30b%pRLax
zU}Q5TB~|dW+)b8Ez@+iqnRZALcZ#CSL>m+BPwDO|ydtW69V0QArqXY4$sbQtL=0Gm
z1dsT@V5y`(S5g)ZTNb(r<q9ESrv@K^XsJ*|=E4i}bHaFRm`3mz&DQ%g+t&ypMI-D{
zSADD<F`aHARYeSC>fd;LantQ<xJ2buA4)LZs6a@{{M*7|9ayhpgBqpCG=d>ro@V5N
zv4*-1)IsTC{Xs#mUufkZ>gMhXt26anqP)elE*fW2KO<D3b?gzQlbY$IrViT68zTCe
zB!&`$A;v7I6;<*dp^iyZD`@t2yt>7&z42f3y+8g#T=I-(aON3jLp4R5<CA~-F6JJ5
zi2d_@%AS@q`$d5(9Io$i<$!x`x{arO=gW{@Pb)#&LHkTg?M<chZLnr^uB8;}gb6mO
zOr~JfdtD2KitiW^f<<Sh8c{oCgu%)VCL0?JPCbJo*Wbihr=Ouc*rkUmBFhUfnNUA`
z6u+{BTb@Ja9ML%3;u_P{Rr<?2a0?^MXi61CgWmvgYeF#|5z8%J`jY4K@oR5lxV1_i
z;nU11OXjQ%-}3&D9p{;U&jukaDQ@QP*3=0pc`$3zQs8TH|9$?x9FQbhVhp*REz!`W
zrNb%@Fuys8$2C>CT)%4Rpe8$yX$>~>fm5TaNfk`}`1C<0)0|u8LDPIYa|P5kYtU_?
zV2x|4yScLFoo7%KRL<i>lPY=aatund4pku5A=!mUZkvrb;#3952x|LHh-w3?noPnW
zwdUlCWQbZ#Ck8@QJ?=IpqYX;i>(TswL;076$<YRf$ssmP9lK?Gq#nQKeq);JX-;y;
zrAb!g-cvzvIlwdeOy>S#Ay>~+#nWa|l9?&Kr(?A3D6_0^Yg`shN`Ygh`zPgQ#O5)Q
z8f504)HFA>>E}$Uw7E#oQgPcvjw<Qu0oW`OW=tk+G>OG_H=AWN<OEB*hC^a4NZKn!
zs`Pw<#L(yh#41pSnotMoNyWIH5+*f+xkXJJjzUr+HuMGsd(S+Ll{3zvw|5`n+ZGmO
z>@=BoO&VmDfCR{zk&**FtBOrFGzE`5%E#EACaNj1cAi5YzK-|)@h2$fPvsxI?Wb5i
z^-QLdh&2^k58T9ie)CVb;hNi-R*ubyjw%#0rIuL<tJFFGsrNLs-55)ZHP%7tdQ^4Q
z+!LMcu8F05{Rz4bY)a4un`}f~Q_jtCe>G(1Y3H*&++=e!W^`<uih`<w$!MFs#}Bdp
z@~bFLy$~6Xs88IhiF=o{2U1VAG5N-);q$b8#?tsHGmOfd6wJjUNl8_rGJ^8G(enDt
zvt=6pR8V$`R>L*e72~l^{n0>;bp^I4aYgTIW@Ely_@)d{pZfgEsHYV{gssVldiR*A
zGYmZhXXv>R>k^>`@ikSbsp^{JhmW&s_f9T2V<+;!5sqvHLevLeXDDSQu;!@;QZ_}h
zG0(J8X)J)GhgK!FN!1VyES46RnV(<g*!nv6-Fq`VD(X6ojcBMbpj(V>XvHR~(cORA
zc~tcjOF>=fEI1KE2onrb%~pwu4HmJ|OqJ7_gUzub4Lw;4e96)9CN<O>VwmdYWE@T3
zN($O-2uY^A01UmNPpB&@rg$P{6bf$}6}^4Cck-l5p9+N|?~*1{DVpm?dVM;?B;;gr
z50{yxNTgoa+2*IlTgze{iH6oRPo<O1i3Vd+1tGo17$ny0KI1eB9Fw}nSwn5|Av;Xa
zN{?2pltSHZ@|k`|JIgXv0aTC55rR<1imIOKJ@p>yvCb<ET48n0>pe(SrBs`ws-`%x
zL=$m&G*H^Ps=CMY_>g+ML9o5n!D=sw%Esr73)9qnn=UcYYt{6Ukw=q|L|xiSDXE7|
zk|Q9gnCh`q25Ps>uHEx2F7)V?7FQTtS>uY5qU_<zJ`yA2;h1d~Q)NJ?s)(tfXAQO&
zu~Wmew%Q+S0)oREL$pr!-Wb!hgRQj@rrO5VHD3dbBo3`->Yd(em7&+<hs~Q#q9hrw
zvqP;!rRt`>Dx2A?sydd|iP{He4c6-h>AX!D|43PSeLX>o-J%(1U-X)_EXL^E;~<SF
zSfkfqz!=ALQc=f{dRtSF+9cp;Yz@uKI^BeQS+W=$|9|%WJlL}AO7FvdYwvT;9o~@n
za?Z+{t5Ai)(3pEfH<}>TY%~=Li<C&)qNGrSXolrKq>vn@BW#6b+2N2vvMrmILkdN3
zkVs0DI@m3mlqix7_C$0yW}pBxP*o^YWmV-ozTw_`&e?<iSZnWd6YW3faKVfKcq<CX
zdimac=bp3o+H0-vTi^Hj&whoWYw*V#NRd0Qev2o3!;{ZmW_|6DX5P{?4c%hNY~hhv
z!~Ofm>_7V?i*pyq(!jdc-QKopCDS#~_<9er**-|)R2<D4WgKl#<W9eqy%wly|52J}
z+E~jZ+U1O+yBl`SpX0&HZ*lzEciDaKi(1%8BJa-d=?vlaF`L)lWU>1qvcE)pOLO58
zt2f?ZwzE%r`V7NrK!#C&m+#19CO$aEFBY6Vy~7{<!#~R}|H^-BmP}godzsia8mo#T
zt(#(PtwV(}w@2BNH7SPe^sk1zathg;UlIQy3og5=FI&Z!N;NB!QgeOhdM&!IrJz1i
z&@HJ&XE~g*poK)h#zeM?Doe-&Jxgk6uIvp<{<lS>nBu+&MsiJLypF1j!B`%p*wHye
zFV4^xJv@@3IiU}@PROJFYz~T~Dl!8|Faa_fs~7_=Cp@v*j))f(u1keIWyNh&e@!<G
z?|O`pt$#``T+p3~08K!$zg}C$37F`i<qWcaoIS_#JY?%^bk-gCesr@W=4A#FD=?hS
za@g#qF1|`5mNH8z6$Q%$P%TAgrEt5ckTnlzhpVDMuG*l8(QBn5#dR$Ti1&E%^d2%%
z>-Cb#by-ZYb(6Cecqy4SN78jBNfq=#R9sIW#H@|Oq%)l<sH%F5kr*>^OtrB%jDcXN
zeGb+<G9*7@<0*#Vm%~*g)3yyyKD~#-YtzGu91)?JwHg-OsLedAF<@u46y{boZ_mn1
zm>NA|im_!k>e*xs9}`_l5+Jp#Zr<X*{J;Gbp8fa-_~-t&{|2sEF!UR`CUX7jzsukK
ztACR(e(qJ)W5aP6$!Sze$COA#WYn5Z>T;#J6gi_V^LeD{T5z7Rh{%=ZY8E>~i{^`h
z9WJPdU@S;D;2X0BOqpx9-{#{V`!LsEd6PT$AL8;6!)S`e>2=a3bK(8>Sv>WA<ic~>
zdta?;K8g9uW=lH7Nl~&Y^a37<zJ*;<^5tsHCy(BDneKR|DHKL6SgVh!Gffq>q`|d{
zZf8S4nilab&j0Y0b3c5a=pW(r@^AbuiIF&tgmEN=KpX}YLfDK-oA9uhc?c0wL&}*k
z4&1tZiz|<x<9&~xXME!h_clV5Jb7nsoy}8w`cV-hrS&GW3b9~s$)A+jL;>Bd8cACk
zO}0m;Uvt3%rG8IxU{SPEak6hYf8lZZJ|J<!7)P2;JL84!lf~)4bqehGLw@!n2YmMB
z4KXf2a+&0;>S9?+qH1|c3P=)S7->;IsxqrW&UE4!EC{)*v_;<t!*Nf8s<^cxG|myl
zA$j2a?|+`X3#VY#S~JpV*UC_n>NOA*Vv=I%v4s*%bT<8OBL?aBmA@0T>%Q&zw~Y+f
zcgQs;shXUrfH>GYyT@|Y@o+O0z_p~yhC&;)B_^Gdb>)C7k2%$J&E}po-i;<A5XM10
zJKGs49H9wNB<4U!BaL{QMV{txQihY&5zEDn;gCg0L?Cy(^2OKSlv0nY0&?D7wcC?!
zCHU%B3equ6!%P&r(zVyQAZIipQ(8u;qmo>um*Nwr&n-E9y4Cp`AMuT2v1nMFKg-^w
zD|DyM0*T@NT^`=J!GpUG;nqV|$0H$w8zYUtd}ab+0itsZ*qD%g5LHYR=%8D)Jbb7P
zm45R`dH$}v4v$W3PEQNwI=6xj(u&jB6KBekBB~x<=#5JH$*K(LqVgG>9>~SdPb8}s
znq)jWU*(%3Qj<wOBKG}lGpeVbK*~X%Xv(!HO(9ZDXv8^kP<x5&>`J0``7<g~70qrU
zCG;6vJ3gTCW%cqay!y3ou)nv&_Ye7({=^^V?7U<1<{k3>g05+4=N-*#iR&`$sa<aV
z{;Rz7C;l1IOb=YvgRZg0BaM)|qHL(K5$}<<)fsDnWHVQVeN`HFq&&?Cr9_UpE+s|?
zk*?{WA840*?4P^H)qn6?eBjxq$cv>Gj!ww)mhR#?HsAd&!|7|Z?|BNkhS`}@G|N-0
z-+GgsXP+nS?GslU;`#*dGD${Q_xOG!A06?tKl2ej`&<8z`?qhaM>P>wCa-eEVW^ei
zRztr2_XIFntjjeK0;)Db6<R9&Hly4pYuKfnDur9)q{i1=ewJ*^eEq&O2Be(|-hzzQ
z$~8~-z9Mfmf!jiywrtxdXq`X^ak9;+g|gI0S%tZ#XiXJOGApE*k_pHa|7PmhsxjP|
zH@6n2DI~^01>54FXMYN#&ZTalw<$$(2;>-%IIx3oan`cuv_MKR6M_{YDHAPzk0~*R
zL_cia^%w)S)~Et;Qo9J2_-@U&VP{dHEs7P4Q10`Dp2GK`rRDqlG#jk!q(Y%C)JKXP
zBvt6gO~<mc=P#nlqhhvk^)!(((Ua@=fJ@QlF-`R~D}s#`tz|yDfcnm<jhjfuSzOb&
znn;B%Tt!dyq9drZud!~N2;E$LOmm9RM`Ni3nno$Pk_>_z5)U7oaI_lP3_**~sB19>
z7-OOz6aA=vm)taW?Xl)!BQ|$flzIz#X~lF@?SZywDzTz8fs0=zCPXOG=!YwUhsKC2
za$0OXKTJ<Vdrpr69yLcP9^EN}?C|&h@Bb+mo_mUa{(tvxLbGJ^;4ZV}oOeF|8+`Ha
z{SL2w>mB+5PGY3bW7T*mR%E&6le)Jf>Pc<`1hj2O7RQ)!Z75(PO88{Dd|B^YirPpp
zmKMd9$XP?dK}eq4$0J|-+ROaZkH4R5UyIy2ddNPGkUed?M}FltoBnG&w&~e>?t?TJ
zUmy$thLtthre=)uju@gXA1nHCQ)sH6QEz5ekq-)+ub}Rt9b~=#&Og$1(`RC_lY*R@
z?u&Cqx7N>`Ev`xPPehV_C|>=;hnldvgwKLc4lf>$(C0)9f!2nCd;>0bh)cLW6UM|l
zS8wv<Q;+lh$1m{ZH*atpRVuaG$A~i=Q;V5w)Jkn+WjnAfXjsRw81yAneN^(eg;IcP
zJdZ#53EuhMS8#EosGQB4rZSt8G^y2wlfyT2r1AJDY=#Xn4CLI9l9<zhCdv{4GChf=
z8R*9Wky(|Puj-3f;~ez{6&%>iPD()!1lyQRP9{DYkreSwO9QlBr<Njd1mECYrb#*v
zOHAy~93T4R$8lXlb`6XJVI0`3*9_~PVMxS&)PU(bx<$**nbY{4C5Wn^HM51zA^GAF
z%5?v0hpnKz+If>~#Z>v8P!c7v&Xpn%Q>)s6i}?LHyZgJ)uSht2Rs_=<qAvI;AW2c{
zbR~k??NxyT{1oI01uf^O4U{<2baVQZNy|I!;OdaQQR;H4Ce|X3qodpGEq2K%SfDI3
zS&pMk&$*`$IJvzx-&_5?n%@-Kl0NlJu~5nuG^HlKk9cF+&nh?04AqN?xuJkAYhsde
zLfXW+i#zO}>1dj$v{&DPWJ1jJCmZt3Tgd%ejKjeCq^Ey)$hZlJBwDZ8Oo$N|GENe{
zv*au#I?I-$Jx38Cnz8kKcgg<cj-&hckaY5{$C$&Dhm;xUZNU;LS$Ulylt&ekrCe)G
zF(-dsVM-VaRcZ@as>++S0h1<`Kj&$DYi}A<n9Q}bol5)17zklN@aBK0=Ii<h)p|k&
zbG<SlG8IW6X`9T6w}d(2#tkVa?yT<fk!O}%xqP0Z@4N;fFgvruV%8yE6?)x%#}E_o
zV9D&_B|_Z57#R*9>WpsNlDkGxDpNWv3f-zgT?)`NA$}q-2J@P2G%m*oeSk(`3K>R-
ziMSbPvd-SmmM!6G&%Kww!NJErPI3+6geW8VV8Qa@d5*sM3cF|aaOd_3V`TaG1rC4r
z4Z{5g_{EH7XHFaka!9y&gY+Z$WR088=;8^#@QZ(hzxJR1SC)W!a?(aZv2WTgHp{D|
zmD=o>Ak{iDC|js%VY?UtT3U6vyl-85CIl3V_-UTrN6NM?IGZN~C2#84RhoGvo@rw5
znb5d0Ws2zhzm)-7Z!Jcd)VQZJo;8q6P%3OuRb{K8NZmuVftoXM>>0y=&vV2Jfk<|l
z;4^XNNJYp;9E3m|1Cmf0Zph>il?~-hKOhTn9LXUfF)+^~&o&J&EL6iUi_V4j66oW=
zCPw-Y88U2!M4yE|z55j3(quw1$?l8xO{JoqQf78*H<Zdi#Tpp3Sb1c<NvZF*1h1yT
zwb-51;>zBBqUnl?<2m*DE{z8-Tko_@)65BP6iuJB(Vc*J`jjeYi>GTE(&m`3+33iT
zHN2fQR-7cd#+f>9!n;K4plKXk>u5Xm(rg>_d(2KG#Yl{*3!;j+Iu|-$Ea(<X#*-6<
z)d?YKBe6tVwK+?~0Yp5J$e0`<t3rNAT9}Oq`UFFQ5k>Ss8%BwwjniCE<!OTyEzN@X
zgZj;mHd9~J%C$(cIedyhr#0`5DKW_pl>(_KgBxw%*b6oSY3IG2{cWob2JRGZeC|uU
z{mL8stAF9&CpSyR^?e#2xb}O$!Qc4{|1Gm~mssBq^us`qs0~4jIc2)OsP$q<Z3IG!
zWG~ES3*EzM^6@Ur3iHb|O%OeQD%lNDTNv}Ml}J@|4>>c85r4SmD_{B+Kl78H;H$s;
zWv(5b;IrfI{2p$n#ofBiO`jNZ&za{wMDxT83|GHxc?5Q^8$&j8wI?dSr})d(*e$u{
zi`f)?D|%fwqN1;|C3`LymA&YIctyCC-mZ;m6$CBprHMW8!}j`-EKs{ytI2lZa-a&Q
z$*T{jHwsCXftHczB5ek|Cyg+Sf$v_u%8SoF!TZlF`Rbh&eNh;+#7zsDY{-!q|J*u7
zl2llxU{XaLmr2Q2XKDcJD&+{>ZpY<kKFC|Izf2l3j!=_?oRz9m|JytE56-D~q-z;M
zsD*MF(ynuc=;uC;bN<d(PB_VPW;xMfbig`40y$cD-Zq(JA%#Jc(jr_yrgK6oo+0Us
ztd|BC6SHoIkb)K?Koc_awzIBo=CQ}m@Ys8vB(;I@{xRcvP0S{}<6Cz2I^034Xn=TP
z%Jls}S`Ea_NXVm7Am?+=K6M!}Sgi=oC|!9=N+L~^5G@)Dt(h&#TbcqvIqhu@OXkAf
zsU^*wG7wLd6OUPuEv-2W!Az;D<JD`jxa9zeket!PQRUejBO#2m^Bvasijb%B9+q#4
z*Z)qYD!Ey&be5xZ*pnuYjR=mGGnby==ABo-3DM9_Ndh!vTU+(kfCbr!<yRcRl9ZW1
zsc2!QvBN`)XBF_7Auf>^nJ*d+E}mj%p%UpDd>e?Kn00n*b9|eaZjw?WMC~}HV4lS!
z<qo)nGGkKpu5}Jt*N}qJnzS!e=rs^SVmvwIblY+Esq<REzuPZu{ZHspS$j28EQi&E
zP}hWfVt0KX_nP|s)4bV)bd|MS=i+lQ_H%|T=(8vZgE+6%Nrl)@6fEl#Olix#t=7m!
zyF;q4o><|e=!13svo;x8S@V<s;-4a{j__H?^BK*oq1j)OV#4q3;AV5Ke&y?2c<KEF
zXIhR&1M7!}oVs!e@t*7(rPY_c7n05yqqID!JXNEKMjg9IDI>1Y_z@y`wSh&aXh{g<
zVSv>@8-=)DL5`ez>}kIDTc795^DiK04oETJyM{bQHa^qLmfZinuX6e)K0t0-V#u_M
zJyvhL!QKbnk1Q56hdpsv<85iBlM#11(uBZE?|G67k6q^O_3Pv~ZWRuBivd##FSeQx
z`H@wyJ&IYI8F?x&x2dq2q{_6+#M!)gQP{Ov!n#~Fd8#RsO#dp%UVrJde1xios)B1+
zb5E+Ej4NnIDO?Mx3f2F+7BG4G9LQ84l_GvBpz~zg6eFo02))jXI`1ci@`A3|eagw`
zm_sD{3_-0kk`*h-?!6?)Lm=Bc>8=;fH^Ld8+3g@8jFR!Hp`>%%A(3LHODZ_%)BlH!
z{Qv8#D1dIE@0EHx-AbL;^QB2zQ{zYeTwa>O*o<&JCCh!+_i`eHlyHqQpS<trW-YVM
zbN0eMXD*!K-1&?6EWG)xukp^eU!@bJEPCZ#t75lT%3@PS`>ctwIYQ?H=QEt|$nyo#
zcI>nrZ4M9y8kG9!J+!T(X<FJAywCX7(Rha=STES&T2Irqde+!EAdL#=1kst!oOFIQ
zJBN3UrgL}?(Pd-zcrCcB!4qR5#LQ+$45JAa+Ve;Sfy~<A$XR_q^CLH0GzJ&@np99x
z-S@624qE|&2iLgC`?l=C(sW|$eJSEfTWx0h-l<t(T!`|8!ZdRQS+yo->p_;}A@O(r
z{D00r{lEUR%nmLQ`W4cjaCrMFfA#<H7r6M`v%K-`>)d*9%;0V2)?#)>)WX}Y)fvDN
za%M>CM?7zrM55Z?*$n4Y%4IvwvNk8mbzA(9RMc~|*-@;G1js(K=?6|^&O7(leC|tM
z;irG*V|@K{U+3!md*t~HIpt{&F7WW?4ZI7?oM-vWOLR}ZM7Z`Ujd*STczGn=>a#OP
zi82yVJ3{5pB}BS9hh5HQXS1lec6V&>y~McknJT(!6#EjJr4;FTViNuky?!JM)cNk7
z%@1UwAgTpQYLa@rc_Fu=LP^pgd5y%zkcB`_nRSS~^7>Ui^ZxhJudnj*L#6dhok!!F
z=A<Ns0?C^?T{h1!XR|z2XgiyPQ1-Qu6v^PVTe{er^VAC;;riR(<K*E@O)zsbp*Br=
zQ&z+73m2cF-wZ?#IXXq;666@F>Z=$E3qbC8u=dssN`$P}*_aAnD#=OG%_T|iTMVOh
z<g{}&lf(y~d!AeG+~fN3p^CoP44!Ko@pwaHh;aujyCwY?$a&2NKJ*gpZbOoh?$j>x
zv#0cVly0Fbik!8*tA}kCTtYowR|6;O6`R#DyK`^#q!pQVo$6U4bq#11Vl!Xma4Cp4
zJW^Op!dRMkojH4o^jhg)ssfw~t|s^efv7b@Ell)!vL%ojkn}!hrzlU3O509xq@B%)
zYzdE)p?&);*;oNFB|=;w5;=T$gEM=NAt~a-5nU#_2AO-hoijEoRSI&OfkkJn9xbLc
zXePFTZ~2~b4OQt`EU1>M+Y0rppKJg0f-~plw5`y%tQ~moOh75s`zqek_?ad(2q_9-
z92mwC37Ir1B|4G_K<*)r#zM$ZRk@3RN5&9lz5bnUju%gpGcmmT^I=<ZtNg(PVjFYb
zUSC-$+BLhRQlYj%>N0a?FiCAvAk%^FjP@l)Q7y==tBI%0mWo|#DhIBKW>uRAL*Z;p
zAy*MB+Y6Qq<gK-?OF^IOPUv~&nI{;ozDc`#inLlWJH3x{LQW&i`Lm>lJ@;?j<wKwT
zv?|2r#OBUzVvM+^Q=y|m%>&X9Y7(OM9f8V7C`CEgTxTX3l`0N_)Q`9ni!upp*09;&
zQY0J=NRIfO1snzLed$Xa{Gm^2avu`8RU4DEyM!Bu9Dns&ES`LVc<+d|@$`qsjCb$h
zyAI|vhGA{CLBKG;W}s_3+M@^j;a~VU{;U7uzp-vq;XSy?wuI6kv&#FCM2F&FP-i+z
z1LXUb%Jd&KVJrKS`h3f`Bq=5e`sSutr{@YmML<brqhaw+EVgcS@5{X4loJ;?XhL>%
z>AFe-)i7Kv%nHLL8}p?!a!VmrKG#%06<H=yW?c2O{XiHaofl&R6i--ZHcqy|lCF#F
zAlCU)J>!dbj3lM~k`i9@%yKCcV<bqc4YTM-ArgWiXGtEJCu|>mD(}i`2%(7oKk}$+
zqhacE*zGS$-)6&7y^c$a5o=^MRuq+aPN%b+#@%fiN7J>;+>CB_m(yqWIdkqbPdt5|
z_kQqs_Afn6W{yi6&hNqA<{?cH>?^--D;`RTL1@$9juUR5Gz{|w*Lqs-Xvnl-!-Zwf
zY+2?Q4Y_g=+-UJZ8BB<nx^GbgR#a4+w?=-p8O}uGJgF1`qGyrT(cbQ+&BG=!(K|8L
zn6MrbCqrP2Ng2Z1jR7fV@<eH$bVKx-q%=Xy-^Q`6R4lcDQ}&GazRrCsV&-dKr;OPc
zmR;Hv7T3S85nsU(>enR|jI-DjWwH$TZWo$ezVH|R5*MF+hG#zh6ND6rNAKVy@n8PC
z|4;6oWabY7Z(YAlPhw0-C$LI!UKoY8e`D5v`#6G!<$R|$pKCNU@xP2C<xI+I4OMcm
z@~I_mUB8KTk&I~)r}J-2iS_7s>&_8h{^~dQ*iU_&&;5hn;O$$taW1lda0<7(<l);V
zbc-8gMs}b72>#MD#5>nidAaN`(>Te+s!QyiOBvI!CtV8ta?WoBW|7hiD$z~_RgHU(
zh*zDDsPn*4d<9FgX~bx(OqM(SlLGaJ;<Y@zuk;hL+AO3|QK})BErA6E8Wj)-iOi^!
zx9G`@XWk2&KJw1>clgW;=ehCa>)gtXcENJ8SwfvN%~EI$c{;R<psWe+RPzxr>YOA~
zDk}@y{5Vshn>n6-_C;2Qk8}UtEshUw5ylPTbWTE?qn+)t+}R~WPajt_%r!AnwG%_S
zf_kO6OpYL4Xz<f@BS<dt`o^^v!DGl!HgDF{8%pJvAv|_!&M*F<Pw`*>^<P1Nxdq>@
zn=x-3>ljF`)44%6^4Q~#@y_el*jdbZ|4)2O1(#Y6jgVZ@gT@sICr$}nE*+$-YOvN}
zoG+Km(&7=e7|xqYt|=>ti3Zdf!sNdT<$C)7r7$ycRSPc2Ms|)fr{`psNukcSL#rZ3
z`UZMPB~x@xW_n);A;k`${BIPUr*dL*Vkqj@bZrgZS+XgX`udqtj)b9S2x}xp=8fm%
z_%>%xU4oEQSzaP1<6XjP<nW!lCMEB2x!4}TWCFfL4a-x<z050>yHiCB>dnf9%cTjC
zG>FSIZD#MxKD)aOtqVA(oo??Oxe*lwa)N6ceB0m}^>9;arz&jvvBCAiFb49F34Oxb
zT8+t?6lRa2b7&Ev#YscTNsnGHv|XdcfSShsfFsUUr+e#Yos@|tL@95ryKV2t2{9@O
zP_4phe5hrpl#Y4no@;^UtO-)jF)v5~d27#STk*C0IZdnqi4*~!EqR?<>^R#i&aUCT
z&pp9xF(<6oY!JHT0zYp^A&}=D7M{)94{6$l?$l}e)FZ2rlWRKHBr{h9gcK>w#FJy1
zuG4Fa32{*maxAdsB3NgQ0TZl?CnpHrnrjhKMEW(k-zdvt7_>O<H=KX!dA|2sU*O;a
z?}tklY|bs?cRNnrzQz2pGsIhW8Q*w|<Qq~LX<W<Z+6{Ide~jprMkweET?##U91!o=
zX@#e*JjUrW`#iYwzzVanzozG@yjKi`D2k6;iDo%6>Jr%&xS8;;slQ}}Vf}7ZK2>&t
zJ-DsHkMcG9S>H6ZDOON&QMuH~QmY`DMFKWYbaf|YKUIIlcCnewexj~eL+-bPlgcHP
z&r&P0N7o<yK-%=AVIa@Fo*|%PJmPf@TS^+9A!%ZaQQIYEYZT~uPcf0k5eVv0NM;iw
zC%x{Q2!tWB-b98WGe*;Cu)e1^)q!{Wb@SH!DkQWeNjLp(c@|*y71|Vb#R|@zUHKm0
zv??a$9gQ~;vqtSu#5Ed=o#5J*rfcYy3ue3fv^%@B^Ci-CRxpobnL#{cb@(=|mnwuK
zrIA$x3Co{B&tMVuc7(MP8t?I~qji}bMh;lxWgyjxyzB*~xjGZ!6FXC@K$cmzcxz77
z7L2ob?BrFbY~qVndoZi@>>SIr7|a>Q=veh5>oF6OGES=WVJ<n_^qxF&PD=fICtE1D
zouA^SaUT_mvIc^mnnTluNRKp)i+8)!v!ndowAZH2MSgU<D-u$Rr4)La{l|Im>Ua6Z
z-}-g__<#6s>uha0rtyj2_%HrneCaD!IdlFJZ{L5wgOd}YjS`88zeUkv6egLFkP>|y
z8FS!ZaaJkOHfLI9l*@h2DNs0n)U0Xxyh54T%*8UHBDESr)lzFr5#M;m;CSuYU1I(^
zAN|=+^7&u;hupn&!tU>1XXnSBXA={5-?+>C)Pm-XZ?bs)<M=a=!NWUxX5j1Sh-+HP
z7*j6IVzkkdixxo2RpGnDyh#aLS#QBaKufbxKaa2LujB%`fCMr<H+4x+`u7jp>xb_X
z{Uf|ioj<3Fe0Ctk97r*+S#OX{i$CrWw*fzpvUW_z&4z@E0mY~tc;7p|@%BAF^4vZ@
zePxIL;mtLxny?r8Y>5w2IrxPnmt|6KtuIsO(&Dsu-?{qJT1RD48kz6Scx?X(Vz|tD
zeavQc%+RliF|t_hQ4kh4I&vPBTAWqfgq%s)5mHk6jVX0j>Q6301Eu?>-F?Z)=1jy6
z_7U=E0oO5;#3w)gelA@)!)Cq0i_kVLW6GR5IG`VfigvVd<k>41dGf+}Zoc_8AN}OV
zICtd$@gqtzHHugUVx$-)S1}K!4IL$$&!`15r}I*(^p`OeDT=*BF5G`hqV%PStWaoV
z+Wksf=OB@(E0#52oPX>**33jgV*k`0$A>4ZHyM}I!x90;O!A2+na1FWY;<4|h>{H1
z)>$G+N>NNEP4B!XWu>>0Gl9f9td$Lrh60b{B;y)SCNhj09-iFd^xg%e^*rqFaB}ws
zU9->O$uX|;IGNWDHpxv*5tmg#&i*@93xyJB?U2nF@@B1i(e-Vl0i1KpcQgA3OXi)b
zA^S#cRq&9#(6mC+&GhSCgYR1Wti{b+XdF2O!Z;E)0a=f*b`=?D$dCfDF-qrXhl2^G
zBuSc#m#(KkY+KUMlTxg18Smz6IzVCj7fido957`KTZ>|g-NP1owCzD$ni}=}xBIdc
zO46?vtA;73$&dcZf~<)a134El4gEcmOYdf)h=N)cl@2lGS2|2BTffq|%+LLypCN6I
zSsa{UbrfiJcW_O|5I3}E_Hna0x8Hb+-Afn1C*tvd+}+UMdcdg{pQ<M2WVDl<h5r71
z{J{Y+1;n+wDUEulC`=@$sB}FOKFJy4g9kLrJ@s!*83_^k9@+Fbmucdtn>ei5y?mML
z-wE9Q{FgcV>7R$48H7>i$n%J}Rw?F3$E4O1`wh)(#_G;}mS@i+M5R<mQ&SEjX&7-$
z!?K%k^ww4W$S?j8{_DT`HxwxlrCS#pf-(nQ63jo))Yw`>Qkn~S8(E+(r0wPXNYZTU
z(bYfehAI{vwb^0wP2Q}KoHbNjiJ^AVP2Q=7A5`UfL$gaG#u_1o`dbm)G|i|^%X|vH
zmC8Hqg4#$^83!>7kpROQ2~lTabEXj$3!6DdXBXO0JbaC<k|QKlNG0i6mr)G>31db5
zLWYwNIXsEv)RLSw5QiZ!nv!o$V&{+vqx=9W)G_9IFYEoXSfPcOd6GKQQSjdER!o}5
zIi&?kv0o_#s;6x_nx>&?oQkWtMpaRb{_4G_Z9AH_rST1_o6&Oo@R*(DEt-9SxFLq5
zsGkYilxS7dP|E_HVw145<9TAo8S35R3A7SvT+l`|fOxf3!|6DcH9^Zvs}r-J5xsub
zNc9}8E6V<y{FL`hs=mG||CYTZMae%aTW+-(*bIp=WI~?SQc*ltYHMk<O!s-ams|g*
z_I|}IQ!LI}JfFm>EPpK2Zm7Sf+*^6%4>E}tmLDmMsrtvQ(Ft4gB7vAQ@!(1Bympme
z{d4~zAN<IR99+3X%021$4tKtHjbHwY{~f!FQ=F`O!fHcEeYLCu6|PG;Qf#fV<XW7r
z#-6hW=QWoxT740ID)Yd}8m&dMOmLHcTU|eu^Hze!6yM7EXuwa5BO{(PINrQ@o7v(u
zKK^q*&gcH#-(z`8ICtX?%gbkI!^q*S2gJ}L-M3gg_aV~hbEM4!74Is-r`9~kt}r7N
zT`%`KTTQL($g&pl)-$wc8n>ECu6%Lp8M|bjyc1hQvNqS072^L1g8D=An%lgZNqMJ1
z2ua9|$G0uaw|E*Rs)BPg&f(GuBv|1T+4M8M`0elVQ_nxmcWxc?opr}(uzQ-KMCqG6
zdL6cTN~dT-@=8**ML5mVu`D)InQfj`d}gt`WM_BDe7R)PpWtZ$N59_CG|moRF+?s|
zw`YXhkPGEU1aHV~wmE^)71xeb+8V^3zNXYMPwU{Ri>LX(M?Qeh1IK-jI8V$f)qZgL
zEbqK^RsG}0?6}N}?|p`Yv%CE4ANeRh^Ur)rY1XE!D<l(xnYzR-nTME3tg97+Qh;1h
z*!8NaoJ>7UiqECk{UsxDwEJ#1z7(mskd^h%8I(qpldA2;$okd;PVU~v&03}M(kc>_
zRm^I(=*T1<9``s@q^D4N03@lcNJ-oZl~@29fX0WcD#g)K&MY|B*mWo(d%QZL%^4K{
zEXiI;TJ(2i2M>;Japl=(*~DYkx9-u(8HkB~I3YJJNi+<k3a1ss%ISP(IS}j~=7K#K
z9lcP+iXyez3|QLgBg;L{sZ&dEBi<)VDjd=(D&rbQGjsTvr|CS+Y=)c9Xu1w*Tc!Wz
zNE-$QH(0_|s8bP1o}9Dl<C<Dt8dTPwoFMi@0!VA?S7(>9Qzg$_C0^gXSE2P(pTVtz
zve2Km+0lOhA!UjUkN#;oh^J>V{b1F<aaclS`}Yy5WWAJH1Xl#k$u>)Ny0EPR`jLXh
z3X+Q2)`SRh+VH+le2Dnqd(6*Yz{h(udrL?#ro{5}8T^pA_0~0>|Cvt^`VH~HG3l)b
zq*YJ1vm}orbPjUV19>wN4i8~xk0uKuk(@>folO_A78u!Oh*6t>F_Vsup_$i?JTZ|s
zE2JOvel_;l#|`a%!<7%c#Pu(~%;}e2#6NL`XlCn8BZQM<+;UDnI)*`KSyNQM=keqS
zna`~IZ=QuIk^7!J?`U0vjBDQi!H==n+o69j(ZSR{qRf(RiD&VB&!iS3<!{m>#a=-#
z^Y$}DfFf8^l)DQ=WV7Pd@KY4LX3Z7xDcue6#W&KKy@;Vx^=l;wc6Gf!(#+UO$cuKt
zWQDaAuFFcZLr}^X!PdqR56k6@`C>uSDJ^t%YR2APtNzimh64|$B(h;(<pLWQ)g#%%
zkdi9JN(s4ouBIG`QD+{Bv0nF(;UvL{^Z2AQu4z=tXRHEqTZnhvl<)4#x7}n9QxWW`
zdEmrrfod_pc|q)Jg<h@5>eh2}z9NqLZ)?rOMhiV>Oc(EIT90$t@+cUG$hc8e<dy&l
zyjxaVmn7JEohm9njT=l1kem~Zlo`EfKcCeOt2RFkxz9S!ic40QQe&PIwAeEb)Iy6b
zMZ7o@pDE(dg&E|`+QylJglG<s(o|H8F|isV{TK-;sl804g>OBiNBT*53a4d%nE1r@
zJ}t$Is=F7<oYFAzkNms}!xb*MFm)&=_RJ)oRvPeIg|ju6r{YGNG-KP5XIJ>zfAd*>
z?Z5u}T--aw3m<-w6f;A(&+7U+{Ea{Vm+1%3O0=0XY<k2wi<9ch9jwu=Z=V=brcZ&@
zxZz;`49+zwK$aA>oyJ>A@vVqqw%K3rY@DDrX3L$k5i<>;rtu=i7}5KXa@M)nsKCu?
z4Zr{0H)-cBAO7i2@Ri^C63e&ud2atS?M`MK0-KYO=H@Ntm+!!AkLK}bSbgs`yqIO3
zZ*?}g@l$iqv~7y`VReOYC2p5-KWby3I8}%_dnnt%X`Y_cg&`XF;=KB#Ywq9aeeE0C
z_MeC({ZPDi&z*rd65_xx_G&G&(nXNs2u%hrG)+UAbr$S(qp{RrBza_rBLv5{ZVtTn
zxsgvkf0}FGI^llV7$pD~Z#2vpJV$a0q~OTOgg$HrEEYOLg)XAaJfy9}!URdk5Hvu!
z)-#{YxOw9(E}pwcjLv*Vdjy+@EL|T_ae_=kz{HlEt7oVbdIEZ?Ode^j4zFuxIO0@5
z9EYQ!=jVRvBV4$AmerjTK$yFRJ|$Y`Sk8Co1Bh#BV&LhEXL<UmE9^bF&r6S8W`25(
zN=`K9`Izf{EUm|s%uBZTt!)`hCo%sh^Td)E4g4)dC=<uTieJMN?DJaKuCzdNU9I;b
zA!8&SuE~AGMeTeY-?@X&I=kb&=lH=x&YwBYwfI0g)L9Wz?;8lCCStz)3R8khlRUqU
z5}U{YHt!N6-gni;0r9j%rRJt&jRqk*w2_i+_&!1354`sJw`gXl7+4Af@9D>kn|o9R
z(7Ka=Cb8w1liK|Q4g8buotscN;{v;)ZU;C5yQf-q_8L4rjWgv$(OG2IcwE=gwGO`!
zeAnTZGn#IOv@`r{hVzXvu66dUnS0wus$5wb3~K!o?3^BmS$!`hng3-B#2l+4t#d--
z1)?+W!gv2w+bUa$$#xZ5!JyiK+Csl<WlBvVO0qGfGHku=BdV_j3D_!%4b0l2MwTDW
z_4`WFQ@Z@Q(7x@M)wiAmY_{2J+n-o$7Iq2jonErKm5}qNXu3n1-5vTncad3x+udRH
z`b{<`Yj#fW6CWOu?muL6<F2lw#*>EuCmlJCWHNEHCZ4SA>!!k)tOylR0gb2~`<y^x
zAV)1mWxYXmHcAK1ATg2qje1`$=jP#@X|s^RhSM)R$L()?mpfnm8s`>s{OMg~YAk1L
zZrosb_6)-Vwff2_5(c&M>F+&YasE7F{)kO7#zx<h;}S>U)b2j7e)H>m{9_;HbD#a3
z?)BP`G#e_(RPD>DgQ?Ha$@I*(ZL-P}ks8A4TB?5*HFwbCUl)fq9=6cxveK-0tk)r!
zfSXMJSWMbBny&(ITeFPXh})XWufS3%Ql=EWT>m3$tQ5L+xn-PpEca(zeEcHkFP!7t
z*#pj9ILny}=h;7Xx>8i#ykj<Bu%2h`j_0_ygljW6S<mRlKpKJ;7)kw3V@&ExiCPh*
zF%kMm95%RdV388O0l%2xfYsqKVZBCr^ZicA&Jo?4;Rj&0?1~4eZR713-P$m+xNP89
z3H92PvzQ`DnJlWxYs$Q4Z=&OMdagB1gLw6lZ5oHTrmlmwX_&RGqU_xq#38Ozo2;@7
zM2wA8aG(-bwh%;Xk`)$7^<@>EEfs03xb%3yyTbBPB-L#dmr_;C6+QNL{*?Hso{|n}
zQbpmNoo59#hJ<&XB(7oyDZ;uBY(i!<JDHS9(MK&rYjdgk(w2Oq6yi3vRpzSkTTPxx
zkZdYQwSiFMw4vn1PqeTM6ruBWDir8`IxMP?4{qzeD?4u-M%><Mjvfqr`9Jsz+<fyU
z$9M1YPyOPjaHEz;_uhPyYhQVluYdkEnC-C|M!Y13aZpVN12iHkl;pBDl4Dj;snxJy
zckdK!Gb2VFozpAOvsg@tIAyglO9t4KBW*%Ysj;fY3?2!38oBKLsb74?mmEAMQuciP
z<=1)PsY|@@{txio-~SGmE?=gZ2`BrW=rh@QP92{RZobX@%q7}OPm$kw&F-VEp)%4`
z3hQJ7f-X<%p&rpwP?HkZ?Rk`QOv?L4ZIY#=ao>vq9)8{;DHGQtF$~0ULx?{BEscN7
zuOG<*_58VKs^oqh+9#6oNKPw4|B&JMA^n3xR`(xr^57vS_acY)T8<tDT!@NrjfvcO
zMp^PZU%SR<e&%Ujyt?5_M;*aLCW5teiuar3q>5uPnZl%Ps-y);BB}+4%C6eto@{G$
zhA~ZnFe#mL*c@_nZ_ULEPqJJrS+4?P8r73tF#k@Of|)olE1Q(XW>N&QSHt6+@iyOC
z$IsG)15aX58XkO(JagfI7vJ{`iyhDEwnHL}lE}_8n|Z>}6MX|27J1;==bmD@)39^)
zfZ5I*7<JC6X(mrD`<yvXh@Dn<P}GD0qdGt)P>RV?%s(=FnhN?PwXS+p&}zHcZKLQp
zYay7`u3-PkbL_@5l-BFV_ICO2{@zzOT(9vWoY|jo>B26rzNQIg&d}IBNI4M3XsU0D
zWJfFPio$Oxn1gc!3^AT${m2DPLM5LX)TdXmWc7;70zuSc)=Oe_c$2PM;!^`@ghr+N
zV;;(>Y{iELZD&-b>0p>BDCMlpHYeh$ppM>R6?quh+4U@!9+5~#!qEZ=*ExLa@EtUB
zXxfHm?r}3uGxv%Nv@@FdQmv9&PuQ$<7B_)+fu>c_5@G{%<UA-c?-Fdh(%55Wj4I(S
z85q^BCyU3q2E^eTNBjYZ_||4fNyO`7o{}*wtV`o$N;GR-Sq%iIx80`MPBX!p1gXbi
zDMH0MxZ25f6@8#4Ot~D8t{kf}b^I&Zp{uOI0;SNc))N!LDw1`i8Et^Q@XXVse#P>_
zc`YVpy04EP9<ux7WpJJcSKnc$o8iRM-+4f~cgS#jNHcF$T{$Lb8*+?lFVnASLL?m?
z<Mw96rd7rSy0^vrTZ;!^PHI6k93H|jkT<>BjG(Al+-x96{A`!;xK}UPS%>otZa(MC
zOD}Tmt6$^vOHb1-J;P>08Uo|-5$<4@AR{s)h$Ar$IOpk)*US&JNQp5LgEdArgL-M^
z%)w&LZ+!jheCn6}X@2*&zo3Fp#s*Q!f1=ieTfK(w14F8fNL?RN9+#9A_Q-O~2r;W5
z5@P2}wU)^hMVg4=**X?c$2p3puOV1vtvI(`Oi;b~rq%|!KDKAEicLwGH``w0k9?r5
zeI#4{(I(c^v<*AUIlFuM@6KMwnS(jAd8TO+^VyO_W-*I&DU(jm$gzcHmzb5|lY**$
zrx2BilOYliud}M5Pn<kBVs&&xzgd&RNJAudDzK9~Pg=EhuG=8IaZO4&_JavB6BWDS
zZ<QKY)YWrE#HAuUiK-qe<|(<2))}&01d|Ho)fM3+r)O)^3iIWR#m<7+yu&wbT@cc?
zdQDbH>0D^uYCLSW)vO)5^}L;wpH**D#KfD#e4?GM0q+F7*YsXc1lWrrQbu?7u27nb
z^0=-~Cn}s*5lX}e1zOA&q*aRrbwL<IV%-P&kcma}Am@qJn=M-_nidYVN13t}bx;s3
zCKY#Zb$w5?`svmbVBryJFO~wvNrY(%4)(|DANOeUYO5VjHhY>BAfCF&>wou4y!j7)
zhr^o>dHe1I9;T5e-}fx5qeGe-ce(M^ukiA3e1lbjm{qWD97d{>w&i6ZCu_2+KX7!y
z@vx@r<}79lVo2n;b-&6bgHq&8gCpCldn%Oa+EAr7tDUVK5vg?G^6_g!HS4-h*_rl+
zBItd9@4SAE$M#OMbNUS5di8ZawEqI_L!FyWArRL+?|k7qq#M7^OaHU~IY&3I;+&xh
zQ%A^<Q@~k%|9v~(Rdx*1MIt70RHEfp6PgmsO}s59q{-wM$$i8p*jctL7Z+&c-EV^Z
z<9_`}7N{gKh1G%#LZ;EKlQcBTIn82+<@qIN#u<48w_1}9M^?9b4&NEL{bu0p8;P6u
z;K4E6-86jT)e}DY{CVE^+I?=N#vGnX61q*<vX@D&yDVg1P%2js8AXGVbsU?TNTedc
zof7TRbLZ|2_Lh4bT)u$hmSMADw%Ft7WCb{KOk2UoYPF)C7&5eGp6xRdfT9joeHy7Y
zFvS=+A-I%jGLRiJ7x>gqe~h!256BW2`;E;QwX~AhU+%KroY1))!nkI?>3I5uC)j)P
zEVKQkQ41X@dnAp>7!*MfZ$Y5+>q}?7*u|KccaHi!DS-Jjil+u<!}sc~FDOsNthO?w
zX}S_C6wqWbC6a^WJt-zl;!<L^fP>wR8=DPec>dY*eCR_z#+SeJ3dhmm$t-<l?laLj
z0@(z6bf{H3d|I3)mvK({rxK&tP^hStN;M}fY)w(n71YxSNg{$K!%4J@m;Hh<ui4*O
zFq~w(7h*FblC?9OR7ePkhW5uw4b0o!-<%&g<0@t&Qs>J&wz!Ba=CHTp@Tljecu(8S
zpy_dKL%VSJSxeI?y_Z>wpEbCq!Oc3PTOi#WngzaDlBG3;*Nltiv76VnM~v4aFmo8?
zsgzckL?C5wQ8che6<l(~(~NgNu-BQYvic?<^ilM&AWIe9no!PyAm}$fio(`|%xFS|
z+Bd;j+7T~vFImb$E>KGS1r_xyNm@3lN2MT@7?~fv_AS2J^Z~39<eP?{`}EH<o*c1r
z@e1MY1N`0&VcoMj={eXr1vzv7>O1W0>=9ST<iiadu1VvD#n}UL9Pl|KgV7U%FdiO~
z`+>MQqFHz=syv0Vn^Hk9l6qR^sJ^?0hw76zY|MI23)DQUVAhcvM;LqZatVtW?97N=
z<lOt7=It+jm78DrI*(<K?Ch!C$e8FKu5jMz9?hC~#1t7f8{*I_y?7kSsnI55$oO?n
zS}vG-$YW&Dbez9@mUmviNhXo93h``@nr@?@m2zkvfYY;(`n^-~BU7$g*TGiT9vy`s
zWu@|?g-R)Fi*KLKQTa!j7Z~DFd<Us($klbUodzp{x)r~RR$W0+%M#DiFOjX_yxl-4
zvtFf$Zk%V{wk+F@u4%NuOp&o47&jxcS<7a%LGp}~lQo;oG1+BKpV?!vcbZH?9|p!@
z;N;;U!|H@Ms)$zWP0`dxnl3ZOo?+aO!^oI3n_=2kp=}6aERH-q70FYh;N6cgTd%DK
zMs2cgQLQH}i9*AkMgW}HY@1Uxb?3D>_0F5G=tTQ%8dOlxtG>iyXTfaI(zaTls(qC*
zU5Xd8OKL|ZZc9P7uJKYu=s`OP$P_eH8@LXs1?NO1H~TEXm@ycFS{kX&ELtWN-?E{c
zccf^{C1V3Az^kgouF=Mr!N1Dj0M<ibJ*szVEX7c@<a(s;uxYJHJD@1pYOL3$w09-v
z`~EgYel$*3D(Dn;i#qcbJ)+vl)y&6eS1LZF7nm#YwKOpuZ8zf|{6~MC@y2bAZ`|j`
zgJT|!iN_v0OShPF=dJ66FMf+VUw?x)zVkNGH-r?3IWgv}p44c3NnnDss;Zwv*bF1S
z@tod0O$=cw>e2r%Yd0D-c2XzS4?4|#)>^PO1d0iQhQ3teUMh53(Slc`rDpynQkr67
zmEg@=H@UF8%cAwX@yb=6Up|AIb+{}X-@L;&e(Q^T_|g+R{QOt(XD^TsZ|YjyCb-l)
zx{aDou^|6}jcaVk`eD@#A{zxi=ZrM1H3U5}OEkfpGlC<HLg+IwMDpebE%x~T|JRRX
zftsyHTo+R=iK1GKxTG@X*gT`lz&zm734894{k;uOJRbPyNoI93ar0`&8`tN&b?b<O
zv*&p1Y~c2twF!=-Y4)P7Vo`cy<JEtwMm1&9Pe@@VXYIk3IlxS!wc<3n#O)i`xp4VP
zl}Yb>!)o2r&N87NaBZh0OqMz$p#`Vc$M2G%6RrX!QHqLTwpq#2*LOr4(s#zOm!zTY
z{9emT?|+8b!4erZjQyZRmUtQ`JbCGWYj+Rn>6tq?yVr2x@g?&ek83i>Ku(#s7KT;8
z%{n5&I7Y&HL+S@+zNPgIZqYH{*+X{b+BvOMiio&I3mKzy6qT1E$6bQG@y)6jQi&cF
zcSxmxRU}T)Zox&Rk~)B-g=U_67V{R5$Ggl^&z$A)_grCrvBUa!jU>2uu;eFy=7W6h
zAAX(hzBOt$);aCwr2qsnts%i4a!>_j97!5t5Sm7ncvDoXn6{aj-<Ky0Ya|64?=YSX
zinjO6_gcp2kj^7C47nra5quyG5uvYzO+npsFh<uvp=D_iT!5BbJf<tfH)l1H?z+t0
z{+z~V8WOI_xVD3Kjc<i!Av80Obb_C^_*sXv9_cz<J69TT*WrANYg)W-iK)T+4)K|>
z(Gww|1*<rvojDn4P(;_~SR8Itpd<#Q$&fvgg3>|_jYj@aqQ?JsuOis9eHcwhY7tVS
zW|EUiw3|*^Q$gH@r}cT;f4Cx~Q?mBy_Y|!|6-mk!^(s!qpsb4Ry4Bf?AJ}ttQW*fc
zZA!6PkXEUPTS+pj9cvnR@q;hoxlU?3;(CqW+vDiF?<k{SK10UHy_@%W?9yY5M<>L?
zHRH*k{*jBB`YT8C%xfKzgmJYY1bco?D}kEwT2Wn8*-L^%Or(<&eAgmFZ;dv!B67n9
zclv-dCZ(94+h=|MK0AB69Ip@A-#yLcr!Vlv7rw=rOIPp*BY9kt`+@PnLuA=8rl9-A
z8Y3x1!g@s`o;(D?5NKKx_c&hB&SsDzk6paN*MIMGeEg$7#yhWn)+ol_nweHmlxnA-
zoJE}K`wO1$qi2_7sTR3HrLs%i+e{V-j})KvO}GAW2o26P6z{!?a1|fRN;#bhVC{7z
ziv5D*T+vo%9*?egr*2nu`O)im1RdJ?Tv`AMjq7N9tBT;+GizryGubhpEf_X>9)%R>
zHyeh{K(o-f%AyC*#KcL6^s6<)dP9nVv1>>vA`$xaMjKo~*H^ZJ%g4!*%Xvj)rSM}(
zwhn&)L8jaeXH8XW%9femg2<IZbQ}FFV-CftrT)EV%UTAthBK9B6&cdPu5CPB*D+r%
zXlEVWtfQTEbv9WC{4w=h6Oze}9HTKqrf!$3>o1e4S1lFcEuW#To9{2IJ*wMaGqsg?
zQV?NRKHEg`Rkl*2BTBh6?#foM?`>LTV3a)|o)9DJF*Amw$SKM;OIVeXtZ8Tf!!{dB
z!3v7R!_<Hjww@Q$Jh83tZQQjK>|4A7eXlypSk96%dpg0Tf3TlX1c|_V;miN~|AhAB
znA^ASaqr<VM-lok@bQ;E!pZSN!qJ-BpZ_XH_g6ePUTdxxjR<dGhIfvbG9jytV@i=R
zWuge1u;H;wR~XkDa1A*IEgYjz?O_boQ2@zqlyi?zs3&M?x{oCWs**A#NwMN}D*QD~
z&MI~(5K>UjTL+tvdHd*y3(GxTf9p2;7f)-`d{4N4<1W|Uyv?c2g4OCD@_~QzU*YKR
zE>5S1?9Fq%-&51suGY@%q63k)YJn2dvk94~SX^ONIYWg#St6N8PM#!@lpQfQ#0YT+
zg#Mo(s6Q01smo=X+qz@Q*i0a!CF%}RL#9KRA<dH9Y+yFPkjcY_#p#}>T;Qn-4a0HE
z!{gUD^~3?U?v7l`r7$gYPLL!dpU4<>#!A0Ls)=?;+lr`Y;^lJ<cv)bzK4w_2xp3tv
zrSOT)+Aa6zJUm)+yjqcB#<e!QPa=yd;l^Bkm(qMybVeoU#TiXBO~C-B4OK-dNaiHs
z5CR`~_6aUMc7g0rmCp5w);GkQ*m0g`E<VQfYY*|_=-SA0?|qELsReE^*8n7r+1ax=
zX>mg&J~+f5AJgQ_Y<C~oo8gy`8^`g@#GNm^0y~-AXC7mD<_xiIjFuV{4KEMb+Y_%m
z<*j5tZ>iT+lXxhzy3Q%pL~=A+9#JudC?qw#RRq^NT0D{m_7)B29y^6!cp9(w%Dcof
z&z<L^Kk*W0_V@UY|I=S4CQtTy%}r88<(zdpx=P6NItOJccEdW;G;>0Kj5-Y}x;!w$
z&u6rv2yV3K>&3Biszq>C;CaT}5TdX=IABak6@atbn#ZMNvo?d641J?2A?tl8L0+lc
zny`9WdQR;xX`6_+jPnhR_c$N%ou`?%G>Zl|YiPQTW;R3GR>dq_V~J4<+2im?$b{G%
z&8q>gb~R<jMN$R{H1>SF6>Clc&Kc!3C#8_ueM!=&FI!65#r*+PsI}W|5vO)+nf7o=
zPKv>IRbwm&Y85Ig%_>XIy%=qM;xE<N<ZPB7m<5Q%7cD@ILr{>c%IpB~t}3@;?1+>S
zsm?e}X;LXCCZxmi0Yqtc&a-eMr_L<M!wyKMKiRNY?sM<jO}f*&h=+KzW;i@%@nl0f
z*$`JFu@9sW6zvGshzr3G4ny&pnW>R$JWFHoZq?*N=dvd+OU8_BHb|#4;2P%=Y1n}C
z`28I=M-TDmcG<anTG!@$m)XOfVYT7(`<~;CZ+(+H-+i4k&pt;Q1|&gP_nH)sQ7L@U
z?pe+Zn>DVT5kn?!dfdE$B*aai83$Z)oZH>uw_pAO|Lp(tU*T7O<<~S!B@d`?XNkU(
zWrD4x!fYwZ+m>`a0@`mu_GN87LYK{6sqb~UX{Vwnicc#+V`j=|<E#`51<@(RY2nh<
z0!XHVR(XGVjdgxCv)$k*%JF&Y>l2du$i~|G9EIAb&8l%!h5e#D+s-<?6T&bOVrD!(
zra#$0a`@CTZUzqT-eVI2anM<=&G8ZA$uZ+*gNU#hdQuokM%DHbn01~smLgCPpGYA=
z<JDu-IorD>@vZ>dyB%aIN>iT2)TENF;-{<++p}5fENYqgbXMF|)Y4@u{<aD{=kQ*~
zRT}S^buF`5N7puVt+%;UBa(|Zs;sF*i#CZJx~H9#7^dSa<&pY|6k5Y$OGL(5Omn7}
zAtHD`tsm#7m|A9NH75CqqU?PYWGc$1D$p}EX`GwXic_Hr17pq%V_+DRCYp%a;3U_1
z?^<_fYO{A+hq0iYQkuToYyqYXJdp<`Dr;#_*3wh#9IouVt^Hr$Umiv9Gh}D}nmBTF
z$gJVD|LcFo=FSbSU%k%Fqhmsf<k+z8d!Bpm^Q>>*A#MW4_YOIHc*4VeL+~vrL7#Pu
z%3BT@QzBTy#MJs&_s5()I7h$U)KMbP!gCA*P1}Oas3vR5RaNB@$12KLitYe}KzqL}
z+aH+Xed+6!GZqYioqjweqm5UzRi8l=c(6Jl&pKZD)|({BJn`fO4(~o-jESq)uJYaw
z{1^{^=PURp&ltcUs2WrW(G$vzH3bV0R;k#98DdRnU4f3mnAAn=m4#EA>_X}@{qaZ&
zkdh~C5}_ATaHROdSI+(LeWHJqSJ^li?PwaZG#<DWsRX55+NRD%6~T4TC8UkeMr023
zfmzdYe&IOlPx!MRT=Mtcob!!U%VBT?#_9!yC9bY)z=G5PMgONHqxF)FVk!dn@7<#7
zW}G^IS^d_q4IhNra)D0|IXOPDU|eQ13o4jum(4k6GZdNNps`}wCG84E6>+U`RC2|2
zNzMk^v(O5>^vrWCPwf(1q>+|*5}7wMnk<}mj%S~Gk}tme8aW5%o#TBkKF9pjX=HB~
zH{YX;OV(d}g`-#ApdWj7pT5A(GnbIlrwJ)Bx)#3)Y_7jXJjtAS<{aV5Wx~xvuD$#X
z&OPxM{`6_+8gS86eoJ6~)K9i{9SeF|=rJW0n(aoiKkL_ooCA_<ZbFiZVR%9Eh>V9c
zE^}(fvwyHpM(4Fs$}GL(;-%BL-5Jlk?^)ja)MZ}2`amgac`~h*EclqnPNz9k$+*)7
ziW^D&hTX+k?jN7vB{N7Sgv>guId^Kwv(LPr&Nn#8#E=NX$a-}|?0eIc5|Z~c%Z`J~
z=NZxl=hUxNwNOe?Q$!c+u*@bRC8pw>x3jG<B@nr1XQyG_LX$_lYiMR3*Ca(NW<s-U
zaIMmLebbmYhc-1_+bU{m$f@`a#N(vV>zVH&(xX{Y2hB#s3C5l@WSobw$+!j@pJ<u}
z?*grNY}Sd^dB#m*=o3-AQnbPkMkHxU@3#uM^7VpzR)p2&p-Z(;ET#&oU8rG>nwS-D
zzDfP4gtpSfoeB~$B*GQ6q6QVCU)t~2#5$I-cl~dt-RzRY71R~yYg`euIZ+bL&IvC(
zd66VN{>*t&AL&n4G&?Kq-+RE5S1up|n|lu!$4KJ@)&q<K48d$cJTep|UTu~{Mx+Co
zWoR;Sy;6lk-|Crfe#u#q`Z`Bdg$c(GRq;1Q($H)2Jm@^AKerE?o@TMj;`}+**Y0q1
z_YQr(;pFvutPUTtf9Vug-uo<Xf9+fBoIa!UvuH))MvIpmGciX}R1JZCy;iO3EcC}~
z7W+G3MeVp+;pR(Xe@q@C-E79e*#(C;64@nkwDPUQt-4ICNys$UEK+9Kq>A!PErf01
zmZ`AQ#~wkv36wx~rAb9C0t*lZXalJ$g((${D>Knm*rl$2v4m2Sz&gv9;)IHMR}oZ5
z)kktmkqz1U^Y)DF^#3~7+BTk@<&52(IcLu7v9r6RX;fi%97hNv!_X7@4Jijj26JLq
z9}~xszTXgsHN(l8IE>`6S8*-N6jXRCnI50!0#Y(v@#MzU>raLEX$6UFQ4`<w7*i?w
z<r(mZ(s5}P6xH6XKEgB&P*Fy~H&P3<ZExE$@*_@*Tjw3Fb@;~9Hd?UCcD&1|P*jsl
zfYIS{FqW~aRPH)nVD!dHixhh=uk-9>-IbpyRNjKZi^J&2-mViX#!Ly-O%z|rI^(VJ
zZfXP7N90TZY72Q2)BfmVVu+EDvnjw&qc0O!nkR-y&F!WNHk-_P(F@SeFcni(Z=?2j
z^45D|`3&fZUW%MtEx|M(=cz!(#-Xz9si<^Zi4NL1_kZPYar~`sbM=*XxN|b#M`1N)
zh7@UA;rzuj-22`egki%`PTV?L(Ptsz81n!UNqBq~MN=!?SyARbjdX2?!xK}WX*@9u
zBqxmHsQ#~3Y%2a4EmnxxAY~JZF86L?9TnygWqra#1-2YI6)v2u$;w5EDww2R#!3Cw
z$3XCo%^10P`-tY1Yn)mx=yp#CW8h9da=1R`mB0QE`T76Tzrkj`(lzJwaL$N&{g!oH
z&*QQnOYsS1Y)Vj=N3F7KroF7DGW$$MBIJ;C&OItETe2so#%ACD34;1V@v1WQwO}j<
zv(lKdYaz8t{VDVd*Q(c+@1W5Gj?O{rp#|CmEl7xnthjP8@~00ryneIgmtXa~k$1>3
zmZ?<AvQ%$Uj8R!+wkKLf&arl)#lhX1H`v|TVZL{u)Tu0$8qjnNdFbg=&t_b+X!o|}
zShv*NQlad0WJV3NNe_VJRXAgeD)8X4s6uUq2B+3G80}dijYPb_r8B40xVZD=A#!-<
zgjqAAjXl5g3qQwue*8sFec`K|P7ip``5m5l@gub7E|TXCdF37MfB7o@)STpJ9AAIP
z+u!(29^AXl$^FPG!g|(l`n2Qem!9Iv&wY}aZ}8v!4zs79<MI=qAPs9y?p$MbdY^W8
zUr{lyQ=UbMQbU!So_9f{Y~}&MjKnpdrIb|_6g2q6L=M{F@?I5lvp^UFYv0g%&$+Yv
zbUSmL%gn^7;<NXhK7X1tYiQ>S-uvDc`SN#vo333@=pT|rhu~cz#cdHM<&Q{GRpL{p
zFZ1B;+e8PWi-gTc%nf_zPUGh@Lm{*Y(R6cK=SgzRxJtOLVR>f3{)JPtPUu%_HR$nn
zswHQtg5|Oj+pS?K#-=q_bn1th8-&2DfxS~6X%bEvoS)%b#?QcYLNjx?MrhhbDO7C>
zzCqds*LL`Bjx;lTw}WdIi0d@4+w*bJky@Rda$b|8+;ysqZ`MdTMAGO<V<v1y#!Vz_
zBIA0bKgkRyus%twSDDQ!)AulhcOTv;=*d>^<B@d5FvzmWYfMtWbW-QsrfcS{<gCQ4
zB34um-8_M|M%^v5dU;A<C#V&2qN#7F!YKu9EA@_do*Wg|PcBuVvAPy4ggQJTnWlC8
z#HW6OIG*5|bI3{0Fht_@JFHJO%w`=qM^0`%sHj|6kEB88x<b}sAVyW{C7T^ajHEEq
zb}g}wgu@4nM=L^z_|_wh&@EfKy*Z8R6#ZyA;@$g7q=*S<z$a2aLhF!r&X63^G+h17
z&*SGUJI`HaerCb)%JZc423Z|)=Jb2{#^=Ar<M;2=H9MNr<OE|NSz)Ta(2<xktCKZd
z(~(ol$?*xN!daco8WZEu8foW*^?jas_IbYZ^{?~d3s3RI>)!-?6<cwJl)B=jS&9*R
zz(s;+YC71O`vc2a_q7zn!5IZntZB8iM%>oAo0=&4f8N{=oP$TibPC#KjjBn3wAfWm
zq9DaXyBxuVW~$APw1uY1BQ?WRrI+)%n7C;H(=?v>a?Z}~g8kFG?CmdYLLq|7EYgg|
zWo26AKo}0`*K3BpXB>J$41|7U+zg~qpKGr3aS1UY9A(5%3q#x%tvPSevP~c+=B$=S
z!n^;f1?g0rTJWjQpdd!rXigR}!J%x5>fX>ovtu!Y(%UR*LB~}McWQDub2(_+MjL^?
zQ3|lz;*yF{!=1xtAsJIg71^hw#bI>f8pTRnDM(bV&AUe^yQN{_tl43Hy~WO>hzjcX
zUQt{1+ISFAdywq#&h|x`gmzrpDqvwPxSSm!3=Bgc7FPzz<fIM7oF@cNzrRpTnGis>
zK3w_obOc!3^^^jA8g4Yx!b+8Q6$PGls{TwBg*0Vz${MIl3kTklf`D}J+PAp++0SzI
zyH|O*=^4gA%889A)%LMV=V<c4xL&jAd&V&EaDAfJ7ZQ@xe#v<)@`8aI1v$<nau_){
zd!F@bLpy7UDH25K-Z2L{KUa0+WQ}=OIuVl&Lbl>HOKIL=thY2ZHENgE8o)(#()Mje
zuMJV8o)k4gWfL>Adle)z<cO!?){T4Iy>kJU_wXK8edHT2f0ex<GQNEa_7~c8Yo|uC
z;3kX7ntPU~V85q$!8si(oW>(;*3>u3y0Dq!>{R~*b0L^Yn1u6lQjGkdq}cv(zJ4SN
zR50|t{70oi*`X61A}!!;esd({py9v?*@HBK^BG!!Ho`n3eI_}OCgOt7<$)J2IR4Dy
zfWQ3BHQ!1z%OLH&auJWPi*SX~V!;j!QPG3^op-Ks>C$Dy&no>RXFb%t^X$&&93368
z>5p*EYx0rRCo_?ZVxkGKYrwl!lXGXCVDGfx3PA&c&0NnK_=QjY6bFmb{O%XNz_o|>
zRHfK8fTxwfZ02jX$J4MrImV5LeE37};iLcj&k*|)&hO5+iSUV!J;&lbmvG&{`m^8S
z#us1X*0*2hwVU_27Ed^~&aI1?22Up$KeQY_fNQUQjp46<f%)lEJo6J*_~f7Z2~L0D
zgZTM^`IFC)h7+PBoKqjzVzp8fC1op)u@zFvd~hn1qJmVJX}j76NL9Rzf_h(tfIPfU
z>|xn9oWF1ie5&)Q-g~-6SnMotZ9`6(-7`yyDZE(n5_0LX>9cn^lS0DJ=PGuT63z|G
zcg`YiMoJMMGh>?3Img9=vv_RgnUee^c6Vp&>@8`#)9hc^<*<K1+N@PPCW)cpX32x!
zQU@;cY?c_>-cSW$Obt>0>|CZvu)ptVeWqztY^CWE&Ag%QK;|t?+ajIA%`>ja_-=p(
znpuZyJ<=?2eixbTlikviCrv&yPM5iqp#Pr(LNr?Uh_E8(6_Sr3-iLSu;RH9H5Qigj
zKakcs=Qyl{&BM&`eK@}NgOKsH^;N`&%5B_gPffUL>G;}z*MdPNM9Gp1XUVai-xC50
zY$3Ddk%=V_MZeYNvr0E!vrj#&?Qq=g*o!OIhI-CgfkT>hPRXvEXd^3$qjm6skAH|X
z-i365@Zb>dgxhajN2I0e8YD$l_m61WP6bqAQ8o{x7&OTX0lXuIp4>F#lQrq6=dIUY
z$M1D4_s`%LGlun=<-)Vv-DS47fHabZK$eas1~@v#&t|q>0}PHZCc<*Y^3ny4-nz~3
z;DOH1&lW7sokp5~xE15-9-EtY**kT>`#%0G-~Rn?^3f0d1nUGbX0k+Fj>M>YD5gZp
z(DywtCUOjnLm&p7iB2KXuY_hX(0Sp)`73<+t6$>7AN_H@@LOLaN29ijv&2Pbxm;Z{
z8k5uxG264Fb}~{u%L+qeI~}$amDR>9OR<+hg_)$xEK_~41>Jv0DTbY@yEoeikgJ7@
zB3`EQnlh3dUTpR+n~2pG;#a_g;v-oEeAw~;uM4^)b>-SBLqHLjq3_xB8~V+ra1OM=
zmnR#M7?nxneM2L9MuwPRj1V#sO*m?cS>v+JQc@5<J$s#v2KkzVmf3QnT;^$(JH{V?
z5L@}3Zu@@Z+That?#jco(FbE}SP`m4rJ>>8Xs}lFSIZeY3$`019@Xq?ozQqyLst}2
zChzC%#s#)M%2-WiYxZ>O8CS+hVU<kCd=+mp)X;fn4Fhk#*G>1MHVz6h+)N@#m>if_
zk-${!znwCK^!GHRxUo5Fh~jkC&s4ZmOxEd{D4LJbH*L=q(5yuKQn|^*<SIpX`58TD
zEf>M8^w?sz7CLun4w97-QQomOJEa~efL><?pIUNmXwwm2|Ihv!H^2ENN9%!plZinX
zMd&lc6nW3HFR(g1Bn**ZjKmNaQz9ZnseBIQ#yT5u5JeGl&Wtgzf9il?40Nq#Cou+P
zj*USb8<5GF0xFZk+4EG!iZ(4JP3X8$cN9pVY}dT?@AMqXh)S{BN>Z9(WiP8fgLhti
zZ5z)fj<kNp+uyrJv%AlLW9-+w{^lF}!cYGp-uU~!&NKh)X9(jOKVMG2(VO8<nMZ~K
z^*$1}kRsMvMwiVd$T<baT(LwlCxX%_YFllZ5s^k`?$c;F*$>(4N47wPM~l=gMqCZS
zHi-yM6?C(A5L@l~dm%d&QL4fKGmjgcnVq)e+#@k?zI(_Yd*T$Yzv?+oGekV%LOsyD
z2r-p|TDxS<d7V8Bx*@N<^(GfCo~P*++MP*JH>Ni$h#Wb6dY5m%{#{nXn&r+Jz-ed7
z0;L`fc7Teb?G~&D?0XVXAoRiJxkYu2)5xFr%rEjYKlce*5+8ftGyJvx@>lrg?b{l(
zv!gZtv|N2P6XP=Qb3gMepZU}OJgF5LI)46>AI9JL9X|HcpI|mW;o-0R54`>PH~78p
z+~SpS&Dgd0p=IB<Y}O~F2zMKZO=66CSapo-dDuC6z+0ai`72*}hkxytpTYSK<QP9+
zBH0iF5fJq<E0!Oi0#HeuB{2mxqE=-Za1$<>=~+U>5}eM87V({&^w1rT9Q}~kS<X3o
z`7Ca>WDFTuwB*Cc++~)F9h@|H9D939yjlCCoC%^7-_}-M6QMRNoQgA$%(<`=8b4!<
zC&=i@&T;wiv&?2Q2w`i^!erT9M974Am%G<*5pgWKnfCaiuHQn(%4MEKJUIsyyD0~d
zb+RQ}0+7Wz-%5#JEF#NggO^AX8@hRiYlUtm_*uly2mDS#<}<qemUj0H?cySCeim_i
z;O0PAX{VZLZz(=nS5gXmOF~5Fb0rPHpo-5~O!#OtmFz@pN2Hq}^AXt*{C;9S2KF|I
zvtuG1zx#oG`w-gVX4Su|%%m+OvLtO{4XcU9X$ePmTU20u#-*83-hb-KPgJ{dxTyrb
zSl(1<byL!rvw9(xrWaU|V@Y>Gl(Ki)w5bTN79l<hNixn2>>Vr_uD?bKPx9cc8<0FV
z-oDAs&K}+i#F5p>O2-VwKo}LJB4`m4hCtV~N|}w2!<zJ<=k2e&%HyB@7|Um$B&A6I
z@BypqH}OX+x}9Be4rMG0oF|PZuo=j+29Z{~_I=MLCw88FmN?&G`Q)5@{T*oF%=@0>
z-qovw*5UR&uYc|9+`9IV<9j!`a{3bA{NCF<_nr^pozbeC5Mx&VYO_rU8CLy<Fh){J
z#Bn5sU}7H-Rs*s#;8MfRe9r3Vkc*E!j-Ls6WA)UQc7a2x?Sxdt=DgLLuqRP!)za%I
zs7zfRW#Mc!OH#s>C2C5hnithYrU+Qm;G8$194lZP+JN~!8fn$st;X7d_mIb2&pdI{
zyrK8ry#BV5)UEweUd3i@VnB%r1VKoVF(t-9=h$Kl<d_(TfiMh&ek6|3KCdE&jc*Au
z;$5eH;DU}55S1Zg%@;-1OrJptqCGXvyk*SOY-I72bz5oj4`2xOk$k0~rd86%a)YTj
z#L%c>yW*U;=7V1AHp`>3W=dMGGe#9LZ@kkHj>hYHsI!rV-dXdtX*w+`v$y*gP=#Mb
z-by!h5-Fc(+=cC-VCZBW<4Cy1)nXHKB@j0utc95|A;85Az^R}jidH(`Am|w%jcMRa
z={MPIr5-m~=a6ygp;={P6$B^;@1#sSeV)Qfn3UHG;;El6k#L12Vp&O1EG~54GWu1J
zMQlFTu~h^ih%`1AyS3K~DbE;zDJ6W9dFwCzDi7be&S8k$->lh;fe<qTnP6;{r(b-Y
z;|E9dy{c9Z!^nE0%KSM-Ev&3Mj5#xsX-VoiE>3Oi02fnjin*-*s96$Tx-FW}q}FY%
z1#;HhC={jrN@*`l1({-(6@@kSQO-GIyHz%WGv|jbTcfP`mUoUA2a>di(|CU8@Q@eV
zCF{+=N%jySO5(NOeVJ!}>C-f+M-Q(pHlCe9m8hAg*j$?tmWK<TNr$3-lm1(V*K>Kk
zc85yt;IwJ&8*i3-KU}XLzEAXz@S2p>*h=oEri>ObrmQLvh-kVzi8A0kG!AJ5npRQN
z9);|kfZJ6115MzJ->~OA$IR3s#8gO04HNF6-PlqrsQ8TI?w#wLJ%5&Vwo4?c{fKpP
zHOCZ&zUTbKvwZ$bUnC|&H#6F`&QXPae67MrDKTr8iprq-JeCd{C}kMo#iyR&$KLlr
z?p=M0xnJ|x<InLY{`eo`#$Ws|xR*18fb)(a_4rmh(&B`F>Q8=#Zn;ZdcH}gWL*(*P
zr}*Rl+y|LmOx*p|U*qM!{SAKo`U9?|KyGKy!(xQgX5#4sRwqX^(nIS>qQPg=z`AR>
z=Y>V`?B37(?LYe;@kjqJ{}!`(k2`k>Xh1yTG^mNm(3g&URdsWvqf0G}wtXlI)zJyK
zr0SeBbMncCcz8rQS}|Nb;m-9Vo?f1z+wZvY_zSQ&3oFOmD4z43mZtOMxFOTgE*#{b
z>6U<mjI+Udk5@WJ&RQgk!(|~Q7*oJ^4&Qm=21XhJfy<XJ6GJGf>4;g;6l$zEM~+l+
z6Yaj~yhtXiEko`2<w^avB(XVJm)7XD1sO!CTk|$?us5emiCO1qyGXaYW^n+`o})cG
zr`>PyyQj(BSzL1(+B3jDF7JUX$yq7&`fG$b?`qUU-K$Cg(&NPgUe%^c#!W`p01b)8
zl9ZO5=io+g8=W8bgEk&oL!*{I7<8k0o61y(P6eqvlE7D*-j)DJEi|_hPF<S0Ch*xZ
z5HToB5KWdGkg`$rY6Vj3<Vh4n*t(>t@Ugg(iilE`hb;={mTr}Go1_*h2m1}u0_`4h
zGH~nlo6PrjSUn8v@0e0!zoB2R)Q2_(LWt@sU65PJ<WcR9l0d&6Y1ac+KJg+u&pyuJ
zGvZs^d`Ib*G(33wCc9^rq%a`PG2h)MzI{!Fj4UptAu>v4@xhm9Uw97o_LxiL=8ZSm
zKRC^;w{P(0|IdGw*Y6$j-3Qmzht~-yL}uX|eCqP!ymRw9m(D+?$!mlxacfRj6Sb4E
z*EzMMf=6i_k*-yx@)$|$HS>1C&D(Et=F}c8C-(MtxxZRb6ntxgp*9inM2%&u31VlL
zX`4+=4O?CnJt$4s;tQOa)|M4i<v=VHUa(>-XPg&&Qz(;Es;BpbBI>O0aXMmQsGU@S
zovg?T(>2zDI$=f7bRNt0e*kI&SCpo+6%)Ef!x$L*k#SU0d$YpT{$fGrLm~yGKsyW}
zbgB;ST{at(Z1FPIJL?NFSW#lT|87DlD~ee!x%{18f0^}t*W*lSUTqa~k1UERoTTGA
zrI=LfP@~3{6;R(Rt3q~dt|cq*bWRVi!U5v5srm}yld6CVI$EO?9u;4!Oh(Zrk-2x1
zq9N#|>o$#-Xv~?sSzV!$I}mB>=#|4fTAN9gSo3y2jc0jhpn%N^n#G{b`>8;sbB<#1
z=X?WEeYU-~y%95^tR>44VltY)%@QUihD_-=)_wQLLy@V#sCzuuvvz817{OSaQCp`x
z(YlG6L!^mGSJ*FGJ<E#PW`m(~68!DAx%-VbxO?-6VcZ~R=KL@J<A0xj_mBVUY}P$F
ztFrCI$1m~DH(w@<i5N4Rkl3WCzb}rO2iUV4bH<@F_#tIN>N$J*9GiYnO#vKvREAf~
zQP+}o$E`V~UeLAR6m16RsFAuG)WTmTKjm_}MVvAf>f2`Xo2A~(<S=1H)qE7}z7)*`
zo8!zmnTQtjAu_Ie#!a#ondg<)ULl?IoZPs{{L})8BiVHmwLV$lZgIP?7mKh{t)Qhe
zB_=k3<~`oJ3aXr)Fs#LAa8c(itdXyDas$kM$X-8^1!~@^N=x7nG00p3*$rgr6`4tc
zb^D@i@Jw!;DnmAb>_!dTZG<)}{N*z=4a64Gs3_Yg#ORUSLJrEPZR#&Kqh3R@C=^jg
z43S}d$j+t93@IUK0W3)m{}L3N4u;JUhsQ^Bdl$&~+N{ed*(Mhi53;V{e7;Lei4Ldq
z<`^}Q*REaS6F>Go!gxeCY`A{yZDN88&%VGP{rOMwH-7E67@~H-SDU^V)RDcTJG&2|
z6nHJkfs@4Y@yFQw-k15x-~1fE{k>z}*`3k)9mG4Dlt_(dXodNFhj?_K3m48K^BL`8
z#w<3-&@&7JcaB#uLU02QH}J||`Wt-sU;7N3yZ7nNo>A&ZlC2;y2}jiDUKL!E9%81z
zsnj&<)W>Z6enEVPgf++C{T_=8=kYuHtXB6qzWyeQ_JnYBgV%otn!TUm!gDXty0gT1
zRmGwfa}z%b;2NB7G;xfIl7}|2*l9?8z?zOEC-}D0dm7<nv(bdGTaqLYlW^(E<ILwX
z+_<3%>5z%o=kUIEdzH$pkupgJf_ApQ@A#~Wb0$onhUTtzb#)tL8t<5qIpu{@GP1iE
z*uT=Uxa9F?GqUH%ZAWT7xH-95BJC29UC13G9aEv%py5cW1pL%EfGTAnjm_mcBu8{k
z$o@Wia&7@d%LGrB7NG-~0ZU5?B3jJ_yc44QAZAdfKg-ttODPT?K~?jFK-C{MT5hpc
z*m@u5ihz<a&bMa}QRZ1eOezJ;zD;WEEA-*QDiE`k*;1Iy<#XuYJ9!k*)&w|_o#4^x
zNSx=%XD$%)K)ZhchlkvI=MMAL3G-Qp_n9!RaVJ9R0|`;u+>kV($S@8)aTwG#L|T#)
z=F5)TUw@6qKl4*;aw4~0Ma3lHf)K+1VZ-Xy3Cpv4(6x{U(&0l~>r7xOGW0z=Klw4-
zdtV?Q9FR%m)tdHTfh>0TbN`!vk88(~ll7XFTe8>8I5}BCGsm}%zL|6No$H)AbAcv1
zoj-C#7{t)ulvu@{F-3An#GF+XGbyVfq<{|zm*MKQH+j#C&+*P1uW{kRIqu(lL(hU*
zI+n)4#MG?Mb*uTWkEvv%R?b>if2;GV2!kc6O5?WX*p}+)RQDENQ(armXpz1_6<e`F
zwa_}N-Hg;E(up@3u%YpOnhA4T#nprYQDgPiWk0f+w27!Qh)Rb{s+K$^`gPA{wPCdm
zO0VAZ^nK4}-P892!)C+K4-9>v-{?$XKLmz8G7OP0CY^;2kz!qxlARA#qLpeVveo;s
z7#pkCY)X^(UCi%(j44#m?RoW_Onu&Hjbf~zNqkE0c@e1cW%jLDSCvA>P`AnZdiw0r
zfHaDrYEuFlYewJ{nNqEXmf38s-{)kqUl!5EqL^Dijq%h3zi3A&dRh?DQnXTO#1p;T
z681`0p5}1H`Dq4K9QbLSD+;9rwaeLP@sS(@#6WI3a@NA9%$Jvfux!3<GU?>Ip2~vQ
z{<0O|xe7&1#n$&75~T=bDmo`t4wf@YopGEzZLBcg_L~xnjgbi{(=_n=zx=nkedjj5
z^V~Z=<bUxizr{EI+h1jjfna;@?72NUAK9#XVuTP9{itIMF<Y^Rx7pB)1eNpG+_oWx
zkr0B3Cj}ZmBZL4xlhTOqI%~ccD=jIci0Us2YFRVJNY_doDJV#POp4S(@ne?~+N>cr
zHNm!v!YL1$kWD8-_MRL}Xh~GH#Ea+Qy}Pun=j2!gDBrkoo%igY=iV!CaplR2I*Q>m
zE>*F*8jo{*o~413ssLQBl=r$^lEp<+^?f9ClyRRHW7BxV>%S>JvJ!tNUO$=zDnW<a
z^LF6QAS0O#g3YnX8eETygQ*;PvR5xjX+(vT+6KA-#s(S{N@5(1q8NaIaJ1yLm2i?Z
zxfbjY$x_o0r6QD*&XKaBXRp5Yb>8>E57Nh^vp+eR(1BOdL(JeZXHM^N`{oUD%5<*9
zWm9G~bu*1$MjJ~5&bM^!Lc3W_Q8W_^tP$rti?-qXrAtUWX6Im^_w0Y1+wa`s{_-{-
z``AbM!tcDy_cj}HO7!ayXNR^bx`MPV**lU~l-S2zhFf>}?iat!_a3Z~9mj>moC{6E
z^B3o=`ozJD&-3eFe2s_Wz`^cm4%e9#YZym%8qfK5&SPiK@pw05b?=a8&mWNbBixgx
z*xX;S`|K5BvXaXS)4)BH#Jpx~mPkl02HKcNQt?Ggb>1N{63t7tdH;(XedjH{`XB!a
zUwZW|zP35xb{_G*;q<rf;Q!|DadzSO%rE>TyL;~=9o}HMKPR^!Q5c5}l9UP<6O0ZH
z9>2i3#~<VVJ9jz0b`!5IpR|5XPC|?gM|Y2C5IW~ngeVVeHuqT{?K1bKESV!woTB$m
zkfaRt>I-J$`+5spMU}}qH<}6rV|+m;R(!Cf1Pin|vy?R#_XC%n44i%}F~8KpULbcu
zp10uVh;Q+Jm+W@H&(#`7R0TOnn5cYN19EoicR5KwOsGE?wY1EpNXfZM<3SWT$JzdN
zwjP7c<`uC7F}hc@gh6<>U#=t(G9g{%x76t)CstgP8CyFzr92?pOZ5`B=x^KVOLbLG
z^IPTFO*7S*iNt4DQJ}a?{kf{KR6I`&S#jG4Y^f$PWD`abZwB!so`2yPQVPsE$I;yf
z93Kb9re`*9$W0>l1HrX8JUKy#5R(bfMD3=BeuGav5<6shio<We#om(_2wkIvP|P?{
zVWPdu=Qz1KB16Q@X1M5ZvmNsNLnMIf7UZZM{^h5ChUWSAFfL~n!@<oxyH}p!+u!_s
zUcGjQVR4#s2bVb2G>{VSePM_D$0N7izQz}B-Q{!(ABY*r5tkA%MpHl5VtWMokO(Om
z4K6dLWP)o#hygzic=6o5bB#}Z`jh<HfB!iiJAa;6-5ZL^<_U3^f=s1iFHw5#B?*;D
zk-W4cOjx8uJM)RDe<#_IEIa%DVzKP=^4}Rn$#7cGdM}DtS>fre*pbHQzOFDVY^h|W
zDW_7{l|~BN^LJ+VQ4@777#^v@>*kt7#6%zrnfv!oz;zrR_1w8X@a<P`kWyj{BmFQi
z^aJCjXB<Yt7zjgDYIz8ZQD=l>FuRzPiAj;57|m)cWpXIA5>p&?sWvk;$4+1(g4qh0
zIO>`Xad_8b3|q0K1oet6mcn!UdFH9n;Y4T}6`R|pN>{H@L)9lCTPZCHJc^_@t>Bsl
z-xM)BrEiJ|F1IAVz&A5!1WDeC%7pg~#H@mYgz3>{PgQ*0T9e-9XlwK;4Vx{*p2k_s
z^OF#lp`>EUUImdg-Ey)k<2u^J&8UBR3Ih^GNP#4t6q6NYfsl2^QcV?<aaNkzlZbXH
zEbCHcng`vkh+K`^RV2ygTFXY<j@Y%>Ea!(caLXPoVw+kEmF860!-<4f8><J`xcBBY
zHusLWd;1#y@?ZIj`1uaM_jf<bm=Z^E#K*vM@Bb*nuqKU>VN8rEauOl~j_5#4WJt2!
zpIj^q$!vx-r%xRa!iaC2A*v&eK#Z6tcj-)-C|p4iy;qB$VmT8cZQI(Mshvy34}5E0
zwKP<<2@-4fXhJWi#;_Dy#o8!VsG>A&NC|1wLp8?4{Rj70E*IRn^MGL#9)!To(|g>0
z?G3K{+>arlC%d`M@|Of46>GRsU)U<hf|so#Iu}^LJip5rLtQ`4m>mh<s!$|D0%^!D
z(D;t*gN|tZkiC8+3se|2LCr-PT~QU4><_@2#1X;@@<u&MMa68AiNxhb@F|g+h|Cf)
z2I!Io$`%b$A;YTW?s~>o?>7W#%=a{?4^>n?ZOU3?=42{tlJU9W+O4ZR_xua0e#dB?
zltw$oQp~0y=fK4?XL$MLZz9f<Y3im=!WQ~sA}AL4+OE}NI|ijMixT+AdN?%Qg2iH&
zwpo+1aOTPL9EZUD@800x!)N)-r$5bK{MElrXkc@+CXXYsm}%h?^-#%9AdX0*#XYl*
zXFmODF8##MbN_4K<@3Mw6`p<XSuUJgke+&q-H(5ouYUVK;ppxiniT1AkXyXX-lk@U
z;CI>FyTyw?{XzB{$CEES#_`+VqknLhdGkS~_n<ZkmcLamwke4($b-$H*0ym$nd}=<
z!jW-y7`5{m4}Se?{O(`=TYT|5Z*#Tv42zb1nGv#Q*bIbq<n4aoOMm0nIfw8kfBqAU
zZjY{W$T~BQnV218%7mEc7BenAb%isRF0;P3;=YLwMG^dLPR0>OIJvh{SC^cKcyKN2
zqrk8ZG)om#5{w#)N_@F;YDy&>*&Z-Tuk#diGRC0a=YZ@m!nQU+Q0iqvWa$Pjoe4bo
z%*d%{1$Qdrc7!}PiBCUAngy9%M3%rV$PO?g%N(Rdn4ue`(S$*~(nYhI1YYdIlc9S$
zOJXTj9zh(*C0sUS+B(RY&IG)gD!QV{T8l`qJz}%gsQ=o#{VH^`s_@D8sjXUpJ>gw;
z`L1+)D^6L9hB|Xts}5>HS`w8iZX|_bSZEY^OZk!dJg4H6x(*8M&YXp)<hM#ka}{YR
z0w5mI*atlM?BnEdLr%iOTXzv0VMugM10sYW;L|`yOA3(?5;;V&Ax<%7Bt^)cunsii
zhT-~scAt12Ps^L%e47Wy5BS#aeu-H-XQ%b79&Gr)(~mRvkxQ3PGdmU8ym5oZJH{Bu
zJNxW?`p4nPrx=??5f;SMA%!t}zVYR6vTSB77pM5}kN+&+{?_j?9<TU`pZpAe`?H_r
zJ@0uxAHH&q>tFtz>dl&BBw0~XY#ma{3^5RNI)oe@aZKbEa*A58B#q%W@7!lT-{tO|
zyL{pkKZbW-)Y*1p$!uw2QEX=>q;o5EwPdHHI*&BnYjciTFp9-P5s#Qvlrv<iG*!y1
zXc40k5q#tEt<%{<aq2^9YgQ?reo|Q75{@rQoR6qg7n(Rmzs^}lTME;-hz9D0+WI`D
zGo_z*wcfCK`zF_}-BOyKXw1-gv}na*DKzz`YC%!vs&iqDl<zIyGby|p(@+}{mYgRO
zRLT_z%->hA2$?|S2O-2(V%t`a&FJiG#S3L-P2w`!!bRTZPqWPsYNOENh%v1kT6k8f
zEsn<M-A$Qi6)OOZ|E=-6!TGLIymjm*TP~HY+3vL|oG=Z9!XnW{;04W%un~fyAie!N
zi^WY*@io>@K}-w6<cf7nLE1!rF1u7WPmTjQj%q7}p8rZ&g_yij_KRYCQMqT@dt&!m
z3nQ6Wab=d(@~i4wUAPjp@sYG~m)VNc6Oy`pf|Px+&2*)DyBCTml_{gt_J*s!@kQu+
z7S6Nt;tQO5{>K>a-{E^Ne}^d1#50e1>Ej>a=)nWhsA5NB%xpsOp|D{aCsv?J9UaIb
zv~8=@;p~ZCXHFeS*G35Vwn3~RUW!^>oDOfm0ixp&k~1^knz)kY#|hCn)-nq&MOZM_
zT&|2EVGXi^P8N0Qs-iIA6*Q)&lFYs=C1uauKYqyB$DSsUSv#Ra@ZEx=J9o*cB?q9L
zhzL}aeLJ=k+WbTjlZuq;`I~E_OV@JFwsr)>J8~M80pLZM4B6Y+=~RQ`hwSyES)j^T
zH$*md9QFBHf$Pb6tpy<?CS9aK+%<@w>){wzq%^>+(ZpmHkg<V2>AxYuX2$)Kmd{?F
z^ZI6HX~Wi>$QCMMiZp<^A>_#V<cRL_<Ajv(phM<}A+K45Co-I@PPlyeGQaujzpJcT
zvuP;7FK?q_W%D|n&G6on)2NA+RdbC^`3-5q&Z5CZN2Z}~h4b&ZM3YB`hll*wPyHA_
z`}^PKH(z^&{-}qf#=^9n7V#w+X*(?<b~^n0EMdJ*clQP-C%1X&Q_s*J55yOr<H}F{
z6swzSd?Pfj#dpwdM&`3U5+1kd*^GxAxt8V58ScLQCLjHipJ8+NRi60Sj}cZ!xcfKY
z+!b{9+B{7`7-~zugxK;NrcT}zvFs4a32B7&y<@yz@bK^aL%#9rpXaw<yT-}xg3AX7
z+<UNM%mW)QbZtkM8`=i$jRW_H{FPt-BHz4ylYjls{Y#u$F1h;FHIgJ^h-^Y&G2dac
z9=Q6Q*SYq+H*5_gqIYTqB*K`CzIM`+^MH>>5E_!q)O*Z))znf#laO4BAi<D13!15^
z0XY}>dOIXj&bXwsos=~}NRsf*(Wa4$I~$&VI&%KmNOL}rcRef`m~~{|ft!JwBaIfH
zxYj6C4&03F<_I%{2Bgz9gEzEOspb_Jss_BZ5&}~i&f;)H6U0(cB&kWdD0NwM7CGzi
zhVA)eiK*bEBhmjrLX3Crkwm<7vbWKrGF4RPrphv;nA;cek+MvSXh}hxw$^V=+D)xj
z#882PtXkwLl-`oWRy0@9(ur=T3bEqBm87*Y5i9`|scML>*l@Va*>h*f;|VD?96xvn
zPDd4j6LQem(v$+TdB-pYHbc;vKl5ac#jyS8g!%qy?tb%iHWGMnf5q)z{v3b(H-DXP
z_xHFP)+#y_Gt1eWKX&&Xzx0v!;kuS^^nkEhlTR(-V3)l={7JZQnbdXW1u5iMRHz-x
zGY8zcc8fprXa7}RyV3LY?|g-qKJrn%@$%P5UBi_tm$-HF2A}%HKh58H?HfeY`!w0G
zV>Y|67$Y%@QsQE?AxI&NBOx|A$DT98(358ktK&7p(Gldp=`-g|ol~E&Gk?g!S}1;$
zkMt=j9byXck6<oo%3ZfyD~x7oo}_uAmlu1aso>d~N6phrvu}!Ws{K8v_{=u)noChv
zohDUFAoIxjRdgRyN}mv?LVc`FnDV)$;7<DWoXgB<CK3?utZ)RS$W5s21ma39CJM#1
zsOi@Rj3uxVv(evE-KRM@(irsXrGY1=Q(&I6S|FP`@(;oSqT~;`-18~k)YM{HPPJ(z
zRU}F0hZWV|B9%|@oF>t*HUDWdtX|(km4L;(hcrj2G27_m&P0oRBi7y5p;S%S&11Xd
z)@A0t(r`Vvtc`OI;I&9~+6bvs-cp=qvAI`gb9&Y^MXK!#N$PFWquwDlJDJ7-$x&w?
zlNQq<WyX+60di2ETlGMdx}GyrqobTJQ)yW!_mf4+_O$Rs4)_C8#9PhfG!-<?Z0MZZ
z+UmImh3$cm676%2r0i(MV{ZKZcX@dG9>e;GPyIXp4lzc){I~uV@nlV(1C8@Y<GJ|!
z)4cwhzsVRAn;h8;Bb%|;IG>?0*fv&>P_0hJIIzEeilHAccQDiX2nHHhv$lAfnu>0D
z_Rh|!F%IM$*y%dvUCXLBh}h;+NuxF(${NpG7Bz(!IVnUvB3h))VYPel7OPU$d2{a^
zV~o}?Pz##%rl)Ci+ynt}4G)eVve&PP$H&OfLl<q0N^N{uWm+2&+42vmPliX<jr}yL
zid7!yByOr}I@)Z%fCRFrR!+|2yB}lzF~5E^3skq#1T~qQzcba(ye8)!RXZ|IQkM+;
zpIPTdHyLe<&n+?vsS{jq(Axsd9mC4;-P;}i;D+br+cP%Onu3`grrJ*mwVN5U+7Lv`
zGH%_u&XZ3*Yl1eV6|IW2B4t*h1o4sO%yDwEA-Kk*{PmTyD*fgd5Er1#(xMw-K3lL^
zuSzLP<-KR&xDTox-nF>4V{`u)32RP0vBU?5+Z*^}|Kcz4_P=+7bw5^GV3P-A)@pS0
z4w_bJGr2=j!1aebxO$tjKlVKB{yuvzUSaW`XNj}~>DfJVo}a&Rz<ZxLP2YE{e8+?1
z6Y{2Ked8uK`Xih_#S5Q!frAgfpL^f9&GEI@SzNqK%tC7-Ix2>gs$9K(=53}&sc3Sl
z$s_^*0Du5VL_t&yuqXyOspE-|hLKs@arnw>y!>~5gLLYEKm133nlJtKcbJ_y&D(cR
z*zY=KPFRNx_gy9=$8$TUdF$jJV{G}t7hmT;_&@$<{LBC2{}Hdf`WDGK)?uV4GaPO>
z`QkU2Hw}`6<Y2TS(>sqyLx?A;So`1}E*etm6UpUD%N0{2cD~SZ%fXOq(voXg#FV)j
zze}<zB<exqYclMmWtIn?J0E%ReSzg;J?Rvr{Z427ni;e+rJVR3MCRHpm!(pB-5l{d
z2n$GFy@aKMtOcYNsZ%F7*X-Cj=QdAWMXo@RRsq#E*12wMZwf8AhMXI{7T4>~J68n}
zDD+nf9qL^UF(s+5HMYX|IZv3W_Mn--t=h@mLZz4z^g4o663vMPRJ-?uhNts9*+%6`
zV!BnpBqO>Nw9H~fIqfBv!XsCzU5Oe~^vHT1JDt;UDbqBG#d5*0x<iZ$`c1_7M(0Xh
zCPi&rjGK{muFaKx3`h)`_@&I4B4a<&4g;y}dHaoPEYIxm`@j2p{N*owox4pSc92|4
zmJUD53~kHr+_=Wa?w;q_7cVoubpyA%fYYZr^-q10c;+0*3#5p1ts*#RQ4}T7%sl_X
zzx8kOH~;&;&ma1wU*s^XxOw#^FTU^*Zap|;xje(!OAU9{N1Q)+Oc7oTF(5-s<|&Gb
z@nmf>xuoQ}m?ANaq!4h~addP{$P1b@=jxkpu`_R3EOu1UHUM#&KQ6^WMK%ixQxmSz
zTr1gESrF6W$d7!%*a#|gibL=Pb$5<xjZ`V31)Voqr7vh!5#6yQ(P1_-YCB(%q%t38
zey*jmenjcIB2Sj2mzrX$n_Kr$v70f7zo4R3M9frotL=vawql1X!g#3gQ#OGyr58FY
zEJbqw8<okaG=X9*;#^i5q842ipNx>Ah-FDeDHrjszNFl{ioogKtz#QD7dkyp)E=)i
z7F;PF_1Z$58Xd)gs#=;9q`Zh>c|R3}Qr^pKu#%gqE)!L9;`QEmfTZbGu`d#@#cGZg
z*TvXuIu1fb^qrxx7F1oi#*Fx;#zjRgMJ-9Z(R_VFHSx`=wNV0Y-7@2#v!^-MqEm8K
z`gev96S1TU*=fO*Cxi^j$F}*xN5@dNHe>O@+!j94Rg!YH>#nSP3SB>KTTL(_s*3hg
zbD5lyYEKAtE)x-abgbWelZV&uGWRX1^PGL@{UE|`|INS6kP>6cEWGE^(@(H}_7tn5
z6GG70?DaTsJOmXO%YozyW5BMb6oJ4nZ0hq%DJT+-Bc`!76{19)&9^fqCTLuvwoEw_
zi7Xo7;%r9Oc3caIp@=A<BBQ#F#p0N&O{-#;RT<TSYQy-gC|LK5_Dm9qmKN_kLyS81
zW^1Y+Mx66>u2scv9B*I0%?tZ8HaG6G_sr9zsDh!{nh4cu##^(>*)!jM<R$k=1{Ao)
zEG>(jmbbhCYrJU8O(_B+&IcM7@e9Ye-L(E8dHrY>sQIQ*I|QW~CQ}F<feo1rFlzFf
z?KWjyzS+$+_{0_2_2jlef{M0~9AlrkcCX>fZ!Y-qjXAeZoHgupwlim650Q)sFcfPO
z0i_)$HpeHlmoKXcJtlsW(pEP%RcgSgy<Jwvhpfke+_ZJ1wj}&1qw@h0G+@Pqcntvi
zy9cb-_l@GKUWR36ElTF*!wqL1KZB1w!+65Mg<U!UyejGKJ-)-A`ZIrwFMRelGy!fJ
z%1o`ENZLViLRA^-TF=>MFB9jxES|bT+TS6cIt965r^{Tpbc*J|O)kE8k-Re}#0^Py
z@O~t%ukhC2y~^A7k9h9qev*-fb1!_De0-1L@Q`MAp;U*G$W+uIPsv<e2UMDw9watr
zS%(9(o0hX<bLSy9zxEw|^3VMX3{i{jFZ}w;T>sV^95kMkJpH&Ki~+w`FpR?Bg~xVx
zdGln&-ux8nn~}f#SN?nM+`db4j-Ej5Iy&cRl0zaSQM5j$%yMV01Ek3@4jVRY1IImS
znA5v}j2RaaIi@-@RAz}Pt*4+xc>*B|IZkEtb!L;4zy)fuV?kJdzUPVk6)(LnGQV^J
zONY!I%sNuj=;1G-;!jRU(tw+xYKw*H+9{pO{AqREMj&f(N|_5{D%A4U0mAn^J1IrA
zw1%RVA~PW72%GUBCYU5MQ}XRV)_Qn54f@i&`^jS+eJ(Uw%Ty|epSNhV<#QGK8d4L=
z@<Wd(x|Y8yKU?U#h4My8q5zv=rGC}!pP&3L%dD#NwG7aLh4TI|i7S;oldZ_#`W;T_
zW?n^!`ZX~&4C6pFk&7le!Wb&**tXEjTKX{JEvYZXU4OEo&9J(02e)v1<K|8N%Bx@D
zaPIK4g?{E{$=vO6`I!s!x88v}_c&f1v$^wt7y^supJw-`f0FUkKGJoXsA660G$DCT
zTEs^ld+sp~&R^ih_uk;~3s<;w{t__bcYg1yeDudZ#HT*^9v)o%9(xC8aZ!~NGm54p
zYtE&p!Zd0<lhtw~XW|%$V?z#soLi1hM*7&$<t49v_f5{9J<T}uEM^N<{fg|JH3z6k
z(j&-e*~_`CE2;as;OK>%GqDlur)`$i+xt%@@kR?TU(>~5e#9n_<UOvDt;Ddb6)}+{
zdmmSlsEkUBQ_;8*bJKQ_YS%j<#nW07LlC6sDQuzqN@>@i@d&G+o<iB;N}d`pC9yB4
zL=mB(sL1Nq9jZB?U$T9u$}rk2bk6KEw5h9c)~uE!VUxE?CHw)n%#=V<MYXmUh`2}Y
zV>#r!n`Tda<5cX*?yt*`W4WK02#c;8Ckf|@YA?1fT`f=rF;(ELbtY<m;Uwc52dzV#
zTDOn{8e?2#A$!rn7HaXR6x?J|0@etz_jZMPzU4t%E?k7^d)+jLTF#7IXut-uWf#d|
zB!!VOU!0Sk!zm?#iJS!!Nh-)%wSdu!uFtr{-qIkM>Q^fu%V*I;j=}=SOl$=e?Wv!v
z($vcu-|iWfpV^*sEka#os3T_eFuwWCH;{g0I9$P*eX?sA@7?Ck8@Cv(01i3wkw5&0
zkhx>C8t8LijFCRTIwq1!NKA%;PJ25i2n1%cxvit}xfJlo*~If_jrJX4QJ>cw*@+?F
zKnw#dk;}WgJh`)k)N*URVJuO~v_q0)ZKh|jW_JX$Z7GcpF_nAIB=iWFm{Hju+URmV
z8)GGta@2xZAcjncJxvSi6d7?G9v|Tk_SoDzWDkKHNg*Qb)I8R7U#uZo`A>hap=d!p
z$3l528@c51=Bpf{#n}u#sxo@3S`M?_#SbFL{9}FnNEWD>TlUc%LPI9lIvEU+QOWC+
zB9cd2yc-}PN!8s5kvwGbN{GiD{T;{c>+qd7Jl}lXbNyb&kX!XKC07f8;=3spK=dWX
zh|#X|x?gkd+&Sx5Yv5KiSBoyk79AkN+0%R6yK{{Z6Kp`$P0ImO=>wKn<}yzkGrP;v
zTu+k-O2p%Hv_!)3o!4LELofbOc=#5lm#1*cGralow{Usn>6hL^PQue4dmqG=9cm_b
zoj_bB6;->^%4!^LZx`P+s;YT(4<~DID`e*aZfC)J-v1sx_m{rNt>>@Mp4*|DfpdvX
zf5PzAz~+8nxty_k=_x{7k><O&*^Jr#Ikl^K#2+$KWZl#fV+%C}YUL(1&axdf5`go_
z&XQ;UC;tp`<qGrI@z?(BpJVJI^dqAzxSyeI=gbIPIeU)R?%roT^qgrO`%TNe^$AaR
z7rB1tE?zt_j0`c+cHRQHni(lQP}j@Ba!18iM(8&^$u*4Y6LxKuS?L`{F)Wu|>0PBv
zElHU^n`DV#vSEqZV725*-1d8DPjcx-p1wS?IOxf<4a{~(-4fgq@hzAY2fhQ>;h2N7
zSyQ(JnUiH^YE7k$-e@72?Q7qM@;owwQv>++9xm(Oo~bmzV2Qa0)UP;8r%k1#D$`lK
zUWZ3#$fK>1cllN9CG18T8m)&;$$`mcL$RgqSG*|2nMjOx6UEV5)lR5rkt-B4TLaVg
zCU80$pyEYVD1goDObM(cRb-Z}7Jw6}D5I;Cv8{f;p)StWi%*&q?e4Z9iLe>SsVBt5
z7*+X^s1&-Cq84!ynJ<@YhQQ1<70J#BM<;8#uI0{+TfE}}zxw7kdC-BUfz<H$b5HQn
z$3M<jzVtOhUh~sG|55(LPyHl^-}*f^J01J){SZ4J`T%jcK)g1|s53f-Zb(LG6j5m!
zVhVitBOl;D{P+GJoH~DzgM%k|{@Ew_U;W!Zi@1n5$Agpmtag`p-&+H`nA01QhI$Cb
zq@SCl)<`KuatOpRg7@ShbGTaJ#+LiH?(@X^p5(!u8!YF0Y>w`4p<v&?8Ekf{P$<h@
zsDF@pwv?oh@B3Gn=yYDZp;c1FhYD@CD*L*U{1!!TD~4*aEVWQ@PSn%U8ekUuO-Z%P
zcVd<#hT2w?$kMkhMd6A++F)wEFC|(Sf>sKELZi>-eXW9Pww|HdKcwy0Z?Un=kcu^H
zQ)#>u!5Js2%IlPl?wsfhUZfTkxqg4S*QI!R*F%imdwb^PxyvI<Wm~uxZ?sr1j>b8>
zSWzk75dUa<qM)b~nyPcf>M-N|v`!{;w#=A1i}U3;YH+^Axkd>yWF$8@pLJX?Q~Lk4
zF<SMTt1q{!_FDz<byj#fSJth!V%;h5;0irpQiIM;P{QV@#YFjfilornIjRgdVunW7
z@E8K)I1<WCa)|Zw<y_QWbyjt|NhF5COD><uHnd>BCo5u~?R{KnHtLlXQL)-MDNnkN
zB$cD9Zhne)uOyp~wXqq&BjdoG*WN(-9zmEtehzT}Wk8z0j<5XvUt=6ql(F%iIBxjp
zFZ}{BZM3l#U>p-CA=9Uk<)X#!FS&K^E-``k4Izd4Ia1QXuCP;bQg%a(p|X5(vYBTq
zHXG3HlutT8o(C@NbUb@%k0(y=a<WM*9%Oo=uCJVxo}Odf_o5zqIG=0oSN^H@t2W{+
zpSD6#L7iyYRn{ah%e`#s^hYO0%-V*dRbZV0|8MsGJl3-<yYIt3dpPGe+_7%ms(SV6
z%|pM}BgrNyl1*_?OB5-Fk|j%m6eY3@TM?{Ch-3tagZvRAfe;um5*V@)AeLcUG9t$^
zHBb^w(Ud68WRvW<+5LKaW6f2!?)V$dIeV}CvDV(_S1mgbkk=s4tshza-n(^&-#ura
zz1RA#@38M+Yc``lSy)+VgRWFq0IJU|43{iLK9CDSZSpEL`_svHGd9he&x1JfvSU~!
z(%=|6$l0sO=4wSwJ*n%t^6EEcpZVQ-eM>f|!}?eYTkpsOn;iK%<#Zx7w=Ab#(KJ~h
zX(0Ct(z+v^_pBc!j_<?$+n!su=Dd9;a_?be)q9d;5<0n*T#&R}x8G85t9w~7Fwv>T
z(W84j`{c9qk}8Ex8wd@RF8p?V*fN(c%{aRGwjJK-@y)PIGsOzr4X!%@n#mN0qMS-y
zum&2XH+<rApXE>f!5<+^0%6_|j~3kd>`mfsWPSWeUis+n!TZ27-}ZjZoPsgN!0GoD
ziu0=I1WUj-g7iJU7xJS8t{;#gkhU-L+z-5uFZ`1yc=MOP#P;qUE_!@WrDf_m!hDM>
z?|FtWZE@=r$Uy1?J}UJZU)q3;a1K}Jx{tlY<ntpES_iN8_ZpAW>_qY`kd~*k_gB33
z$+zg8=kop`Z!dc$^Epk*?D(D_JumI;^VOp}w0_F<*%lYe6<>RJo2^Mh@QE&E0zwSY
z^iLLa?aW2cWtwRVIEK`*SluH#VX=J3)?|*8HG}usbf%GZHa0}=Un({#Vpq`CtcbV@
zMaD+9e2+2}n1sx1n!qO{CZvtH#w%rI6ep?v)U|;9aTf35Ce}c<K!cPx88a!v0G!S1
zzFB0!|40w%N^_d(J<~7JFm@s0vq!6+7JP$4U8S9<xsn8(g8A6I>wAp1>65<Is;t>4
zT{Zv#*(mlbtFY*XE7jM)oiQ@zamu3<5to#y09Kr!O>TKQojSPI{$k_yjrps3BgTOF
zjeq)5R2WUobXCFhP}{pRB=z)*HQ7zEKp$<kQ>I@g-4hWc_3Z9!are#<?W9qriPL@a
z`1FkJ%a{55&HH>|)p6Q7cBYp&*t^CL{=^^P)o=d*zxdmqAvEx(|J^^sul>V+%B_CM
zcm1h<gBWMHNg(;CQ;{JMy^y<3<0i@-u)IN?R%ns)@_U};|N5W)$E=3PWGk{95*<Qx
zLg+i@PaJZzJ%RPA#xUt+7bF?TWKGzR6J9KyA`c8hB=rgJGegQ8pRa+y`RRh)y<Luu
zZZq4Oz{7Du;AHdJC=(dU;Za!zMf2v2F;gd|kI~5}15}%0D)ly$CUnyv$&XE__g)Qg
z8jTfE8t*u5t*vCq3N=GB4I(WXcWX{JKvOkzGscabg_~5)vNzODt2BJxnJLwn4Xkk!
z;}p3}{>5TXHjV-PTl-KhpKG$2k=`pBf#Ei0*UsU6HhQPiOmb&>Z)=KO*?Wz3F#hOw
z9|WssoU8AU$8_d}*2~yr#u#*pTJvkYHLseBs__@)i}ZX5K>@5_6y6Y=QGERfYJ{K@
z&yA^&a}dH*|1Orb<qgC(msRn`2eUnAqlo}?`so~EPNu#ZF>Qb%!6NnS{rF(hQU|pL
zsFZav(?3R#XV8H|=~r|LuKQnFCz3_0<PA+fkAXj%fcGZ$Jv#f=KC<l5^}H&1(#3em
z){cLC$rNw}TfZAOmNCwxOzR3ONhM_3@HPym_c_0H7w;TPaqRBx5<UF#PybV8)ro3*
z+wBPl&t2o<_0OxzbuX;@%+czMmtJ~_JNF)Nx;SNje}{WVC-&TG?f{}DOWB@BtZ~m9
z*A#&vsWF?R#H5Kd&NGk+!P70)>_o@QmoD?-mCIb(YI(Tm@lo(Sgo;>KXvVIu)tZl8
zYGWvty%M*P^In*UdLC)w>Ns&#z{6RHfamn|oM;RJee^u@`JC1Hxn1Lg<Y2~xWfo!)
zTUmb=1t=?04evFVb>kc<5}i?tX5Y*QH4z+io#Y%yy(0~QzDso90Hg5V?bo+tgL-lF
z$Y>V9XkUXK#Gqy!V!EM0EwTGP(|0S<YM@&r7RNm&4?OqodT!s1JbEB3RxN$<3|R~J
zWbRvKidCsYP(zw^s+i5C*ABuYbnAs~=n1qKR99Lj1EmNN=ZsyE+25XV`*^{SJ<*0I
zX#<2rN|>^*D7zkQ8cH4P8iX2cP@1;Pt)nB}x_y`TJT)b6?{R+gfM;I0#&kO4{QQKQ
zpZ^LkefYyn{Y=qHD~z3JX<Fz}&H*7PYB-r_!|Ea~24dT)K42wCSTNmhc;eL;Nnd^w
z>3UKx1QN6j&GwWb2Hx{$e?NU%6P+V>4#_=kGAo@yEwanTrRG6WA9^)}DH9ueIj1!0
z!mmkboFJ@w#II?0FY)?6{5j%u&L97=@5K!P|MB19-o+_om^#?*J60~@a>j2>*pVr-
z#ey^G=(^0cY2Z91=Cf&ClbWJv6OlYH-P*!C%_!~YI9r{QB+{)iyNzyGDQkvdE}4CW
zI#RTb8x5cLit^SLNtEU-8yj(@l-81LgKCu2MZ}@^PAiqDj=0_<eufh>L2?b$KdsVh
zjj>Sk0swWhOWHUIR!x?T`RnW=HjB(s1r#nj<v#lHY6`WultP~&n8T!)!$Z`7?QcNw
z7`z`5+WOlREwCF4Xh)^xj#@sW+<9k?rt-K9poF8>>rrS*RFFbST!AkWX^(SOj(cah
zcv?VEj3%5K^XL?=P|ivYW_d&$s_$l`J>rPwOu4l^1L+wqR`h<Nu9V)|=OMGsJt+;k
z?xJJ1Jt4U?YGtkPTK5A<JU35zK6&~M?WH-PZSeC6SD(7d{=or<PhDZQw})F~o`2<e
z{@HK;CWqIjJp1AcOlC7u*IO}|m3mbgqcUnTt(G%MLufoX5A5$xk#@%5ge_?`&u?*x
zEYG=gIOEE9yq7nA<JXn-AlV8VixnxcGfwmUv^mvm)IRm(G!P_mF|4`h7sfJ4G(Iw%
zAF=HxwU^w~iswQ>Ee#cRuf>hUts9pBZZy^?AIs(Ymb1ZWL5drrJxXTjrUq5Dg+V80
zVD70KyToH(YeqFXnlkedeLm|Hvw|dk1BjBU39n7E#h#VS%W^F$#8W06cD4%R_EIOl
z)C;0tZeXu%5yn8U7&gnNtdmu5=5gNZw69EP#p9jIZNz(>JXYTay#|@AS>=U6^zLsl
z_I<?+3B{gOKT?@XjbV(8VCF%gY0PBEG{jCD(=Zgtft}ewb+p<TYEv45AHfWKoxJ0M
zeoyC3dsx<ny66@YrMj17aM5HP!&)uu%Gw(jcWo@ajc@DJ-5O&bOAS}F#04z%BN*#9
zsK8b`m35{ditR_am~9P|lT))l2Zk<_2VEz*8X^5poE-L$Rrb=xy%Ldet>QMmhp(TL
z0dm=k%NOgj?8n@nvFWz28-JGDC<RI&mFM*KO}dK}bcu^@V7gwjx^tJiUwV`5dQx<d
z90xb95~eLk!|I}EIXE6H7d-v^^L**cUt@5dA;ZPlf`fwt9zHtO^JeJOmA4p=Igmje
zy^CJfTLhjMC><PZn>6S`9JwEe((~%IL!Q07$CXPvOeUVrH#FHiGOW>cHIk>u1++HE
zDXW2<3<|>4y~ZMv9F;**<tk9`sl({&wG!6*b+KBpKbtei0!jM!A$t17lH9EknOfbO
zfjF`TorAEMNo_QArv|1H3Q%oqxH>B-&gUjk2nv{GZ%w$@0$+S2$(qL&+c(MUTe3m@
z<S#$c+SxUBsAZ#{Scci)NWGByp42;rKG7`)x;3mXU~!&VoWa?7V6ki%O8g6AR1ecA
zQ2RlBiLVDoaUYYcF0q(p_K*hV^KI1|=oi%@upogIO?LYEm-ZSiPA>><qD_dyv;q3?
zc{6d)L%BS5&H|A=O>9a1+OnIVM6`H|LHWgB`z7A{zxW9z9y&=(F7GgDCP)*wxHw~V
zan8=}u4Se<H5v#C0ZQRh6}LSzrEqPd;&C#x5jUTZU2zpoe8->ty?o|xKB9Yg3PE*^
zu?>X7DbM_Yk1)M@hy;zN@ZLc)uXGuMVXg5|5TG1*x;Kpn#QzwwT{nH(NOJJF*@Wzc
z$R5A^v%kttGvR$NT;j`jzQX_fkGzle%2S-4p0aago1;&i@t^(Of5L3OgNsu{9Bphk
z?Jw{N!a5)>;C)LfcR6bg8ySWy%%@vg@FvIE=^4v@z`4jGJ0@X9ptrDdNR}Il^Ocsl
z*=WM1bETO~*n0Aze6G>R#WqIgi~=Qe;#l;s(*@0FG$*}!4b5Oh!eH>C>ORG3jFE^^
z>5Dd`y4sO|^*H%(<2$ZQgOcM`Yt&k1Z+un;H{ci$NfaX!S1KY`)`Zfz2o9G$LT`Dj
z-=GH74yRHdKQ@Uplw4eC4DEh6D}-yDf>M1;bWG_vYaX2G<#bq}ZOCSZS}lSEy*IUv
z_T%SbIv&boHrGN@r??xMcm1K&1+g~Wc7rDK7G$9B)@0uknm}kHvi4P@)pb1qEvB2+
z6Q_}(?}=tg;}B>)M`x$RCh*k3HEy0g;M@OaKgOp%_a!br`7~d=^E$4}JbB|8KKq3)
zadLc%zx0>?5**zDr$$`9@#NIQP^e#xf{%l;F*Lsu4QEFn>2x#q7s&dIm<4CK)xm??
zcF=5-rc;V!AeSbrfF^=b*K~sFa<&Fl_fk^ECE=p)InP;hwI$)DC-)tbb~b9SDMm;o
zt9ax2D3Jw)mg;=Hw~wvnisb9xjO-mpP8P%AsIfcFFr-rCZhV_s$ZcxgEQiFjal&SX
zX2s0PdFS%u>z1rjx4O1P_pf5!gVXqg&7G{MG+b$v&D6+D(;nlv)_*CDx~tC%e*B&_
z`r6#_vn!KZn-JUeQ1{Z2G#IkCC`LEN&*cJtYLg}&Gdlc6yqr0@ZZ;w#&{KDLuRv3X
zM(s74cJNxPmwqk;N6uC=IAv1>k4;-`;u?ZZtxGB59NtBfQ#AVfI#*433q!@(-X!AG
zJsO+3lGT&aS>#P!Y{jgrIM){UR;km|0-U%IsmMjqo)-P##yO&OR{)cAAs}fmH}9f7
z)%ZTVnnrbfV%-g9kW|=b4*5F7p~MVT?P=ZTAEyPCMsggy=DPMP6T%+$3Kq!Y`@JiZ
z;W|llj~QcD<P&dos7iGnzWEMmxxhJLnFj8^eaxqR`e#X9M@l``qjT-XH3&k#IHg}D
z?jN7<<a?gyvtNE2=Np!bbB<SQE^^O<#eyq42b`Zi)N?B+NHIRM#^V-GFhFVO2SVF2
zbUoAHIhama_X8cUUM+a>aGzJM9rDE9oZYQ0aEYxZG7Swr4XTN^y(*^y>ZjV2XQN3=
zRtASNP})0J$^e3MG{GZDnS<G`qiR--@@pRzb5mb+Ytq&hDJ7<%Wga|D;~3T}WLN=v
z8@1QybYC^}l}`M1^@h_kqC`Qtu;IdNO>EVLTV0V001_Oo@%TaO=5Myww`7C*v0uI*
z#6o@bnvLv?hJ>hNUxq9RNh(}U#bi!2N}wNvAvwBKGP*>+hN5qA<K(m+u41|&YdZai
zek-c$SamD5w|13!RI(qPx~i%xXWk$SYS8tS-5JM^&dCyyWEpT9#Y(m;C^2Uk$%Vok
z5J_xr?Q{I-qFVe{r&kYA9KZ3I&-17M=#O({($PAP?3CD-%(R*D=&iSzfAIY#aMy+<
zTTWR)j4Ihwa;5e-r($^-2Iw8WZB#Ee2~4i<@tuF=Pw?>bw-~x5?Yw1rX@}|4hxki-
zq<*P6vq=b{*ne9yDy4ZUsN&{kKE7&wXMa-;fw6`!CP5BnQ(EV7-I6;eD-Nf-y!qCB
zj_#kqd-u3{xnXzGuz3B;eD(DO%RKPR?j=?NThkd+@A0W+kd6*v)=nuw-MJLPKGO}_
zbT*R~T!btfKe~%<mWiS3nWQB?4M>O}P7(b2{;g;nD+o3!6%dO?Ft@~#+vRO}>VJ;l
zir`obfd{7(c8_2-8*sZ$&mGGa^wJ}~!TG6OVx@5!f0)tg-`2he&K!5e^>&aAKx+rI
z-)M%$_f*hQZTL!dS_W5~w^tbLU@Qit2R8t{HLv<LV$)kELLR;W4XQogU%$x4*|4h@
zyrAMGCdhdVLXzrGII;q4`tL?<73LUObgpIUPhHO&<{4&C;=GKLYx|5sX7r}ta+PtQ
zZ+`51HzrfosHl;fr=3J34J=O2X?OPVE)#=ea41sjJl$%bZCiy9ykmD~o8t#3rXLdp
zZ3LF<HP2q&Bd%uLxO$x*`2!zCc6Ryl+i&0|6OJAn)1NK5`kwdk<A3N6@zRUWb2FU~
zCo>2W%@K8;Yz<Ta&|Gr3BxLFJIwe=1MJE>L>Mim}+f!v|NQRI}okqu`G^jxmRGPhJ
zCT7Gt)u5)tG7n5rOUz#LlyhR)5A?|fx%PSD2HIIWu6Jt`>ZVkwu`*7|Va)cF8eH@^
zKv9d`js35<xH_o0n3XoZVbD_0{c42dYQwPE2a02yhZc%vZS=~sO6@K+57R%k-<C=1
zIH4PxuCZV%n|1k+RA*|xF%20xk_J{9+i2Vuse7jP8`EZSBjwJ#3%oVq<75?l3MvT7
z$(+|cDW%%zraHN<7w6r7)rU|vMw3(^vH(@yoOfgJ#KxPB)J)L~3<%=MA(OLEgh)Pk
zf&pbd`0Dx_d>|Gotu?zir_nGad$jB|W#U(Ei!}|Pbluz~Yc|d3#>>X9SG2XJDJ>NA
zI+67l2ic;G#6a2&`gz?uT|I-wWtlYdlrfG^#OTJ^Gvvf-y=2u7bV-}NL9$M<ir%z>
zg|;)g&FQMtNos{vix)u_N``4^YDL`R_qWzgQX7?8V{JTl#YCrkz6mL9pYW!sU32v2
zO_B@<fx!tUx9;-#Km4f?L_!<8{mYkecutRQadzIZ_skP~>CKyn4|MA#i!`ttdTmAp
z9-Ka8GHE$qEQth@5NK`YodR{rUeo6(>yC<(fot;}E>Bx-t=ELybA4;Vho8C5Q-^!(
z?96FH0}_}EG@+b@&SVspXd$8?b1u63IuWu*M8OdsbebETCrDy{HYfF&<Hg08iHbIg
zVFM6%Rv2f2&K&1MfF@3qVL7ZxX*D{I=Zu7rsUs$XDsg_}^HA18)^n=d#){VaI$@WM
zEwxH=O4N*iivgGZbq?w`#p~iCg3D%tq@wF`m}pL}u9ECVnyxx*>Bp08C__bW%Cxd<
zIJwftYoYIK8c`SihL*@#<b}Zb#VJ>>Try!jhPYVMQHp?EHZ7fw_Ui?U)u1kTZqNw`
zg$y=M4)tZIkReXs*xB0S(eXPLQRb|mu&<NM#k%8P{Q7V5$N%t;5Vv<o7i*f}NINs8
zlR$Qf-1o?IrV~<%hAL?j<;M-f9A2t!fvi8nXF+<4ZfGMRwuH-pCw};S`k;njsAJY*
z(P^PAzEGu|x!i@XYYMC>uG`NzH66i&O{&gD1>_jf_O>MnWa;?GkNsZGU%Som>knBS
z4}5if%EQ~AqdQu$zIO{BclqA$elLISKm4=&<lp?Kyz#l$*+Ga-k+&E$RM`<BX?|zt
zHM?&B=IxXaCLn=c66dRv8bY01ruAMYP}o7>%<g`pCM?&xy1Rl+Jw4()`ja-NE}2}{
zW-0VMl3|@2Za&(kZ3mu+iCLQoL8+SrkYH;V?0HgyA}1d48xb;yHAbpGLpnP|OM_|%
zp#;VXa^qf<F;XoE25?%~LqF1M1f1(o7hj{z<cvs<L#J8{f~;XU%d8%G-fjQ87)Hzc
za)q7uHQ&^JS$&2}5m#t>7_E|AL+J~uTZ$+u3%~*#eXbLYP{v@+s8rUl=$E;g)mEEi
zDN`4nXcRYSL|r!vxa2C(WcR?2?RH_*`2dm!P9L1`^!1mC3{1Vyqq@Nm0;}_m_FzVd
zAKBfWb9C>BWPnf(VvO>{{_Ym*zGruT&I?an<KKDl-{k!Kg0l`@dFBP4I^5^_wOyPK
ztj>;c-7)Rt2H6#_bERWy0a~WodN1;jNYW9SwxZqoDslk8^!ehv5n!@9p8~_s*U1N>
z4b~t+=Q4QDdg$r1uo`+&PUJza?Q*?lnFodhBI+LM2XMKkZHp776*tu3f8EaN6t(Vi
zrIFrvIm{#vlXq<FaT&sf!BU~hR?VW_(D7%Md#`UmY9qV(HDhyIa(`<BUDyF$_v~y%
zy(^k%+jp`xhT3-&2E^mjBmH$pdfsCIi{E^oW&5@rEjUC*ldN2}cSa__INcb*8*T1K
znsnPlwkH!p_Q<klSgz>0j;t7iPRo<pEv~a;C0P8%yAOg@CWelAHnx><j#G5xMMJ7n
zQ`43f&9G6ky-x(MF((>56!9S{z1t$sg7X9lh~dWPMxe}1A7^`MHfGWMGGI&!ZQERu
zjde7d7?#PYg8zP`=^D`Lo!4lYpk222Lbd~J)8$-ywhdap?T2c1w60BL&bd1CYH8u<
zyPj^H8G1;4(lWv(td&;(n9fw{dIUyhMcFUw(Pf56_4D+3Dc?`?SHp$@9wXpkMwxYy
z*MG=kAJVr{=Es55t+=@J07;1<z(8Vs-gCNG=wQa0R6I<lGlC1;`rMb9Y|r@GqZ3Y7
ziK&k)^T7GACRs#U8$A7xS+16KJ*jp+SNudQHV3g9O)^vOa82Zyon3rNw3CL(`GOC>
z?`58U;xc<XbK<l`;etp=1H1Ev>AEA6S#5x%mEK*a*P|Jr^Ti-D<0CYvEAMs_xUw~)
zU-g_X&rOzGq(&RxL-biFmRjAq$A`eoJLa<)!&wJSW>_zolsA-n98H~`7O+eh_s-3-
zM)$Zel4^74UVe{ybG-rBQB2>jrgT0AQvZ!W!G3pN-;xdLkQM37hJc4=L|zMJtJq^3
z$BN?cR;Qvy<ISZ$gt{4RfY)|#NM&7Anv_>sx>#{oDuc!Hn5H?@38&MhWGvRpKgMZb
z@Scx<>UX$UCOm;8D@4@Y)qWDb=p4+PMCFxzooLKv+q%(934<<!$g!>cnP2!AAN|1}
z;OgZG(P!G78L~Tplo>A8EKg2owx+r{m8QEi%B5Jb3ss5<)rSBs*2oX>0b($XTB8(U
zQ8NuNn%>}$zQe~B$%B;_N=qIK6hqJ-8<B1*lg5HXHa<FT>-M{Qr)C+&NNVW#-hb!E
z`ON?4SGf1d&$7R}hdaHXzkdN4<~x@;Iy>g0f8i&{dksJIW8Z_jb;|F&@fI-)vuT6K
zfN$*mc~`uvNGai+V>+EzBaUv}u^u|2fJDzK!;C3Dul3imUar3X3NmZ}s$hq|k?)-8
zFc}ytQcyi8b+UmW!GjANEj{^e%lRN&U9Y&b%1o|i{9cdS@!)!(w?*9}sGG7EDmrP%
zh(0O4xYKE#m)^ThCm43G^}UqhyEL9g14QiEu>4aQ5K35ePiBR4OORub3mgkvUP9j?
ztB&D3(LKzZ-)(q!7cTwaySvAP5H_CF;s`j#!02++Ns>1wV!uJZ_IB`wqQNRo)RdgY
z5p}O-DWU~Ts3@4Ov0TwIquf)LzfpCqDANh7uT7Ae-j-Qz(Z;y?ER7~m#ZU%(qloZ>
zdyja}m0e;Z7Hbg@GQl}kXG`|>cXc}DJ(G#2oi=oRvTLD0g)~Bpk^R+-<G$zkb6?`=
z@B1*%z5FZ^pc{00<x--1bRT*2fG3}Q6>*ILUkb?Jzz1`67Qr`ahLh7!qhLaIxDeHh
z1*3R5(_LHfpRFm1-s{ve>qC~UPs~HM35wpsJ}0_Wk8_Ds2RwnU@94-3PDn1(q)Z+f
z9FBG}Bio+i^Jw-{Z>ig8MNLmzVkh<aFVD+JG#o3Ua(al`u`;OPQE~0H{aBYsF%K%|
zmSuuc8U15U>&D=x)OqAlzg!yE&DW;ZwE#3Z<+?6w6hzge7gIoeqAFEVha+{eTQ@=b
z4g5yUR;Cb=H(AV1nNelGa|#X>O)pqPj7{J!@6EZkyTfFF;rN_^6&(t$4lc9K!r<Y;
z!P($A6~_Xhe;ht|mtMua)Ym~}{ryX$=GApK22U&+QD021sQ(NOaugg5xi96o=}f&E
zW0g$NI-#}6XE}H4`YYyaB+Ybm;Ei(XHZ$;wxu16ydsUu0XU33>1%9DRhQ}M$LjBdX
zVrTkBJvI6s6pg1*!E@3y2BB)lCFR<v=A0_Mx9<lAt7f!tEbFB(<fNXrrO_Ds)B@tA
z3LckdA!nQ3mupf670v`V<}?mvGHA5X&4#Q_Uq`^DBGrXz>tI+cSREa!KS>Y<C-nUq
za@OPkG&)HH@~Y$E=YEr~eC&0Wso~-Ik|eNpp4Gb3_g#QhlOLpEpbd?IQMsN)+1Oph
zfqBT8#z*LTp4r>w`c})$lT(JY=7sB5dF9zBINaZ%ZCWIFBqx${wAu5)m!4tg!3A&J
zdBClUh5DG7^R${gDi|*n<nZI3nG-?O%+Ke<)xCYL%_npr^6)Iut=8tKZ!}*<Ct%nZ
zUI~yh2lE*|WV)1S8ev$hnVSAyQc}rjsm1Ju7qCbuG99b$3t*En&gMC_{@jr3_yXb>
zTT}Nh9af}q<Nl_4eM>f|DG!h_XSM-SsDkEPD7cbp@Nk_(*+HdLBn=AA#gIiusTGx`
zHier)czYEpmiPKFh%PkeJwxuQzQ;&@6q{-1Mxz_brsNvF_}Xm-7f6GUeX6c$MF~(h
zFd6l5rSEmY5kf<B6C^M7^X-!qJy!Jd;l(*W^K-w#pZzmG&SbZznYH-Ygvr*F_1T)U
zw{J5nmQ40`&<3P%?}aKFs_4I~haAo_(2Q5+yhHj7v#EApk}JE45Gw*jh@<HM=rq=S
z-4tPS*Dm)_|6=@<@-Mb2ILWoMQ^eBlQSgecPXhVUz_<Uo-^XvP9?`vhm)+SMSq&tY
zS@dgWhcm8x=qb9>yX-%CiOcVOj+?*s3&eIt%t}u&A6siq)xkGsXgsv-gd~|Z3CG6|
zaY+NIv(HGEx#l`t8qCgLk?pDpE7yv0zpbbMy_e;h+r-mv9Bw5eEN?)u;<RmD4UVIw
zV|Bmf_@d$9fpGb{u=})QdUYTkEHoRniAeMiTEz8;G^n$#7{ylI?#eTudv=-Fm#d;(
zw{x4+?4*Z}IghH2RX{pxh&!MIR)|~Rcx27#DXzajx&_j$Nasts<AIYqnWI~gqdN`9
zr<o7F%X>@+P(`McXpK@Z8wBE^hPXIuq}-T=7pxia)-;VeJu}qGV%N1yu%zl%$1ze#
zf%H87aJa|T{$^uP8faIuC8d6TWt5Z#qD(n*Hf@_u$&VhLGTCZrqbEl*o3I1){QQE0
zt|OoTk(bQg?l$k-eZW>bwe{%rd17E|Yll|g&Oi7VPwZW#dwC5roBXYMq+hW;nb17_
zEZN!f>c_R?%xr9I${j+)#f<bTQnyyKm|#X;<=L<CRCcYh(+Adq*K?Mt1(KX8^Xin<
z^l6n8FquzUE!3PRi({QLT^>kYkN|xTt5wp5I(j@BZ|0pkeUFrIbEIVxXwz5TIB%)}
zNWF6-Xjt|(=OUEm!5njo%jwvxj`w&pm@1%=?w-!#N5+pmw)8d{k+P<Ydij{Ci<R)n
zirK<pqco=F8&dZ;_4X>i&j><pe(uI++m9Qcm9d)FCQh<ct(mwDy{R{Cz7v|@*p8md
z($Ne(-LN3Brgw?K0EgaXdY74cV^KKN!E|_+H<<e1ZoZ^mH(w189ZfX%UUSk7K3Z&y
zuX_doTIU%==(E<q-erQ*?9>WO6m7g?!}WRN9o0B3w8`8Ae85b!G>5oGew2D=W6j}8
zgWy$@SN5jTAbX|xmS@1p2C!V$j;-TR8r*=#n$eup@i$}9Zn?(%<t%h5({&xIJ}FZ!
z+tjuAi#Qo6nV@Hp3X&;v+S<DoGsaR1RrI+0s@lk@W3iqH$|fICX0iIR&I~+N=20bL
zn|nG1eAic6u94-MzF*UB?I|0~*;;oF37YAf$jEZwpZ)d!hQ%Uq^Wv0sN`%<Z4;||?
zV4^XCLp9<u&?lh@5Q`br2FpTaD|#=I2;1$HFkJA$-WKzaS=+h&o@ZX-%H<uV(+S=;
z3f?D=myDkUUV8pI`<GU9{fgV?r}!Le<LEXX^pWas=2~_}H7*cbpz#AwUEXKcL!Pwk
zJ!&{vtcbqEs%aL6H(HRErXC3jcTAl}WX+Hw{os)0fGjgUPLLrRfGl-SRGiao?9Ik*
zGy^$T2T?*AM1hmXrt#yt_FkC}c&X07`cL;SZwKE5uW!u;RdQv4<_x*$9*S<%7L??Y
zFR2Ad&6ab8+Tm*PV(NzaGahS63&l#%BBZz&iqV+7>KlSMQg7DaX473Z?orb)f}|P8
zkTMq+itag4Jq4CyD!EKzl;Ysr#_uhTE_KZO1n&a#`8F443rZY~v*|%KJjn2Kzw~ST
z=tsYo8#i`ncen7n+xS__WKVFpBdwQY*->pv*<cMmFX&N4!|ghhMpe8zB9<abhNR|E
zX+UDLfudp|_I_hC0vo$9?)(L*bsPV^`Ep|?2YLr3lU5xeG`J?(=D%TA-<UB&7W!tv
zcm1g!;q71g6gNNnMN%4=?9aG*{dr#e!SBZRf%U1w#|eM?r~W?DG_>9ky*YB&A)3h~
zX~?<5;|Z=IwiA+}fG1~<l(H#87SBoshQ^#D{S+sj#!qZYR{)6d6st*=&Wa^h4hw5?
ztbr<pnUhS3-`HHoz34`mJE2dGvvuIXiDUnf=irHsz3YzI-jcWlZhK9b3`pCN{SMAg
z$*#ls29b{9@atSeB-TP@j0N%=Yo$C_^{6%Qr^GFFI8=iwashG<X+a)Nk$wT)lKyN#
zzZ_US8aR8HIl32kbUSkPNLVf%>u*5u*hB*bfMSm3H=cuHPJ{PWTzP9=MI8j|dE+*k
zR_gwfv%30?>v+Q<xJ;8a*a3Abs%!=x`x!`a@YHKv*mS;3Gm92xQ&(n9pMbI-P`XiC
z_jEH)oX(kSZLxM|_*O&j5l7ec^xdE?o<<be-JZg2HH)q$C(g5!j_oba!AZyABr<>N
zF3rvq*~x^R3B(C*>rjD1A4Z*z3ZV!_Yaf@>IBAPbLmt+oVNigJ@tWzK@KlO;sr^MK
zeR(iiAYyL4LXxCfM9Czlf$o!u<JygDERRm~nM<(l61`FCb$b%rs)MsNy)Okzu9MG7
z>y?p-;4Gp@DPC%7W<dt}-t*Xy7BFOVl`nIp$3~%*5?3@=dW#F=NB6$6Fr4k{)(+KT
zv`kf9d5#M$ypB;fnn}u_OFt(ibD1v88ZZxrvUbOvz?Ze3i;Q7Idp)jdTc1+jZv`zj
z&Vf29t&=BIa$<B#e90IVwkDDN3Anyv83%?axTqRVpAsPn(I~TnhjoJGAavmyP-@x5
zkm9_(5o=WeG3N*&s=1MOM#J1#cXm#Opa7C&U6;v$k2bM%BYnCwLFGD+v|Cr*<BR-A
zvV#1<D=k=y^0L|bIvw=Z`4*6^tP|ZIqu!4aD`U;6m=%{VP3p#(Rah$7kAQ3$rAr?4
zA<RlW7H_1`Lfb`1N$A&!)oP&Y28KS7lWOBDC{;kK%C@66m5=EN3nVV(+T`-&)%RPX
zj<RHpK+D<uDs4|LlUDuZ^d6LTT}s#b<d;Wx1F*`Fdl>pe-)F8~Jw$Ha$GceN4+SjG
zF>ved5y$V`r_X`&oET)F33HaK6<LzLztC6>W_zQ}O==KY<ydD^g9Ic)lEk#}%tGMW
zrCnH^(~EF*d!Og7UtvC*5}Qz+pv8Oj`ELfcgCoSq{!T=l0Y)>X<i4&wFU_D+aKwqH
z^%FdatGnA=-I>$K06B1Fd(O>ACu#$@>2qPx*;zK&8!2-z+hVoqp>agl&>|$$Qi~al
z)^vmorcvGZN=+%ncyQ3O(f0V9wAZM6`8aW?-$u$nC+jo%FCj(WB(HDD232I#8}#Zc
zzVUb1MARZV#E$b#T3yaH%P&hQ^o_F)<L1S`+!j?jh%1{};kxT4(-o~l`Pw?M*!<dD
zH_5&iy0wNZ<4r#?=oHmukA$p|H%07Cwn>0v7?${^Ap?8cmsy@aq%_iEu8WksL&==2
z*8I$W|Ihh%|Kgv(Pp8P%6lOELK)ifFcmF;vdh7Lyc2?haK`gbvGMY_s+@k{C)viDE
zgSr9hxCrckcR6pEf?x%cw?O2_ZnA!UX<)}x?();V?xokHf7FqiOta^qRTCobJ=uc5
zWwpYmOp_tcJx~AO2RV4*NmfV4>^*gf=Gp<`Ba#zaTQ~Rzf9ogt+R-UZGo=kt*Pa!e
z&U<8lA+L2~6i3@m2reQ5g9yvjx$1#Tr@L^TFJ7$Zg2NA)7=?B|8H<P7v$%S#H+goh
z7Maep*13W*&fAnGXB@%4rk=J6?BoFmq#pVU&-puWbTnn_?ZD3dz}Dr&Y=6mYYt3Y5
ziJP6`+7{o;AhZxCc;9Ms?OP-?_On85gldr~qiZaMt>83Q5gDLRdixcbHS`Now;-)n
z4CgDlWoC7};_P(b;wW?eP&hqtT%3BkmF9_Mk?OkoZoZmO3}D<?<ZmFluE6PKf?+x-
z?~K-4o25MJy|8D|nt(D5D#7l)7$%gWziJOgqm80<C;I9p=*>t&7=wD$#Kv@L-XfWt
zYKX)Nq%y}FpD%}($ntc>Q{VOqpZ?{Kv+Z`6v>h>YbOe=4Cs;35#H~q*RV8>h*xlyg
z*@9VYb&_X#^$RcT?QAO*_MHd#4?G9kJGlAOrYmEf>0?PD_S`yiX55^>sR3G`naznQ
z;eCUMubFwpPz5uja-tMKS#{*DBMk%DqDQjm9-DGzKp1jj$hy}a9&WLFX-;>1h8Gwl
zb1`%b&Kos2;Uv(AqfL(0@yc2|F|cSPSr-;fJqt=<3ALa#ndkWSspJ(rHZ3WWux$H~
zQR65Lo=rKOSH?nAIsh>iW`i4zldLfrC#VI0DSPw?Qo7P`v(ZqNuMi;(iGqKO``HNl
z)`?pI5v0T~`RayUn)$Imkg>@ve8)Op8P6W^#Yjm%C)=~>e1>d*krQ%1;Mzb85E~&6
zo{%+TIC{~7KRdE`rXsi`4CLBmzpF2i>W;q=RA;+Ri3HO9#G}oAjjpPA@u>IIYoTC9
zaGn@rl^B%!S~T?Dd7_oqb)DJ8REdTWVqLU`ab)UXnWoNxWHtVgGHG+leyB3b0$RIq
zisa1n$ocU;E8R2KwI&8&yBKjz11^w;0fvD*^hnBPj%9$8O;B?d`aZGl2bNvWdPwHr
zJ0@mo-zD|>DebdJRz^)~0m<wCx&pS2gAJwmGRhuHq*7@>$JM;?EKzfYtFhzeJfwP-
z6i`d|?k*)=8D43q(fg_?4tSE!WIT87o{)Q?%e_X~WpE<8=V`%<Z=%Vo>};{;H%IkN
z@CJ$}+~_(liGYV;;QFO~<`LG?(5)TMKYN+I{W;Af5?rH=t_Pyij9T#!J<$w>Y%3v&
zu>cBhzw9w~JxZiZ41vZ6T({z>OHZ=Zc&oD<hkIKz?+CpJA?q2a&e!%Vi2-~WBz0^}
zcR9H@CrM_uGeP=<lZf*XLas4JeuEiUX6jX+UjRrgZE6GTY$m*+$}vo>oLdGOA@*$@
z5f3?#<(sda`{w(N{uN&9byrp{!!TH*WTW)b5czDyoar{LSjm#i<xMxVRH6~g7CqOg
zkN)rD^j@h`#Z9YeDVd>W8j_L~;|zu@f=eaZ!5U-{Es(MnM9!s(u4U=4j$`bQALH}s
z#=Kshu`}PnInR8ygAXlvSeC)aWArJHBe~4~@EafJBj5KOeCH2+kl+G2HrlYwCd4a;
z5E?}(T`rfg(i+7IBwu+k8)gA+Lw|xf&lMx31mcpRTWXd>@EV#vY);u8yYf`=e5G`i
z*#VUBc)cWcw0lopCRPv6*_*Zyqf#l#bV<B6O?|IfTWv!Ok@+)+Y&~-s8I+yjrN{M&
zEImK<Grz#3X;o9^9V%VZ`xl)z$4`)yXkuj2PHZw9Sfv$-j^MoN(i{w-;Wt-3Lkt9<
zi4GSV`jl-=S@S6hNKBfj2f6iob&@bz!Rd;qdNTdl1tr;}Oo?|$)ZEh?JiT+|<VY8u
z#oBRv8fovsWSW`H2c|n0H1m#TmTBfQnpwb4TYS^tn+c(9AvE|9Y9PB78)0J-oa#ZV
zcgvhpvjP}tJ<zW^QrFR+cdRcu)~5sAS<mVsv0SWKTy(51gnl{D_v*-<2BUhp$MC6l
z?bS9mLA7US6VWYGQCIaXV{u)vC<>Bm6H!K%hGaE584Y86otTWfPiY*m$%Y=3fl(La
z{Yb+SUz4foBx=lI)q*eALy@YpgwmTjbF<AHKfJ|9e&i4GAO7Y4h>w2v_wc@*OE`C~
z&yvrC<XJCP%*+I%K-LGBce!_d#!#9Yr#ggX9_X8iPAeDZ$o*sdb<15Y>v*$3%yyWI
z!^fyD;kFNGkzmz^4+hv6Xk>eY)arc`xz>U@cWcsWiKIlvRe&;!QsGmE&a|!Gdv<o`
zcm`T?%pLlki*9X_5SysSOklu!VR`nD#(wwOsBZlH+Emr)nXa2c0;;k#yC-TKFLkOE
zGDI9hndUomnjh?Y+r1xiQ1u7-(g;<`oPGXAVf<Lb$*AKWC!8B}-SW2ui$Vdg;y^yq
zic3>q-z%5Dm%46Tkqh|o`_&%4#IS6zDl5CP{7)UdN%<1pGtGrm2Ob{sz-qOErooF4
zgJ-DpS1DO7N{dYzxv}nqvox5x|J{CVOz2AM<CKX}vett+?FLu0#u_<NY+FiAQuY>a
z)MV$QQub>zn;*N^-hhdkKk3j&j|waln*69w_1HR;k+!%Q#o=6u<}jM5Y4bOM?2+<Y
z@CNg!DCfV6<MhsZZCpdtnkjXt<I$j#Vu6%0Y0y7IPW5b9uX?)mKtBYMO?~T>l+tiP
zP0AE=0p4g{#{oy7`xb}U(Ro@7m>wB<Bg>`SS7h^R-S|H28feT?9v?zK{(Swh4nia`
zsFYzEi7~QRFWKJP<Mh@&g3mb196lQoo`#;m^rjxNHtK_9BpTP%OwAMkw0+Qt(Zltb
z5X9;my+^?}kOyuY>=B10D<%x9o~LgdGM`6ttuKe5ug|{Vpox0_hF<lolFBY*rf25o
zVAnU;IqONxoTrgYbcyTxbGBOZ;=pmZzr#ER&Wag`-J5de7G^@k(Z)#5%HB$;r`>5e
zd3XwKu$nX==4z)C)lk=nYX5z$-N$p@J+^t4>8osPSY;~hqUplx8d7jAxIip`{x{L<
zTe3l2b)5?8afY7t7BjOM&ypz)Z>7=G13Vk@H9XcVx*BaVnkm(y+J>Oz07Qy@LA4M`
zdYqvj^T~{q)kww{vjLmvYIEx?(-LqRp6z@DQEP0n(05(6a(7Nl-fwi`4L#Lrc|n$h
zcaf%Pn6_Ii%-K%N@?ED1g$R^6PaXf&fAy0*|DGFMyLL!kF3Cw7c)z_x>Q$@j2Xjl6
zjNxXr-<8sg;;KCL@u{Yis$g`cbpZ)p9b&U)6YBnCl%-ryDR23p8}|0avax=k6hGxd
z_V>}#Z0&G*^Dg_(UZrMN`H?8;@Z{`qequRXJuZ4u5~3t>uK^|@IllOr*SLLpP7@}?
z00~d$hPpnSclO=_0Z(*+*fjJx(-Jv3d4v}TqS>8ZGGQ{Ma{(_I@tLjtJ$%$?3a=v(
ziXoEPb*gKx4kaTnXhY}{J``=Lf(F5d`diAz;aRAEA!u>nnur9&U2$0R>=_2t(e`V&
zIEQcqK4zLmXs2tM*5le0zV*0Cgb1MlAA%J;MxQsASzQ(-pH3&YIScfVk|V8q(z>T#
z4Rnh{zv$`LLbn>o$uV?2!!RJpF(ee>HN8(3Yq&Rxfp_yY4Su7DDA6Epyq@Jh8SAOi
zfH-l+%CJfJhIYK*3qpxeD6}}n=C%}kw%UCtx?+7*&2j;4oHe~Q6IUv&&xcaL>(5bN
zAg7eBYWpk{H^J3PSoRA}kIvZL+2+e%eT(n-!K=(4HRNR`)6-;6zZ%Fxs`Mmrnzwkc
zv%}HFifQmRT~NwwZZon@IM<UOJt8a)k!ezd-G}jgV<4UsXV+lTW~Bl;lWUasU9#ya
zQoccBtUE@LrDuWOrZ72^iHIcgGBC-7pldA)UIuoyXRI&InZ!tRnXX@R-t{)Y#=4;l
z{Zb_oxx~zjQ%J*&*cir-1-oT+W~o5EokCOwOV#k(BJ-%pNn=t8AEW~PO7W#Oh?<uf
zY7t%EPnp_ntf6s#WxT#d!>o)J>T(*NyZXB@J|FJue$PVB_2qqxG<ICkMHXss`I=JX
zZ8pSilN7S?zF|D1%7ns9$gDv15?ObN&VF}w9rma`K4>nt3Wc@UALrZjxqsL9nA#ZD
z=iN%zZ1*TQybm=FMmLCTpw2jzs~S-mZ)NaK8`T018p7$SW`2|_<Ye^NobaKwW*%i>
zd3%4pfJj!rTSjU5C2DhR4XH|NiW{r1_ol|IIHx(PoApFpE9NE~B1CmlP5lb2L6tsf
zGnfaG2wgw0>a)tda;94+x<1nnLdu$vsb^uXULK_|%O#@{d2Ef>rnIcN^52!F=_+tj
zp3TAv5W9Bed+MWP|L%Mx2)m83ywp8<!v?a@OB2M|44Jmba;^6zI4(W0&tLd|{0;t-
zKmS*G<EvjKNMP`urj2ynphK9P@J+N#cI}jW@KxhnV--WFjg^9?W>_g6kz`ZUOb89b
zu;6e$AprN+7tAIT_V#z_vYILeui3i#>)TIo25k61!ZT!*D``Zx)wfCT(OE;6Yt&kl
z%yyjMQsQuH%H_Qolco%FgzZ_&_N3u_q4GMgCfjuA!8sc!I3}@WwO*1uEK-M$9)c%q
zO_4Y;t!Xp>Wy8eFfE+1%i!TO;Hkh&r`X;lZUSL~K<$3q9(D7B$;_dl#!@q7&zbRhM
zTd*^-F=;F(rkc5o@eG>>>*zA52X<07NAF2xBPr(q<*aiC@R#<+j;zheYH3J)V0Zr-
z!_d*RO{Kc%WY$o+3|Nws_X5ciVv8r)Q3-t-Ow%g%bE$`Vog#{6uI8a9G%X&_{@ztC
zRwwG0&*K5o1^Ld&@jG`P@}K?n|C)dQul*HvuOH}j5cB{OZA?YNSYX*`#!1l&`N!&+
zvH@3n1#>>u#@|pfGZ>K~98TG0p~c<KCP0+4BR^g=ZEogv<tt6nZt#F-a%r2%)=V2a
z#4_>ptGw5y#`$2^NE-=4Aod-u(uN#9d4Ba5KS6da$uvp9dyQGJsfI@<qd6zMK-21B
znsdTMI6l2+W-n?Glnt3q+5&?lE+1Y}WZVa(qk&m67UPq!p&!B~mAYvvk+)1!$Ll4f
zk)i}x+a<KmEtDqlnd}3>qJB!=SAj!vFSwK;dGY|cRl^GJ%-<;_i>paiglGjW4*HbI
zxz{+5<dCe|7fDKs8U{z|28MN~6s!)0exUD@qT<;buww-jmNSOMO;j0(jK%7^_-aEz
za>towDOO#9M4cajiZU@3+eESzKb00V*7Ulsl&7WSI2N54+Jt!jSnVB3A!H9obrUWI
zF{OvFx8}Xk@Ct>}o>8R>8&K!<5cV$DgEZ&Fv)}Xk`KSNgPjcn(GLIfz5D)g5+&gEv
zys+X9q!+s7z+|q|a;2RLR}S{Le|koaGm67u7RUX5#pSIn7N-~ZJCA6V!)Tvtj09(h
zeRV~o>|ORVtih>ZV~Ol3f9J%Wt6Y&WR|=$*ajTBJTr(`zTFjCeQc_y4UK)m+>9e|2
zBboiZT^=1hU}v&Tz|&_~*StQZH%@8Ey-o(AWTX6zG_Eq`u1#w-gxNULD!4l8u1XYc
ziQI8@$JF~9tWd4yS|Ly=o||qg&h|O)Y?@UjTaQmWb??|5sbal^KKHIR9OWH5##O9M
zef=A^`I|9<m8JYIlC5l7Tpn+1?Ml=+u5A8=t54AeN;HBy)g|AJ*~3_~xlY2v^EH#a
zM!Lj$of!I&-YrQ1>K<6RL^}!WNJCnmzx$g^*)}#N)3xR>YQAPa+M2foQ9v;Lm}&zh
zl>27USH(oBfb&_5mYYXrg*TrL$&d$}i-h2z#LZaTRW-BnUJbLHt2}&XQ?7b%o#1?<
zb&`R%HIKY_P+-g|3Faj1Z9<w5i{}#OtU+zb!<yW8_?*<aJE==AGLZUA-wkxV%A$sD
zVBHC;F46a?y86~7<MmuCP1C^>hs?JwaqreGPL3WzGKyz@Y}wRRUM69szAOfoh4MSj
zTgSSqo*@N5vng#cG)z?zR;Me*tSEcFBhFi9hr2vFd8iG!6F&M!e?RWvDu48^{(Jo2
z{_?-iko36@;&6FT5Zn%hG$eESRTC$l8Q;G{r`jpe=bol%YlD_h{jSy|dfm1{V0+^6
zIdXb_gyhWKd$(DfH_%5Sj!Bp?-JUbqZfIt$8hoL@?2tnB&Jf?=lTI{)GZ_QwDjys;
z5nAtQePG$0arM%5w&zoVSE*RenaR|%w>9J5#hLAmBfR7p2hzdo@6oRlDMrL4LaQKs
z+}T26t9wX{I4qxcV{a^bbKTcWLg6In=Zj8#HzTRWF2O*u=!{Y5kfiU>WvfAB^X~64
zzss+0$p$sF9%qY_;PH*C6HuM<d23RuQv;O?XsB!;=q6;e79aeG;<;j~F)lRk#))b<
z2q|{mmid;Gu)lMOhY#Q3$s12v6rp;GdGE~mP7nz-$1Vg%+fLO~2SHpQ4Ly<vZKiT#
zh2#cABj8*nr-43o#CC>=V{5*L_mP~2T9E3r#AswL+vX?y{Kvn*<gfj|_;Y{p-{RVP
zZs2`{i#6TplHp>(*0pPZX3Uax0$3)f1>JL5kZ92-_VY@mP)e0rhzuDThnL<cd-X!v
zhHVtrT36Y<%VQB$k#`t30dZAh=0!*+XQhU%pg__3WXZT>4xvt9-4g`{0Y8X^g-2YU
zkVW8guYHxq2hGrO;6<sasT9>DqVwduqG=|?7|9u;bM!+;FCEizz~cxRT2XW_0XzFU
zOlH$EtkAbv!oLd@$o?XGA%~z-b{}z0E&huGUd>0{DBA0U+eR=sXWXFI)cdT{j5O5d
z)OkqG5c`7sxgJRopM-u8T<|*dir_3uwdCpfpoLR%j$s%Wx`emLA<3!}8hX_UrNNQ<
zOd1BwA54xRCvu-jX;95DZi6<Z1$xy!OL6r4#_TbZ7;5B5rR=ydm|e$R23M7W-QG*i
zsUA%F%-C^h3$wWX)`HX9e@iLeN`tyFQiD;?7fRu2_t1@CjV{e>1#W4Yty#+jm9}Tb
zz_^S}TC)(gdfM5XNB56urY(JlJa}}(_WmyY$(la(3{kZ|xy$6K>atzXVzUW}tCx0o
z_-IKR0|CdH%t@bF%!Emw2yZ=Le)|LtcXdCGZrndggCT|TXurEy`Cg_L<@ZAOZ<h_t
z@G|ZlBIGiCbGq(j=t!%J4eggXd!qZpkTboY)UJ@&-P`5PqT|YbLqB9La-t^~4NmEE
zGdx4LW-xlPmo-9uygA)OQ8uULwpkX2m^Wj)%D`g;HtOe;tjhux1#_1!E|&I*<)5I&
zUAZ)nW!ef=vs(Y6{BvU(trsef*U=;hBdu-JT#pU!#x$U6>Be)RikCfOvsKpd#vGzH
z&YSq`W8bNGENnEf<$D$8P}ZU~NTzj}ljWK)Bv_jqM?m_F*z`v6Q^(K?t#j;8Bi^;|
z`W`b5pndKCMjE&no?yU@&m*`f&ayU5lWu2ZPU;NqtA5`8&j6u7UcZT&3EAybs&Fx?
z)ALwT#<_vVw;ZR9i4O`?+CC4Mmqf|Rw7tf_Ugs5U*3W(1z-#mOR%6~^v1+2fU!ezE
zPO1R8?~uMn5OPX-#%Z9bf|_gnGo(bH5?v?s-Jp5*Vw%9LKiQjmu=9Z@u0O@I?|Frt
zy#wCd{W72V<ZqLf>&n<FH0ufg*|X})^Sr@wk+PRqVn8A8m1nqMapjrMm@LfdI<XA!
z0@M^%O34y-ZNuJkPx0k1+~j+nexB1`{Uoc^Ig;U-4}X~Xe4G0x=Lju%=<qHhK9EHi
zQnqZ|0guo)2w8nkgoJaM=!5#uT0CAh?Q4)^@{dIHks%LU-`}V8fz)nso)gc$_yoV_
zdtarUIg)RYK9DYi<@t)^2gjVeeV5ocwhnf1p;g})3^Y+<u}PQ=jL8Px1rMpG!837z
zbxJ((#8ukqgdk&_kn^xTn?pLm2aM8OfHGZ+-ZF&y+#@KZ*4sYsgWz_z5gMKS1j~xC
zwSp^?VAD^FR388VTs9|klf32H&$@BXG634wNGYqia@M*$1bj^I`X2MU{rc8yPz#L&
z9UKLR26=BFS4DLSfRqE@)x$DpAbCpocc^EvHFZYk9i;)KC@Qd+9e|~XFn!p3zQxzR
z_J|u#K1Fgm1$DlxWLu;nB(Ij#4##x1O{R#`i_@Hm{=Ev!)kc5Z4s(VfL%&|oHe1Bd
z5Tj#0-Q~Pr>#vftX)uFQfvt(pA|&Vd*<bw>U;fH%e*6#p2nScUIeYMsPyhPoI6t|^
z|K-2@8-$r1JY`^1el}UDNL8m&CUwsKzZfbQZ0^COY#I-v7gl|3^*Vd(ADiQau_4{4
zR?Eb}1}7PhTPC{OkF3ed)>K4X)+|Fsia}EO!iba3hL52b0pkau@8R~*LmDUGvbknz
zsI@kHx<NLfq0ge58<9K+1kc&&F+m(r1RuT8a7#f2;u2S{UEA!@H_RDEl1SNPYjlLe
z0?vmzz0i9-PDI9mv$eRU>&Rt#U34=xICI`HSaB4JrqwykI+aAnfb*(}bwORBT@YLt
zeO3rTr|&koNFwAu5kl1IjWtd~&KhBt(P>3Wq^uL;MEST^P^J`~_Ut74_o6LV>hL#U
ze2VJ0kZds(dULX&%{i51SUUjg%_?(NrOOsq$rA5TihjM7QiIVkRJ*jaZeyBMa^ajg
zbYhc;&4`CwMYv{yv@u;R6;#pqIZ<t%b;)%m=g7Dixc<^heEt`HjcZT5#Ny!{&MpFP
zym^xkJ@+z`>71@R!v{#cF!YHgnYl$usvB}}xVz2K$r*#w!nW~_6`4D$b9N@X<gR1=
ziLWqy>Hs<12A?$7XM>fZQ|Iyb8vOxsWy@r9L1Y-{FD_Uum+T&1G347Gal8KNrkTj=
zj>XvpLlOoCo1nsAROC*yL3P0~o3}LH(TdP|p_9aN=#UU}#^wU#%+|CauR79Vfp{U1
z$)!mvT6s5414odpOw7xptC^@?P)3g{*Fo2XcQyeLEp6MV8bxI=j18R+CBsy!Y#)N{
zhq=<A%fzm%p>YwH_40TIshvz?@m#)kW4kU9Z#7eMvoS5})viVH7La7Vs2h!P{k`j+
zX!pJRyb2^U#?|EUH>h@degx>6#`EOi0k2%%W(UpC>=!HgRmXa<Vp#Qb>)s?osztu&
zg_EvlGGylA8!+7{@|KZ%WY<lBkLZn#Yi^-pn$Bc^mlsdU8Rr5iZ|EfjYJ^*9=&KIi
zRXIjZ18L|O(i+Jv5I3~x)?9ki6&kRmdFva`D41V@Z8&ART3I~BH8@h@#Z-eDDYBp1
z6FX;hm~JHEBDr57eW&Jr&g<S)_WvyOLuS<{)}2!I`wU&5>5`Dl=+Q|+e4w4pXy-dj
zmn+)ogq_O=#My)-J+blZ?Cg+IR$r4oRgI|LBVAyHnc(ccWasN1GXf`N-!C7rHq-z`
zZ_c)Q@d_wp0|=@bdHf?lj2yo73g`J3dE(Vq=q0gy=RPh#m``~2mFKztTff6<Nc2Om
z%~%>(NydqzA&|x4hy<fwJ1GxXVB#9qbS#H~sc-Zga>=}wp&=3p3^{SQzs<CbbkVav
zoAO;BdWC+mA`KnwWQvy-8qfT4WPW&!r_vLoRY&g}r^hSKF3yZftHxMK%9^JRa*79T
z0Jg#epEFx+WP58$c0zE05WwYtbHeu4+~9iKyPUCzY(ENKr7@}R5$_3-@VP;nz%UsI
z^Ib%u?NwfZ6X)?1+0+JO!;O>hVh~w!#?`#5AQ2lNb;V%Hd2j3y17##a_}3KQZ<1Fz
zyzSry*~rnV+rd~2lnl?JX({fCg(m1W3eD1g+GMrZn2#er2a<HdH%f(8&GtZu>KoIL
z@NIEY)hwt|)(S?mFhshUPl(No-1YTm4_!|iC!@x$Ab3{jic5$@)_uo3_244OdoEqN
z#$s^<X&}`s)xnyS;u4z?m+2gQ^}%ia#^3r|<m6}^#7Q6}_{1;$CLjI-A11h5bMIVn
zmlHP{w4xP2rLJHFMG;-urE?<WL2902)qP;d)25E8bZd{b#fGYn!c!|yY(vlBE(V6<
zbK;fxXv1&1UyAj&xyHE|)Y-F7xIvH{>iZGb@aW+M>mieyaY~s|B4?$d6erXsHjq^G
zPZlI7rSQ?&18v}vZI)IjIOj<jE?v3AY&xsIix@Swuo!JrMnT$Chw61=8A7Sz^m`VU
zxzd<c-H}wlKo9ET!mMa}Wye|L>Wm8Lpx$3!OvKRVN5j>fO>o61$73<A_UdX7lLx^^
zrJs_r8I++-sWotvM%9Y)l*ZueLZw#vRB`_`@F^GfZ9|#g<yT{|D{5*aRj!#GN)az1
zdX-8!YMq=@5Tq0?CC{!bYah&+vo;fo>=zV%a{}Zo$5CvbD*J%*;{-K`-#9;gl?AxU
z6Qyd@bOJZV9N^5Q(K`m8c<z<=^LPK_|0mz~hkuAKo!+CFWo{ka<NYtckDcvZmPZRx
z5L`4<jea0Zj9O~<Bs$^A!+qYkcT5vnBx^zOV0F$DvmN63nt1az^Ed7wJ1>xfy?HBE
zfe}dY-pUSbkkWcZh%JR+m%0_J(-RgaC(QTv4Y-ubn5nyvWRRY;Ub8s6&?y;4A$M6P
zW+E0hBSa6EcK7I4=gis$?|KrMldi*u2|gvfYmmHT+qVp<L%e5@a|KSkX`rx4ri_si
zqvIpB8O#QLmgmzKW3*~)YK4t+`0R+425gOoSJM#hN3Bz78q`U;<YsRCWo=g7#zeQ&
zVDi}H%egW#Fvd}7fa~?87Ui3@buHv%V-GK&i4^Uu86lOiE@hpMCBIG&r9On4LkfML
z<H=ugD2vR+0qrC*-)eZ`(u8*CkX~5%z&dyWj!vKv-Lvq*&}VRo6$7*En6=-aQfsp*
zFZ;Fgco(WcQUx?}Qcxyo4mFKV+%;y24bWV3)KePW!J`Z6cM&{32F*K`K@A7;Oqnz}
z>zU{b$SOy$52M0H#3WWmov-_qt)pUGR+?jx%C0EiL?^;PRR0YDvM=#t2F&@OIrUwK
zm^(B^11{%&>ND%rnyxtYrmW8HL$07^0rbLb%JmmtVrzSkNB17EUM?UBkM7*VdpLRY
zh%1MOT)TRep-UXyeaO-ALvrft8VV?I>7Ca}E>f8Uby{tC)nYYG*^9KN&Lh($qqU_Z
zBo$^G<N7b&-{m~PapU=ykl<+c5AgFTZ@u{zK6fM^c=g*}<g>s187{;D+FUPFvV2o#
zprs?Cb#E?%7YPJ1yD@UVTj88Aq@KBJV8{dyBuAT+G18Hkv_fnK<_CfIK69D%(If7k
z_QYAFjVmPegxv{koD$jzLZa~vUOcg#a&)o6Ij50isDuH@c$_lQNCu{68ga~<hTN^W
zw6{$lbAEJ0L!_P02<?PkVA`}=Cm0(cIvoVLq!JpJbsgkEe-1%^xU>`UbcUZ_vdOX0
zqC>>Fs7<t_ak4Mt-c<Cco8CrZ=Bt${V*7N-v=;-3y%Vx`x~9FFLq#)Y{APN6%QmP5
z-5Oh>YGqO%cAelw1dBT<3rbfPvQaqdA*(0pW5x`RiDJZ>uo3kt#zhsCc8O=6evy0k
zZgb<t(<3tG%6{o9RZmC?Qij0J&LNK;-m*qQST9eRPiJO0;KtusvQ7K6AX%-JtY;lD
zO!1PLPi8dD7JWLc1#xh7Xl9hQl3r1&TJ<6rj1J;m;P3qH|Az1W1K$TM>lKocmsbjr
z&1qeMi0rzQT*gf|nIaLZ1*5Cq4I8WePu_C<UF`aA2+y%YLVD;I9m9GcPFmCG6`H81
z1}zDJK^>NxiROVN-7vBXcr1n`d+xk*pFxa>Q?3_To%Gzq3P&OOMw`%9-F8C=bbUvc
z)=WYqB1{Ag3y%R2kN1viPhO{ZAY@VJ-N1-MyGp+)z{D7q?L)9b-+`g%Wqo>YR6f~^
zF)5&kYF@_aw)HO=od!7}ZJ_yj?J5$Xx6-Iv&Ig^S<N`X3xNn)>=7beI7N3-hd5|+F
zy^Se}WO8;YTddFLxTJ7q^fGdm|9`BO|Nmbhcx#IF`A}@z*8?n<HDBs>RL?~g)uGgE
zL#i23F$%5qyH{y}_a$Pcn3>q$hmkHRMnlybIZiYvI;9QDnJ=_@Mo>%j?PFxeW6=~2
zoEPHujJLo18tV|5ZSCM^6DDmU`fcvszs(cvW!mYK^YytpIQvA-4xe+K(2y0a-kVL-
zxFTbkFULALzH)q@Z{K^8ewkSO>KE90;xg&UJ!l3sP^-M{ve!Po@85g-4cgeUd*u)p
z8q&IFGT&wE@-_7mD2+{x4NIyAP8rFLv>Z5nbc$G0nzCk|O7UG6QHR`|xN>lb<)b5Z
zrwv{PmaBn_6d<VnJm5)Wwt|N))2D_mtqArz_yVNb?^3jcWA|1-0$Us9W40~l68%%0
z*sb^qF5sP4U03jCnr0JErzR*B%tU1ag>IQQFKTImWxUqjCSO>rSy@|Uxrv$ec;`kE
z%*OGk>#0J+1x0?0itp+OVRZPdXK)4Fu(M@s<j3(k^^A6Geyq;x^c<(y97!z{Q%dxG
zPaYCo*D(zG?{3KY+}Ktc9mJeyvS-_SCg~eNzcXW`0y@>YLbp3@yhHt5P@|gyG)e|(
zP?N)gZe}4zYY>M)?F}2fcRqMR48)+3Lu$&H$U{#W29t+mpdZIR-jC0AWpxyQL;V%l
z00?~u5S3ZrQrYmd(1+sk>nhlZSIv0I`KC6y5r$5iaicR!l}@O8em{sh_4Y{_5xLOx
zt%0|!;p_sJu3hKKGtaRz-=&?*xc%lEEEfwVlL=k7qK%G&CvR~0#C7OAx88h{*S`2W
z+`WGf=@f9CO{Z)(fh59e-P5nuWXZO7X9enUCV`h);*{*vD#0jVwC%waODPPJLVc;7
zkFrPhu3cmM$^l>a?cd=0fB1*_`Jew;q+7ydi&sAUy|n+uPa)aUi!j5nP8y39VWdTe
zQ4*3<!n@3*30&FP=3=#Ckc1OqKX@+fAM*O?L$dT7Y;Ch%XD-$k9IppH`Pv(N@#OP#
zDe&f_Q~EIBV%?EaW_Ks@-LGEf2fyQkJaf<xS!qsuJLlq}<Kkjf>zSA{$(Tmo>r|9(
zu(~=i!!c_jN9PZD{`$*2`|Ojnts^f6P9B|dc6vf`kv!<rl$|Gv5PjC<1)FdO?-){#
zIF)oo2O)ZTNw}RI+;j)T8@L0(N3DIt?uTtcb-!~CN7!I_+4S2fXynRgKUUW2eqBKw
z``%8>NjZM=wR7KmGvR-QS0O@Z2)i{YwqsX1l?00wv16+68S!-k*i5N%-fb)-`MLvj
z^HmmonP@33fQnpZ_I7r7c=V8zZ2Dsq(n{A7r`e`O^AX_Er9+KnFfB*7UMpQMXQlU2
zT*Rav9>E7zs|C3q=yR&P`2C$r1k-Pb6!**Wd*vDvqs?$O`kc{KoDiJw#@%~#rwNf@
zu7TdNW9=%`%Qbcpn-hXsgpbS>{cH<Cu74;EN{LsIv6_*wP*DRXZD4d-qld8*!SxBZ
z>hKpA<n9FDxDxm_-ak<anjYpTa+O7e>PCgu_o#*3@#!(nCw$4wb&eiSFNKz&lyL!4
z&IBi9rw!)W;sU~yHanha7QC{xA}}C$dJ+d$E-`6aorajps}{)GP!}_sP0eKqwlB@8
zn6Z)f8j4-0z}eP#F+^g6iVfZ<tHrR2Ix)&Q*AVk0N~g`DQ7%c6P4?8*J=?WR_Vq(n
z%6SqonyxTp&6dmt9A&XTSsR6lA{SY;lo%tdS=Ix<84#lgWT1E;md0%}*5KWI#h@;j
z(K|x<_uz=$YbIL=8ene|(CGDZqd6J}k3~Xg^Qq2;mM_O+kSm~BdNBstHqy3{*m#;I
z(6)it1ezw&#8CBy!I@L1cf{Z<MnsGb2#!2XX>HH)cD;fblPLAczF#}x_k8rDeDYU*
zjlHccB*Vd#EBN3!U!C#R8*kG!fo5kyl0ZtCKt%dP9+b{4W}=}@dgj{RHa;i%en65k
zN*?*lZGVnzO-T12l0N-q{KXPC3}aTQ>3u7eu(%)x$CEF;m&0dYBu=-8ZOe3bk1(C;
z7E_F+s+N^ePv$*-80gO~IJ$qVbW~fn8C|;ruogwOJ((vCuhO3!)5c78!u`dP6g6Hx
z1P^H-y3E{lNFL}UF$^cRmn2H8i^pl^Wc}~aJjs`-sDV|!?5(b71<U-b_z2WA6mx(N
zb{3TELSvlx!U`IjVc!I6ew^x6OFvZJ17qV?f!Gborgt0b*!xNsEkCAAF!lG+kDKqB
zUVmGIwJA5Fo?;4AeugV&tSbaVYic$q@sBa<%cQqVduz;&eh+WEh7(6l(09VnWp!*{
zCS;wFA)9fH*fXoqE-jwzJaAY|h~B-|M!8`VXuZ_YJEAdD^uV+8IVW;X3`4JAm}nE2
zb0VdU1~Z9H8gtQCt3)6MPZI)-fns_FD&3isNPSQ0)*v0(7tQe|2;gj*>U05mk9f^V
z^|95w(%=<54*`l>d~gc3H9?E;;EeJctga{~UU~uaY#m6cw@9sAr;>HCWzW!O)+sYc
zv6!^8$(m|yh#l=@N;}`uDOwYl?(eX5xX;10EBNSH_8qQ?Y|UrvY|WW%w=~np;g!o=
zzI4FO?lzP8l(r4_j0$b=%w{d~=^St8l#=ttb)ul+I2e#TCNfl0DAP8MaO=jxE}(^u
zj|tNW-~D^OgP;3nKg&=2ul^!o2;BeekAd?%@xl$JmKUqpuZfh^Y*SLwwIUYXg6?nS
z+1_dchtr1l?d{{Ur^|Y;o}SG3(6t*(hRl2R_IPn?8w7skt%v;QpE=^!?_@rA5lM&7
zaWUB<&F8FV6Yi}8|KzuC@z?+EKj%|lyGN3S#@5?|M@Ot}${mcZo+RKBv`wS;*_+`|
zPF$XB(`3&?GS5GCmDUaVX97Eyx7j<~p^1^9Pn5b{2kaKFV@wth`qjF6qD1GIwKEnM
zYxb`{gEv+Vk~Fr-Hzw=btVR6@Oc(OAxyrjzW9PB1Et7T3AH@4m&C5bLhl74B#7KzW
zd=u1f$p&?#OL?2fShZU-PXuHj%V6;?xd3W971@};i4`=Jkpk8uj7Ywo*<<cprJRbW
zp^HVr;KiK65--2!+j#q}uR$^wMT@n`2AzyU4?TisTQjDU1A?dyFb$m<0+i0qL3Sfj
z>Y#~%^{^(T6=_(L^8g6D+m|6u2tHW$s3QkR&TFGr&`LBqB|DwcCWPdK4&i(WU;VYu
zSCg2MKj`x4T30qu{atb`JjBG}){9p?uU&H3D7ah^59O@-lG3zDDR(v|ziiyzau1R=
zooSUAPC6B=&t{d*CXY?x6xGZLSaz^9x{z~dx0Ncz_0iEIx+L@=B1W6cxhJ>*M2qQ^
zk`~@!q-Um_IbSYGM4H8lKm6^Ef9sF5?57K)S7WcMPhO`tdS)r2^fyY{Ojf<==-lc|
zu<v~&*x~MdB*j4XK~ZWStocOWa~;@d<CjUMZMOSV*0MN~p^svdaz$_T`#~g`ae}gM
z?eiomLqTk#7DvtwAwUk=h-Pp0|0VY;kX@+7b9SME6V$^Siycy0vL35?DCh{j6yK$Y
zefMXnG5WD7wZ=49v#Ng}2B{6K_hKK{IG^A(a>0A6*_<}7I?Xa82lQDe@;e_rA!^eb
zHyYEnjWmr?{+j4%%FnPrJ_KTngb?h{7$M<mBcRk`7l0sy=m^oWQiG%2+g3x0?3u*C
zVzFRncNZ5u*(J8GUE<-bTZA?;+uBmCUS|bl8q}Drw`q!P#XHfnKaGS|=%em_IcMHD
zKH*M2XDyD!XTE}e>k)D>kh{Sq&-LlHOQeMZPU-F$L4sz~X+(le5&xw#Lf2f%iL}lP
z=WFi0b)WO0SAfML#!}AKq<U5<;k;+k!v0Q+T&(fY(M#mcqlG#=dd<%qQlcd>by^^Y
z80h;2F6de)FlcQ)JvGlV7|>9gPHVt?&E+gPwxwY!XNA&xgGDq{>83OKm>G=}K+=1s
z)OtG~OJg@`s0tlXY(LTr@3G9*tLitjS5+VF$7!0lk_A~7-S~Ia?5iY<_>E~{p$in6
zXmzsIKi(@HUVmS2MkC{&Sm?+0d;86)q@%iUIzUpkNqSZqXbN?$*6Rf(i&2edBf@TU
z>^9#3zQzXrTCPLUpZoH>8kII-PQxi#-lw{qmpop>T&p{E0UEQ(5WKN9VvICxAV$kb
z&B`{&GN@r)-;;(lxDFv{BV65^e03Mb<Ov}JTr@^k4Caqe7+&f^>{I}#te<k;S))6;
zGyAcqcTStPVKA3=R~x)i3nUTNDYNcTS6@MaR9E6#96m&1oU*mGN55EccJh#SzV<av
zj*i(oJmC8C&okZIRVUzO&*{+-tL2iuTao(}mv-hn{nV2@_tXuZyZ$7HJG;zc!>o<$
z?``qK)hD=ec$L{~X6skw2wBt=Fd1M_k_0kRhYjkHqGw+LkSm3~EDi_cj<h}{txiZT
z^PPY2$5@{q^Y#}%!T<LE{D0uT{hR+e>A_vvOIvJhsVmda!!W4SBIZO|uZ($Tpqb5f
ztmWeR{*3SZ&~tqE`<~^4yO(Gs(R<<1@om2InP+%udynb%geNZVFv$a}$u{xY)A)lc
z$aI$nr)w5Nq6-a7CZxQ_bb5`uX9NH3&;J&$Kg_I?f<(9PKO#A=d8@vtI|n_-&@AQP
zJw6LD3;XSy)#{8V4|lkBc^8QZ*LZS(7=`WK2?qyT<fM|LU_&aOb>9slHI^;g1Xc1t
z6k%_BpXJ#xmtK5sjBd&aAFP&=!hW%7cBTSm<4$9LZR~{t&W+EwaU(=wN}nU12IsVn
zD<F3W;hVBS{pOnq|0}+V5x}TPbVjkj05`7`S~0y?(My<KP6LdceXfju$A<Ay(L`)I
zFjk!qc7WRZa9IsItQeimrt}$>i!<gs+tkP#r&A=KQSDfn_y*6x<tKUjorg9p^z_3T
z-$X6a&DJp?+H|Iba}z`Y%Zqa+lU*bYWEXMKv9)!Hiy?!oG}6U^s;<g`sQOx;)kH_l
zkaW99<|_8_U;0Jf_s6~+NkgS1NHu6Fg7l)@+^8zRP$d~Dh~&qz!LT@&N*OAxq1QMA
zDY+T0<Y!>HcGQB#m*S!m{CXg**0`-Ht_eu60@s`drLGZ0on=V)0q6xM0iP3s>QaM5
z9y~a%&2W($=A2Yr={!SDILWj?aFH$vli*ma*Q|0Pka%{>^UT!=Z@n%I{e+=O?9O&^
zu~BNQD-4oCoen0-&uT<tF42I|x&uBc3hhm(>d{d4B*|Qw)l9-WPtJjHiAmL3my|!T
z^-%7bh)pn!`bz;L%mq&ku1a`KF|IK{8z>^05;250Wq<`&^`rH<G98&0XXv-RhXMpy
zQ8G%IojMcZaT3e`MGF=0-~Hk-1~nxr(Kgwb7;0i>x+5PcW)2uAkc=YlyrZbj^{Pmv
zXu??Jdqcd+!KTiZx*keFsX&Fp4}<2ydTZIeGrGR&uSy(**iEU_>)NbpaZ=3QYPRAu
z^5TcSlh1zQx0tn45}xCGw>i9anRYs%-<omElGH7TY0j*L^+M^-{kq4unu(Vs;|HbT
zdCQ2NMYwwT5;yNZV4WL6j9j?P=T;|N-g<`q;X}f&eTM1o_mP`c<6TT6TU=Oe7D6R0
z7s{calN{|nY@6`<UUIT5(FEOqJUHQvU-(Ua<>x-e!L=<0Gg}!bT2#H)YeT=ExO^Eo
zKcbaD$dQw?#5ohw3MX8b@G=l{#xr3}!ZrJ3xuYda&T4{1L6OUQXPlOWIzWF6cq<yd
zx@Q$VxWy6qB*dr`wGz`o0RpYiD*LM5`i-fgy6%jcLCTq@1#w1Qx(hyAnKDk#%hK?a
z?_ZvC>Uq4;0E>*xXkoUMGnNfaF5XBJqaL>8!){V{bpoNEGbEkZr(yg)h4NeWYpnvD
zIgBgwq<|kfnNky&R9FXMJF3ej>u|II<{{GfifX@Wug%Hi2!Jbi;mSDM8>pTv7Ajlh
zluNi6NZBZ`<$8fR{F<ArNo-sJ#-ha#)rq$@QYn{vC&;i?&5y;+6y`<IENk!NEB)A^
z^?S+XR{4n-CD(ugAFSxtiIT(DX=yo=G`r0DvZPFmq9ZQ@1LvFp4T-K>>40Ff)0dG@
zGuhqa`qM8koA1!|3m)CR%W7D$PC}f|xbf8Uq%I)?^!<{dpK*S2!hCC&t@)JMG~y-;
zZai_FlLr^%^`Hq0K62^c5+Tlccyvzi8cUK>CIp>^rmX1>X0xz~!%*FRol{?nLB&G_
z+}FJ33%YJeN<EY5jLB@D0b%FHll-0^`62$!-}q1X-~GRTk{?+;<Y)fx|0n*)zxTi9
z;NTMB?tK8dA=5fX^sq`+r}&@-eole%AmF_4!gEjY+zSWX59j>o_r9Nd|HCh`lFY(+
z9-ZIi$3OBxZohel!zZ8M%9rQNuk3L%?UOD}**n<d$G`vg@~N+Wn$_7E7jM47`MTrf
zYcqb}r4O>cNc{7E{+nF;!{5VPJU4GWK)lj$y%}A_oC$4%7swI_;_1?Y=MSD_HfcDy
zSn<+xFVnBrT%4THwKL+ZB}^Lv0p}v87Yp6{yw<1@Pqd240VxmW5u;P==p%cV4v_T)
zJ2##%$ROiWr-qLkL!7cGi|J>PSZ71GoN4I1O-yrk^<Hm6an%UTl}1<hIfr9#X=6k9
z=6QWfHmKDVGjCEv88-n#aka5C(-qs)K`CQ+kfCT-tvwRTjFYmwYPh-4M{2mZ9egea
zqZXsX1JhGD94|fpem?(&-{RF*KST`Pj4*Vein($+(;2wv*}t^M&D&EAq;{FqcjU<&
z?=mi#F-|U&5C~B<TZ`o>yTbu}0EwOk&%xdy=ZjMs=jhW~iz*rL=J*FjeK514K|Ilw
z`I$~C?mg^T-0qlOZ#EiO9y4o^jS_tGRY3ESn<|^TbZJsUuxJ-=O_9_>spQwGDT00f
zT<!@qlNA9sWYW502tCtldxW-8x~vs8MMou44mebZP%<=)&RTl-JUH&$e}IpU?6O(Q
z3gm#J0n};PTjT9RLt>y$J>7b(I)v1*z1{E&A0PM^Zw{Qb+c4W>I-AqPmeh6DoCwth
z!x_=nd9ql935-GQ(L-#AO=F{zFm8n2Xy?Jg$xBfHrSqeJ1RH6-Y`C!4yYL(IzKvL=
zGII5S;;LC`Ne1pnwoK2Gv1XGQ$%Q^`(;5qnmpZGF8Uuh8O<+@2Z^)$c3J#PtXj234
z{^hmzZoFd9XEb}gp2`3(4$NlS<VQEtTr*N3IUx!pD^^O;UK?o{D=DS=C~G1(EDzTT
z@(>7(Cj?!~J~%>yeO<7_(3(>>s216DGqrIpTCXy|D%7{ac5vXgX6#?TjQ^}sMqA%-
z|K0<hdg)o_vxf6C&%>i5cBlK?y!Q?-9zMxz+HiicAo@smv1YzA$4QS*LJrQ%tvvH)
z#>0olJoV%?-n@CAG<f_pa<p3UOGj_={m(u_e(ft<yRwb{ecyw$0SP6d!f1$PZI->b
z=&Ehq+KkV@$wanE206p3WA*lJ-uUTX;maTY9Pb?6=kV#P4Ckk{IB+FL7RW9#bZ5Nu
z!nd)!caurmGSKj~`$wd9$}DGAK%az(7aBZ`44ieD)EYp^<8)o6t@`3IWYH>md$KR+
zyQ>YM8gdu#p>}a2T`G8zQNw&;VHf}cr6?+}NclnbH!d*7TQGtbwvUweY#Y7p%c^<J
zex#7&3X4)Tvd%uZ*~XRT*4chyL)enOFlyuLd-yuzC6sC#o523wKKpx@nKTVy;@Q8n
z3q5@Pi(lgIo%;$J*_oibn;TUd<vmzIY(GgsR4I0aqUS8?D-q23zeSjafXm+i4Qd6c
zWCNt4YSM!N0Tmz5sy7ux6F7#1#E9gi>%}PULB<JsVK8~`38ASpL+6$9tW<4}#r~2K
z1kvZHa=u-FP-+~}M$#zkrB>22%4(DXeC2U%DsU33&axQq={?K!S`>zxq`^L?)L-SF
z#x)Xs2D6NH0i{973Eu<`Z#>P_=U*bmz|pOngr=o?bc_pn2Ca@ClFycG#R+XYBlV8;
z#Tw57CxJ9{T)uRf+155b1n%8!*=eUp^t7%e<5kvUK(Y92GGNO>wt}w!Z8;}ObfKiI
z^k)Nk@{nK<WH@7eexV*%?H2RxOUe{+PWQGL`O*L7zs(mv{a1PL+0XFm@Bd>w^WN{`
z_22kaE?;|s_}V=NNig(0aj?gFy=L7fdPxdo*>k9wwmM;VIkB}fBVE~Lw%zhazW)RK
zgO9z&lTTga!NUi<`s`KOr*ClW<&W_4ork=I=l!od%j@sFNi%~NU%JF|-~MO##b5at
zN9!%P_$p7{c$V+~f$!o9nXi8J9o7$5#4CY2k50gQ8YvTVl}v0mQEOBh(s03KNW6G>
z$UC=h@!T_4S;oMZ?;f$Wx6QLRZXjL9`NKzSPg~ZDL^oub(BQM9aY8J6qV0!$--8dt
zAwv^2ZfC0@$r;nD2jCLU3weD?m`qGnVC$|dFAUT%G*N+h(d^t}_UOj^kh1?3@UiU8
zk8J^V82G{*`6hdPOE#$bDx-J(FIR$+tEg>6V@vpWZKPy8oXf;$bjaO6x+?9)Q_Q~~
ze+x3wdntyQYBu8y*njT%S9$aG*Ld-TSCKGgD%zX2O<uoqoJ20|UFG<}O~{$luVBbp
zusNN2;hpAi=8TUKvarq_i{+B-*pPfCG=b<mJKKA#7iajvu^N_miB>=vh32sgQFVIL
zI$LwsIwpMjAAOAP`%6Czeoan;YFh<1VC(udwJU{c#N+C}mx|IF8|QTjY}z?-j+9k<
zR47YDQ&-)Dv=dXhpS=a?vljAOGp0AL;Af2<+Kjil=BG;1JhwuNN@hf)R-RfsCg{%B
zynX+ceVWB+glGYIx~UIdo7e1>N|aJhcETDBJ>PG)xO3L=+WnsMFhQ6Q#1opJY{de8
zcz{q%r@SHCzOD<85OKayEoN?sVWRf`E<zBp^Ln^}(r$gW!loPo1;({dbd1a<NvE=!
z0$HcUx|WS%ZPUzx9_qC(4VkkhUC7yw*SkJq8x6FSg4CKbqw9Km&YV#nih{Dxznpmc
zT{3FRWCe{cla+7Gt{%)x!i!fcoLu%1hXk8IJ39iaes6rgNmPd_<zbMGEIn&<K?otx
zwAP5mSc~{_XjOV>3^)fdJ6sHSAJnufXI$4SKrl#+#VJpxSkYhClO3wgc#SDKUii?r
z@rhsiB{TO?R?7N(O*5G=owpFD3eJT@-vr)XbX@f<O|v2;A+0mhNuq6nPEC?Wd`s|&
zm|%O-vOGEA%I=K2M;D}3!Z(pevyOjp_br||UNZUG7umgjiTItbGUTZ1v;d7zDWnCU
z!fiPII!1uQDtg~_<nuEge(E)D{=&yOdHpWSwC2&&sOYxtQV<47qzt_iT#PgbSNG<e
zy?)Nt_7?ZgIv%b&{Iq4r!Wx}YZ!r)e9QWre*Kq1GgCyFbKOSXXWg(3JZd2mm?7dVY
zG3p)=oKoh?oeXBUC8GWVp^i9>nd2~~1C@DDCags>S^y}@b7|MM7XQ{T7fFon8e@dp
z2uhago$G$T2}D))OZho&yg!o7UD-Q)1_}DLq3-b`t0Z{Gd^%$`n}X}<a$*u%ws&Xj
zP4{?odd%tB8S7=QfS1LYk!@eh`rkh2us!EQkW7-nrqpVn=t@+Iftk+S%Nwt*?dcn0
zlC%N1npoHSjZAULRD2f5l63nOZNQt@RL!H=@WAHeCsXO(j$DC+Dor-oMwz4*x_LRd
zOD3oH0h}cl1Z>i&u`w>^vEd0Ncg!ZKA_jMB)Jb*!6<`>~^B`NbRa{upTH`v(VlWfH
zvQBk!o-L-&no@IuFEK75wrowNN@b3Y$@UJuJz?+4p}O%p$8dU%OC47ac1WnZaq2rx
zk9zv$g5`RNKUr|;a6tM&HQ3HGh=Du_NUpkeXS-+KfXlJgQy1ivjD=$!D5@D(--+Bi
z&hFktoMW=J&1AldpG?UHL^@I9o}4pbXODmT|M=JVNB_}(z@PgYe~<aKXL$N2uJc`!
zU4HdrU*J3q3=I76kNqe={rCSdXX{4{!$9L&f+Wky_NqV5o_n_*F~5A3u3ND^Z@IK}
zz!$%CmxrgP?6xfz$BD~VcG+pqdH)kPIJ)^ZPqf0}2X1im;2}|Z-unC}nV;R}*_U7D
zw;$eO>IZhG*BOr9<jI{q;%uAO@86{((>M?p&``R>BnF}nEc*eM2Hf(3AA0e<><7=!
zJ-E#;+#QJVCaxcJ5-P&em-hMLS6}2q&t1biN58(niO`68FSOp#=1kLSj=dMLX{rKQ
zF}9?2$Kf*v#GM&gmiQr)y9>g!Rpyy($c34-S@JqJYP7`(K5OkFDI<e`3>0T^U9~zP
z&9!SC!4}=Gil5Fm*XvudL9J&UBLb}4@-aiuQVXdKlNG_66H(O%l#P#ZvawP07Lj@}
zf!Kgj>fF#Y?2K@y6@WprwI0p)I6gjNXYUeXw8-Kd<Tie%<Q)fxS9x&%ZJoe$JwxjC
ztp%H^xP~+gc8JHS^*mo5Go4K|?AQld=Q-H9$~%h_X6+0_Sjmc39QaI@V6L8ps^y7R
z_$hr579!kv>kj>KCLCy>u{1#gShLtP&R&mK#ri+qS0_7Xg$a~{%Ml6W9B@t4W*`DN
z>BB5VuWR?XWTzGenB8ZBBRL0gf@h`&#fV$fIf8Sjd4%bt%Cy`O^pF)jq(f@B_4-@f
zJ9~sQt_~PQl2L9WnV`+76$h>NT=Xj@envlJnx<ub_#9_vEBgEvkZ`^y<|Wzhje0K_
zC($;#5HsH@uC0ZlrrMr{9A?Cw1KjBui4$B7Y8#JDnxc{%%i-u;)CrLllDYgm>m&=#
zC{RZ<t~QXVJ`M$m&l~TVvVu3Ld#OG~h5D+~Qc7uS%|QvYak&s-?CZQ&$K~=f%8m54
zRz*hB7RtJM_nXuZ1CkQCW`<(x+3uZqHh-)TNLi6{vfyIT(pof60kKNsg%%uf5{XSD
zw5lPk6Q>dkUK-U<LgpKs*U!z87zPbrSB(~k8!SWCCJR-s>uS^CEq*HdOgo=){h8<Z
z$|pWUTMSMl(?-<~&v&<JrzcGP4i}4o%W=XRH{W9W%G0z>L&pk7<mmXE{oOf}pgMII
zGJ|D4?@ebcPam>%aEYDiImgS6q02PW$XQ-;XMdZ2@>{p~;Ki@=p(n23p1nr)X5U}v
z?>5P)?3vQ!XPsc^L!b?kU$0rcd6V0}@ky3nc$3LuO%vhttmCBb@C+N19B2A9VUWxa
zgcqKC296)n#FjoszI5v$tN9E`(BT+{#LRU#8AuLpN7dMM{fah5yi;SBvW5%0rZ76H
zM$@*+fbChE=1xvc#avnUWddpYVG|ubnkLiG&}yjSP?Mzb!QGflZW!HF{!1RuwerZ?
znJHfXlsCKi)s;yEX3(=SDi}{)3SYQNQOoaGJJ*blrYnsG)<)H9#~6vh(RT}SS`nfr
zwsVA@m8_V}T6TAKS%f9S(2@FHb?|on_Gw_X&dR68t3I=U%GDiNHZsn2f|(64Yl0&>
z=)MvCDWg8t8DLfP#OkPSG6vgohCwAuUIdqYokZqb^n7KXDWGC7K*FB=!uOD(?N?@s
z`deW6;C3CxlM^45z2l7Cp;8eUJ6-E+C3swndTP~tPq$HwkA=SYqAM-tY6ZOL8Pb54
z!dlww3V^K4vovV)E(URWi?PX)@G+8dW|b1F^Ao1CDUD0a=PgXO^q3ojRljEM;1aX>
zHok3@;=fpM`tT7$pIBb3p}!!l7J4nRMXQ;nJJG9lH>V0hmU|-Esm>B>T#b$7y{DP9
zIJZ#W0`F+2P4$tS?H$l=?-~7BJyv`@fT+Oa{*7n&Lx1Ie&&PlAZ}T00^3UUA%X_}>
zd)V8Xa&fj~aKbA;@)1rhI{w~&{dee+M_P?i8thmPrZKpAbV9$(_yLa29x?Sh{J;-<
z8^8E7A7kg>GN)<DUYao56Mo<Wuabt2hv!G^%`frZ7hdG@?wotgLoPRgW^spkfG4Ic
z`TRaj)3SHT0f~o?PBo8Klpz+f>P;IbgwPOIiL^fF2S4-yUO2eQ8z=X;hz{RuL(`JC
zCbZL*tq^%|cEV46>NQTzj`_})U*Mu!>A;3eD~={-B06;*MEzg<&@<qf_`vpToAcBA
zyz;^i;7*STlQZ&iO<t{VTiZxlTa$?~sk{yZjH#oVe!K5k_mnK&d`SkNL!<>6>eHUf
zb5&Ram|i@gRQ2@k%LsnAU*D1qst)WNc+Ff4ME#_t#{d9;07*naR6R^fvs%aLmLXU)
zk5&}>GMe|^G<s5}tiB-h`km|>6+3+W*hm{v#531}+Ji5e3Hw|pJaglDzW9|-Go8+8
zCR3`G1Ba7XGeNzF?d@&a$rQRZDG$0?E9uCJcVDLkCHIajblr;8a>+EbWY>}7gy0<e
zyH9X*e3QL)o9J6s{TU%Cf>;U+k>ZUCxL6;mhwDe2_1E~^PyHGn{_{TsW)|d~x+^H%
zOLaZP%&8VBmK9kJ%+dgffa`@fKlU;1edRW+J-bgH^2~c*V0z<8;$EvdSC2(SXsMKg
zf%RrN?`(=nP>Mv3fm$rtzd6Yj)tB<{OMpDo;z+4Pz6^Kqz2g&~{3Pe}Bm!imV5U5j
zIU~h4LWY4H1>blw(cEq`m-j9+-P(b6#>vTDrW0`Oj6QV?2oe;=&N->Bi^r*|*%ghH
zHI$*j#TKS}Tzk*Uc;Aq3Js>*>B~HO;pmidf2f*pG;WILruBp&|?b@oPx=vp0F{xOC
zQtpg8XXZpO7&Ac&pb}tS3gbFd5j8>>!5o{qI&X<WHqG;Th-)EY8Zm1Cop}3u!R27r
z$td~Go`0e0bl=Ta2t^;IAePIfC=I237u9%KvtM15J-C!>0WOm95-2wG9&`d4JRxXv
z+O!R!iNrSA*I0CtAz#(_#_W5&xn!#T*i6p|z}jkCuNtRN9&uNv@_1Ls-DKDE^sDdV
zoiBZnt+v6XWIEb`>14{#5A0vMMC)(jCV`_zXB^C?>}~J!rMt(x_xcXgIH5~D*+p(W
zc*K>xZMG*Zq@G|DH{Y+>o9=M?_I;kbaf9X8-sVCa-6{}c!%<o=eR7Xq``S(3{4f6t
zzV}c3D3_mqk+`#OZq=J@j~k0(YrsVWml7do`g`|z@aZpe^oidgoSm^{u^YZs<Bc@*
zNJ_Y1#$A-Dx8P#S^87w8KL713Zhe_8@3?vQgxf<RO(Ka*PHWn{0$Gq8<m6bcVP`gF
zadfWOiD;8kE##av1)FGO`Bww$6fp3G(Oxy`C9bRZAb4{pmk?^KOl=c2)2LEr%Ot1x
zPK}^#`8wHnFNG~+ufiwJQqg^7Q0f^_y#tDC<>NfI5u>au3<D3mofk-rS(?vgv~9z@
zZJBLPm~T(nnonrk2~E?`G^#z#%6%rZf%(>qEQy`Hmgk;3WWDOt_-aU`yk@%Ha_RCF
zE|!UpfBe%#Ynq%jVfx-YsqA$I%#;E`8#!pAS0LaUz`xu4p6VN<zaJR*DcSm-j1GJS
zNu9>H5Dd%}yw@k%o376nGb3wuohtyLc^1@W*gy+&%^CBo%bnaXqYFV<M8zpv7e@sP
zoyQz|wImfwH}GZG%NpV4vYmb2+zJImddruLI^E0Vu56?i)aOMT-DK2%D@DfNLsVxN
zgXjFgU8YWOyZeOb*qa5`L&LCGk|c9@WuJC?PP?;<Ya;z>AWj{zZE)W4=+QB0ndnv>
zE?`c{T7ULAF^JZml2u+&ya|-joFVu+Kqy9mt_jTNbGqr0N!zl0xWnbAp5*Ykr`fuE
z6+fA)0h}=atMu88y6u3cOC5*r`7o>H8Nc(-{yy*jkw1pp+2h8`FLUb?pQfF*oLnsU
zk-zY#_=TVT$2?f9lzk8*&Csb!ztxi$i_8~4^Cdp?!RNU+yTk2!_qlfIkni};m$`rQ
znAQDL+~EP|7Z>bb-shv=^C7<SwKuu-);;_a19yJ&v+U2dn7F{n{bRoW%2Ryc=^Oa-
zM>K5%aY82W=A9E}KI(Hf2%Xg?(<acbdb;HaKk@zF#Rr~!kvng^!~NrPzU@7)@U^p3
zghOr}-6yR&Iyu92nZ0Jl7w$ad>h2!PF5^738Kx0pRv!s~G^igyBYOR(!LdKzVi;B&
zzWgGqhi~I^Aop2y-4E{}lN0r6a2}Zi)tQC>v4zmuEeRw6H>`E?+h@Ea7!uO25tHCa
zsdtK6I=iu6MqTiSZv72nY5eZKz9kz}(1}gWQLH7kJXXj^F>5J@v6NP6!&srv$V4iI
zM8BF!iK~?0@-Z2IR&i{TXd9*!Ek)T}1qtx-OCR7%uYHmio_n>rx0Y{O!koRKzSpii
z%k7(AR0D^hv&l`TMSl#|Fk}NT8x7n}17{Z}gl29bBc3L-?98`WF1J{B9gS=7(@QK@
zXLzGHc@sq?r_(Q=tpN6LbbgO+c%J+3JYx0s5tC<kt!YV0B{Vk*1Dor?R|=9!AuAK;
zlH2GrS8hB*y9%_yaeVVWU-~CM$L{rgF2C>$^Y=VY?uF1av^!hSdR#15KUZ3InV6K?
zte}_0DnpER;l)*uf<eaSwoLZyy*UXiP8a<Ar#?vzjvh6W@C1fz+DkVONRT{35QM};
zGL38LrNcLoINKrx4>|J8Q}1PW8~SG8i(h?R53MpybW}Th$`q(Js-{gLkX#^nN0>R*
zE%+|8TK9G>Vl5_vsBW{+NPMBaXWd+-P*O^fpGy{<D1Dd`e^I(hD^54e!t3N(RD((0
z%v(+N9Oar9>Z_54PQTT4HIeH_z3I}7PNU-^y9`C=tIw$45U&Mv!sM;#eYans2|7LZ
zPF-};m?4_&d3Dx}l(-?p%!{0{GjQZ$E^cu`IH%?|!PB&jHmDQT6^B>??IEY6kvulS
zFEl{sJT9n4O`M_NPTdb95phDyo)Y6!8aSyBsF_<?Y{646zQk|++|N?e7M&wIHTk&v
z=2zLja?K_#u#$mu5q4$`#}5bYohJ^aNuN)jiEQ)6{Znr2Z?ij%s=3b!hP5(qX=l#E
z+c$a7bJzLYS8lNg5e8`6$TF>&?C<jCy>ou`Kltlh-HN>O^fjJ&^%btZ_#)fauhQ&l
zFneet#F7In$hu?w;1OqEeuJ|wzQu5K$`ptpkkzq>Av?rPxacy{dZbf%#t;L^lnp5a
zBAKi6KwJ-;^qJQlp0nD~c!e~a)Awt<bdP0@3PQU*B^@2>v@=?>P_l3tAM25IGfq;=
z2Iez1F~NWYb%z$q(XF~Rugno2#^@S*L`D-i%m~nr?zbh@V?6ija8oz%@_g0PuAo&i
z{=2iutLXEIE7SX;Np}^HD&I^VYmDr?rI-m$c=E~193EU^Z+DB`{Vnzm_SoLtVK$x9
zG>w^o4RnL*60<0kcd@)+HV^C_PFSych9Q&t#JXD&XMx?FIqv=i0kPN?M;a2YPb!@e
zvF7IMn)C9jN+$XV4qw?a@9InR$~Zs9W7OI~(DN%{u@{asl!-5hR}-WHV$|#EHvYYs
zS(P=1F>NYts7>%_;$ah6F(amOQG&w}%}Ji|K1z{wWZRd58YMZUaMm+NjB4Fmtc~*p
zX#tmOT>#mfdv#A283~e9Sa8tzGRxHSPE^O86l5sOOvI5dE*Q2hSmKz@r?}wgE><j0
zE@(sG!ksX9Vcqq5g;HYZGLj?Pdpqo3y~2aH?~~T*V=^S@9dshlYkqR^-bBh}EY8`$
zknv40`wB-lWKvFCe&Q<A$%OgN7JJvPu=B)q+P!_1Qh1dTIomtySzYT+RAcKM(l2=G
zLqACSh2P@gYai$EmG9ysf9&7jlOO*S^El()>u>VDANsv~&ma1I{G-43(>CSPh%}!D
zm3xKAkQ2Z3>!0R3zUzG)Jh{*D#TnOcJcVx(M{nKZ!QBg9eDBl9dcpeI0bw@f{fF=4
zy~%NYe9rmlIo)!_FllICeUh!Vq1~Pmwj)Cu8Ky1y-UaQ_ag_<vHqZs(-l}IE95FO3
zFCOp{Kl~BC{nh7?<2CJcm!tDHIbIIjd33>S9(nnFAK;C*eur+|^70Ee_~>_j8^7^0
z|CoCZ9zn`9PM9IY7)>$|S?7c!69<)IxhynJ*q(VNmoGEf-zBZjR70E;#OqE@6|^)a
zl4J;tLt=yYMsIeE_;!kqo;<8z-IJ4$iMUl}I6J{jjW_1ipe+{z0t|r0k#b__2fFpj
zm>u6-uW!)?wa{}4713NSq2?}P8@ID5=BT2<3QcQRt(XM+mZeF3tT!#no3BHkH$T3?
zDYsr2E9cF{4qklm1HAG2=Xv(oS8cNAjp8HKc~HFL(%}_u-uw~~A$Ln8cyk&I<Yod`
zW9-z~P`raA^!<{>@*E#_5!WHk6GOwLgX`S6{Z$PN$ql<(hn%fWaCwE~M3!V|XHfS^
zL+O@_QwG*7`pl>PyMMv={BQmk#4_3Oqmfa$5+#(lTm&n=O5s#8@FY66p1sQU(^n9e
zITUcKfwWEx=L;@gdz*_}?{I$a5wj~-m|xlF;C(OQcXyDs!MjA!v5E{vQz{cz_gF#Y
zbA}<;+*Hb;SHiNT-cW)>Ui<A&^OXnp7+Qx*iN<M@n=6e@a6TZ((*@52PcNBC)6n&a
zW*YHLLrRJBi-){%{~iYq99LRM{aUBvwjRhH5nuJY-hL-ni;mKDTSnd4F&A&$WH#R;
z*p$0in3qMDDk|WN&Rg_kVrhVul_;vw@~$+P-kQq|n!MMhQ|U=1zCxR_;uMQbcd{E3
zsYkPuRB3@DW!2ylDFkLu@@g3222rg~0T^miSDvRHa;Ow&rL~1`h`B(BQJsd(D%vq-
z3aS>?)gsH8o-oURWR1lTTU^d*rkmK>E7_PKYJ^N|8``#27ekAoNv3;oB@?t<KSSxY
zkr^y<L+Pb4#@dvM<^5)BN+nwp?F&Gs_wdrQ&+_R0Jz7!Pm{<ekaQMOT&KvJ=^~tB$
z+1X`vzGAvN=d@ol3yvo)wS4j38Pg}XnZ^Nv;3kfV+vc@94|#HL#`XO@ob<#t68gke
z@T?~j9^QJ$hhDkCCtkaSOoZji6XKk8T7z%7o2N_`1E+7D^VY-9asRi!!gSg)owPJ<
zB(|P7ZE(@H>ve{8q7{fKGYQUe?G*HrJ~5e2X<|bjdI*kSS$yI(f@?^ce|qEaGSkH)
zmM-wyx9)ShY00Y#(r}EEju?Dh-+~ZvOnu;Fu^_l$dw!}R1g`8Ew#{tPa5r%vVLX#^
zX)0{58gbLn?bTVFP??TB)`i%Acw^=imV?sLMrUct&nRfEjDJ&)*J2ttcGlG>u;P&h
ziIxMl((~<4p|?9mP)yB%97v*V1G{?@_7A4)UEX4AZ;S2SZQ5pPdA**L2SQTS8a|K!
ztw2)0bEcUFoa=F}V;M7D2kTYRh?QZ*>G2UK=WAs#6ca3qGn1T+jiVGk=admq9C1Mn
zZM@SaHhd!{s8%C*n{qp^&v`aLHCrr>Iv)ivXIKlYP`{!gDiyJ4E6Z67fe@lOJS$za
zVhv(g#A6zMU29qbmHVYX2~1lWy)n^jqF?1wn4z8OP}RIF=riacxdt~RoIpyQ<wFOM
z!P4_&jB-nfl7BnW#&uyAPj$%}qf@5w!4d;R3SHZiheVuAX{U289bRU+Ua3(|mzn!O
z%n9io>Ewd-aE5r#bUGy^XAa=PWImzYpRqj7q;5^uCx$`w>Kz@vZ7A}=Tr<7ZSze!0
zt9!gdlCZew_|orul^0)rf$hteX(o~B?w(FfX%xiG#y%_6rYm#k0uE<f>WRaeVOZd%
zyFBsghsa%zlYwvh;qT-5mtNxLJNNh(|NYPL{eSY0^ZWkvzsb-2<Da2hXNHuSx<G@_
ziO@LcbLRCskNEt@zr+W=>lI#oZ_9(DTeOoIFMseoy!oluNxk4D64xu_;1aHZ);X?R
zz06g2SV5;@NcgNn2#u_=+Q!fLy&w4yF?sr&`1N1^P0kiaOs6fY^%)=e(98Vr_k0Jb
zJ0Z!0`*$x0-I{~xjPLp0kMPE=w|Ms1UBYUc@BHW=W}7pf+1}>0>6Fv66B?6$O+%m&
zl`Tc*Smun!GqH?voMYx1hVG0H|L7mY&s+R#kKkKm(HT~#&r=?(HXahW%itDz?u+hW
z$pw;g<e?{DtkpMIJZ|uufAw|zWKJ`g!)$_UCir%ukxZ^&N_sZSFyKV>x8H29Z^;H#
zkx#wLA|<XwEdOyVva97w*^sRj%;kHGm2dgZwYl9yq6<n~KTsN5iiXEM_C269zH0X_
z=9V{^v^@39%iOv17T2GA9?5HKn50f(959)*T)Xxx5AME2lM{W{5kkU6N6HhU!k`D1
zZy2&tm7NF|%VT0}XkFHcjB~^gIox}SyZ7H@)+AyQcDDBEbEaEf;Q9r)L8)mrN=amv
zY008ruw0#Ta(0Q^zx*0c{_y+tAgy!+t*8oB8jIQ~gVcH<>r`2(5kxWNlU=_y;sx4+
zE!w9J*-tMcT_T-bFdScS@!A{o>jhyxXY08eG&@`5lCg%vbv^yjF=@S~JvhM6CS#_e
zE6&Hc@_}?c<Z)eO&-!@5PyVx?;l%ZXA@Lm(&(%iworU8Tf;0^wCzF#213j&CWa+f{
zO^(@oMr<aK9Vg2ZR(`?5z9X#zLu{GO=hoDzo>2k>8^?$n8wjxpVn!Hja@=}PXtbgA
z9=vbr4a;RpCI-Nu4SB|*U{LS_BYJ57ysLCQA&(IdLe(%j=f+~jbgNqU8zrvLMhjJ*
zLTO1OvQKG_>=DYQEA&y<z|Ib@e&oYUV#{ZK{+CH7=LF)0(Tiicx5c$*ZZLG2d$;e?
zou1YZ_iqIKF1A6R$AnEI)YOYyCc}amMOO-%%Rx1Jqa;c9po~(3^9JqCY+ilTtiWh7
z7|sH`HF6`9jm9TwL-U=+;;>{(_}~cQksKLPR!7ppFmXa~!5Sci@N$Se_v-t3?Kgj8
z{5;bKXJ>`Hc%+RST)WEak6veMD{^tXWN;JOS>W2v7GJx4%zK{NLks#uoCv#DFY(sh
z2b?SxJbSQ*r&sONYE6?p!(`6My;EL&`jF4Qc^lWxSoMzB%vh#@>GnR77x<ky?WCn?
zTGi(Y=DrQm_jrp$@m`D)KH%GES+g30A@&T2Km%NvOpxd~bB<MLR5z4+)*`ICB_DX<
z3hUEjKK9lfZstMPstB!evq_BW*c*!@?#?(lIw3eK?roB#|0)~CSM_l))_|*fe`OB%
zT%R>((Jn}d4lz=LGnR`z8)H-HY(LyQ%k=pa(_MbOd_`<CR0FTP;jU`uYc^|Fd-g{k
zhcZwdDYL~4Z^M_oepZ>-RqCnN??3dJzWe{O_n$$wZs&a`_FLf%J9RpJ`ox=YFMx|n
z5D<b063i5%MwCcV(wNeeM46Iom0eSoT~lN0gKdpHuCXnbM<ZIE(Qrm%B`Q#X6iG0X
z5&@7PfD7Q}98T`;(`kn{tl$sNTJP>F`NMp;ES3Rn)WJEo&*|NJ?{~dxJ<tF7hYs(A
z5hCw7qzqxnlNG(Zn7e3Wh2^2eC~7nwgLt;n>41q1)<jl|h7c{@x58Mk5)b6PLkE|@
zpTyWmd!SCnY+6T|jw!I(qFngBoo=M-xgKaniGE3uP5Iu92`E%X*0wQL7&ADTqAO&o
znj`@dM=J4LI(SqN7r>aLFDm`)H~_a{_}Us=mZnq%GfYs|x$}7`X#z>3YHV_=F7Vjo
z4>7`&O3Z44#Jpj&#BrSWv2dO2Pwy~5i^J?xw$c=Jq;xWgOxG{ni<0%NaLN&I*l|fH
zO9~>sSyB<Oh14M47`oFXu{1E6po)rmT;pS8)wa0BoTy@|g#vwyIOlO594%Ih!J2%Y
z;0D7?iP=FboKLlfhPHD&{kzZc_UpH~bnP;`SN7OD>$q^$v32PZ)z+q5Tj87!;}1x2
zJp|%vPJehPYm$H}PO-Bos+<x18M-d`{y+U6^0)p^|0T<VBVPE8U*!Ye`Yk;92fm)q
zeEN5AUSUduRZ>?3ZBQB--}BFY@iRR3)MJdNCDmzUxm<GP@)lg*BlI3yTH?x6j})Pj
z$UE!E1Q1lAG0~!f01Z(a8LR|HtPKzDK1|UUoE@Ixf1~B%q-5SYzUdo2$~S!S!)Vo0
zsR??+@`x|^ZudcSH9^e^E{(2H6g{8(@gL=l+jls;+wtYkeV(H0nD;W39}^iFLsUj0
zyb^F;7%P!GMq@SXZO<5QSgwEML%1@cs0rl=Rak6Y3K&E-Ko!a^PO_7=bKpI}r>}YO
zOi|XNR4F^1wZzupmZx;?nBMtx*30lhmle7y&{c&h3ZhaN1tz@vOt9bY$Jb<on)BN-
z(kQ5vG-!k7F&Eys&|dRe)|+2k_fKn$!<xR7wuE<Nt;*po-=tBJ3z>50GK!=?A|DcV
zo@vEVMaH8Mlg%COAKhU(-XKOl&<M0n6A%p-FJI;Eoj0*W`p`j4W;d0WNot}8X>A2n
zBT-^}&wP2r%xq({5ek^m)Z;N58yC5MdXqZz6rrcCwkf7loa=Di3g=h2u#!T-$me!)
zdcw|lLeCa&eEC&2AKznoeItE-aWGXG#2EwfCx0Ka16slhBvhS5kp!`DOh{~^6JdbD
zLa8zJ46`$#y0T5|Jnn2ocjtilYp>C}9$S_abxnJ`V*jPLSS`<(UEkw9-}5a{6zhng
zC~mzmxfLtthxiyDG=(qu<zM(!K6~p``U0+N__~qhsB_$R9j+=VBhs`e(N6j(f$2G8
z&gw{AjMDTmvg(#{ylBIM8K&^eW@CtY&?Lqn8i{qLoaAw>rw=I(Kq-te6k1bSD~?h{
z3EE;}9Kyh(&xe^BsA5u>#vx!mRWdRa2<L1flQsKrFe+HffKzdq>3cfc!t0~~3~Q}i
zihm+ep5jpKoXf^lT4>PLviIOME<gAX8pYP-tE}1^xRsN8TnB=RRFe_o*^JT7MRp&4
zh-ZKOGxW_WncTel+6_~fq)iNHga#_zL1tpe<Wh#*vc{tpcS-3BRGNgQYn_?Msfm%u
z7=tOSIP|925M@)|piW3`c4tOVX}RrUdM=e58MA?o68dkH>l<=Z1PYz#c9Ny+nO(g?
zyPV?}O`22)PcFtx#Rn5CH(z^|3%i%;eV{H2`tFQX9r3p3;?{`6Q^#uur@Uu(lO{Hx
z!4w^P*LOJCpYz$*-{RutHkZaz3M|$+R_=^NjP$3cJbZndSKm6M9BJClQ{#DfI>Po)
z7Mh|m6lE!9HHD@yS|WM0uskG|!;2#(CCV0qTe3#uoFgdFGe>WjPB!?B7hb{p9#dEB
zZSF7$foL@SYR<L2NBG@Of00`rC`%Fz1E1r-Sl>djr)y46gd&Fa!?hP3YPwglK~O#i
zSjQ)22E(wbl7Wy;4O~E)Rg((VJh)dWibSnn3|Y-OfHn!_%CRz8Cp{ct=UK+te2jDN
zE$d5FTpy4_t0~`;tv-g_<Mc}2LFdg|4r(~N^K=;$ZM(wtE#7&Y>xbt$-y{bI(FM`=
zDT4`OvR7z}D+*j)p|og=O{egEz;y=YJ=RLjs!UKJI#2Y9ET0$Svbg?Cy-ZxqkRcp@
zPbR4HImKaOnGNfG49RST)SzXAUCwgxzF1##J<zt(G*c@It1(7V8p&1FT0qXEn$H}S
zb5T6!4ZO~Ux~P;NYd{$*pMNr8P{bkYRJzBQ7&9r7sL%vkP!v-EyXubE_K4H12H(s1
zVoPa~ldG&U@_g#t${j*rv!+iQjMYhcA@9TMD9vxugnH+&(<#Mhj4d>(J)`O^!MDV^
zq-$E5w!!x`(~W}JrCr85dqicqf9D?e56|dY$7<ElJCE~@u6KAZhB#Q08sbO@E*VD#
zTud3~QG-u8vQg0mhu4uixA)oKKcaf!RW`OZxqR&!4?Xq}dk<XV!rnzjTbtO)jHoT^
zIsj?F=O>)pyn%{}qAbycrazlwCnwm+m}sYb_?w^N+SLo(IN0YO|F{1GkAC1OzWaxM
zfTw@^^91MVV!&F1?t9VVTLs>7Yrf*|{Jnq9pZoKFl%gJEj^>QUWA+~0<@9);a%)7j
zw}lxiIj6KDmWgepQeBnLD#=}e&=b8SRtE1N6oI?X-r?$FyF9sfh3ii~&i1uUVrWoa
zK`<CwGOjBwUfRJaOQ)Prh?U0g&$%8n7e|K0r7?#u3q5;8WLg-JR|F3};xH6ZQyHiT
z6iVZxL21W_e&`2bHbz$plmMVndE&T&!k$b1NLy$~faE}l7YSK(;(rjDIsNLC=sl(!
z6N8~WZE#JXbY5J3V<2=LzU}Bw;y?iPTBH~%D0uHjeh>DU-@nJ#WP>``cSi}C)**s&
zDL^+<7UQ4^$jxasA0g8oBgx%#Jqvy3&tXm19h?W(RNsj7SHb6LY*-)CS=^J)+H;z)
zi#vPV-M__ZHK!WY0|iz^F_KX#Vk^tu<p(*scOwNXN22ri2v&!4h;&6Ho3MZifxc^4
zEzi<xx0E(g=!)s)4y|igE{}kgxZ1}SGjutoF1CRkeC!CW#k(acc;2{wz}1VFaBfNO
zw|Mzezsm>z*eB2%rEJ2fA(Gs*w3)_8%ms1QHHG!GC*}h#t|KH=YN7_m$J7W(R#<?M
zg2G^{5oW8R_7{k~C$tUS{1jz9m!7<itwxL=x=u7@njD59hf*H-!pr(mN~2Z5n=jwy
zCx7;*St`-1-4B7cy1;X*o&&9@iNw&$;hsb7gTW_#UY87ybZSVPgU~rlE;<$LUc5+E
zO%eqZ$zw}8S*6sFwHZ)@8^1|wO*C9usG(&c4ULiss){mk2x(HM6q3=FodvT-Os7dn
zroJP(jHpPaEU8Iht+&k6a80_NCYiTrv(E6<$Yc>I4x&jVi$71rmjgRTZ^+LKjErL3
zuDJKyvoy;<*{#sqBbH@HciK_}c@d|t-(_*@F2%+M7p`6vmum!D{<mll@5;lLrO@9^
zAX)|9Krd6&Ou1EBr^$xsv4iu;G)>lcrg+H(jA=R}aWXnJq(a}5U&?B7ZXnqbUnL~h
zNU=EVe~A&0|E!}pLPiZ3p;oID3!#l2dJmzmDXzcgy}bO)(_)K{6ju4Q%EZy5T;%ra
zx43rg8l!PZ>m5dybU-K~LCtyOp)J1n)+wX?BQ9Q=(ngOeETQSy+Ag`c`v9-KamZW8
zC+t)ccE=T!DQTMq6=8Afh^MY^^YXp>To`Q;dyOB1TSn@HXd6>ttgr`kx(Bot2S^=-
zifOb-Q5FK)$$bSx(fFANzKq<QAMtIU{6^mQ)KgeH;-CN2KV)2Qfie`<^7_F(Z+i$;
zA(13`ZRuQ~WJFWO1_I8IGNp3^m>3S}kT@@qj>BGbj&*X5@3Ri(h=cYwH%XB^cvM(x
zhH35j=ai!=|5qN>rm1ssA<f<m0~07cfuvDMlMr=gWaLSF26)x_jbvbEXiV47nO?{_
ze$m>o7SynxX{88G(RZHZqM;twu*GoW<nPd0oHUI#8Q;W5H!wgnf)^FW&_!XT^r6Gx
z*xj7aby28#Vc^#dhDqX(FMC`zM)6w99+%#O`909NXGi%wBh!#TRAFI+oH?7$EUjhI
zVpYT%p|1}OXZkF)4j3aQY=t$08?sIxQb;vXi0kG%Mv}=-?t|5^2ceTOQU;?D5G;C`
zYAa<DYbLDMa0rRsTT&Dwv>geM*p^U_#osAJX<`?vWT4a&TmngS#<$gAwm4K>LX$&l
zgSDV^Ao?&Ei}fNpM`LK4j)S+~#w`{UN>eLMU5{9vEwI*tTXK55gbA3|fvqTJ8(>N%
zjU$>7T?@^s#rY23MVynU6Bm%Y^~919ri*-r8qtl%v~Pq2ctj=6zAlIsH?CUR_JEUv
zV_tvpWj40AxP0v@*B*X=z3W%my1dI|dlOra$aGLW+hOw|MRRYT_Ux1}_t-+A7fUEk
zFh-+BCI7*n_+I|T|NXz`{=GT>^l$#x{ODi#pYzzm5AysgZ{mHB=GHqxbd*Xb!#B$_
zuk7;^|MfrNNB+n+apCe+nj1H$t;Jlp$Y^GXQ$<H0rIHhsO{|SLfVCNr4oYM@+3i+U
z22A9Ek3Yc!AAK*Xuy`AYz9%*wqaqqB28FYT9UD|1vC1&s91)|YT{<>Qptw-+=KU3m
zc|%nwriEr=m1NMChPDd?BwM^viaMFs&9)}YwhFF(!^h~2LKhQIR_fP8i4O_XRVlA4
zk~I9e>i)OfBozuhDzP@=`wl&xfG)A6K`navM*Lo^&YmFYe(`B4>wQWLXy5fc=J)&Y
zwb`Hsp}+(maD9gg9-}ltC;CTXppdAG>n<~=aH=8G&^Za5w#uyIX2{>ChVRX1bUfGW
z0^$xE^QfP}p)^D<3p`CC<$uYQ&wF-vcDZx+CMU-SZ0}r5KQmEjQUkra_Yn8*-4;wd
zh*h?C4wI$?eT*1g5Xl7vGMU8DEYHwU;hloXj8OC#Q!?Ayqo<``&d~&1JOQ^r+Yz>y
zVr>QGm=HJ6?Seba5zEfew+*ga(KTaU`^C@l*!O&#IIV_eFY4sZ8hr{355oZ{;{;@K
zn2_{$q!*_XX`mOudg>NHc_nCa-w~Q6vF!;_kV~aC<<^v9_X2iQ2^wo`iU|<vPfYsL
ztUVTbootrTY1(DaU;l}p;7&g$YJ(wg6g8h|0w*y_25AUH8<TD-q47#^-iXHap28Fa
zrEnp#oX^1pN*#%&z{i&B4_v3Vm0Vw=(O!w$t_EYoB3+qGsY+O6D!6kRfUy!>Z;Ta+
zlQx6PrcRq_%=*oDH2p5o{WLLAN#5bcn2`K96q3343}~ZLcB{~Vlo7L)!38u(iIw9j
z3=wO<Y#7?6p<6cM7#P6Bh%OAyOB_Imfl|ZfNHg0Q^YD`&#oNg1&%D8#|N41M6t`L%
z9a}py>~zfZ!jz-8z{Ub^4X%0jwHxI`qM5w+xUMDkPA28vC!>+D2A)WbYAPrP>T}F4
zs%tHw(n@qFIj>Y@*Y5zTz?d{i*HMb;6vHdc=K7k1w5J3wF%6;@?$JRHdz(?~7!olV
zQ@G&R+}V?<)x1f~g+x_IKcdooDw7i5Xxe7Q1DCgX{<T}wlPOwT+TPKbh&D?eeq_x4
zi*syPvUP2OLeW`E)34}OOCEpl0!!z(d)jg1?3CJAilU|nifS~%AFX)m(hf}**m~a{
z&6{`m+~EoHt%6!bDx)w(fhnZ$)>>iG@0vsj)n;hORBAM0->3Boc2p3mz+;bJ;lW2P
z({%TF&m&JU+1g;SkcMXW;x0|-sdr|qR-Ih)m|dE)VN0gar-@)<2*?!64fl>7ND1c~
zt+1X>W%_*DA7fG?41q-F9O-pj)ASiM%!TASXAqRZvwpa@voWAf`>@t#h(5|w(mbIh
zQ;w6YAZIp)q0uKb3Tu~orQ-0jIp3rX?^g}r+%QQ70b!J64I3+?&8BHtE|%2el6KWF
z8av7|QJYzxBf)!u@~O#kxV~kzT(Md#@SSJ``=-NpQWsp<oG>jb?jO%NJ3PcE7LUq~
zs{_?Fx!(raH+5>1Jb`k^t9*AKd1@YG901-(#0Q^ftZLA#XI8Y)LRHjO8cv<0JxWE4
z(eg_}Sy-y7#8`{Xdem%0#JT!~AWrt7Ou*zhT3P<3k}M=?tVzIcrUA=T+Y)twGNriM
z_MR9zB9cuRbd9Qt!2vZWog}N4e!isb9bq)48c$Kt;}>%xZTeYhn#|fz$ykw7;Kgep
zi$DjhaSSxQ;_cUOaqH$iuI_B}@bznerS}>gHC?MYTLygWY46@+_xgh(voMCJ4bEy-
zuE!&BUcm*tkMtqnwW4<+=}t94>BQ(0w|A-eQVry#nH%EC2&f=aeT~$zeb;kz?~v2|
zBVK#<MMmQhTbFjY^2imgJ^UcMmo78eno*3WnC(3#vmL6%3C+O)-TnLYM=SA}uo34w
zu06QLH-6+vKKt~u{Q9r`CLj2Q5AjF-qaWerzxLPZW1tA0LKmz;PnGnt!2q$~h1c)%
zfBA`@<`W-yg75m|$N0Lx^cT^s<LJx3Oh*P#Qm9d8Q5cO$+^@j^4xQqzgw+8F462I+
zoO=w0s69TEgucg=rJO(3fLEwxEBAm&PS8a}RZ}_|UViQ^CbNPOhN&tjo#Xn4ALYU$
zd;I24eHv>7u&-2PRF`yZ1Xg_PNB;<MRHJMSS~BH>Z)6Q8woQV?(mtK-(i7U4y;l<L
zKIh>2RJR8mDJNshXoPQi%;}uI@9~|OnF{bF%t50J+C@U|&|&hf?=ipMkFUuFwd)+p
ztaTK@3C%6$MsSeKh+-)BNtx!D57K~`i6|9uDhvnbu<@oON(Rb8e%`eiiFcl;0y^ly
z?mRy`FnmpJWNA=)7YIJGG23OiIOE=(+w5N08#FMPs#DelJG<98K713U3ZhnYuEpxn
zusLXLQ;Ov|^B+J(&%Bvahd|JQv6^DE!`Pb5ohzLBJGiEi22goyT;RJU(bZ5)&=eR`
zp~Hl`CnxMqD>`R!&eF^$y!MNq=h5%{D2yfUKnnMyt;;Y#IxJGAMH)e51U{s&`iM$;
z#RzKyj%36nRhvOszz1|WAr^wP8YB6j=PA}hK@+`9_y)aRF4!{c73KM2$xr^j{x(nF
zdWB9KAwen8d7ZgHzz}tySCa3hl^j56?nI`S2ZZ7q(`L3<9#hyLo3>WWCL=Cx?Ia+?
z16C~RmBwh592rw{s^q^z@X}B#nILFmgc@oMy09`WMWRRYL7@=3k-{VzN<wRt9qJ9y
zE3eHiBy07ZW<xPiv~^6DKuQsf!c-NL?HwlDTa2bPWmR#uf5hqGKFj$6T!(Tkl`&j@
z;6YB0Pgx$`<!IT{IBDWGcc+}TD;|I15l+t*TzT|Sb{@KdotE^zVR8EqZk<w{K+~=W
zI${e?v#OYGk67K>$9hG?Vq;`uy7#W{F}AJ&&|cOlx<wgry^cYs-Lij#lrtZ}h<j_E
zO2wEQuazAR?L0YECV^z>`sAj|ly(5Yln>~T3Ph8l9#VrEgO^ARmEuyIuog^WV#GwP
z)Jc;j^|eR|E<N-fo`3rDS(+`3hPZB&V+hh5YlU^1m!5r*C!Ty9^~UW~s7LCmq-!;8
zl!^J?gFEcMbwIe+v2kga%6NP%S@e<Ra>b;MJaTzNxIE?LwBhV@$)a5`VM0|!IBl5L
z9Uu6vKZrhii0bpNa_jlGI9W-~Yhf)W%Dob8TJt6~J1JXMSu2KJ*$AgJtc;^CHEeG1
z{`YS4O;29tmw(}3bLD|66xQ(g6CdP@pZ)?DqGq&N()SIcNr~@0p*v?lL>?fXixnCQ
zu`tjhbvkc!ZeeqCM%oB7fkTINA(tt>Sq=f@m`Uv;UM0h$Y?z{DGBz~MnK2}rT>3vX
z&}yZwGE&s%204cG(v!_}zV<NzA>`?5AO?4?p;?;^60?jTov3m6$+@p#XjRuABU(r5
z+OoMZVLF{K85eABjhRks%F^O|Ptz>re6z_c#|v9Zrez-Q9BsRzZC13ahGx0M_a5)1
zxengTq$?Sn1TWK8pQeR|keWL^Wb7)H49-Y8$V6#1w!p`C9PaPhBTI;etspaAvZO*O
zjFw;8Xo}KMRz=E(O?p#gqOOg>YE4mEN~@>}q50}GQ6-tuDdHgQh*9!A^YtSdff%Dp
z{AYu*Me-xi!<nZH;MHJ=gfas#qskE~7+j`)DvgRstEZJ*L*Jvc!A^J3WsRvyilU~R
zY@&$tXGio$2dHLAh`mI^A=%T)in)(*V6`WhNC<{#6-BQofjR^#CHp||fz4?_T}D>(
zCQUUxtJc$a=oUSze#Oc0F&ACH*hn8*yzjuDJ4<7i;@hN-2tG@zz=tSOlT^ECn>;3R
z%xp}K(aKAPwfBkIEm0*Tunnz<t|y#%mh%O7-@L)|)t4A=PPuSpmy6dgvv=h(TNihk
zOg9)`y-vM%k>>6_+WmdnvopN!XcotO{F^_(y<0bV<L)7U`~UG1{HOotzs8gAd6+Lg
z{|eSvYGowFL1_wO=u@6+5iO^E<X4`3jo03|%is91AIEK8M%NXr=AbI6j1$nMQu;(V
z$B@WoT0=@f0&P_S6QWU=lxZ7{LRm$K22~k!)PkpLFm(lZk-sHxg;;oQedZ-h+wtJW
zh}dadzv6*!ewgq3KmJvo{_p+?pu|01D=MQXi;|#vuDy4UOMl?&aRzBnRVj?K=+||Q
zHVI(R!?l*Tbso5hLpsFZhV25{i4;p~L(mF4sVJr+eBa|13sz@m_$E=?jYU_r)RiHw
zBe1@T9$%9UYBjFKAT$agvaltj<|t+@q)xFf>n)iSeVx$gh!26T>p?}VF67Boru#yS
zCQXQP9vH(0O;8dolMZ!jtxQ-_1#%wAhTInkwMH5W1wm1l6<yo1fA1cVmZ}~znQUNe
zh0&2KSFSQYy#c|o3IP)xF*-3NP#UMKAbugE>57OK`V5YyThRLsf@3t@6kVsSnQmX<
z^zaVBFU2i58kx9;rI;>6gN_x}8gAad%VU>z>3v7vHH57Z%_8vnuYQgPzvV+PP0LUz
zB8AZ1qEFO`G)+oQgOD3h$WVM%`zn<<NCQBS{+uWzu^vH{>CNeL9?Ym@G9a`%CHm52
zE-(a`hbS4N(W855+J62Ye2Rbio4-bH4K7UreOeznMn=lA@(yKVN>z|Uw~twqWAQPi
z!A6BKC0z%uTcEX^t4hHW?|naYQNLqStdovY3IUxOm=q5XGQ~Ez=&FQ9S|g2+wetPi
zrmQVLQ0qj;W^f@HI3yQAp-&}>Zwv#?AUpVy(D-5MiriyH3ng{Dx6Ol3Ji_(&Kf&zE
z1#Drd>w^0?4tU`+U*zDeTf}yO_9b=jSnarYc?U)%;icDcjU4bUdg9rV<2!dbI+(M%
zdCIC=p|{5rMM-ngbNl7n=u?T82+m`)<MjRs`uVq+nhB~bX^f#=wN&NNyS~ThWLTt3
zkbT1_X%@0ULf=bVQJT=l<gQAQVq!8l010JH`$5vOW=&$uraDHaOrvC&k{-dNDT*=j
zZ<Lba+b7gp#dTI*Zc1XJgKBv`Wm3+uiA-k~X_j+n`r)=zId+ij8l3M-d9L~Wj@CPN
zcDFdZHOE>_VFP8Y(4n9&0-^4iO*Xmz_6hU5`|Muap{NXXIpXXrayRrG^(&@EF}YZB
z>5+?c%bvq~OKu&l*o&TOHRtx1UZ*%)@u81=fRB9g8#y^X;^5|OmN)iU9-Uwomb$7K
z)uTjrPy2~c0%lo_Do2D#iJ4Rs+cP#UY*CDBln>l}=|xs2r`(7IkAC2c>(?*wnZ75C
zD#}rTYlL81mNo67PbfJIMl8v|CEJ(eXr_F{bf!qET*{n7eFitJa}8uVu8b0Dct{12
z(8Jd*!0W~{QT}z7b*MG4l=YqKFTC@2g?JvsOyNv%NIJWE$6>mb8&7e&WzeDz4Qe3C
zV-)A*E9W07O;878BxD*m8C$MhyU5<|CX?xi>Bfl7jSWWQ38pdwWw2@HghZ(f-sAG`
zuE#r{Ty6!ZA;tv$<T##4=mRe02xp@)pD4&0m5voHxwz>Q$)7=bwRkTShQPaDJSGTM
zrfyLp-o!yv4F*|SX|xt#vMNi;!ZNCAjDn&_%_@=76mo%$##SY^5@&FmrZxH;Kp-yP
zA-TpY(e@6~rv%;-P%6b@7)y@LTBj(-q-YIb<Y)L8@v9bF%Inan>ke&$j}b!{<Q-Zi
zBD+zjdQ4>t&?R1hQ3ktl5nE4*&7AJ|h_IX!+m1p9Y!R72nnDS^HaJh{dwlP4ea~ua
zX}bu$Q~<kM6AIn3?9R~ILDVd}j#b;zp{TWDadt%CFVNQDx*p%Q7&N6d#6ldv#hXKP
z(E<uboDX;e7|Zuw45DEcAA+=hq<Tnarb5bzkJ=D}B4~wAfFvpb$3fGsI=beF)BR)K
ze(qJuQO#sJVPkibi&rml?SZRoU%W^;et`1$nC9*QZsF;dCw$BId;)&yw|VZ?E&hwY
z_Mh`7e)tD@=D8PXd{1Q!g)(W-VW<rB&Vd$Vp>3ma(eS%}`|t6Q|Kxv#H6_|7a7`ip
zA_>e-f)s2T77S5<8li^!1deHiQsUI?qZ>9(0nH66h_{0-1Q1uo%On*?#Gq`=o!8#x
z&WpDgPewF-q~59d13&!zy#I%OkieK%o_?OfiVUjMk$P033JbpB6F>Yzv`S-(iA+yp
zVzEVGuf;5xP%^of`!qAk#FP=v?|W%q>>aU}djzjUbFBs34;WNoD7OmiWI}g*%Ife0
zV=ZPhS+i=~zt2Jas(6%E&@wqlEk$Vs*;jdz2t$BkK3G*mVM$rvvf&0L)Y+)8HZ`EB
z7|B@4piNbomNoQM#HdC19758TiLIv^oO#zIRuywLVy1H`LJ)Mgt}FH~UB&esi}{?B
z!$X|!aYmto<%3Ush|hibb2vo5;6jhpDbr2`xw}&VGnn|KLRAH6z?WyocpsQ<?4Yf}
z7|VF`BFocTMBkyjN<r}k;}tq|_}HUj#bMWSvg+7vOMKDNxt_YLSS>Yg{K{8&;2R#p
z?2M&|4!zjQ8}Xmb*<^y)CpwVMv>~||S%;I0zTB7Q4|`}xGg><oW@<eV9nRG?)iUci
zqYvOia#RK#pl=jE|I?r1Z~oLz(P}|JNd{Fxg)~`KL?v|MOq0t~Pah+N9TPE}3xNeR
zp?Basg^?-e=Ef%1_AU>`7J?+EraL*rDrJVoTug|hP?6B}DjArF8>KcDYs)lMv&oo9
zt=*|J)jh*AAx-nNVd#*PF_#n!=S(&fLnfvG0D;a2HZN`S(36kz-jBVHdUJzj)pGll
zmpQpR=lJFUhp*p3yA~Ces2ETL+#4^@&3l?tP3d}Ut2kS(aL*pmFV3iZg{epEJ@^<G
z-+z^2dyBnCA4Y4%v!D75|K=b35_VM(LXQdsMKz(e2F{i=b<g8J@Nr&zX`klJ*WUF#
zMx{zu0j4ZbS|XUV4|vzh*3zZ57sSdiJ8_aCgn|kfWh5#h<fhRi7f#WfY9q}*I?;YJ
zKo)gsEYpfe2fs?@Br2pS&v^k}ilWlz9B_vH$7;L^Y+ky?tIvG}t1UkCiE5Sh4)QjV
zm;njVFG9gfUw(;ek3GiGom1N0Q`t%;zae0w6jzJ`+mG!sKWw>ma>VAWrmRc0cc+}r
z7c`ycq*0u79UEQC?uO+(PmF2&1}86{a`DOq`i17-{Lb4vbPVtLz{70slsxpotLTqj
z!g+<8Yub~R*eH}Pu+uTc)`Vg<L61k6st^}Ood8SMi@vAr2(E*2h7MB>4}0c);NpXq
zu_jVg5v!q51=Uo@YV+e$hI%o95b4ug1CV4ND)r9B^jx#1(q}q!q{CMeVK$ySClZ%4
zrba4HQ*~yWq`lJ?R)CUB6OQMeRjps;`hF*|Tgd#}^%<8y7L^LlHAo@RNg)F<<g+iI
zkFw8%n48vfuOm%>$)CkJ(9u}KXgp$LbIj(Z#DI)v8<gb;TS%^R41u;?(Ww@Cq=^hc
zrc){je0}zvUDHo}2=va;_dUJ$1I3-hc#1k(Zx6HG0cwzD2Z9bbEi#PWdAy3rY5iT^
zWAgv0?A)pb@IY(Pj#_Q7MpG1;(YRu46;)YKSWQ_KRFz>;muRhVohK+uQCP~tO5SBM
zN;Fyxn&=P_x}-KktrBCVpL;@d7=f*c+Mw$iS|>oSN#~;PlWzg&qC}S!QGr{maJ_@j
zW6DCB4y}@LjVHz)qQ+`FoPF9Dg0?7Qg$}C?M2)Iy^lTG7ozfj0Vwx6P6sXe30Kz#!
z*W(u}n9p$yc&)L8f-a)zS+u=GNTtOX+7_h@ZSR?%E@&1Flm)JZ<-DP7I<#@D77cyd
zQI9H&3Q}n+jdc#~JtjubmbUF!wGD044r^U9s#C22X(BBgej62zAdMU@G4vuh#4Jw*
zF?5T7Rmqt+psdCOP;I2^TiluB<lYH4UVNKpi?6V;G3LsZOI*BmfsO4Mo8vKdHsk*O
zeZKSizlojCJi}+6dx4+)@Bbbj{p82_t>64CmDbd%q>U{>Mal%Qx)5<_s_~e|uRp+R
zpZ@}XF!b1+E2xtL6k%OMCn|hQjcCaF_v8wWlCe-Q8l$XC($8t?^M;{(ko&y!vgfAA
zBBeFPK<pLTDppFd_wkQ&_uhiCta;!2ALFra{1C;~7M_~+_S@Wk@hz~L(kMoSVN}*E
zx&>D*P1$(=`{~LKR3R@~C*#HtKsl5e4V2_}3{FuH^ssLz*8Ko65_{x~4}S3Thy#;J
z7z+`FvLmXk3hfJ;#S-NrW>m}BZ~y%S^{e7B<dw#ZQKwBICKNrRpekjqg-qKS3}l7E
zmJbIqWReUAG(&+9;yS}FJ7S)1Ty^54XYElOen^^(<kzdEp;hVmVzPjbPNpITk1a=(
z7e+!SaUT2`Dmq?y>m{a>n)^pfT;Xsb44I-LOexWbbDg|45q(FMkoe%~BlzVBE(SKH
z7cpf)QH~i;F0)+Rqjx=}7MxgzL}JwzM~jb&w-4^{(C!m>7wDP}JE}0z;~L9b|N2Gt
zKJ);Ss}~a6ClF(ro)$)^8#d|WNE>ke3_~{TNmSl+jYYdBRN<JklUd6r&1jzRB^p}@
z>nbm-|DD7nq?iC7P+eek;`ph*^9%fgU-;+rrI!3npo<|<G{GQ;y9a#I62=(uXwe#I
zjWWqrT!}Mk);2e*Q*>0AsIX<lho1W2;GQYcgM_jUbn-l8)@wtRnBDPGJ*BhLtGE{$
zYX`lIO4)*98s^h!A#TqC%;bB<N|T&&<MN~;Q-e)pc%J~#MasHhV`Iwl{+!pp{4(?V
zCv5FaQPmh7VKg3Nu3e-qYmVQ39o6;VJ-S;mvJo~XoW6aVFdETdh)p2Q4~T8VbdG2|
zqcO~`RGju6yD`SC=A5Y|y>`%RqEi$>;oCqL40z3I*|9n~WPGLK{=Mp5-(!4C1u<eI
zrLB=OtnVQ^^g0JgYpv5>k}0^_C;>=B@H{xXW^hKNRC<`yf*2S@?~`Gc8UO{M0w)PV
z%zMze>DVxx6^)b7(OB<4X&)Q!?s0l_B&I$p6%*lH#7SIF&{et*LUJ?j7nY;D_nFis
z^ZQGBEg;0u%p|IEpk=(J31N%2bMzi;a7;I*jM|>%Sx*~Ej+cRxRmX*T$;FZ3!b97X
zI}@Jz%l|P~?=5)w<=1)o%U_`V%4>{^$i>UBwY$ak_65qzBg)wYbUj5^BkX7lrc4rz
z9I+(X$1078`yi;Dww9~g6OQk8Y+l%-E(*4{C)m1R`|@?(e)h{e`oP27UA`$3>h%pc
z3_hdE)2Dp<s^NH$&pT#(;M~p4nt7YEePfPXVZC={oqRS;8~V$0PnajRvbpC?@Z98Y
zT_lqC)<gwgFPC%0P`d90y=4Hm*15W2khElQHUqZu#OM0%c>cY}|7A+FPHd$4yyfog
zdtACO#TJz`9oZbmB;&RaC_*-|%LQXdQw^DVl62p`ceGuHPt#QA0<M>=%+i+BRl)WS
zbPI<&J0^HXZcJ60RGp6>8#05f!SxOoBgXz-lv--|jLxqMt%bU4jiD+Fs>)K7hVi6i
zWG&-SO<@dmWvT0u>9i(9XqO!!2vAlSLy-X!9Vv9gs)#XQtiqNBMO9#JK`|;B&o&uN
z#u!_G6UGoqngui2AgWc$9aZ?y3s9P51|fLKajeIbrozn^#NY(L@IcoM(g^2!q7xJ}
z)D~T6tS&INAnF1W)lgeWl$%BsCAKK&5AWl8M`10ou2HtYxE}BB(H))Qot)JKh4zXf
zfQo^pX>eUbC@W!B6(g3*o(2omL_8L7G^>tw+2h@u#nCao?=iL@6rM6hqSy4M$2)QI
zU3EQYCv&>C!}saT(4?GVp_m#I!9=V{j=n~K=b>E5F3qG#Vah62+JZ}Fk_wcUJ;A4`
zt%(usM0wu!E&I!k`}a=J&uZ#X!Pe%Kt*s5l;|cX>#wWhzV_d&}jo<wISNOu`pW(sF
zm$|pUk0~Oxwz%N%#z_8m=LtCWF7FZuoOO!ktv4vHz903*mlK1pL<c9{Cozf?MT-Vi
z4|RB6<x<zE#5~OQE+%!hQ3YJE1T8UAB7ZR=YjZL{$~kr8%X@6S_Y-VC`XpvDLTN(?
z4i`F%jokU%=g5QEN(FYW?$Moe)LQX@Z~qp0=g^ZK$+9O1I}$_a8&>U{Sv^a8q+T0E
z#vldz+EY5cu_59>0MTKx!tk$DqRkTpe27}1jHR4z;JScYE`&K(*Q_}XUp<en$p$r$
zCYdqi#HGi8S`P<fHbcr9ne{~OJQkjqE(0YP);9V2oQ=OyLz2qy82*^qTSIevZh@wM
zxc*skbA%9N@~ee5BgehL;29Z3p)1~a<p!5FcG#Tm@rBc8Id!Lu%CQ7|#E3BlYbSJW
zB?XAqN$(JeI;Ke~cok4F5Smknj@iZ~lnvOrq7ECl)%`(_)F;}gQk@(X+VJN68+<H2
zMo<=fPl!%5U=CC;-1)*A?4C8u9=?RGK-nPKb9ov7U=m878|#eLrw9effla&DIX%|-
z&-45To~fuq(KJj@W#WNP#4Mj&^_G$Oy_Wy-Z~P>`_=VpV7dfZ!S_&Rb6z9a`X4s+V
zJ(4GzCkQIygF{!dgpA794KX3Vj(&M2j-y)f^&kEq8`~QwV+krsRwd%fs)EpChKWy}
zYUIq&poWP_$gYw`v_{%m$-%RBpy3fcdeAVXA}cv^lAPzFUVo0+U?saY#wgQMWg@tM
zSCO`}%-=e}#-0m%7kKD>Pq2M$n{rffbnk!*d)t8KjW4~%(f$pB*Tk+TbPaRWQf*f>
zg<?^Zgy_(|r)$qxF=Np$C{!ebmeI7JUG$(8`#0{g|Mn4a<q55Wpm6jAB3-oLp(tvO
z@7_aQp3#-XyT8XIDi%rii!vCUR%dQ}j7E7qumNP6nb&1vGGwRDxCXtH7Rs(k&1qDL
zLg<|2eCEAJ+tj2=^QogI+C&KqLNyMFnm1@2Q$xJgTZ^Y<)QaiWE^oc~0y@P&tm&YF
zpvCCcOwy1eyQDTS0nTopqKw4^P22Rzlq)w*F-H;-GL5uQ_+DaOma8R&F<jW4akA=J
zE*-sB+*<^i=-F&nJaKJ{=rtR=7x?JKJ;FD9oSU~!`K@34Enax~ci`wbu3nn(#77=w
z_p)PlK~tMjSRcxyMkcN!ID#10VKmBGq0Om49~|#_>U}))-p9H9#tokQx{vbqn=dli
z-NBB=)J1{n6JX>M`yi|hj?SMCIR{r|SJX9;Vhvj6b09|&k#uTG_PlJ8lt!(?VRC-x
zs@6LpF@m)Q*PVCIRcjzMpGSG(ngG1u-H^RJWEv-{>9t``GG$VtvnEPBLY}e?mh<Tc
ztxwlDWO(M+5uyO8gorjeO$UJht5r|m3+=J*0><`OQxKgOO?A3g#5_(6fQq6b_@3Y+
zUDwmK4a7*VdWg`w9@lz;Uptx?rKKz@Ha0Cs_viG-N8&shWo<~Fu@t}|HQ^utleUH)
z#iH*heh(_GTv}vsl0=C%X>X`1OI27Vql#)|nT|(Pg=JJ)>as`~!HT*n&=@8~MOhaV
zqk_q7L|u=l>Jf#Nd&E{H)ohF%*J8A2%j7*EaT)XuvuaS@!LoyTiK=R_HL8e&t`~Mr
z@6t3|)|A#7j7Rki(N>t*2=662&A32tBGc%b2IqQAQKCz^zP@c_YFdIWjDWscrv@Qu
z4^)9wH9ZCGYDJsaNo6(0nhACmIN3j?52u9A5mqZy*U58GU_*;<TKs5)Eo*`<=(~oZ
zteI?&P&#5w01EEBafhz=9NfOia=sAoqjxl|qw5?^+tK=-rdhFQdKRlCF{EtTD4I%_
zw9<~4@_H486`ioRmO@9OiCCp&DwjMVa!#$VF<jC|C%ShOGbNNzNCij=;MCwD5q(7U
zk$&MgJy>$X98i{)aaA&%ju}lyeD5be!OdF-eEGTO*qjNCIUu!9p0JnNqAg6v6+5=3
z=sh<+^~*f+KmCiS@dy^3<Uxl7vL#w>9w-E*2dPyII<e>y!%+u96z>T6v#@wt6N$QG
zY-`yEqG*?+Q3RhNT~wgj+7+LXdIS_i*Q0ch^E5<W`i;-Yq!!25ecvZ&S3S2s_X73E
z@W6L`J6$-Vj2A_NZFM^N9IK`kzY#Sf27^quy{vJSBr=(mbk0O<h<cCt>v8?}Y0@QW
z2bzdbi6auhR87aIGl^SMU4yR2@BSw9`~3KtY*6#zEe@FHCuwn*9ArZPA;YI4?mQ|y
zG@WKZn9u8l;?Py)sl_>pS8hhhkpTIW$!?B0oAErQE4fQ5ISgYTd2%nX=sfq9V(esM
zpo)Q!>FAp?o`3mmW}7=)zi^q8WykjRRgUf*(5oZtWLJuhpwPt#-v}i!>WB(HS2bzF
z$gjUAMn&74fr`v#m!POA>uq#Y^zAV^c(e*Atmu`9I33dDv`5coGELB+w8i%k6&zMu
z4qmy-;{Gw)k6xwTnxRLM-KvU2afzB}l?d|_^+%~(fhFg(JigA&W#+8q7#*KWDK%6_
zF{VrGb4FD%%<`V#BDxQRMdbAtZt!3HKmRsgx$_FPuD}7o(H9YIECm&v?}<2~hA#AY
zrKwF#&hzxCsT>s}v@%kRD}@i9N>_BN78P4cW%=OOJ;}AJR|)-jlzt7W#H<HZf*8tr
zOjV0y_@u*%<ie=M^u}i68!_CGYaGx~<6~-A#UC(Hg%d5>C|U2rKqn@2P*Bc@02+G_
z?{Rwfi1yYo-C~Jx1~pYoFO2Eh6Ao@SIIrmXp7F+r&?(N2Tbd@~PaJ;LQ##Lqwrm~F
z(axb9)IMM}9EO(KC<rCNRIC<r`h~+gMQj)JeNT7XGO1^D-HJjRj4lb*5ejG*D^zD_
zZ=W*VnNi1ge`Al%r?o1`sZu)8b<(xb!k!4J+06TbN*b0t<x$>?VZxfnKAc@>HBfqE
zh`1O;au7t9Y)mPXV68E!Xv+<0NFm{(YxgOFa4juR!z4l`vy<&hoZUY}`<V6v*)(Im
zpQ2B{hIT{tmKgE*zzqtYCcHkmyUIQp&&}Z@KHak_d#`wzGJ6%de>TTxO+B_4Z&)>+
zTc<76SYetaH=ci)wwSPY?Hbjn;`#%d{P3UsUS59nV|?l#{W@RxtzY8S$@6^VqmJtj
z73>6qHZ?|DqD>Q{AP6{0Oyr1xs70qXZ+q^0!=L!`Ka8%Yc&&Ky+dj<cZyqq--r(W)
zKT1?3eVZf0GX0T(nWGY`L+5>c=rqnX{pmAUvn14T1}U`$PF2nfJ~v^_D8XQYKDhiP
z#!z~`Oi>N%(=1VT<GHoCE|{XvlhAZ5641e{o72ISH|^CyrjQ};tE`O(&+Q-yd|983
z`HWsS*J-MhCMm=y&a%n3A%uYUJ;6Cz@6vQbXp%X*Q)>$aV66bK#tJh4D4fH2kMRjK
zMYcUtSZq;I78RZA#d9J`wzAS<Xr@hagN*{@1m#f9%h#PF+JeP$#n`<2AeekExpasW
zN@G*bbyXHj#}(6A&1_mTn^bIVOqonZ%r-`hr(?#mG3BVBu4{^-pqh=S#uEW!P#A6T
zjpOY2l#~0XglHKRC3QU&cWG^;A$Jbip4bKmPK?@GC&p`Ci<wMCM;E*l_AU_n#P%Sd
zN)5dyRu&Zn1koxa^PC8UCQ{&{xVewFH_?+RQI*iN1lJH3XDGeG6c$t0L|uv5nQJkC
z3ko%vFdmOFl_gk_zC=}GcS}YtQmhtCM<w(7hurznGn^hT(5k?iisR#s<?@iwJDL#K
zKU#7!?`VP&zYZNaZhPLmbAam{eb=Vs0*7}!UGM0+p5C{#T}Ruk=mIz|v~jJa8AFMA
zp0Ezm{o^9ed9?O4x}z|LvM7XxZ4E^XU;>dOhte7w6HA9aX>GMIDr|v{Vk((sTmhBn
z(|S!<j}hN{`o5=GwVa+Tg%)lM<J$1?4?o4}*@`#by2-7(Z_8e-<Qyte$?R4=c4vdA
zJzxIaXL#(-IVRWMNBiOziP1{XOav1XeH*04$Z<0PPHydEY)LsAp-M6B62FU>oT~-+
zvSmp`OwLNJg(;yyhe*JIQS>2FZd@W_F;)qSA}Y`Y^bPHuTiky6Hc@%L=}-RvANn)j
z&(Hkte}b9SY(8)idu5BDU8?yZMu%30*flishVAQ5$~ANmT^59>QL#(Sb{JR|d9TfK
zz`?sAZH6JN>#Bhyq4UY1#UxV&VK&qi3TWnY`uPg3z3bb|@9X1hvO$fRx`s~31)PVj
z1%1f^#Qb-e(vyz%bYmxS9GpAi2L^F&e$P$b^P0>qy2H5~)30;i2PA4|$RvF!cwEbe
z#)mw-D)`c>W@a=~(=l=j%1UwjWX{ts-ec#|RjyyWK-)M%h-_}`V(!)~+6BdUCi;$u
zAfd&W&@WPRVnBN#LnTCe$b}8ywW3=tn1=(Vvy14Wz>KCyR@w2Om&;m69RtBD`T)u}
zVp$O@BSm3AP*6PD8dmeb>WgnMyR^aPgL{~1g_)FM?vu29Ipc3Vy3TsP^~8ouiOi#?
zkf-41M$$2gb|DpAk}o9%nfIW&h+p+A_E-GsFMf`{`H%k|^Uz2XO|K|SLixQEET*a$
zX+zU41^m!L|Hwh;fq;t=#B59@8xc-uIU!Qo5$)+I<GSD*KK3yt(+PbbW%s3PFAfo8
zGp*cKXv9rV(DZEUpVn8VI4gruS~3u=P~ijt7y6MDb3$JWc}ZtVuQEa#O7zizdY5b8
zRB(o<xcB;9mIr6x3`9>K1F;V*S1rABbf-|xHt-JazJ0**#Bp@zgvI`naC#&|JX2v#
zTAFSNzCuwFyCuOltU`$|#uWXS5nH_VW`q0eE%r~2dHva&oE$G$9M0+clBPeS=~rTY
zV|$8HXq-(CW~J#~K44py@BSvE<ywU(=yf((Aqd!@6P;8hqGrhM(@GFzLUyoJC?5nJ
zk7*LBjC@b}{tyB&_EOL|mnMG^ryRx<Xk&3&VS>RF7NTUXhTzlprmgOr9y?^DR(cJ5
zA2>ZaNK|-fC_{4Y4GGj4MxerDJwJ>aGCI%A@sjK4`gY{}zl+ITICUusz%%IRm6qJ<
z(Zo`WH#j&sX7=GH*!jTY+`4hbTQ?8c+uLDtW5lQ$@xk|9=HdU%5A#o_Q@;4Kf0xnZ
zEy`*}X)I-7sK=F{^m!i-Ua0+DOW({{ot?2fxJx%*Fq&*~VRskjG};!}4Mi*q%1OZk
zk3Yof*wOZ49%PeC=UUUfJ_K?mX-rLNJm=z@eG5pgu>TGtB%b?l<G;1`(wVjXcjt|d
z7=T9-7&1iGsCQ<0C(=aPLgR2AW{Pj77y#TrBfqLF)=xh?2AQA^CSUPf6HRVL&sAUN
z4As_$Wtsx3b<>=Np=cwff;v%q*V<Px9Mm!3L%;_I!DDP<GU#j|Covz&NG@tol=xCW
zUL74FWr#{%uEhsf%vYQq9%GswHI^b;#}tPaA(G<VQnIK*Ca@F+r!@1fd)K#^z0D15
zX)sn%XvL^1*_e#k+L*GlIc8^j#>Uo!+17~J<`&g-M!hk{PDYr~2yKdVMn$6Qq&e4O
zG!tE*y?@H-;Tgv#OM36o-HN`w$Hm>iWP1h%eAA#@pePHW+(rvk35B@U3)pVuOfDmF
z)yQ5JfRun>bR<}v^!5TS>yT9JN})zIRED}NB`#{bMKC3bg0cwor?;5jKSY@VQ`eNn
zQW%TT8f9x-w<4k_CR>RLoc3M}L`&43$zI3t?IkDk6>2hJ_Q(_5eEJ0zCkuk_=?{;H
zy{Gqz)0N}?yk*{W_z-Z>rQt?mwdI>K$c`4g#LM^~PQ)%o!NSo}fd`V?x*6C?0@o#3
z&oEh&hSvE=)3s>bP#8^BSn8^zDhi4sLQ$X)SND(xEJ1-$3Y<$cbT@#cIssiFN9d(0
zHs=@nULu*gPO(%S)`+fCo5*_}y~@K6UggG(JKVf`6O+Pli^6iaf1ii1K0s(2PS0AF
z&pgBU$@gMj{USa{y@K~ihLrTE=mF$Z&J!C)ziKdDPoo`FmdaR+F?f^NB|Sk~tSZq?
z&R+2kh|qds*P(SmtZH;QT{nLOlu>vu=j6>_{cVm;PWaZp@I!p$$Nn6rH{PVba|auH
z9{-+i5s+gv!@H1-2xWct%N{%0;*FPH;Zy(k7btbhgYUV_H+}E-Fy7i^NLyFQxj5;G
z1;q8@N*w(lJIgGQAp<@S3{)7HD-uoF%f46`#^bT1AbgcQz9t*ggd?BtJ%M#EIxGUR
z17rTaa|rP{dgJ;3mM^X~j01x${Z*!P<t{cCCPN=}4((P-Q0QF9Mie^qeE3RWvn}}I
zkzt}e+kS~Lo--Txo!8HJ^PuIy%a8Kd_J}4JoC*>`URsKBMC%%S-w0Jk8PEkqIiu+p
z3BgOvPRK#u=PW%m;;5&fU7etmVlusmGM1v=#wmmAPf?+h?~5KCv+JS7T7xwPRe-7r
zG3wP0-}VwmfWed{^P`6O{;SNcZ85vFgB{oCNkmr`tbszN9AXwDd30ICr3NQoSXbY<
zV3W;BhkT$Q6y7u)@E&L#p>Z^)9k0E5hoAh1|BT;w_H)=GX*Q$6s6ZbyF3R&1Do|)C
zXjD`z+=>_+<FXQJW!jK}GK{KO$`kFS8Bw6T#)h7qjVa&u$?xRO{e3xPlojwyCqt_|
zcgg)JGS!r+ixN~D$o8tDM>0di42r=q8Y9%A=o4iyq>V3W<g_xfPiBpqwxYL*B$Kf8
zdP1HQC5CA4(YnBEjgNgY|26b|psFe^KYEqBw+|^z<Y@nlZs{p&4fJT`P&(1Oq9W=M
zeQ2<yWgJV+_8l>nxKPk{Fb)&u3(LVx&D*aXvD^<-;|}8uZqede$wBkJN4bvJ2<@_0
zkt%vNcdl`=fA;QgGBKG~rC&p_j?jNejj7UcprUCbRCS$z1p%>Oa4j45j1-fiz{ilL
zOxcPz%32B1AiKUthlsUC3Xn9N%Py-SoQwG&4$VXi$$%#WV$>YpyDN=pq9O$^8qERy
z4Pp54dFmz0DFb)uYk7|x@S=6raLg{S!zYz#!YTATe<hvZMbv2T&?++BsMvV_H==FD
z0}sr&l8U(dhbN536|>oh*~s#ze*F7btqwVO>9^T``yRS)aLQvwHTBjG^bWsjXclKQ
z^9B84iR)UdQfLb!Q!w3{2=Z?w&vkoq%!5xnPPghfJ38g^rH9$NbdCG^fU)u@T}y%L
z0)5}&`X#}20$vm;nk?#bRM~VZr_XR`RylXZyhCp^5P#1#dt_SBx%2D%Ij4qCSAf#&
zJfF`%HJoFSGzB+C=y5sXJ5#iW^^>-Vcewbj|9+tO=Ig&cA_pKM7vd_t#sObbN=)NY
zMs^6EX0;-?kj|9~VJe8uuJ1Z2JW*(juyBLN1&{ZRzV~!(N8k4NJ|wb<7RPZ#Y9<t|
z>v3(%{=EZEj!xK)!em0Fn2)6YRtXf<#>oUIC{*cCy+aq4u6y^hmY;a!3PowznAB{{
z#%yoS*x9U^ZB5zSol<Yi&{cu91xgi(#bNM$53xgyOR!^N?D4L_1S@X3y`z~gnV&Ye
zSg^6ZM^VbTqKu*o0o!_tQGqRMuyWQLZHUScts#~c>_`msq9z81wt?6yVkaI9`N$>;
z9a`8rQKoS|tyr53t!#lc7UiKmJr)PkdPFp1it&``w#Rjy^u5a^UUyVQkuWof2_lxb
z&f%QIa@nH9jK*L~O1ni>o$%souTUCz?0xU$y<2-M&z8LQ?2DY<xW#;a%zWiITP&Hc
z8UguY9K8O8mM<D$qd^2OCON1hKz$5yuel%~xQYawT#*BWSU^mMjKi4`20&gVCRj}1
zVAngGbF7-4!iWJ@UDwoANm&#UuZ2ZvnXmd7h{<?T@@YcyRaOI|4;9y7Q9fH@TGKJ$
zoHXP*H@Go!?b<F^uI+Mg?>_hL-J=UVr>AE;^U907=gL*KtmWRX{~8Z}{nukATX1}q
zsPR&J1sqW;bP(;lIL^aldrCjQ%hYMcBZYG<O(R<ACOG;<ORyzTTV^9gvAZo`U{EO6
z<5xl_ZbQfDfhWlkNCWdM0>PrXInRCeGkpE`eUgv=`M*H#4EKKbw<zW<lPedv^pTI#
zDTV5R=#qRY68n~R)o^<3`OMG#3V-2${pV=+?{T_+hfn{^FYqlt_<h*P2(6r)g+A@6
zN&hdgLm^GDGiy)S1v#04G{8`q$r&OzFI4&v2krl8JbKsnnBVWm*Jgv7inPH&YluW3
zga3mTMh&&pIgMYM3FHuU;<|nqn$mM+R{E*$n2fIfyq>bgv>;<-odKH1`UJe|_~71@
z%YMnL2a2l`sC3Jj@_hN`f>-VZbll_7i-zml0c$iqikn}oEH~c1%V@HLKWjK!ov>pk
zG6fD`iyCb!(S$@byzn@DP#MpYKlRaLm1WhO;bLGqy+AMpcC-byBDhmxzY+?M6T_uy
zGNmwJ3Qa5&YGP3%5vavuN9erVf&tO0RU^*sEIEDqgz4o?rn?*1jS*&2W9m}e_{wsi
zzGf%5chIfX`V&vxLDF2tG}%zH-?)^Kx9VxmTJGIG;%7hgY5u`4{3{yWqDmtf$vR+w
z9xVvH7V3tv8k1;oWm(~(BSuZ34LFY{NSutVFs2f1Me68uR2Z$fVibSuJ3h%XXZIx%
z#}sHUbe0?^lGmr`MpVM)<Y-B}&t#O8xnJ@<CS_-$)ZpN(j2R|4Vrvx=)l+DHQN@A2
z78KZAkfmle4s_zAmCUcLH>l``&m#tfQkK<xg>sR@8+W+<(p%J%2^Sx{OmKl$zW4&2
zc0!%@E23}cOk^}FsY5AIB%#MyO;8?%rwcuC>ew7lL7x!4rL+asHbm3Y_6Jnj<I+?r
zMokD&T&9g9s5xEb@uPQtf;!ykI<W|n30sVyO+a}uYZ<20nFX7t`RSVEwXCzoEH}MI
zo^!w@v`WQAGEIsQe8Bac>{V%!FS^)sVd03r+IyM0=7u$<#@EM4bPngma3*TmFXBMk
zj6$Qz=x=6#BvYWA)%wnzZ@uT{rkErn(CI)P<PgJN6a>6W=5ARB8=|5vEe~wZ@OSR<
zrC<Je-v51n233ryN=v=F&HU_)ySMi#>Vom8=7)dmd-<3D)jfLOaB{k$d-*lm(>Y~T
zOGz0trqGmSO>IiT*bu!VhJbfVCetECkww~G%*`(BT;{dUe~I?&kQ=}GG#~ngPcpfD
zjXMW-*qCiF+n6%j-e6Q$bbZV1yRY-PPyIUgUVB~G6Vok<Nrm^4cZciHK@3CADLe;r
ztZZbJCmruNXR^ssIz2MQH{`P~oh=!7iZMmOB<<}w#YPWJYeb@x2G`#KkV;fcvip2$
zM6-@JhGF_DmoW_YhdQ?+L~E5_aX$Cd(3}p1ze%pHx}a@6%Vo!SisUV;GBJI60o}Y3
zJ*k2~$}{a8UEil)9es)x2`>2zq#i$nhzkzaJ5G=09NyZ;bq%U4)BPL-P%?UL(1gNU
zi%~tnMN}D~57^4mHSfNQ@5Csy?@+5iv<1O7m`Nci1u@Fxsdq%*(68Ebc4+z#(YD0a
zBc{8X6kFR^Qxkh}qHWp+jArZ7Rq9bK@(~Te%Y69s=#+!~V@8!>c41Q*_C7$n5<t{i
z;%bQ=dsJDXOeqth5YfemSSe!f@lA*G(vYgoqx@39+#s?NS)&@&X+l`hts1)diq7|h
z0JbdHx&9!oIiYD**s7$S?g0hu>V*6E?y)>Qp_*3gU4MktS;vjnUdC9diHw12Tr%01
zVzBt$@X$l=p`4ECRA98b#n$z!TzdFHP7aUQzwtJ&zw|1%-oA}FI;C%Vg7df-1}E;Q
z&qX7V{){kbocF^12p(r-N-V5rX<S3fUDbmRK)%;>ssTyN6iIXdQj;g=O78=%iL9C>
zrEP>6Qdd+}MQJUeD8>C4SqB-Yl&C5JsSWA;P;r>(W$CKQH9-`;X-L3ih{+vW!=+0*
zT;98YQkHJ%X_gJMYDA?1r*GXPbSuVJKS+1<S*g8pvPFDnldPiDbH&)wSxs{?r`9EQ
zR8m$YL1`vkfms<&?=4uY4jF&lHA<^--q9(8ZW~;)#MBd9zoNSMz?zW}`gHCptgbkE
z{&SSO8+_~k@-O2l3H=#&KK(iBvfu;X`|U6pqsn4nc*GD;J`y@dJ=x-4|J}dOkNl<o
zi0B*c-hG`bSFiCtEYE%BGkpEGe}^>SKH_7Vq73XePY90CJCQ<wq|sy$ID@mvAe7)y
z(TG;M^Jz$sAU+-c$88+{fAsj8Y*2HPxi0$Cp?wbRAHv6n{|@WvX&w`t!~2JSn{R;O
z@6HtuL(R2@wAV%l*=R<>n-h7CAt>EwwkH$n<F{B8o~t`Gi{Sa~TMaMX?5N8vW>w3j
znPYDjF&=ztm`t~5>?vP)`AwYasYlybU2t&pEY2@5w!$h<+G6b{o$ti#Bu`t;QLbWo
z?NJ3xj0DxA(70~S{M0d?24WmzswuV{%aov><Ku$f&zWwIv7;r*1}HUZB$=wFGW6aN
zP8y*>5-~w2V#-*wshRJuIJtSmcxS|PZ;NtsOfi*Ty3$Y-M5Dllq^%9>jI4KN7Km=i
z_XHQfMbXRjo@VJdym!KH|L&Lg`~U3cc;n!<xCkTDyus032?{DnE(#MhMwKLPF-99E
z<%E@6p`z%L0zw-riwR0unWAaYrp3@PscPOo+GV~vrBy~6+qCJUL_e4*fht9UkWHb6
zc#1fzytU?0oL)^~(bh_FWzILTDXs>E&r|#;2>0M~?`32%Xa@Z|RyEmZDmTMQON^5W
zn$Q(!4ZZI<x_8XU;hg5p75zdcucrscn7ZWBgAZ_acSXD3Vxp%kDykZa%CKxs@nMA>
zZ_yX0n32QQHN|9uqPB$6iq2%yFb^yI{#=|TuU$noEvjG8buC&Kbn^xmBBfQ71*o!P
z-W|W|dyLA4QYvT8ij9Qiw*{1UXlu|EDOM+MB`TT9NZ}vSwMD5E`LNaw5BtX80<1)1
zoOSFzpsgkZ(Zm^R#RW1o%>V?Ab50tRn5MhL6q_Ve$xH(d=cu<T#>DoJYp!*ovZ}at
zlFrd1iSBk@>hKQuZZM9}0~<x9N$H^1fx+EerNY9gK;r`sK5&iqeBCvE=l}4ZbGo&|
zo3|9-`ak(mOjTfwVKT0%>w@FsV{RWDfl_?)ANzj3^b5a6>ldhH%l@q!T-v+LXk(Mn
zc$~75BIr~|DuqwcQIm}w-g@ap9)9ZULBZK_#bh>P_tHh~9UarpS3L9gf0mE@=|9Es
zt(&aEO%7B=NTy`vv|wj%gYW(me~QKaeV+T>FY$>V`2j9I{1DD7j_w}t>~H)cul?4i
zX;0=T6_a_MBAknvku{TC3=(xUn*)jK`?_h&F1Q&B(1{6?>9d2>WQ|P@+Ef){V)Cpf
zH+j0lIWQNb85Fnjg!V@%=5mxxW+Y`s5LsrEA=}IfIal1rT$Jb66{J}@JUFH(3ZmD<
zOPaD~01Al;*GU2ByNGtO*Sfx^?>qX=(K}Dyi<7MPveums3E1gr+m3d%66##=Y|f_C
zRznFv3%I9Ci?P{3SeOg8=m>2uI!X`Gc~tlAms*eP?a(wGD<VXVRR!3BljBpG#e(t1
zjJ|ic-lI){Hkx8oQ`BQ@QBjzhX3^rpeX4pwGjBOPowK=jk;(QR7|DkhFv1bKGolj!
z)|!gN=^1U?VJgE&mlR5T3u4=0ZGr8VD7{4MLYP-hnlo9q;)oskfNy)W7takJ#idl+
zC_~gJI{Xl1qF9YfMykg55eh}WIOXL25v#W4!sY9jdJClrtcCs8Ut@kc$68^Dm32hn
z>6bmhS~_~pj!!r~oU^&RMLnLfxjjKu1r8V?bq>=J8++SafBYfd`v*SAt+#IS+H)`Q
z#!GK-aBxW5H0R>iWonw*37J^BfKOmw2m*v;nHW)~qhc%;QCi;b`Bo#Qd)JWIc9IL0
zE{#be3{t{_P~1ZZG)<su7FfH&7M7|kDXNOf7FcUBN<dDMw7{Sekl^yIJ;eM;L`vH0
z6F?^Nm9^V$L=mE-+h`rw*sQpGeS=B`s-mE2=frk}>J93k#q5sp)`N0#ZfVJLRyvX8
zRUlf$_{s(5FW;b8RM@DgacJ#`(czS3V`A9Yn=sv}@V-r~91pHRS&NSzyK@mc+9LQ)
z0JZbdq_*MK)4$Ef|MZ{5O!n};WB&Z->0dr#>)KT=ee@%Ap@&|QC1Q${R3YG_<@Kju
z<RhQ>2=&H{gBM<-9!;sH+gyHVk7u8GMxx5jIi^RS*c4Ijs~8=4CvkIWzZ_U6ASP1h
zm@z5@acK7b+}?Vgs`=IP_?m1`vzy{MW>RuVOowPZR|K5<vxHAeQI=H#=Q&jB|M!m?
z8ryZe`MQP81>X4s8B}umOo%RGrT&<`&+bOWskeOowK>n<=+Ua=%I27v4NNBmW2-Sv
z;j4=6i;wWiom+h2)z^p^%6g2Zpe|;d%%?P~Ge(mc7(tqKS)!a}9VooUvZaVgg#$h%
zJxLK0eI&+?`TQ=E@dcdo1XEBHQ>-e%%s4xHmF>%0RBj~`9BVN}AqRX>K!iRj!r4lS
z9+Nh6?}bif6~>O}7J+*&-UHpSd2N?^dxLswjG2y5m60r2E1R}5f;?fx0ZswIioQk)
zc<5J-#nF;C-nh%BKl?>~;a7fx8%O(MBxDNNgq6aCL`P9+x){amMFmAsj4-B@LQ5G;
z43h2U1AXjKy285-trb;Or|az#YtvDw$Q}jWmb|e7Z3{8K(4OFhMkDJWk#UC1IR?WW
zq4cV?t$Zr}azk6x6{ajOMj9M7xNAsL;5#Rp#3&jW(Bcpp);WbC$p7VgA|s~*ilY@K
zHAzZ~Gpbh(r5qZAUq$ZSI-sx>#7NUcR(DriesIjj&IXfB1!pC`i}=tJt!9NImVom(
z-(f9`#}h`IQ!YGk1yhf(vu&mqr%WGQv9UGg_~?k8%`NWVxJz?z!t!X&+0iMk=~2<p
zE;_XBi7qfQCC0w{F*7kr)26IJS;;ERCP~&iymu%USi=JuRh2!*dzaX{2_01mZACLD
zx?wdWW#ynHVHs*^e6@feQAL6`Ylq-uB9=qqLzKcV_&n9jl=!4M&*;7&DKf3gwBSU~
z)mkRg$_SM^;zhbliYCc><j|z5HHdtUCvv_)$$Mu^0HTWPD4wXm8I9c@@kjp5pXc*`
z{U>P8Zt}jT9_H?T$Mau&ntR{y2`<0)eRPS*V60+$Ym3=v%6vYjw<X{HV}FvDzwiYP
zU-$|&p5<aiRo7Hyfl-BAzw{bXuoA`^s_`aoy!0H8y#GmN<1zdD`#f-Umj~YeDD#Em
z%~$SmL3KRykNzpw|L`X`-akXxloJhfbI+aC5%=D>O<@h!9)6fte)n14c;N*uJaC;$
z?|GPS|It6m2fp!>eCD73G<TnWfzY;Tl#y3V99)oP9}->Ez=BAPW@d(GlPz(5mPxu-
z3=!D*v(HVb&Ji|=^5^qDkS6>&3MUTigk%CZ(0kLLLrDH~X^)DL5QKds`ssM?1*}in
z_4Id8^d$*Kz@({B-}Iauo=}@ovO7l|Mrp>Aiqe*N1*>L->gA*gG2pzXb6tv(>2W?t
z>_;#6oC|5v;T_RCN^98JnX$7qCHja7j?#4)*NY+1$YM&7K4_zHE?}1prfo6H4mIlO
zCnN0E^j+U$t~_*&Mc;CCvXJ*^1B<3%wl!unoq{f~rKKDh%Bn<_mTFSrbcLx(>d_QJ
z<oM`_ZsCbqv9q_!_`-F9s#u$9d2wnjN}^U2A+U9E%B?p}IXymRV`qcW)&x_Qa&LA$
zzK7V0>nON1@e}Y}+hlN9S16-|<>5l2vMZGK=%6s85vnMpVU=A~nwJR5%ev~@hS7LR
z@R8%YH|g3rMi=yphJdFUk7zr`+2H|g)8M10tZR1mE>exB7;RGau|gFEXeo(pjD*nB
zbS<vw)AV0KRdMM(k8tfhkMhy8B`-erBF}&6S#G~|gKpKJ0yr1f5{Gmjs1UG;iKMi|
z6#8Ur=y3!s#&#m-Nzz0W$lSHu)C<izGpHcbA&ipxu}BFDAPgA1XdGP+zU@$FjpXB6
zLs3<fRY742tkDE*u;^iClN%}>KugBWz={F^WHN@mCJ(Mn(5w~rc2kw8!r*7yn8^mG
z&ppq{D|Z=n7pWc?3y7<7%!*I!n<&koj+l*#$)lI(ZybX+*gC~Nh6t`jSCRVSw(Khk
zxlcPMz=%?ub#pF0@ts1IM!;BQj4)#(EZ%sToyXtD^zpC5`v~2FH~+=Iq$tOH=!bs*
zKe8|?iH(=qNkt4E-+Q#LdHvNl__ja(gS7MeERK(uY;B{;igGljm~F5)K4E-uOD4~$
zwhm}w^x%4lhwFM_B_TiqJ|^r=5Bux-oQ%WtTGTG-92ndSzj_{DvkmHy6?o39WDq9n
zRFI@Nh)k!*w*M+Spq+10&udWSU$6gsE{0>>#U?|H)Nn|F%DSUf>F`G!SI4noJe%<O
z{f;j@a~mI~?9Fz#x(y@K5Icn_4AvC9=dp))y@~wR(=T%K=w5RADu|^*dvU<s+j)Zh
z{bvZ>DWTdzDT~q;ZN>!WIG=E-)Ov!Z6aUx`O_0QqK*V{+d~wKVEECzLonyin9W12^
zZ10S)_k|`BOi46GnlY^@wZwn)K0xCJ^J1Yj$fp&f92CYvq~h%6g2UJDQf$`DuI@6q
zFq8LM7UCqBC=|9X#Zbe@G%>c0e%W)hf65m={}Mm*sbA%{pZyA*>ttON693|{7$kZn
zc$qTT)EJ7Fl%lG}R7Ev3@dig!3Kx6oy2iVbRlh_<kF_<%SfRG0=P6?3$*n8+TgMb1
zy~-j{H8l`J+PJmJ5xQx8ijh|~Mbauy)TAku36ngF(qhE0xjVy@Bg)YTrEN0bTGLfR
zraLPmY=QJ#F$Ym26m*@sB88x*F*znP&`k|mi$=^AW}p#z45cbrp0sRTnBdB+bv5*T
z<o0WCa(e%i-W$9taD78Z&#WrZszO&C#E3C9L60e`E$-eq#1s=I8x>wvxH?dbdREJp
z*KfSZy%*l3yMID=y2JzZc#MjQ(t;^!q7K9^GOlOu{vMMuQ1j;;(}*-?ckBqgbGW{f
z{UtYr`P&nUo;7^RDCkM%O4+eGZ!hxS#%PLCvUPQtnp$lJifu@Ut1yKeKCGul!Dq9i
z{Q9Jk$k7@9Zu6R6mrfS?#tSVkZ(}*vG)-h<cAgExmbl(K(|vw!$4n2sj_J~AjS+~-
zP)tgG^soF?dJ6u~U;R%x{N2y-+AsVZTXn(X*6>^Z&40@e{%8Lg(I|9T2t73hOlg>H
zZjiDW_~;+{4i4V;exCo0-(-GvkIAT`Ejns#g#BP-?-31sq$*14`HKDSjOAj^Y&PZA
zodcT1ij8r};~#r3`_H{fY;MvVpK|!c7bqWmfF7_a<s2e3>ENO59o_r{Gb-_?C3o+?
z$?4$<JJ<HOaPbNs|KT5D|FyUI^iTa=SS>Ii?aD?=0hcu4I!&R27WR!yz4HWB4xW&l
z&s4sT)7pU?Z6uTbOaYDwmCx$tJOK@1JuxLy|5BqI)0$YDDy@sD;Ln|ZQ71iey1qis
zS^s|iyftzk1s}w3AZ43s3^+yTBZW1TMM>MXSYw%O&WN?=)}2$L>p_eDwCg+3?e#%`
z)DV)Rbx71jdH<y?G2>cve5LHK8kuyd5QwhBD8xmzEX2Smh*ZS)EiQPR6P>H~p5FO)
zeT#YXl{YA-BSxbdTMNCpZyjg*D^#zjDo;Hd3Fxvh#ZD_sRZ*BqA~>{Ucx!6Tj!!A8
zlFbX_<RIInh^iof6AGnjVqCGDpV2p-(QL%dl^td~+eDM42Zo|;=@$!J0D?zj(1pcT
z$yMA4a2L|PS0x}cM){0l8mt>zgUPhYG!#(zY?L)<Q=HZG;$@tZn3doi^=Qg$_d=R1
z8BAF)@}8Y5SHN27@>0VXVv<%VHSCcd#j5r6s~(NV7}oa{5vMe2TJz+`KFo(c@iA_^
z{yM+;^S{L5jl1aH5rW1?EoY2F9GzXnM}<S-kZgIat!TD`6TaM#*%?wEXl8?^&&P;d
zMrTUA64{4Vl3Q!F0TVI9J)0ytE|4{k(MP&2(6$R8N{mfqsmhA7C`Gqwq~;JkxlZ?{
z7#eAna??E$duNJd78Vm7KT+{jB=mte20OKU<S+hNVp-z%?=qgu*njRO7r4Uc;Te=F
zF=;d?E2fW8W0XV}+RYI~S>lgYc-^B*$v4+lQH{n>lwhQeF(j~El{Ec|jmQ50c6xyr
z+iW|P^za%T0^R+$xb(5_z(<Xt<<77FD-NDN<ip?fO;pz};YuTozKf7DmSf+c^oaSL
zeJa~C9@iYa@;uIq*`?A7Y~=E#3!EO_XSR2NU@~Jf41ll{nhttT=p86c#_W^=f+NHt
zF;}A0;=>`CzCW8){#&Q#SHt6Lvq2?~9?p{@g|;iHB`P7#V%<Gt!Qt23{K4J!JlQe+
zyFcgLTs3E$4uEl{?<G2K#0-<%7?qgB7(sK((()_MzQ)Zv4VN!o;o_`ft1Pj8MRVf0
zxHaa94?e<v55Mra7kKUVF~LWQYLsXkF;PYp(JP9oVl>{xtqw7EBGgYb*4FfH1wO1_
zdDb)Kra8UMpj6rjLvnyq;GtRGr<!b{?TpsXsddfnY{I05)@q@;8HKV%n&uhw2+9jv
z5F+m21l31CLX?#k>Y>WTvPuOoV9J`XR2;o{#NF;D<BKyU7d9A8Yjjzn4RrC8Sd^Hm
zB6!8Un+N>PZ-0e<{ENTHt9RanB5AAAG%=fIswik%NO2PZq3T*=iB?k>DPT}Zm!~{F
zLKhr;aBPgWnNGHebC2smsS4+H(pShjnnw8eqwnLlPrrcQDDd7%#us^5lJe0qa;ZgY
zs?$U#5Y|&ERMKE2tzybd2w0Y@mP#usU&%(Nlk;(DF;Uz`!KX-!nCLsCW=3)ll~&2{
zM_LJ^#3Ukmo#@~)fAK2d`#>9ALi`Ly7ns6Iaa(DwK5_*|h3h;Aw+?yni*KO(8G6{L
z19n=o42nuyY9;w<y*XmOIKi0~r$(sgSuRSBThDSaC&m?_ZLy0bi~U1%FYb6&Yb=ps
zTv5(8X;*W)cE#aa_t4eGyS~G?&Jnc(;|NNE-=h!G6t+EG+v2*Gu4yEweqdKHM0>mg
z?{e$*P27`nVIPtSmZm7|+5{;z7co(EGes_(yN3wLpu$Tgs!ZJTJ)fJLWP6{#9Auvy
zbo(kz1&mI#f7!=8UW_ENQ*mx!@*7&Stj?kCYwEAc$VO^_Bz#=}yV3SGANeDHjG!&J
z7Beon{H@=HeehA9{hR+4+aa<Y126u@ukqw}{UMwWXcy2%8o9ExsreRC@s&rfvUlx=
zdG?oojn(0s)QzJr6~Q}{PuguK+I5Rz)E=`vn(@YquW|9}WiD=vS*;q%dd#ROdGfoz
zk>~%1pJP1gIC$w*E|p`{^{b!+L<lONodd*4S>=q+E)v3s&Cw2TJ^LE<M`lbncesCH
z#Si>1{&)PVzx97Z%}*&pB&3PP@Y$I(#a00wq!|qfi4Umc#GNOpYggYP-XJ^SlG#N0
z=#sH_E~r9CvV+VDQfn?yuD4VOh>U7&pcRM9p261u%A8y4SwfOuKxP(%WCABqNk$0)
z$C%U@2TzQSwrQxU0v!W72I_H%Dgu{wwo>l2?CqWFX_}6<=~ylnbX_MjU!U_@<+Cwb
zrcv5xR3RFEgD^z!kzh1dE9$DIC<}2$4-i6+_q`Y~wH;+G*Vy&%zDZJ@T&Q)_=t5If
zCBF4&4~uz2(*;iFOUlula&}6!HDzOSlX4QV)fl4+x_-rKxu9)(w6WZI>rIrs#b`RC
ztZTFlIKZHZJz_{zY!dnfzMnH5Z(@sC!WJC9>FAmkjHa4SQ>>X?Gkj825t462LR$tl
zg?h)XV9)6SQp8yl_wRusZZexqqYJ4EOv?U@=k`lt#1wMwdFO^4TZJLSPPDHHio(iS
zQ56O~n-PN=fU-E`KBdSfBY@%N?hcn9y2j7`<o}2E)*-Q%b*zoUDvk1DK-I^<$+F{Q
zv0~vOZ4CG*`@eFM!Rx%I?OJ@_Q%`E@YDA$+Sx3PS%%u=zU23g|393#SSd-1Wa+7Q1
zGmso$Ifw8efscxI6=@br;0#+B%F<F*r8KOj5LaX+>tBG=BtJ_v08z~S0s`WzOkFMm
z-a6j<J>NyWwMV~d&|!rNk#SYC|E0It^_Q8wcMoL)fnrz|Q3WwfN`QqL7nnj3eM1Cn
zqSTYlKTQ;krZQj^&2oj_*<t$dQ<AnYoUqA&G)7!=M)lBRsConCJ5Hbb9bWsz&-2hX
zeUP1RdV)@iJch29X=b5O(cuFaV>rBh$fc`S=uQrqADv-sMOld{R*Zq|or@gYzR3gc
ze-A+qD_KAm7xB(X7HiV9Vg!f~Cw`rJ0`Z{9HDK;LGNU(3YO-qQtLgDI*`Th6o#!ST
z$n*y?dU2ko9@f-bVlrG*WFh-71yI8?4Zp9A6tin^+SpicyfJADOg0wD8imw&Sfly!
zb59c@eBkl-GB$=uRbbi{-M0MTAO8e*PFjBM-#o)t-aMcS2CFru$Xc#k45Xq=fs2aG
z?MJ!SoY5_hsAm_2!e=Gy+Q(Mx=92+N);L6+Xrm;3kuvBsCDB@(>SE~VRwq=W3L9rQ
z*YLq7zK(*1Q1-+!WpNrSC~jdfT1m4Swoo42(OhUhNl)dH*%FYb5=x9PbVS8~j)tlz
za3@R7jvLPK#IoRc*|5Jn<G6L)xqFLOUc1S2Z@<nWHmOOr(wL@0ENYdI%tWV4J~n-9
zxN_-1?%zKl5~z$xSz%(=ZWA)5w5Es_uZ}Qk7p)E3lRb_W2RPy|jgLf3^gOzAiGw#^
z<?)X{MH@oWnLyTIsq}dZT&&MIT&Am{bkg{wsYs&Qif%PgbREnOPwAS5>BcM_Vg;y(
zDGgC+in2iYP7baJ!S{*I8RWV+H5`UgObC)^h=lCM7e#~ORy}P8s_U87g~ZJ$O)N_K
z=-GXEhf5Fbv1$W%Zk*Be4b{}3S7Z9H#PxxwOWya<C%OC9KHZHQbgoAkLAE0;K9tx3
zLT7NnV`5}7tLZz%^4=}z0v46xSwzF9v}3eW<2pqQ1uR<H<2l9D@NPerXQx=_=)J=f
z#b8w9`<}LG>D!Ke)k+R-D!7!+^x1WjI_P9rvp67YMTK6Q4Q66M^rXpXO(!~>PYq}o
zEbbM?Scw@h2KruN#l==~DAa`p%z7Wp^i6-h;TXIoh=GA-s-wnhnf9v$39HQVOnXU4
zO|pC;Q`6ThUvb7%>3d~5Gv2wt$A9pTa_O;0#YNMJ-p$9Jtp_i2?GOJTcYo%eaiKK4
z`pci<v5$U?a(jyqBgR-FL0rhK5%7hG_7PJSeBJka2VeT7pQk-Jrs^HV$P$9WbwQ?;
z$n;{gHDkBhV*kZAxPJXIvvI-dOmpw%U7q;h`*~zC<@P6^B7XKcxYu#|%B$>N+J#A-
z82PD5(^3#?V?al&GICvP&1m~7H(tBJgO6Jtc<4dC{N?BP!T<Pw%g_D9U&kHqV}cUn
z4V8;fjnT3%hNM{?L;(>rfe?i?6jOF+rc~x>X{L`26PtXdrDilw^TRpq>X3h!i$5h}
zsW8xdlPMjErIep*fN9@pC#Z%7HV!9bvf4~0U-DkS6QUT^Et)0VyGOg6V@yG1732FA
z!8f%13R@JQ49Y|*tEfs#U)NNnWwBhcY8qVU2fcL&;;7lVPRJxqUY?0yOw#Mch%U-h
zNNah1o$m$t47FA0*h%AK-hKMr=I$0M^mOft(Rjkng-whSTJK^h@2Q=y=oT%_vVo&B
z&Ss}n;|bI4Evm_kw)M0jQcXt$uLx~Gbv^y^zA$jC09U~|iQ!QeuLXeYT*uifuP~lW
zs7IAd-L#<`RgAZ`F-4us_Egf;C;B1^70&IsSt_D(^MFLqkk~>4-PElAK4!owXLggo
z3MJP<`2pZt>z#w_w*-_fP)H+TV@X6|AyLxP{-%p`3X9$^#DI?u=X#dQQ;v=fIJ$qt
zYJSGqykR!kWICz2y0bxrlcWLd=z32Z0`pbF;&{cfX<7D;RqJVcPwzWg=jodc=X{#D
zJ6VIhW3Cp|mALbkRmI3kGC+s|e#GG9$Tt?P&Cp0{ZHN0V#vprL)CA`-N!J^?yx03g
z(Kq-$vgjJ-^MwF4MZu`5sjG@Ific#Y)U2u$E+7;FlMDg1RyZ9|W#I9TeSk|Jct2-{
zw;5YYI9lLbht`4VsOJ7NZ{xa_?XQ0bJ+h)7P00j`Fzi`rJc?H3;Bu=qXj7m<V*M$D
z>jQ1O!d}>8^FyDM_Z7N9!k10vAmW=jqe~B?V#ng;FYwAg`+4?0@Dz`H@_qDuC(rF5
zDho!;A60aq4XStCKRDo#Cm!bhy&D8?sH{#o%n+kzHr?j+`_H8BvB~@|LUgi^suVNh
z`_yb_TDMwvo+>-_XW&MjWAJEDYMr$3RrdIrY)~N;NT~=L^e!ZPMQ1%!YVOvVxd|y+
zA42IUW{m;=+dqiZ2&ezfV~3;%Qpp;BJ!w^G;mYeX&T_WAPgIT%yzhflPBSeH?a7MA
zAHBe}5B)*@)<611UcPZm-%Dnu(TTyLQiGRJY;DB#HRzaH&cN>F_jC8omod!=Wwnj>
zny5#hI-+iYI5et?L~qjPbP>pZXN^eIffy||g6mF+F;Ms=AAJ1lan2F679FIuXkB2G
zWN~6N+JKtrWXcf<%a+h2=d#3n5xaOX;V?1comjdDCwhfQKnF3u_>HGu;9q{>(=7TH
ztDyC{832Wq1DB8hf#hr-p@<<B<`M5b8>125{K-#n)&xHHxu-Fhq{EWTP!kn8XfPHP
zq12if_Q;x0bYwc7apqQp=!K>kq40qRwzs%<bi&@|CZX>z%B0WQ%CUQ{_{pfHi1Bpx
zNfO#ka(T^-P^R*U*_9?@6N;+`ra&M$`65WA_BP3nBVJiCtcr?=OA{3}7}TsaF_QC1
zYED%U69?aP#6oP(O{pOi7;BjAZ7|uH5esNfTY_(iM$z;Q%ezOo?ig<>>dB1fpTCQ9
zOEwCLiLoI{qeVe*JxV)V=&&P2)Q-vKnAm9=GlBk8=<I1~<RVx!&MTBI7}pa{m(mRD
zu7B6}m(_BGah{^@&{m>{qR~X>@SQ{h1fO(AT2WZJ4`PfsCkSm!6U=yi1Ik;>Iq{FS
znX-Lu`sx!X71n^5Rx)kSMvStIHUm{I_laT9o{Bz|kX>TebfR@!2*b}r3l%f^3@Sy8
zk$8!y6(&j=oKjjMp7ffDfk7r0={@Et+pw!>L#Q-Qee1XJkw5g^guEX`&%wzF8<P>H
ztg*J@J>T|8Zom8zr(ga&)gti4pZQt7?Z^K-K}DhG>q1-;V?-G(^iCNHqsoH!e&U;W
z=~F+$1`di5$}7scz?7O|T7lD?92|4@_I;ZBJ%>l9KtX8d6xMR{wU@a5$b0$t4}J%~
zbn`Zw?TXW*W6qv?kq5v3o9P@luL!0?dr$O8?18mH->VI}D%jn+%8i$v<K*~^E3*wQ
zU%Jeq>-gh;<v-zH{dfNji??6HxC|OfD=O!aO2%pmLLg|3&rxOhjET_qE}eDy9m{IH
zd&Gfqn<t~jHf&k|0Du5VL_t&o12^VIBN?=%3GMmmd#0z$_a@)_#IO+X%&*zL=bP1Y
zK!}fF$Z6E+T-L*Bt&>Jr)317#y{B6)sYWAl=xjQgW<hVjj%!M5u|$$A4Wq4?hiREY
z1s#U7AyXnliheLk)`<8ld8}4gV})|8MXT?PG+9a~D!mVb8?B1xy)53nha*rFmfFr}
zPgmSLTeAJY9@pRdel|=&zwB7--@~1r;QBxx1DCE}V|wX2J~|d>XXv_QcWXn;)Lh{B
z-XS_d=X)ufQ{5cA&=QUArD2(jn3g+~RY^6jC`yB=N{lhdC0r9#zQz*vBdjyMUQFj4
zp@-Es0L7A#KCH7C2X|_c18v@W)|FfS&7Gs63ka1Kq=0j}AiD%dpDFC3$@k9DbuFv;
zoaN~$_YV%)zjKRww+^^-^A`K}4mdqouv*LsePlEmp^JjX^=w_(=E|hxiOm^}a<pAw
z)i@Tbp0icQ@oK^GqG3Mo2od_;<9dlh>OHbo3JM@*dQQLrt-!UBlXebjL1{JPam{Ev
zVl=L)iVCYNF-Sy-4+&6<PKGX8sOg$WP+Fqm5_LanFW1&d{gdlN6+uNyh>^babgMH?
zIl~l&QB_jcHC0tn8F7s+k!<ylqsP+FW&F?up7_Wom>(Q4x&8s%D^KGO5Aj_xzEXkl
zc+BFZBbJ9JT>j*f6kFpoB{YP97_KEYNN&hXQKGGuhA{*p7Uvax)6jM;J74!<%*96$
znA)S2mkjbOLs5?4TI}=!S{EFD<!N5{SHHs5kA8&fAA5@Vjhhss5!&m43XV2}s8Py7
zaA+cJ8yRiR*uQ-PtqZJ{T;dGMj4I2#>A*XbF2(N!4?bd)oUsZo^y%mb;XI{1I~<GU
zoB;u4k_Sb0<Im{~|6YUoRq~M9C)+oh+_9`RSQ+J2@!v6c$O2_~$N@wey=SZJ^>LdK
z;keFIUDpUn&p>KO*G4$f(@J5ewc_>HUf`1-{t%<GW;C91aPNTep60W6k9qq4`j2o*
zGy+;DRjUrE0Zv6`rVs~dhNF)e7DpS!rOWT*?w#jllBs4WU840!c>MIBGv!r2Bt1Yk
zX=0ije^nI9nN}$)&3T@>u*t=0lkJNaG4>`d<*u3(hoMZ$7PZM_d?cS>;RC%sC4^Rr
zpS~X=3J4k<JQ5p{DUV{y9f7v@+`NCxKmXjXa~8$9PepOpi{29qQg|CB^odk}2QBYW
z&|aZ*?`?1Mo!|MbeEyj)@cJt^smrl6(5fIJIio5t*q0N8k|vZ`X*fjVQPUCVlBU)~
z&jn@KKfTYTr=Fzgy3CIQ9i?C*pgm|K?=wNr;H3!7YX_nj8!0u!#1ER)n6xJ`uhrD3
zB)X*1hGZ!v`Z5K&_h>EIhY~j;^fsj>W<*YOo#@2twUoI7iUOkz!E2h<vA4I&@tvD&
zK5&U@TC!ZM7;nv(uNL&dF|KQT?>IR<1n(GaLbGVN`N9FyNy+B6m9?rZMq9d7kB%B}
zXe*7!sGcxy8vOABV+>ttO0fk!lEr(rSkbzBat)Ddgi`d~yRQ%P0b<t+B{IA2YRNTr
zy%%ba4@qlf29UvMxq^5_2>rQ3Dh_pAj%CqE#;r=uhsi~VOkpAlRWanLYmBI<*HrCu
zb)D7<AJn;~IHS00Drk=Ck*)BZ(Oj_RjmQ#_V!{}Kb`?RUsb1n@LXhcuG*Z;W?24}R
zz#frlo@V#aYkcpI{UvlYP6)Ka8iV(m-Q68-ynTzki@Wr`r?L~i<HvuTzx_Y^B9B$z
zUVM>TU-}XceEg$?&`XqpDH4-Gi34kPvy3p=+CkMbxjxZgr!{I;qqN02Pv0+?Ufe=s
zIXZcTTd&>V>LZV`xl?dS@9;aH|1!H*t~1@f$j5%<yZOTZ`|mSa)Ns_$yz&;aum1qS
zNG5Gj)tJ(9@X`%T$|_dll43HVC`asFdxZN3ci6jfiK~|{^2IMd%kIT({_tP?%lzg~
z{}gZi&TmmFMbz?MGVNdJymDWftRX$;LazvT?-Ogqrfaum4CP6uoQW!*9te1v7%b!f
zj5WoVbiHy6hRBf9bOr_whQaV7otgPiR_o=v?uyfX7K4C=XhrN5#so@BqGigt<oifZ
z%Q8gntt1n3-uFaXQW`~B7>tnu(r7W`iHRX49zL3oKsoYD@R8ojWK}D1#CAlCj#%q)
ztrwS6TcC9^0ZLk2*?I+F3h(|F!*bC9p3)ZRqNFUxoE^2?e&ZGoe&9)_o0r(QxWVl&
zKSww3n4g{S_{B@u%}XfL)2xm;K0M{}wd;&$7lhH|BFp)lksV`<##oKjHMX+Y(qhX3
zRiqs4p_o}`!G{c3B*}-$478Y;YUdmE+(eor8%XOTcr<c;4$UQsm>ht`-&e2cz=LL4
zzBlnY1~*jMI}OGpy15T@UC;bv&dJd!`}_OcyLp>?`}f$tdxyh&$DACU(kxeWU5~hl
zr74v%Lgz*@MnkXZ`c`PNXDx4^oUuGy(&K2nr)fLd#?y5H7g}77mT|eB@sNNRxfkVr
zD@#ppG`<(m!zVx|09_k!{gSiO1w~;PRTZ<@jE&ihjjc^8TjF|8yIRrpy+nwmnoS3t
zd%ZNFPH{=PKnFX>J%$t;5O6!;1i^Xc{Ys{*7V5HMJQ^{n#*FHU(rU`W;$lSO`2Iil
zV|?+~Kf{yX@Bw1z=}wPOJ-CpJ_tGUSsseZ4@y5USA{YMPLu@{N1(Wj7V_F|dIgBnL
zv<!w@LDognt>{(B_WRz?=EW;SMu?)^p%NoPp(W2bv?$#}1dHX~XFtvBpZh!?`jh_w
z#oi9x(Gkj6lvgMtAB2@7d64vJrJ(aTZ*g5GhKCw#@>LN4KcG!P-x)#|P^BT_P|*>5
z4}F97t+@DxlqjKqDA}kn+63eoL|LFP+2A)t8mTPbQxS@<K929#WP_S#3Tv&)dTObN
z_#k}x!JLP{aK{RFZa6&GaV!i7b;fwZ`BpWq3(MgjrQc(Ul^C)y$-PCu$A;a_DI3!b
zx)%P;GoRyZ)uCP)(Pdgx5>U(Xbh7VhlU%_RHk&?0IVgRiNO|Y58ba{w&aQGgKY-F=
z%!odYP+f<{qf|&|MAA77C}vpO(jn7ir7Qu1wuVdPCO`Cv@8reTpJ!5>5PU~akx-Q4
zMw$+?VS=rb-m-#XBPBs9P0%fBk;1lpcK6L4(Yh!D@J@0spMK*O3!kRDK?2IPcPJ%|
zmrCNf<Ohd}V7(l&${704bM4|DyE{Al^w0h*peRf&YeOrclqk@_rv^a}o2~cR-PT~0
zCMCL2XiXQyd3h8)JG<Llc>D>LArQOdmKcTZrwBNm3<+E+?i1w+d<q2@vRl_B^A#gB
zNn;c)m8)7w6Jrd?U_qRYW6~QcEqb+V=%b^;=>!0z&msaoI(gsPur4eZ0+H0f2ZUG{
zOjTi2fd>u_j+t%Ec;Ly0Fe-BU?lH5SZCck7j3X8SYf4!+rIEwbt;B$^ll?jb2+H6n
z=v9LWhLQ@UEX72jD~H<`3YBl<erQd^CCai^9^)0EaRlpt7K6w6h<B^slRYM61E~m6
z2_-3nfOmp)lO1l8m1>sXX|3>ioPUl|2_(R2*dd00BUyN<pwA2s#n2Eao#>sp4@vh+
zNYu3u!e9^*GksW@OdCN3C9cjyVwzyaROG}onGKowkoP5>pF`Y=RWkSqN|+pxG~F`{
z-HS>VmW9!%s^a>SPx4KF>OTZirzR(W4|H9}a&^Y?^o+V5ar?$Cu03>}KD3mj=EHyV
zM|k5O{v$SG&+q)5f5OhA53zY^Z!l&FS`7f91f|F1o?SAYZW2%K;$2HQ+5laN2C}H2
z@KlAOc;q?{`JR_weG~P_HTtsT_H%DCzk9@&f9p5+hVT1hTzl{#?tc73+`a!<)MCle
zGq1Dzu@6vP*dbO0<z#|u9A}$n&^wH^OfO%;j>fp)*}kyDy<4xdoG;nfuBplieebD8
zC7=AEKgl!Kukhu6`7fwR>s%%3w+bjD;8M_OjfJ!yBx+=EJ*Kv@536<45Yn>K>(w>f
z?YTJ1jG>Sz9QoECw0{hZr^IWmxkb4-mGdYLYz!%YV_t3y{Nh1M6VrJpXItO(v|3|J
zLmxF-l_{DFFeOTfmd&>vM@Pp5?=iNZs!FEgTAEdx^A^`eV8O3}nCOwUj>h{y?|m|^
z6A-3rdZ5Fa5~WvYr3H8;-3L+Od(r!awbj(S_9&-Q$;noVU?a{bs$$IXoiqC7^K9>J
zb9Qi_{hJ3Eqgbw%y!GnqJpADcG>7*&x-;is{~rHe_WnEAw(HLC#6BzRy-&LN<$#9=
zJmkPkf~na|O?J*v>Xt0`j9eOzRj!$uvR$^PrphkWOxdcj%Cc;!H8XCF)assYbt|>H
z+3d(nf*=Tx00AOA01q$Td{5eYui!s^YwdF$dR#SsfTb#O4vP;j+<Wdh`>egz@Avzr
z+0hY~9()kp8)17TtD`NfEwM$Fnx-5vlH<%+43xS@8bpPB4o1?Aod)}A`OCUBw@6G_
z)Jif~Pjt)(Xq)nO^GTZa)a6XwxcDBT$NL7?Hq2)=)59s_g9$r3+g!bQjcZq~a_jYN
z_HXPnnNFC`8ye@5X>D?&PM!;5G^SB_qBj^7@j+@LZIiSo$o}V@V_rLElLLa1rqcU>
z2?23pAPtZuszg`<Nq>$)D>1o5X{^=E8`tSOy$^)osDl^8EHPbL4{lO3p0?cDJ*27(
zE5iY+qZL+GMhsSmC=|2toY`zj>l#D_lv#qZwlfB!l@V=!6socK5G1CARamQu6tYkI
z$b9aYPRHz-3H^S>O25Z&P%-Qk-1pd9iMphzYo7kX6TJN+TPPQq%pJ~2)^!F1qK?pm
zvgxt+`77*w<5lkb_@kI%Pc-@gymuG{ngXpHxR%*$M*sZ196S3k-u9N*j3H68K@fb1
zp2VJMu>neNxb~GVv%hnbcm0iD#FrJ!C&Z@4Y71VB*^F!Os*;E=pRy}rpqWYCRgVv3
z7(+58G>hzKT?7gnnT}`ls-bAq+ZJkv8_zIpBLG61kZj#T^il^HLx?u%pLNG#(t&R1
z+zp2#>9u`#J-#O!RG9>aE~iQiBE_)|=mgZAYo+|<!dxUHsX+Gy-3=d;25f1=U80|4
z<RNzBLit(Vd9ZnUm`;HfuHCxA*74J9k6WJn=96eMBrp<N@<hRt8y%u*<Y%g4VdamP
z39xrG^_*BA35`R0a1O7uxVlEoS~X<c91@Mkm_D&u$Im?~HmFS3QIU|d4bjA8JQ9NL
zXtG*EPka8#5B(qq`!90r9jBS8Ic<<)qR?cQi=@5NWD^BtMMGi+J&bn*2cboFVlNp|
zF~{$8s8W=h9NENLrFm`dMrt5KioTGdG{%G^WwasDj8qgYi`AMYHk{eo!q;=Y@!WF^
zszIXS(ZwSOb2l~YDxqG9MijKh#kmxUbZ0x2G0a<sv5wVI$+=5+Q5!|$1Eto;=`EDD
zOlelips?vWrA9ToPHWN1T3ezM4cJ(nT>Z2hq$Xgk6>EQ^!6c+zCx_ki;$lqK)F^Z;
z2qp@`6(o+rm?&B+vU6)UACNvP=Hf)7O-0!&ps>VNQ6G(&2Ftg<`fVP1?<D}KKvuuv
zjfTCeJM;$?(FRmeP?jr1rEx&xz35t^!8;48LQzW;k}kw_qqNN023>jjx=#!Z8^`)o
zHZsu;KP;$9#d!aa+0HILDw;M>77^tf(P-vT(K>KpxzKvkwisD!K@J|YLMJ!ul+U!J
zPmhG)@xe$;$g-(XYCK4L!XkDfnS~?=NFBgfD{KU^J2_o3vXBm>=R%r2A3V-GoOePI
z3o%96q&!t6?*SiBK^l;tGW|Dn_hDBXDj*nc)8gl~s#4jYCEu9n(IK%Qf($_dHT}&E
zj-5Qi=}QlC?*~3WFg;Y1=o;re&3s1NNW{o&Iz=f()$eih`c1Y@on$tj^Y%}Eh?hV6
z=d{~9tobQl`t{%BkstjeXCAmmUZW3P?2pXz(w?p1;BZC}qd1S2eY_EuOsy2X{(#F*
zJ<Yj$@8Z<O)10{TJO|?`?cAd#W6qyEPqEtY$~PY8uD3nL1E2f=w_bjg;u|lcXAQR=
ze~x>8<_<zpqI-SXc1k(u;U;tJuw?c0ak&SL!fMNC?L0d-4p`gT<lL!K9F1oTR|j~b
zc=Q7w=Jc6M{J}r^e^SxlV=IMTHw;a>=9pc%eMg@bN^ry`)CS0mT^4Gm#U2~N^0}2+
zL`koo<F8cSU&Wz&(R(a4DanE9w)nO@NlA#Uic3`A5O7Gr=DbgF6rRRt+TfWr4gEnj
za}lGkqsGzi_0hm&K4UU%<UF*2c@vpVTgs|Lr}(ju48cNLh?;y|<T|+!a7}}2rHRpK
zY@%Mu7uBhG4X9QTl1X0hg6jI#5#pO~lGNW^7k5@jObl^gwK3ARo+~e1$2BcMSJZV)
zpwClZ`xf4o6g|<;9zS)OvM344P?QGSTc;C?FG1Q%L<t#%2bCzZOVXy~;gNP1=?J1)
zn4{%ABRObykZGo2WMWog`lQW8*d)n7Qf1~2!Z4}phWUKX>|o6J;E1DxG21&kT)T0V
zYu8_A=lV?!_a;ma=D51WC#|J8N~;tj2R<yxy%b^um{7M!&LdBn?&Kb*bSb_NKB5vM
z!CRdoWg<4Y0}FRn9C)=7ov1ZdYKX*&5;tjSNQ0Bw)Fpa;RTkL9u*-S9t#2heKD$td
zh?_e?t!QVCqrC}LQBYO|{Xs>)-={yU=*K?wY|eZ>#kqFj3ZL9}L&{#stQ->)5}gK*
zK1B?oEGoHO3xgpLTF+rU<6wHo#;C_HeB={Mb`Ka8J-+t%*LmQchnY?fnbcFFHbj-A
zQfc@UR3sK2t2OE<aQT1uGG^6q{9X4@oEXvTS7<zbHsf%zhZ(GJ{IMTG*%97UUGvtd
zG?SCLRw~h-B|6F!8si;TpLhz>tGM*ZkJDHI9pZeB4ubF!94c0L9|_t?<WXAoM5xD8
zdi@^lT;i=tt+2f^3DHB6W?6csWO6vc6r}(IG2&;A*nn?Ca`6@En&1dlBrjRekWDfX
z$zc~znV|5@Uda9w-%XG2$p*D;eae~=tybd#L5GeuNpz{$TDElV09PPswTL(;6bbK>
zF+yh;5jq-7gv66)A(BQmM4z-BK|A7H+<LXr5E_hbVKOBi+vH16JVjBh(Y6t26K6dp
zI*}kjrlS!`<KkRPJDU*NDcZMK5VuVv;s9O9IpJENk=89eQ!w{O;s~KvFx7EFvxkl~
z2AR&wHCi~K>4bs?v=}`p$1i>Omnmmc#w#OYxk?=jP1DCUwa}|zp<_#jm{|Pl4&5yJ
zmE3^N)3!pRh)uhgs;3SqQwycwjyR<lPv?ZFDM%B_j;g4W=w1%RnA~c$64bYi4X3uY
znA9_-(}tm~aGs?Q>?m3WZOK`qA*#-OIi$@{sJ+tIi4msIH!HyUnoIZI2W5!|%3eXw
z4AClLlbdEBc8I6$8r0aC>8ON&wywoki!nAeo_TPsq0Q)4>X17lrTqP-Lqs!GN^ln6
z4Gv`UGk71n@6DhcAkUeRH6!#TqZQf!eM7M_z<G}!H(a}X#OkSC1}n!n+;16KOQ|a6
zO-tkEw4oq`DaL5}#SkAXK?R(07#(F=>>Wlc<}T2BM^#SgGi7!46szki^iItvj#ep)
z9`&T<=&2V7b_~&DQAA^zwgEqH@o0kb%)bN0V+e6k*2O4f1D!8l8Y-Ek&mxE;X-Ip5
zPcbkH^dX`CN{|_qJ`159%0yKqb0cwiOtkK0<0TUFAl-I8QMtU6F!_+2UbQjULJBEC
zOby<9F<?s5b}_<9_f*$#$7H{lnvnE!jPQ|Dvz#2F6WXeX1Z`-o=RF_$aW1~&9h8G%
z=T=QnH1!;90?s>vYdP54=V<>B*EY2Cmb$4~-B`t;m>y18*;vCXc<(R&HJ<oK|1b2s
z;ozxfnLqX}zJBv{?s?}s8Lh5iV(9W_LlCxso6o4m+h8NrXoRvwa*mZ)j96OEU%H<w
zPd&-{9jCFqRR)D-I+?O|a>#J&IK$O-ZtPxR=hf#~J#mgZKmIPRZqEoecbGi)Dta=e
zIB|xuKVo)RqjiP0k>2Wn-e856fWhEH%gNK5y!6701Rq%281d>iUgz|wV^U1U!10Uc
z_=*4Rf6DLv{l7~&4|2{#DKve~U?ihnszXkK3pf)nMJ`UWi-O$TnF1a{z-05l?8r;Z
z-c_<bRU#ioolB<<Ma?;oi=jw9|CYWk|4HR^ZTS}Gy=dB_?+)LNqog8?K^Gdmr&H0K
z-bZ4(#T=8=4I&pZmBpZ0wo1|MEpk`!vQAxaf`mB_9#pX6Dxc$-lBtrQ$r|Mio(3P2
z`AMr2ef7=XW9(p%<S2=@uMAd;8KTmf-atuyZ3yyOi40c<96No6s#nQSS1GiWjC5g1
zEj6YKQE}%uA%6;qIg$VD7tLH07w?Eq45@5-BgM5efT3#!BP1497N*F3pXs`7J7+o`
zGdVb7f9Ht(okMnR?y|GJ%kJ(ic6WCf?~j>JYU;VCZCku=I-|FchO99xuhqQfVrn`U
z46$6hNwXZ$Dx{_uic({&$W6R!JI80O#pOCgMQJQnD+(?2dTT9ZS<<U2Y_S-6c;|4f
zqit(k>m`#nHJ~mzW=koM+@sU=FAGUP&}M*A^7%H-(ac+U9_LezZ^NW+na>RK>6}9|
z#TJUfXnMVhs$bGK1AObLr*qn-NsYKjp(I;*0mCSh&VMXf=B1NVMCEo$MM?!xM~Z=E
z{n#;XT)j*k;nf>^T)Q>r@)y3rXfnf8B_!Vk(+LHH^$}Gh7Lmf1m}%t3A3cp`&ibh}
ztS&fQskrp3zlu>71}C9~&`fD&d%|K!bt5rIJ&HzoNAOODuOUz$9<g=rCCqRn4Q*S4
zZwSqd;AOaEeL&fk7z_l3cC8pfrr4qJ!Hlx1h_$CGz=nV>l7ZWjGk;kcT+?<L^+Yh0
zB?h4@hs^(1nt&3swq$Chh#d$e$gLL!V+T&9-BV{{yzj2Z_hf^Lwt~ckm?n@)IP<w5
zO|BU}_{8xh>3SBi2|1=fC{>ZDjp&#%bP^ZelTp=9fsyDbqALs-E!2*xUt*NTyE!^G
z+;z_dUca`@3opHf?)CAlB5E(1o<vEO$zMR*l-(IUO+CiXkHC3KB1Ntilr+{MO+IxO
zqAt`0ncTootub$oFnz<k)!6bl!3SD@gpM^jp{Y4HE+(2(-q7#gx`Q)=Rbn_`k2UI9
zU|4DP_gZ>`o@DnafONR31%lcgjzvTVi>WMjSP`3+w)W}k-r#hql8c`UG+xm}M^*L3
zW=IL0(Z;OTgvHD^(GjB}DA6<?TV11ZHREZ+sOZyrFB`Q^aTDE`H>>QV0Mgz6<+Cj)
zFj)DUC7RI4T261Rv%ay(+y``sB15sV*|@IRHKH->u1&f$#w;k+N+(p+Bprzou>{DG
z76Qu10L56*L56gwChr76%8ZfbBo#Z+M05}oE%*po!;zY*7?;;_W*W#k*G7SrKx<Q#
z6ulmH+=CkWRY|?;XvRA*7;wBW%<Gz+D>w0>;o)~Zz?G{vncRAv#!FL=5xt=*#55z8
z1u+=<WsgcJs*Ry!-fgTiQ$23p+`|kF`}<Q4Ub{+L&nc^+pwil)ZMqf_VrcO?QnqjY
z#vYTib!PDrN!m{%#aycCRgs4$Y7y&_h9H^Fu5?WF_ashGB=j#0PO@$@BepK-0QEvQ
z7qT;DhP@H(XGnBoF?<p87gI_kkkCKnK5N^io7CcchWEwvD=NH_^&^hcCNUD|P?XNf
zL@8A8Ne8X*8dlGp;QN2^Z%~fb@oivdXNOl_c%D!@qF0o?Ui#jMF$U)XMPU-<GoTC0
zt($wCz2gG6UcZhhG>i;qAGwET@4BD(+_%^$9oHX!ocH~$|B$1DYdrV-Gu-{~TPgZI
zVbUbW@Ot+qHq_{HM76R`v_;x6Oro=Uij5(6y!~Aq+_=v8>J9pv>+}aBR!^TmRTa9h
zoFCrDt(&hBn?vrr>jADDe~QsGpmpTx<4<w-Pu_(s3Pvj{%!Y^f;29o2iLq83w2{4j
zFsx{5k8cBI-_p!#eB)77){+}W$ByyNkN-4Z{#XBmB5Fc(spik7C3(-%DaTNEG6$_~
z$}J5`1$w-lE}jwcrT+)Dxc`NLmY-87>M9RrGW|T?7nv5G_TSF;K__nnG9x1_SW7XZ
zzKi>+Lk)tGc%=xf^BtWwBMwAasu@+5qddRA#d#<!h7_rk&P5+GyJ6vdKr)dNtzOE<
z4xTJC$vK%ZJ%{|5utd#&^Bz^dpPH@(!eWx%-Pk0dNm3A7^%$-W8LW(w<~?GJlDMn3
zrNpC@XtNU;oaO2K^6#>yI6qW(tsOZYN@0?vLg+s0jK-2&W@Ajl=lP83WX!?-K0CLz
z*}i^@TQ_d7ePf5+ojoRpGv>1f*Lvo4E1z-lM{sG-m9w5>a+MBwAlI=>^7=~*1L)Wt
zLa&W!e^HpiqLsl|jY`?a))s`=GC!IURVmr7X%i1AqR8X1z^UWM*;?Pg7M5tF$@AXf
z>Kf--yo;SON*X&dpKD15rj}^SF%2L{gM8738m;IRmX$#d))NiA72RvwNR(M{(AG6h
zH8hRJD#dIvM<M2gMX#V&_9%xV@UkA7rcKq2>bycoa*U9W?+_*T+7QxEQ)7&z30T$g
z$a~&~>Gx0}FjyOLYdYiie(&G$flt1lZ(jbloEXnoANDXRh@68Qjh7gR7J`??x2h_v
zFLAe8ipo>=Ek0^)y!IN$``78;^>&o%(<}QjtPLKtC&pIx8lMaaNrMvCVh2OO66>1i
zYE<xeHz(AM<R(WYpi}G6^SL<F7X>;93Lk=EGMiIq0XUQgN-#wrDu>1kLTthIN_4-Z
zT3f{>vVZiFX5nHjgJPtXGm7L1A`P~WDT_8{S)uGJp_KucR=Iio?n6)Ed$d6<N=xt-
z?_{cJOqx+;0Zde&d}=+qG2!i#<{+WPt#1h-2xnd89xLlsQ}dH<;*1!`^fqaKLcp2=
zqYFZ`v|)yA_jvRz5A!eo-Jdd<8lvihlcvx+m(YkHH&6;i4iW0vgm%7%4qhloQRr8J
z+z>i~S7Jbrr&Y)#EJAX|mAub;F5boIvlp388eV(-3fucT#G+5|YqVZ6{H$$hy~ikz
zQW5Pvy;Ae&TYs3F-~KAc?s_Y)@9xnY)Wmj+l~ZT2YAjlS<U)&*8(Bq&#-<6sIBdqE
zKvzKo=))d*=3(w|zDZ4On#gKJ>t#wEW5CsckG$vO-1>vxWI8_*&8?0^7w|fL4$D)0
zr7#+LWx-%j^4hH<Ow>%<8AfS>(%6*MCgy{3@EOw7KQ0i~Tt0<Vo%j&31aX0jfgUY)
zo<B>27aRB>R1ooD&}o_|lu%*5>5eXvGdh-Oc$vzHPENilVn%c^skcp3UY#P9q*+2}
zr#fkgjY`Ht(io$?G}k&1d~Utv3AM^-N2cYaqAnEzQXpBXvX`<8HFaSzy@Ey=R2y0E
zmrVB^H*ah++uJ9^Ipwht;YuT}%rT&Kz!ZIg=`owkh)T11<`gH7oyC?b930NMarqUh
zap3CJ9U2eqVS_tpiN0Z24k$MZnw6H|8@!Jk1w+$%aj4dT);Hpi|0X|j{%A(&r9Ti9
z9P-Rcp?$<Fp?axQAa~4)tpCaS(lq@S1HUENK!>i9SD!SF!Qi4yj>U=7BN{Im3W@QI
zOO*c<byZjjTVRVKIYTG+)lP5NCiwu}AWjV3<o(3V8i_jXZ#pkdX&eG6GQi`F;lzbI
zc<;~r5}1O+gPV+2HrU)c$x45P%g;T>!S)W#d`3|e_~<dAz`H;*Z-fpVBc`ZW+t}dn
zXrI;9HTG}qG90X<(Y)sue~~}=JAa2Y-7tIg6%Jl_mbLruqc>dVYk%+uY@It#Z)J_m
z^QS2l96t9A%0Zv?JMO^rhWJdw3rRkghLdVov2o8uF^1ENL1Yw|m{>SYox6kae2+m-
zbN9O+=K9soqKV9}?xOJ+ZCKk{XTCqhdCzcdjo9r;Qsk?`V2i5DA01W$rjrSy-YAv(
zif9aLr_Qo+<_^YJE<-}F#4bj4dSTK)LnI9`D<e7vc~ps1mg(AQnyRw~G%huix#3fE
zRP#Li=$xc)`+C_Ma;A9xH4kSpJ3#)pU><Zl^}Ji_#d(}1BKZwytvf_v(Lg1gsp|eH
z@`2lAI9V7Og}$jYL5a&|=O`T#O+DRz;=LI#I^vxmbjs49l^8=dRE&so^_%^u`h!GP
zSVYpPgwp8bA5atpgVB)w>JV*<G^9=jL`o5SjSn(_Dw4@!kS=fO+AQsBG06FsjkE-M
zOtjr>x2I6ZAkI5SGoLb>PMI7{INUpAe|MkVTRUvuyvgqN4tu-%%%(Hi+7TLILHUGk
z`zUozh!LOMw*$$g+#{&9654AP6?C@TdCkI-Z-@*~L}g@z5^ZHjE9nj59_?I9-L$yi
zIB51z!2NgM&7q4-YbOjW6%z%#AqJOFuRuAhD27FHp>?PrntQFgYO(_`NR2khyh&wb
zZ0U?xWKO4R8)L`jQy~Qfps5N2Rmti~UzlXhQO_G@vnCB98bTC$vx<=rG;Q57)iaEi
zsJWt7(qHYNvAEjf+8Q644ww<DaUO`K%vB@zjY`siM;^P6ANkw=5%cj8`v)_0ugBeY
zU*y@RUuONQPjk-?{Se>$)E_dLPB>9iIFr$4jZsmC1wb(1wMKhGunJU#i!I(MCWm{d
za)akS^C>P|eTB{UevA-$NyOkg76HZ*wMCU94B*{_IB^0@wNo*xa*p5{Xyq&<4dq<~
z>%cT#d?A>k%TnTMlqHNM{-+{PgqTdyl+0v-Xk0Qf)VQ`m8;e$gjE5uzvqcHEB^oO<
z=_oa1NJAG{A129Z5;H1E5@XD}VyX-NXBXe^j)&1X)L7^YMVgqYJeiGxEQQ=;=FM7m
zy7VmB-B6k&Q`khKlEXj7u3Lqe+<j9xbztGbsx`RQ2{7SWY&_)s?|Uo1@tc1{q)OgD
zLUC$Cophz(yfkrI2kQBp`FIcQYbvc%qY(%=S@fhP0!L@v6jRWQu|nGoqK`1zu(`3p
z*$Wp5E-)Hs9=PXDTz!F;UVDXWd)F~4;2YWCbaIIeO5EQ{qj~##-_7H{_Q&x075b-F
z*&G~aUT^dK^_oZTJx8Moj1Ls#iF`n)BXq_+F(MkF(qNn=^eY&&Fe%Zs7n|^G%Ap~)
zK7~(vf{vVA?eWe_@8&b#{8OPIAyZ3Dz$bl+N_qfm6vUQ`=P&U5i?61b6))lBAz}-e
zZs}++iCV0)Mkb`<KxN&O6in6>K(gphu`w#lS?~84ZHzdy1)*I;Z75R1t2I%BS*BVk
zmJA|t(J2S5{Cq*JqB5PMB`O7^>o#3evv6)rKP~F?H9_c9TIWFMCF(Ld8TV*Gk&~8C
zC1@f?peQ8+ml%+YTdlA~AqR6|sD_quYeXCt^an$ffy>Xl%w)We2`#9W7oL2X`F=xb
z#jRB%$mz84xLDG*fnr#&x;kQiujch@SMVXSyVIbjQwkgC6=L$`f<_mXL1h`55?u^s
zTB|Kq6=*9316m2q^G$vPpPITQe>G(jZ7MZ0DrvoR5bJr#K^3E~5Tr@XpO$FjG#GLB
zmcP$ygEZw%`s&;mXF73|eAH~3CRFjzQGfGZW~@Ocy|OX#e{mFc32BSz=gH@uHDU6n
zu%M7H4S$r;-94@_$*4k_ay1<Bt{?pwbUDN|Q`)(su`_yoi?W7|t>d^^&3rybqbRBY
z$`qKPrJmOu?(PvZj4GSxX^N()2{Cf>#&zzz`)<}wt+Vx(w{iHjuTjpXT>0YTY}|Vn
z#c;rV?|d&Wec>~xzGnB@w?*rPWAnm!R?go+P)qdJlzp8e##ABudP;z1k(({&giN*r
z!@)YydrsbY7cUi0veiz|^O_qkyvChx+ahSq$-B;C2L&Op&_5;}r&5MqZ$Qp*91e!;
z+}daL*y<A63A!*m{GJc+=dZj<Wz>S%!xF8wbKp)*O7KMGZ!^3ReST*nNY)O9kbD~w
z+9+!>c4u4+5v7tFYwjWA;%CXaTQFC;c5TsrE;XmQ*v-^~{Kcv9h?zaIP>G-rSN4$h
zMCft?Gixa~ARW(++$77v(=l6ff1~9Ori-0p@qJODd>Rhv#XYKYidzh_W>iW6P`CGa
zZ_2|~<w9$mntWvp+DL6v_AF(uqUiO&$_oo2bk5(B2Pz1SilM6wy7oRthIC=bDxGh(
zL-S$?_}0<PY9`|;<HI8kb`RLOwacxWH@I=@Cfm1mm>f=M=Az>belfhrA4LZR<k<v?
z3LB#|9ogJ0WpghruEgF-!<sDpNxPa+;?&)%3aX-@u!Th7Bq#C4wbV_+WHzC0q>0l?
zQJ8{WQLx(U(O+3*Fzj>e*cuR+ZSTod(&?VkhTy#DR<#m(r|w)Xy7j-{ZV3wAi5auG
zO<tC1SSjx(-%C=XMbicP$r>|h=m$iMG(cLSWXi%as0!9r`#2YvO>1V;IZa(BNt&Eb
zDoCyCoTHu9A_FT8Rawzn>0z{$+}oyJ3@W8ZOHoha)~<`ldw%ExeB!VCRf4TJ+<TE{
zzV!+h?mWeV4?W1<{t=IV`Elm0<Gzo6oU2bg#mle0z-d!5C<;8D!bXfTqA*K`ThNk!
z8x#aeR1JPSq4A!F-v4o4{Eh#Xa^5m}??>@fzdH|oJ)`xWqUxcPBbWkJ4AE;NOxqA9
zyKvaZHEtcsdqV4Q0dSH~p}c}n5Sr`;+@K2ZzJT_SqFPDAI+fz2R5~Z*NmNR$XkA>$
zR`8OBkW4L6po<~_3Ng=o1kqQCg&?urGWd~(A~6S(cG^XG&hN6v_hy5dVmlNXmrgj{
ziHsMUv`!jJdFTYGFHoa=jO(PE)bd2+2m?AwUN#Sq%{5zyprF+TTNpx&jC%XL|FMVp
zhyU&G<Juv?8)9@Q18tjVra`nhrZR-qaddEtFrU&>O5!ZgL|mf4_(baq5WO_WshLu_
z5l^pGM@^v$l<9HLJ@=(eN8CFVV0)Se?zzO7@oC26DZ`Z!y}qSqWm6uGhG-Nk7f-Ny
zauw=`@inTQ@wV^(F#Vz7+RLx-jh9~LzKdtELru|(m@>JYDxnLTWPqeIjnNu(VX(cD
zc2J;4C9W1tP*yzUiF@=A8<0$_hPOZXAYXgwadsyA6e?m-G-%Pvb_jQ*P?5XOpJaP`
z2k)in7f0s=O~yzFlI2JWj$YsZEE*1hNio`hu86uI#*{Uw!G#$oj-NnVi&2GS#%j@f
z8g06DCQfxZZbNq)uGq5YCSi$g+!-AtrI3-fK^%v5a`(+<6}f4VsjbllT>@<`J{%EE
z2(2`rDq^C?Co?Qv2$eksnN;VPnXZsEQE1uLGN0D;1|#s6VgQ?`*I7NjLC=)D{PfEl
z?%bqp#}wAk#DMmO{z@OMEj~s}ga*U)-G*LogWgJ?a%Gje4s5KCICuIHieaB#RZ{dc
z{mNnrG5aVg)5RocvuNn`8&DUaCA|6QhW9DnCrveDjL_z$G_4;kbah8EXHLa&a6aXv
zf>Nom(+P=9MY1gUg_%Vj9OQ~U=E*#CZj2#KWL2hX=b|aC4V^NIjgcl*D<g+uFsQ64
zi^)Zr)IcO%mtHnKBVFUD$${i#x5i|U;+5r*kAISKWlLN;<;_yp4ZVR>uGMfvH5lOn
zBDV;N6K75^7_Cy+g7Lv2FTeOAlj)qzt#y2CsGB*x-hjPZw>W>{PHa)|jvx6!e)}t5
zrC(23|MtsFuil_Kf0q8{fR(|Jk!`6)BZ+qz-T`HqjOSv9V5k@+TRKM4M$mnZ8X%g4
zl!x2md(t$E&9j@FdGK!9XP==Aj-6+o=FWG&L-Ozn%dyiZdF|y_DXSst8zX#r=18Eh
zMPlMahQpF8uiYZH4pYi~0ufAL<HQ;6dB^*?{PdU5G2&y}`2@)DXwfL<W}u_5W=2N$
z+2w|N;ryK6PVByt1*1ce1|K@D%|#QKhk@NaN9@*JhwO4&+uAk3ap_j=&QM72L8T@&
z-%q4yHr<)uEQkmBRi~~onZil`oAR9VY-Rc9rRF>6G>u3|Z>||a*6pig6crb2290<f
zWasEN`(cWL&UH4GuBIqZr9`3FqCk~}P=KS**zq1^1yv8;3sTfH6s1mX%js%F8Cv?*
z<J%TDZ<tS8rblCTcXrvnd5c@uZgT799(y}SOvgve=5w05O?y{57xEsJO|+I+Amj!t
zN--Yty2<E!1`IMUDPQ^wV`qJ2T7*_>tT9x*A;UqBLBCH~7AU1?ontbeGoDO1I5=Q3
zpW|AGR+_>Vl-AN4tk5g1fDLI_>)MvuIcA3kT;JJ7MV$t@>HdsJ;}}gxft7)U?9r)(
zAf=6`jozgd7ZN==n{#ILdHG(DJu^G8FAucyJ)DOjIk&p2WfSYeC{0mXDqFC!GQc^{
zd|uPc8=9sOeRV)|(s9AynKgmgbdEKe{-DorwI^nx4t&$%LW|XgQ+KTM<G=dz{Of=C
z8+_td{wmrQG?P8{cE{YfdBpCmTU@;F0p9(A_wjgXc>F70;l}n&-tpM`sW;bn<(p4&
z+Id!u!naUnc2^XaWTO|_-C)6Z$LwH_-s$_91A}+Gn^*qe&$wHSC_nHa*-tE(vWGi7
z#Kk$v6hs<|q5xA8iwd=VhGOM7{@@njrg)s76b6IA$!8fI1cMC<?E|W53AF<+5~C6@
zp+TzxqvU>yO)EohqX3IF;Ab<5KuVd_IvtO}Sd1-YR^fu2_jw4@8K5p!Lv~5mO5qk;
zDV?d<ciH26vO&$}Iw4JJv+;~PQh`T4qZfb9^dvDk$(x2w_WFzQtI&lP0_KiRl_)s!
z8szVcu{3VZN`J<OKlmvB;Q#Xn_%IR#NJqSji8}95UZ^6bQp_j&Opo?3p<!SO4AKaB
z<%q#aLz?(oF>UbqfXzl3Q5*0gz}Kb0X~!cE-%nMQ;9EIN(xzyPv`m}pn{2JFF&vHP
ztrqCg;$n-@^7Ge*BYK-F6fUx+OP<+jdG;Gm@xy=h$2fb>ogCbJlvkdAmc1)CI8$%3
zb<)t&y<`ZK3|`d2fJtd1A@(+4dj(CuL{%PLdlqH}pu!y5NN61g2Yakiv$ArY4?g&P
z{M*O>b=S;>41^;GVo?}|J;gou+|9F}{w&@UM2t*c@tvce(K5I&TFZny<w;6Jjap>i
zrS+ndJEo)+H5e==%z5~pJJ?sqK}SR>n+|Z3XfkP%FR*7i?D8N4LEKUSB+((JqipIV
zYLNodh{2Lp7*ixC-(>6*RYF8;nxsqdZI!~gWEg``iUXp7)IOx2Z4&yfK#L|%X`3dx
zsoCyp%3hztgBDXbs-dBAEy1_6^M>)^0f$Ei3`Tu|8WFu`rLXB#CA~qP!C=H-R5BP9
z42C7Weo0wrik!t)*u}L`VA3E%<v~&E_Eds4V!G)i(^V>*kea;^{ddHkJ>)5W%)=}a
zdYX3=P+BNGIz*K6-TDmP6Mg6k>WEJ^6lsq!8oX$W0TwnD={64}q-jAh&UuzGp&~L+
zi)px#n^Th<pc5|I8Db^mtfL&MeCr8npNe*)6x!$or5Pm#6X|<(lqonyQJpx$nM)4~
zO7A`Gyu~P+&=L9Gs^6m;tun72v1xEFvcJ8{aBUTIWVEry{f|7tx4!W#M+alpH`i$5
zMDAA#4)zZ?cJd^nlN;CzcjKQvpxN8!%4fdF-M{eDD6OfEZ!+8*QLV2Kqak?TA#It(
z+@XZ&cu-M_(2y$Tm>U;>&dwy8#K^tc`i48+b&0P%{|q`nd-WEFS8lO-_JkBH#&G(M
zbG-8GORTQ;L<bjR%C0tD6V)G-gm%v4;E<K$n-IY&!+buc(t#iN`Jd%?4{vhw@(Y3p
zXy2{Dyg)K_mzbIq+2p$BuOka}hh@fo@_<V%HK8otXZo)U%Wl7?ZUfdcO2nclUku&4
z@3;+RUH*HKEi4=DC30tKP9=X+Eg6+%*Vv2*XEbZ!CaYDKR7P%|$@fb}RCDXF+-&9s
zI4mK)3)pKJbbZqw&S%p+Ed!-||Jp=XjS<(>c&(CKs|>>H`HbiTwkW74Gxlz6%XKj@
zI~p@S*k^lpm+f1(*uAyK{?3@m(VW@1rk*=tW29>zmp;clNyzTsnR4CX^J%CimnMG(
zbi<c!f9v+l#SkQ>VX{#gt1N?o#F>mneTKt6gW-U(Dgi~?G#pOG?CtGy<K`{K<0*C9
zK-3gQBn+FQ70GB;nw;6|y<^tYw6ifjN_`L#^TQ~M(Fs*kQkO(01Lok{MU2pGjY1y8
zXku`vD4r97R>!6LGEv04ePlt&k4w+9XxJCGrVkNi7khu!KQEXyG91!Dnj5VZRvQZ2
zCk`rcMXy_tO*CS(6ukJOh>2dxAU>EfJs4B=Ed5?ZujsQr9J0B!$`Ajozs{+9ALW<-
zi;EP!4cca#y=zxF7&jQLc=5T*oVs|CVQ-E1yzenKwpRJvUwnz1H*WHMAN~jzzW<|K
z{`}{dHZ!&g(cs6bkU3yb0vKp9yKDoL!7|yIvT^r_t1mvo=Eb`>Jb9U&&wr8Q8z(Uj
zKLBk@)CEP+<KSSI-f$JyHZ*#MwF+%3jIA)XhuS<vIT+Ahe-(A)iN3}wPxM;+8St1k
z4Ih-kl#w`}i+7Ah=-nRh4qw-(Am&W@$kuqK^Oo35iN*?ZAu~R-4535xvff-sXnfAE
z&p3+e)&L}6DXkxsq<Y_l{pt5+gNoKkr$7q3)r&(g9YiWY>|F?SDp{7#8qqsI5`Da3
zg0VCjPBfzKkn1RZ2H(!;nO#2m!FTYFfBjR;+5+E3Fdpyn0p~;mV<W}{4)$*1rV|Px
zeO+SEI34jJNFxziF|o*tnAjBrB;kG31tv&wuWd;%nup)^4vuYZ5}PI!X&PdVmy!QR
ze}oN&VQ-yYzfW6Os13$@`djPl?u;qQ$Uz)&`f#7}_({I-XJ4jT>+|s+`#4*79B1|9
zI`w$Q&C9Rx?DJPRd+Hclr#7j2oz2ifOQbc*nk2Lk3Zm&%c&DIgMXMMhUa5}i(kl#)
zzwm9|`{)Mu-+PhIe`}4M$*#}}BcgKvf;N~Kc>4qQv3GDlh&oYtBhiS<Z6pIG8f!G(
zO|V6YcLh+Ssirt*O5HBw0HbrrvQHDxhGV@Rm(HHz$?ZdwGeWaeF|EN|Y{)EJX_MG)
zrb7{Q`X$F>kZEu#X-{(mjZO`)lDVxi1=?7Y$*3$S-7cJowMyEDMCsLnF~^wcYN;v8
z&5Tk)FVEuFw6<b2NzWT7PCAN>lGX?GsA9F~qirC}$MpM#hu?OA{=kTNKxHXQK|ie#
z2Vf&mXVS=KDxOMjHT}#yTvCf>R2$Q2ZIyg%$#iIO@5O}9b|sK{^LO^l-pU25T%UzG
ziySv%M-shD%!}kW8GSOMLOuYmBp#uo%R<xv(jX;Sn96;VnG*|gBb`Z2SHNVXPo>C&
z#ahbdV?Ky<A%>8&Uja`xT1vTq*?n7+^fj_J&?HA<(V~Jh<Dx&6wR6vVKS<OiAO(@+
zc<L_mj;PSavbw&`(a}UcFYjq;&&}7bbN0e{Vhj|$f=dtF%Qv3*8qKW5lof4gsrr4c
zU%krl<0tVk@b(XXkS{&;1Uusz|CMiW=SM$=IkrZ*HsWZ1n{uV!QP}B>V&t`E#JV$}
z)pYvHRDq;sBp1}N8?fbbLdC$w-KRPJ$fMl+)>D{C%?p3}Mc)1^KPk}%#?Y%YXU?4y
z^N%wpu~jMPPdiW9hl-#)=)nH&4lBnuyWxlTPWH#j@WX%oSNW~K|M!?&drjz+Vk3@1
z5me}O{!$d>g}TsWimowsMNx=}W)`#2Tp)8+bLp(L<Sk3Hq7$Wi31v;Xzo^uLa;y?9
zF5lnXov}dJI+$qb9$miYW4hTA>ri$uB_=>3Z^?P9>3f%u)7x*@JlI?OQ}+||Ygy`;
zBR;Klo#@nhi7}PFQ!Am{e}_y=+GdW?8WpU(2JMMf%<;4Y%Swb)D@H^!*D&5cVtP2C
zolJ>M&3ry(ayXVe(8(M(cg*TJN8>sBM^oxH5Q1K6?q#r@&1{rjT(^bPMG^%%DGbSZ
zJYU01caDPON>q(r7#igwTt&dK*6*{nw!-G-I%_NIj7B}Q*0jE5GM#d8c*xc3*Eu>I
zb2OPz*EQmTD~M{(_89htWDkcB9j$ArYmZBsVIjQ4bk@bL(UkL1uBEt~Cj+`5&8u^=
zN9G(~vcE+(tiVY9hU~e-AOOB!UW3_zbuk9Y-kcqh95cckyZyvu+5m~EqZh0zk*vvp
zQfT?fkTS+3Z-b~U#z<WgBX}=bckcp?bF_6MI@ajr=b;F7&Agpcm1FKXd5RDH^d~v{
z;9HrG4{@_8w1*sCeU;Z<*=8DgtZyBsu-{_))(+*_Ayrjz-$M_vy1u~|Kl^3=<-hnn
z-uc*jc*jRR&dXo>GUFT9IHyZu-BJ{iJ*vbtts8g*uNm*`qqJe~;E-N9VD#Si@an(*
zchvvt-*A3w1CDK=n4-`G7nx0`7;9-7hi(;7JFJ;elm@LWWl>R_y_0z3b=cV<G#)>%
zX`P39N}&SifI*__+S*bY0Wm^QC@;wXYJZ%hR1)Qf@>o1gQ^S0WF09PMO+>{Og(C!s
zzaaz}fCMK-gsx4WS(hv>EF`6f-e)o$F;UjP16$0$_s92SgDR7M5%R0f{ykHN64^Uj
zqm$^rWT(R=8c^B{vaYYA*d=GZY!TfRfNA=U666aa+VAp1pLh@d%|HGWM`K0wCFq9e
zJkBeW_QdEgg{5_K4)<>o+ZyAcvL!|d3gk0YB(^E1aLLGEsrwFz8fB4;P!jk5?4|p;
z|KUg3dF>UM6k=gvz>Y!Fww{OH_EvUo95NjTR!1w?sHhJkdS3IkPdtbk_BhzP%Kpj_
z-FnUz1+Nw*U;d-7(#|y>|LKphvQp9S^;q9p$4wt#=jwGXU%tkmXE}Cqh2F4Cx=&41
zo?tZIK(q#oMHdR)(|Bz#0jM1+6gV_dl^j2|#d9xwn|p6uV}0vR-f{8m{L$C{L^%EU
z&WWznu+~$2<YVvU|NU$K9Br&LjG0NB=vv+g`a?6}o#zMq$}j#R|Ji@}-w{l)gtDuo
z!;{b26eS@9Ttcf{GvS^KXIMYc!(H7K`iC-NMr35Vonv+&b%tbQKT1)Y<|s<X?AVf~
zbZMv3%SBc`B*D9ewr#*_+NKe=N`$KD6ZJDDBb+?P&P6Oq*A%o86g}w^RigA|!z5$1
zXuGwdu$rD-r%Y(4H5!{-gpJBmbvcxUdbmI=vTi3Ax_W8j?NauXpx7!Q%2Jfdy_dDS
znJ7hfsf1G_pKXp6SZX}p<cH8Hh4v?Z5pVmiydIg-0&*ZN7w<6@^EuBrENS9ZLRP!x
zTZ|#(4v&j_UM>AxbXN=t%9`^3Vn7?vQQ|2=kO+jRq__?t5Pg)azmWD!)uxX@5GZkQ
z6a+u(^?ZI0`6VZoN{ohfFk<uUMQKEJq^TQX`mAkHh=jpdVuW%~GFn?<|CST;tPt4W
zImFr<oH}=kK&00%dEk*pc;Q>mF|<PhP?iNdJKOj~v%dGy`}otU#2)MsZrtSc&p*zE
zAN>e};R-ijewNj(4a{J@yO-rw6b)|fH&iE1Ovou26j~J23&czYP|GM|RE~##<}sSt
zyZG8~{%fv1^;M?V-be4`acT05Vr^@{&bDRy#tvuhJc~jxneJiqMKRlO4OYW^GN+l>
z*s8=ej%GGvbLBYBd5Y1HkNxzo@H_wD|0em-c^Slxz?msQODIl<bFn~hGTIdq+L*gQ
z)eX(^nWsCwE{kT5rI@uP`gzuuDy7qWHxEeY29qHsjd(X5%}4IydAc{2Ke9xp?lf{*
zcl625)i<C2#M@Bd#q-|=AEzIwx}UQE$6|NMG6K9nI8>I>b_1~Q1c<6)OK=)|L+d@w
ztYKQunI0Xnx4X;X!7kJBoO;p<k{W74<1pHaR|TH3SBQC11l>4v2$)2%3_%GkHEDZu
zoLr`|cUMT~zhC}r76Y7orgSs2JSbbzs;8YRnF&ghW{bc>58lVw<0mML=4d)$`_?w!
zesPDrgMDVRS_X7c&Lpi2r7oxjl?-6K#|2O8T4v20=RGc=gK`gK+Ohmh-6b)IE|=`k
zs_1Ci(I$$e5x3Yp49(_<s$<D@3h~4WOYd1lyo!W`nnfQvW4zpqW}Wxae5ZSj^0F84
zOx+frhT|$GldIIonl1@oK_kx3t<ycAwMk%2%0Vp^{a6wPrBJ+mplw@%_H3Qn<gM?y
z#C>mjJE!h`kkRQ&w6hvFneZpS_bEQ{Ge5|!tG5^*9x?S3v?(~6PTAYtN12K<r%zxj
z!yV_(ar{R&_{x{R%2&Sl1@^YD^O28#f}59L=Cx;^;jAuEjYAtlqb1MOMnz#1E-FH3
z2yM&$?jC2)+=*YY3?I6Oy{|om`mNvQ>@WQsO(mbj%F2M(ukWzFzK(O^$)SyreY2IC
zrLi?tHKaIkj@Gr5p~WeMnJL=Zi{rnNEZYW0?PSnraMZC7G=0_(o8)AjB?wC5zFb}7
z_jaK$1Z_}8@=rtE5}UaozRrueVPgoBLv)>n?OIY7Bo9>)wMEB(_a01QDj4&ZwzBVr
z$M<A|+J*P#Ln=LV+Oz<PxX8tAmXyL98os2hNlnN?tCPu6$uWWD?~$K`RYZ4-55Dhi
ze*K?*itW7`;t;PINDXJy$x6B~)bk^nx<<PQt*37)v<f(q6}gWngldwLV;0usD*5)x
z>yVx%fVQ9&YbQ?d{XhAW>^}QER^^IPHZ-H8VQ+)u;Sau@OP4P2+6&jQYdub#Kh9+A
zdHOfLL}h9=POP8`3%!c1Q^#mtyUHCK7y07;73wDPl|O!h>sR;qkze`*7cQPep|K;&
zse8_G>h7}~?jEvz{VMI%9oE)+tZ%NP3kkxG&WI*g8DbexK~TZaMxt*4gY%BL(wsDg
zE!*dFuU+QDgR|Us=^noH?B}`FPKYVbP-{b9Ywo@K1ZOW@;$UwIu|R2|fts{75&;?=
z18ZgA``&gZc6UMxXq7Stli@^AYAG*HqoT$WDPsgT=i;4rvNNt}C=de&>9SNZ0YD3l
zI;e~U=v4gZM7d3;W&S=Tg@e+%qh^X85kXs0zTbRSqs>fAr^pR>GURF7)R2UZ-l#QL
zZL!u!7MW2=+gXT4u`n2;D2x%8UL(k-(saf{9eqn~xWuha*<MQ&&|J(avG`0y?=qz>
z|2JtJbB&y%EIKZeO1hz}`^g1q`dw-sR9sLTGvfZ8*w@8GD5fmsg|X2>6c@UCw97K<
z^nke@PKIVdb=nkAD6f-lJ9N!Lj<Lu^Ux!|G)XC5lIyoyfBcPI}=TQ!VauT`KfmS+2
zYGu7~$^}fwp-B-(d1#Rll$a5PjxiDlN~4YCj)&fgF@1b+MDOuVOqjHWqNtYK35D9V
zzPZlia7;6kW=m<sjcYeh8qS<QMe719t0Pv{*Jzs>TUeS}^i|Fa1H=ppPCfD{hkx<s
z=;*lmmB%^%$q!+xlKJjFZgPYwdYGc07%@pN*6rzUu=DSlu>6O(pf-06ZPuzKN69Ft
zrS54S`l%1|$N%L&;48oTZ}|TI=s%ERRVku|vlq_s#1qf4y0XXU_%RL-cO~PRz-%%R
z0#NIz=W~i)kLhemTi2AmUWc4l>q9O)@(y19(r3D!F2>xPF0^bpJ}Tx$ZqY%c_p*?r
zWKDB-4yLsw`OA8#30;`>(EUj-n)~jWEj796za3LzNyEC(s=g6z&5y-C5^wvNi@l?}
zH>BJ6OGs({oVxo(<#RuOZR~zde*O;KW%<8LNO3~0la}C3-(!CFw|<9dJ!LwZF&Wpm
zneTFaqYr3pF<R5F3JN0w%c2moAlobG4=SuRX@7R8o+j2UMtMSXc;6;QkCh=r-q(YY
z+~uX~A<dUWN@-<of1%ySF7;Jh8W4sU7yESD+(iTFg}PolzW&W;*xA`*I&E-1QdrTI
zmenws(#VjrjRD_C42@6Cs1Ky*pEMu~Vi09WdA=rRuV(ZzvBtWkEQ6H7Xo-)r7Nw#%
z-r53d3iP~YUN>@WGMXhDx;$5^=Ry!;raTbJ$YwUO>sC;nQND3$5yhQfODi1G^Qa&Z
zVJ7W2*_aEh2r-XHl~77qBvXht#zN|m67eMNDMo@W(Av=W87J;I&4+*HN4fiLZ(*>$
z2E{5`E2i5ovweLEQE}z^4R&wsakw|3ozID?#P<43CpE*>btbbJ*RI{<<f#pe6<zBG
zKlFaio;%HF{`^b)#y|b<`Pe5u!RCA3%`0E~8fQ@)ODr6xm9WC1c(M4RV0w5!+XPHm
zGC7zqdhi}zeeN>)>Nb4pk2&+BKghgoD2-**?=u;XDXW6kIkcCgi;xCy^ZA5!)-c*U
z25Va|-=ioibg!Xbdx8_QCKVN~_B2S8o_HPw8WcqcX(*Y_z7P^9qM&VY`(u$3Xd@24
zDiGR0m^RR~iRI-*u4Kl7wi_#ZiOQdmMwJ6upFy%!#Y4mM-PoaiPd2DZWw*EGsjgm}
zH)>JlcJ*XNtnxpn23IzVrAaG*UP4aD2V*|eWJ8Q<#zXfV=kX_A<h9q2D9m~nO%i=V
zYP~~S!*sHbjuA_RZ}usTl?^t86h{!2hm+|{m7+Ku&43UJ<^EA%<o(sE;UhouQ>?A6
zu|1nh%t^A5R|y1~yD|5^^&uX9#{;~2<#md+l8xiXh{m$MHe}lLDL5nqk1b20F}(Ma
zA7T5~{}mg@HyK=6<M3#oVld+J*PrLVdGQ+W`-%7QeV=@cm43g|wr!l+WaIP}ZPRkN
zv(NQwd(8Kym{3z1Piae(R|ISqh-_4ZxnknQ(%#w;hdyw!*W>xaU9N1u%7YI*%7Ygk
z;?JJ@60MO#&=@##a>&p8>d(?2uF$xMwn9aMWIPifC@V2Bz}!|m`NARp;&cCsP*l<!
znzSZ@5;Rl;rj$ZnDM7!rr{C+d*LaRzzJ)gfp%5C43oap+;(Qx*#E=Y+bT&3pDRxqK
zkpj6%&^jcOE<s_9)*a<jS%bzfTph8xu|?Sv!wgVFub3W8Sl?J@V`GJ0uSZcSs$L<q
zmyC)T-60w3jT>M>Wzr%~O;t8bQr$#YMV3})Tzu~`uFw^hi~B~tPba!sddHn+ZK<fu
zf#He5B*h4~|EzA@th$<2soP)DoBGI_wERAoe@3oG{{Ovzm|LQRN+gt5Oq-=i3`lOX
zcM%;HCP$fFrZdWw4O^7%w7CmMOj)DpiN%;Fw&HLcVxokHg{x^o%A%1>jcnW@$ipHx
zF&Cg>AR>)0S`&kWke|8hUNMo<Q3A}<XKD+BHJ16j#uimV)+PR6b#sjyuTMKPt0*e2
zzJ3F(;N-cpwDX!f&Y$C{uYZG;^;Md-A-e{nB(L(FpZo#7^6h6>sb|!;_Ni~~QypKW
zZ5o=RLzJzsYXzz-yT(j)&N=B`Tv&v5O<c?qm1Ud%R7iDoc^+A615vTEw!!K9ALZ32
zzd^fmh~69#ywFCgLCL9;CwS?_S9$x1QylHzk|?d#akRIa2C3ry9%E#8dj}PvC`+N<
zW0=nyF5G_~&wu%|6uH=40@q{8E$(Q@YVkE0WlaiZvcZu`-_teWnOaZQovLm?vA7I*
zZK|Y=zI}w7UC;kALv8VM=}_#{IO}{{EexI@=cne+XQ^rJ=>1Ed-4Ye5+Y;VzEmYb^
zqhHXKW7oOH4h4=&H0iv><oBqOY2tUHbbR@R*H9Q-rjrIh*$#noL5u`r8!3WhRn~P)
z>m8=j%zZ<TLJR~;jf;o@j!&a8wWMcPL1YFY5|Vqnyl>efh*>8HW!IlB_OX25DwVRH
zQJuL@HvsT*4YPj#XzY0M>2IN;#i$bW0F0+`UQz>^W|1B3(te}!FfesnIesk<1x@GZ
zAg?(M5j)yA;vy@}rVMJ!s=(TU!X|`OgZGi)aKOrNg`2l-B{o1z8fY01NIq^5$whGK
z=OxS{Yx0-$sxh5+=?T)_5f{yBZdB6Rk5O1OF&UuwWD*wAAi?C}SYj<@!?w)&(n&_M
ze2<6Z?5v!k_KwlnO+NH9KhAycejn9f1%i>!EY$2?d6}1AyvD4K^o*sQlzjbjU*+z5
z&a+L8EmlxP#m3fYZeG92otN&Xp3k{?W0#H1Rbm-2MsxA5yE(Rbj6eJH&+t3H`CGj2
zLm%Lt$KKD?uRe~O%sJ63F+M^8Q5k}<6zeCsdF6G=H2Bln;C$fJ``^utKmIK9&pyG%
zLl0suoTqLZs(!`p!68*yp+dyZTTq%<DADjnaIU48j2R8LnA&R$!jTwO_DW3Dgl3Lw
zA-0jS=;2z0?e)<0T>K?;gd#}e>UDA<HdvsY9Wg(gr)*n;QeN1RA>!sva!~<o6rpg$
zs!bq*mo*(kuAr?ZdWR1>NrQkQ4O8uRUpx2R_m2K2JhGut4iwLg>cYV<b_a3P84XBy
zWM`wFN5~-d`b$)ne4uBGd6lV4xjFTm*pwpf&;RTj^m@l=d@ES2&l4Drv6`kH6I@GC
zuF$lH6xw6E#@Z)cS3+SjbtS!$uJp~DQ0|{o<J0(-PyF;RaQ6J&Y`=CDoie*JWjPS3
z{hYfWx{nWj?4!JL<rSK_r`J0shkpo^g<^td%Qxug>GcW%B^Tat2e15kjqV+C-vjUD
ziO+tXR?Q#=+SxvT{2RZ|Ghcj;AOEEv=I*zgr7%J-i9~F_VC(z|j-5Y&Z(C-EbB=Zn
zX$~g%qbax=j7J%ZYCUz)vZp*ZXH%+CL7|6CRfV1@CQ1=j2R!_?ck_i8p2S7RYNhyV
zzwiOR?<YUWosWHxS3dW3Okq=Gi{x-cB!0*1h}GhxH*<<VeD)ea!LVFOg-c92JD=#T
z5p8<JVDUkrl*h*z7cM=@T=lSRBvwKl%zcW^&1j0N$QsL-5rb58lJgc5TH1v{Xp^RS
zq7&7rurjs)9jR7E^hSMbS#()lx~Qo7meY5hV10c^)w5~Bs#4V8Qjv)ukAlp{Zs{+K
zESA1DRcH&UTU=U`xnX^Sfl7DV#ifE&b#)|Np_=|J|8+)dGV%)D6n-Hz%e2NNEafka
ztKa-b?CzlaTEaqCn)ia3V?dS$T54&U9w9r9c7=a{kp3=YleTm}XIp+yqV>#cC=EUq
zy1=ww#8e~((Y1usPzM|qlD<{$iR`KvGLn+ks9vH|#~8bKoCpiFAk!w5MGsdPu5ES3
zOLRq@!D>kXJ~}v{SM|FOOl!^h#yW=wM~n{+G1lT-ORuQ7`ua77gCWD!5oI9*nAvm=
zh?}-|9%}{tICcIshu#v$4gFGa^~rB=_eUPX7)w1qKpUYLmv&PIczO{jwt&u-8r7up
z>k8t0`Il(i()47+IeUMKfzN#(d5ovO_;tSiTmOQ${moyIdpkHhaPE#RUi{WKnH?N5
zIXplS*xT90)wLkACdm(+0Odd$zhiHAhr$@jvWHfh5Ccc!1CE_IMsIb4_F#LdF_pKH
z9i8*w(svE)Vx*aW>D;3;tkh|q^ZmCN97RQkG-rfup&3P*n54_hU!PkBm2yzK@9wzO
zszZPm0PNE1B<Ikk*SjE2EdIQ-*~IQbWb9O>LGK&T*5x5uW=UoSM{d62;#n5!e6hb|
z7E_ns`X)ZgvXa&!4}yG@W;KZ+Qd9xn=LRgIqZkS5iDq6a2F9||D+wOSk#>Q0*5aMV
z6eY7+O%qz2_k`d&y{!zDHAWd!^cbyB+F%QfvtqL6U3(ikp3eoJ5KoUMv<QtfIwFC7
zZzx+{8oR3?_m$I4ayAvj$Yok=r`@F6LqpC-&rLB)Zrt7dDFZtdr71AhB$<N3Vla|G
zC`0JvWKW2+ZQx)!#(8K{w9IVQ5b;=Ru{w42i7_Oy2~Q>gtF$~4k4-W!lSxxvcYkT6
zs8r2m-ER~>6rJ9S%-~WwL}zxkc)vj{%mKS$O?JOl*{o6M_BNYhj!NGDiI4KukAITh
zXq`+ch_Pn6bCcI!z0USQ!+fp@cxLk{8^=!YJOAd>{2%^zzlt}SYS>46&6#s|@zp0k
z!}+`JWqoT4U(XpI%<w%&f7HiV7_F}G$shbU&ph=_KL6P-vVHv~-}jM^aqa70!|&{I
zR`>7$TvW7$;=&zw@r6%+n)@$ZNQk*Zp(u}UpwFISKAG~|Z~aT||C|2-tn>+?r8np^
zo6o6A!`#c~5?qj2orHYPrxW`975c{?;ON;eq9?*4aIT@L1I^6Q8<wCoQ`b;MOB)rT
zb(l(_dlkm?u);gQdneIGMM)Dhq4gLg9viAvxW?lfpV(^=Zxw|r33JI|RobDmmz#zd
z4ay7KNrgzz8eBwaFQKj9WsmR42DO{mCRk#LxRo@2I!zE4NNT73Qn#OW**@K9iv$G6
z&WIuCxS&@=O-y=@svPs+y(jsv|L^|;)nEf(E0lJ!g{dIASYwDZ%x4Fz4o=cEp6F&2
ziE0+Q{K<q|rM%Nca$?>BNeGPYXfEQ+3TH0f#e;8q57n?_esgEBsVf9g@hzv$ALqfh
zzLl3>y}@i8IlgipuM9Ki@y;+P40hOKIEn0E+oLWcqm9#)8!H4VrZ;c#;lKVvlxJ7@
z+#mcQVLBn!0b>={UwDy!{C9qhi|=@d@BfJpaPiV9Dr;pTmZtc+C>d^+3^rE@DW|GF
zoH3uzsQLx5am;6t^>@96yBbH^wzP*em*;`r9d~i*T@PS4Hd(J$u+<RLj2ZPbcR%zn
zm%i^uXq!l3N(yC}nFSK9LyJOFpv5{|K@%g|8f?^fFX7Fy-{<30i|d>!0Qvk%70@Pf
z?w-3S%N5$`J}4(eYE1sC#&$@SQrU_=rp7`(V^oj^GHYfs9WI)MQH@UHS)(N&Is~HD
za&RUCBA4<JwE<HJeYdPkH?-&)Ls;S%sl}gj?Ug_Swfyh-%Q{3qmzuZ#et{gS?%-UY
z8%yN84lP~O#*0F9@plWjBNvl-*t94%bIaUK{+G38-+>;rV`rtNG<2wFKxNn<YZh<&
z#@zg<+n$HiM2gfj8MI{TZ$d}2>o!2}X#kg9!x7{oQ|NLKc<zBi(gO#)=&jRbktmJS
zxTHQzN8Jq%vZ<N(p`+#rC0O*Wo%^5$WejKTyI)+NgU2_mAhOB$DpO7OcX#R8iqXcJ
zG~r5erca$d$>rIMwr<eIP}g$?qaiPR>)Sl?_P3xa!-b1?^6lrI$7)ScmV#WUQr3f2
ztlf1t>WOWlYq|R5S2_3gdr>yxgSdCv@h++;D25wLji|iY+lt3-9hz?KD&-gJd}#o3
zn~6+D41u-NC%EH*2YB&|k8|cj@8`^ecc-Q{P?QBHP9Eo#Z@+-6Tbg>twO3x1wG<*o
zgDniR`ILG#W@B|zrt3s}>zGZCIDPCqtH+LU>tK6fu0<-4wFJK}zrSv<8S~p)4wPA}
zOKA+2n#gW3NU_`LJ=Gfw&ho|U*5smjUHlY9cY(TXB^Bn&*Ig|kmffHy55e+1xM*Iw
zVeMbi+b%!yzC*WPEclj7-_;Gq()pWTM_78^u3ycC{hPnX^r{kLL|5N9NAMQoAp}JX
zjnH0$oS97<p!FyfsVc*~X$ej-=xaQV;J`UW?L2kUqO`?Unlc&)o+5^1U=~vU6;NI@
zz9xGh5E!lunark4#}f!rlW3a;t{yxA=VRxb9@FsLJ4ZB7g`BgQP+B5H2M)lc_eScm
zva_~$5~K-nVs@m}6vbymh6IU{83M+Le90PdVimW0VTjnY?-{MJR&{ccU=$M{n9o~k
z=NS%0gp?CrSb6TkDEWOhlnXI2(PWU|mxi?=CWIA{;?SY}bn7{5b+hVMFOI+DL?4OK
z5w-2k8mS3Vj7_?4b-MXX%=>Lu1Vf6lv&IsEQ}^A)M}OfL*gSV93PW%Wp`9^4*kO13
zfP;f6bq#HhoMxjmUOP6=ALq5-yvDUxE@M_tpbCRBuz762?*4>>{X_aIBSxbYLTH#w
z4r%sUMyo@rUWKtG4?p@aTbrBw;UE18pZ>$&=M$g&QMO-xp6i#dvK0)bG)xAX!Jx;h
zFFenOKJ<Rtwvi#H_sr^s&9~mm)%_Xu&Fj4Oxi53><KIUlGUydty|&Gn<6CH>X?+bY
z4L#Cu-v^jX4%k>bL0CJ(i_d<RUMb*(_K~uvIdT6vZoD>QGM=*1v#1zx%^W|q;#H{}
zy+Y${MJyGnH-uhAv9ThWQx^%LA+{b$E6iQ5Ql@W6eLzkikYOe$f(zgzDM3f@Q4G9_
z6r+d#&J5fCeLlW78`L~ulHbZSN#}wuPij3K9-18bAm^Q>BT1$F%I+Y|!Mlkhr525r
z_(B>&oO9nj$NAJBeuaA8LzxnV6G}*oMD5T<jBUo_o2;+jL0gApQ{JW`GIliHh4Wq3
z&@L<Q7IY(%iYcWHXvLA6^Pm2${}rppHi=V5vnQ^cF*Qd<!I^Uxxa%!<v%BjU9y`Xd
zlV_-a&^W@;jOngp^=uE@KMe<$seGT<R$P5`hZE<{FzS^|uDr^Hq2`mn@EDgqaEVX-
z(|^mAC!S0BT#6XST>jdZ`Sq_q%UzG$$NN6{J}y3RmZE1<zF;=cNkvLws5VFRi&e@>
z(=!FZM?s#W<R#YAhL4{faplSms<Nn7v1Te<ucXCtqtSf!vrqEAU!F?w)a<if796#;
ze6A_?PNzaeMT55KObtHiL6;m#@yX-`w3gXf44xQkG@9dECoop1gTrcz+4u%N=|!}*
z;<%S6x=Jq%LUevUl|r3WM`>NefaDxrvp~~y($!=NPo}|kQ{;%&J~5y|mz$Pug~dH`
zTV<!@h|I<+ohx+)EOQ|y#l_+q6X|zJ@R_r{NKUc~UCiwno!$T9!Z?YHFta$RRiZsF
zis{TBSk5<GM#1upvBY_NlOM|nOjj5$S;1zVxV)a!#J$l}scTf18f~S)=fQ|v52YeO
z`7SCeQ`$mQs6-``r7G1=2%)3TDkUzDDnQ6_6k4N<Atp_4rzcI+Frl?C8tC9tcAkr=
zcui3UF;mo~z=|qx>aI(m#C%B`ljICZ3#KF@!Zi)MyE}|FH{@ESCbc*0bNc*QUVG`4
zRM5eAJZ3N$@cc8+@W9*N!m-U&zFD_;tr)DXEZo3jWLDQ)dgohs>WRnc<GJ<1OML71
z|Cr&~A-&O>7|zvm+;|Ucfz>Ngl;`?eYjI`GpIL6$me*qb#U%75edY_36t#38MBe`K
z_wg71=y|^M4}XpC|4;s7HqV_R#E6f9i<j=?FaGdPD3s=4f1Am{K5N4j0Los4(VFe;
zZ3v30*H7mZj1P}cAySkjE9)EF%*B3!0AnhAe8`T^xfzWMW<(ONW$Ito_QeGQhwQ+b
zHHWvMi>m8+l~U-uX4F!n+bw^!*z=a|;BF^UupAi`1?Io)f-e{L<f1OK5n{eimMPl#
z{?CK5#cg-n=lh1==O%k$Mz*xAEDa#?YwPl~{hPkUP?namFOft|D-j`0?NVHb5oD=#
zIM?FpR$jAKG|o_}nqgHiw;DAEYb|x%FqyaPPbN&}p2@tWZDrUb24;q`u=I)&TUmOx
zqAW^Gp(zWAG4tNjx>ee_!_OOnjyNBg&1!CJ-=uCFDj6Dy$DK-=_=t`=hbs{AOZ#W8
zbJEyAYcc2x&@mVE!ql%Dw#Fz~?ih?#GSo5BP+6<7TI3@Vt3{HMqy%!6&K#9@>&z}P
zSY6VpMq~<0v~{gvYio;szanG6d40=U2@pIXi0)oK8%NzV1Rt?Q(KV!MG5wYGomeyr
zhObbnLkQGugY^MQi`IqYo+n9KH&n|*CmlPV43)0G)MAEtjNJR)_wdQT@>kK-kQf}h
zw_d@u4bxf8&dmdy7jKQAME9DB0a|PN$2TcAPxJXNJi&*4>gSjv4j!Z9oAifkTz%zL
zj-NTnT-EfgWn*iL*=)+ucuZZl3<m>j+2hn5ck&ZI{S*BD@Bab6``iDVpZNJ-q*+^I
z_qlH~>>H|+XV|-OoxNMPSY6-b`tEgtmslLhqBT@^pT`)_YhV2u>kr>gd14JPMDLl+
zYZO9R$IB2yN0SVu6m8>~H#64Go<$AU(7uJ>C{S#yuVR^SaC47VmFUW1y<_GAHbi{m
zXq80V^iLm0_g1mKqU8$3N(F;T*1K<r&JkJ%ttU2t=mJDXu!e@TXXa#vB(Kn^MhYG=
z*dpS6x=(!gu50MN+uqOrq=)MCMOnL-1B>z`HX&Kd*#e#UN2cs11h$(*Wu1;7U!o^n
z9s|qsj0OQ46kq%DOX%`MI{0Ps9-_ecZ9C`S;2LMoUL>>w8Z`z}lZ{5YY&JzgveK+E
zQTk${13-4)i=Yiwi*ZgH6+iMbKgS)HE<#XDZ|?GmkG}(V_!_hQIiU}m_gzE}*O>T{
z<9Y>El=#BY6pCuKVq?9Bsw0DbLBBtwCSvA?96$E}{c+9i)qO^HUc$fnG;unnfBZBL
z-E)rn|IdGuryqZofAin{OAcS#rV1r02-<l0$#3$~Q!lV_`Xq1r&|A3wo%eD4#2Q7C
zkm%Gxc|6fMT!5z1qA6H%%EM~<>+2NzGn&w%bwT9~&THBbSyd&^e)SeFJozN+8-1=m
zdxeMYdXO8>K8=&WFbXA6D@sR*rlZ~|T`kd;avVq|7%8-_FuKHr7NZo68*}O2_YzEs
zU@3=eY@K6&?*=g%T#C?9da0qzMn@_iAeL<8kh3pEZ>hR?2Qf*K2GwXy$bsV7JV^<a
zQu#DkF+#~QYN_$%jZI#5;LLxYGcHwz*A|UfF2s_1v@p)|;g|*}8gA3@<wqK}s8l%T
zq97z1Q-0pWS9&Akm(H~_{a$qqUp5I_eDQ52o%z1}4%qm{H$o;I$8wQ)v-HJ_|ESvy
zq_TTsTzaBh;AhX7*oBhH9pZ!Nk_m#UhXCl1yjs%d6aekf#*hu0GI||COmusp84(mA
z2wl%un+5{98wiCcPL#n1DG*|!_bSN{l=WqBD25wb(iDeCVI@O3e~s3f>AXgT$n@w)
zCdNruMRIyxU0db!xpTaB`IT;{=v<&}8zz%6!@&wo<FHDce6u!IA$r%Xi)Xp_V;|=F
zpZyu8o-upzMb5qT2N<0_gVvT%*SLC2d$27si{X084^EAn)}3o_TF{I8A;pj_2=3_~
zT54uHV;LoSuZ_D;F+6pG{TD9t2Y=`9^2z_~f6c~)^91i0uC35U7z~E&-n=2MkHKMz
zK7*B2ybtW%yh))9gVCVd^Y?dlC<==yEY)Bj@fccA{7zDv?vM}`8sl`7+}4ben}fV&
zwQyZKxA3??FjYoYwG>ocVT|fD(F?RS>opf{-phUMvhMdU&5zQ(lt~hqEfH>`<=&oG
z8ah;XNji|Xt}O3~`T3T<@iwO#b^A+L-h*IKEBue$_um0<Lu-w(hC#naf6&Lbj`7iy
z7y?b(5<@FSEiq7)MsE2;V=c91I2=;-N`|Wyrm*zdz)Dn{Hm50j(kzJDDRgX};3IA8
zXc|Y`IO=&#(>64XXF55gZCmQPrS%@?J>Gd*?{GfSw!X`smg#5^nj&;-RE7@;)zebz
zM5I>JQO-iqT{^UhDK^eR?2^zrb10(~h0+*p#57;U4(-hlg-XM(PG6nOk~);W8$K+f
z>+-ss_esaEld+=4ps`8@KOst<rEeRWx}~XW+SUon#0N1KjG&#Tbu~Je+vg(7!`%#Y
z#5CCNR-~L?-Z`3>bg#ieQBV}((5%zzleZ;WMiaBFNCuTIY94&&yZPA9|235EgZH#<
z&c@azu5Qp~!h9Op+ZoF>Hl{P@i(bs3v{jrvdzL4jd4})*+2l{4Eah;-)`@dmy?mLs
zzV|VF)XZy#agkncKyNUlna&s=jVT5L2E!4X$4~OJKmW7*&cFCwe&?V6Gycji|5eP^
z8aG~kj!Snvz{}4+gR5(_f_5f`Od&CY+89{7c$y>Euo5lL|H)tQmS6l)T!?IKth2MT
z%c)bxsOvfb`4Qy<R-4oW!gRdP>gpyZF5S!ZXP&0-13_!FE~%#_?ZGx}i8x?a0ZJ`N
z8jV8_`fS{PFLe{y-rGkPt4w$Ix%NEthXZ<}Rm!2k4ytsnC}JDHdxDd^ToohA<ju`=
z26aK1Zdk8fz!P!l_kYQp?7QIcJ=vfx4@t8rOGh14ifEEnD<<Jd&g;@7O@iu*UzLRG
zDitW`&>ebY<R(p312y;EbDrP+C%;9}-yk-TK$}bfK$*yVy2o^~2LZg3hQLOJ4=oGl
z&qP8F^0RaHX0FwR=_y1@g90TJM2WArterf?fAYWo+sx|&l$&tw{u7MOohR}xR5RxA
z^?jaub;_*mF<M{2s7UK0!7G%Hv?elG?=kFI`ayH{!WsuJ)EwNp!O1c(S|4C7yk<Rw
z8M|NjI-@)9Mw@~H$2;DAKX>1ICx7~TpW(~@?sNFYVT~X=K8)GDdc>!H<3;}LUko^R
z&t2U6);qa)-yLioTc<1wtQHqjZz8UpVG|mM0$oaeCryo;ObOmGYg?ZA(rdI06uP2B
zaWwV(lmGmm@WCJWC>wf<doEn$iRZt`PDT-w6!>{~5z*KfQbQ*Id(e7G!l)o<3f)Hs
z2(h7O6vNSg<7ZFdk;$em3zVtYI$_wk@jAg&Xj70Uv>kmcD3KCnbYZE;xy>jjEDuQ*
z&6Y+?FG5Vzp(w?E@V;~I)Lp@*7WY6`Y%MpEZ|Lfl!AP~J38faxG{J>7u|rO}pO%Y9
z)!jpao_4R~HWW3FvXxrc_p5wOkg0I@r{x+k=GNqmx*pZJ+0wn{H+hfA&11SandNX>
z?Z4c3FaO(eR~?p&+G2jMi>82_x0!}xdHsYS1Bu{M;g<lfm<?5;G!n*3(}k78Jy_W{
zeAeIxaXVHjVoQsT1`~DndBz}GQWt#EsfC3uFUt2}AS%no9T%}>ALoT)*R46|*5rIP
zLjle^#)k*2tgm-+hD4ZT^VkMTYhHilHN1}0bxp57<nraqJo@lkg*vZ9d#XG1PqOfO
z{ffIk^gdqu^FN_q7?{pbQ-|sGi5PTQQ4R-$x}ljIVZEmqu4Aey>Ashmny~c2bY?NB
zxb7f}<yDcWtvWSrTIiYY{+XZPbN`QD!*B2Lr~lyZ^OOI@{{^Z74Qw1g$?nT9bF{aI
zQW5VR<;XA?4Qc9{ql0}`21BZT52ZBCyyo!WfZ?!*F_xy8FExrw1E9Q4V&3mmw?5SJ
z($sW1<8G%|D%#cZXSjSTWi2#Ps2Y<jTVlMOB%l)mWC_*DuVJw>E%u$5bj@8vNqRwH
z$tCxV!=bJr57KPKZlhRw9m(WvK^YJfc_E=*3>R+uezjQZOB17RNU=b@-uyjAYmEU)
zt1(5%Tr0}jqV!M@&Cn;th*8PF2-P)H8Ym0RuwT*}l<2|=<uGc3QiR~CCr7kR%dDO=
zspm}RbEdPJdhThP7T+dnzmg$X>s#6w@eZOF$&&XGqZR$Cy6pq$yghXKOvGZSLk{KY
zhDGD4goD=UMXizdtqQERqL(M<eTMAzA+=D*AEKB!b)(lvcU=~f*rnl8x(7l&MM7G-
z(I*tlOJf~lVp_;r_N}9B<}^($2pLjGSX*F>mD(i)nx>_3GZ_XZ$xKK?p5W8(-?(h@
z--SGwT%7sV2o<%h9o~7GrlBweWm#eii#Fm_A<;8J6V)1YVL5&0J$&q!ei4eET*ugA
zt)Z!FrsFA-@eGW?^bD@{-Ove{9ZDpZ3EcO<-TcneU*g)eYi!+dH!*_g6`Z`|6kmS+
zX}oVKdqZ#v9|M!goTAVaWkuO5nNMdl^Md|xKxsz&$WQzj|NNi)Gk*VH{wsd@SN|87
zVZmT!otK|`f%VZEfreRKqhp{U5Tc_fEOR%9jWw+5F@EM5#@BAqKXa0*S8;T7$jKAO
zF-1k~=R$T#`-d@+qA<*-6DAz6apnZCK5NjvMjOTU&LQlV%!7iq#g7|VqcIY_2Oo$w
z@bX_g%kwW^Vb+e>x^NHsFW;bk<tpW<$6z#Ius&e4F=Di`LVs<QvfoDydT2uW@I8u1
za2}(|#K?1U26<0RUN(6h2Ok%M)9<3k_hf^*pam@^w6Z^?Y5S7SRG||!3JCz$8NEzJ
zch<1##pafG*gPG{1xvrDdF92+Y;V^@XNam!2c}T;LOW(Y-l0snn9+lxm5EB;zz`HQ
zAKe~0V~UK7$V4!qPew_zr9~y#Qt@B^z5gr2o?>V`cHC0*&T@EThr|6nUc20~KU-nA
zdYsXEL4T!3S!#R}*gu@o)-5V%+Qu>0&?|bZtPi<0cN|>XM(Kc2ivFm_^z<pr({pZq
z<|$79_z%-x6G{XDwzh`+<X`_dZ-4(o{Ja0}r+DRCFQ?|u(E650pq(7@;!|Jag(sgx
z8N>S4F)lrPkyEGc;?Da{aqPkx)<8XX#Gs+@l*!8dXm7@AFTF-Ht-1B$HJ<s}3#_kf
zGHXg^ZceEpSFY~y;*-~S^qu#zapoLnkDcLY`*pnXQlKYos7K)m*bp$v;gz_s5tDY5
zfYxACDF!ayVPhZ!$0KifuVl9>i?K$Yv#?bCf(O<#d($zmTz4pKmgr4U+y;|-u2(@m
z%g(eV3fiAcA~J#|hKm6eO>$3_13P3#z|@opil&jyAtc0B{pA{}tN+rg>Y9ngzc2c$
z1$rt)oJ{|^T)RB{SuTi|4u(7}UJQzsOUPUpry)@u{OF|uHdhoKGP2ar$6R;j(qqwG
z-=4+y<~_n~qQ9jEGktuEf>!>#@J-4umacJCDmSEw8l8~4+kl?17*gduEG~75*|l|1
z<;&h6Xe(Gsa%P(j4RPM)HzFFVs-UoChX|@f;|(DYT);V>3a<`U6RT{6Sos6@y!}1+
z2*wyfXdz1?^7Rj5m?G$GV1H+q(fWG#y7Lqn1shwd3<mdd?dnyI_6}&<2JZrO<0vYF
zZyStNOPR*tooBeUf$8_Ln<we*-bBw@4qm*>$|LuI%@AIJ85EdaMQG+UM>|-ziWv-+
zw6YmRT`VWbRZI@X%L9`)j7U03hEjMuYZs1l;*kg0``Y8|zi^$W{_XGcmY@Df0)dlf
zj`5l8Jub{!F<>E54*Mvh*xxyjh@P^b?DbG+#)o6tw#F7Cv@sm+?x3}pAl=?TN)r`g
zNTzJAYhv>+Ipa7pjs%?v3GG~<#`4)@Q^Dn?a&d2KE!QkJ`6SdThK2rk`7TsT&v3gT
zQOe5<@iw%nL)z5hK9@RP8k5`Zx3t(6fNSU))a<0cU|THRcT4ju5j-s4fC+`ui$16u
zH2sgEz;D7s8I7`rXpGQoy+I9z)J;ucB)3|j2yG+#yfwn^a*o-oW<G0}whiT6V~xSp
zk?Fi)GH;knXVi5=TSwZ^k|gUGr7_A1H8z6PB}FOw`DAYKvl(scFqM%ZZtxhTu(m+!
zI<22HxMH#YFXo}id@ZdbTNL!lie9frzbfhVD~dt}^D#PH?QlNO&gVGq@UBe;YT^j4
zGqWnNG#JfGGc1gt^YAF&sToyIy7-*k9diwsd=4a@X^~&*e9L?;jf&M3rq=^43@`7*
z%qJv+C>Mp2n(M~Ao?=GzI!K?z;6!KU!18rVYu{Q*W5qNpp>WQ5yqk;0)@q8Pz!pV{
zkTVc0z2Q1P{5O6TsudzF!L`)0Lk{+K+1cI0#ej+h3d!X(R+?z572ZdD5dOMWigOn(
zFrGWEymXmUcio3~5vw()PMu=fTE>$jRyNmAg#sT4PUAzs*DZ=DXlh$SZ7HjY(P)hy
z|EZtjpZ?>2%=1q_!-aeAV6u0>jcYeKbN(Eyo69wIktRwmsI^A+ht(cOwPWM1yLjmf
zPw?Q6e*&d7o12?#@9YX)SF?1EdW@;CTI#y-(LP&SC)qf0n)$1j@jfsQ5$EP$4O+uw
z(xQ#VXMYD2Wi%Y_?6Up(Aq_{^(Jk6Z!&p1qc#evDDDAP8rZ*h0va!O-<~l2DYmC-M
z^m--Lu!pi1Qy7#HuaG>P7As*0O)Epr5QxE|{C7Y{@bCHYJ=vh<vqEUR$x^&CXIUJL
zQRNLwEs$2|BzEGPxfIK?Tu93gfaH!w@T?36eERqP2<IcQoe;GYLmrQ&rk?Ifu2#yY
zLPer)Ks%w=kRn8aIH86-f<%dLU#>@0)=SvV+0$CfTi*6CAN;_Fc;tcO+`RERx31T;
z@dUhbL~AVCY*ARn%1Wdt6=hXWSVd77s#5IM5B6)Cx=mNeGHX5STLq0C;r5P*&ZDeM
zXwN@#ftUZNhrYhYl@|`!yaP_1UYE&_iCC+->%KGm>i_ZQ`Ru2^#%F%-GqmFwB?_k;
z1Winu4HX>Q*RQkt`YY)8CEoV_$M~`T@CU$0W>@zZyTHbUO&aIX&hq$Qe1m76e2Urj
z0SDt-m@;sBeVt<MVV-;Ti}=`bG%xw)*T2Q_^%I;sdxCq;-pva)Z{SSCDxIh_3Ktw!
z1+3D@EfYICdlmIUXQT_XQnbFNGM0Yda?kw_GMg#170R7T(_dW`sD9+#_gHM$;n}OA
zSH;V^(>cBaP}-mpwO1!9d1nX_lBOhr(HdnG&NV0sT<{p%;gsZ*)`k9XiMqs+0mBj$
zxmd~zSHS#ynU<T&$Yo=qOp(t7OQlk5Q&)&eJC)Iy?4r4F<X!%C`L4}3Z!Y{2LZO$M
z(~K6fVBKXjBkus61MwRQ<2UQkwZi%5FE{n+A2Mo~cAGS5${p?P-;|4z2x4g;7eA>h
z(lfG@3bO1<y=VaAqVOWh5DV}|5n?N!g<s0$OHP!wD9~043GIdY3!*u7-qSjloCsr=
z$0~2$CmOn;HHOvWCq%1lH1orn(a?&{HR(wuH&Q16D>5A)bqI1-ge~pjRlnelix)WD
zoN(*vHR`tE#<d%4ZmzI%b500p#m7i+PAGNO5Vhju!;j*={8==C!<Sybjo*)5+vtWu
zDh7fv=>7_{4p&c!!BGrG#8klKf;WH8xtY5C3|Ioz;!?AfzgmHhHE;dtAK>3U{}eg|
zp7`{i^5{?f7|IxqZylp?k=V9`%Aza7%GNqD!rsmfJ*%k-jV($-2psI~V)A>yuzzE_
zJHtALP`*DSkj@d6hDu#em2`ZPZ=1bnI||3reYJ#KCKg0Sj$_O}OYZZaRo9&7c$V0)
zCFt(>1v!7g7U>8DVbRodLot@t$WpXgRExn4Y2RDy4T~X;+$#%K2TO=*Mn6N|vpTAB
zT;!c*=(W2%$@(U(W9j4Hd~_q5PwDjBr4>qhNWKEDZJEzonr2SZ%$YU~-aA~~P`3@y
zMJiLUu~yORRd}a48rQVQuyv(h!k{2I6iKP%lLIv3YAfJfz=cR#D;grbLbRwc83dZ5
zz!p{_q@0s&s2h^Tv=_?fV@u1*V1<6aPgRu+dwt46?pvh-&AetlpVBllntD##w&DWp
zJtQ-r;5?p4RKiTkF+klJ(9Iea_iEO!<~6-k1$NWL?6QwuFi1jv-XO7A-UnhDF8e5i
z+TcRcoqC}_w=Hek;#w!HljH-C*)3g67mL@^`4hy&ES5LyCDBW=fmU?G2vF$krXC{R
z1-zTnYK80d84N3a?r;Cc6ziLWwq|y;%gt*y+1=e^Je^XvEjFFWqm4CmQQ@6qI+>u2
zp&IlP(?Ir^_04svl@U)p_if($i64>y8j)k0E2yH!jhnYPd*M6_6E!7fTj`#f&T5FB
zqLB5awB_`f)4cD)AK}ZN{|d*?o+i`{ll?=Ez5QWf+(ga1sd0#3NYFAI_1;sBDu{}s
zZ@$jK^;?XNZ?V3y$>U%73h#c$J1BHP<0P-vD2vq^A6k3}wBAwIb55K)!#7{}Hp5;8
zMMM<}6&$uIm<RD|iCUw50A+9(_70~^<_)S0Xj5RV?c@_W2BM2JO<;O(z|QMC#OOp*
zKI&2R2dr+aGguig7!Fw5T%#QI=&g?^`$J5Bh$5oPrW;;p1<L==F23I#k4!&G8<Tp&
zRJZ#wRChFWrFPOpWtxxJb0_Q~ZEzh36jgTs2Nk$={RRg|b9AhTagL5gw3Axl+IiXl
zH4#A&oo~rhP6^A-<Jq)nWppZ4NK)vhf*`lmT65vPi~Q6t|14W;oA_w>^ryegaPvI2
zJVjBoC=(bI7Om2BQ%mz`Ohg-*q-z6}HLR;1<B6uu6a=H$IK2vLh#4O;-#wx_(MKt`
z_~1Fd<zc;T8P*3BcAb}Byu$hOr?7>Qo2<~7)gd4M$;Wut2k+xkzx@TC{@gcllOuTt
z$$2mM7Ud(+L|kw<>nRFLJDoC~?Q-n&3ACT%VhI7h_4UUYzjBqrL<)Me#&hYt4{_>_
z`#C&#o$1bN5a4jS$5+1kI43th%mWwi<{Ouv;l;ykF-(aNmBVXKLD`XzM9UR~fg4;W
zVlRxvw;rQB&dqtxW8V+9kX%VLg|S$hvP_MkH7o2)J>U1fljz51Jp0-NU8DvwY4}3O
zBM2!VC0-)A|0+c^2tuElHI2}Chzdi*7~7d30mPs>giFacXOKr;SJxLUAIu9NAZEII
zUgR0Agsu`-0`-#vUUKo?Jtd0<l638IAt;F^sZ>>qHg(ZRM>;Lv;$OSW-=$aFy|<{O
zse??{5=Mi(pJe>>CO`7@ou%eG8(`#@9@C^-Cx>nFzjs5igco&<=hB5-hVJrpUn;Ca
z_jSwPm$gjkLV3B~%7ZsaOQl2CP<tPeu1m~Id~hhQ&_S{aLx9>fxHix_NAz)_qcvcH
zC7aV6zj!yw^txD^?d#X+ojZpv%B~O#DvG0MdI9x(O4Bshvgi=wj@^<qohq`jK4fL(
z9`<hS@%rmm=nV&$QYQXcQ#+f@=?#Pi8x$P7=K@!s{0i31D8>_xo_>za@Bcu8T5?uk
zDhSIGYYe_=XpfFC!yy!9YWhNA$SjK9?mr!ZzF-C9Kg&PMerv2jk2H_|{7>`MU;AhD
z=M(Ctp{OcWHrFVJD;$kytQITuR)!3RD~u1v%#V)HKvnfHM$yb0j`ohoZtb(#jOpP%
zg?!YVr9#(0=OL?-_@D(FA!Q$vouEzUBpG8!eOfYO>)zVJpP{=4mYPYGVv1r|#GS=u
z`9fkmEM%O^4c+o}O#0{;7c7l@f8^mzx`)Ej_h;rv{v0|zY*<35LUyfA`w+>&I%~q9
zW2VSZI=%tzqHA`viA0~SM~sB!xASK0G5NYUzzD^5fA^4s{X>q94r%6dlrm&as8<SG
z^{FaDPg|%0E2BOqjt%Mc`^=_+e%Ug0Exx;kkr1qyX~jrTQRuWuNFRK}XX>l+a`s?R
zeon6_7-)krmR?aY)pPm$^B$=4KtVo~5F`CTpVQ|~Q&=&y@lDNmQsaF~+suj16P+Nl
z+JGU&shNTp1(od1rReb?c1AubtwY%|h4~~Oi|)Rs^G!%sSS^_#g}lbIo-;Mtd9Xu9
zQG+6Q$9y_l3?q|!eh49X0~pcJhJ`0UH-yalc&1e4a|)f1+@*ozlKG*;)g=0@&u5R+
zfPxBYspkk7EeP{3{Kx+r2B*%`x|*9;U*Y=eH<;HAZPNgXUey<Ba12bw69z*~+qO)`
zN3?azu~VlzR-r+mdwnilyo(pU{Q|CS$d%~E+K67S$Bk<@DQqEfu_)%BF`Y3esCs?8
zcN`oXaO_xtE(&~byzAZX<e&b-e@I<>VpB7nGz?eP1nqUA>D5Xo<+Evvt%TC-bfi|1
zjkBkD<(uE+(t97Hta?l*p1qs9^w)+Mo%Nj&T@)g9aiF4Ma(KwGtqlezPcSGfQMW|b
zVta;T7tS$QJ4y7O`K_BAynF>?H6eH=wZoJ{R*(1DJbjA&qXs{pplOq-r%2JF0-x%=
z>7?aoQls2HueL7{l%h0-!TJirts$eWRW>#@+1T1*Wn+#0%7CJ(utf=l0aNka_W0gx
zP?72FayDpJz^g?elqqGI#z}HsRb;iqvN9)8-%??Y&PWJSq@j8KnP+5D7Un1dAu193
zdrRwPiHar@x`=Svm6BKrTWDrAC>O8_0x6~>Yhz<d7>Qtl!k9kC&YtJTfBENl??>K2
zqhLA<_;!en1<FEEM2r<Hb!`&0DJXG_?V6PUDe_q<D0-SzQ!=d`t#@dvSU=ULDM~`?
zIr#P~oH}_Dk7sbA;Gi0?rH(lG##5Yq=Y8~s6_;PV!s(O884ddj9RrrNjSYVOfBA!a
z^2a~K=l=9*p8mq)9NyT+H9ot56LrK88Lb^-K5fwiHqRbsb#;Z;Uwe)I@#D0v#l?oI
z-$%C*Yc%DC=F#`Om9n?NL+}3xU;ooj(H`wHA@JhPHNN!auk*2YKFVX4?&sAne}<+^
z#Yl)$8gvM$kOLhRCW`A{)Uq}-5v>DFGo>gr!<AJYc<AjM)s3K-%Al=9>r%2vjUrHR
z)NHU{-{M2>TO)qO@Y-(D&uE=8CzniElm|4%Y7xXMiSn?<ChCn)q9h8$&^87N-<crk
z&M--gloIYP#bf7$yD;hL5WWtbkiYL5S~3YKF3Q0ra0BF^Rf!Iz6-$Mh>daI!%90yR
zbiTXNb9D7<z7A;^)J?N>=W`TwOq2ceetbd=JDyjX+~&erX}$xs<L$^Txe?C4Pc@Iu
zNb~}&R(i=XTwb7*zbY=h=0)R5S}Q6c>cI;=om|AfL95s4RhKAjsq#jvNC;xs7E>m1
zuo@e{d2tB!K6Z_@kDj)X;=l!;vZ4Wv=$wf7LQ@&TJ@0sT=kTkQq9_VpeCAp1edN*9
zCnODzk*FJ=%tyvYhiq(Zb!d0KxN;3-ZJQtiz>O1|)UIY{=cdpKjqVCO*R=FkRwQmK
zdbTdy$>rK%O(e8%^whW5`rrqMg-K4ms#{Yk1kqnr6`^ftCliW(gtA;L^h|qBpS#R2
zbvF#^qF3?)NdHWxg(|r7(ffGu!F$<x?pdY>M~t>g%6^a0<|c=SV@8|n^hT@L!V+9d
z*{i6hUNpl#Fd5Hin;Jy}z-)X(Xs2Myu93{IHPa|V2;@7UGegSvCo1*dQvBw@RKE3M
zo;)QsLSDKHbZ3bLkdU@4QwV8q$*$#jEiR9iGV;2-mn@!Hb?c_f9Z7FuS%VrgGL;56
zi`h<YC=u35suoo`x?@<3x-?=6V~j}|wu#an#GFRXiTrj0F^We*>=YSl@$=r?T?X%3
z=5s|z5qCwQSz8&Qd}O9H(K|{b#)8fmj8YWF;B}y|B`c$nm3~1~9$(LKp+$CwNDMxi
z4aw)IwLw{gnWcItlaB^X69ZoDF`l;sV6?i<uwT&cRRkCCEf{U2<yBHag|PHa@?fp?
z9PI63bd*|#<gyzBg|-AMt2Km(4~`IpVHJEtg@BJibWhsK-aWbM8~^}-07*naRHn19
zp6<X1-2n-zJJWJws8eMZ^1m-8Pvk6D`MsQZow1k_xkL;(3tF`NRv9p|pN8lc_e7%h
z66HECuEAoR+$MkM4w7Y5mu`*vj)9}PAx&r_L5umOQ3hiR3ax~mY6?E|!=L2D0}tWa
zmMbrOi<`F&QdFT@>|J?4npVa3t(y#nBdjTywR5DC(@SSs;O%dDEC2n|PjKVfHpfq&
zBr2$S1*6Ro*RNfZ;a8BMfH4*y-O_yx+E|LJ$KK91$4;I?8;w<(`yPIn?W<SW+E}4(
zTdJ~0-88s%jthaPj0`!Qqavbgz@XW_a-E^l+`4*`vu}A6T@*a>=p%gPbD!t?KK3Co
z5+tVFdWqum!J}g2@Zf+GTU(sHco#*bIXb+BpH46g=$|^l#<`0`EaCiFu3x{->Yz_s
z2UJzjTQRI1FIn4KV>%oV`-Tu4u9?v`4ZiXCc1|obZte(ZTuiS;6*vOZCU9`;5N;h1
zxq>Y${n3!o+JM!q6*jlF*jQO-bz_~C)gdR&e+Raie~*vv$p*D;T$+B0hDrrR6PMue
z1n=a+SsGdo%8@q=sgpI{JJH?5m>lVZI;UdHQIP}{(7~~{eIN~NAUOt$_At6!^sDZ$
z@904~5TipyElowzW|%0Msw(Alsvrnnp@*D2dl#Sh`5)thKlm<sqY|$oJq#-Yh(RVd
zE{24VCXH=OS&dOA8mUC9WTTs$=xUTgmzsgG)QxOr#%M-&-a))|gYD-pbLNBZ0tIGR
zaPB>i(0u-LT>0YHxbK&K1Y4GzKYNDpbjsnugk#6nDJzRr8l9$3t0#v1(9gY>PyF~}
zy!O&9p8VW5`1aSI;b8BG5F=JstR7pJF+o+aVtTlv34iZD|GWI{|LXsQvL(k)oaNgu
zeUm|1&=!#o{J?uSarPKxHR8;@m)N^?#Eox$5j~mUqT?H{eUrP-t@F@B@8p3OU*v21
z*Jzcar!+bmvi^h^Qqhvufkp-hWkGO~F5z9nCw}~A309+=MfnJ(5}L13@<r)zccEZ!
zy2-HG<71C)@|h=(cxBtAsb{KiRDf(IqLma&i85=W#uzJ2Yt*7kN)1TsOh@C*Q8OXg
zmKzCzL&rSP&K2z9;9D*nV=@SnKQE(w33ccKYI5e^?MR6Z3v{$=#K^C0(TocEp9<8>
zInD@hE>@Qj)>K3&orX?v!6Jyz6^TkIf@gt_X*tXTQJUB{`H^Ff)RJasff&nlT#PlA
z5K_H#T{9sg(Xl(k^$kV8%0cxR1s1vi=|^_7#^tzyK<uc$xqsD4#w;Q0p?rQ_hQ>OW
z)p+N-VS`NUg--5$t~8CTC9BX@;kD@0=9OWvcAT8k2a(biY~Q@W@a2~|edk4p;2KAc
z$ue4sp7GHE8^?}G1D)RVqS0Hx@j5l($2K>3?eb+*j5JLR5xi?<8f|U5wwmGA8ogti
z%&xsm6BLJ+uX4w<Mz0U$JqF+He3|<mwMG>s)D7+UkYXh6=8Gw3ZcLY7+Y-Ydy;d1E
zD7jt=qIal56WYLoAN(MH`oxp8<1wsn5_M#4V~zRFn5Y%yU`PmxmtOcb54`1VJpQ>a
z(0b2!GGTl;B?L!dq*$LHj8WZZy1Zr<FE2N6;+`6a+H`A5>tw>0G<bO}cXZ~CUYlXb
z^!^ezHfuVg%FSs0nQ_6a2>G6-8yIV&Q*N_iiHV%(<Eao2OGIF`0IWj{9rBr(9NpDV
zNLF_rkr3WaH?2B~aA(w{gf44Rb0;h%jaEUTHc~O4hkGcKBH<P<DkJ4@_QPa{Y85b6
zQ}rrF!y#HJPMzB1)t6pkKAoaeq%@XjBxAf+7ED#dCKuPTEb%_jGX>>Jg{_2=sEdfU
z8e>v>7fff2G<S|k-7uTXnAUT~^On7XBiiINJ)bFh*5GuTVyj45ck;Qp6uhT9inI(H
z=1tB1-Y#pSA*0n58RWMOZQ}^OmBD~>sVViTQS}f*8Y)0hNUk^DcPTJ2?U8wus&tl;
z&~1Rr16dW2;dAmmATtn<&r=Mbd|uGq8uTIHyewFi-T9Z#LIm($kdALhOtXweh;sSg
zBc!t;(!~f1fSRVasv8s~gZ3zc6r)7y6;;Hw4L&MLS3>1@>yP{-J_fG7@)Fmt-y$lT
z=vrQ;G-*iVqwjKIkM<9#ssXkvMMEFJ6q@LK;+Y06-FJyb6+HLSi<~%pLfBqfv9-0y
zx4!WzuJuWwB(+QgymMV$5<;XbDh_88W|J|))e%ue?s?!r9)IEqzVBV{pw$(oTuJHk
zDN;!VlrDr$9RkB*fNxq>Pn=}>>J?6|Z?bp&I=z(@R@X*E-Q#e7!eGV9`}Kx)-r#~T
zf~<2K?H|xgrfgj}M+gmvhqovZKNsh`IAt3#zfyx9X3)bb$L88PO?$}d%8--ik8yBo
zn^GHej6_#6YipX>oOagGx|U`(qn*#0H*;FmNSzivSQ&Q6KB93hGCMe8ckcjRyC!B~
zWx;SXV0E~{ue|S*Z~jK}@A2_H*`Q{}!mj;F11+6SyO>?yR5D{A{XLn|k*2pH;A1Lq
zBV?BwrBbGmQXMrex|Vs<$U;j}(io)Z2?S#FX`-Hi9z}GHsI1hR+E7vvwI>E~#w%@!
zt39L9I`4V^dwB0hALE{f@8iVzljyRP!@`BGC<`S7Ck|jm61>a)CL6j7k?4~Flah(}
z^5k9Rh!3T(#z3WLe57@eyB|8o^wVENy?(O`&~Ag{fsa1QHy-~B!qE=x(G0sT8`7{>
zG3xi(-rpy>$f@I-m?Fjg<N~n==N~xE#fLshXg|cQ8;4xEvBTHD@J$B8h$&<<4-x+S
z51-;@t!e6-LBHbceRuNpFTOyzzQS8S_#h8G_6V!TR_GT6CoXJq_3A~2L(i3`p2zQA
zXW}P(<{Mw)mp=B3{J=vW=E`S&n`<h7g&2I&N@fRK-MJyEC{Z*<$uyl9xbLC+xaa<d
zxUsV@4TBC?YcRT`C@MTPD2aLrXvXa-H;z22JmUTD-JpEpfLFJZ6RwV8a@9>v7f4M0
zRi*q&G-#XYKS^_{7tp1AuF37Vh-j6~lhp11T_P(kSaAu(%V=sg97!KQ27{uA>bmK)
zS~ygs=a($WT<prvlNF;?*7WAmAvbMwO=+etqB5I7cSw&EhapYa(~#PUaZhLhzV$d4
zaL(aeq^Volc|+SaoH+mHjvkjaallf};(`hoTFId7@=H_Gm#^J|$q~EBIMC@lRYVGx
zv=`-1Zt?$A;)IasdHKb0Tj54R0Alc6<U^*MMHNtu$NNarxZ4VQ+42(dM9Td$qLX$}
zrNY*7{L=jxQwb`X-hWw^^veOSzj~S8pwH^D<H;~2HG?6bv}Ha%N=76s-t+R>izLmT
zJd8EW#z(j|BWIfVyk=!IT=a}up(@Mz!*AjGD=*{PK+(>b@9r_&7|Ca+CB7;T2J$@-
zwG73W2b#kJiqR^%*X!<|1(cAps?}ol)4lW*DHVcHYjq)O-bT*ee<!NwvHjAEY@R!h
z_bnT1mX}{SM0-bRDqi~LH@SHCJ#3vk&DP0NG_#t?(GgACpomy&m^U-FPHxIjZJ8F`
zp&@B+4+%;23sfe{2eQMj){5u?(TA=OUeMyxeVNv7TA(65NWagYxmx}u-`=puVNNU#
zg`zC1=wEY#nhG@U0$v527qm*eNjeM|6O%zoiazRY24dHok}{mLZY~DZF%B~AH}eZZ
zjCcaBmdrsFQjYM_J(s8~-Tk-FpQ>&+@Ezz+sfInQwUniy?3ElpzKLrbP2CcUlFiLE
z_O&LuhPrMk3r*FtY;A1fV_-DsGaM8g+gia`g>M0;XoIJ19kcnA!^0yE504lhPMA(S
zb=^|ej=FL95OFB}f9(BPuq9b`o{4>XaQ8FaG3JPj%*Z*ja;PDz3N;rB1%QQtAOR92
zn-VEXl-M$o%{C(?w$%Ng2la!bww7dikc^Ub(^?WqH5(*BG}t5>Bnn^^fSRYw%FN0+
zA~GUly7N8f_^<~(ti9dOt$tL%%7kjSycu^mgCF-jthN4s{r_UIVA*!Gov<j3t#EC}
zylru=r7TB+KAW8UCxQAOx+)<?mTiNMp0e1)XhBrH_u!<L6Cc1y=Vu6^r*~&MtT6_g
zDEZRXk;h-^?d)%v@8@>b5y>^s7*#^b1vRGE1j$LG0v}L1sk6t#49UgF%r=q$PqxS~
z^!Dd@m~Bz=Iy$gz@^}^Ig2BjV)F;^k@={@Na>$zonKmu^bXgS;pz|$3gRTsJ@~`|=
zC`#J-9`9Ydj_V@YhLt2)+S`1HXd@OyMk(61#dRH3RiTZdu9vKhH}Kk#X;K?o8=N|Q
zmRG+23ZMJzr*Y1MQe3)pns0rx=6JbeZ93`M2~lf;(gf$FkddtNCetbVhlh+NW6+At
zQ`_9#xyxiU#uMp0G%gUWg(eBO0m?)|bOaR%Mxi#w*!2xmx8&~I?{faJr|`=0#QUD$
zwI963hd=gQ+V`NVh2HLchjO0!@R+0HBhEkc5HvLkMbHJY6O9zD%7k8PY&l~zozS#L
zJoDk_+5f$7V2TQB73*swY%!9RDohxKR@N{V@m)vTwYavUYZ}^RLsK_&Z9}_kY1<m-
zT*^06Zb87OwWz5bi+Z2icXs{=R+%5|@ne~w7G;^33m)&JQQK&nV&L2`&&uYe`ouux
z9)J{e=rs9^HVM7S=Ik=brBW7{@9ZW8yPOp3)c2$5MF=pmxhV;*Txik0NMa|3YBZ*6
z13q*VN`r?Fz3@K%SO3X>#MuWg&^9&4^96f*2b5LCXk1d5lH7dc3HanW5uzgc)MIMp
z^=(S_f|o^F8IdB-eTh+L#7j09QR$Sx_rWL6@Qr^_qi^p^k4VZ(t~`5%FLjDbKGNN}
zi`iO7YlHU=9~@`4HigP}yrijXHn!HWg@Qy!&5Lw2aQf0Z=gv>L^1ub&eC-X&!a!_k
zUCT3{euBUG#J|sEYlFgAChLaL_LR^5xu53g4_%>}m8{QXu{gh_*xp#@^rO$Sw)Oz8
z{>~qAbodU}_mB9_SHH$*Uiche*uBngy!thcVv|t4EQ_x&NgyMaD~+v)zQj~e6(zs&
zm;NG)rU50A?g5R`23=@$A+%g=5jtZG_+U9&ZcrXY&QBUX`KgPXexv2vFYn`{7txa-
zG;9GBvv;X%MiR&keW(IMD3r}KIo&5BIVdnz%+d#IncOdxCP_vj$@y&xGFQ<dAk!C9
z3Y*HeLy1=sRNW)bx$mr>U!-6+<`>VQp-P)~nr}WKIU(YF5YJ8L>0H3Ij;;-~%a*2Y
z=~`K|o27_@G|QH@Y4EkDYeb-;>l|HE<6KMYTR!n8KXqT%n6MIU#Pr$o=gQZy&wEnP
z<kNzqwC)4uO!@8CB!<h^S7%HPN`_>#|04F4_t>vzE6M9hK$8<;@idiy$h(+=FQJ6T
z=m;5E=+Re`62*XtvZ7fV>AHX}JWqb$qXWBI$v4$xgdUYtt>(>FUgEK5K0rAc5u6(k
zEdpKJ;JX%EmOa|LidYDkmclkAUW<$Qg0`(Wd+rQfD^!1L66G)1GKCm8|H23P?yvn*
zHk6_X9jdTsA|WN3avu`bkqQOUf??2Ag<m+D!+k2Vjjcv$+Ni#Wxhf!((d#@@a-t|Q
zRiRIwbfvlS;ph3@Z~j}(Jo*@ut!>JA&GOCb>`$kB=lF;#k3Y-#%U5W<<H={=$M?SZ
z4fc2T8C4@fbSNd)YPz)r)(lz;dF<q5Q0p}6#K%zepL?aK5)#@INd4z3&G7+g9BMAP
zUp)*cXBtyF73y*yog|N|HeLDJ2->2JP84l*B1zRcim^Q&AN_D~V@$pGsn2gE{ZT7y
zk8Bqrs8j6M#nl3H&tl5!VETMIsSam>DV64T$ZKbM{K@iRe#!gK(x^sb8M9h!I;I;N
z)J=;&S`eF-Q)ka|>dYBJbW~PDQ7{>oy%%V7;JcP)u^{4E&TAG&HFfKyc;`Ha`wP62
zVsrEwLrG;VBP$B=9g6kJ61>BEL>bsC?jF>%K~dSrY^s?}rWj>VQA0FDpNr$NjztyI
zkdbp6jVp|iK2#NZPEDGN3<cOc2KutN3|7%y8<e!=9;D)C-pf-_Atceaw1@zzKvlm1
z;JnNOl~`mdq#i{~#x90sDt(Edqay}OB=miud5B5hezNoG#iH|mkOjN?jbnv<p~%MQ
zF$STar(b75%zg;x9`6;_j4cfWs9RxK)m_W`e)2N}JlEfRjYTa!qA@1Ya%#mOr1LQe
zkU{$0+ookS9!oFhV!_7NMz38Gh?L`sCmws4x8Hmn?*|6Z<p<8ww2r;qeKuy({$pw-
z3d1gV=JTbf4ch{%OO$CjI6h{5Z3C+<g(@hmL0QeBp3`)vQoI_I3cN5H$D=XPCEGKj
zC@yYMUp+z{H`GVR*s<mO`EA~M^$lDX>4FFjDJP$!Z5_%7>Ul%EXr+i}OW6Yxb=xLC
zaHS0BKsl+<rDe1}W^!r5XlxkIDojx_o{UkpO8v_o9Sf?|8lZ?cFRTFX1Kv5hwxw$t
zoO5(-hjUJBiJF$K>2OUWf@huBySd=#+<z~U^vB{bCQtTEcS{EJeFZI1l!EGu;*jWi
znfim4O;(FYhEDXE!4oxE(?m&9gYPJc65l$Z0OfTjvz7=l(X;}DE)l#(l_bgaK=2Xc
z6;ma>Fu(Gbe~G{JxBe=|YTo?*OI*8tk7YX}Mnh>Tj4IhWQ*z<KZDyOYAQ&q?TqXw|
z2+E{vKpXKBi#An$b58>~=tzNc^8YlX+#?3o&!6I;DHtDjEN<M!Uf6^P#palLwn2;w
zc3*vy2cCRPk{%;MKSxJPCX<TU+L+negn6^%-n}DEZEsRlHc>ZKpCFk+Qz(aOYpSxu
zxqvpB_0t8j(=*VPu8Z8cwcy|R>%YR|AGpH$=7bm=R!fiD-5ZBI@yIskE{(Wy<uY`h
zm%hB<aK7NHH(utki%;>9=ReQh-gUls^Cpe;IO8x*W1~UoAU?FpU=Y)Dv?A)j-}t+K
z8>I@IYcawa!PpXQ47L<1Zr4bqSDBQ2sDRT2`^#<0{R2)<Tb_JoN(hecy(YA9*`Ex8
zxu(D_^;4RJfRku5!)8p6M&_WHR8=<G$~?%)YmZ8;Oq58El=ja@Wv)}A%MOTOzbW=~
zsny_5iE%lis2V+@ZkX&zT$@x5TSwP=T;r+hhPrKN>K0c!+NPzgTiUvzX&b3F)=3eu
zcDUBzeMj_?sQDOragUhJK?+kzdocwMUfq9r_C8oASMTXXU04xfVdc8UTp{qj53ggc
zyeC91`|^^4IhEFq+*aF*!Jv9HGUUKVtARSE=&R)!0gGwege-QHC}ciHT<nOrp5mEB
zW0clXk4X1jDOnetq^YR!n9*btRKV!S@!>JE^%>P@LQ#xpnnS8`#2erL77so8w1isG
z%L=Q6Dm|Ycv00pwairA`U+H1&YQ@pvA#K-k{^CWZ(;1#fp$sb(k#tR9cKHmm#~xyI
z>n2OpVb;b(vOP~?E#!VFMe0pW_t9GHctSTnqS?ELK6MslZQs+}-`hS-m3h+#lNqH@
zA;Sn5gqHV#U;F}J`|JM!|JC34U!vE?RE^?tS#tXaZ*b)U&+zC^e1v7Q#8|^@ZG(-C
z)9mlv#U`p>2tfj(2uhVGnjQ`A1z$4FHrcA=RsbnN>R~>o3QAVj7FxXYt*W#?<Rw55
zr_4x72v_pPm0r7^rZn{Tk=HCW_#o%xFI(31Ysyd{1?j!cHXZ$_$n2&QNK)RzS11H|
zEUffF<|4EddWK{*7h+=WO-_M(?gdX8F*!r%6Qv$`k0+Pa`*n;a6SOrHRxw(iP);WJ
z0OL`>R0Vo%EVS6BrkmHe<&t}cJ2dmTC{mk}%D~tJ%CRA~9+ou(g9@5LD>m0h(rX(6
zp$oL_64y9U>5all>0Bd9!OM;+SQu}jwPOF^AW@G?VTmb4Nc3@|BFd!oDJP5x6;)a>
zo=veSJqy7Rqe<oam@Ir;UxZVMaUrpsmcHs0!LCI7N_T!3>oUHw6lO-#)3`9X!*rmv
zud<8sTAS_6l7>+#ZiF6HC5?{|{J=~JN!TkTta;7W8NrWPyDJq7iRd6_V@%c`NEF|M
zN{7_Dm>nhhT3%47prorv89Env>V;>~Wr1HFaOd7#+RjsyMPj(n&)Jw_g%|?bN^4j(
zDrwppr8h8zWqGhq+qRfO3g$*>qK-WG%(Hyst6$-8f5B{XhKg|d^cj>j+_`y|2QObr
zGA*K`qET`Vo#W`>Ncu=8V@zpTEL+OTQz@8jPH|1m=5)f<ci-XkgWI%SM+^d%6h$c@
zg;Fe+bFzZ7w+dB`DVmzQZ{Fb2`yK)vxOnj*H{Q9)r3X)QbbL&smK0@4@PU!lboB!7
zy3|r&uy#xrmZ+o%pVx@cdYZOFZB3}Gp*x;KVJM0j$`n+ynS9m|iQZvCAm);Ej3_Vc
zFQO!?#k9nABS~C4k9UsXI#J9|aguL6UDMLF9i4A+-G6U0^T*@Rs;64%L@CueHyR{l
zDofKTwW1KOda8$1=A<Ikv$~WPqg_Sc3l~vnWpKe$PG;!Qn8o}KzH57@3>VV2sE~?a
zA5=<ZPa}rh#wa>0S01{|pZ?3g!jsQG&G*0a8t>gXqU&Z@bBVRhHMD{0+L*~yTK*4r
zJx9BDICuFhwldg4;e0@u2tF;!AuZNfIaMb`VV{<|ESh4p@aR+3*k}<vu&2k=({;wC
z<@#5@&EtRer|=y_rP;cCk@D`_9KHH7>Q8?Glt^<crD(n3y}P?yIDHCR8fK#rYZoVY
zAGx=)pzCVRoLQ$RN?hmYd_Zf(E8lsWN8k4Zw$#$M0JLpjzBuH?Z{6bT_Je%vGtbic
zK-W0RN;95~_{x`WGcG4Q`TQouxTLNdKJ}-brw%Qz{9et$@)m#a%J1{%zwl@I<g=fq
z4d39)cVDLoR+LU-hs6@3rlg=UYiQl!O3h#U5B@I2`Wj6mUZg4r5)cijB2t!;Nct{H
zf@-xq2HCh>DA_++W3~vGGV;uGr*R$p;Oa3wO;%@N5wS<g;!zpY&q6{v-5<T;KO}XN
z*lP=!)c>J+x@>xE-X&L1zAOYm$mF3!QS=fR1TPg2*LAoM<X`7;oyRwhwh_tpwrS+6
zcDSV^n3>YjHVtjtNX4*qLd9*pjKM_N9D32hU^$VK&XkD8ynClYQT8Ivy`E7eYL@q6
zbK(Q{`{)z*p01mcn}jT()X)o>F7gkhGpjE~FN~2?V?#&<GnFa1lGNrtyyOrg_eG_E
z$mM=iRtgIF?X%Ko9Eww<z%L95Y>25JI-QPAC_%P5QUr9NXuP8+CR}>_3F*(#uzU9&
zN6Q1&W>ca68k1h%QE0=vZ@tFmnKOd2#*k19&HnBVn_H(=P_6-u${<8cmH55Aeb%O9
zo_zWl$!AxPJ}L9%>j_aCu6*(*`R?EQdp!E|1rZ;LvSz7V#E{7}`1q!GRS}}K*zuI+
zV265dm+`4HspmPa6mN#0vqx?iPFe?*ddAWiR)N`tEl!<3%^6crM2}rJEDMdjc!>{u
z;S0={Ev@UQ6sQ8OJo+%-{`wu7W=T{L-#KP$GlCPt_bd{YskT`WT4$SwRhTKC8cC=v
z1=HejD|Q)qJjvLVYt@@aC8b_r&3ZyeY?)*!GbDV5LAsQF<jZAFdyNmWFQ=MVrU7Kj
zoEXwRo&MMh+GHW8biZ?<x$gt-f0rQQG%3`Hc{nhK@<7Y;M5(OIE!RyFhJ5kl``!Z;
z)faT`_c57G(N>|YW-=KwosOkHvu$aQYU-nX4)49o@%}z--BM~;+Zbb=2q)RXQ7A=G
z1a!0*<tc4L@TCYF`G(juxYkj3Ep6>+7A;-l>6{{T4!o0s7LI^HjVeLEY)Kc7@F<)@
z&~2mAK+iq$dRXtWBC~Ut!ZMvsh^`al(z$-!S6S9Rs-9gEQfq?TvqV!heQAPB?2+4(
z?a>C2xa1lmQ#B>cBCnbhnbTu)pL=hte1aAc#a6}xRxDpavc8E4`3&hx3a80RCfPg*
zWjPM#(E}yvUp;)5%vfT@mM$;pF(CB%G|yyac%m1(z_wZPGr#)hh(wNeZ!@3QwBCpq
zRied<^0^cVWi1G(lq!_Ph{b#lrNnQv>paH?2W+3-B&SEw1s;9$Q5vs!``R_0-gp{-
z>14!gZOX0Nw?z=Izm~GrpcUikgu<2_9UgITaDa*dU6#1UF_})8PRHEZxyzO3o}_IZ
zi{lO#4XRX3MkA_eMbwd|Zg4IVyi4tL0kgG1Ja*iD>pGX8eh42LE?z#vcfau+9)9#9
z$MYr8Ii};86h@|+x><_OjQ50SnQctiIo?AVMN!$*ex+!R7nG`GRB2*Wu|D2lJTn*r
zl~qhOW)fosp{xhziC)B*V(>&SeY7zKbPz2TqcvqL@Rf+NdG9El$9E3z9KH*<PHf(~
zuBBTx_kE4|Q6E2+32OFr8-hVCR4X-XPSHrh8ZqMM)g!4xzn<2S7<8{h*ej3bDtZR3
zQsq7Pj%u{V)L!K1;1;o~3DJQLD5r=zFE~2&HfU5dSQW80@P%LcX@2IH{{$Bvy@128
zzuQs7ZMFggSL2nZ7%3*>5!3aG$;?s~LjMdtvU6{rjm<TtYh$cUd%F@rlo%qRb*Lh#
z6{c&X3!N15vY<=e2x6~7s&qt`nvDl85N^E5doRAm(|_S-@DAD-c<6)Aar|3XS?=sW
z+oC38;o>V8SDG8O=B+nx@!&&e8BGd^9%bRo*)dT~SS(tuy?cjIVc6VQXS6xtPyJ7R
zg=??hq%$ptJBJ({&gmM>`fQ7}%~O2jlaEo(EX$oE#*>n(Z|`z=;F*mT4?lGpZA1iW
zT8w!5`HRH(agO&E-2Cpp<$K)b%J;v<=RfgDzISVfUnwr|#cN+@x0n-+Q09sNrL~Nv
z5xcd;pZ}|Woypo57r^QYZvxRs|Cd#f!WxQEfpZ<ih_N<n^UHrm#$wBuySpu;HBDz)
zo_T(Yy~AVf9@<{rHzq3(q3W50gr@tFdu0+0PegenDGI%Jt{~VEQ6-K!=Fi`g!GKIT
z2@;%o>0x$0&~=Wc&b<;|dL`<HWxb?ch?=NtI@-qJnog?j&dFjQBB2v>#V5LMOjX-l
zxfKM)OG4z6qIS%vUn0);LMbZI?6TlQ%13+DC`n`FfFd<xDyv2E-x%)q(X%sHp@Bi4
z==BAMT-%Q;0o9-;lX<bjXHN+eAzNDwWX4rAG=v-&#XiZ%FP{qI1CEr?hnRR{^7qj9
z1ZD(Qx=ck>E+*y17y~#(OhPKoCxN5XhyT#C&l0sFxRz_zud+E?M<wFBEmF^La3UV&
z14ldeuw|9jTdCeR%f*0d4$)hG@8yySdV9EYkG64aY^)CgCH-d|kQ$I-aPEoAj5kK?
z-`L^xFaIG=|J)anh|y4Fh&d<@!`k2XYLx|MvPOG+gemqYH%|>?AX8>DydrGgp86j3
z%6-)l6+9}om_oC?KEWQ<lq%49%e~O?%%?w0({&i5vEE7%$F_`S73-VZ9PRHCeZZmF
z*xaV89X>{E4ssLfxtd%-R@6-%%P|)kSV4GJ1j|wZC?oQ$Ac^V4PS6$eo<Yp|2eK{+
z@)HvMZ&0!xY=ruqIE~qiO34~Apyu=<XMHEKO6%Z|wG2h990Md(ewEc-GZNPCarrfK
zW;)n;48^;Z*X!9O`8hqWbjbMc^I=EQyIfS3@%lQm?G0QA?7w%5J6GSpAMMgC7ZkQ5
z8qaJ}LU)8x0T&#8VTo4apDrff)OpSEAsik$8t26d&%4Cn0y<6C1RRoVN<tJNd9G`#
zlILk=ehDI%yVO<B>rMXiE7vTqnWZfll_j14WkHMukoupK#gz}C7yF7@5kj_qNr+?!
z{dJc)JFFYBriFe61fSKS2edpF9(p^Y5b;TUSgF<at(Brrl!U{D<nS8u2#K=BM%nX(
zI!ix4`njU>I2~F<GTUdcaLUTsGWJ71ZifO%&)!JGu5CPp0foV9&&5ZcK*z}SYwzHe
z9oou00%-%&={=->DH|5bx?u{9>pHymVwIx}`}_NBZ*K`ukfz<*+Kjce3E%w2AM)Ju
z&q^d+M7GbK;>OMQaL%Jji^ZgSlTdFftp=Or7PE~Rb<@z+OO}f{d&dVn@W6wFQN`2G
zJu5xfg=MW0W?C+4;f1~JeIWWMOzz-`R^yB%?A>O0utzZ+F`icVP{?!YCClRjF()%n
z8i^ZRC(8EDd(1>p7)=|%Ruv$i!+dX_LTR+E7_U_ry+K!dtd;hz+35|u3w?|eVnlmU
z3Kph`tkod|qI2Tq+c|tE$f@%ZKe?{M8-sTiZ^2nd89ZI6aMk$!FET&k<Hs^V&1j0&
zijWZ0Y-%isi%C^nmuj%8FR_X0Tz;%ri^xmLYT5Teq+UB|$1`Ya=(+{gj3}yg);BJ4
zcyI&XEwO;ZU}6-7bRP-E2wf$%eCnrP;BWqie~b0)4e7sA3hNaYE-JVnXVNyXw?Aj^
za0x`PfuI9kE2>KRCbmc093Re6Ix=0KqO{OPtkDFQ0*KDzk)B$U5MX7J0%H=B=&2^D
z|4^eq2cG}<hk5tEd<(TR$1P=22jB772Oj5le%*0t5pV~GjAoOR2nqe=%+`c=u3PTj
zI^yib4XUwtr1}(1PsRn0JaUQ7!QtVYo!fWN%CUWE%5-`f`}jk6qIhHlvA=ld<~?FW
z{I{??_|R$g4{9c}v3O)<Yo26X^YrtlIQZp{@>_S`<HpTbc=6V&JoDZ~e)<<b&%gZp
z|Cpb@@?pMp@Fw41zDJ=AHbPMuE<X4mpZg1cnMLPW-a2Ia)Fw)6ifV*zyaY{ADCD*(
zY3oM%SPP*At3=^cTIh8yz;QET?_S60O9ip)_=%4`#20`68g*@Yzp%cTo0F4FTUw#N
z2_mjRrOIs(U%002XuZRQ2Issa_nk|<CxOlfx@Ak#G}Nu5oj0;LwjEs~$Y$rnXgqo^
zeTuD538&aD1TV;E42i~@>3-g)O0UPeM9&MU@=OjYhd@FFoxo6?L=Y58TS4uNptzBU
zmDwoj$l2pgN%cGQB25`dOO^C2Jn`S5jsE|BWWkEuSCz^=Lm;0^<=J17l<XyxH1}Ke
z3GQkjl&jC-hXSgVU^phkYw$%)DwZkn>U)aGwBMnRDJGhlutSD^*pSXUCMs%9P$ZyH
z2+K;LIvH=#wydAIfFg3Rw@0Wwagq>hqbY2e5)Ar^aa%8mQDMunr`Wl+qib7=a+I#_
zAoL@7lDvr4u&fWb_`pR;fMOVur;I$rK4L}1NOO3|=BQ$<JvV;izoF_{9{$45KvA5a
zqh{-(bQ6Z^BMMuMaLtnD=m0w!p+{4C+G+oOBxQF+Z?a7GDHQe3s3f`n>VN)E(L0AI
zThNpRN3G*{Qu5FKtN)hwee81-V~uvOm^V~~Q1#DUxWL`_ZW4W<s!GO_HFgi~VA44b
z)c;KP832rQc0Fy;WTZF;x`L;!C{ky1UGAm#shE+FqHOUIl4oel=zMRXM7lougqmr7
z%gp468Sc*ow;l=`#{D3ALqG0&hC3-~O^L@U;B_Kjj3oN~Ny^CJZkZG1l_a)*kpVsJ
z)r|WY{X>2I_xYGkCm5r!mEp{#Gi+>cVZ6tm*=GN$qv>|BLDp@Y<6x)8Bm*d0MhYvQ
zz_FGBop+I@aV(D`^SQ$ZMOjp$3LPU|kb<VNR+tkQbnJS>SjUJl1r!G9UXg@OJXKRM
zAoiwwWG~&2)|-%u<)ZGYh}K|2#D)SN#d=D6g$Y*LMPfjEpG2wrfF$NSmO{5)6f>jW
zX`Ezal*}4YSx8;26iP^fZMp3UVDPQZ<0CU<a-V4ovi_=gvS1S;Cs+vsRd^65$`!<Z
zcp*%futL3;5=$z+DJdL=ydDZCqeqQH7nr+%v5H4O^dzPn(;nYrXXg+fq}?meCzTiq
zz0G3kmG!A$sg0Eaf%6z$P@01I{DA$v1J+NiqbQ{OP$-^!;#uB)?W(ZGyzFU@J^V0V
z`_k8`8=neQTH?%9)a<pD6_lpLRu*OxV%Wk5hYvyQg|xzTZ6-P5lNDi%(!PWT6BXWt
zexLBZrP|(RxwFsljazJ8d4OW1IDclFmtT4rtEAArEDps=s;Ov0z;}Vzc35S(ee*80
zk5r|V{YV?|4f{L$U?VXU%n$dOow~?$yhhhWCR-cC(BRW`_TGs#oA<phcnE<iwMzu=
zMdT?ep#-~*&<RTF+Kz77&^0Y>C!$JS=P};lvi8RxqvOXiK~+jBePW)RX^|=6OqK2s
zz^IbgFGX2)ekF8V5ugzzxby}wH-^U^MX|*g-+8=VfHf#RW43;lgZ;M&O@{`N09Trh
zT(RqT;loezcmLzRgDFam4r|&7N?VN5nB-rH0y|Qixv<9B3+vQN&*5QBUAK5Ioc76d
zgt3;*?M)624l#>{$#jY`PQt)KdcM4i;JkQnTJb*>HQ(g9>Q(PMrp>CIPUOs!XLzgg
zRCPzUdx$>0Mb|pEFK%+=6gDUhUVoGE0~dx6voOpiiU%(ke7j_Ev_PAwSoew9Xj-p5
z+C(<D$82qmXuZdELND@BlI+n~rQTLJ6F^zj2SVP1DmAu5pkoGFw@!4G4}a`IcCLS#
z{eL*;u)EDmZ@t7X{`_Nn@~{31U;mAN%ZKbksQr>RZ{46;8}s>}|1+F<;t}-vI_pt$
zba#)%-h$bt#S|7(lsMO+bikAvV=c}_tSK;sk)Ab|1R9cf3pNs>W@o>koM_fhX^IWa
zGf$u4+uz&Eb?AQ6%X00#8*jF6i9+4gsmKrmqHO8{2M0$S>>hHwcg#C)zAGrEOUR`c
z<-WWLMK5&AoLEW~SWvIL5eJ{tF@x)o(Ci`WedyJFqtYlFh@H@GwMl|pMk?6B(f8$w
z<b{Zyn6&Ybiz1{s8x+w<UvBgR)mac9Nm&PeCWG>F3PNI%&X0ct>w1sArGy}+h$)}@
zP~rFK99N&0?=kd=Cm?oLIapo^<yRt^)&8fPjAkS?MxWGr2hk)&q6jP9BL(>k1S!m~
z#|l+7G01uvwS2FTd#ZvsCr3O*Q2;QV&IkdTuI&>EYm}&8s}uxUVH`N$VpSnyKq)%c
zNgrx8N{DZI&-8yEBDS=A|6AXIwqyI$wj_e-y5!!)Rr?+l!8aY>{jJ}lJYGN%n2jpl
z_-FruxBm5S^W?Am0vBKSICeJ0r+cA74t55iyci=`OEq1iIoM};u*bA4iMcNwxgNRy
zS7j<`^y$1jVNtX5&Q-2``#Ze#hu@_*su`6PE(Q*pC8t07e*Vng_;>lW|Mze3^)G#e
z&;E(e(s)nkJoS9RbbZR^_J-^^K2nay6lKBu_z3S?j4jBO0O{Gw#Z^fdBrwltlO$X!
zkD(MqYa{a~kF{Lh=xK@MIW7e!dJEXt_iTpL|11Et_-p5Z*yqCJIh=|otRxzl5gcW8
z=*5`wcpQdjD%QmTyv#D=JtCS1QlHtT-wYx%1Dzz#!O-9Hd@XuJeE?9?sQlpq)BP`k
zJ^lWtgd!RoYnvO4Mib)NjLDe|*3PXl-)YfCQACT64MOAhXch6TLeYYTrjFR6B;@{c
zW3Z(rb^&cb8(B+~336|vMtcR(NP#}almsd;8k<ls^0o1S93_}cPt{_Hrj7LFIWLKw
zkAsrA2xb|Kt*{=P0bP1Ihu~!M2qM6j?@Np;Hdrg@w~hnldv&cG)TlFNnnqpBEP>dM
zb5?t$`?Y0AfMhKSJ+mYWghidle<b=a6mC)hH?PGpiV)L4;~NTwxxFBgt!45&mI<P=
zy;GitY5v-nP|(8ChQLq%%C7>E+c)2)YdpDf8-nz}X8SyCw8Vld#TD7Jr7fuQ4s8o`
zVUnnxr=2etuWjL@*yKI?%+vh(E8nGSg%KB3<mtzs;*0<4OU&oTXgx+3ppu4$%4+^v
z=5CBigvqqD$=Og4W1wvs=JPoXu(@He+5JJKJvIbPvh#B3UivPL7wa)q$#QSb<a|vz
zU1PMq&THTPK36WD#zaM|TYT`e+Tueb8rhSSiInT-xOMA$oPFp)7Dq=g?`Y@89Pb@d
zj0-yNQQA;!uJPbA7io`<F=k90Z=(DWUAsgyYcxen)<HgsO^;8gy){G%`TQXe84+Wo
z^a)YzI-(cB$F`|ymo4?OrkOAA`x^73K7MQy)V%r0{*ywo6-hyzXa|FKT&7H{Y<yv;
z{i-a>Pwc5>D3t;=C`V(gDe3A3sDPPxOf_LTyTnm*4UNJoPZv{C7!^g;@bCYJ{~pkB
z?dma}O{z&jQAWt_0Wk_CA*P-h6&X)8li7@}2^<}Z&`eziMw38QjX1q^nw`TvY|)7X
zIYFo$g^_B0=Lnq>B-IwO7;6zn(i)V9uIr>)RTiRrsWp=`>!@lS%aU8)ev^lO{^N9=
z!ETM<)M=FQ+<EEyT>R|EQ&1$ELs1zvwlqyUqMo<Z%Z8a9rG#UsTIZ^sPeDXsEl!Jo
zfL02Xr0hj?G*N6+QcgBdRaRmWI|NSIHV0rdDf!gTzmL0D@9@s=eu>)$2i$%672f|#
zKaV<hk?;Kd{|}FiH+l3EpXc<$m#8*o_-TO;0c$Hxoj%LmYgZ}Dj&iE8BZXTeFS){?
zw8FWLsvKj>3QQy#q#wg)^-MuNT~OS*9jK<7YOHzSq0_wk?jiT~JlU%)W{Kszsj2KO
znkYeOI}QX~=uj@=yk}l7Su7o8Ip+9aN#_DO8tMJZn{&!aloy+YK731vmCOT8P(A@^
zDe_O2H!*Dx(&H(8)yilTmY`fpYPIwVNh2k}<^VMGo`)g?q|^9jDEg?_D?(}&`f-95
zYP{(^-DFc0rmRr;wU&ffr3PMO?|<;P8p!7$CMSymz9rgcl+Uz^1ZDPok154GM$<K2
zL2!CRb|rBP8L8`S7>0Z{s~yq@F*y!T!4#{itV3dV5;Pj37L}67j5L6(5za|JC5bj#
z6eXrG)XO<aNfAbwwEd_++e-4Rjp=PX^8buA%%-y*X;8zTP+DO$y!hom;NFe*2+^Sn
z+pq1t#YCSesQz;(tMOi;M<sP^F`c7|8hzw>`9J%0zW?|CIn}8d4}R*SY(4Wl)6=Ig
zlM&dY7TfzE20`M>isjJ(<z$MTZOWV`phEyBl=av&G>ZjCcW!a#t#`Qf$`9DRcAL<&
zj1{aC2!*9_fv`5?C;z8^l~eD3lAtPn;m`gOfA9bJ_xQjIA3~SXyR<lNsK%C}s3^*k
z#eB(ZV-0OJySHwEH_{iHDU#{a%cU6ZZOqq7(a-iFa2_;(d-1OXN9Hw@071z!wHeU+
zyq3sUMs`9HehYGLaTrs5a!4e5sml9g`<9q!k68vaq~tZBP%9QPF|CdL9MLf$LP03i
znMKjJ2Bh^ekMBIt(%4B;^}l-^iK)+Yn6pEzdL`Sq&&TG*7TQL-u9NgHdIAAk6^v&y
z%IO&8G(I?*`Uvd+LoY-Wd`1ct!aPk0R|M}h-baFwu}x|}%B>(6gHC8CIxzt-Xf1_v
zliEu(I*DYd-gZfJJ!0=K>!MFZNhzW!l@l#lK9rF{ys-x3Eyj4P(fCdt5R<xcPVjQs
zp;y}P*Wr_3gwN|9Lm@-0-1~gjhS(zx31m(fSI|$$);A#)0|e*9XWDTbD4s|!>gxKO
zZH%&yFIf~Sia&8AGZBXK8GznFbS%$<EC?mY18D1c;<=9yec<-Z_Xtt<`>akZtKNUS
z=K{)ko0UG5sH9g|jZ&*nkeJJP&$3=n6a`Uv9=!Yj%eLd_=!mtA8OmrLe()UFf~IL1
zjdapt8Dl9hR`>HpX9{%`rb}=R7Xn8I`yA}<GFw~c%$c)PRe>=iz{%rM8<HpDvKB+s
zv_T5zb$3XyQLuaK9%rAr%wloKc)jHI)!SUYbO!Al+G|wTVMk-E(I}&Eb%QNo>-;u#
z{QwVKIEOuRk)R4DuH!?OE&|~4`1Kv_?K|k%n5I5tG`fV=+ZfvtV*|0n`<f7>m$a)>
zyVLTBqrH8$AAC3!<`h}tFQfufOu|V;+flV6x`|`liapIAx8uh&K}~^t@>3M=P2KN{
z5*#K3^oJ-?342RPsYa)TAoj1D>Od!UvPe0aVyX$w@6s&ilu8pPn$dVnSUbz{{<Rcv
zMcE8g;LrT(pXAbmXSjLu7JJ7RSf7l918R^(Hl!pk7c1%6TR@;3DYmN#T|H*8aQG(R
zbc?M@wzoIgxwp&dtu=H}^u37MC=3RCz^5eCmKGIF-#4nQp-|9uE#5hFAyv#`T2h=k
zi>a@2^_$=2kze=(R#}2lTzu*w;?=h}xOyGmwCG7iuHvf@i8f+Ul)=)}EoK4J*?0(8
za-h+#=0gWi=cQ^^6){nh6O^VSCL~H?ls-@uH6aMCE~}u1p1Ky<*q-v4Km8GQuHRzs
z)@ywKYp-zSXMdVUf8s1n{AFJL-FKKieg!p(1l2LpCRKVprZQ|@ILoa!uJgd74^SG5
z*Iw*5ib6IiAF!oGmqv6#e84B$3qf@gB~dH9*W9|{xcul0Z9ATN;w*pgjhiCyVU<{V
zXoFFv_tsSzsmzJDO1`!cf|KjzoZ$M!xS*>+xdsr*tcXO!6o5yfS?M*53gy$HZ;T*H
zLEhL9iNO$kQtS1SAek5i8CyXVV=Q|5W2Qj$@Iy|dQh+M)z)FQwr+zst0%y9XTjkog
z63VCt9~~~4c6%>H7w1KJG6X07Y(AV2^1NS1$bo!ctse?mqJx&TBN+nVkZom><&2Tf
zl~EeKLg5~wk)c;AC!5*-QThj==YPaFP$e@%wNE@4`s{l}N=|t7r5v`Ohl;HFn$QQ8
zXqz!eKYd{ndM|#0XV0DIy?5VEtb#)P??MDGUT{WfOj!zTID$ur1(zPUj4i9d1%%vp
zECJnHum6CfojXj&C5uHvyO{T?o!Rd-Ctri|W<XVz=YHl>eEa|MB3qS(;4l_kj1)Se
zbRgc{<=SujH{AHr@1wWYQ5$P)o<Gm@(m9IR6kALfuWwLI$Bd_AiWqqF%U|Z~<BwBT
zYwW%E9tZDUWq$V_$9sD;2S@m2i*7wdq5y8iC;_N~MW<l8$uqzFOFZzYk3d!8(fH6Y
znpQk`<uShUZ@$RKe&$nz(9z77xQz+bXiPPlu(Q0)>2qfZ&hy60FHx2vAg98RH04~W
zFVg0iJr!}X%INUW+;f}7U527j_C_8OTqS*Anw4>x5=@U2Eg<PPtn{j;^<db^I1zy7
zlNpkReBx;?WEyWOWW@~mCx%pCo5(8rdEWG>?Fw47iW=q5dg8*#yo`BY>OH2_N`jLS
z`&BIUejh%$0suxO%OXu-G*)+*C{`{aHmV28AQ2Nmd4-1*`>2%AMSS$Q;ORV=ps*N}
zPkq1|bksnB(k`)2gd(hsPHU7#nH5G$Vi2gbS4Lw0_ueOVMb$Zbh6+75-LoVHn0?Yw
zhKM01lNgWc*QF3b&%Q{DaVnxErcYLfP76dX^rJ{F3iK?GY+0mJdv;DJ6MoTmS>z%}
z?k`kvSo=w6?l6Bs4`%emn^btzTI~N+VjYnZwU83{!Ek3_G4utV7+2<p8VWf?BAO5X
z)F;rUVsWs~a@olm8pYo<KR*L9qFJQ-c8{>a*3wGRb=@EyWi?t$Pw;YaM7h3205&$(
z(6-{8ci!Q-4?GVM%5lNw=7hy!!E|g<F>>SslQ5<l7X&Ra4QU={4Fn$q1ud&F4?poF
zqf*-NLI|Sb--XoM8&F-KUDRnEknyM^QgCq!Z42()eVg5PZ*l5@OPCmW;PjN@?hfa-
zH@Nu3;}p{wS6_RDMO6@#rdxKH(qK!)-Mc%u&T*xv(BrYR9BBg>lq%86v3KoNHqVTx
z4?5oY>L2jbPkxqQ*OT!=3DIKJh@cuw*`liwoWk9$IXZkB^hK~0MYLq0N8^KZZW~1~
zMgXO<M0XuU7%`f5e*|mHkM{VnOi*=Ngks3kJ^w%E_S7Ni%V=w+4zB)=AvvXg4Tg=A
zy!dI#$}-zL!`|^Ok${^Y61+xD6r<UUda;RX4k@De@CDcLxu5(Lp*`T%9ZOlwh|!5$
zdrU}MN=|#(bTaB8XCvPSijijBmej4scM)v@Mq5swI>XN0Tbwz28f%SI_*0N+wZV5j
znJj}Xv-c*D{J;uTN%ht_utLY#c<e!V{T4gdZ=k%w7>)Ce_kHsHy!pn<6o;B_?|{+t
zEV=PseCriuG(-d51Qzp_!dj|Hk*b?2(GLgNckjJ0Bts;)z|u=4c~t3sF^!Vz%f?pZ
z<RJ9dDxi8p?@al5=*ew9{i`46OaIv+JNwso>vw;bE5Gy$JpGZ2w4LGmZyj-A`xKL+
z!xkc!uhOnHULR3T*4Vqd$L57iibCU~*!#pFJ+ihaFjf-OT-1ocN$~+vRX7rqX0b5L
z4;t1_8#d3)SX<K^AE)Q!(U&QDE9m|(u9M<z3bHh^_?sePOO3WAZR_wpFdA7=!At$3
zK7^#yE7jYWl8N++_~3`6C&4hQn=O72qDY)jQr43Yoxw&TCut!@5fkY9GbGXS-s8Ra
zfl04)!23w&I_ZaXp3nlF$Gg<CLrYMCG<h#3@4dK9-pW->$lrZF;$U7dOibM;k0%2E
z1WQOi=t^*&#zIagy%x4aj|eKE3P&&FIA;ISOd%fPBL;|-gyODxpALaQ-(HVO2ITZ>
z+^R@guI8zjS#Uzziz&H|UQwi^GJm@5?Jcg~c$>Oz7>%r;Ai?3ghPJ7(ra*^Il)*!!
zm{weR;1Q~|^+9YT6<cEB)g2w~@!nf+Qd*%+H3yEjUw@Sc9(ouvs(OT)l~rDzFAC)k
zeBe30a%z*K#WB-lsbfsU_&`veVr(##rfwX?!Gbt<+<fy6{%FCnSu!0LXr*XSVmlBF
z%SFqp`d=}wN{X@+rB$scOk`>_!HB?xPYA}MhEPs8|AFUt?$e)U^U*8BLW?S9J|AWA
zuH*5?9_H8n;ji(L&wm`>b)w?fN}^(oMwgb2txe`fhg^O6<)Ifhf9^PJPbc~#GmM&A
zMhGj&P7H$&^vdhRRG8=$c2z{HypKrFcyE1_1!2f+qd*qU32BhV)qB*R)-WGVRIoCm
zAQLnD-okz}UOC^4pbPrb_W*~!gw(eo$a{q}T~#lZrLrOq!)M66_f<N6e;&Y))~<+@
z-N&Qryd;%AU~PdmnqVSzyTH+*ts=B?t*o&)<rBLmA=eJj5gn4^a*VhLor?q)i5|RD
zcta}et}NAvzD4i)ni3dH5}P!jwJ;2V%GWI2I;$ziBSzH-l)^br3=5i8#!HWUtq2K)
zJXVzGhPWuq9_OXq$R~TBm{g==Lg#bQAgDaDLXYGQ7~u*dB*-{8L79zF1SK^+gdWo!
zjw_QQ^wwrECBsAUB_?&@kWtp`@1EB7zMz?s>7f{Zg0Rw0)qV-i|6B1L?&<D<WYY(&
zZ}{9ViF)s~t3RM~X`DJQLRp1DMR5}_DP|>6lk+Arw4XrR2wE3{Pfh~T`sJMTiJ~-<
zNyX&{FLLARO+NgQkFac)+`DrZ)z<9YzQ@Me1Qo=`+&hnRBdSq_)`6%Mg*LfPN(>%r
z4Mkz7=W`BsJB|;Iq&=(96lJ3QC)bAhctNvRqLrnZtuviO=EujhJ~G<eqOm38jTxsd
zoMk+&c=DP5Id(Me5$@{LHXDs&uX_WHrfEAWZ}4cgXWP8=jhDFf<_$JZt#kJLHXCQQ
zDc06dWkowb;`sHqC=YA=QBCj*-u|60@$k=mo|uwv2|Wy&O4P4ajb@3Chiq4dwat>q
zI-&Cf?}#oCw1$whI6{Zg7HvI7iNIMf_kWG~Q6E2+32Ht>pkhErS9fmJPe$>o8>-K;
zFrjj#Ne|SBtP<A;fGdXKN^$<eW$s>m4P&KI+Rb<Hp`onSSeu<<r=8P9ht-O6XV-Z2
z$p^T5`+%q(l<JuZXzV~wj0r!=YON~wn+*xK906q&l@<?9Ttt--WfkjNXE;1KU}I}D
zM;yJ_j<yElMCRQqjV=qsTs`(w&O#M*Z9{Y(6EtU@y1?Dn6mD;hpD&@3fc4VzkMKR`
zIjs~&Z(L)1`8<$_B>A!%=@BhLYV1F*(S@NXr9#^WRJmdurN5;Y9PyE6*`ZOCmBl1w
z&)6ezQX!2=kvODeX;oYZ70_1mfsZ}P%~w9m@gE-Wjeq?u9{Ivgp{t6=UO2}+zr?qm
zYHdU`T|zvQf?W)pzP!zw-+r0dY>XaTX?-XP>B%HwssdwlLZ756s-66;SL;&-Odxp0
zy*o8)r`Cz4=JL52uO1x}l}(h{bnnTORi9#bp<c0~FioOMm7{{zc~s#jF^ooXt-Cfb
zT{jeyQb^xn@R;lSU=q~sW9XaxS;;*?625FW9bDJ<xCS3_os%!u;#x=OVv<aEIPd6O
zi*t_Hc`=fA^!=<sIq9Pef!L+=JOFrnOucV0;6tMD#wh(iD)-1HGxy}_E22ahGQ7V>
z??pOXmcdt!q2Mw+VCZ8~S)?Yc%#SogefoZ|eaQ-^3hxK1@DSLq2G#joVju9UOc_~O
zE&70}PaNpI_4;%(4(m&ZQrN@GcvdJpj<HRfagaXKayn*XYm26C7>}kDg{5_!s3+EQ
z#*--;EA}$B;L*pPK~)t4yKGQj*D>N;%UiF!NNGcl=HeXn&JMe`?{N0P2YQ4pQ#vI%
z8VGF4XwLoQ$NBu9`bD}$!|{!G`OW{sf5L?;58;DDg(Yr2m%J@nn*Dh~G6L0Xf=Sn@
zG&WgSfG#ZPNNE(tM6z~;CaqUdQEP*rZu7`bewI_uKEwF*RzjS?Yb7=xT`LTesPLi1
zxyah)m>4>``BGXj+K%YDlmt39POmW;&-n6JeoK5UGcul$td;)VRdh0t#jZksuQNqA
zL_uJCZ_&6y-&M&6m!Y>bMg=;g_sfay$zrZ7enSGX8d|N8WEisQA&1V%Ko%k+h8`!1
zk$#NiakH9+^e>qopMF1hNXF!T6MIXpOa<<FpZRsdP^|9HTc!OrBU|ag`UDhwpVk=H
z4dQsI$Qy$cZrj?UqDAY7incH8Ds80?(gjou7!&D&^av`HSi?o|LE(bX(|wRV---5i
zubivUvJb_C$QmO7xHcG5BzBALV<D|gkAN11r5u$QYw4V$={l6_P)gp{`K*H=dr8}N
z)X9o$xu~fZHBDR7b!}3{1(aA~1+uVS=;x&JA?@?3_i)aIjkLtdUYCl^z5Db^#Uu1I
z_LyR=WV<ws+#YpRCur?iG%xf$r+L2QqHgvX&x{XwnmRG&`T|Q|q*-AS(0fSq=bOj4
zS8(RaIZi+D2)=E&clWjw#v{_dpVabo?g>vEX|45;pk^c+Fld^^ve&2(KjMfA4iz*_
z(^6I?Mr$5>>=8QGaQ&Tkh)qi*@VEZGzsBa)jHdP6y7?Z5`@3|FXEK{HTbnYTjxj}n
zH#)^2vc}Hm$K1Q~9*4UNv?-WON=DTP)hR}u#@YfCHS6Ow&TVfqole+3yUE7(I!#^E
z)g3$U-QnPHpZ9<I{cN1xMrpWv^Ih(|dyD1nA<NG5=!f2qrQm_*-_O;n@1nG(YdU;e
zqKm-h<~qxH!%nly?yWs;zHy!P%{4a9Z*uX8^Q@n}!nz%CxOWHU3ySD*%Qrdvx@O)k
zQPYy~xr<CsKZGd?R5X-@r9IlG{{Gvv`!`T8EMR<5ijy%Ae1mt5SUUwpbOzleD4&SY
z8lChW{x}^!mI<oP5krWHK1ebG?{ST^5Wop?jxvd&l#@=4N(*mKwe49)st<^A)B3sd
z5AdQ98x>)6db;Hy(YI9N4JMNfj_12rtN7^0Kg`-x@!oBZH4}6Z&?cg_M_YqdQ9Lg7
zN*J6MTeb3IUeC1j-W65ECEE~FXsXK6HYIJ_Qk12L-{k9KgfgpI@QoAozbZvL38}M2
zqso$|X$fey9z4x8ZLkXVUV4qS4?l^qhH9(Aub-w^9&qP-Z*cCjA5K<2Dbbf3g0YIS
z(%?&$b;IJgW^HpKimmB$Y9+?u(WgF+e4o-iTrL|zjFgqd8q-@OWF}9aj`$ViIaMz6
zwJatDpZ{|&@L#|4F7@vB`Sw5fb)Ndmf05bdj1PYB0lxW_YbY*a3vjU`C=VEnvJ?};
zWV+3r>${wN_?!qrC@o64C~TqS^JIFOQD7ooIZi}1iKbL{1x+0($MC>IXL<9T1G-@P
zLdf9H7KUO)O44LJlZpzm5!%91j0()M!D>x8Dd=3FYa0#^7i^xIP)#ZcSQ9PTdyn&y
zm^$ZZyOyqTbZsl+sS%@S-v&a{N=45HylZheNo`$1M+3oig0%Q7z%*FQL?C*XHgBCG
z)zDKk^L-SRPu~|8Vh96WKZ;+Y3R2DYDyAM=GW?cWc2zcuy{|_I{lx35;puIMp3NLw
z<j1NUq~u^YoGhMY4?k9t$RJFRYzGqaSnfwrR+w4+UA7{~zsNkdl>}fVWe7R&&z1XB
zh!Me%*v~D|my>|V`$%Y#!Y2xq1e&%tw^=S1Y;J5}O+nj4TwUYq8Uy$?FxuMU$>%<V
zs!CjN8KX+mB_&NE@aD@e;+6}7_t?>h<0BDSt41Sky#5-eE?!8s1rZ&S^#d`xW1%9r
zuEkUqr7ddRFfFgJeCRSi^}qgK5bcN<JniC`c4wExy+iI^y~^VHRpvKt<Cn)GE}|7(
z>xeNjUo7Ydj5pSavk3u(4?;(@lM!o|&a?gK!#w`j<E&r42xUn~h`00y`y@FjU&bih
zvZ3)Ei+M}kcGT?>*LAebQra*=6lLvp!NZSSp{^JF!N2&IC?8WV*B}(7vRW-6F^N)t
zctV>(912bYZG51Q=k+sF7_}1dqdx!2$z9Yvq7-8CO3va*Y4KHIb$1;|i-S#)?|;se
z?idl&tS?%n#5sAd=5-*?zCHm~*&kTc<@;XNIEX<-Ai8vYve->Xsa;sbcEf-MlLT`T
z^(q-QtrYDBajyHm#(0Od$!{cCpBSAIW0%WMa)q@~ePg=|0p&ajXqtd2VnRR*VV^ib
zC|eK83DZVI_7o0@m5efp>Z-)kTuZS+np`yoJq(H!eE_Y5!KSlFlC@YZdy>|GR&>sB
zbUep5q9otc4fS$K-7IPAM(j7dBvsjBsP9)+sn=779FFzCL5Qj^7Ugn<Ub!(*k=Ru9
zd(BDoa;Q!S3q=q2I?s)ey+kv$J?oW(#3+ai;$uBG<wWr&w_)@@pQx;?FbuMeKo+y3
zcTnpWYafN#_L*P)1&E6MJ2z;XfHIb-mT5Fg<*G+PL-J|QNotzACdJ9x7><sQq(Bw|
zMk(3Dji@^}^EqW<@xZB#Q&_9nxxIs{k2rh&0W#lztukypegz+{aCErj_;Ak7?mh0_
zxr6H*rZD&r<=!uksY=7v+9ppu{y1BwHW;lHXbnY?+6|n?wH?j}T-z{T9&!B6KJK-S
zX0f22*DMZ~93AYlb$XNIdCTd;n(4*{8|NQne5%8%NF6-|V4~%T7e2(%z1z5MNxM`O
zSjy2@Tn$p(;Oanq`-raTxctE<iS7tn8J0(fMDK)A)_LxJ>-%^U>5K;6WMkt+?8b<4
zdy`^o1HHS8yKx&w<lya>nLb!yre{Hqu)4t5DWP2A+ogy<C4PDcDk<*&Q8X+4XpbMu
z1Qn1VUMo=zRVg{?ds>F=QuSFLNWx%;ic+Lk9nC$gA=7RXrEis$G+tZd`~#11_u7l8
z|3VVnu33VDYO=<`yyUn(=98a%KPuEL+ciw7u_mBxL>H+C&qyCn_A1O(G?o3n^07jZ
zQi%AIwXy*bq*rWhZN~1-UADJRr6kRZ{Z6`0x{%GtbuG$(EiAGr%4V#MqO1zq#!;@#
zSeSy%Q(L_B#XscJAA1HLz$?wU_dP}T2Vcg&`wnj2px4F&dYUMLB2{lFN`>n@P18`;
zmhq&@w*Xy0^a>kwqJ(<9^C%_WYB9p%xS?x2)u^DTtn^o=o1slo)HEqD47pOdn%%?t
znTikpJD=if|EFub`L)-0{AcbFE}UcI)CNyHcZs*Yv%{r_wq*lKJs2S&dE1vx@#eQ)
zVeQn6a(x65y3WZ0Ur6IUNC2dj?vwSrp{bNJ2}MtHbl^EPEwK|=U$g8T=K@Hk9;u!(
zo5w>>Y05PPm3m98n0D(zC_r{%uyr9~8cWB$J4fhZA&94UbSe1^A>y6TXrlLmiUuci
z;LZ`9lfOAJq7G4%fJ1N+9J)la^->j&h`r3J;$kEagAs%!=F)=}FUgFeMZc2R`IN*3
z@#6!O9|8$Mkb_9ss~m_A|DK=%-p9lDJ$x~-DiR`<gh%7*o91!1;_I55Li>khE02C|
z56DyqLTQdGN$iOuu7p1^SKIrj=Y;*rD#jaEUh5>nm658P{0cP=m3y$nmSBE(oL(cL
zp1^EvjqC5+C7yK{C8%Q8bV3_*f$d9Yc>MVfqKZOj*(nh0k>r%rUw!L!j(6^%+Ky;6
zyLb1giVAH@3S&7w*kkYB9X8LNg#c}E+_`>}a}Qi#G@17OVE69b!dS7f=;{XLB1Jjk
zVE2gii&Kb(VsndfYm3=qky9Uet{18Bt(c>C%O%axobG5zIT<tE-olPY$*M*t@itNR
z#z>)%B#0KgByf`GCEwo=acxIvJA!Mu{?<FJZ?7{yTGGxtmh}O?Z8$zU0vD<4C3Ri1
zcW;*$KJ`id@OS<V?e2k4_0&pG-6u+YYDMmWSnZY05pi6(*L@P#)4bETSn-ApDhyUS
zxo4G?K08+CT`EcTiDBMf6FS>D-!p>xv@i$V$(uRKPSrCkgi(;cSAAfgX_t}|4Ae`Z
z!_(9Hh$LlIdgb9BLGrnvMCwai>6;abK}s^hu$E<;ob=oKeROSy!pdF|MOoO`Qf$sf
zCDvM85Y$Sg-hH_a4wO)NRg74byv*@*LDPDV5R(*L0PCa>=AHC<+Oi;byym@9X_G|G
zgtcP}+l$Z*O3F(56-_}@*<y&i=m;GZn9mnnz4|Vp>k|F5P5X(LITEs8N7}pd+MdS{
z^y4Nn^lz#dggwO&BIVx7RdivP1L}v0{P`VLysJ|(*(*+`0*>yJ*4UrJ;QNy2BdJ$Z
z4TT-8db{M6lj%QGE?W2Jm6Pf;uZ7;cy3}jzWW+~4`Ll%Jxc2s|5HulK45C)V-ZW@5
zUDsi>>GugK!qKw=j47z=I-$owSWVtz)QHwESvCzDo9lGWQ)tVz>sR^ICqB+g-~KL7
zzW-_VZr;Lmfuo}%y0#T2URkofIbr?OLjWx2OI+6w2^88e9+enlQAW<GZsr``^_ZlL
z-qa4)HQ2IbR9d3<D6JWfN7zxx`o;!mi;BXiYuXy;0{iot!=|AqOp0rPrDrsr@~uDo
zGB14kQ`~sxEs1eki!l(D6>FamKpRwy!d_~YOebTSS6}AfjaLcH5#CtgQ^bhhJp#lc
zhfcA3Z4WhxJn*siF@5kXcmLoO;(S4zmb6!2MY#o|Ggr{-7YTM9Q%z8+!c?-SboCNN
zNP}gFm;N{%Kb8q<7G;rmS3Ewo9%^Y(GX$0WbCRrlN&vG`Tuy2;a<z&^^k`=KA?nBz
z&%U4A@BRQ2o16lpl*82xCXT60i8Y#!e*9VL<9&R5NbDGFNY*maUny_5BF>P}GNn!|
zW-EM=6zoc3Zc+tV5_HA-<~GO2b0)K?Y+zavzL=75Q%YleTRW68*dh#lNKq&TrnH##
zF=}hZ_FBOkZ@nRQAHmbQj)y+{0p9q^SFt+>v^)EZ*T-ZL3RFzqYLY;jQc-z}n>!ZA
zHAP`5O4G+zO36lPwV^ehuJgDy;9Ryrfp*!^EINv+pqdnvl@V*6VIT+^tK$&2AsbMP
z;y3%)2QTu*CqBmf_x}yQ|9}4neD;6yw`r{5{N>Xe>;(?z9cyc)^t`A}P}xE=+MJ+l
z!NJ}E+ha>~5!ZUOEm6kw_dzy?m=I#I)F^YGLQ<_!7?$&h_h4+`!i5bE-Z&y#8|CC)
zW#fD33ocP*4LFrVh2$rzr2x-BFu%{(SVJr#)hyzIrE4Q?7sa#Edy)UosAoz(B#@KJ
zU5q;Q3k_A_q-H_z9U*1g(XrAe7DXh-w^Dfz>6&X<Y(*$cK`=={TA?WD)hV)6gjGNx
zy)s#e8&R`3k-Y1em<B3Q_5^(vTHO6dPeBmG-1|7>qB*E%NQ^W5w9*ILi^GJWU<M?b
zdI|`Y?rDyFA8SrR(!$f*z^EZu&xvhr?imn(I4pK>LJaALRzh+by?S0zA!?P03hIG_
zz2lXhczONGj)+0jtd)w4MpJg1fa@Gv7arj8=RN{92`%N`@w7&V5O7_~jW=Fr_q}&f
zzQc#eo%e2Y=KML7H^k0StWAkg@y=^M;C<`s93IWN{{5FYK0e~Y<p=2LpD5mZ@%wCV
zpQ3XO?Q(&3ildz)4tDn0xOkyY@Onf}YZL`#F@njI@uo_#vd8=Ebwq#dbyjzQH187<
z=hcAllMogs_Dw2kn&V^Me)%<wayZ|im7`g-)QgVMsHCZD4iAnP6$SXf-P^Ybonx_o
zz_)(qcQ64&5f!o(Xl_f%^A(lp;dy9;9_c%QDz5Y%_em=tczH-7R5I_PT19>&DDTC(
zGKDcH(19FD$Cc!5)ibgmyDR0CR9D6M*ejLxO!;1;KxG_rND#6La4G_Z1T}?h|B-qZ
zL*hdBNp<S4&k2UiH{Z{JzE?0?I%Uzs+;$QpQF%^GuKRQ}O)Hifg~g65v@#fL(Z-^T
z6*{U4DG4$JrSZ-Yqr$m>Ye~J%$#Td;7oc{Iwu^LL6HS5jvfg${6(1jZmEI5nhzPyJ
zQNdx12#gt}32G3tGs<9ffl`Geyq!nI4&McQD}_s?6`k`O9UjRN<>h};<n*$)6J)%R
z#VFH~mo9Ng*FQVAv9h$~0`QR04z$`_Xw&HjHB@X)6w;;clgE<Ki1QftVW9EGm}%&F
z9LAG9wX5rHq96CRV2P$6LP_$k5D*gTo6us3!7^f^`Z=D<A}Gx>Kk;Fz$r^|E-sAYV
zmAyCmq+ycBdtzTjh3`BalI)57VIVVsqEc9E2?>F>MziQzd<bX-ZQW4Ujd*;z$j15x
z$FJ`(9*=qSeNS`q>J2tFCIla`qcRofWPbRjBersX%TiMo70!Fwu4DJ;7(!rFjTluG
z<6^{YA_8!&b5J_QtPy<&QJe*$RV=!ewyx=RT1a$cW2~^q#-r4`D*n46F_yH}v~|P&
z@d3xXN36{@sh11NX-8F6GPhzbe56D_1V&ZG;`P_5U%SQ8y*Zt02~kU5cGD7<3u$|@
zIw9MdOHX}(_2-{s{@P3Eg_AySzd-LsX6IY#>o+($zQy{L$I#UXRZX#LYnZY^X@j>7
zAub59OG2A}ypA8s1eJb~$yO%=lA!u(=!$?#PZ3SEEj28LlDH23hNA4NdQZWDW+MUd
z=9x2Ge&jiBz4aY*XcJ1JiP2HF3)ZG1e);pC=lrE@-hJgJr~<7#M#a7#Nv8lc%i#2h
za88zcgje@L!4N1)WIXs|dZIGe!q6oIEND%1PWn=^oVn2y6m+!{q(PTxqkFoGR)Y40
zN^|P*$0%QUlj5ku&uikSAOy#$%iFwSC(K;K;T!KVxpJ}Z^%xfaBsXusR)(r8sq2Qt
zykWzRlLY#J_E>AsRz#@0kCLRep00CrjiYT_nq@;hx0I8T@nlR{8MGF9T1=t<F>hG?
zev#CbwB|Fv`cZ!S#qYBB)?4g;?ORMf`eBNq;Nhpv@V#%o%Pgd8p==*)nW|vz)E0-k
z_n54&(X}4ebreN`DI|H)DlKnOL;-A}X_p?~f+~e3mHqrWuW-IVn}&0jH@N=p9(5<v
zSCH|Q4N6J%TO~wYryzfoq^3~V<j1873sWQgqzasKAjU)J7$GV7>Htb565Gl~sEFcO
z8)FigNTnB}F)69lMv~8v?2&|0rW5#JQ{Z5MGO1=}Uo^ckHdJq0H}rnRl%S@BG!An+
zC-0F+G!YdB|GFVYS@AEvk4F~EmG4*V8CH_Q{^l_R|0iOnAAVe>i}i_Sraopr$k@kI
zt8~fXr{TFdu3mXphIbvPZE^L_{oe*kn#wddFDI^0J?(O_XyjTeg=<=j6>nv23!0__
zqnWO+F>gF)&!6Y9=Rbrh3K>5MVK*ihRTPWGg4@^LX7A=r{Bns8j-7jZoI7`p!d5Js
zIlgUsHPPkXKKE|F$GdO7!*X|*Qx86XvIduYoN*nmz4!xu`WJqd<NZSsYI1ON?~t9l
z_joW}$4E#@&{9k&Y>`AwB>M|K&}Biev>&Jy<S3@V$2*7C3g-j+ckfZx3$)Rc<%qUk
zQXkK0>jmv%&hqFOx(=_U5Vt(o$Cwdsy!Hxb&Yq)f#jJfcn$fwMySLtBb7RIEFTI4@
zKM=-2+I_SF-_O^<=ktWHUfKXN%G$Sgt`3BZ*u&6+oQfSWF&RRT;#2N(&R<G}fu%%%
zXy^gPyoO{ta?c9U{r)M1+`)rX={&AAN(Gt6R?cVWosUVKUQ*WFPph>htM8ipzEP4d
zqoYvfJw5vZM{-MwD6d2oH!B9^xv5lNs0mReb^q+OZvH6pQ|iTnII1XOWE>(W>19*a
zB=Ml+4~ifzJ|s2N0NyLwR=lxG8`0L%HJ-)?S}z3v=L6aWobfWRqsIGyQ4ViZua}^;
zv{jT<NjaIIZ0bSP46%_?sUT|6Wq~zT1ag{|u5~!yNTQNhQ6V9NgNO4_KZ$AI7<@p}
z%A_)usgvMUj>>wFdnFoMVX6#-6*6#<u~K<u5ql&iYH}|4shD~<SBiPLXPZpt&7w>v
zi@330TLzR@MX}3^G4$t@pjC!5qnF<6Ez9zG#+B9$6|vf4M+HCs7ybg#dv0BQgXm?x
z^wvD7=#aG}v^MAn?Xt!i+pqOOiB@yo*JD!9HCp33NA!{?9vvU!gZM3X(Xlz3aQ&^T
z94u;zNkvmHX`Rd+pM)4g5N}(hjG&7#8CgU_sSC!V8D&x6oTqMU+89|ZTl}F<McmK}
znz^<{$3#6gspr{?U0w#3wAMte@m+^T7=gmZ0AmcD4-^{Kr)zxW4}Oo&{?zAq<=fvR
zI?rOUq?%YFS_+aPgA6fn_MykQ_1j-W&BrV?#8@ONra<(LwhJgL?5f%|T>RiuocrWQ
zaW`JaU%y4zo6{6hJNAny*Z<3xc=#{<9IyZ0OMK~{yvX|bP1aAZvvK}3o0re9c5aJm
zG8NBkqwwt?!6Ne`K7MQyRN3g_5S%Kt5=7=+9ae&~RLL4(Lwc%Q{~GZ`sF#h)`wTLH
zCC|O^6CB>V&hqdcK6-*GutvNxV|&D3_#gaPRJX^XtpJNwxxrr+dYuxvK4?E7z>tyD
zVF4tp%90PZ9=aF92t=jX*xcaw;E3t^8f_<i0=CqB^$?>mrJ-vYSSm)7s(+uLQj%m8
zm!Eo^y_er+TFtog${U>f&@*_YC^yHbQ)ejmu5<YEtDOJI56cLQxl%ZoHD@Kja#Y|t
z&*HddG%6X55_Kq79(C;PRg9K|HB^QW<atfqQ7=SXq1|g~7cHaNgweF1ELN;L`V1k7
z>*Qo!!`kUFk9^^C-1<ki`1XJC&-wJT&k-h?@!0VAbC-GfYp?U*#j|uYf}|3OI<R(j
zjoUZgV{u&L8lj~XlTtQ2m4P!)M5NwLQQ#xQ#$$|F{}{7U9uAi1I?Sl!;Y%G?uNTy+
zBB!3cl)hTIhA9D7*d)lLGN9P^0#QUt4RKbXt)gBm=~@UTpmoH1p(IBuJt`^&bcj$S
zk43M*Yq6P0^sCS}ttCX;C&k}L@|#G>P44~8(_{tFliww!#LNOnJ%|aDIze0P6Tb+V
z@)C2#M}Vwyx3Y<^L?ie0=wqi$^-VO|*q?WA8Q(JkVqP;csyftSBp8Xq$)=a<lgXG8
zl9L{(^1$9#w9kf5CZj)`g`U{v2B5V6Yh1a^IguG`6+%KaG)fuSyLABNX_hUM*_2b;
z+su!SS=-p4nvLm{;kl1}hH^An@w$wCH5>1_d-pawH{PXN%yC^uv#6<$7o0tP7G*Sb
zyP&Hb-giA4CUza~z40dP@iD<i);CXyeMZyK)g8xocG$ap7gH7-9qdEfv1}amvf<Xd
zw>UmpU<v~{*bI^A9bH_Cw=QC}rL4skm6+JCRIzgpW4`x>$nt2(TQ7c>!#j6~At_$2
zeqLl9Q@MBEL)Us}TGmfb`OY`K!N))SS&om7adpSW0~2muy9Uv-wY|xAzVrtuC%u`m
zzd=7#f=DQ&UP0FL$joY@J*u9L9#Kt|m(cffCq%R#T0#21qv`|n7&D5uYPpg`nIfs0
zGbrTim83`hqWhlc+*591a(V7Ei<N{VYLw5GC@GiJW=Iq~=!_0$)oYL*!KALLJ-rGs
z4s=NU-ad`bK5<GHFs#G}>AZyAzOq-{=VLKn3hh6!3$)T$TczT4w&03{=+gL2v}mtr
zT%b+Ca~mU}bF^)w^$O>~IYI0sNlk?!V=z`@j3J;Y3PGYWecf1tE``Q!3-LttF^HnC
zQJ6vs;pMoZ8dahh(M66!jn*~Brs7nHh|#-Gx(T_zPA5b)Q?JvQ=nL-y1(uY=CVIj!
z`r}ZX%=>+g1%pbSlRDW2nxWvUvra);m$M3eo-ZmTo*~cYv~UU&Aoqx_G6AyJbH8Do
zXq$*}1vts$N#%b2{893Z4uho+IX;y{@{#ABX6x(&bc;id4i9^}ezsRcrS_00h^W>Y
zZ7j>91B#+TXN$NX-oY@~(uCkK%FuPve_t<`93Sq<I_?9WKs7EHRTXtT7hMZwXzG?o
zq;oFCCL~}-F0ocxl$|5EAZ-pIB<6ymXQZqm3OYoT7oaDmd#Dn6@00JWR;j%L5eN%D
z(zp&7CH)nT*Wz>#(w^Lf4r@!atvEbhFq<lrYKU4>FP3Np#u_Y+HRIU5wnOk0?Zzgi
zl0t>_he9-l5QPPF`pP3b^7)^k>yFU(ZV=x+q>F*pOgVLNlbgT)7W*&1$EEjP<;wfd
z@%>l!xcbU_xb7{qjuexUwKFre9zM_3g|lp(-Da}B#`**If1UYJ9zT`|YF0>#h>968
zqB^Nw(NiW?b`+H*;}v97D-LRhd2>MNA;~49nS(V;)Ut4yQN;&8_6vONi@%O?N79d{
z3XIl#;IT72@bFpG;tiU5imD{g(@B&lqo;jDu=-D*<mCW4veE`4)t=<}sUqU}6;ltG
z7Vogm2XG>Q(zc!SwG^g@jEz;qVCmWxTNsM6NY%m2Ba5t`+2jX{1?RRmc<uMU&czp=
zlj~q2>yKQ*eeGTR^=owV2D3Tp7eJMhw)A<FhN4tdlalt>b9B6<D5e;_f_nWBQl<fA
z1?@A|U@J{Ewls^9<)Wc$Te<^J=SEB>6;)*<3Do)Y!$5^2S6%~8{)wl!_2tJ1*IwZp
z|HVJ%!++y{gpZ1~je_lqXE;7;m~C3RUdBH#-kjl`qp3R<$4i3t(m!B@s-4G^?*hsO
zYypL_)b*0Wl@!Jhf&tZHDp|CZDv69a|LBAm7TmaNs6%zaPc+|ZrNY4cSt+E1n2gnx
z7U2V<1yMGda%32dEv^-dA?Ka+YUQASL}N>p;WPnyiF@*9A0q@Wm3$|Cs@WbU=IVYD
z>mtNN-I9LF-VP@Bg+)@Y+i(tY?^BT|t`vj%#C9dI>S?bzfEER8RNi#&e}>xI9`uXO
zkesfRj?&~^Q5{`P2Xpeb`uvO*^uJ5z9|kIJp9HU7gjHl92RFHAH4Xw}4DU{kb$W{D
z$|+|NomG@LtD2&P2KKW*_p@Sk5hJ5&%-w?nrqe0w>+8IA`v#la+Z0vF<%b^T(v?TV
zI~D0GGX|m)FXo$9ujAU9(9|65?bFsvy!XszYtk3pcKF6gZ)osB-PRiIV6nfCQaY_w
zhWX(Ei=#P<g9Y>3do;&KbZtY^)D$t$E?QJj?B2e^%{Q+yS)WOZMPaCq7TmpYlj-_I
zY@)1XZF`f+#s&)TE~Nb-F;u*;CbThVrP#T1hj(85F3r(V3dY1XD65NR8u~D2Q|=ZV
ziZQ!)cG<tT$C<OIIX>7!>qy-ka=gFG=H@1c`#W6!!K+j~kzdCB3E#vx6gV;)C9VeT
z49KZW?LlOy??om1JPL(SXJj&uWAD+%_ToixWo?Kn#ngW6_LRk3l!^Vf0#B&{z0q=g
zl}grEQTkAG@hi^&ZIbG0+DGIeX6Oyi33uxA_6TySV<1FBFoEb5I*6C3^rk9dYxvO5
zg^;1SY)`kkG==11yu!G;pGSRMCxOP)#-TJySxi+DjV7c$Ei)~=SF|qRYERn;l8RQi
zCgMV*>mVS#q-aG|T5MTTSW8)y65ty%v=brz#>(i_Uz%Gdbkb1pec>uA_ZHSL9gSGq
zTub^9l9JW+93MK_w~XjC=%{)DJ)xf_Rzmc{Ih<TZ`^s5}42A4Ip-E#tW?Q1r7Y8N6
zTPc!jb%KS{2O3%>^gbt=5>xg4wE1^=eNO)2S#kP@sq;gTBhQJ|!r}0?!=OqbidL&Q
zNqXI6#*^sR<+S2we)Z2l2<+dzM%#3~O^>L%TNDPgOYI^7V-&_(eAm$|myFk@{Y>pY
zpVs2v?mE#k@qrjT`#Xos4-P5HNk0~Y>bQLQ0S@*LIaQseG?wG}5yqOdr(|I^pQe6F
zL?aS=<Rm=yD+Q4BV{{U|iUH>uQKe4q7D`1~chmVskQS8O(xByYIv1!HH405pChD>g
z6yF-d`edDNe&Y{${|8>+wU@q2y=XADVDt1k@4dDweg36oqXe~c4_!m6TB4hytW9fF
z1xnG*7ewQ@^0|+4@e?1z8w0yniMQ_3)Q238Ae0sFe(QA(-`c0t6YjkHE@wVA<>FT0
z^=l12csl1<E^GGg?DFQTZ^Fn@O~<TF$Nanhx6j`Hh2}?i{8%QaxxFQeg9;Bm2i_TZ
z$g7-K(S#rZH?ohWMI=+ve4@OiNtUTfkoQ{&H3k6%#pKim&wTV}c=;Q@O=yl&5jgUR
zpZE~PBoKBN)XizsNTHNR7L#0Mjzgrma`d4`PFfR9Mk(-7_2^Wd?b*XH1hF$QMll+X
zXuA$uTI#xk=olGI&IzLu#k~+B^|Fx-(hik7g`(P+u<s&|%_hA0`r9ZieESgK-1{Hl
z+8_P_)5C`L&MxE4i~T~I{mIg<fksZS7%4`vWU*YbYzxM^Lg(J6MApg3_Dbt!+D{Z_
zq$EJC3hG6}V%gEm8*q^@DH)9ow$MXeKSesyiW~@9VJpoGf8`6j`hR|x-EX|a@tfBf
zKYE$MDjs=ao3DTAWoGN=u*ML5#4C-dDy%K&x|*Yd1>+4%VRIEPB?n&NYLBWkrcjil
ziu!m-*E))#1U#w?jHVSXio#-PHnH4h>%tDzn&O>n1+}w%Hj^vD5mrdD^y8b547^j}
zMQn8yR!59Al$Gfbqt2z?y8arcQ5=Le6LWGI#FJJ<pEfg@8{P$ib9AmFbREHZLg%Ef
zReO}ns)s&Rqov9pj7GVPrs>|FS9+>4%9AWDR7wU_?q^OF?hG@sT5ihLL)k81Na^nD
z8WUDRmJ>p2>D|=Hgs?C0hvDap0GteNhX8wZK|6`i4s(3<=M1RzJAF>@^K(xm(djvH
zC1}bBIH|uiKPn8pi_(IiG1_zY=9?(rqK!rCTs?MJU2u4KjB6WW^ps<RDJ2omKGJzF
zHU?e8wYT18IvG(P&1u?>!<{{<!ZMvqMSIfA7f3*%qDBQRf@_6BcM>QS)rh^jJM7%K
zhpWXuwB4H%yAIdZ)QcrSMO@QiwWXWa+_`=gA0y++qLpTObc8J}zCEUzOgMG%B3fD6
zS_*s-h|be2798!~WB>LYVuZEL4Hok`hj(vba(hJ%y3>4J=`cvBq!d6y=ZM;~zIlq@
z{HOn%_dWkUY+=~HeVeoAFR*v}7PHBSYp=b{%isPcqaa?=14nzf&x$xO3VJ$V=--q6
zY4ODG2DE+U<@)q8dsIfTdLmQKLr-Vy+pAZVqWeTq4SR1+W`|KOQars_lI#mAD71+w
zVb+3fM#%PWMyo+ZJCA9d_TdmkXg0EfYKc;Kw5bQx%RLIc5Q=zGr&c*)qPGefmlN=u
zAgx%^BvtFa$aJ5NX3<jE5?{GQ3)LuNvD&1>F&ChqUOMVFP%j*9<B1rs1t^0yhEf}f
zggT~`Q4&pUFv%LG?^#TOgCYlztz)!2$4H@GdQEML8<KX0w%DRzJgFFuOVA46iI??y
zH=*eawey6oLm8V2)@f0U8l{p?d(47xL#s*8Gf`?l8dJjAw^(J-E+xp;z&u-F66oyF
zD~Y&(v4iHwYMLa4VA*B+A>HSd1EXh^4WA1_c`xmc9{tQe<N-B&zSSf#kmHkl4OBnB
z2=X~L&usAU6VKpW&Ccz2X`P5fVH5j6OHnuXQ~Ka3P||y9EX(5~%JGP3(k;mLGZ{hc
zI*CzY#Ji6Don1-rGBYa#+P3A&rH8q9`xaZLwoqju3cc1QK{%nj_K0|25R<vs3qGa-
zR_|P3287vpCqD8)f=H#2LP{z!Xrpn7i9<-Je&?icX)MllbWM%5Q<PSC@6lSuP@xs?
zUcbi2Km9qBEooYZUACM(eTKKb@glaUh|7-09>0j+UkW+Q8f@F4iUJn{CW3Ek9{<^A
zIsb`I;@lkXcF@NTb^Ru)>#*BvEbi`e_=9&Sb~|QT(`m))fAA{RMZ?3-Zu9m#cWHtE
zCTJ<Nn`r44f#vZrM{)O$V3qmN9zT`|s<DO4HZMV>N{!P}8IFB_0cN$jHS`_ym9vEM
z^t6i9f0q>uAxU`m73oaJ^F%Zor?z?g`Ooq4w|<-G8)DaR?(9WkTtW=AVcMUw%6@a%
zMkJk&$}3f(YYh`RB`CQkD$`D~M?j+1qBVsvES3#gS*+32$2CQ1(MmQ!qc!CuZx~W_
zwxy-5TbjC|nwAK)O8RYdSx}unh3OWU8%ts*i-r%LvyWfmWenRG=C8fY<cW(zsFN<W
z^zaI5rgg+B&3If8T+3p<kp7i6^42%6bL-AKeDGt>vVP%IT1bZtBVCGgy&`I&Fonk0
zf@aa;x<Iq+K*2~SPuz4axdzbxM#1T)&rv@64Dt8B%6I-B|A0^YZ~qRVP?V+NxevXM
zSHAHY4_!Hh%hy{es_6{Z%xUK}MKwyv5#)^uFXAX(MT{;e$^uhVqJHZoiCf>SFhzk2
zfP=&9Eo-B*RO6b-<d{ommt4Cw!v!V%4#|s9XU|#^buM?y^t8W(I;ZPRQgb8~3xqZt
z2ZTE9z&_;QAo-E%gf8G$0?UNZYq1iL05*-+7zZ(=RA~<eR4|CVPV(6_scP<1)fPSE
zdNKC2I@vHquqD%wRG;0Eyzo}7*^_5n3<MQ>J<9uegqXt=d7qO#sPv*n`2*!8X9!~Y
ziJvO|dZ06>&#(H=v6`6X5gbpV$iqeHBc>iijYEw*#?^a0BqSLLp#OeA5F%3OA_Wvy
z!N#n_9_})~bDh}(52K=DRF*7{4_VvTW;`Bqu)71Up<UKOvQ&zoJj?o+<?@(ouf4^1
zVrdpj7V{%^-n~I#HPgwIuItc7JRL!-w^X-eadg1(!6B2`s8?u*p(mAOBJaC^YZnkB
z&2mAvtc9ZM1Im^hA0E>#7R1m|FLmG3q-Ej66eWwJW9nr?@Se77Sk8}G>>toB7T`pL
zBoaBkb2A}-E5dwgg#`kAPp3{cEYS;vPU(o&6+3tDa(KAMr$7HwG~JTjgI&&^+Xk(;
z{`xz7={J6x+1i9^d%G9Q$oOeLI#v|jdsI9qf);_3oXn&V*zZ@_9wtQ9?_;DfGqA;D
z--8*`I3e}8Wpp!yAc>4xWqYIto`8zv^DCqJYI3G=$~+Olwyf@~wdoVw-ugoT#ngM2
z=eHo%{rYQ?UuKLV)|S?-puLEjsN`p=axZ6M_;`N;6_#L8^>ab3%*Nb*`$v(WhAuHv
zx**AOx<-Yy6vh&wrfXa3*5QJ}*eT;R#b{0AQTQOqiFW}Xl~^MAl*A|nL5fXLNpZRN
zT8*haNU343R=QUa*GgfvP0Nj-burn>l%taMjTswTYXn4rzZk*B#)zhA**`jF=iY9L
zceJeM+7NB%i}>D~0i8^RQ<WC8hzQ=Ol;mUq$=*Otr5Taexil}Nc$4C)oDlWvcb_XJ
z-I5_0>_N_cz0YkctA)`d9=6i9+;3qq_M%RGk9Ji39xlcARDem>Ix}kek(LY7J?mcf
z)zAFm&tP=H!R~u3=QXkK&83eajVY2iTRypnDm2w-%>LawOzMVWT%t^3N$EiLZg)<s
z36)kX=h8QCl^~N^8KP_Pu47|koom;x^5oM`5t5i(ZePg0u~INoeE|XFIxA(;J(HFs
zrHnA2RFpNcYYDzh&leV$(gtgis8jHXA!hK-CA%{tNoeQLrOcJCYcX|0Ij%71r2MUj
z(Q$hF9Cxl?<Do|$<F%Jwq3t@RYwH{=8_c33wl$Ca+^0})ydre=QYv7&lOmP34pWvK
zyz(x$zxtoEzBOe$shC{2gxR{td~d<q-+G&)<qp;>tdG=D@)UQw=J3VaT=<3Oss7dL
z%<B~kqjbY{DuxxI{3BRpezeDrXM(B+N=I2%1f?i!xq<@3UX5?4#pNm$C+MZ#JG#!L
z-VcU9^iW8k_b*gKa1eaNnT|7;&hftRCwTAe7jf<`3%?HYBfPT^y-=HyM~FzptG@CM
zDM>(7`pgRTCMIf>{6bdNT<Il>*n>7Q&~0JSt)N;)3ysHlCn{lGOFb8X6I04H(+ZR^
zlx0ENbhyT03)3rqDg&24@HGBg-(eIhk=Ncdk_c{22%GEJ2#2q|&FS`|P?)?G3}MQk
z6oV#GSjGBm!n@yni#NXV4JJp2oST)57JIz>?H};apZgr=pMAU+yda9tUrz~3P4v1j
zn3<+9n&q;?hd|ePbSol7Mhi__6R}o7>Z|lg5aGGM^m)Gf+Ew)RJG}eFuW<PbpTPm^
zTNM`{zQDom9@EV!J~+H;P*r4pxL`drIA?pQdD%z<&Pkt7RGK10j0IgPnj=r=J7yaN
zv*`p-xGu6>!nN-@*t@}%_m`Z#JYr)jvVGjLb3DqRKUvIc<=y(qz7L@v8VP2Vdzv@A
zys<|?5ljlMvqz%RTEw)JC?IQ}Hr%AxIrIi)>Y&Ua(+4%8*c;P5QCcPSL<u^BPN<yL
zSx+TJzKKGa0$*j62x};vWc~Ai$q9H4rX$IVffGS+)Myokd2$~QA5ytFs|&A`zcV@&
zPoB<@OsxK)N48SH(_0GA7oi6FZzhGRm5+2XSeEr+Fm(_A9auFdq{FlObt8X&;yv?_
z7SS<`VLC0jaN#sp9=yc0SH8v57al}u!*pD6`@Ng2Z)|bt`~}{8?G;X)I)y1C2X}WU
zCIzkQXy!G?JNx*yVYD&k?%iDu-n+$O|A5O6KghCfq$=(`ZRi-67F!tJeDxiCQ*-zF
zHBOy9!*p{4YpjSi1up>%Ml6wnPlXS|^7xppZZJiO@=$2ca&d&pD(9()gA(;&f~L8B
zTYNsdRy=)^Z*L#;B4bm*(2}4sl1X2rA+Oju8HxiXHX{Mm+8ST{?cd~sAANyhY`Jms
zI%hUEz_q;g?H76N%im(%7);t{dIW5hYMAfk3E({t@%^dfz&#J8T)0>bwsS(4WuUWK
z{6Ha2Mc6O|<RR}<{r<E%=1+R=s+8cymA?t;Qum7SBw@q6i;M7yN*<7EXg@#-yDAPv
z8xanU!W<&iz#vh5^L01EJPLxGhLG4ig#^V)B{U}W$LH&zvXZO!0|*xLyuANwjMkBm
zXzjtJ!XzH86yxa_WeSXi+4_XCT4%XvSuR`Z#^GD>gB80kNz^^kciQ*)rT##llcOFb
zhM;0njz<tnqlmf4tI~3-RZsiQ<KJi)k4k2f5k7iB?Fwi{1!itIT+GQuRHZdWX}s3x
zXo<=ZRVtLnK*&r1zp~23i1v|y2-qkyh)L!)pnjhAR(f%mjxmWQ_IpL1&6$~#W<uH{
zV-$;)u;Lw)6KA42F(*#2R8CNR>Eo6x?i6E4^zb1Om4qwphiaw1nOF5}iKI2_r`GuB
zXTCsi4fo!Ahq}uO`?gO`r4=HHDu&S0e6`k8<1t<5SsWd*Hrh_~;m~^<E0DDi=}A4<
z+lT1TMagI~0-`u8xQ?<M@!rkrDK5zfa$b*Cdv;f^`Jlp|k?ia>oadNtJ7U)ogU1vK
zZ3{_IO)juVBCDf#w?<<LT}LDf-hqh*qYFUMbS=8DXe$DhK6q4ARL1h9FaKNq^soMT
zCesnire$1M%IS#ipk`~NIeYm6$8WucvW2K>YYm|rig(Je|Mmg4Y1z4U1U^!2?D5p!
z{foTuYhR>(?*KEkcm+#DtgQkz*0wpiamd-7In`Jb>#pCaRRV={UME+a0RFffKb8q<
zUs2P6pbeUk44l<KjntX?ruq#sqo<0&hbk(AiIK7>R{FRSGMg5<=u_ahvPT7=@CD@r
zCX)&0FQ1`V?D2y)-a&IolmufVq8UadS$BF`tIi$e{d^XRm5#bc%9P49BiZyrqGVza
zK0cmk0j;SjOTB1>B4rJ(O$bGd)Qc8lHDg<%?LafuR%5*+sc21+TKgiAOOIXV=2yPK
z=46w-SKegn6OZ+_9{T(lxV=NXcMo^4z@A<^ac}x6y;i+)sRn|LoO<vKXTSYT&Yhb8
z4cLNb!i?{I>5GI%ub?Is3Q>oPQF@A`MJJ0%MT~)JVo_GnG=fxm=X<e5$!`qb{6}Bp
z`Jep|<@QvPH3D|6<iam}oZWx+JG}Z&ev|Vb`2c!r9i;=8AKm7=JGU6SF@-f!O;Z(3
zEk2<5C_+am5Di|_H6G_9+LAU0v4A=3TDE2rwoh+@lHQ@CosQSPc9Z>o`4v9;$uo@B
zuCQ1XteJ|fndfj3XngA9Q!);<>a)**)|t?^l|nGEn#v5>Ykr*8Nen6HO*y$l5tK>E
zl2r10dYy{>Mog-VDxC$Qg7iHrML>G2Lqc(TiZ+_4ePY||!9-old#MxzWOZMe2D!2w
z`p3kSEau*!zM9@AYWF{Q3^{vSy`p{6HzW^P94CK21pRqx$DS>5BEvm_e6o^#WUgp_
zHbY>xl7LE}FcjS~b$2y@AiqKwf{C~qM8tgl@?5Q8G%h%EcAazQHrd*qFj*VX9qmwV
zoTe-+Z7Y=8ty3H9?B8W`w!!JGGwj^G#pe13suXl&=k{&Rp4;MJ?~ud0x7mH?I*-2p
z8R}(&(FWH!0+F(`*lI*`bj<#}eP$cmSZg^xSP+e6yuOApmO_jFV%InscR`T<vK&#@
zHL>k*NWfC9O*vTX%ASxS!0b02oF{~KrAIoXqMhljP4czTnR!5FbF3tdq@cIIWy#tq
z{W)kT#}zReZrywjV*(F8@(}y`_bBkp#wFkSZ-0+lFTcfFS+Wd*>WaE1D}@m?+^>G3
zOM%qqfOO?N`cDu~e!p5T=|?~uUUwD4&q-|Pzrw&Mh<as&4`ZlD@H1ScR|+cqLY3b+
zDT1%CJ7iCkJ&^3p3Ydhb#+A7c(tfFXtTXS`s#k(V1<`&;$yme_63UvuMS=$tJuzGC
zh#b6J&l6PTAG-ec>rg_q_ukWWEg?7}0isL2bQU`jVLPi0r7vh(htalIixrlojJtp$
zWbr8#`-CDhJ9OGVl*$aFm^^(`&$HH06ewdc#`h}i*%rkbD|4|ZM3prtj2BPNW%Mk&
zmZok~zi^s+S`>k$MM)D8o1CaTI(YfK*`93R2!V%S<lGG@F6rf=hk{G~DbLNAin2OA
zC(~#{N^}G*UfDA;6Cth^RPy~v`(N(O&ZB&#V3aDtoN$du_f};#9#Z5DecU;q#l!kB
zi0n~VKK$vAF`BG%y!$SPJNrm3DeVqDRn~~QbL<i`CWt4h)*@tOOhJ8o#CUBgh@_Er
z93N$FIPX)xy=8thrzkACu#BfueDt)-Ih|`LiV;V<qV8*y6$W8Q?QW@fp~7%iRuSsF
z?)DWkC53(8G`OxNx=vb37>P)iHGQ0(TknF(%DtALJejL68UZb4B;s97+v1!Dt+6^X
z(j&Sc1<0x_QNSaQKf!lie2KMUOcxZ@B=8si=Kl=6J%cA6rf3xL;1K3Z5t&rtaUI(r
zD%NF1j0)E-QL*IC|MR!8^O|a{q&~Y&C?YM6jm;6kY4kF%`}R9re(Mm`rX42t)F<C*
zMNI$x@jLE6x9$HAj()>ave{`R0fshe;LnpxF4E7!5=sc$umZ_*b03BhDuLD}DZyzJ
zCOzMX$Uvea3Wd?2bfnWA6qagshIjW4@V-E-Fz2b|L-p0-tl9^8@@6A@kqr;oP<#ay
zk{l&bW)wsxRc)VD35nRkV9TP<&|Fej3(C_pEsLWT*LV@lNDGVB8dJ#YwoNMwOzORx
zZjZUQJmA#1)4cc1m!vgCi5lbShc6LFW2_4-Z|<R1hv6#vwp#8-C_d%-lx}mKs9Hj$
zU|N7Gcz$bxgI8bgMWh6wL>6B0lkSsyrA48ytPJC^_^U>eN@wbcXbz8gcB1*x|NbBH
z?hoF?>Xh<yk%vG3A=)eFuuIK%{{BBCdc?W{$Fm=PAA3i~6s4umlI(Z2lLc9I1YD1H
z1?Op-C82BOB()|6jSHUH#+1|NPeGwswt?GkAMn**`ws8@qyL&Kd#~eO{0`ey$J&{5
zxF8#US-KS(=W5`XchJ5(p8hrr69po~#R{E?T)~t_s2^GZbD=_dg9h_@>1ht)r{)~K
zb2#sk^}&Ggq(LpoboLQW@2j=UN1enF?8?tNp}N}W6!gfvGZ>ULNytK{>bb;C#)_PL
zHL9pa1x01CrKnIEbN}mf*<Ueu`Pci9B=ytpqXg3-icp*P`FHexPx+6Cy@cE=B;(VE
z{QQ-#oR%lrR7j|D(xH;C)P4|>$dOQm22Z}s^olvL2*c|S)3Gmq_skNlp&A*^o|$p!
z;%UyD+h#l)F)9lVcXn7F?10kDrep5ix{fl6^^Fbo4)%!wn&mO``9AIPnEl(g2(D(n
zcf`G`H+bi(-{t%x4^THP(WJh>L}k><FfK;C`1NnFarO+ev!|Gz-C}lX6KyR@3k6?W
zMbj)HbQH>@;TEVy1<U<8u5tKIP@C!II`es*5Q!83r1(gD4p*d1a=@78!q6~01o(aW
z7D=LcgMy}L>*eCvu(y<>lCBAir(?eUdtc@QAAJF0U~#ZV8Q`n`^tZV4(%V=xwSrh$
z%%tc^_I==dB6ZAXBNJTIJ9FP>JkeLYN+Ha`E~^4BeR%mpy>zR>FRN5y34lonR4ULh
z1nH}tgsa7z{Mc1Nu3SK^<)J130Du5VL_t(mDe%mNSkWICc9UGVQabPBvKDAH1it<C
zC!>F<$1SZdvc_nP5%pSa)9<ZlFXYEz2IXOGQqr7WyVtC`&xg@USR_8s)D5m{lDJLH
zvaZEr+lvCPwVI+b*s4HTd0rNw2>G5SBr8Nsh$<nflCXLdJ}SBhDGg4El1}Tb^s*Xb
zFomHkN{Yfz7_dnw%^Hif7VkxGAo{dM1;ydsA$xas#oHAKc#O5Zud6Xe?179<_uz+P
zF5RPT{azJFC|W-n()pTxuVJ-qDp6>C9M;y8g}pQmL#E;m=x0VuL7f=a!&;hmz14)4
z{yGjtn*4agtG9ol(9hhICHHe7&znqAA5F*n^soFXu3m8G-M464S^N!2OC(PMqtbPi
z>lnQxJRx|BvS2iw(bg^X;h|`)1ksuZPKq4fB|V6!@h+f^r5ug1WkuULn&pz%d92Ym
z=Ww0$iDsl$Zj0*Yc<wnK+Ktj@4E@9KF3>eKUA@FNO9(E}dIcrb#(*;Yy;ecXc`Ff4
zGe#%96DeGK=Wt!yPfyo&5X6f&MknLOYEEyS=GL`$sMe;ejmP*1&wuDS{`!CPx7mLD
z3BqW??1hhV{?Gpc=l}HQ8NKgOm`+f};$vjdwam5Quq@c$UgzlPE9{K|2RC=orA1E_
z_S_ok{07~*HSSF`hi3}*AF@=>Kg7#l`X0+gmxNa3#tWvFP1s^zod4r={CFm)<cgUZ
zg6~Y9UHPK#+mpxWoF*nw0G$>#oi?M4aFUz!S1N58hmA=ExVMK<8cU?I6{Q_B-8fI{
z3cyfA>5CHG{X}WXf(7Q}M%wqK`he>K-mlPkl@i*5RjK(KqLYQfrDP%!DGJjssM;D_
z>%c2~<5?Wl)boz$he|W4f@%wkiV@d&vRbSimn@ZHJgR7R_ERDw4eh5My1>#Hs$#_P
zo7Yn>0xOl-0j17Pc=Fj5#wzCLudsi2i9f0d+7PEx%23mN_Z1OzKw(5iFs<vVSDMeg
zc{vfYRsvXSvQD!^Q}129MY}uag^xYNSN^a6h^ycJ9#%`Gt&M^A{l#D8?B+J^yKi&*
zyDt-CqzfLU99JH@%<UUH6uP1um$b1ZMvu{!nEJhfk94i4X*#Leh16T11I2j4r7IWF
zw%~Z_x%&MZeElE%F8u!Q^Fe#enL<(SwOs!vzeiXcp+{p>RWKckH2=`QI0Vamn5ts0
z?3gRBJzm-)tgO(%hjg}|7pWqi5y+Jov`3MA2+0iI%Xn-&t`ka^Pb!!qys$#SLiL-=
zz;ZB2@L(wZ7;7<wp|A#<0wQCO{$j1tmlSriwP;gdbb&1`rZninV#`8o608wA&V4>k
zCWWjdmpOTr<S~m+NTQmPQTg&IVGJQB{wuH9f5ogV@k2ik2|`FoRW85`7LKa#F+B0*
zv!x0Hnh{rf=o509@-aoEsw}6r*LmQ9vz)tlis{-KrY!JQVGs8B%D?zcLbGIRW0Reo
z9bD7!)YDI~v$Ichg7~<$Wj>#C?UmQr*j(ezwRic!*S^X5i|0jfMHwmh*@9|1MJdZ<
zw#FN;z0TIzQ*2+j$Y^cCcs8Y4pHi+(P{u&=>N7^6Ep)yk1jlkY$0^0Fw{KF7#yA!6
z&au9+LEDPAU$&SLw8@JvFaxsPtF)(RQ^v0DgFcmoW>SH27{_9PpiH9WMiCzifE|wr
z9%h?seE0YMkn>v`%w|*W+<2FJufM}Le&b(L-#fw>OJhsCF0iAKBuyC!&r^SeMx2jc
zk=MumUiN}PaV21mJ;u?Gl|CU|>A95QkrPRI@4l}t)A=%OH;W=g)vqxz=J}tlSr)$O
zizXuA;)74c2~auM%m_<LF0~R>S);Qsk4g!q6bp^fvSt`91+Qd1p;Drvjq0DLlenMO
z3YE|qlTad4SSf&55hhY8Nfk>KIR{~j<igt@!5YIb$D6t)b`IaQbWKaUYzeLJ`;oQQ
z*h2imZDms8ElD0)r*+%o5elai%rq{53lSH^HpGR1^BxzY^rj|HNwh|l1<G1<QDH{~
zv$YA^r?**KU&9&$N)v+=zrAw_Sr5z?$L#IgBlt*RY!amm!)4Q&s7(@WvV!_0<W^<(
z1Z5Ljh#~n*i>f3<X$S13`E#?kAn9TmlM_a<U5PQQ6aWQb_PPH#CBG5_rh@oTLpkwt
zU&zUn_mw%;Ke%Va$OIkSTL+r>psTP#^2`O5G|mF>{7<}}_3d-a_il2udx-B_Uy8II
zNxO|v!x|M75)0;JSi@{%g7<;N!7;9`B{>ap@05xJ=aPV8z#36*A5SJ2ZD{HxUE7i=
z&q_tQy6IUPgFcn&6V|-W=w6^|SYKllu|?O^_@=?PHPJbU8lx@77HDGyMUOh|s{!X6
zp_6;r-z%ksLE=4K+ew@oBwkp~=e_W0-L}|dFQg*8`q~fBR&)O1IXd6*@lSr7^$VBq
zp`ln`!!H*c?H=N^;=zx<p9?R%AG39uINjpHXMTbg{`UWn_y4WG$a8<=&-3h``#H8A
z+@M_^^^!0*-+70{a!%(po!2yp9T7J-*gLgN&>9mWhGCGYl>*^OK<AIg@ncEx?S)5l
z=o33R21$ur-N_Q|tJR&XHk%PS`U)LAn@0^V+4D%^iT6)5S*7txV=V+_u+!7{P7zH*
zsXJm?u#HhkAuqjG>YvC7c2Ca=Q4p(6QB)GVr+G=V8tWk_M`j`?5=8opQf>9#V~u55
zx2RaqHA_OMIX<+MlS=wc2PTRH0k*((9mbZD(p!r!$50xo7zu3vYlsBSUb)13Mx%>@
zqwDVyT|gDA&;?K4s$Rr^h&HhO@HyW8Qs7C`QoYw;2+Wrai>vp*J2Hh*=W7xN<+*%|
zas?`;BsHV1Qj~~Hw+sH|Z~hK{=CA#aIlK5CzyF{9zxb2?yT3(s_5pMeiI+DqFFeQW
zcfZcJ|NcMbGyn15p_onx3Z|PSo7)@A?=6`W>!?uRd@GcvV5I*z1Qv%&n${(o8%>BF
ztqfbI)-grF?#`UozwrZZ{?=D`<Y14BqsRy&;_%MW{-EXVZ@tL5U-%HQG)y-v_UH)b
zSJ3nH|2}T*8>3cn)ci;K*57;|PWDMA{6!%F*-8M~CsH{X@}lmWds|}8!V=6M`o%N_
zoKYo(#*|zI380CBt}3GRsG3{^_lZ%EDN|0E0Wn+N$n#AS?=T5<B?X?nr=wE@bN?6i
ztnwMt+{`M3xgUMya-O&g1ABI`If!xf0Y6L@%>K9Jh3P~>k(-j^>TBh<SqYL(iox}S
zv)<?L#Cs-GQDUWtEe)INQ!ZUP%Y_SPSl?L3Ce+emIlOs;>tB71uh}p1!Jqy#r#ENp
z-guSm%TF<#j8Tc!9%A75&H+Yy4&Hf>+izUu)Tz^`Y7;w}P>pBIHfNNx5qo#`@omT5
ztJnF#SH8uCM;;&=#b`8QTvY@jb<+_XbU2Daqft2T>AITW9cRvOf_jQ~zW;qp`83|O
z_~02$EuE`zO^Yo`5guDX@p>(R9vaLv!IXez0ibjPbANZQM5(e?-3oFNBi0&BQPFwF
zXguQG@4U+H58hyN>nz{=t>5MN?mdcyrwkDnBYtF1g`%33l;g>8Usin|)1=Owo)W7{
zUw$AJFLEy-dH$y$IEwhsAQYFs*C+a)hcP+y@`*b4KnV=10XYG!Z2$OtuB%w@aP~Q{
zk3%v_W;x{{tA}SN6;D!dD?PONbHtt&OHin&r67}ML||nNOi62yM2sG3PN_rc84lV=
zOGD5DV(o=C?R^k6W=JYx_xI>rphOY?@46n{>zY8<bVT2QGE#6(MG<Q?r7h@e5U+ED
zuI!b<1w?B>0X}=Lr{ZwbplrG~vR@fQ{0M8IFzB+RoJ=T8Nx*RW%r@8pqYZ_w(AuUp
zsGN*QFRyDob=`t8gy54nj>2dWv&k9~Vl^fSso44@+O^9zG%|p*WgbbjIl2fY5EA=M
zk>fa_xoT^`Bf^Bi_wj+z(x)29!GEOlfi9`wix`kfv~l0-mgFAPK<;Z@AsM7Gl9OIx
zEo87H&7%BKGQBtEqVmv-nq?U>Y9^mcGuxc<%YWss&@K+S{q8lIP7{1U8Jqfq2eCn;
zbV46R1WpP-UO<r;nXS*z*3v9$7KaOLSrK%^xsDi+<S%-nau}<V4{t#XP#-T*A!3xY
zmbuOe%PxcytcsA3*<6&$acIuF`s6eQT-(t#4X#-dx?0$b$ri`<NU*|1v9t@R5JK=m
z&o)~6cioC2a*VW1Ls?c>W7%Ksv*sMiYP!}l9cwz@Vw9$;#>BQ`>)aV`-MEP<AjFn{
zM;$D<`s=^V|Chc03brgu^ZT&hGTR(;UEX`sRc2-R?&_}gJ<~I7#$aY*dN9Ob00JTi
zLZAeRP>|A#K!x<8AYT*+DWssNP%yv{LkIv0B8Z?w3<d*WyziNw_O7bCx_o7&&ufp{
zW*I$vYwdHgfye5g)Xh^-m05Xjo_o$dYp?Zv-`|~`oMEkEa<EI^MxrSQz`d8>=FabZ
ziOL6hrC?lBCq`Y<^k_8Z-ilRxfVy@WRYq_!KwI=JcW1|3IlRi^&TX7u*L4hMb>d0?
zfIWUV8&ss2kYhQd36vI1%lhL^G=wx|3PKSW3eM!pHdvK!I1{EHAipGSYc3w8FwBB|
zg))Jl#W_qxg&Iv*E<Dq{Ek@xEv(*YU8p+hlm?7>(@~-qF&Ywm@u!ayE{i>tu6xFDt
z8kLFOq%aj&1RV{4@^UDlla{Jb^bUv~T_{YaXcmsBR+u@AS0k#CK^Lh(Nw3FPp{Mqp
zqpVAmitJoDB+eX#)`Uevn9AQ7UzpOFg2EX5(J6kp!tRXLTE|Q$R3vA;EUK4cuiPDT
zmzur%_c`1y@V+N3J@4E-;ypg#bHKY<htmy3emZ^D)Uu?l(HrB52#1%iu=CTO;aC3N
zzu>3;(x*B3?ce5~{$KtffAc^4Pw9ao1fKnOeug(+|1Mka+~YgH_B*`rZ~r1#SzFhh
zy~tbPA>Hdo6be*KHvB;mdO?!U&*yZjj`6farIZ+JEPWTb_2xUg{l{NJed(+GxC(3;
zja6DGj^5y0z@M+U|A((KdH>@~p1Vk0hz79f+=iJ}rXyveo^_MChLT}0Mgp=HCl3r#
zg28%iGmL?UO+KUoPPA<?xj}~ES7IOppNhj2&Ez9p>u8r9{i+{wBnx9G3PU+ElyyN-
zN}jE)6lHCwCYGuyu$3U7DuX&c;9O7WybNhp9evZ$H7$MTaIQ;@qfT)&N~rYV-7g+}
z*r-T0;jH-R{=mjpA3?t2dW{Z^M*9EZkJuRM#H_Jn_-vHcNw$+3fRK&f*4n%L-|Qm0
z{%QGZ*``JfP3`8(F;=s)J?8S|eXc%siJc2OlvN?_o7SL<in9kNT-px&&OiAT=It3D
z`P3&lzWok67cR4Zu*dy-5133RoS&X^@71^2y)faO*KXlQmghe3L9RXfG<%O7hzqt+
zv|Z26!3lr%E5FH~{n~F+xg{_C(QRJ;_P2TV10Uq@$?Gx&2ddIyR7KzQ_(pVk)po^X
zYm4b*N@2#l^&OKpX1s!NT@!qy>sl=35ZX>Io=9dT11mv7=Cx591Io3&SEW5HQvhQ^
zD3ZOM_zM&U0`$uTMP2gpAN&dTzx5U)V>o~7CM)k4rNU7a1swq@!`9w5+FEHS^2ZX_
z#TOf&YTnxzKEs4$B4$8?6KZa>@^i&?A^fQBIj)Bz>&-PW0z#sQXY@YlP_wo!`h=Y7
zgiht+Je|Q=92nQPj7~%0tmYe-Q2D2CxS(dHOuh}*r+OUp^D+6bWwRsFUWdw##i<EU
zslQC;L<rf{R-t^5+F)Qlgmj=FIi5yIllpxCH`a5*G-!w<_f(H-8lfo~LlH=Oox&P}
z)evJxK;eSIc_mRF5ds=d7}zu_A#EUgrcTsQrDb5QtmLGZqnc`5Q`I$PJpx;>y)#9d
z60I$+7xq_KRcM`Ec;x~Y)-oMW2*`dF+>Ge`z&?;;YEYL7#Xd%Y1?3gd_+%18GUgK(
zA(f2vRNDJgI-3Q-Qy8l;rARzffIy3Kg1kjzaVn;BE7J8nu6MZL=$z;ZRSeAMGodvr
ziB-w_f^5OrdTjF~*!(9GFT_n=wuP9Cy*9lG(mkMudF`6Dp*I$TR{XiY@bgS|4>`H{
zGV`+q-W&17*ETiWYS6P{q`}k1r0bgV$2~!5in?U9waxkL9;?$C^|*q{;QL-os$8Fp
z6VttD49Zyg;OLqL?M32aN+_(w^}P(8VL+x`+a<O_GBH%^q3*!!h(Q`o?{Hm%YX!|U
zRe>!lP~h`DCm5<YR69@XdNE`aUY+R9jY{!kvblL2O|xP=8lytw{Op9?-96f_#a`IK
zM-hD%s^Gid_!`fB>?2IaB_VeB)j8Hy-1*#}Fn{G8I^WP5Lktl;s?b=1kIWvP;UAo^
z>m|c?^!Q~)(+RhiE!ui^C;MzSnxlIip}oV_GY16Yh?TexZ(W%1WB=p7!kxeSU*cM~
zKGS1-q@n*od;Cx~sQIvZl=GiC@%cY%3YU(W;Cirj*f?Y|i%><r7xrU#BGw1@dNPz9
zWrLFGtThs)5p-ly@6oFTl!n=&<M+P&B>=QQOTTobxpd`#$Dg>&m8*N~?Tx6$$>?CP
zuwO?6s6@G0o-H{)Ub4MAWxQQeRvN1mvCyb!=z5<f)S@Afm#Nc~S+b^d5zxS5=}=}x
zx9AAFbuvuIZ%-O<TNd<PhjR{N6^D;sWBK`SQ<#$3D{r&)(f8tsm{Mc5r?|ewdWSnZ
z$8OhYy1IFzZ4g}&vKfe|k>$g`{L}pI-~F%n$bL<)3T}rPudn9(Z2DZZ*6Tws=M9Fn
z#r*IYDV6B8DUv0MNdg9b=6~{8e)U@~^5XA(ffpWooG-ur8ZZ9tmw5hXe@qUG9Yg)u
zpJx1D|1(bi<m;S%`2FlZ{|q5UbfLNN+#$Z}x%JisnFhy5a1q}H7R!dk`GVjTlkGau
zj5H_r7rgP6H@Wktf5dxFPI#s$sf<v}P~yOgH8cwP;HejpoB!e)JonURP-RVF6IveD
zG@!f$#m=2|@xfZ>n=xwjy+eVxQH*X-k`xSurw>6gPcseA2^~`OdSRH7hTw@l;+&^B
z>sXvEX_hTYTdHYIF&R@@O;Ht8wZayNHoDf7m7%B%^{Aj4mz1MgG)~qI###h%_X}O3
zkflt$(2M@Xw+`3#^o^rkw5*y=TriUp<Gc9?S*MsAn*qUG7ajllHW4>8O>B;kA9?P$
zo=VBj%I?ar(J%;oSVnM=osI_W)c4@9(qpW@dw6*p2zrc<-ap1DMs>lZOZ!}Z{1W?z
zd(@Lkrsj!i9{MF$KlC16`Qxwf{GQ>9|INST?mO@B<G=7zELL;2rz38^v7}v`a{BOu
z$)w=)=#=VkmmmMhKhOT<OOmsx0<LRVH7i{2`TVc`OJ4euFHse+IG(dRn&8fR-uRQR
z^XeDA$<B>Krn^(7yW5zcnT*A7Z0q77s<sqWf$Ae?@4UmfGGr6G5J6cDc$(Rq>A^N3
z=wUz;z2CTUN$Z^U_CZ6fq!?d+S3;%Ig^hp?8n4zv#^5|gE6$FOc;$0n#yy-f9hWri
zf)FB=PNpQK#utj+iw8qYNbLP!#xx+3Dj{7V4(_%C%SJIU6ds}N=FdB~v`K#$hfL9p
z^^%7|0|LJ}*pPkP`*o8VQt_TOe<AD2SU0B`t;^?WZcI&f?ahs9rf{a8C+w!wz@%nU
z;=H7BjKg3c4)=hdf)US#UXch!WrpK0AkEb0%~_MyGPL#@ar^ZN!0@6242YY(QVQkF
zx>J4^ACVLhW2JzX$gM&e&)(6y04AhHFELy+3Binst{Vs*d{81k7>s2y#Zo6=G9z_@
zUK?9b)D=}-Q5FSNJz_MPP>*X#G|-yKWP<a7s;Y3kcrJ`b6}FUIWEBHJJF2?m`s3GW
zx}N#6q3e9|^-yBCM1eK|uN)B3UK5lOb03u%F=6nn7)m$|I$HT0t(FKkV+Q@JHDGNy
zTur47(L{VyEZYX}lWsCKy5h3Suy8YNG&&hXi2NxI1FeC|JFIio!&ElzyT}@77#J06
zeJY9VpfakP_a5?GSuoY>Ph8^X|JuJxGe70dTd&i3B}t0Lf-(5)fuh%ui7HXFb93st
zfVJy^^zOk0PVU^JTdr8mme^2<0at|3r=0VUYffoWyQU*LFA-o_x)8F)Sq9TFMttAn
zox{|Xe15W1%ID=b-n$R<eTQ!veAf{AUQ#8B0&B{Yv#rU)0O#fGZ)P(>??l$46_lb~
z9@cXD41*6eizQWA(sVr!9=^?VI;CwJ`cCSWCPr#g@Yb8Rxpe(1b*;E}|1MT7nQZNI
z^yTlMmo3Hyg0V!W#bLOR`^Tz)R-W2eoQ)jMPS~!tv6G5!bw=+yF6?Y`G@G$|+%S9X
zHsy1>L{npmnuEgw=ErYQj!ODgGNnB0USwcW{(wDxC>zuPRq}btorT%?wol^yWDV*X
zkF8SVg<4a$*5H%c{J%9K6?5S)@O#`09!?|KpybeSA(<-h5IuU5d5HYRpFQAVQ(^5k
zNB1A{(pPR{tmDGrlpD`p<;IPRTzPDl$#x|=)ELr6Et=D^syKQ0kcYRAIJ|nu!POng
zv1m1-RT$4;+@gkqK2qp{X64ZZpd;1D@Zio0qGffq#9!LNk{az?*raP{jHP!%v)#LN
zi3f`>a=5q4$#>pn@1xJ-wJ2StHxBV{-@`5f{`d@T9IjjWjlPOpY{Zy`1%YT_`rIBL
z`JeyW{MJACC(LJO6uTvV>F@kUbg7CMW(P>|DRiv-d}{<G-_lyE9Jz-f60|~<@XP<l
zzr(NoC;yO#Uw?}mSFZABfA632zMuV3ajpffe)6N7|M8cIufM~qzxuoEKm7!{Dx^rQ
zG}oTL%DuOr;pFB!Y#~~zu63NA%vd#@Y_3Y-J=}le9^d@6KgPWJIv<}J4opR94NB)s
zG>F>J#z>piOto^XUV6aGzw%X{`YS()Ha)~%#w%Grv9WOpb0A%znCX1$TO)>iTB>zH
zmLa=rrU5v=)<5|~=Z!uN*}y(X(G;_Ny_beHc+b(jGnQvY;u~~L)CLzF#%aoEu~AV3
zO;u|~2I^=jO@S&4C@WM^qKjJS>xINC`RtOc<seYT;dH;&M`<N)&IXKC6c(rrX4$h^
zHt+ftGtjftx&V2k8PyvFzIyaaZ1kny8;y`Y&~@|r=*G4NS~F8%lQ|ILBc@n+FH8`b
zP}EYPux5g6{QV7tRVgS-&83UGJa+An!>bn<Z`EQ*l{l=<MXJdz58rr`AN}*6;S2xx
z*Z9!hHedhTS9#-`U*khR@#DPz6Q9OJ$F0|2#RSFf)k{qFw|MNiXDG%KR!xgifq-Mx
zHr#&c6<+%N&(SPe^wt<#mP~ZP^4>j0wxV|{)Dq_3ZJAFE<DCgt9=po)V2AC4DY~>+
zThXr?Rwt*t^p&r0<3le9Qf35=w^c=DYUZnkz0`8)s0M@)QhY_)n<S1&<Qju9&&FpN
z2O+OqJe}yjnq2q>LeufuH@?lSufNPzRa2-))Ab4cFhVVinxe97A6~%u)OdJ57&Q$o
zP`XLuMrQY*FDLsW<bqKexb*-fyUpv3s(CHl%g9G|mR?i9*NrMl<9a`6tz}0|XKWBw
z64?tO6;?V?IJ0I_3{p~4nG4GAWhZ4?U^Y`pUmETclu$s2m>Gz{4+srPOOy~2k&=gS
zQ70sIXxi04`;CAyIhQguB03G9T?D0*k~poGs6cuA_ui`S&L%S;SiwvFh%y$Br|UeS
zk9ZXlLKTE+=?9CtB&QI{s*eg!VpBymIMqsXo^m{mLKOyOHMT6N#x+G*V#@+sRTOoN
zDGREiBBHQ1Nr9qgRE_XPq4WrAHe-UoBpT`2V$QO0pbbS;p>2V8Eeb;n1{J-;L}lu6
zCP<-##%a><BG)pBb*7BQSj%WSrZD2x+OJxCbo6~k*LS$C$9r+qca0kyqBG-%lwY1>
zg)(}t(ZeuG>A0q_>WzCc=JR$#+nt-&jUkUjHtS)XtF%YB)U29>yXvSmYDu|2Y!Ucd
zf9HRNu{HN@zsP(U@S!9I2eup<IKOE^qzuMbj80nOC@5v5lKMzTwsyDC*5JFI)%lW&
zh-;lN!F(XNhzSZ6L_2Df!nZB4cPN)*tRg1Y6KU;v=jqxOYYN#{Ld2xJVM%F9!*nlN
z*U<O$ZG-C?hyi2ASX-iv#td~%9CDY%NU2GKn}S$af)i(H5@r#V)G@?B*Lzm0CF6R;
ztFOJvGhM)YxOewHdnsk2YZff$Gk*VH{08s+(T}k@nxVI+EbhHQe{xK~;Z*h{SfNxY
z-Zl{ko_nVY9yTlVHhkdIzrdX@e3g+Z2yMXYImKjxI$hy9;JhTBHWXKPu~gjp_FKIC
z@-23%eN<ecj0_b9pAr9qNAHCn(grmb=Mssw!B+!m^2l@{KxvMT9&&hPKe+_PAvib$
zHRujDpMCv{99S}pT2npL#%r)j(R+=x7G>wW=b2r~-Ru0_|MdUP!|oC$(PithMClr>
z74s#0^DFoH`j>8^wd4BZ`@Hvs8$5pFkjYk+ZtjRJ6<d=DN4HLS@Xm|ec=|fmo`0Ot
zv_vb1DJ{O~hYeGw0z#{#&FLH{7)@(dGY8r+pUvo+3zQS#q^cbEn45d6@x8*k!1Tg4
z??i_W9p`V}lj0yX(fdzcVfFSyOf)pN?lJk`Q$%al)9fVn-9&P79-nAj6;J-eQ~ZZN
z^}iS73~6Fr4471uYXk`;eQg~pAY*o!@TJ2}eNVDqMxn-mU;fYjr?~T$x4t;z@!$MY
z?tkaoTz=mNq%f)A@n8Hb-~IdlHLKU}aOcY}as4wN70MKWYGio+$DikmCl85E4!pkg
ztj=4SreS+Bp<8r(=?}il?azIY=VH&}<C1NquvW|qv@K*R?;|}C7aTrlDMU<Q(SvVy
z`Oa_OMP1Y=Wd?JB948=UsTtPhS`QXQ+oRXI!#v5&zaMCw>GvUNaD19R4z9gEQ+l&O
zN_fPzFA(s(#?3w5Vn*Yw=xz<DnZ=ZmVx$?3!K;8OH6~~b78gq_1xi(7l%WbaT&+Qv
zl~G~^dQU(TP!c5)GtwJTK4k~$07etEl0i-R?r(!WWa@23OfyQAE~H;0M4ME^VSgBa
z>)hC+xST<2wccyiQ89T~ln~qjiCu3?d8RZJ{voGku0!kdgndn;B+>oWudggLd%IJv
zU%SNROBdMQonow!&)fyHHLQ+~*?p>}S<KjN4VT{k9?o99#m_wRILBS&!RNlqKmV7X
zqeC;fbb-rHJwbD6N}()ozV;?Q1QrXKm@k_pe%0a5PtgP(JUk^B8BFNi3A@uLnZI)v
z-;4gwts44eq&-=&e6Zxs8@JiNameKx*Vx;?!1=ugeDhDg%Ek9QLA5=l4{9(EGo@iv
zDq`pXO;BN&P-p)E$ioAbKLHv37&B<5l*&d!Yh-)~oJb}x?Q+G@Yd1N1^<`A!+0sf_
z2*%L+UWRkVGugX98(NB;af$&^>zJn){<ULAD0@*1W=QnCjlpTU^VjF$<|!CKa+76U
zx){Z5X2UQh4?kjpLNj_R_@PYqeKzJwPn2f;j~uyK5}&VMOccjWCezyNjbTVOti>E#
zHGC%0n8>g(CI+M$u7^(rYJSK0atzOxnvJlL{hF^+);X%wysLEnNfZtE6n`^BJgHPm
zU{T+P0@688kxZj}Lada9=sj)U(YZj-9_2NvPXlo;21qU@ri?C7%I8%nXj(Q{%u|ch
zh-bId!di;t`ac?vFvd_6rTl6ObWu<y)1DXuMOiT!*R-o{7=S3^+88IoYPRC^_>5K8
zQx+vvEC$-V3%$e#A=lX$MNpcE*#H7$Z_BRkwh-s#!WKyvE}5!<2tgcLUE9#L4PEcj
zx--OHtp9^^lCm&t&_Srp3AM@#KE!~oEnCwm*B`sa)^y5S-~H}d&z$yDm77tN&}#=r
zr8o>}^6r|~+z@+|<w2Rqpx1kGjA9P>V?XsHJoCaQm>=Kc&RaKe(Gr3a)}PVqpE2mO
z5%!%<=48XZ5mVxT42O!UWW2M*?CwLl<qBmr-gkgd^n(jh*Eng`jnag^!?_+4vI<uk
zRb!3(tf=u$k`0tmDLY;Er!39LLs{QDf_L;ygKJj!uE!PyT8jfeS}5rzig$|l0UgDi
zw_UD87h6a?Vh9o^=v^=OtrpgtPtv2~qa(I=_OQnC;NCrU_jkGV)=jGV36za!V`;ht
z6Ki<+>#wj=C~(lcb(83Nn!*wbFeZW$=3xvTO`!L(SM{akBcJ^#R8TZCjh&xjykgY{
zykD?2Dp;-zWgIj6?m45qD=e2sT;8ks#0Ngco8LTQe(%1l=|QrSdXRr3lk@+-A3u~0
zYIX<9Q_<n*NV*gB+E~Fx!|M2)aCLV;2|+~V!-SeNCC8-0D?pS8aW&BT)(7oSbDC60
z>xe3P-us?yKK`-i`Dg$9Uvg_+QPn#rO3JzhTL@ZYL<dr6jZ$005P9%GaqqWY!?~|=
zcy+=@KK2|JFYZznmTFq@_~RFO?K>;J_T_JJ^zfYbe(X7>2O~5ATNqrAWT<JAki<w?
z80NDjwk|NW$Kg3W>KJb~w9A(1UY(}sX*)^Cq)ttd_h82buGE;;QTqnhIbsnAI<o)7
zCBE}p-{4}g#p<0qs5IG*(F_yuAu%Nt9dYxFR}ruE5SI~=fN33tc1J1<Ra_si!{k5d
zg>}-rNW-RvdML6{$dmG8N1DffdY3Cd`HL`Hp<EoaR0kh^lGmPiiqSV;<o@scDF@F#
z#pvQ5cu&-Uo&7DIc;R{OeD@~7N6t@|EY4<xuI2RPoG<*&A91*v^RZFM#3`%-Uq*U7
zDir9Tp^)iz=bRMLLE~cNY*bKw_9HZmIX=D5CqMlO-hA^mz6)!2X&6jJq(I2cQXDb?
zGi7&;F6Vn<?cN$<Hb4r==`N%Rb81GD4mIme!{DBjG^##wb$`O@_Jjo;T0>clDXNO9
zDk(;VP<TfcQ(MY$L0J{lrMMy6%3#Y9TN;Y86#Y<T&`~0f<Y0Ap<%Je1l{_(k@hVXr
zy%;+r3;!6T*m<`fYu03Zq=<TCn6)|A+CVHf``AanXXBn=T|W-m!t|8^(+X<@c4%TB
z`T6UHHY057<{@sNv~i$2su0jdv9mMc#uHb#c=>?c!wVEujUnP)hq4OScPvhiIKOwq
z_~K>0`%iy`>ys@W`?2R)ynRmh;10W{;QiOGvCx|1#exT)|2psd@fXnJ5y}e7gQB1u
zPwD2Tv`fdmTSs^dTh}k4w3SG>5OF0;pSVnH9Ori*;+G!dJhAaKtz&U?&fT}}a_iu2
ztN{}uPk!Qk)H^$rqZ*@<&y5S%qNJYGJUlvP_w0nx)>!uTkSFvER5`hJVlF3F%_o$_
zuPJN!{y<^1#`QhN@7&?ecV3};c*fW$Oza6>5e;;WAgATTaIm+>mFtJZs^HsizD0Ju
zBxaO84g;r0mx?sbVHh;2VV}+%lZ^5+G@Xw!Dxz8=rh&jZE(6g3LZXzbLD!d2)u_|Z
zCGQ`Zv61&7Vlukf7^J0hdAJ7%!)-9oe)ZaPDo+w~zV!y~pJbqG6fm!4rQ<+j&3`uR
z_31B%4PBSk>vKTlIqO?ZqY~nUAf!s6dW|w(hIeS9`QFdv-FpO=D7rDAvcs{;I?jmq
z3hgznk7zCXL5_GKh<TjEwM0SMvQI&BQr4uV)Jk5aQsSl_jnG21PJgZq+FFXTq9_Da
zMJYzL7<UzgrLL<%g5g}BK6gE!pgt<<vcgAA@Lo)Ow4%+FSDoC5(@;b!g((ZPDli6Y
zp|Hxx2v&g$J<fG3mn(el(AqK{jj%;QX|=exlUWQ6T`$hwgQK+0AGCbNS>6ybxRIJv
zZ46~;xpM6~rdI6iZ}Gy%KgKt|^kqN~`QFjx!DAd6!Q6%pj_ZlRMj9?=gqf6KA53E0
zMH#iro}(zo1%Kr~_>TxaaPN(8({#cJh{-@GD230lO6jwWM659c>jxccP>Mcy$`}Uf
zb0o5LVTa?}chSAWv^`o$Se^<hv80>;8%RUeHY;iHW5BC`jS=r1MOom3BWj`dDkZ}|
zLJ;b5kZk5hh5|0&+J=6$z;&&d@)ZTPD9D@!jY%%#Kn(O<$MXCf-?ao=P%H4>CG$^5
z3{HkQ$%iH&W=&1gQrH#S<6Umvyv6Q?UFP#8v-5K%lNx6u^Vy8?&Xm1riGO&E@{xG-
z0PlLJtz1i++@+D|o~RVPw~TGU`0^h6&)nd(-~UrgcSfr;OBE@VMq`*vw|V=)G5g*z
znN-~Q{jafmX&=T@?%%peKfN&cnFOU!QJ4jrpu!K*<A<_A)vP(&-B3UZq$*997#}R|
zJ){WRoS!Xen})ia5Xn@Fq$vOf4xD05>meWB^8Y$lf$5i+VG{+aROooZhd=lks;!#O
z|Na{k<ullFL_Hn}-KDVD!iYFuk>U!H8=GubRB_63?)ltrzKL6%as9~y-v7}jsJATF
zpM8wQ7teU@rB^t+d(Qhl`93Z^bwQp_2aGmC!_*4zJ*LovUeorHJy=gPw_lyHy)$At
zTd^0m(8l7UPkN3N%AK;~LJXL?q$@^r-s8I#aTk*BtG6n;B2swE?CueMzCd5t1#&(A
zsF6`dX|p@3O7yIlCc#>*X=`FppQ?%MY^v7OVU;VOOzT>IkOSj!pk5{yD5VSpDKO*S
zlwNsE6jKSOJ<tB~PxAFwUSVso;LTtEQ{MkK{=5W32cb$o@%|g!y?LK*wPLmEx%cWF
zj$Zx-)4R9%z}A@Y$fBIbmxfk3=#+$+Yned$&f#>#M+03)t2@pv820|c3(WeGvYPOb
zk3Y-Kg@Oxb6Ykw#5pd*kN(Ko*4!6h}RZSVBS*IA&`j4rhU-JXj)(%;B8d9)#2tH+f
zCWM@Xn8vJUlKfLz!^!<6XNx6aJf<?BD?>f57>`C&lbWKk6s5*gnxc>*#Mla3$i%d)
zYN|>a*`ko>2(86v#e1JLWD2`koJ(d)a_#oQg21nhP`(fBm@q7=xb9IOnUv>iy8eCs
zWzD9|i*jR6*(`U}qu)2=JLfN|&6f}#L1yz}AD$u?MQUU1XIyG%WFy+bbXs!t>OPNM
zJ>cMQoAIOuD~2&fEBX*|eb3Q(%b^a`yO$~6^B!LNgU_?wzQxt&p5ox6&)~bBqx+|9
zInV9{4(t@S=rN&S<`&F*$Ks81R%XVr@0m3{k&0RctXA}Wk5LvyAbLelAQUip;u3DQ
zq<wgTT6JWT6tK9{79Tvtxa8$Ody&b79WGqC%HHJzin`+T-UDXu+`*nLsP}jI;E#R~
zTUNLb@U5fo+9A_#v7FN`TA~wkE^7=`SyB|1(0RJmigHpAVqkW1%JJ(rIeGIIe!ieG
zirQ+*LWwhF8kR(DF;mUu{av1T@-lndTSNu(Rg2bzG$16zOC=4eVn8*CYU6(0K;ko9
zal=0$40_`@Fh<s44=DpRh(0mqxE<ERmq(sI4T55#Eqk*5EDbL+LZ2zP8S=_U=YUQ!
zG_7RTOh%f>j{0gC5acE^YhMR`(uPS@j4>7V5ivwcvKEzJxwso-WGuVC4@OP-CdtM%
z=^oQY$<QLo8B7dNge0v{C@*_%7-$IZ@*@p|NtuqBHKC%%1&4F8MlfDbG#_vw(z`$i
zJt{Q=g;6-Pe9oy0&l!tKX{;71s4a}_x5}WS#3dER48K}y#dt_-bWt&?D}3J)L&O-1
zHUVQC0cj4<64#=Dx++-p1%(r1owj!<ZL!9J>r(Vj6k{AyOKgzQqId5dUALldmdqC`
zK{c&rT$fZ;jj1ihh+{8G405zJ_+SWa#Q7YTlBnJQA?e7|df7DQ(i`<2M7E~8bSuw;
z+jn?)?>>)RxyHq-mw4;tSJ9=$jw-CStmZ3WHU-i6hLq8npDupSDmK~3EB}ei!)3@K
z;&7CKU-+wkk-bZgb9(Dl?%sKTr$WbwQkI|)rWdIm-J}d?G3G3c#l=pXntf`9tvDga
z2)h^ddE4ZOpyZ~XFH<k*sKf{w@p|aI7%llo&=9=Gg$`qLX0GT&jVT6wyO=A60ZAoB
z*;o6XcD0~iEv1pIO3I={n<AZGV&df!lS4;M*Y>n$=W?wh_~3_VmZ%IdI2nRqg&u4Q
zybl!0@bKskqtOoc9^9pGJ0g+A`GVc83CG8$7;R8xfwqzDy#i*AaN1$Cl{FJoGSf|F
zpTXmu#&sR7aXfM3GB5w?Z}H%zmzj1wT|LF_?$I<0Rz6_IW6E8_vRhG~uQ(`5)crZj
ze#vD233jjVa5GB^lZn^&P+xwa9zT=~s@$I{Q5Dl;IJg5QDCW10X&yS3{Rv&aVAZUs
zrs6ak@^CFG9-%)1L2V#+xiKQ^y9O0Y>YXGPHJX-R{KZeOcd*6R{^-l}tLx}$B4OGV
zl!d}-j3NbB%XnJp99XUZnQ&q~rmm?b+uVQH@!kK|=Q+4m@gqO}JXfE4n$xd-m)kcV
zu>9>W@t%)8!;>Grk*F7nKBneei5{UW6~_;H%86w>DQOlR^OG6N*&Oc=uqH)`IFF8?
z3n>`1idbt2p(jkoaK56}HJr5QagEXirclJm4s8>N?Sk&kDfR9iQAfgnILqfEvdC1L
zYrtn+ZwlwmV}d9X!Vr=pvC7l%T*z#ILPK%FkPVy4e9}it%ofFY+CTg?4L*oS9Ys)q
zy{E5WpM4H<=N9d^UT5~5+f3hoP2P(R6cfdJKJqLt{qa{gdgX1-KL3Y2J*t_EMkpWX
zLLgW<1auIMWak}$LQqo`2~H5KGkVGoKgPu$dzuy7>^=P~+v73VmWbovaLU7z6(LGE
zJ*in&n1PlwXvp%QWaI8w4+e4rpNiqEJ55NEI7g}>gEOMn&V{*Z9wy+5v*Q`Fqf?p?
zS(ph%7Z?}lS3R9m6rH6i!IUMc3{<6}C}C9f^dp!kLs5$!PzMFk5~3D;OFl78rbg)$
z_$jZ$^^rD0;{tsfa9yD5#l^Dwe&{#ylsv7UxN(SVFdS3SK0ro8A)n)OAfu*n{e2rI
zF{C-n@LU<G9VlQqN{fv(o)`(a;YbU6K$RdhtjP6ED3p#csti{z@A3HKm$`6pkLms{
zrYIpu*01Oyd*b{Q-#PyA-}^`W?f>n6%nP6WW!|}Zjr(8tJTJfgDhK{`u0Qb<PrUC2
zXZL3uzwtI(&SPuKQYl<pz{0b&J>hh<BznVsCA7UW1x@QA(X}^SWwlDp=(uKb<1ze$
z6ZGjUIa7L6Omw!U$28#1AMoIvBklvGGE~a2y}d<kCLF(hm*4u&{~^~u^ehKAZZO%|
z5ogQMgiw37M>TD^;NjcvusA;Ed@-YaaK!oDW15F^CR-Kt<tgV6PO+`0GKNVYxyUMF
z=TXLBb%6uAx?tzp6-LJM<lzOz(@JRVT47TLrAcg$wQ)~IU>MGuyb!|18rw8$83uIO
z00)LWc9Y({iOR;P<>wCMiw$b==1^}~VTr02axW?q|20t{`LgEB(X=UtVZTcIdyL`;
zotw^KFq<4n(|XmLLz;9?sa({PDeUWEk<t>K5>h0OPW#xf$E!7xJKR0lL4Q-OqcSoa
zhchYW8!&ZW%E$C>-{mbPFfdwlQKWrbry;H6C~Ft+UBoF6EvH%=wWZuw?ISkE#3D)>
z;pkI@hv-^S*<3~?L#Z_@%%m)KTWX5JQkE6Yd5pCbRVk=x8X)@QOR$L`MjaVXDzw$~
zlPS%z<@D^75FESPV{9A`@l6p=+qJZ-6{}`Nw`x%_QdEYjE}4$UXq#DB3581oDo-S5
zo3_reYFnDFp?8kHb4iD8laH59pTFdM4lW~U$QaXl%tz1a?13P}oyS<s&;8=h5P$A7
zEY8naHcMW9`7K7HZNB^`e<tQ!-pT!thP!zRpX(ND3`T2Q2y|VaGCd>FO4?wr#Ng@K
zryt|z|3`m=zFYCecV40kmZ(gs4UMpa@&HKZT+ua9qEpVe)<#f8*16abp#YD@BPP>r
zn)8KZLz^@hQ3^7X3NZ@n8&7m0Ndg$oUt^1uX)oWa$?-f6lU=`^A0nb6L7KW|Nw-=O
z`i`P1D9Z}8k=I~sGGh}ISfdHPWB%|VJ~+JZsf&HwYA)#yMq_#<I`|OLMW9bLgho*s
zLuG5~vP8$o+37LuYDH-(R*mQM;V~BuE?|oShew+h_3#011zk`$BvuC%2vIU)C1W<?
zTuXyusT>zS{w)3R1B%;o=54?%mQ<?12Ty<2Q#LKJC<J!zd-g8w!rl~Bw=}1xcswy^
zOk%MlSIiCB)(_6(hq6K4p!;ti3!*8}DCT!g=ucLJ#fsCFWJj$|7Hn_tK$)!m83-DK
zzD%b^G*6E8#vw;)em9$`7!}qiM&*i&`xiO7cf?mdcbC8ZzxbOdUdNk?#w|FV&AI>J
zl+%+Hh1zCRPO(;}&Cw=$rAO-kN*n`2rO;)?{`hG^)A1X>@(rGT?>@VGJKUc?;ONdN
zFMjStR?D8}e)K79DQKG3GJy(Rhbd)*ygY5N+7f!l>bzrlzM^k?s-j2^ltE%Oltq^*
zZXtkEk^QHy(tYklwroZ7#v6>Te`wHyjt+Nm$8R&z1<Tj&GI`%KVs?`YijWTYxY1~a
zFrd=eQ8Y&&1TWKTohVu|O^%7ex}HMjrb!Pr?wf@b8=p@q@JV%8Op1!Qp0-B<81NcC
z_7{JaZ~R~XpG<AZ&ENPUFFbn@Q&~b(xEQ%~eV5m#2ONF%S9xMGp+M7UpL8IKF^WFO
z0Hh-lbdL@aB3-MJUO5&;<l<lY6kOS(vo#0TpJOzcvN}7#g&xg_@u+0FRd9Oj6VjYa
zQ1sdiOPb1n$qkH>>zGY)a%@c8Xi#%=D_>i$Wf0UM)1Pt-V+^^Nkwb4(@AKyBj4pJz
zFhePgu?Aa|xY|+F;7dz29%0cq3as)3W$^}FA841J7(4ntU`!*!no<o0H4#A)v;vPI
zYAsao!r)6y-@wd!7IR0tYQ^FA-8b=l1YH{bWCLv))>MWyHwyjXi~rhh*Nxcbwa<Fe
z!NB*ChWL9i+x+!4B9)(Gvx&{Ke4X4swSuD7T)Mo=wQCo-bZMWh-7SiGgfie;p>74|
zaqWVW(}#TZi(ln${wIHzfAoL;&-u}x{b`>5$a}c@{8KFM+~w9Ee}PxuxXspEM;u(b
z%G00t6!+&RoW1g0+<Z=n!q<w<Lo7-xJ+2Ua8zVP3ZmR@b$@}+dFsLcDMO%a2+Gf-T
zLfgu|qjFP1a+f7(lQqgk(2n!d6M~m}W%1@czVqrGLCaL?ZU`dFNRdiPNj7nTK}RS}
z#F_%yNb~j?<ERG2C@AnAAP|!I5!>5bdjAvbUcZR%JpEfY*%{Snr6{ez2Qa#zcY$c*
zV0M*2lt=fljBIYKo0#^ktnVLCqKs4w2&Edp`T;ad(J3M7A<AR`YvcN7=lwG4t_P|l
ziMp#ZDz`SKQ#qbTNu#8qCPck%gwl%)sS(QPrB-^#Mvm)|@7irRzt@ds7^Hwj1)^4%
zDEdW-j9GgSh@L)os+p}LzYSik5#aRY!O<U$QF87CBGIlDup|k}yZy*0u}<_&pQ*2r
z6w4u=3@U&QUNX|X?BPC=&PY(oqZLAx4KmzQ%Ak{eGaF{9bbXahk?i^VjQnfTJ*SPP
zFdA(&wkQ~FZ(()HqBdHLX^6>HAB@)2Rf#bL&WqKvp3k^{|1K8~FA?z&gz6c*10N`@
zrM3lIJ7Y|t!6w(=AZN0V(Dyxk?~~iC!^ePko#=3TPt$ec(i%ZoLvRwE;*-`^-d`LX
zqKEA_ukke8?Ojhjo-i&Z9A2FA*u_Ks@HhTBeW$qm*d_K44tQ|)E|cw^Pk!pB`J+Gl
ztqmrV+-V^untK}N8WfVhEJG<5A(&+O7$pWt#lYYCkN*xuy~X|4USvLRMIxmfN|)kL
zoeq@{BYI%><$X*j+{W5OX%?!eINPUKnh0pk-r*tdoX#lmWShkpBhkC$04z+Uu35<j
z7rju*0pE6vs#-p~5QwfP>O$_t-r*DVI}+0HNa39q<a9A3v>nD;%Cg2(rQFvcilcfI
z?+Gmdr?kt4^V@H+qk9g<fgK()iY;n+Okqo6VX1a^(PfEm8@d)|P0hT%%xpBmyNLFY
zab0uo_FXZ|s|<HuzsY2G1kqDi4bHLZHKVh8_^wOr5-*W7Apt6`M`2JNOur&DkypR<
z7Ek**2OobA%NOqwU%QWQdbAM)m(VbYfwt+Xt1*|Kze3!vXv><jw&UpL&0!)vjCj@?
z@w>jq{P+F%;cQSdb%(^8Pn>?MV0FC09i5|EPxLE{^%UMQKc2ID&{3Nad6Ui#aw;Z^
z>x3Q&4T)4pt}D+xMU`P$6223-`q*W@`Q=xz<%qxhw||k{3k{x2Vxe6mfqyc)=$ptJ
zuRY-NUwE0*v&d+)3(gR<l?Cgfn0lb3S+x<nHQ}&&nxnG?-N}qeS>b(PcD~@%FMS8s
zG(7v`Pg9l#YYe3|oS*erTT)dO^V2iYcd9_M>R8Pd^sOUIgf=47PDONrqUK2_MsfM6
zr?~S6-(aE(9(?<|Jn<7B#kB!v9ha^jaP;->GL99;FTKX)pZhUlr3a_Akn`I@#H2W%
zNSdDOm?&{luxP*;p_zstG#VAMeo`oSE*biWHbf{ck`0EqFQf)6Mn80;!ON6h8;vRi
z#a31{9no8Q53%pDMpIlo<h0u3{LU?w_vSqO`+<uee2%@n9m=ABQ^j8AIk@p0XGd?c
z<sHfgY>>}a1x1;3SzQc-UN-Bhuq<3pe|5}_zy9;gti_v}$?gG@ogK-&>m0Lw&ZKOa
zZ0~S+Z^Zm`wSh7yV)R3Eltb@zir`VH1J98yYNOy?SEF%Y=H(QdTzIVwRMIu~kCw?X
z1ddN{bAECJM2rPhlo(xNwW6*n>MhG?G-AA6VJG5PTGf`aw2UfCIkuFw#uOTDrGV&z
zNA;TCH~0uH88!uvxKp}F>_zk9`iR5Q+7Z4q_|=M4drsH4@A?)qOvMLOCbxYFl}_Kc
z*-&N#KQ~>2n|0hQiq+6iz#7TP9m2XvWrG&K)}^XpD5nyVu|Wl1zudJ>KbfXk8@YJ6
z!{gU3^Vs#vZ0&9_nr@?t5`32m*#N#}b$r6TTX%WSCqBc$)o1zZ|LecQpZ><L@CU#7
zYdrP7=lJAL{Ujgw_x}dXe9psfyv(gHd=vXef5^_(gva0WG+|tDe)}HxzVjBR?TjNI
zP(gFLY>BSNKemT529wY&xn2q7(6X;8t1%YXRWz>R;L;V^hbPcFV(;;*UgBAd7R^wA
zNS};91Z^>BN&sWfQOuWgHXcf5Og<xQS_($VGZxKXGNQyA#3j>v`Y1Gev$MtC(~mQ{
zw1=uSW>jD&Bl`A)y{#=ql_hG8H3fyzoGnr$l9$ZKP2FhNm}+G9Nla@ZpFnYA_!0Bk
z%xhzVv6$DbwBhT&pHbS)rWn*f9bUhJ_=prEpLZG2%CS01DU8;j3`X}DrBPbreQHn>
z$9FKd%V%wRU2<-(qu_=nHRe4`t)pUw^Edt6<g~k%h9sk>;U0`B1}B@;qydmlxS!gv
z#v1OO)C8+2ba!O|KQy;|ABJFBDS031It8Bwx+bBh`7`wpy=YT?5D7#!gEC4S>kSAq
zNQ=%=SxY?{Q&(fO(kzz?`o4ieVN=thl)~BqQx=qUNinLhWl2$#c;_jK0#jHDYdPG%
z$lY6a@!qp)76TgY^AI|@tOu{?dnx#rrwf*gCTV0nlgSo)yIX9J$CQPZeAy83(c@8c
z&f{B8?|O0N?Va2kaqWpP3{X_0uoi=&i_q(ck9{&8)5)(QA$?I{w2_*|N0KWa+8FA3
zL_Mm}QBjtbVl?8<zW4>|2yGV`kGB|eigt>(Z{Ol$AO0xg@s!1ECXTr=;&qC-^8sZH
zK6qN^2wDs3YqB<2@@feI1N?=*`b#|h!5^WU&$<2jE4WNPHnMj_lcSC_abVh!V&sIy
zrwfgCq9KmT$te-COv-?l0s8iZJ<%c~``(aMihPDj3-#Cc9vvmul?YZBwC$4V_B1gT
zHKBLt$`V{p-?frK+Y2K`TTS%PuU7PnC4JMNwZ;~epxatwj6sJK`K879ODj!xdXBmE
zbv|{yWvn{f>LIE*rGI#WTP*3E#f(Sv_eShLd6})lEtCt4&(E2(OUh=SP+w)~Rv2w~
z@bDoY|B(-K_ts59@J#iXqADdSukYwWz@Huw+BrV9_$bus=pEX7VwAd6MTK`gR$1Dn
z;M*^qad4$)_tFkmZ(OB0Ip^e+w+ZbEO2y$5m#Hr7Vh%^lR-R_jbA0<04lXf2J0jwx
zt_#ENyusG`!Fv2~HmJF9PB*NHo{<^RpDl2AkEvD3!Y{E}Gct-ID7x7h=l35{RF_e;
z+@LXPIjnOu$GXIXOzTbmuhN8tq)$ObOoZ)G<cY`kdF<+Sb}nzRoG(BrDqRdkqE7jQ
z3QbWe-t+t=o_pVAoP*b2xz8W|;cJ+<gH{DjgV!BO2b7nF!4uGR#ON_~UD7Pi3GE6K
zVL4mz=8HG!JJ0(+{T}p4;rbq1mUN4TY9y|}DmgK_rl+0H=~oSrtzm-fdN}}WsnJ@p
zDT9iP4|iE;&qcjrbtiD^OE1wZTEKGp#$8%JqhB3!v}}0ozxoGsrKMj*`o_tm*L(WD
z$H#!OB{3R&)IuYPJxWDsVywp4lENCSw$!7N$?h&TYXg#)Je5g^=b{u{uJ7?aBs!U7
zh8d$VTC^+L7_7}f<pnBe(bE(q-YQ~|f~X@r``3Pwswk6QvOpIlwlJUreiV5A%5yyb
z^Urep`dj?Y|LMPE*A{7-T@diN&=U(UbSw;$5SjI!@lSk+CqMgR9IZMWnz|fg>s^Y{
zGGF!dO+(-I-0yp?l_R#cM^u$!)n!Vb6j~vKbZ}>0-uLWlAvZb+G0Kzw^!M}O6|z}U
zNcnnUqY&_-kqgPVqxWKUd31hG(?_(KFtQF?O(-V?)pSI)SF^P@W-=X7PfLor#MUKc
zQAieQX(=j=E^IRA$c?qZhbWWdC<usi4(FijJ+5`EW{%aYWwmTsE#~;H!_Pa~<wB;P
z--qfk4jR<_|M^<35z9=i&5S^q5v)7RFl<qoo+zJ5rUA#{E9-$+F0}K0l#BR0WEdLa
z{K6hVnNm?7*BGxvRz>#qMqIykz}2h!T-e{G-rho&1=`9r*H#momVR;0{Nw>QZ_oH^
z|A)UtP?6EWA)o#${|-O;bHB(NU;jG)=s*82*xK9SlfUrixbfp3=He$m#L3M&oPFyh
zj=u2*#d%9vjySw<!1jebuC|Vo`{&$SE_hImS!#oGK2h`KUmX*DEXaLnt>L{t@k#ct
zT;|)q`5VwQT>QxMjCZ!>{f2;FENSkaa&+?!^Lr1a@iB>t7G+>*Q0qoG%Qn(KN(V7l
ziVSFr&&NPA0O^Tz9;_{>wzjDz6U=0c-Wt=?mhB5uCfnQM2<sinb!^omthHcEtg%DW
z=tICH_JTB^c^zyFJvK~<HisaOn05geQZv7%_bQziV;t7Rdbp7c>B5>S8wUDoS}R(u
zpHB<jmDB_pGh}=XsE$fSvlN{=F)DQ4f>;k=!f;l{WaJZk9G+j+sUFx+d56ypc^>Ly
zc8E&!;e3B&?yk_FH8_YkG2}^2co;HT^T)e(OHz700Ep>%13`4D+K5B?rW5hIw#Tg5
zUxf7_5^vD4kb7ca%M2~Fe4jR0#0kDAk}fkl5NjD07$QbT%DSSg%e4PX-nBB8svc8~
zC)8EVXgsDYD#}vUEFn^s6_JRoG~2t|v`tIz0<+~Dg_g!LMEcILYC1Xx%f*VtVu5R&
zAZfu57rE(r=CdVQMG98{;QNp$!~xfOybJVQ3j}O(-PKxS3WG5P#AKL*0wlpe6$a%*
zo{%U~^7o7#_FI(>%4l^;2M~{hXP$kIhsXD5R!geMn5r6~f<YG+-*&Xlvsy}%g(|pv
z?<UVY^%P(I<~Ie43?hk2+q<l@2u^7NmOzmBoXq6X5vvQHdE#+?`9JvE_~5wr_P3eM
zJE9UlhNx7kDbn;rrA*u0SVqx?W^JpsLe1_?lEvi1C#`EA1NC@BJ(+^*hnaPDY4*-x
ztiicX&Zso2Br#Tw9^7O9!Xd8fa9xM*9JUZ23*Yrb=LkMx3PX$@=RMtGNxxh|4A@dU
zD2$PYFO%hC5F=CVBl_efH~R0eb9jb3dq}{eRx8};8O8>RsYb;SzV{g25SBM_{S_!{
z?4)H<_UukO%A;fMhg~?>;?m_S^lf0(wj5sCr>HD-Wr@)foV@?G1K$JDqJvPDaSGS^
z<aq%~m$;NO)dvWT;_WwPET$`}@d>t2?0xVWZs9QA;OvM+(=dB&PB%ZN7+ad1ZL}%q
zdM|!2nYupc@8e)D`h)cNA#G6Ck8xR``#|?_&dHm%n3N;vJDgjJiG?;a(XyJYXy!6S
z)MbedVo>BAgQa*5<{s!g19d10RgMTo97HvSx{6#maJ=VzPZLAW{Ok-{S}BBdayW$H
z5Jtsh@FaHWwt(l}zt6MpJ>cYI$shdQOWe7wDa(oERBBKuLL?@YxU(9)vrA!Xj^B8l
ztx5z9M{l3;@}IoIGatW!tu#i7SlxHMY#O~I#ulQcZ(G_`L-cFJ#(9T9)AeGOpb+G<
z*xuq!f68MYd_UJe^c<thdz6zJGcJig@f2Ep0??Sk;-f}IBNIww5*#TAt_K`o(?RQq
z(WJ??5pEI*#nZ@P&4%fV%7s>LqCkC*Rx%{x3~`&y$U;aPz-Doz(#D%NZ~1=TJ6zwP
zv>_N-5-S(!{Yvx@A<zd;^pSqyIbNKz^UMn@Uw(-(h0w#a944U+SQLKC@?dOv`fvUc
z7eDeoj*jPa!HE%-ttdt#BG4>XgsvmBnzL3>nNu#^*yGxjU0#3tOx}lAgeV#y*EyN0
z>zs+WMgnA7EqWvupl|))+!>N%r0YGw2V5WM+fIsv&L>NFpX}U&<eUa5sOW>ink}wf
zw3vDdRicI!5nEcKhxWWhwF<Xz*ilC{st8p@HLg*$!aJ~|h$;jDu!X_ujOs|Bwz1G?
zFdjnF)Ac=31)S>$twZ;bZr<UR5?|t6Gf+O?%}1vF=Z0`7nm4(-8EH~6p&IM&&KkHd
zGz;>TwT_*nBSow&^RusAYsgO%v-OSI*h=zQrWIsY(~Q=pXVb{uu{ABZcI|-2t{rfA
z^?-6*QI2aVQ2I_@PwePcOHLmgb9#0R`%hsfhq$I8wk_6!nT&bi6F<rm&%B3kf9>o1
z-hcD!od1LWjvxKm&+x*J{}@mH(r0l$*K+rz?{e~|U!{5b7K=s8gIP;&6lc1}6%z=e
zhYdbAOMWoIQQG2s$KGRCP=(<uzwzr_+@3If^19^bCf$BipmxSo`#W5I_8Osa^vjOW
zcB~e27N=)4vs0Sc96FDUnlcvHsFH*r@6B@Gx)2Fb<AGQx%&4Z?ttdtn6Q${5AXXJS
zmoKw_?HZ%W2#ho?-uHsO22Fc<&Uou$qN9}<G$yTaAJZ@>H%^&aOQzTk-SNhX+u$*5
zAbl|zZEWh6vcu>=H6I8RYG{gwG0R3noLhF$E;68cxp&n%F@OfA(lyISYiVA!5sJP^
zjbBg^d^RkS=$=7#k)UI3Hr{WH>qVcg$)H2bM<{Y;hY*v4d%7pp`n-_m)q>DuvoMvj
zYqM!s8d&HJEn*%H4V|&%P)8?`9p9JQQ3)b#l9TYjc2ElE`h>6xo5%+*&dbq=>$HmE
z>>3SPDGGuh_DZ3QK`To+t|><qqsf@EnxL)3dIchd(bS`w@uZ?GWf&{#Mh1jFc#JX0
z?*RJHF&b4Am7%a{Xq+TF(TVfwV%e}<cC=kDkuJd^gL$cWbb!t|X3G|>yu^ce4{4As
z%qpSl8m~~nVT>3%)m24R*Yv$()hvbT+&jENhWfc?8yF`LgF~l`O&3KDpwoNwUCZUm
zPeAl6&Q6HVGVa=BTBY$`Oqlw<=XB|5T%<A;Z@qn!Pk-Xa_~v(BLODV2O-6Qg+L)Dv
z2s%W3R74{~e_7Rm@o2=~`rrI78SU)T&K`2>&6^Ulmr>1_{7D4AlQod;%}AE?tk<m8
zI_XdoqP;Q9HmSD4D5l#}7Dq?qBm~j>dxG!Lwiwn0DN0ajgEf}7-g=$)eemNn&786v
z;oBBpmIT+)FBe1?l4L=m3tiLEE#?H@QP?tJI)Ugtu5F1XU<%QV(>Ls`-sXaT8{;0*
zEIegx2&)D+o244n%Y7Uc)V0QrBEgiX63XqG(T>424gE5(M<gugRK2G)6WnOb)`cgr
zHZmF2f~3bD9St@@m^WA_^<(FIkqszK=p|_b4AJxn<#!Sp=!Kznd@{#BbQoK5>-L=B
zJeC4ffi?lk5^Eur72a4v--@Qy14`~^DIfI40Q?8*@x$4mro-7fSlyX(dg~rxwPIWq
zgwWG0Pf&demBkyw`Ld;_VY?LM&MkX%FM{~K51<0-!W2^%>&bE|nuxORXp`xv5hzd!
zoN0LeL+?Q;Pf!7412%0mnndf+3axSnoC}R0H)s0)Xd-+2BYycW{|LvYOMd@%U*_KJ
zKw(Ej3?VxCejTw!nzB%j*nRR@9=`T1Vk;3X=XcL}`)jwj{{E|I3ksiNLn6(p$9s(}
zq@Ze+OT3e2$rwWyI%0I_24Yo01QQMC%Ln|`|J(nLvaac4lir5K=l5g8&_E~JtxsBp
zbphs3MxiKBwnQl<8Cot7ootT2mBvt6Y5Y}k_ge?Hr|TrzeR)q#8q(+mnebk8v{`#8
z!RK+9=KJVVF_<*R>3N0%5z^FELg}@YL%||wY$VB-D@|W|?2Qq8vgEt3zs7bSAUa&s
zn5Y@m1uG4s4?NG0{`<c~abe1;3p9N)B|>A$5mlveU7%eyawr)==TFaJcfMqA&vN0+
za<+(r-X%J5SPcOd^CiWo8m9gUK1;MHF%5Bj0AoaBqLMLEZa|$+IcV~4)(7Y485JYC
zcU-x+#ic9zy#Lw9`TKwGSLnhqU2Mf*AtIC=D-ON3s4%vm92v?{L0y-OCgb$)i1Aj<
zWNSn{t*OT)<*1^}&c8-e7mBJh^dV9df~s~akFt?yz0mWF0b@~44(83C@m<?u(&3R;
zpIX=c>!x+kQKxBjO!UE+(W`Xe403{vzpt{P*(MKo1GC-u{ieh^>jVcnV_0jD^ONUf
zJf$MziQ($ieaSn$dWp&QHfB6V6@_Suv^X!izN0@sVtIbT8~11Y*f0NWyi076-iu~a
zgEA#Mmk;>Z&wQHqe&hw-c=5Y@^Q&LstAF%G_OD;$g-?B$$DV$Miy!(+&^4Uhe87z*
z*h2IF`G5Qq{EFee3^AY_MEpQ!R6&04T+f|veVfOxUZSc;P!uQ~Da!&=T2xt5lw+#V
zn0h>+C`;<GL>?(^gwE<^=+d_x&0@jw<c#IX8S~>ax@DW<3VIZQ)og}u8cYSTlzYg<
z2)bareS!UJPqK6I09|T&*U+^~i9ztaFf*be;OJ&^irsyQdC+Cr6FsPuJFM1v&!H*J
z1$AU&C)#XSH`a{GoE|nB&=ieE)}yNRLW^l%2)WVCNTM3Pe{jrAe{8jAzt>b=i7y)v
z(cHME+*mn71RGIl-_>Y>*?6NcIQr(|I{GM1uu2Usz{5?Z2HZ=2AJ+qzM|kkL<6Lu{
z#4XksM9&U`ZZ`it&4vd-ejYV^pNe7#6>`%WgyQ|~?=h+QNblD>FUYh>do>KY-x%~T
zY|zQrC?>Sa*aB^9tgW!+h@vbh%ZhS5rWn=evP7u@=V7&4v6#;|?K;NG1=?Idn;PrI
zT*w+rp$oj$Ot&U@WPThybiGXTjYZa5kml1^O=$}X-C<NrNgWDZY@*Z}Z4^E_mWy6s
z>%wA{5*ljs$rw+l_StDR>WD6_xWOBXa}q$UV~||-;D}vMa88EJI<0x~z#$Jn<d0>@
zEREY_G~#@AO2E>GNY{%QP=_Hn2&y1P!>Wf4MQJUkM<;CW?NXM)xKf3p(262C+YaVp
z>E6qF@Vu_A)%@IF{so@?&_{rd+pm0^wi7z8%wTe!H8Rm646d>v4MQnRLW$LS$YG7J
z7koY_gc2?^TG8Z=w#F=ukJ9i-4f~Jxo+1?J07|C|l@1vl;m++leCj86WFXvjbZv_+
zEPb=0Z(5=c=t2{sr|&xY)dCVjMi(-j3tbN;r3>gD6&#bWWHNh+X}C`;OPp^}CbF0{
z^s|<cEm67=b_N=wEY?V>LWq{Q@^pSq8w+AF#$oXz&2AYOHE{R%9dz|LqwQ@0nN%cr
zhxdU%PpK_FEK#%~AsPmBPFQAz758caDmYnl1fmh332TVK;G>aj_CgGsWU60atU{>*
zZ@?6WcGU{~9KW%7Z|r&B2P3xsz8^o74XR%jbh9&#UVVe+cur`0#?yjWD(3T+Rnvfu
zv`t6X_nZLoKn=ey8mhelQyNywCC$naW*x2(dSJCxql<{{t)Qb`$;LO#m1WwbZGqMS
zMTspcs_BBOSFQ>bSVc^V5ztzt16^+TsIp*xlS!QDQfZNDC5`HZ-4TEBmp{b4yEA_4
zSHHo^jW9(8*c2inn~KvBT^lYu_6&Dkc}WV$AeyYAG#oy$i?s%?J*I%R>4+}MWJfDj
z=S%v&r!v*x2pWQ;?*q;|thT5S>HPvby3E<~jOcp_(zaU6jI<$o1wPQnDDO2<u0$fU
z7K0Nbz88`G5FereWs>eI9V(forq<!{8!drMV$GWK6jGzCv_dBhPo7?eh!_hfuviFU
z*b!n#u8&dz#AKonl8Hn%7RbeZF0ev!dbEX1kylS&<l3h`#o~|ufN|DZnUcGz=ed9H
z-{QGH_X%QA;-kigF0pS6rl_c=23u(s$1`vpN*7Q>LAaIS<N=JQJ=d=W!mAC7FcocW
zvO*8NqnRy*N^FW@f~%5ia&}9JF3MUQTq+aoQw!Zm#gNj|+2E;yvKm}qdw0UMXP@Lx
z|LMPCHlDIv9MKz6M1V%wg0d_{%QGEQ)D>l2VGB)B+7vHSP+D0}uI*?{r0-jn^F;Zz
zlJ{myg((cSu+oGoF)pcUO*g6Vog=O+J&n9S4KWx({C-e;H`W|kFBpdoQLVedVc@|=
zb0K*GY5fFP>xYJ)u-*%XZgaz3G_JpQK*-Z;h#TLahG}~0Q3iwyic)iMxWlz;mw4=n
zt4#OzFr^XKUc}%n2BFCJXQ!-APq_Es7WWpGC!T*l-UVXYL68tYiFuIMU6du;7x#H#
z=c7FN%;VgB<rUs|<840wYoEhTzR3RNLu?_r$W7C+xPQiKDT6i_<3Pt-vm63e$-YvK
z3N9S(V|<6LDs)w#wNUc35$f!8cb}cZ%hcm7Y*nC*P|5Q?yP1h8ADa~aA(@5A78CK#
zaqq3Sx%JI&<7YG4zQKncU6yQJy2`~HPcqp*ASzAYHZ-dRdL@xGXpQogpjrrEqDA#!
zw87d!L>)0TH%c7h{l;1uG^m-!DXT*CbCDtDENeQIT03;hYDsjzWKNR5R}CAdFjQ1b
z4zI)7eza(o^Jo2aH8jGh=~FVqTsP1O0oAgP7$d)GrSK^xD#VP?sKGQ=4-?tp2d(Ls
zn-iQ(WLP1CFqPX+HVW*#r{(8dHzw9vq7vkBU@@cvIPZhQHOvN1xtmD$WH!|bF^VMN
z-QHscG(3@tgP@7ZrUAhQ&sm1OiP~9~6h+Nsy2E&Dm+^E$F`1yWNNC#5v1l5WZNt&&
zjM;p~{Cvr3*3$PK(RWmp;jwF%DX(2*GOj7gnzAY>tCB)nyc5(iBo?+wvYE25Y;R5J
zJBRa<sVr>vh$$7ew-nyVuvrzuy+&eXhX76Ez(=&|K-(d6Sb3>%RETmOrR5Twr)7oj
zdxDe9SX2NDp%rEjA&)b3%1Ji~Y>?~cGpZON2G9QCF3Lpa=QHYRLetI(u|N|sR?)Ze
z9*VLgv>uD5EKBCg8rOMtwk~k&&rlJp(NrZ&%6c#$)FFrgpiy)^f-(ok;l(|E_Ambu
zAvoIOBaR=O3J8%`L=XzI>LD19u?5N$6jl&!AH>`(MyW^gay3R%6a~R$BsstD)W}At
zM`OrlgfR%U-SuLMlxXwo#0B{*x~>rfzA!?GZDcUlE*0H!PVCdqQU>33w5t`-^=J*&
zlrsEuZPHLnT+yhuOy>94UcACqaZ2!>WjDuYLp?6BDC(_BhGVuOwk@G;L3_zyje*!7
z(cU>>We;(C&(ZlEimeIzyHn;zZ&Sy>)%_`N9p9nRHQNVU^v=_JC$$)nl?{YZiGSD-
zyB-%bIv9H8iLS%+23-`Gvc#!?iam-FY|64ub&^F;SM4=6MzEp<Hz1O;N|HNV*QQ~1
zT(di3&V0_a-yf{U4`qXT=PTc3d2|=ooMV-yC`ybiXx);2)e@qkaZ2)(`~nvvW4%kW
zXz6DQd>bf@p{OhQyQ324dg9WfLe?<^$>~gC<JxGn5({$C<@9{$gHNCfa025A3OFEt
zve*XN5y_l;I0rNLoT2EGqEYClm#<9ucmHRf<U23k;d6g*6H{EkSfNo^(~0`TYvR~)
z@YoaFee-3}SLy}UXlwyH*QRI_8BGd8*W-FmS&Xqk)6ILjrl+V1p=T+D_YvO*`mU!K
zmCRRjE?&Nj4?U$VFhzydVhyg08Lmq%it|IMFdV*{7^rOCHn~Y|B$I`;MoA^}hV`|J
zLld7VSP{@xG*w<lv{ORiQK=XTVtqe2Q<4rUo!o6x+=7ZacfH9aQx3Smnm&_$@ESc!
z&9f;qy9Z;!$KS`<J12Cnd<%@C8Q1*CfB5fl`Ge2kOgX%F?;_rTQWa%6q8e2Yp;@+~
zPf?0!tmwL;ruB*t&l&B`c>3`r=l6T=E(_+t%A^=EhQWulaZ6ykN^Xk-k{siDI-Rlt
zHz~0Zk~^?kyZ$D$hZxaK#aI8;7x~V;hLZ<pOsXe{^%9K7loh5hj7Ampq^2B?DMvMB
zT~Uk*ib9N%>cUbK1w~~=OH^oVZ754aQ5cCzP@<poK};N+3$%SpY<rCFDU}DS3C7VI
zhgUsaa6%FLK0I<kn6J?W*-#oQwOMJi0c~zFG1rDjnKGGCsX=R+5S`rGZrnw~`p-~L
z#)xz(&h_BnlGiRWG*D7x?d*=Zdi8)Sk6mE<U<W%MVTvM++@ug9I{M{|?(B$%_m6mZ
z+HiQ~qu6>Xh>7pPyL7*&K~A!Hj#{Crg7L*YZcN_Kr6(Tax#M#lo}BU4n{V=<Zzxn`
z|L_ub?>@jPjSD{IA17UZMt@}th*yzM{JEc}U!CL5Poxly0i*N~9Z`>`>|MITXloZ+
zmVzJ{lkzK*HZnE%I-3TiNHMD<Jv6Gwa<Sy(_C00~kEzBZdK87ez|P@i_Mf=UcyAZ9
z7EN^55rV_^mS8RP+3XFJ*I1)z+68r4VQnqc!YECd&UvTV5GQRUVW8J$)I<K0cD*&t
zH)J$lhBD}3JhDbwQ)8-fHuT2stF#sq6k}M+PEx}+AYAJ~oQgv0C9k&}5i<0b@}4rK
zS8K`I%nrR-zMAl#^f@PHM~bA_%uUS$mErviY=QN=$Y)K=E>BUdbIOOaCN=W8fhD=6
zX12p7%^;m8*$_(VbG`nVCJt2SkRrO??T1z*qbWhSB==hKvqh(GlZ)<@@pPN%berk+
zF4bs)DQfx{S*;q*W^-=czR%gooW=Qy<*LE^UYvO`)T4sh7)IMQ)=Y>*s>*U<Z(HK;
ztil$GqAajhXq_lWH5!4Id`qhh^|)r#jHOnJk=}R0UXoBxjL}%DsH%c87HgJ>utw7`
zCDKKY^F1mWVkt&qIyH{Mz6$uLM1~b3QG0y!5-b{IuhUkcqeb@)ZA!Vfyh~`bNuCOM
z?_DG2R)d{P(YmB-BTW<7*%>qKJ)!T>Xv)IinhvWq<5II|JUVzL6GhuwP9EOp`S(1_
zAAkPO(6+$a0v`ii>#&8PwiR2uBMM_v^vH@1$0{m5`st6cvwwi^m)w2pEmpzf2BR}Y
zY&xP6uM%qu(bpCxMd&Chr2cYFw8SBsOd5^07Hyq)OKc3G#Fr^Z25ni0R$eKzGQ_T9
zl+ItdN3+s3VwC0fowxbm`#(zOTE@1bTb&a-F&OG*;<sXpg02ztG&=DWG1`h2x9c$p
zuqo;qt0JS>E$XwcG2W4L-Z@8EM#@o1n5`J^?9sIi<HL)Ty5sEj8=wktQI8(yoW#t;
z61&>tY(B&9DvHq|r^hSKkCt56wpbpr8+*==A}jkii@q0M5vv(jHQGh!8&uzbcSNsg
z`W97MLg;WrVmYFY3AFN#GWm5FrNn3wjlxpIfYx4ya9R_zMp-$FLSkRWHs#HFPf#gJ
zFNXDwzfnW~pgevk8`R^szlotp{Cc6p^sdEsj;`<MLuBrIyld&TW^4O^fF*jzsHiB%
z6{dt><c5qy5$3yO))5>&C^36e((FhKhNU+Z)hMu2E_vaFpTpq*QbA&qW&CDyx-K*`
zSxfR$GgQR}tddGlE#cc*N8bOzOI*D=<&S>%Rqou^RMi;Js30Z?O<|}n?NL40;{M&c
z6t=(~8;Sx<8L4(A5`^4}cBdX$lvlL#o_4Wfv{elz1bq+Q1-iClQcXBNy366=d+9?E
zc||U|@`PDu1A<I3P1ERThk|*t@X#A*z#6e66{+hc@X=;><E9-nMLOw%q)_oHV!aYK
zy_lxn8$~@4na<|vKoInHlWHdbqP!0cX<dBJP(2lad6He$1?AxmTOa=z_s-6-<G{!N
z#%I}i`Z1y{a4}+oBB1D8#2|(S)yPs71>LG6G@UdW#z=gG)>vgICYE3f3X9r0WXh8L
zlZP~`nl6k)&lVj%IB=<jj^bVz)3pyyiC!zc=13&<ouai%!P-M9nF>Xv721eh{jH<>
zyz;GAh~)(1=X6R98D+ZnXd7r&7JF_e%9^68smC?8k_NOI7gVE)(O4+HRb5hz3XB1-
zG8N2H6gFiAuPIr*^K`3@X;;(LJ^gl|Kiua0Y{vX##_5AoR?Cj2ZQk`gX43>I-<pG7
z?U8SM1c;JhNOF)?3E5v89j(~}n^|_`-Zn-lR&yf|Lqc)ZaTl3#n9+ry!N`uLD2#^j
zxZvtz`&_?%k&9OkDeFoK7+naun@q8MyTYBHvU+gF(d~!aJL>qv&-{oqbbv}umBVG&
z81l+M!J%}()`sy;O;N$l7VK14`SxoM=u{-OE6LZx%H5Sw{!B;5411*yp6l;<jyJ#Y
z6&YH@G`uhdg+ZHwYBXj%*}|0N5Rs9!X2ad8<etd!N;;Xo`3Qa2Gdnq@SuSX1GYFb`
za*6#L7udUe$Y^VdHlpWp&LM8D68EH2Q|7fMD2+7+?|V$tEN6?vpwWp{D@}Z+ev^#q
zh8$GFqeew4U*EL%guGvFBA2P5AMyqnh7~a*u?k|;>pfVhlvOHJIY}K^n*a`LY2cO1
zAVLipjMA{mkVW2E2<uE!qYXx^VKG*kIiHLNwbjGYH3{)`N#{O{eDXQDp{GTw0a3`W
zWB7RUo=&3E<)buR%e;1_R@8&*u^P^;JUDvvK9(aa4KKACGB~qi`n&zmS|@`#*#~?O
zglKE`kPm+B$LUrMTNFerz4vr&&(Zmk)0;OrJv-<4?2Os@jOA)Yh>@zOnT#e($0O?P
zF?BtnusMz=3i_xOwy@Nrn(f^k%DO~pE1#WGgXUB6$kPDHd$iKxnrjthVd-1BuEF*6
zZG-ngvMQ}clZL1v-`j~Tm74v@sHX3%<N_OmQUMjjyhc81*(0nrD1zu}Nrq=>Fs&RY
z3#O`2QPDX$2lHp<)1XSHVYHmFLfy7SiSvP`X|Tr9(}M+TEzWzkCnH?jLQs^2VWlMR
zxSka3E5(C{_xR99KFAlo@@1kfq&Dy|(PJY`46tmmszV{>VZkb<lPN#)>7M{1^OO4=
z&z2HBm7K*<dXV`@tD`YkOf*C<gMyeEOO>LQdfyXL%$+vL%q%zNDV{C_VOUseDC!co
z=u-+pWWTI<^77-nbbgY`tC-12SgZN=D=+bLKl4i{=Ll%#vlBv4w5tVOyTsZ`k`sKu
zhhAhsIz=VAfFfXuf_hw`RG@B-nH+tM?eT)(G+?p1psGi-r!&gJ&@GNAFI*<{E1ISe
zFLt$%dZt^W<S4~zaU0zPx^@pW))ZqXW^ba*lT_Yu5bvXAXT<F-4zAq*VC5vCq8t~T
z8cXjZe%Z<}#(RQ^7?NRL6>+A(=!&kNQI4UHd0zq*gt{#G+&VD_G)4!onkcMwl-0!G
z*Zb-)jFi1OY-Yj#06l&v8`KPs$CNqggQs6PI@i)i=z=E{P>grj+umVRi~FrD#I8Gp
zj=ppB-V;071U-%-Dxxm{Bc@0(fDV{60r6N&6|q~6&-~2CQH4n_LZ%_wr~z@!D2dw8
zuH-f~JA0-#GtiD6F$#<^WtAEB54ZWnzxWZp`lUDd+Sl(<nH?Mjz3TzR%6WWLtd<Mz
z+`R=nhSj1Q+d6baujtN~6jm`F6+#hh0?V@nTNftiN?g{6iV!@_s>O{u#$%bbhTw$K
zwn0T)&-fE!I?%O-O5u_BJ(zMRDWKP>jcc?@KVm8c>+UjXIpRjqk`Pgy5m1$AK3;P2
zhS!=Ji<oqzf~@EbdRTJI%-@%uJ7=23tVN1z4}_@1Xax&fyEVH{?XsA^pTlbhFxiy@
z$vJ}281ddGx{g4=7MAgJgw~4rY)%MmiZW0MJ=W+F%1OlQ10I}Axc%CS7hhfx^?jba
zvQLOcvM(}Xj37ms<dm+EAk5yQqfC@z669u$X*MefQSvXN3(48n<MB8XV63=UO{rDK
zNv(Kub;h!Rf{-YZnpjjoiBb-uU2@u!V%IGpcAl_w_*p}kj)>!u*i@8tK`b?)vV^)I
z)&({>f<_{^D7p{VV-TB4RYrC%R7}Q--QAkiqNQEAcYTiu3AGwXg&S16eB0*5y|z8d
zMqV4YcNiAk(EJZWm+w8023_2Gni_|LjE#FL;jI}-Me+lckwUC0H5U(ex$*dAE<JWg
zy}g63#uAJUNgL}3Zb?|4vUu=-x8HuqowJU{*F5#U_X#yTM>b{DFE=nDU6T};u7a0f
za~#TebO}Xes4B(o_LLXD@*)w73qGN6YVd;?VrIyLC#b9)ba~PQ<pGvlfU2%VcbCjT
z6nX&ChHD#h^iRspjH1O~&SoqYOZruV>pRMN%E86!Z0+x3N2R#QMsctv?c=#YiU|SA
zGjFX^Go%dKSmqCp7y|~O|HgDrDkX*aVAz&oU4-$m840z<>a%nqFSD@Om}PRu+9_+G
z@J6-4IEmRKI=AhcXHLF08+&km&S5WH7h>73DCS&ELW+2i`&GNdEYcdhPYMY&oENEm
zB?O7FP-%Zssd0|1n|PJhf7moH%J)J{&zoI~VwTQC)~uI&O_RxCK0nwPE~#}6tIpR=
zt=A(YmQdKBoxaPD%(hYr@Zh?RW;w?@FvW<Y+qby?@F8dC=gj6SmWzg_X@!vqlvPPx
zm+b9qQB^gCE#<RN3T+}qp@=F#Na&`q7;PwPF-{s!CfKr+?9)y3n4HaO7(TspU#O&;
z?Ol&|j;?KKn}$9x<mD<OVCSRRu@Q{M7}2a&rNw!n;wtAPR>ewYy-sWitJcGn;a8$~
zR3L_G`FnNm(7nU=Dc92n(G>=XS(#MAmTQ(Ru~~wuQNi-ql><hjEmpHL`ruhEPr0x=
z<-xrptg(zIV-{x(qfx=7gA15wSuJK9UfJhof9g}HpovC-2IqU)(9^dbznT+Vpzj1(
z?fWH9zyE12-*}vMamws`k(wGs7d*#DXWYJh#L4jqXUAu>U5k&3t({#iKK2+-J^ch1
zFYPfNmtt@joH$Hl2+pCkrYg(S6uNAyC(h<R-P;<<y234&GB6-=X|hYzw^$_iUMJ0Z
zjEYsWWPkS}F?5(%(Dy55vonfvOut$XeI#fxR0^&q;)ez~croZ3m6oCixaFKt=$Niv
zq3Vutw!nslvf3u#SUxxrdb!ous(_|tF+aw{7F>f0j@W6!ydiV}T|_WTYzq@#vwb>a
z`IUQYKlwD<tGl#!ZsKBzjw7}gGtQ4^cx6!+_VG^kmGLCP2cDpQ_6CO!XPmutpW|Ei
zS)H8F_MON{I!~xeOf|+e4b_&%YKU1HrW}eO47RB7BnR-AT)>UN+d|fNpWMoGjAWGG
z54-dK(4hWcJc5E=dvv0Pwq1)?3R_lGg=K3}qe`(Q55W`qmh*N6swad3n<hP=z*}+e
z)$6Igifam8G?AF}M$z<)CYJX+JLQ=do+IkS&m*R(NupZi##9X@!J4)@G<Tb_nRUS`
zXGGrElm68z7->HKV^4AM@-~0?yWc@E1w6eEw5KhN>)E?>g*)%O%A}l98-*d@+K#9#
zXQwAjb_})<^p!rcI$P2<EoE8A&(IM2NZ)nL=Vx5FbO|3LR&PW!BoTdfyGzRYLBo`@
zV%L!z*}x+|asj_~Mhw{mW0S76abU`HJ*@v6HaY}B2+?{dgF;BuzFg$RO!dk|rW718
zAF5HPdV(+})Pjuq%I`zY-p$kX6f;zgG<%n}I6b&P(@54~-v(3>DYU#c7X&pxX{xbh
zG%9GCj&`{scfLwlnLOISWY<!RD{kJI^Nlaw<<)QBVL5NWI_|!HkB@zPCl#*>uteVx
z`XUvW0hb&zLlAf3=sli*auJ-^&IgwwWk_Z+&f`NM_&y=Cp3pf=3;PGVl&d-C=SPf!
z7+(px7r+~A2xu9ZP?#3mw-m-oGzT@6DXFcdu!<T@83SeSDU_jz1vY9-D4;h)qw$G`
z9D*l0kLew%_cF<kj*@^f5xuP`b|(pSeRm(ZAkHq9sU;iMCmW4WNJFN4cRza2<mPg%
zAB-D+eRGnNe}+oaRFx66JS0IF9qYX$GY~R@D%7wG`&&Hu<W(Mf{0dupJLsxJ7X_e2
z9~3>VJELFCSv|PV{ad%VcmE;ht3Wx~X8YhWAt<?rV=pFW0_i1$H{B;`ctzXd`z3TO
zs%=oKmac6n>yoq66>gQjU#I&n4SZClNu}YHcL8s~1&6T~uSCnKjResvBE}Td<1yM=
z(ZMV8D4M)M*-+^Mh6vty&W}%VeS_~iv@#qVTxNT3kD{uHO3QtbvX+Bif1dI&s{y5p
zem!gh$x~dkE6QpQ9nOeGSRHC(Z`TP055w!q2f%tTwqAMbm6Hb_o0{YtAMwaUGk?Rz
zB>oYCY}|Newf?zk<9qX(6e{)FMSY<D4g*A;5@J#W%`jAwCc<bX6wQr+N>q|Ds<r5h
zy+=F~GJ2ZcZ%9UMVgi${RrohuozwuO!NDVqrA&e#hSZ?uf#lGCKYE`=8Gb%`@8)L6
z49L>E{ZJ}7Ag3YB?Bt9$Rxk0+n{V@tuY7~sw;wQHv>0nCZOQgzn+rPy)+BQ(%Ydd!
z*^rurD6$D<RB}NTed2U$LRD3Qm}-P=r7Tt(Y#}U!fhL`N3IdW>I#8g2u5EG7(RCet
z*VFeN=YrHpc@NYGg*0!Z$p=B4pQKqWJSfkw*F-C^EGpC02ee6iGeSs*ni4H$MxRwg
z&W5g|GL~!CFLV9I6|P)6;KKd|#?uK}L%V1=zH`di`3#IkjVksJc8M->|Mp$(+&`mf
zRy=v_aoS$fEa&X)?a(zXqv?o)?QN`qGaZ@DPI%(UOSFA2$af47+*<E#4!|hM@NPQp
zKRD)NpZqAEhWYt9^HtBo<5OOG@ikuk&f6^K9pkN%qR>pnBlZt25R_%Mob$D>ew9D@
zgU?frMm+QUd-?E(KggxaTNop>WXUoXG`g?_T8RrZB<FrWN68MhWs!z{(zx!{6=%)4
zfH(PmSD=F8mA78uqaXSxO}oVE32nP1I8Pr|xTZtvg5cb`9*7>$*s`Q7Kq-&w8t4MG
zi)`y7$_KAfPAs8!gsAC5PwbZ{Q!%PXXcg(tXAsV@s;6Jhq|Wbp=pZ%{C+Hi=Yz<BU
z3!I0l*37?sACxERilTdnZxwW&aX;tew84Jp)AZYWc%@mM1|ATwr7+^^r!I2w#wAqe
z@aHp5?>yxA?p@4s!E|p*6CJv1C{=|w5vP)N7mEr4Vi}xx3TO?=%KcLmxTFn^={}Mo
zHJ=Y^EjRlCd;Cx~sJ*sSrJ}4V%A&+p70Oxy;6h959Dcq8?=ii2vBqeyrbnwzGUJR)
z-&FJ>I@fZ58k8VT#MFE#e9**bDR&Ac+buu+i+=&HlDk?SEt~b6noMJjB)!o0xVfdr
zphC?J8mtk@ys@jizXw#Ixqf4xU;GOn;x~W$TXeGm?WF<goh8swmIe2Y?oih?w$wyc
zbR8`FC0DLoNEoLDAL(W*+Ihp)_EbdeR?~Hw=mY1c54m`_M;|;^<=|_Tj9BtqK52o{
z+tQe|mH#G$8k7D}xU*~1Nj20yYwBdCyrrUHbLhXu2y<hZ(baVjwpQ|43`k<4Cz?#P
z3v%7PN_k%!^y`p5ub4GogGO!x+0E!en!qa+7)=VUJb9V7UwV_}>4I)=o7#IYq-Yq4
zg)v3JbZd-4Ge4V2;B`n8G82hq&vbW;kD714^nh=F^(}7Sd?=RsPSGz%+@CKYO1`U#
ziqiKOe@@$3n%)wl!ut;IdYt$8pw~s8SLsu8Dfm1)_YPQgvi*<f5HT*I2r!0D8@4VV
z@-!9Nf+-ZXg0i$!DQB;$Dyq7msw=8mV@pF>mK22@%#Vt~U<!*ZvTjgke}<tc+o+%u
z<tS4dbJ3hBoH`*e@BSX+*TI9sE&E6-xdD_8jX>C-gzJpTq_N2(U1@etP2ZDugQzyY
zWuPV!)+R9-_0ICU0rg3SHTB4H?Xd%{-?+lw;U2cG(56UKc8^w`;O6+n3I1fk+3kni
zdGmzX(&LR~df@<L$M`-#@Tkxy>iNKtB%7ZZyhM7<&S_5{5}KA69nGSrT|{iX&+Tt~
zjn1cx%lz6?P`Lytq=8d1n$reWZ6oOWkUK3yr^0G<QDCbYW%MJ>1ep<$>C_59wAAf#
z#q9iy5FD=U8IQ;8Uf5?eoeb!!4`NUvkqK-0f#d~yyz7ZRfRC6MWlxI=uMH|{maAhv
z`b)pWi@)~Y3W^wFnvkos=cr+ilBmX{h26Ah-aH<LeTGNYOs1jggk-Kk*GJL8M>(uQ
zA<V3vH8Q*Y?|{fAlvu^}5LIirPsP(tni$DSP5M)#aYhl5$P_WLBU5Kccd86D5^X)x
zkG=P}zQuc=24-<UzjNz8&>Le+grzv_6H1%GW!Tz0&@jW9vB}8MYJDc8zdc+o^oAaH
z*yq>RQGY)+nT`DcCFgECZz%PMD;FQ5Z#C0NoAjs#V<fhPlx6LMPZ_&nKBJB7w;Ev@
zxi+9RjHhF!+Y>JAUyy9mq+QLr|Jqi9q83)_B4Y?%oMj7RQgVUR2gVqB*C!`=$q|yF
zLl~TvRbpF-TuNe#Bp%8L+L&qRDgc4b3({wl!X&DsSBa|-g6t5{4J-krvf)<5g^p2e
z`M~?$%L^~Oj~h=u&eryrsG#eXEEfx!W=ZQD5ZODNGCtgbt`|2|w?Zj}tqLw*zRdZ_
zL#nFa@}(`5+Mym5JpR~4TJI@KgVl<<u30V4DW@Z550*0QP3B0lF&b1a9wx>Z^mM|0
zVY&L`bNJ5l&YO4mlRx+}H{ZNRS(iNX<P&V~?NUz+)2a9b=)w}Tp$(ocM9x=^J2xM4
z^TnI|<1hZdxpeIUKk~^Ba^tCMSPicCv`s6676nCV@m}ux=-0dtQ^@D5Vq~f<g|1l5
z8f+!W3~`|A2HI6eS*EN|?^!KZl(wYl+N2fsX<+ZE3zgPNz{Dk{jc6tJW)<h`niZ4T
zcNnP^bR{8JRM$gTP>w9N06#yaU9@Q9QC5lh)vCoUduRjNL0CcCxa6+^eJgST@4#4y
zR_Zcq8cY=FfwlqKIa|v&SbkHZKJs%ZsX`GcXcj#vMdO!rU5g3<YZb-gm$`J~F=E@o
zaz%OZGVOeh^$}YZ=vWZY>3sBrC`L!h8qftU-3!JPbWKn6-QXV)m6qBnu~@zz42}O^
zc>GW{sMoJw%5yq;?`YeOem<x1o*0&>7?UNnrtlh#6Zc*3(Z&lkDT0a?lZG9}lp>%f
zQj{g*ohhRoLokM0Z_b&m&Y|r1%xC``lbtOBE~x|reY4sOR3(*j%~YC<BqVLU1oP3q
z;Yzsi>(IKYbs?;iZcAI(-5>LB{lbU&^<V!AH{Su}dU~%|bm#2t9B}jQEoO@uqp?L(
zGMlfcMitZPL};pLVhp&}(au_27l<}e)CGOx2wh-je<x9NLYAu~y0QWl5>l$uv@<yl
z>RgqiccDlSklx6bOU-CDl+np)cPRQ33Y}ZgTxIB-&xOzmA3-73S2jCEdX*a7e7W=M
z(o#4Yy)O7-rbv<~F<Oa+CN)DLigkX-%(5U<y@1fLivo5pO<6uTpk1~sj+ad9J()_T
zAEFhKJ2$RbHXY5XBc|!5iJq!}dUuOi6L{stcewrPIj8sL_|6kziR)V;7G(|Iwb-Hn
z2cypO-m5+HJGc4Ttu2BXF|xflEtdt+T9mb^$dR}tZH4k>tmyZ|=p>mHY0)9+M75Hv
zN6^M%Q;{U5D?%wng@LB0hT>Tbyuj?7`3S`@e;xzku^wqe!jS(qAWIw0tC?Pyh^-8f
zP4Ds}Y&58wcWnCVu)sr1S;D#b(sFMN?w|?T2`V*dDzhxsPrPXgHXuP^BRVF{g7S-q
zDx^IoF&ok(R%^JpKjq1%E^~PG06UVwo#a+TbnJ=!oNjSWSgkm{e}~&|-Q(o6$C&~O
z&7~WUr>!ZV@C5Ht!>xyVHYT2Fbe_<5xW$U*XvX5fL%QXHl`_mH7dSq>&#LJp`XFy-
z35Ctiv1t&em7<7|hwt1R)=-EZw8f-@-IN8U5Gt<Faaq$j^?EOeG0?Ux^V2iBwiTLf
z2<+`&VEe)a3R`SAgo^=2*RNPC&MAr-rNrROdq?n&=$#<?Vx2)CBx5!CoVPDtp^FY(
zmc;0Z(aL@?06@vfH*Vyh<_0t}pvnwyVLh-L=+&(C<@tWf=ip{@nvunIzR|#cUY{^(
z^I5)!8L)xurhakwIXW?ebgr!i9jcPp4I{@u2%f%o(tze`7cv7t53&w%xsHh`w9Zq7
zyli7a1yfVz!$3K8E^>Z6V}828J8{Ug)-c`M=EA{6in^9TK=g?fwK+RZ!;Fk*Zw|-e
z#`Q_tRD7i2f43fy-aA4Yu@o392-s*`v9+_s-f~GfThMhqeeiVMinbNRvh6yowM-^s
zY0Q<l_g7Ux*NgjWsV%lJY;SF`dts01&K8rc2^yHsm$Xec=tGq*q@gfklGF#ugp_kY
z@-6!yW@V+Vux2$hiGrR9)2q;y>GlqFRZ>KaO@vdTXEtAOzE~whJBWj?^+amC?*)kq
za(89=bzZR{4VDKPQ0%#S`GAjo?1Mb{<l~?<M@Pqe`=ysSJzcO`IjL*30_EvDM`2CU
zOB+fPF<N8{x|FqHqGn>iHA{ACE9asUjp4MiXl1Y%wkG28yL(~G{cuX~5*??a0;4dh
zK&f6BEmaLhv$L~<DN4TerLXeazxFwz8*%yY2}WBL2Zsd*2iuIdCWPqeTuVr^xmeaX
zr8(F)Jn`g3KJlp+IJ-aR>tFpAzy7a&kF9CVPyW=Wxpw1<II_E*7y`{|g()m$SrH@@
z5TY<plqt|%&(8KPdUwL&l}Iru=|d;la$~rC_ckAR;UmoEGqfsbJ4e^`VgS_V=krXa
zHI*&FH4u6XV6}q?HbiW9%Ehe}JLZ_~%~N6zMcq-D1#}UDqB8++6@3Kd#9IRwiG6?`
zqDwWOz>mofMebo!YkH?>T*UQ}*b8$Y#DEDAQ)+Cbuo3FepzpoT`ODwLzW*b1Y0#=n
z0nwo<i!Dlg3%%=TyFd&r!L?`&#f?jp`WoSUPWSKxzij01R!Q_>q$-qSEnrMZEG?!i
z31>5+>k>Ic5Br)_6X6jC$Pd!vhq6KKdPU#%bj<?S&2do?LqSwQHX&7_2@)=E0)~i<
z;$m2oO0*bgthOQ?FD%y9)K$rNYT3QE!|r3d6r+N>x6gR%wKJ^t>|Yx5!pA=#6DlQ>
z{g`MkKGR55NM=BKI0#iX93dO^#En8AQHnDakPT!e>h<#t(_PS3G1{{H`OkicU;FhJ
zdG(tfWh^Ui*d5hW#;{zSvsxCEX3A={VCO<5w2sW63K7@!G_w`1lg-o^L8<!Kv%7l{
z7e(6;vx8xN5m6I!ex0&wG%+$1Pz;)}wP{pF!Nkot4o?{$X(V$kK2)>}P-`0X=bw>X
zCG+4iro(p7%7k>tsgy~nlu%uiv^RoUC5K-lsIJL%VsaWCUYJ&i#+<DFqe|~VbUq;{
zu03~|x4-i`tMditlPMQUq3k*$+9<ZSrvwzI$1~d1N+xwiQ&bf$Xx?~Z&aK-=+`oNB
zdm{N_-g|spf_0Ro=K9qg>e3Qc4YpZOg~-7b&DQ08rtiH<Z+5^!G>Tx>h~t_~qJ|3{
zm036XAZ8T<MKC#AZp4|u8oeB@i^>kn`HF6!76S#*tQ*W>7Y>^}>Y8>pko`8tIqN{{
zwSF!&so#S@y<3mW5dYVCH~*&Bwei&u*0jWg3}=0Ff=nOjc83Ag#(u!SCFRj#dH6-8
zNqKTiP5Sq(iRH=bm$`K1BGq(?vL+3QL{AxAgIk;u7Bl+uIp_C|IC;1NJ;E4IRTNx(
z;;BSK^$;C8I7Bp_8$d<o)kUMR<1N&I#f)~ScdyVsywBOgd-U@K&F#DNZA&Cm|JL+;
zjp%RItO>-WQdwSm@msv_LoYB-T5_EbuAmg<c#J8E&6aAt=OB$t-?W^cp0Zlbr7?_=
zq8@Q@=>p@)G`anfP-<ZkoS(kU{ncwU{W+s}k;DBL1j*?W8^QIsL~Zn5kgG_Zodias
zDYKJfR58T|hmJ+^I|#!%$XfcCo6fMoTwr~#J<_LctP)@-(l!y%f9(!<gyx$Ox;Rk5
z<^I_)0efVL=Vom)8&z$f^l2z*lKGbocr9zOU9FhS7qne3bl>DITG|3*tAz3;_v6G2
z2nh|0F^Usu8fqCW?y@17yM@%y=Jhf=UvPf&K8vHdXwniMLGL(S9y5P%&edn0q)LpH
zVaOFp!>xgSu-=m*gQ?lZIBnxIcy}MG<&xez3Tv^>qC$Yl1c$iXLZEjoP21D;j^%QR
z_lm`0PV0JXVVO>AwkA_yf~6sFSz5NYr$VtMQr1;sn@Q|TQ59S`+~f4*T%~gQcK`r@
z07*naR3c4`IIA0-<AkI!wN{$6e0j#xDShV|FXt>*a|&xDGdFk?aIn3{!DNrncg)V`
zoG({6-xIB-sw~^19rkxG@b;bC49Q@+z?ufUIQ7P1*r!-WAEnPCq&Pnt__?3?Bp?0Q
zM>#t^;pK0Bhg<hfI9qmjFKj7o3So*Vk0zkCAvh(&qYhN53EnZPEw!?^&eJp<RvTQ`
zL7}i#43(@_lAlTdD={Ohrjyrb%49|q&?@B;XoEIT)g{xt9m-M3sF?8LSHH#||IVLL
zkETq<2UrWA_{sP1+=rf#L2*^kbt^z|{Ki{+@t^*#m?w^EitQ=I-Y(Vd1t!-ouz%$z
z`Oqgm#+U#2oBXr??zedA>1%xI(?81gg{c@<HA?8XbdS>7wZdS~v`j8eI4gR>g%Kc#
zXWolWPEUCH>1R1USy5U`vzk+wimq=6K2X+{>8PYMf!MFGI>=Bl?Tc0`s(8v{JLl;0
zd$hMs8TkOAN4pZ`9J*7iP6OU~oCRxC%6>%*r<@x0BM1Z?B~CHZ^)0C22P3eq)5MU0
z3jslb()d223K?XUuBUwMYjlU#VE=$%Wlsy3p?E3E0t$)f3n3CZi}PZ@+JpvG71U2%
zqjb=noYT){pgdMtVNwQ_qGSN28BNBt=jWK6TI5#F6M`amEm3X(f^z>u%*+qU<Lvl^
zZMWoE{gCUI9AAAf<+izuG9E=ki9@SEsV!AusEUHpmXgV6G{#tonb8HNRFtK~R+f4^
zVzfJ9>)MoRGUo0p5Bd7%UZn3LrV4!K7e13R>lDgbDH5YeoN}Gw4Q%?oCI*MnDTIDw
z3a*l9BxYFTUn>$a(in*`8(AcGwV1N4G?XL5XMgtn{I^G6;Nfj2beibdog6S<tmt+c
z7A^R=U^;zFo;pP_XcW-}+IdH_=or^EF+y2b&d-(<MS%;Q(4TV*%z%c4gdHax6QVsB
zj&+sa!@7D`>3s~%U!Gp6_22Y4E@7iN6)MDLv&u%iKmu2jp;0nV6>++fCM{pi7!6tL
zrjiA{M?$|<BEV$oR0N`zrqcUBKX2)mEu;Ni^tc*64^mWy0&D?s%W(Mk6>h(Nhw=85
zcFRys3!-uqV@olzoSn{@pPWN04W}tJZ@#_c_FJcfD$w>FapfQed}rv|NVU`R<kMGq
z`UBTEJQ!hpK((H|8y6Yro@R2NPI=g>l0HtbeVsw7OmWK>e7JJ!0yMA7sIy5)S|zz5
zQrU@i1Nm1W{Yh$0Q%ujgl+&q^6b+HehMNg>mybIPbnW3bNL`=GpYz~I8aC=gy#`{x
zFW|<bLz71uNiw~B1DxL&AZ?nT3=MrQgjN0=*2qnE^;H9HT&?c|)_;F9Zb_%-*GAko
zOC5Rq>LJ&kyvAg2hvcv}e74bd^vyYbb&6_N%#I%N;NFZ5gDNbes-!JS4z6BH_gf&k
zB{4d*sfO5)4IZTICG`kX)|jm=s{MW3#Y5`*7nt6C$WBo+@^A6VorgG6CB!{9s%bE%
z62(33X->oG$tlfzflk`Dn2f!2VJSzGw1*{k&P>w}fu>n<etOJuz93Uiw6RQg_Sif8
z|FZXIF}7vrohSBN)86|`F-P1fZ_b%yCYdB9krJhns#K+_b_-R7?y?QH0mJC_yA1<|
z;U@!r@{?`&!3GT2fMHY{M!|MB>~aIuWt5~Eq>@C68aQV%Z)V<kh&j%%r#1NDTl<{2
zRKFNbLa<l|Nakf^+!H7EUVE+Y`~K5D)>w4T3QSq&7<`M-f#}Zg_A_E<sr(zn*c19d
z*SGX-OV`zez9V*?=ravoX+rcQ6erK0a_iN1(Qb@qg&j<Lau+!4iwTLtB1tZ)tamLt
z&`Ye`v_`D|5*2qCX8siIbR)0$>9ssGnb#hD)Os7cz7GsK{Nepl-V26&YNZrbD{SE~
zP7^|+>6RQHpU`w3l*X7r51-$|i22R>9McNtEIxXokLV}^E|XDnrNQO~DXD~yQOt^x
zrmI^{p1k1v=^4$u!6-wK8zrNmw1!bx()+;K<0ssB`%SbJ#BM;d6>2jixIPBZ*Y?9S
ztks|0tEpGG&=FllN)=i+D5L2*@c}Se<BY|dK)vkqV8;;?lmn&LIehf=ZOeExVtZ$c
z$!tbdl|qAzLXQ^4i<lc(r<qhUW?O=0$29EVVmYd<$=SMz5PD*Ylx2x4G`4b#XA_EZ
zhc$-6SoUYT>}+i@DJGmfdB*Z`fhh_mg=6J3R3lUh_`c!Ai{~63-Qd~TDIs`iqLt1y
z)Ues6b(Xiv1D^HikOIH>3t!-G{k^}-{U81XfAq&c<oLWJSxdrY7Mn<FbdvR`tY~pD
zHlH^E--`j9Zv(M_S>>2I&9d)7%ivf6$tTbXtMj15hqck24*_cwF$Ap9n@80|CbKPW
z-aTUbV2h+IFP>lUC;#T#SUqBA?-jbf=5PLkuknSieT9$$G4=G(gNqnFVOox`&yOWr
zIEd?PV<Bu$sqWroboWhOy}8TH|KOW^`D<_S`@j3g{NMk{Kjlkbd5^FD!uyOy6~60)
zvTBAwrhqD1Yiwbe?QgT1FDMVTsOL*eD~6hjX2siYzQg6!1x`DB@9FA}(PV;eBDOGW
zZ;eT@BZiJ5yMm(=Nk^=TOu`AT@3+{?Gv=S2F-jVx{a^wPUSbmQQR0M@q*kC(uAN!a
z{e^rYGAN~_)>J8xyaHuOB(#N6Z@`Ke6rtTPu$0Lz>?txyOVZ;Xk#>KTfE9*|ktSEG
zL^9cQF-QIxS7Ci5^p@azLI^Ay@%FGg6RQ0ks8`e%XV7&R>u?Tnl2(ecs+f!_;?9)d
zJ3{Zp^TS8_KH@_nMoaYF&v3W-t9kugHmEzQ=9}s{|MhQPviIJY*Z!I1|8}lu1E4*X
zsVJ?bDhjH?U<-#U3$y{J4O&~A5t9vNC9BcR43n)9)lNaz_I&uKKjfqDKfwo!t|EFA
zdE*Oj(e^EUAL-9K{Ia9V&Zou{j7MWelZx@~l=1c$T@(-l5QV-nOmPP{k4(2^J^eI7
zZ%NnjRLMjc+Nc4mib)zUTO|J8-+iACfBcMeu4!YW+9|nQUedHRuCx^6g3=XvI;Bu5
zpfF;X(+G+>+9^;-gvj>pjvP>05xt-iQW8c~GOVlBoX3}mzd&w8hm4J((a!&ylFH3&
z0z!7v-IQNaXQoW!7<6tqU|~Q{W6CalDNmz3K6-p7x|nv=(JgzrWkX;0^j(KvwuG)B
zG##<&Nxc+CF?f70MlY$a>6?z|dwk!~2SAAlOW%09dd2?N-sD&Q<NsFHIb>Jg92F3V
zL>1URns9MCVZLgpmKI%%QC2gaRQM2h@#K_#wWQRB)I0wCTR-8nNvN%Y$+*H_^aO7O
zvC%7DeP@eb``cgP)~kD{BA`veI+<+y5a^nk%caJ}IVwnsYhn|Zvu=*k%Z1lZH&t>!
z+0aG@QeTcfiD^g?%6~~~Y4nFiEW2+C9W||s;Iyu)hatiS?~$h0u!f>Gn9a54BRf$J
zs0Y_;_Ovege<m+=tqC1|rR!b98bXuLJQ#KlzwLlQtv~9z$zNmG>qcW}@Lx82*>uN)
zNy*U28pN2)ScTJYYj4C`Z{B48a0gpe;xrw_xFpe&x=Z5n6x~PqRm<h^6-}$KWy!d#
zpt4L$!`8teLhlvbj!GF3R%RXHv@z=u0~L%SX$MyJ+Uab}`0$9?@d?`pyS(w}89(~y
zAx|zAwAv8OFyOGVhBEzJVnZz6HyD+O-a`nY{d5U8E^*^Zu2*iDLW=aOn#;>`n&pMK
zXD5jdn{98iyMM%ZIu&ZV+GGd&(4+9I+7lEj8ut@gevIK&Oq|fRE$wQB@0#ok8gf=%
z5T-?TW5g)>n0R*lj5ogg4W_s6P(S~KBBSOKPb0%F6xZA}<6wrI2X<*Nbl;$#*Ij9c
zwKSlJ*Y@1?Qv9#@^ID_4e&*yG^k6o34X<7M+777M_2xQB5hq#8U@hEv^;JIm>;dPe
zm$;%rIW|b=KrPm0{aM%9u1fNb72@ie1`4_e6&du%y>HhMDDAT4{OJW(C+9TtB_<`d
z3P)L3#v{kpv|v0g7#A9YVzF%b@v|#dmzPWrce7(^%CR&<^wZ{At0BsX%@%p>3H_Ng
zszd&>N}@qaKF~L<7~G_Uvsxm%a%{)tJkkcw?%obv(||Ip>W02;aAm=Cd&>U)9;3;G
zq@XM;T^E_pmo#mM>q16J3s8xws$|>G<~kwBdehcoQljqbjJjo1Q(2-<#4ZtiBK3)}
z8?!Ur!h}Rux6Gzn?C&1Xb}g%RNs+VR&9Y~EHe=3;$<_h-^ekt1%F`h6YL2KAAtSOe
z=BSSlvq_w+qS1Jls0=U8<}7+eF`W`afGERhWhE<ACy^+v<wPpT*Fj7dmBfH_O-o@l
z)`_uXQ95i{pi4(-49;4T1|K5EStjE#wydzuvb8;Cwd|yhFpg?gadhi|Td%!BQChT7
z_(ji$fBFZ+&awZ>9fI$8>#ZBS^TB(B(2|rF4=D>M2T9Xj9An5eMpEbig^#$nLN9+p
z|KJh*Yj04z_6BdhzRT8s^EdhKw|>O;zWrxB{OnVH<=4K!ox8ViO40T0=J(Hz&aSGc
zrUlp$$!^m=^7K9sJsiJy!I!`MWnMgeN>G8m&FjWWV>_<KSS`-IZZJ#COh1Lj^{3o9
zXqm*C%f}5>8_<Opc0^Rr2L>m1Wn?cm7L*R6C3lMG6a66XiV*u8HD=^zC@-dy*<?Pm
zE{3{T&R=XcJ2jgKAWWpxf$rHo(i`uCHuT95jSMo#LtpZ~sj~@JDhxVmY;eRb2#|pI
z3#|H{q!i`tTUZ0_d_}WrQ6^GZ%XBtHom=!)K@3LX^V*Kkdb*Hk`+)BgUjHm6?SHke
zpUVdIHLdxzeaFt0r@M^YKMzbSV!%>tF;*i+Wg+G*g~2$J6Pkyds9fL~gDx$$Dk!%c
zu~IyLe!;!(KIHVYCZ>Xz`SjZd+jPDqoWJ1V4^N46gI_G@+YYBAQ8|n%FlEK$aGS~Q
zjICQUW=DHWw`aJj%1D5hpn5e-26LRj@FWhEa4uZ3z%3u_;w}`2@o!9EHT#Dn-h1~J
zKls*yh$AJ>$QhcZV>I#HINZY+BL)>w<pVmBq9(M7)x2TSPDDo(JvVOd6QhJ)hZxt{
za4Cs(IM)!=bvEOgWR=zk2|razNl}{r6a_s~N)u3-Y9~<;^Lr0iJ$X)8_4Lb@ezBx&
zTYTFQ+L~B<V$%}(p4j%NeueE5HYABSNQn?(tQ8YC0%JFu*MO9A4eK-63?)1G`UpWO
zjJ2eqpdFXgTNCQVlJigRv-|ez*>YfTdmf?;63QrEdE+ME`;!mZ+A1kV9pkMLg>{_0
zSWsWBD6JvHo<IKGPpB6)=3vC(4RHbPdqYf~Vx0KJzxfVd{lzyK&!$8IT1T7~G&7KO
z&(5*xE&l45Le-*G&UzggM(7PCX5DS+Yt5AslvC-orf4J7O467x*y6BRt1-P4p(Ovo
ztkFPO8rPc6YX!zjj+lSRR7+=_@DMQ(Gw`zU&>cSSx{3aol#g*uv0ba7hRe66WUhau
z%ywX1VE?6dVPGDuOWuL5{L{$swI}8}`kjB+puy5wwzR?oZXb?$>(zY@4<{5Omp^{W
zdrKsR8oxM$uEEb6=FiT!IPNK`ZOU<lat<m-EEJ>Z6s0331$@1LYkJza;fW1Ul%OBk
zWiva?p^QP71>>z5)!_lN!+q}Dy~W9sGd{d`pZg~l%!m7z_YN6Z$zFHkl~)<>?yxvL
zBc>=$;!5F)0%yf2#&<1k-7vp6r*CV#@9ASl3NYQ;VejaO$#jZ!71~%q&yr|Lm4X;n
zoOX|xclWV&MQtC@a!D6X+3|^Xbw<BxHqlHWFk}V~aTgec?>r}`&v^eUUuQDgp?f9K
zzIZJAXBsr4LyorQr4l5s>7;UI;d;ls)`aHuH&Ds{QhT}~LcAQclU`<;47&bI_TQ*A
zhQF>h)Yi?%OKZxy(n8lYMq!L%YiEZL0+$yH_709%H63l=;X_Z~_jn)0yrh+TG61#4
zBHBJH+V?mh$0^a0vJTVYQ6#Tu>zcDCXVe#0&`Z44{%FK>RIoL5Otu`;t&*ZNRAq^d
z&|NOr8kd}1wmiPPWV*kbU4+$|{=6|Kdiiq7lX-oBcg-O0bADN4G1g}ECW;wj*Ajfd
zX~U>0QCicQmfm}sx<i?Sb(IX)F~r~@X=c+c_ICH!+S>&ncu!GOXcOt$p04X@mNi>j
zTWDp`+G2E~Zv#bDu)Dv<)qGCZ^;qk~z)ue`Ws0&Yv1P^l;*!ExZr-@VtFOLEh>5o8
zz<X9rCoy$xL+lOP#ST?9!mnzY^Gj6H+}hjc+4IM|_4=!Pa{mEs5HAm}5=sY18l9vL
z3E541FgKLJN8m?4{+Pe@cRSwr+Anfm7u2ho6qD%1qbGQu8<&Jne(l^Z=6`W5Su)5V
zD@N!RE2^ytr8ct18i%$PpCYmEa&u&GMS+=&7?%~+ICl2-=!3yWML8Wa-QJ~~j!+g-
zKS!GpkMDiT@#AxLW`|UTA&e5g^7no%*H|I5%c3+!*`oW!$DgfZtT09l?zEUdx;`<P
zclaOQ<MQ!i>=)nV#?c*q{WreCtvd&N>tFpb|HXgyPx-?8@9~Xa{YA==!*`vqPh%h?
z&v-gPSxq@9sH#0w(Dd~)=9ddbWyNBCh0%uKJ)!TZ#v`inh(1J&)udG8ltvNOk47Zm
zLSho;>}{8D8Hi_dtji;Fzlm;BaTppHFkx+Mi6Un#C#;jZq%?G$fELDxaTX@$!KMH~
z&StF@i9pIIF%F`a;giZ|jj|R)r0Q1qlPAP#N-$PTOOip^BC}0mcEU!^HS$o@IFgIl
zSQ5K{_lf9AQWR9Q>pX6}%Xr@sf*7)nY(#Gt=t44S`w-|`OTX&rx`6YEzVmeL&yuh4
zSM&P0Y*4>CUUK`b4s{bAet(aLW}nH{h*5}CBTH#3%A&xOB}UQ}veTB%dB|3D7uI3M
z8Z4Zj*F5>`IWL}`5rS+`F*Ycj(0W9(@O<<~f5^M9ZF6X9T(QOS@`8)YOLR&YB~!^%
zXqxASRatR98#6k*!Dzb8<Y>m$?L#KJ6WnAZ1#RpB&(IghzjL9d1`Xu;KV<YV7XZaT
zI=}ftx0ez<kzf6rZ}F4A_(?V#3fVX&VO3&WO;Ay19pVrKBkv#kKzFsGYqu#&P1mg$
zRk!fI7IY&bu5`nsGa53S`m#Pw#f%tal5NsbAiq=q#LeH)LSNpPmx<0uB{=`+Io-eg
z1I8$vO^j1QX-8#5Bclrg#RyWFC-ys}WQYlT><C^q0-{F;u~Uyh$sQCD3^&sTl@^n$
z!E_}!ODs$LxImR9sc_JAgudb6-J3l5!S~sH{Z(Q%2pUj6lQU$M)!30{|8R%n=a*Dl
zqik@MIDL8wAz+>3&;HF1dHm5C)&3Dj6GyvRGC%I9iYb$Q#RuPbpLgE7i!n+ffD%&p
zBkqkPFgg)E)aRPV-#56T<nEg#rR!u8ey!;fbCMy;c0eezE>~T{zz5o3=He(N!vkrU
z)Y>L}XiZZ#2_g?M)-eU-#&`W**HN9#qS&-?dicZU`xQcQ&xrhb`(9IiH?+Z@?dzv>
zegoSqqsnXgZp!Yb`J+iAFgz%mrc(~-jJ(Fp;X3>zwWdsOe%vru7#u9M0c#?+_AB0h
z=Qek4?=jo1Ky^UB&P62!`sD?&UC~`#v3RlK;`stSo-mp05ekc)jM3wYHb%;-B9SmY
z(A8IHJCRS75%P^*JmYP{Fys0iH3@V9+F**3@oYwUaKPl~m>YNZS)5$);|GuU_|Yks
zZJ;0Y{3Jq39Nc~trAleuvc@gNNN79i`GU6VSuIv{^@<n+ZQr7dV*l`voxK}^m|7>#
zixpE8k|44^)bwe^ljdX2<9(E#Q-_CC$+1Y0U`|<e&(L8@vR#8PEp!5tX=q9-R5a90
z&3tjmYwv!*-M79#k7cwqW#!8CXO^5+a&b9OW?$|qh4OxFh_N2*T))=W7s9nIHTx3`
zh;shYdMypK-?d@6THmGYAgwk{?;5S;n*NV<42m8Mk&GsU$no(BWi_G?30D}vVXaU^
zjSso$7B|k&M||)?8I6&c6pMC=@@qbg#Bnv97>wAG(0H2p6*eTMIx*dLOe)KGR8ox`
zRTbqqu!=G%^r{yJf8S$F$=+<r;}@qQq0Ul~q_69#ukA<I{0?4XF0A<wpZ{x!=g}D|
zi_sH9&iYOft1Xq20nl=x#W_6)3u#n2#^s2r5Le~0u#6^S3Tx@ToG}To&f%(puIs56
zE5_p)-b<8{F$F~>Q5exDMx&9`P0CW01zS5)ifl@zjiD$j>dPg|%S+ry(>G%D=ev%@
zYKgLjts8qBloh2joSdHW?C~>H*HG<F(W)RWI?B@reDJ~h+<*8OI!~947*$Nf)C)Qr
zvpLq_!w@2)QOS)PH+bu{SNQIqe23|5hxb4D0uLWQp<W5CU!$-}2JgKOv^jsgZ{@T0
zT}#t61TPNyrVsS01C=m~B&?1;F)|~Hk;9D)?Xn^D;yLa+XqO%I5p66Ln!TH^;3gAn
zIm!lL9kFS^3XA3V?2^)IZr<KzQD5?n-~3fZlM1v)VUdC=u3e4kd(Iv_#c4|)HU^E(
zDy#-#B&dMu6FZAQ`xk#fed`SO_7{2cjXUga{nz;RAO9)e`_7;7{OMDE>$m?Nd%F{n
zD1=1Ud8RvCxKb0N!)eRW4b9`vp7HG2Bi{S+`<$PgNJL)BbY}(asu8d!=8SCR*L9uP
zL?ujWu}0xs$^6ALDwU*3SpykrJ(NW$v_lwkM&xofYugDS)6L~U)QGdL(kX{R==CAI
zjyN<~qLkg~R3324w`F!fHsvdAq&|5BH(nzsa9Z@qCPGjSw8kI^J83X4Ob~=67!4#V
zQFtK)f>*>~2+=``^u6ftclUR={q{|&_jh^x=lAeeLh<#^(&>)2_1G@ZYK1lW|BKz@
z|36+omksLO?Gtw1U$IzB_=Bf6DGqlq+EF-zshpTH6c(&S>qIgt5B)W!OsHf?&Jc(k
zA1`@&{{f5hC7}~jBCI2I9T*WlYapy5_x|uB-aGgzH?}(#fBJ2j&l<K5E6TkC%Bqk_
zx0b^t_@4gqoc8pLD^t=Oj<~qDPqDMlXn%|CyNApUcd)L?`DI~HLW)jefN0XS@_U#B
zW-4q#=N>~b9+&d>Ji=*L!nfbv*+wyF5LhFJDMs*-WCLB((A5njO*xXWlWY>3@bhF$
z){G5uBz3J`N}Gs`wYNW;A{H2&PX$F#pn_;>vw=~}Q!7tovNxrxqzTp0qkJcgcoJ)P
z)hp;_Vk!|Vo=7&T$_~RRDj7^ThblBmTatDpV^O9cSwk8XxJiMYO;BTpnT}Cg6_f?K
zEHI_R_i+5|1*;b?(1!<H{OCtq9v?G4JlOO{WFr@4)_IS&?jG`|fAl@}4#!L<j;8S}
z7nf{RBR=`T13vxkX9PE*j+(lL<w;9fZnIrle&L(1^ZL67SZCLw3YCpWdasFC+P35D
zq~^uFGumez<;bDhDW84sf_J_;!q~VeXtS0gt?vmLY0>%a4HQ$ZAt_nlw-NeX0~PZ3
z=!{5Q*G&$wHA5r}kn@;^MZucxOUw;uULRW4#y}$;G^p2$!!<ga*I@eTs{M0%ZRnL7
zTI5Skp2M{ox(YT8z#5&3*NlY**V8;$y5>c7&BS7OTGD2av=N@N`7R(tW+TgMclNpU
z$|2kP6Le{1=-_L4ZzEIO)GUbe1(%OcIet>p8cVsmPqBMMu|L6$$3$ZYD2%qms7SFV
zHZ^)S&MRa<*)yS9A%#HhoQHe1o{*;`lYea>BdXQzjPk~U$=M~lxAyqTJ1ZVPJ?6Wg
zJ>*3lsFUo|Tl@Qz<C23rH(8!tWpk9sYPF<!@PJ}GA_U3%b!EZL+jrR7-xUKT<FeDN
z<nXSwZ7D(MdGO*-F-I-u>PIML=tE>_YALG1jMOKh>WRLk?^=ioqg2*JO2HIUpj~w=
z>z4Pv`c1YDZs8S_TGLuX&{DDu)NP5+5$EfdxwvbH{$+jh^`>aJuBgF_?Amj!U#6pG
zI{tOPgEdOK9)PGds>^0IZI;;Z=J}EhhB+qxGen*=b;B?IjlaPUzxPAVPcP8M;e)4b
zS~~CPLcqst2+~J_ixS@!3RH-UrxUEZ$=TBrdLQVOr_hRWdzaODO?`fWjh@4C$+*yr
zM;dJcu5egw(a{qck5`Je>#2Pbck&X7tucp3cX;yrV+tqk`jL$kVOXQXJ-9wz`RT1>
z)2n{|dyG<%!30ceg$m@eaBzL@um+Md6s4H<gp4>=u3~F4!dS^TEh<Oh9LBml#@1+)
z^>y0dgCI{A=coACF~2hG?(Yy2?Ck8ItztIY%~bY;ww92*Al#j&Z)@7s3S(hscbmn0
ziE|PYHA<12#Sz9@oO4{9UGnhZ6O>bY<*V;Aoo#b<dd}tf1q<f9`-NZU)@yI@)>~iY
zdq4O#u}vG9MjE!u%vLapTX$~q{`+6#oiDt>&h8c$7ni*8))zT`a>Bz;9#KvUW;YK>
zDYCe_qFs5Sj@UwDt4cJsTN6}Ltm+!hFIXL45Td8o5#tofIZROzLZa^i;cAYxn$c*B
zYJZp5df9VA#4j4`sGyuxXf#)+=WIJGgM2NL3ti4Y_4Ma+ZXN6}{)I2lHXdyZci+AX
zI;`pU!_;0(;}o%()1J+-Xp#w-5K+ovjTkX$B~*KiXmn?7qI&o#i~0)p?pN5~+u=9=
zz2D&W-P`=pzxucQm;dyi@wfi&Z*cR*E|7@9v$MC2D+<X^)dn+CjJL-4zF|CaoTc?0
zz*s0;!P(U%yZcA<&72rJOo~|+B+*-gc04AJD#VnkEG(+`M4O$CLlQ&EBytC3hxo#Q
zQK$v>%%@=t;0i@70TnTYCME?z2A2Ysq_tP09ugx|-gosT<6fR`4LXv#h;3?;uPI6^
z;F8i(hNzUKF_~?c|E<s{BLnj+#zQBAaS)S%81cQ-qzOZ1EMc*tXs+1#(rdVx;qkxu
z3H}N^pbAM>FfQPn5trn@X0M;i2KCm_1)-?;lTYt(JUODQ425+RWtrXRgwm_5&4JO1
zsAco$V#}iGIDdA=lgH0kob_lspdn%$k=nt1&|+0Vp*5AZaFOUge$4hO9nGUNrZ;a8
zkEST6!34B55EU_MOsOfgW(MFBzFx6BIb&%*W3h9@{K*m1qa9{9_u0O6h#Sk~MJ3Uz
z@>4~IymWT-&5dgi+x@<eIHNH^kzz+*w?q}#*`6_)j4)Z}qP0vZlhS0nTU1E+wxg+Q
zypO#8#w{`a7(%hhMatU1C<@7c9Ty>=MQ`e7`8$vc@wA~e4wZOrVv`DaLmISqQk1*f
zN2IU(0%}rXg2MTPGYMra!9>83w4>1Z2Nn_S43rjKh&~f*1VPf8P$-mX(a8{0B6Z+>
zr0XLwLdb<wry%%5)6{eqXGEhhrlQ>5p?!AAy+8Vp&??Rz+-H2S2QgEb^Y5fo#K$Nm
zTw}xG;XY3vK4<S}o3oQkl5d%xM?U=apAu3_2qm^#u{bH2-kveC9l!J&Z}8STM+}HE
zT4g6iEm>qu<owZ+r=OmY+J-&_Dwn8F&Y1U_zVCSby%KFVPRZAGpKF@if2GNfS~JYA
zzpf)bW<XgKvZ)V^XD)h^F|tOvz93$H$Lr1Irtr&$z?xb-=+!pwF@z7L>$F()vnd`o
zA9T<L{`7^}G|wBbNro*Ot#AI18}8-?z#g)8^H66n3>v7{`r1%n(Eh5VFh*P|w<^n<
zcXzmbYn$m#iJ4YtSI9FNI|l8#>3jU5<@CWRpZ(~RFx_Ex=MGzU@3MXK26|FqtRrIR
zV_Y|%#129y?$?7hk|c(240PY?T{m2BvDfBWB$76QEEt0-OYHUz_QoOQ$r+Qw8Fybh
z;_~E@zj*SLA3u1)-p(GAS;crdX0m<8;`BHh*jT#Nf{Tk24(|xXa6Fr#bGF}l@R8#(
zHdOUYqxB8#@+r&H=PXo>jSW-nAS$fy7)68DBQEs=igXjB$BfE~GHV@U>{vA`x~>6N
z@ReWwb&By8K_#p)5Ce4`=^2WftgC0B!*7Ne1M@=G__e79FE5>Rt!YZv5x$q;eAds3
zX5$JkKSVPa!3}G1^CZRfGxYKlC?m4cp-MibRTw)&Q;A0F?D-@9&fouSKKk$@KK<w?
zG^<7$*^nrDPutY^q=fdXC4NIP3wt0D`V|K^wn#>d`}(Vvv&T;<+m6{3bkaDJD6K*1
z%qVKSM4<Szz8)!QtQG^!iRyXpgI_?6rY!&X2a;XsEFl6Zpp0HeNTuu$e0>WV43w^)
zW5YA}xxa?|wrton#7=-1!Z?&rd3!}+EoD)lmBYG{!Ww3?F<L8J;V4IAilP9NG#o})
z2_Xc!+T*(%;WA1TMZqht-C;VLVvHj6Jt4HzD@C_hFdC0Yy`rfbeAf}8LK#g}PPjO}
z<m~K(zVFdV=$Lig<4S{Znf{&(ldT!id!9UeOf*lqadgDnUwN0kg9Enq570^T_uP~}
z`u4xX+JX=~$$M$Q&}{E)@yhL6y!!fGM&pXJ(-VIGU;RFh?mwpOTK>ELz5fokZr$ab
z58eY3FCISP+4C0|YuMVI;*4RnsJXaWNG#8)1}}obuxP-0QVN7T%uZUN%L0`W^~D_P
z#Z#h~jB$k{juL&_;^#FX1o}R(xLQ(#$l;wWs!;_R`qh&2i#cu6aCLb_yKEVaD@NNB
zY#CVk2D}V?2hRqH9@7lbX{)D?pzG=Ngp>lxIjmA(GLtB2QV3!y7Da+lq9{&JX#eb6
zEWh{xc59z^zW5sDxa8aa`cL@1fBrjs{a3%v?ZZRLkz+QS;fhl9zDZHdDuU|xkN)HT
zHupaK7^7sksfl34h-hbLSF+Rd5KPJ|X%0=!kB>m>3wrM<ZtW6ZoMB_aWLA*RXT~rh
z(^{TOV4b`$7~|<YOtYhslsehYibDRPPvE82SHW-M{Kz8Xlt?BK<1l0vMqkR(hg2xs
z)Y5hfio&3k2Wtqfl%Y&-NG1==wIOLmN=ELP7EP-j4#t69M5M5qpoAJ<q{z|1gi)zD
z|L&(;#+!@|Z&R-x2r3?v05w`X4XpVY)iHmCub;~XHQwFhdyjAO=e*B!Z-Ox%Yc<vu
zU^TihBqN%p+PAFghKmK%=a(!NHH(WCIz@s}L=)&kk&7^mHK47b99gV}s*q`-)(KOc
z@#;VLRm#bn<%yy>pHYk~F^Mario#o$=?ghsL$pdRS@y~asaJH16Ba*u!s+83RyS^N
zwztFNaF5;Bjwojnv@WDTOqupd)*;2TiHHf2ltkx~k|NYVAE~<zUte+ijXn0aw?(Iz
zQx!-XXVIa6iNQnAG*?U9)KeCB>3t_7o-`Cmf^@ALopt>!MYbNA=ykNgfS4&1q7(RO
zij<gENHou2Ew2#6hSAYB2Oqq{&h`{^M423Gp*O{tjNpWV=evaW5$`?Gdy?;o%PwnD
z49NsiXrSpOXG|NCEpVeT#+GPTVMZlYDIxEpkZ6}Hs&R=cOG58Ceei;2e#y9~Xin$&
zro&XC_lrq3XD$DSWFlTEUVZCT{_uDIkc($mEEaQ2*YWY6-Q)Dhf>bDqVucP%oG#ei
z8uPb*`vY#>*=2(^%f^L*w)b3~UtxR0>XVQ051(-SJd!3O%D%=b%i`>UG;VRltsGf#
zt<W5n02@Sbje)Nzfdk9!r@o#9!AdFTosz#NiXNHV^s3=tSR?-fYQZ{+C|%R24$Z?$
z_3^bvSrDjfuJsa{dfh4bXS1olr1V>(F7lO)!3z1w+!#uw0VcbKlC8-Q>+7u6@2wM=
zUar=cUFRHStu@vdwADCkC<;UA6w}Iav{Q2T?l!weTbNSlv?vXsC$+N92|a$%aPrv;
zKKbMfw)VL3&f9Fi{W`OoHz-C^(1wtauRab_eJw7SDxhLm8}lfIOuF?j@g8VaKYbt4
z%T%tsXNbXCcJ18W#!jc0n@70wGsfp<9KN>Cue|>@IK^mppVDd;wjebtE?yk-?C~>p
zZtd~jFMWgY-T`P!80fLtx!P!(-6}K6AA_)278f+n9#UL%G$ygVw@cHUvZ^nsMut(G
z;lqrsZ74&@q}X9+=a8}}F*?$<3og!Av`xco?<NPgUd0rnHPJvTK?tG>xv+W}>9@vW
zbv9`luGR32!g@;ovUBV;)O>gx*HewGPkRXf)tlxjqx(|04T;^?9)5=FHaw>r*8siV
z87!H9F2h5kG1fAkRqX8UvRE#;_x(TT-LHIuFa5$dIC=hp`=9)T%i|NeRYMngLfeUv
zj}P?T(+6=u?dlp8p<UKoJb#W=iZZ~i_5>5w5fWBwV%M$_=8(id#W{tm99mnh=1X)@
zvH!{euf21Z^Sa}cKm86jU%SQIhx?p%aC$MP?Lv-clItb9#JFaJr0bhwOdC43zCMKc
zJU5vnab-D{LT9#xN-_x2)~?a@vT)e^|AlT_0*Z26FdmQ5+L91|3(@ac`mSfSTF^AB
zHPX7jx5s2U#d}ZN)+l4?d(T$sSY4dcwl&r%`o1T0GKh*w^qL{{D6J^V5w0ldJCBNr
zi77}b5%8kf^kQt5LS%pMfaj;j^i9W;Pajb)uGl#|L`BWcjcs0i;|)fx<o2t#7>!5h
z6j;?u7FU;CU7qnrfBbJbJ)0AHM3NLE82IszKH>eZeH~j`j-Q{RXItE;M!3T8;^7M}
z&X4K)mhre?HXXCKvqj%|npGpwZiO^?DFjfFWKA|X8j-Bj0m_XTZ|$*r`xZ(m+QmF)
z;N=0hg5F0i=1V^L?A!P@;4`%yEqkc78Xp_B%93`uq-k3I;Gh2vH{U$s#?70Iw<g$f
zBI{5`RMMQ?`xK|lIvc@ii&p)b(X<&V#dXY>(oh7=WPXMH@OyN>@=aEH%3E*Vr79fX
z`NMDXtv~n<Z@vBoUwY>@dq)Q(dhz}65vRfU6>oj<HNN|u?-HYUTtx2)L4gC45>4MS
znQal4H997=^6NcbX(gzp-R83oAMu4>eVwbeq5tt?l&?35blJZ```nBgar0GKe=B--
z$y1LJl(D!8#5RE`<ymnN)%GNxiCi(8Kr2ZLkZfhqGslq7IkGVYPgIVi4Cdw?7OR@6
z?<uDRIv8|4m)ve7ZYe5FGLmB+RYEC8&_<*JMx!v2jGe%m0_6<OM`l&U!L2Q(22MU*
z@Iv3EwMXnN9^teCBWJoo<#RsbyMNc9{<V1h@ajDt_IH^~D^P(zqzi%A1nO1CvgtTo
zu9#odEZe2zAW>jx*IFuw_+&|1GSrlcSPV)VX?C5%x<E=D-e?MS!N32Xe1qbaBl?2d
zfBOwCJ}XF3tfy0o_+?9+H-x4qhHHjIDoLRi0j+SQX1A;eea+(Ik6F2j=H?Bq9v(B=
z-(vr@n@kS3^R%pGQ}E`@q?m|a@}tCV+tbY>mzOI{>UjOm0h3bWoS1_|B2H<U(xrjU
znJJ$ELfhfHn%f7r#9*Kg(x4i(cD_t0<jGV<JWWQmhAH*{6lDZR<t8=d;*o1vs6+WL
z8*Sd06gh_r%1Uw420niD1d%*M5*}i}dygxfw7z+$;F89u5}o6NFk@6@DGE&(ZJ|=l
zvmbxVM}Pi^$zH{)zw|D<uiQcSQ>;6OArsYxNFN;-Eg|m?ZoPSztNX{O`ke7}gkLT&
zZd)3IT-a+QXO%vJ)2LD8mDg``|KkVj>{QfOHTQq?F&!4wSaFXYS#G^L;kW+&FLH2W
zOOUfPXe%^PLDzehmn-_qIp6zd-)7V=39Fj>KX}IO?haiHSYw&T1z-EMuS-MEHHvIX
zIraNx;p9+{4KvNa0J5l;zAq$l&YOPOfaPVYDK_@5n_E3JDnIqbjMB;Tu%XagJA8Aj
z#9->ewFY%So?e1FKIfMd|8X;SNkel#pt~Cl^Tw3>rwpI2{cA9&TIbRZ|JF(lW1A_I
zN{rl0KBTR6Xq~96W;Q9AjSV~Fg5530_Ou{|o_@6=^c^}uzgp4PO*RXOv`f#`m1p+a
zYwW)ICR?}fFxub8R29lteD7H;TNd*x=3PzKtSHOb=GhGa9lS_>a;~CE1NxoT6mNN+
zuhB*avc!M@4!0@|4!BAh)v3WvCb*-U#ET2Y%N1R`B%LYhRm+n<|8wsD@I!pparD;f
zy!O@?FerT8pvwxY#26!@(S;*rDtnBP6a%qYvUvW4?&3MD7R;ihd7?l^s=Wi|Cb8s-
zX*ptk(U96*w)GJ^+c$8gAjXdQ<tg*|1-;kYc;#KT_iyFmMD})Lv<&r>rcaIJ>Lm>W
zl8PWuHlqx@MD<mFDPCr{c-Q;>wZ40Jzk$koopr8$>XFz`46g0H*Vgf7>)SNO*EC&g
zeJi3XedV=Rh)G;dZ{K}|2OodR!_OY@`0;aYzVaG3@4U`ee(@W)kt1}GljqNQ@$ez1
zCnsE<p0b!<(YKMl4=m?PdCZ#@7Zs(`q}bw#=t7t*T4}lfXe;MWO4vd(-XEbcI9F1X
z6}R8}0?NY0#TnoI)(>d=6$iI&@y*}<M?84=F|QV5?%li3PaZrKG&|)Hsuok4$iNQz
zDVjqbrfGGZ>G65)F@cnNVpP~<qz{%&0*@Gar5I5$pkqYGK=6PzbbUk9!I+A|S=zp5
zd9}cm6KqDt+Fl}JCZh@C>4>s)l%tZaYq>n_vF?z9#Oh)}VGP<9Xzw|{THr&>MZIP+
z8{>VTYkQK(E~g=3om<OXj8^n1&_|&J2OVjB&8n#xS0%TO4teq7l+Z*LXJ;ZOG9|Ne
z#Hwq#_vt5WjSG%$9PrkA?@?6~e&_%Fdn^`bgx-M`4YNuS)oUKyd&vCsl*!>1Tl*C|
zhqp<}a{l~^)v}=rfx7SLd`H*xwEc=^xg-Wp;S9yW7S(7ZQ8~_0lof>)r*E^>k&q_;
zbl!6H*)w8FEa!7p%M~#N862l!U1)69xht)SWOOV>`bDH1Y4j*@c5*=%I#y4fQ7@Jp
zfBKZYn|mDIzQycl7h5^#mK=Zd2&*lT9-TCrAP8zU4orw~<zQB~9%lN0)dizP!)L$y
z``r5X{w{Qy{hb-_fAxL3R`KzVe!|u9ect`x9X|N_S7_=Lg^rvbpK<r?*LZRN8LRn9
zKnx2i=rubo#t1=bFgw~Jwh1?|$5JF=Kto7@Rovr~Ke^A{58kESy3O)?-=~;gVPb#~
z(MmFE2}$Hw`RUV{p<}Ef1<9KyCG4c6rx&oNDoM(abc5=}^CbDSW-lq<lYA5twm9sa
zxt35`64=Ty^XUP~IG9aIR$)pjLx3pKxuk@}q0pJ^kRxxb!j1}TF~Ye5R~9)J8jO=B
zwF?1TCVFL=cRM_*w}@94+^B>mp8^;&#I0%Zy?M!s@~_A1=dwZlo%>t#{(`3NxLTgm
z)}H8lLL7=%MRHb}rxY@pupk)FszNCd(%Zt4uqbQL32XvqCA8Z69^He|8e<pyjla3W
zn_qhs?@vh==Y*>r%Iz6ZIcc&?6sz+P=ocN`*_`gICN6tah#M4$w6;yR5q3s~7y{kn
z&zRr;jMeTPuI`^RzPZERTQ}Ldv4u(z@7uiTX?!1uy~hVnjM4~So?NoKleo3NC+@a&
zq)v03HjKBX>jp)eoC&7#<jE&O8}Z$W{cK+^8<NiKg^XDZh?!2CaaDGoe5nQ+iq@Q4
zsW8{5i-TFtPzp%I&PKGC3j!Uu`~J;!=yQrWZM6&Zv14avCmYdbQ-zqs&?EsdNN{)x
zB;VuG1qs8g4_;xiQ?c@ja(9nD228xBqm2XQRcISpMPl?=<46gn`xCzOH-Cxw=_y5N
zWkc*es>o|j$wU^SgoPXNle_cgtNe>^|A^@qU*W~Ga~AVw^nQlX+Z4(3+B;+Z_CNXt
z`?q#63dvv1$YB85x}{mRoZNfNy?^}|G*8Y+Q-d3g8EH$IJjN*s<JsTc;<b0*Bq^WK
zlWX@S#h3bo;ZHFSYt}BzNxmi~UUH989*iib2r0<nl?EdpF@V|>bV-I7aBUr|F*SLq
z>rD+C%J<Oh49vtd(0PZ0Irli%3iGua=jZ&oj-YO?W4iWv^@jdC=uC4w$2wP5>-<k{
zOwE8o2?bBkQ8C_eg$$HR=O|qvZjnZ!Yn+ZutYTuIa!?hT$=FejOO&#NXix~6sLLtZ
z8=B>UYPw6YKVepmVKk$d9bu{om;xUZvFoXqHJ29`ESIO8*B3l^_%ZLi{S^{Um}q?~
z(`}s?Joi&x1MA_=Fv4EHu66YJey`Ez?0+q>A_|ftsYEOd=I{u1BQARAI>stdy!9sg
zrsU%JGZynJp8e^cuzmX$lY>LtbQ`J(rW&C}BZ;IjGSLoQO?`33>gi+B>JpP8m9wOj
z*kX(4KYmVsbA{ScSZ8Uv6>jdB#vLkcSS}v3TwKw13nEZW4mrC04qN*-Bu^1(@<iVk
zXuFQml(h9qg5zZ?SW`&NfC#9V_t!zHalI2&o9ASh99?&u&HLkE@V3e7%?RtjVt{KJ
z>VdMF){A*f9UdOV^)q^%9V71%qp*Wg2#hBag6c^LCX+EmS-`xZ8jZO)IpzGtF{%)!
z)A4xBY<rhmufEEy*I%P;mMkvjG|NjCmkYYpN{U{caD^j<NVi%LLyu9K;60!TnNBXo
zOg+8tXqPp~N9y^UWwYY=-U&tYj7B52CKGCJ=!smM+~>Vt{4!5Je!_43^55X2PyPu>
z+cjcJ+~nbI*cxe_hdN|Ns-FdV3KDas5-vxTDWx#$11X?W5KU@I=+KjV4@r|^z$i!V
zgos}D+~^~|Z5dA{v|UfvHjF0|%A%wyg+{wvE+HkRvl+%%npI2PHKemdQ8;=pTFaCK
zxii+%`$#pJFh9N86zOP;ju<03@YWh!-{WH>6$O1Cu-dR()$Hxeh%qqRo=~`w$M^10
z*pltNeP-L+sO3Upm<T+3`ht&t@)37#-Qgep2mgSxub=Wy{?Gp@7grY$f;36V^Yq12
z?tk`xw<qr~JKQ034b5V~-rkIQwav39$FyxPLwqf3yP9m_3dzgW31aVAH7$+mFt(+r
zN{lg*I|>-pv9v9)$Yy5=j729S00at@#OTBr*5?6QyeO2FTRGk`EYB}!`xVJ3jI%6O
zD>|PTO%u=VKjrG`gje2ugVmEyX^t;Z7|;P16-fnx<Z#JwaA2KX+X4zSg-eXe5#Rmr
zIe&g~%84s^``5n7$XN~!4tV43yM)V2PM$pGfB(PzKk`5MpZ$+{<;~Yo?Ij=o<TL)x
zKlly)^?&})@m<0cPJX8pH>UL2)qAy^vwLtrNQv0baU_w0Abu|~XnIw0)lT^2k3VAn
z)-9&L{99ao@*~W{PjIouAZLlvc~8s3KCR_k(9V#Qur0LFn9+o&TOu09m3Sni329v5
zLyhh`(rSs;7Bvh}W0DxF08wYXx3zRpV_y3L7s^sgo(ByP+xH@2PzGCwF<4Zhxi`vU
zwL@8r$+7`u9m)-ReZp8vwjQRc3OW*X+~u>CCZ>vA^D$Le;gj?gIteQM^4a;<>-BTl
zp#JF(KErC^plW5YN@0uxEyc17mVy`-5kXnBE`dleU{M4mu4ztnqVdyW`(D@{hedZn
z@o{6r>#qiW>p%J?eZ0Un3+iWMP<uiTRY)kDNjN11iJK&<-5LFjn(o;pt7qp>3xXOY
zi2?0ncG?w`rYd1nXqwB%T%0~-`TUTJr_Y()J>uZ?n@o1gjMQrSwwIvkD4NIUHOu)a
zzxk_Q;NgQOoGun<UD5$p7iXxLusbu9HF@fX7&I6FMNEO)uihYal9wgCiYSQYP!u73
zM~b<~%+xxSo04@t<_1PxH>8_f)Ev%|ZRw;g8qm}17Mmcdj5G}WjaE4?QE`60Vm6)8
zbqzlB#L#5(5=&IF*(l}ESc0waG08?yjVKQcX}KiU9bPL;K3vwFQ#PCu$4)t@l2!5%
zjiERgu`|0x>b*GWW_q(yni!)r2uTx?C-{VN3RNUN_~mbK`rt#(pTD3{fH#cIf;aBo
z;Mafa7rA-+W;S`c-k>GgRn5uc6MpjhKj7rUr&J*_af#6OOm;Pg+Y_pCL>E^q!xg{z
z+y9|xDbm`4NH*&=HAtrOF>bV1D&<2jQ>asl>q%b9sHW0c&zq@dD!ck7l_@=gnNZAM
zTO)_*TA!lUKQ9i}%{c<+r5MvpryZzm>t5&D5CfIguBQ@|_p_Tk4rr4|Z>}RtLm`{8
zEq>1I)lz^+Zk){_=}MerbwXP)ZLxU}=1NOdS!T16$#hInSgbJ^ZO~dvEQ+t$tqNu%
zM^zbIWhuuas__`D9oT}XwG>S$VJgpLyG3b7a+Xw7Bs-yt0UcV>vZd{6R+n?G&gU$y
z=G5&u$p-HI_&dD&_Lot`NSG2`4StC-6H+oF^c#@6OqY~BVW7F@HY*KRkIm3<Sc?NS
zeBF>q^M+Dj4JjCK7Bz<LohdtCcoU5xv@NUiORkPz&>TOfK7NKW20I?3iW00Pgh=cf
zx{C{}Z^SsJs8Ckpy|~Eg&~xPWdGRRHsh*vK+wAP#VtaH*N`d2(D^5?2ajuZPb2MSP
zcZ2Q2L(%KTJVeTllXYEFRVBWQ)EAd*TL;~sp~^!}V=z{jBx*p92My$~WnOFQH)~&Q
zoU(^+T%)O*;ZGXqs@e=wpKH%cj!5`RP4Knv<JzBxy*f!IWz5W#E_lj9BC@>iaYe~&
zdmC34j3*<iQAJaCv|Uf=JLYG{oIZKH)_OL}C4DpJ^7Mj>7svRfr)#^6s6*d1#MncU
zvn_U!;627VM$;)pRidqvGbbdfqGYmL@#-r#>0@MGw>0NVl5td%5k*<@<o*-3c4mC`
z=*fEMmF-$&wvq>dD$8lszkgC|nZ!%i|8sl=--{tjG=!K?xi(QKERjfVP@~7kz%bxZ
z5rPi{)6-_kDpr#)Ear1YvkBU0c6YZJO{TKn$3W;j))=fSC`TiVR#bc2sKOFjkI1-X
zKpaD0RE;RB5ojfn7H8Sn*~L$0Bywbv7Jag{2BV<wJ>Cbbvz$CV$CV}1*@!;$j3*<s
z)>PvyDpz2f;{6Za<<pNpqYI6!<4{R%B#OWI&L8r{4?f^aU;Q$F^ap>4SN-~IX;v$K
z{NtaXY(eqKGxm-qxN1bGSM>7*ec$1|7hSK9!&r<E*bLYZI;kxzC`}g<Mki@Vv=QUR
zEU`&y!^$A!%+Ca9q4cBWSvT6|7#4+&iDD`PtoivFUI$WH5LF`iMBgk~T%NFVW1oxT
za}E#pc>JyJq55H14#5~ql7Vni*C@T$nwq4Lyn79d3VwX@oK<7^$+v#Q;}>Uq@bCW?
ziGWi*^h>6t<>L7{|LOnX|HS|FfB8RWesap;q@i6cd366tt^qRhDtjZStXWghqg3MJ
z?3BI38-%4o_X?W?ql~G~tTIOkBN_tFA3mWlCrod>f;qfP`{)z=>2qAW!ge7aDG}$a
zu+f}CJE71<FDx>uDN#A1Hl)H5R7u#Kp{IwK5D3p6p`++wwb77TX&kM@ttF3`2HJvn
z_dVwO@3ZiMHU|36<C3N{T0E7Fq7MOwIB5&BLF#lJ9VyDXF(^nT8*j?_<(wj!g5DOq
zI4$|{qZM06N0`eq_NEWl16rkJ{m4EfF;nXr`mfRJ=dwX9iUNZZ%V-dCq6$KXFabp*
zXh%X5k(@9EB5qcZsC$fZjGSe++-5v>;*zH|mD0o>LhlLAVN=bo{Eb8G%n>eL&_2)v
ze-k^JiV0HzhCnp(Npz;dQAO;i#LY&yohi$QC$uMXbnn-+DT!6lSS4J0QqQO|i~=-Q
z#~fc=@cgXd;^_%nuifUAw+^wuyl!YaPiZZ&5BM7Twx(}7ZX6vj9Zk77IVJc&-vloE
zQ&bF$cV@Vv5GS%E7U<C_MmBKs@Q}V;VRf-~=L{)IZmcGUiRWT{XhNeP)Ehg}e1lRp
z&lzaFDQkEJPBHeHrfQhj=iud`K~n>$c@2S6V4TATd4K0dP>#v`S_$a}f{*mbXHzK=
zwNN(9q=G((6Ji$#))Td8{FD~NB1UPxt#$N17nAveDy_i<lD4@K&cF8%2u=ppasi<f
zL~S^F`;aG}-Q)87lGekBhQrkGcmCnu=g#YQ>FQSWMkGuj2io%Fiu>RHnET)Q7=OBA
zoFYaAf|rT9H<9iA9WIvV(5`sr8((6)H6jvn?LSaa$r)d4kbw2%HMfp0aS5;6gsN<=
ztL4zrK^&s<{ZSd=9vV!E3dnaW7p0Q7n;0^FQtnMsjusebgbc$PhP=oq8nKXwG50iU
z60j0udlgVR5|ifUd!O$flh%<)*O2IRZO!Is_VB5+QYf1!W|q-(EWcC80<Mh8uN&L}
zbq*05B89a~CKIN!3FApA$f`o3A?glaxA?-+D)7lLHDk1{P_aU5C(%hE5&B%v>w-FX
z+PY!2To6-YG}*<LBLc8oUNOJC!Z)3uqt-B<R#;b{oTXV_Vaq9Iwog2Jj9C@3hq{s>
z>vV_}lGu)Hn3je}gbh9N`W1bt7g9Q#!l+EfNbBK<(n=^?K#F=ZoUkLx$%v!7x8&M;
zPi%X-<rS-o3!2Mw()^0hv^Ydo5mb`=XD<e7pwP<VwB`2hYZUtjm|`r$P30(TLFFRO
zEeJ8<lcI7{wssCNLq?t!ePBv~i>o<JyP_&eg7^5YW3;_PT%JmfWHz=#8=1%_kBS3+
zxw#-ikkOiFu+~FvzGF>9QNus^Z>{HnYV(%YjjM)YeAuRcS_6KK(XdVp%HnaA8|;AM
z@n`qBapxvGyIVA?8Xp3?w~p99xJ@xGS<V-fQyKKyQsPNsN)%;53LUHYoUUK8Iy)h(
zR-$o>1<?mWXz;$H?^+o=Mn&*}W!(ab)pAbLTvFMBx~WkrQcN6ge(6hWA0ASU$9(qD
zCtMz%Gp<Tz<BG}l9v}YbClqgN^I!Zw|8vG$M*{XU6x`V@TZ@l?n38X<!$9R%sF&C7
z=kyA`mnK*%92$cqs#aV|38dH%0`#$yL4o(gHsDo{>MXi<tb9l8SJ<k;jmOyJ@O{sC
zJf<9t==%=m9LAM$riXxY1zIcWw!xMqqpHNtCRpRJ&g7&38LA9Rp<OnVRmAL-gp_FO
znznB6ZHxDvoOjk>os&isOL4cPYdiY3WmHwT!ZF>RQcgyM&eJswuf2Vj$!x~y*$H*M
z!WxZMhND}D1m7{gxZo?l^i@9o$vvJtdL#|HO85w$e)0fajd7HOA9UEtGTolAysGK?
zfX;D9;)Ib0HAWCf=L(73Mh1!6YOFTvW^|Z;#uT#mhX@u@$TdY2Hk#Cxn_;98K{^H~
z4fv+v?Ba_0a!#rW8RTh8Og&nac&yN%%Yysg`Yxy6dxEis5F%z^`{(_UA#OTtj3-l;
zi5MN29v$+-A3xx{?Fms6mx21j2mH&Y{}*n)_XSQLpQBfSB59N>IQjS)|F3`ie`Wv1
zlx?@o@BL^0f~(6~u2YJn5C}0rMj?m&Ahtak&Be(J_V*46*5Q{YltwDB6e7wPlrkt&
zqMal4J?D?^;Y`Ww&K-((-Y2aV^v6%>&rh&jLyCUQLK~Nk6ckBof=Z~d!j22txu??>
zzk8FkeaK{QkD_h4x;V$^0-ahZ!itg<dO#EFK+qa?_6cu%mF3PIYF&Ykgto!=vd{O<
zVo}(bqgf3U+EXNjGYX?(&ZbR7AJERAgTh$x=%5<W7h{%9$)Ek;k~VcrcMduI<UY6D
z$82wxq<-*$k(yx`R1OTQzizLe%LY~YqezGlbVL)Rsft<xn^lpg$v*2%uIQ7dP@0jh
zn3a3X##5?7QQ87)BUYe0Y!a;p4wCD+eH8i1-+7M|&rz2>p}mEvDp{P)iCeJh#qHW;
zBOj#*5v*}2tI)=w$^tbyp?z?S=>kzn_La(Lf{NL#l}JpH@z`;zS#bH}LzY)NoGe@V
z(@Wm@<+spE4hT0Ii6KxIIeBo5PCdoQQcepFtJ};^&RHzPN%8DrL5QBM-7TCeg+gLL
zsh-#0yF>CTak<S8wXs$xwlSdHVEU0z*tLF+{D6sznV0LI4PFKf>iVAL$|_~?xf<w!
zN})o?ot+f6M&+qpj2SSH<}k&`cr>E#1J1h4j1!T0%7vFofQh*{mDQqwpurY~(P)He
zYMBhS0bOJYo!%658YD9}7u6B~O-9*e!`u|kY13>25HmekHg)a9#qy0WeVM=boj(&A
zNs8PZO*nG9oIH3!dFuvk-?O*3P2YALKRe~a-}`eeK77hJX)3FQ{+kk76=G*Tzrv^y
zs<)W3;OO1gsoQ=X?GlIPJPZZaV6ep+0gM6<tm7mGs-c{P(uKvK2sy_vghV6|Vn$&@
z%#kD^qd18ea?mW0;RhlyQ=n9~&a#sKr!;7T)iGapjgnA6ME{vYyXQ1Y88n7<2CDWX
z9YFVMO4jH38Yube_4kr_jr?vg3C&jug~-m)7QgW8zsRd^zQc>hPkH#0PgpJ%v~@@5
z#3(FgU0KTdHx*Ux_Y?-PxsN&si9sodk*4lAYdp)kq?n9Zwh`-Ev|h+$Jzryq^@!A~
z7N0zQ>sehc7)>W^?*<}@q&(JX3gZ}UZwp#$3yg(6^d!IF&%g68`IX=Jhq&F_bk854
z7jty7q$;3;S!YToR5oXlLN5-nJKC)8{f4T#zOd`-DX5>=_NP1)i->W|uvU_}E+5$B
z(F&c+LTM?tMwB;?)`)rvp453_)6y>&^ou#|<t52C5Tgtc+YYNr7zd2)^8RCp2D}M*
zrXv*XqMT6{Q(;P^ti6koX0_t%_&GOj-Xg@na<yVKshI5UvwV7Q!#c^PTGr@HX%o|q
zxQ^x;*wT4lT{~G`YZmk4v_V2yH=9F)dEGgC_*1&p)UMa+wQH!dzH-Rs{YxcM-q^an
zXE9%4pS@svXG)7gxq{vO8|)t4r1t?g-a;uw=z7qC_Oz>-ww@CS)a@nh;*zGh<m&7x
zv1zzEo6{{lA^NOMa|9wqVR0rARl%qnW11Sb?C`OsP?~_JU9I@!kG@U0y~TLCg|0?S
zCL=;0X;$#y+uvpP<`Ms!|JDDR8#iyt-jtgN#UNKe;*;VqP?Kw%hVzNx82kK-#~5UQ
zl0tw^ld=w7kiE=eN{Q6Aq^2V^p3t;{!5AnQQH^KJCKZ#dF+LeqtA@qp0;M%w2#iJr
zS}D4wq3?voS`-C7L~Kwrbw`XI?E}_C;ws<@8CHc}_HE~!%-TYtX=LD~ttBRj+K5qx
zb$v>t=xOT?=cHjvDY3n?%hlBtKmPH@crQkhli7^vY?4`InysB}w)S@c#o}tt<A+aC
zDqxIaXJ?-`-h7i6&z=+du(ndTy1Hb!T#8AiHnj7G#j;*YE5uF}l9r5i2D6-OVyTg|
z2A%JB$a|JiCYxr(^|`3B+p_$|!-UA7jJUjFjWn#8H5U7T4}q)ar>ri|QAiv~@R7nQ
z#*-<=D*CR5&Y+d$#dkl#^<uyiRWF_oNy*?hQ&dw{Sq=JA5~LU@MhUYs<9qj><KtM)
z(-a_B!bQuYKm0RLhA9?@q0j*p0zdrS-{;G}_B9$;@Zi&j^m)%i$yqJ~<wPGu?liEk
zVvy8|<C9}1+dFLCewWJ^pHYTLksCQwk240<L9&L@mFN_?eEKnGeWKdgVfO0l*!RCo
z=QZm2L)7ItrtN7{z!nA8IaF$(tT0AV=#HpLiv1(>_93>^)QdBGf62&c8T1DrMB11L
zrlLEzO}hOBn%$eU+K9`#HYBUDZY!IdMtlgQAb^H3;sp^jbbZ8HjkBJzZ_ri~d?FTs
zr~+=bg_`cLsz==WsHLW1_x6Ov<pn?Z<L~gw^abyIbr0n|N?Ey=IhswUxXxPrS?n=?
z)vuq+26a_0grA<wn#L;|PA>A)3a2AZTMAn-D|e}E$;eskU~1A|VuB;urA${WI9;HW
z!5NFPV2h4#{-d`s(}s9?PJg*aGJDv<VhW*eCew=;JCRL=gr{$EXs}KON<+14Fv_q9
zp8DZ2+IIw1lJHp{D2<+&WSZP0Yi6Zkb$m*_s)=>t;dIRY-7V5;MR1O;SrI$M@}dK+
zsC>_85-F`^_x3L3)r`g2oESRlD^K!)t=%15IT77$(Q@<FE^XUn9garnK=k4kC^Aul
z))5sYmn+5GOy-Q(bvEDiqBN;%NFJLgg=+x85L1!#OC8?kkNNv`l6DuPC~Qg7^^`?{
zj~yxLOciBa$YnKUF1!>W>o3s?H@0~1as#L305ci!$zk_HD`GOrqtt-S$^oQxTpKFI
zJY5xpd2l<`7$x_T7*SfWb>z77!RuW9!6(=z5`yFGU;mKiOLrOFy2Wa_VpbJAe(->w
z{N4}nj~e!7QoJh-ee{?FHd&%c^oy3s?l$h^Ioo#+c=q%Wt4AFwIJD1HZkM$*Ig3&z
znl1yL5tU3Xko9_Mtc?^O${36(P}-rL!Hf-s9o%L$Mk%yOvXSSWL~Cg<l{PZf&Be2_
za!oKp8LctG2NnvfRcK`f_GrFyLUD^KqBHs=se39<t~1(Z40>%-{rSDtPS;8?3|iJE
zK-ny4FfEg3UXiS2XYY_#UVVd|$rf?dP}d7$kT|0lBFX3Ac`w>)Z4_ncP%#lgj}IxU
zVq`F2jb%F9f{;+gg0AqXKq2=sMKEb_l7&<ls!A9JvF{0cKBM#bSu_dj6iJI_zU_Lt
z`JCnaimQ4_>{qDbKJUEoWoA2jU@OAn0@XICY63-t(hgjvvS}@iB6|J2DQB}5OPanR
z0$>9~4Q|J3bM5k7mt~RGS}O)dir_lAMI9Ok{gR?!y}wvN2+U-J+1;W_npsNVJz>72
zdH#s*;+%BZ5$k8@S8rkVW~8FPD@B(AC@YzUZp6;sO<W<~1<qJhf~HyV$-R%5loCVH
z_K|MYG1=b1%|>FP6yw^RTp7gp>YAQe<<R(n`Yw@N*Xg*MW-~t)D6&K(BPU9|+-PpJ
z;KSfh<tHxh?SotNT7#O_d**d*_(mriiBSx5z_41i{KdDv%Nt+(BHO!rjK@<M9_2+C
zVx&_*-;(+c-?y}@CE$te5)~Sf?=afQIci$b1ga#3Uk~0ReiB+!jz=V8neOef=mW$~
zjFeOfsh})ISTt@t=KjYI`0$g@ST5%HHnFH>c(t{?D|?caW@K>aHD=)FD57SqfzPUv
zYp%YVXaDnj^=&H#jxo|E4I!XZpeRbpA~Ok$<?8sH<yFUYG~sZ6pT6suFP79-=PWKx
zu@CRFy?a2hyU%3rfO@$E6`5|$P*q9jdzyMh=w&~t>4d_oG~Q~a(+N=s9awF##*#wB
zmK8-+paHFFx=x(&L)Pk9Yl$&nD2QJ6lN1t0L*I5-=a@_<+`soJCoj&4=seU6Eao*=
z(=pCkj5d<vS~v<<V2tSdR*ME}5=vdNySGhQRWx0lC3}g*V$O0tXSyvqSw)hXCDU=O
zu_P53SOjTc!RPHT#33tOqX)*35;h9CX;Dh#MZ?fS=d&#hgQ2|fhLr2k{M;C0>E<gm
z31=-qi+_d$dI~yc5?EUhL*)GNWAw5EWeGVtCJwor1A40w0A{F0bavua5xZ5<EE^tQ
zHuPo&z7g4pMzHx@BWV&&DGF5(LSS!uo1&_C^5g{}f{%%^%<|@th&h{8LUBAX8$u=$
zzV+B@#9}@tguvwB4N7D1i|5QQo?^VB^`1(@s7ypG32-PzGthCx>f!fjTF-cAm)RSy
zv;9lIgtasJ(-*YI_wkE4sy8&jV`9Q5gRLyx#S7~B3CbimKcRG*-dIwnQCoYgMmJ&q
zHvMQyryRY~^j$V8O&NJsP&g;L;4FiRAriCOxDO$lLxN2K8zXI>sGOxpo@gr;<u=cs
z8s_&dm~GV*TO;cE72o^TXPiEpb7yzT*Wcb_Y$ABQ9#)zJAt!vOsL<DkxPOgaKbH+^
z^bM60Bbve#RMz3F#pO5#YXotUsZ)@Plv+&cWg!p*#Yq~SDU4caj1D*j${LhQy!F+B
zJ73$!cS|@O;rk;PRp`QiQ|Lmhr&FfzVe<|+WL5P+8YrEhG?-~gb+k>tSQ1VbsMzQ4
zMJ84{p<|NbMnRz>tt!z?Nm0)kz4!s@-UW4ii~8h}qBNxCg4h-qRk3IiotL5@j6Jrn
zjHe~8*k*aLpk3AY#flID+xt*rc<T$d3AN~UOc1_4TBGCO1Z%`OFJz{!hv*bKDwGPM
zy$BLrEOzIK!FhFK?y>%#th?G!IY}W?I)QbB#o*?u)+nV=^Kv2K#el(wOfwxauwt(E
z^zhy>Yg@CEa^4VZRp496)rv8pyq64Hh8itz%0>z<lMF8*rCBFA6x+m@4XmW#R8ewc
zWAsF&(Z=A3eDHU^!VkXvd(bHQRI<8SQdvuTb;a1gr$6|Rd;j|TxXZ-WRD<coSj7iy
z63wvh#f+dBj|q#O(bkxQ*I!}p)!P(%Q=F^NR<3_S^1)(=gdByj){8Pw`v!MAwKkqg
zSxaclwd_#_mtQSwnxP*U3|d0A!C&95;VtE73Fq}Cdg0)v9n<FDMrVz(QW%}Gk#Iz*
zK#D4xreud-V=>yHlcxBb_n5UhFr)n&>YvK=-;E10N~p-Ek1zP>`#<64?K`whP1`qu
zn3Awoql>ICQ+YjV(QXyiP?Q#17+lsoYpt=-qQ(VkdrXWGS61xq-e5A@!HvdflLs?7
z_i~_Z_ianpH8jgP%lVSF?dbcS6eGd+%ojED)tu$Lp<8&mrY9xQjRh63=7RsnKmOnG
zpZxd#N7(5h-Pv>6=g(xY;Kt}`j4nqos=!*5D@5*KtV9NAEk;5{3KEsSCXyxzJ5RxH
zbf-fTrU%BwW>7R3*JPMBBeHRgXkRx1O9L%+uoeZ$U>)2qtwj|D<NaO6O;0#I#~(i<
zK0PILJ?iEy?5M&;N9ZHYjw#Ct<54BWppiYIt5-bw<cFBBWW0R`-w7JiHcNJI>{CRI
z*P>_A*;q~|a5<ZG$a@}gfMbel28YCNrL}3!U@*1u8&hk>!_a7osoLhH^e`*{i$HY0
z7_OyW|J<SRRe7!GAu1?qUQ^oa4;zZF!W3?Z7o;qw96x?WIU38l%EN;cJgINczCo!(
z+tu`SjY=M!_o6<^(9BgMnq`BoDhjReOL5FkI^s&vkWO}X=zGuh&W_}G_8p<AMaCeR
zURZ5~!O=$k>`y-C#~(jH8O6AA?C%}WtA>MvBhj-dDg0A*%Uz?S*{L_AY}|64T0JoI
z)#u-!5_+MJ`atZWG)Z`xt1CtaJ9r$+Ma}$Dp4r9XnAio@u~gb(or22JwyiLcjHYcw
z&skMtR?9}Nd)J|X)k0xyq>Y}g_iURPVLZkZh0tJQqVHO?)-0Eviz`ptwuG)mD~nNr
zIxa33#3;iHm8Ch#$a80{WmHyNU0(3#Kl%%rWrq)uKE`}jdPs_{=`hYQ9*>A2;C)ZA
zU6MkiS++E*7Go2Ah;+Wil~SjLtdFi&HCJcnRHLzEf+~pK3Hxh^T(W~ZvJq1(q<Pf&
zS=2`5%-EN+ir1_nQl7r5Y_^1CrWzSiYICh_*z82~sfWU1suD*>J-;N9C{hV2F`A9o
zzj;7alysMK=FgwdIZNLO8%9U99(+e~lRU5?vcx2)jHa4|n@)K6$uT}yoQjx%j*B8>
zIhjrYrxea=k~KIiZ@vEoR`Z6~%ivV}TAY|orYOD!HcQa7Fom+VL)a}=F>)ooYq&Z)
zX12A@RbMbZdKaTC_56~0am>6v!?MJg7H1;P<}s}68BH3rh0D)A;rK@%f*rGS_cgXZ
z_$IUcBMMfe<vD&nr(4#9z7_Z0m<Ul5rmxe)32iX=WCU<hir#y)*5p|*$r*^7{Zqj4
zT&qXfH+4S8NXR-1qGJw5zPAmqOoqa36Z<`ujoc9LJMMk-h|_0}S<SB~#xw5Ty2&@+
zop7+_h1r84%H5ER)$9U2m@NJ~4eDQ$*9SLl$)UJ5nGj<ntrRK+bciBi))Ae@Wm<!Z
zRvK5Ogj!;TTt`jvf)u7g=oN=sp0EG*7f@A?dQ{U~?30SIG^Znra*5<}VW0++kU@ti
zlR`0>7_@vIjHcM0F+SR3F|VPGD6AAwL$RQ}Xk@hlV_94_TwEl)DcKoyRI3xZ@1J0{
z-z3xvP!-QVea5S|N*+A9;>uUlog%Peni3{S=GX4ggnC)fTrOG6S7@WS@r6USc1ARP
zPfB*3p%$aW?kMT+#mOwQD3bycG%9%jA~x#{veRd@B%Ri~XhIOgf!rx(Df$}VdX1u%
zn{^qGX8Ld1Gzuw4N+=~0%i%ClK4sUpj2@7I+a!88jmQC$7Rrw;H4#JX<24rY+O8zW
zEadoz-lK}*r(zz+E}9#PtQ5-`QAPB-i3C1k0p$#PM_au5^{;dO`#&VnQQSS`>T1FC
za>-@A;NI_kpK2MIjt!_lw23wfRoBKujE-b9WuYmH5!xC0PBWfvqsx-6j<l<#G<;g7
zpD_ga-Lo5C+H~2Obs)onfoABFCkAmC)ghr$C9Z)w5Hn?7M-laJw6}?n0!c#4<hJOI
zB|br8g30DGIv1;A1%fst0y-mSf@*Z6<gmdAH@lF@i1JCOm%4-GD59fq2ICB0{_UHe
z`#!T~S1MV*>w;x)uFks4fpV7W$dAACBN;rDj%KkEI;Rs;Bx?-LIjmI}tEozdG6k-1
zRHGv2I?7X}QqFVB8e1J<Jgz9FBSt&B6y*r5^H4}Wl}x*1!u1`tt;J<=x#IZb1sCUY
zn!2T_8=~*=nO1L9fyslV7!}SIC|5GS>iJ*%5B~>z_3gWS>Gii6kM~IRistg1_~IEV
zYLv2QS76E!uB_1G5xN?oN|8Q9W5FW1(AuD!n2{ub+ezAHfLdi(BR6D9Xl5z1G+0yq
zv#ek}Hd)`Le8=ME>jR5pSnfk6Yc4{=Sd&IKvHORl;}g=!L;Cg+>CP*ZMaAyHAwgFt
zWAIr+s$ycby5QrV`~iJaQ@A_0vLYqu`VOpSXKPAodz7irsmqOxd{U#tSVIrzk0L*-
zdb5Ty%~tDd&N663vzv8(ROL)FHnThOhL^6>ciHIY2S(yx2$$b`{Ysmmi5{+<itE3q
zmCi;mR*HM2>H9=o_q=%aka{(zoQx$h#dnzGiM^-qI~mjw^IM72rN(53+Dtjl*@r|2
zDhPs>^@GlppcJ;SxKV|28j{B-MTj0#6qMDNXD`mVyu4&G9uaa=y?d|^PBhj^=K<$n
zf0_@V>n`9%M5xy>de(i%+VJo5dv(h>At{`87*k-C;qc}TKm7iWxOe|kF<WT^Aq9Nu
z1%Zr7m@6S*B6wx!I*6-=)U@m>MNyQ*0CnBahluYwtTq^Du?beo8VYeqZQBALJcTZC
zMMdvDvGuI#8sGVi<i`|wNr=m-F=0KpQwp3DG*z2|rfzt2{}GoLS9GmsxoQa633@2_
zeacLct|Np%(>S(wX4I>ideyL6)ikRXWfRm5m!}tWUWR=!3B?qxI6XOL?_ggfR9cgK
zpAG7?pr=~PkeDa~JtG4%X-3TE-m0O#V7)$4PH|9H@|#s=E~K>&hgv_M5n;~77-f*-
zy=Ze|&#GzZ`W*Ea1I}8?QOS5bp)5z-zPZn$Tk_H4GeQgkpQs_{IHE$3eMDK*dN`A^
z2MDO-F{6Dx`}9+iD=D<bnF{cD0wje)7*xsk_!uxw@#<TzG2WeU^6-+_d5B=0H0{yn
zt$~0_x#7>XO&n0-97B{4Bv=_Zd;Wx?Dk;ql^SYr^1>+m<iVJez;(bfoElIH<g$`vR
z#v(>rI#J#<l$GOBC(b{7K-YankHVE>ifV!?C&HN1hLnd8F-ff>qFqEz6cfr6Qjcko
zYos7U6qWZ`gWe4BQbgw(wa+!6N`^KXlu0;O;%rG&mbQZ@FFKZshb*oxSzMjbcS{Tr
zs~w}!EndAf;cIU@Ufr%qK@+q^M@95qKBESsD>(}W<I%rvub;~X)u@mY?oFP;=OA$u
zCMBVr>4?r~UTzsc#Wgw<^BJ9Dk4~ZiO)lZ1!CB4Me)EXg-7VtfoaS^&uzT35!Z;<d
zG-U!4v+hb-uN0-^CxH%9@adFkEg=_D*5PIo>|}(m=McTn7d0q6hD-%C4x(ape1*~l
z6PvJOLsNG&Cy8>`QLG}*o}Ln3oHEjZR}Ukf9M9?c5q&Z&`anO5j7eA%8BYx(w~cL9
z6sG3=UwV~!y+qpz6(Z4!W1I?645hTmw7cBM#oX|Po`jPjK#J0=1_PKJLzAQsB_jeE
z!5DZ`vL{3a<0D|Lw_A&^UYaH;`Qb^CRHUjZ+PcLSHlq(4>co)Qm9&=kix9Pv$gQxU
z8{5Lt6M!6dp)=iZ&}IV07@Dpd1Y$sL>(*2-(T9kvGi_oSkVcric{aXB7c#JT|7+jm
zcmBnX+4=Gw)jJ0~{p2C-gFT*p^dWBEFrAKMt(rvYz=i}GqPA!QqwNvpESq7hI=1aT
zPk!_nytu^Yg{^d^aAva&V%lg=MYbYtr)fX~#Q=*Gi7Nm8A#do}b1CUavSLu-l1!hI
z4u~o>Pa1s`w>+g}SfdmegANv*l(^zmj$ZX7A5g(alalGy+88hqZz3u=j2DfjwMH7$
zXh}*FZ9wDDUJ7<q!k2&h*FV=iMw@H-tH>l<=ff;rYmQWo<w>~{YU{|;&z>`$RQbRQ
z#5A~x%aFquFjnIV!)RPGo>Ww$0^_nX;LuE^gw+~j9n<YGli4=TR#;P@jTB@<a1(>B
zQ75bg-*$vPaD05qM?bokwOiSs0deEi85{3Y5NGBXh{3Woo^g5koK(#C&b?E<d+!;A
z>bbKw<J~vkVE6C{mlR>q(k)k{d5sR4#bB+V63(HV!;}V9Saju}aA@abLTxk@g;3A6
zAvq`6c*aUTu{L7Lq(i1QXB3uUi`6f|rz#t>$p9wrBkR2&??0PK(mE0-O)ZqS(UWb=
zi~BTB9x;3EHoI?pk;|1wr%1P|>DwiBv*N+yZ$o&1wnuDl?^0MxjNseAY-bm14SnAd
zZ6b9-wM{CJ5T|*~K+D@uY*l($&pu$sY1sOPNy)l?$z$0dF*hj<uCxQPym|li-^)NM
zYpto;=&FbHHhll^35K`OqQ}y?$x~WmjmBArQI4W0X_qVJ%X56Q6zy9I#1JuB;u)e&
zG6=!a_KAAssh1sn8|d4PZsl{HW?loyNIswsG|LuO7(wWQ!stTuyg{S2L*aP*=moux
zl;xCF^rX~tc7Dv=*Ivn)wX7Ak(xayJV3}bVslio~^|c=)8$Syh-(vSo%4$M2s+ew1
zsLG1fe2&^$admPZU-xqM`c{%GlnlX13YQ|uL~PQ;KF~Ct=EX5v2M28J6?mUm)eW}K
z=U3q<Oi4`8g$~_noHK;FV}5>xvnAG5SZhg9;bIh1?Ou?0iSJOEERuvdmNS4AioWZq
zR|{5)r9`8xYTBly?|PE&2zg+UviodQnvgWchzXH*JuyZWiv^+c_^!u?j^qQ&#gda3
z&*{5vnBqud5+awES6p3Ova`33DKsRFiYBjMv!=oh$F@-t9i;P`P<k`q+W_i$P_Z^a
z%zsw7uE^^`(zUYRgpyMsBIay(j}IMvyCQb2NWIY60T{}1gmVt!0-8dM@IsC%G@1Sx
zV>ZwkkUsf1S+JBHpuws@krYo)=GdYHJQj$_Wd@M^W1MBSyUX2oUgMpwe}Q5;;_=hx
z#I6Mr#u{-&H-^sl(zKKPAGE=TUO<eL_k_F;Xgx5h5~EQ?-}XHJ>=Rs3a&UBnn~aGe
zu<9D>HlSjGH50T+dGAWtl<+Fjg~+njIIT!nRB2J#5R#&8k@{l&jD~!yUfbK(a)->0
zOM|>2Q>M{516cqnkv3_3A6eC{_?wg!TRYo0S5SpW*9WdHuei86<?8a1w)R{tm!#O?
z3XQdv!WBaCHi|K4d~w(E=HV2YmGmbF{kv<u<Q`jbiXMiFY<Pr!tzJKu4QkXH8Ggw@
zY*N~=3zazanv_sciF^q`YqXO|u+j;q97AxgQW6DWjlmY0*WQ3Pe|eXbYW&kg3Ww;)
z2wj3MCA%=`+_Y)6ro*Bn9wmiHh?*2l-W>coZ%vU<ML{{6(jQ*|17*U9Mj#9EeBaT>
z$lgK8o`SmXP<q7jVnuQzn)-@oKT)(VPAE%5@}7~?ytZ$7d^V@{7Oxel?nquyR*IrZ
zjEaK8qU1}z{5EyB1YMAPLhFbQtQ*n6rBfv(+}^mMnWdmn%0qC{;2?;dTB{?-n_EUT
z*O4(<+p*U5ZQ>3#xqa7O>xVagQ*I5MHOv<)%F1OE;oLl?l=YQrUDQcpLPorz93V+0
zLW*cNPzYI1(uQHfz~79+y}kZ<^5;!M3Ut<*rA+e`=i1?Wl0@C0@F8&b^<Abnr@Z+$
z-($&wPDjo@d_X#0FdY?w47-?P0TMRI;KWvn$^I6j-7$7-34H_|Fs;E~c0Bp;Q(C3)
zG3!pWmJJw90Qxv+;x^GgL(Lus`cqOE74QlIolLx=<a`Cy;#E&f0h_EKn^{K}NZEd0
ziyo4=*7@j^(5VmuG398RNUS`nmqQ|=q|wBYtOf1qwU>Jr$N}~-WA2XNBEiUEtvZbg
zGBElNKj%GWI<4~Lb;x#=weMrhz?@Lq`#uo+nB5ztm=!argmH=#g?$iXBE*a$4B_eq
zmdg%%w!~S5bwa<gPGOw{$d5;3s?x~)PFfHwor{8N=&2ma*)&6GjF!f!UMz9Og3lI^
zdHsrhQD+I2upUy3^r53&)uOBNQVjXzXrkrnxZ~sFA5to)%E<m~%By$ouz#?F>mqTv
zB-J%K3R+;1LQ5Ny_3T2SN=~6FgBgv{PN9nuQ;Z}Qr?B80xB^v{U`uIUT|qRNtziWH
zNdx`$rxbz%4KBNhtm*9QB6Pi%h~ZXrns~5<nS6ob;+S@Mg*mX4g{8Y%vb(!SvwFe9
zhu@>uFKF8p)yUvVhtZA@dWy<0nN2X(kjer^V?wiJtx(Vr?S>4=VLuuSus4nAK(QTS
zj0S{Julw!HdkAv=#1uEu12v$gY5ix0wJ{8FRYs70YQ0~l@+&pO;pC<^k)w}<S)&ao
zV<@YVRlTBlvSPNq#s1!njC^{U#ggUK8KLiJRtvhWp{xq5s|bChyIRs-Es4ITt6RJe
z(oi_?O+*>bXbe?VqK%~}3`$ivD{Lcc9DUzlt;0A+y=d?;QB4Yd>FfUu{^dXa$JAE~
zaiTSw0$SoTRGwW9c=Pq^@KgV1Ff02@SM2}yS2^9mlnzrmY*7iqX)M~5gjC=|D+5^X
z<wm4H11OUSXe@@{G&%-SD`^E6XXi|}_b^tGq9+D}u@+kt6yp&gCUn<9iu6rK<2<Qp
zs8=h=>U9p1L}o>;iTR8)*BTL>67(q>CrRX5*SAC;G1lS=k(Km)PxKN8n1VFhF^UOf
z%4^;Ggzr0ycQkEH@IL35%dZ?`<l_8-%X4uZ_aTbRBh%p9uIJ?BnCbSGAf7JLM~w|m
z%od|~B@EBPfc_1$N3&V=YiH*HMHhyO#C;95o*E1jhh{P6MiTjA%K7VrK&WfFx)o27
z!imhoXkv;4G^<t3;ZBK5iPhO$Q0AQ3N(gz@p9op8pj22pp3AIXJTk0vG~J3Nicwh-
zI>7<8PE@61yffz3tvlShdz-yGH;7vE>BGl#k2<DXJJ`aZLPB|AI2dhmWK2FQhCJY2
z$@!PjN7hW2%!$iV6Rj07^sKH{H1jK>NsPu5cJ>c=<?bD-YK%&ertfI`j?fEVIeJgm
zb@bljlP77RDm&$cb)u9s#73=$#c^QFZMgWXclspjCwQsHJdwWl1eIi1SXPY636t@R
z$#g`u8);L|YB}fG`4!Ew!FPdXv7&Bjy5Q+raS66MiaU9VqySzic_J13!q<O^_h0wu
z=8B{X0cgl{<Rne99vDJHb1fV7=f21MRlj~N8`PbxeK{zz0guTPsFY17bkf6+Ts974
zg@T$7B%@?fD*%tpQzI>!4%74YS4-@H!#`i(mq+Mwg31nI+FF$LM59n9tsP#6r#S_M
z?h~m8l32YAwARRgAO>_*fYXq<^qP#|sf2TomMt+R%5fms1ifLgT+-Wu)6+z~dW21p
z@u(#BIln6=#)anAPRX;Y7OxBmOCJMz6^XIH;Cb!!F{6p04Wj9@TKK!7dC1fEgcQ6{
zC_ZQ>`dytXmXJ`&XX@@y00?3v;s=G&nTDIYl{6q5>%#!KW<xIL`uecRq)TKf=`b}@
z^r<KGe)DXHgmyz=mxmMx3N{!d1tni!>s1F@PW~B!z~o75MkB1grdJp~?(j1+O(f?3
z9z)2k$D-RvD9N^qK!|kF3uQR;eCgM}%FgaC^~0yAWluQ2pmbtNm0~Yis}#gY#v0tX
zV0yU2^k^41HY5`WeZVSBxacS+4)v_#!GmM2lp>@;?D5l}Ly0o^M2s9D5mY47k(4JT
zBSVLTI5De4a-B>lgGEI{G)rO%=-@EEk~^mZMsMz$R@rnag3+FkdTa=!WbiuCn?N-h
zGu=Jn#e*}HQnWUrVuucj7&Rs(f(d!%;RMl*k(3NkC%lnqujGhzm{iFD_c_0|rj<0J
zMq!*;ugQAV()&J32pr3KLk!u>N<v<VArO5OcB;{NGM|Lzzu{XDL!$3INsA7~SW7V~
z*x8$L^Ufi=`v*+MQvjN_p`1-gN?$V=+Rzha5o^?dTx*3Z3f_3@EshQjX;(|Swxw-b
zI^VLYYuZ&u?*l0Xp*vZFwMN+6*BU#t7#V?~PZC41Y!hc~$ET<FD0N_~Fx=eR;qKub
z$_V~)h3$F@^2}OwS#P6JHaCXqQmB{CqRR^HD)gknR1$ZCbx@YlsFovi=}5&0oI_bb
z+LFoWhPJ~@X@frIy=+a!&|0Fh$c^DJbjvQ@aa>{TEn>e#y||>9?Vz_u1RBnprv!J-
z$SOwqHqP#%wW04Cv^7j;BcT<>h;jw)))CG5F_X;Lh+1W|Pv@p@(8UilWGRf5mi<r<
zMoE9kJsNR+HTh>T6w7Jg@vdD-2YPNAG>e(~y1uX1h^&Lj+{U4FDBSb8Fc_X`VNakF
zyN3r<<&?$Ziuu(Um**$>XIP4|q?*of&XGc*S*?hDO}$!?Qo@c3Y*!JZ#)pn_JjP8%
z=%^S?#;BAi#}(zM#FdrMHbaRDGAW8^iZms7PY7_fSm1-AnijnI))qgwamdBVQ&y`L
z+q=7Y-<IY`VzScu%#LYu{;UV4>-!<kmiieKk1>HwFhna^ZLqm62|5yuk%kRL?0U&j
zH%SIn(Gazv*9vRIL{BNra=xPP8%E;^QHe9Dbp~q{wlLC=`4L^)5>mnkkM<pPS7TdC
zHMqFuHDR&|ov{X!ox6PunS$DqlDubCjVKC-_nyA%u$9A<1+H9S?3^WvrmpcpK2uED
zOCS&PVoDNKlL9IUSOAHb0*l3*)vBfsA)l`yOK4=sb8&vo#n~C#`+GR!2%*bnMsdxc
z*cdqu1T7i(4O;KPg?@PU#88m6ydbmd*X2ke@AZ;Rmyty=)lt4DwBjHhJE8e&GcXZ^
zA{~RUxMqhp=<a>YV!4uYn1~L8Ihp(%oiZ~`co?#~#ms`xhN>#*YlTye(v;Y;VtP1X
z@6IkeH+PsF%y4eRe7WM|k00{MM^AWka>6hF)~|5;jn{bb>?uW`=#!U7A}uJoin3=}
zYoyAv8oE5l(z556b)%JMDA(%86arDpJzlowT%Dcp$%jA07LKjGefE!TFxlB=G;?_$
zh}c+RR4I{MCO@M-;bXw}Jt1_o!Q*3EBhDsi3<m2gMQJ2U+#1TFpeXVgVU5Jr1Vz`b
zsOuG7-La@^PR_1qR&Cw~0wzYfuBGc6q94|#(05H$;ge(v_njBrZd4dEqL?_WX;F($
zF#UlfU??}FF6Ww4ubcT{?_Ezj!oNdM|605XqqB=zVjXa+(z<t2G3H72K+8*Os)x!G
zA)^JYG+HLrMJ`)zA9cL`l^Nmkl<xSD=x(8_3T*?*8B}RdHlcMcHq$V9&P`cRM6Vbe
z5HqYRxl+Qd6pp-(g2eVwrc*?W^AtOg`eI4Tgx>WeD~`ICi-antJbiWrW=0VMRu%X@
z;R;cKD`l`2wx)(YTFzII9xEHJ510_yoAlgy^A1f2_~^vlO{Gj7LW<m!vRQ|id<gy4
z=gD3oX@^QdsI{;zij@(1ow|-FlaagZk_Oit5(nBZAVFJtL!Vq5Bn?fc6iPaQ51r7;
zng*1G-V=jG+gt!=t!jQ7L3d-$@4QCIy^hbOp;Ki;*B$6x!<25rI$lGMxj9X1I&X?`
zjhqsa<c4~Qeet@d=>xBR>1|STg`YR{^FR?hT&W;+VyMzNNEYJ)<*a0OV~6qKHpTV`
zqcy?z7#&Dj0^UO(I5;}shxeazzNqn0OT(NAxH4%q5OaJ95pO(6_t+enV}ccAK@B>S
zB=@9`kSCZ#g^s;6k4gf&jgg7IMxj+9du;Tm80dYEHGyai(Ro}Y#wQQ%J)*>6fC7jn
zplrc7fQ}xeEm2t+erOHJ;Zh^(%nU7$$)*C4^fTB6U1_gTzXfp&J`tV48lkeHG<Dq}
z0&PPYVk=T4L{G%vv=!}GO6VvNMeFr59PCEpO2=qAV&`Co8#fQRdG{4|b`PjVB_RYB
zSIcb7F<48b>opf^KC8+(od!nZ5@!!M*xwndDq@nsL5MwF({pxw#=Va|rCu}yuPF+L
zQJNUTn(CM`(l{6~`ApMONz)~b50+IOIC*-(z2~PKZcX{hul_2=PN*+e#FI<Z@(R;5
zm@c3LSd%}$)~McN+7-%nq|3QbpK^?sDhrsDsKTMD3MH@_YCMu=wJ5+Duu%}$z&a*j
z(1@+|gV_yS4f-*aat@sopFu%z7HhWYmZ#i*{2hvELKU9NlQSlhF?+LD7>#bzG>W<l
zBm!HLsbtgZoayTmue|vdKY4JU<=H8tO7_Xp7T0;Uo7)zF>y7G9H$&F}yfo<fH}5mh
zb=Mo-a5l2WkF)k$OlH<v)Qp?vQQM|~AAY3Lxk)n&x-V&fC79m2igLC?wY|$;yi4D$
zXjfOP7MHa3lEqbn_pP8o+U5uqC`w201+zm-3ZCiCHpUp-sAM{sQH{ss{$#CRZR?hH
z)zU0#iAyOQRvWYxZL_wTuJ2G*@%H;~u={&k96x))YQAFs;2;l}6d{V?47tIH@!B)A
zHr~n^g@eC<RzH&lHN=4Tnh<k5mrjrkY?6jxEPV*5=m;U=qtED-rAMI%B(Fh>v5V7|
z#j0k0HD^4TVT~0u%4a!Bi0fSIxOQ{SgZmKW>{3yTWEAK^DCs)uHI2&6h=-&?mL};u
z%+8Ib_jFx>bNL@Dj1OZirf{%a&@VlG-Qs(X_Z}aDG~&6pu?THa8S$VPkH?q;&H4ES
zi*7D!G!KYVN-XP!r%#_?tAa_IV4a*DvTThEa<s{3_9mZOSuKWJd4Es?Vo8u@GBfKU
z0~teQ+GrkhCvjy@IXVpAqhmm)h_7q<Mw%XJ`ZUI*jK(Uo>Uj0dH__cCtIIPMbxX2F
z8qeq<St*H9SVusK1V;=i$&}R;dPX}(T%Im?<<1>;4vyHpvyGdL=xR^;|FHLGzm{eB
zeJA!=!`}N$cZxY8BQtWYthtJ;k)ud9HFisCwXGH)!8TfGLv9#;G5qBJz)uF+upbP;
z-3Ho1Ly*y83!+3xWK$wVQlwZc)tFgTnUzD#!@c*6dsu@Xe!sQPP3koZGOGF%qB1ig
z?!D*iz4lta-|stl!_!9{AG~*;AHMgH^JP!6j$F<7<~P2>&;7Yy<kho}@z%G$hj&2+
zveu9YWEZJyL)A1GvSi9yr)*k9RaM7nQIj!e)ehRgAV5ISXUit2jAdvCp51@M)4TWa
zRZTOkn9OEW(>aq#gY(j$`bt=ZN%eR|YEU;ym)3yvoWzSkWKf9^B7NJj9+qODmPGgL
zOd@szeIMv!Cz7uy_tXVJYlj%vY;|~(f;cKyUZTaaoLP&y!7e5u6%2_K5e>PwT$)w9
z_TqDxxCZ>FBNj=@MY^~tbA8I_<HxT({RwSQ+ua%Zh>%?@M=(V_V^Mk%d88@{Lqn)e
z*=CIKWbdgODV2@2OiahCpQ~t20>k~9oNr*72Is-3whEicPGwHksKZ+nm)jvj@3e7|
z$x)$wN<O4a-H4G)z&fEv0vH#l4h(0j$k)DcPGT+ESR5%@hN|YPzw$a~>qsAJirKhm
z2i0iCdJJM#bor<zr-Ac6f;V{Ju-@?68!r$s1iVh)C2P&#w7D$0j3}`()<BMO{j!Ms
zrEx&1z*S-Ritf@_nedyWrWZm*D;lv8y(lh}rJ2oUv}iB7i7|t0<gm$vZs<D}lVi@$
zpD~>-$RUz<0V<E5D5cBq5dtybkV7YF9gxNXB#&srNbxCL-lUN=DQj_6a>vSh)B9QU
zr-K$WLG)ocXZfcV20<GbvSE3?W{5pDWxQ32w6U_GW{a~8^Gh{{*AJOrJ*1j7m_`p$
z<H2h5&Z41i;b4fo_Tn|({p0V_MQt?o&B$}n$Q;jU^J&FYASKc2NJXd_uoYMvsp3R3
zGU-H5nWG%CSq>(LOx05@!;$1_qK=x4OLz>f5@tf}4Fi_l(^F244rr+G>W&8iYaBiW
zY_3T*jWix<loPVm2}0;Kf~1&Q{3!CRCzDzj6RJ&4A(3pRsyrb^hL{-oFjB>G%1ES2
zB7{U3qJ*uNToHp#X2xZyF#vXqsY)@k-S(WHZK&!~`dy}()})wOueLOk21CPS(uwIr
z5kh?k8W7F~#^9@3)}}S26p2HSLp2K`+Ef*{Zr|i^G2@&6?oHOKmb$KpLnQ4z$S8;B
z_(T=CPjN#PnhG%|cFFRCvn@A$#V3F23v8Dw;;tp_TGDz$I9n0UmgMCU({;F%Xi~-}
zgHvQH$AIa2jA=(E1m1x6xJfMo!|4>en8RFuO{dUI!F$oQIzu+HuV>}IipP@O?<v}z
z6%xPIwVMr0X2)F4FR<R-$90}pU-&e2bB%sz$ka?19zP7MR!=!PJRl~6vz}-(h5=^+
z&wc)Ne(<k;mnvm&hDkjU+F{}HTo|OA3zFIk+nqk|^8UvuS&=m8OtI)`%U}M}F0>+H
zQDd0V(U&q-Wp9$icx_BEN3z0-u+9qg&lY18gP68Rv1q+v;wqZy0f(1vieq#Nq@kzp
zHuRgd3dl0uW`o7jw=E$ji7;~+Vqm*Er`@bXn~MxRov-M&9VWqIu>dp28lg%GgUN9H
z>UF*m-(v^^v)P=}<(WFTXR6v`lMD%mB!i}cI-9J<Yvr03inT6sqjkoQg5o>$J)}%i
z)k1xZB0U*GU`U2O8~Ug&=TZeDOd^SQvR<@$##L0)DV6sO2|oDX9@bZ!T)u=e;z+vN
z^@MbeLmRfra&&x|*<ztlS5s0p^nH}Gzfi)}P-x5wPdc)44xf^^++)Uww&cBNlzq{X
z%UNVA$qLOT6NZ8H**Tl_mR_6GoP|ARjpPN_bwe|qaB#3-aj?Kd!*u$XyZIi=)e>8B
zI+clXe!gV-@FDKGThyr@qpPZ>Dg@q<En&%|Bz1~SRx@Y!51p_q#O*ohAU*2fF(bon
zt3Nk|K#V;(CF~H%p=Ss^-Fi*zdbvlSMqr+3ppS{?UV0H%H4JAPR;w+sSJMiQ$Z&E2
z2KLWr)&YS;M{B&DvCJ-CV^eQ<ZE+1#O=)vrd)BkscD#S@3GY2PXVVVEoJh8jXDfP^
zXPMvq%|GB*|H?1pOyYar{s9pS#*>Z18)4K`qhqnfB}q1rXJpS*Dc2T|NG(?pO{?r2
z)qsw@t1|Aa#NVVLuv@L^H(NqglQ2}3r}j>UP{uRFo{%66K^*P9#aH5>T&Y={YdrJA
zW9nhx%6!UXg6tEi0;>awJY}=%czm9*M<?vIEt}Pjb=$MvbaZ_J*AUtc<HaRAhCtix
z7@|mdrm3c?J-)IuQ%~(1LJUky&rg5iB@UZFj8>3xic|VN!+VTG9L4T?mdcOY>nF28
zU7ad@*T^JK`RO8tb~+stt*JFdpM{`!CEB10SYP2ROq&_LuDNrk;@0Oa;e5led5v5j
zVjC+6QC*2(yUt|q#7KywlUT9kP9g3!ilypNs)Oi30(l6;7$6LY`Y%h+^deAQTkx%a
z{6qTKkd0TmU{Vu-M0IIH?FPsP3_0V~6hvm@fn=p1mRinnyl|vVW*aQCI&tUD0h2|;
z4nvL^Z`6$y(K0z(tgeIVHZvwECBZ4}x6lyu{XtKrMh*UCbG1(Nzb5S|Y-96RkT1DV
zd#10rnw8u@sUMUMS!lM3SjHG}4%%JIq@I#OBo3L}4TRQ-zOC^%uQ{$s#8gHzMaOJH
zpg0Zh0o8(@s@=O4?P#IR<$``GMJcKNG)1AJiXo9uep5oG(nA#8j20UyLW|?z<d}7T
zPVR+n9Ad^tOZG6mTyb>sfP?GDRP&h@+@j4%n)PNf&=~R&gfMXX_6eVO@;vvRo{E7N
zsAe<clEsBewAm(z%cwOV;0QKHqVa$i^P_CAvB4!rohqE|m8#j$5NWKz4+)bx2BK(c
zstM5xou;l5RoxJ%=vzbE_RP|T!-I+HdH_?&#5`AICq_ypiLbCxBQ&WOD<p=->M+HS
zlNY4_qc_(C;QV~2R8&Rdi&UH}8EIW$7$RLK@i%2)k*y4*LI}hViPnjc$i)LmnFD1?
zY^}J6Z&$q-Fr2Qr_rW8o%Hw^7F_sXZnNB#oe3vWNu5fsA%zU<BzL-%r6TEZM(8s_K
zL`SneKV!XGvfXakZFdZv(33+D+k5LXlS$3BE0=lcr8~U+&igpy*mQ#y=!y#KMwUuZ
zf8ii44XiK~EZI4(-hQ6X|NNhqVMQ<3DOU-1T#;u}?6pgTuBG30#BIlV)5501h7KEg
zF<y(pmhef|tF;DCz@G2$ZY|SnZ>3qS8qCoVcD}&Qr<mykCYAhZCZcDpC%CE-mVi2f
zU)Xxa=igc*>&zP#i)+khSLpkeu1jF2xVn-2WDHF`Axw^V_~;Q=uU#VyJ$=8XTb~kE
zPZ*Zx9A27}ms?1YNmXGHS5c#Sc{!Kl8rqSLJQA#BWTcx_&Uw@UI%CFoG&`bU+9+tT
zGdfgiL`G>mOUz2yL$XrLiocZPVY^Y!YIKN_wR;vPQ#UrdjKg3vUK`^a)>ULDxt5i$
z$SFXI;-sH?rjv>or;v58R4GS6c&+|CgCY8$rlo2|Q(V&Zw0QBQTU52<^wBfE^UbgE
z=<Yo#>scJkseOgXiI@<RC6fusKuiqDr7TVW0Du5VL_t(0VQf^dg*_@K{pm&e^+%6y
z#1Qe$lX4XI)~Io1dJpr^OC$?Xa(yc=cxRIJYi*6ImF{ljn%8wh-v{n}aF>4QIXXV3
zZl?5cjo*Q<#I)*QcEsWFA@yW7(%_Sxtrqa!sLoxlLD7%uuX3%#ILnUIaVM6(5(kqa
zsj5j+k!++GiG5E$o#N|?`C`WI$urun6?Bd)1Dr;f2F<L&dB<cjVNx~Z1ncdF-O!SD
zN!G0q9q7<U9zA@5^%a*cU6%7$|5sJ=*(8=LkBl{&jbznpjUoEdEF?@F{<yqfW5yaC
z7Gz>dTF*&sN0f~Dei-OCE4tkln=+NH=wqg_Nh0w=rtyiB%QuLjBW_MvpDu~nken08
z2pz1*&vcp?phN!g{6LyllbGJT&e`%ItIp9bd!9VoaR1>sXYGbICqjbkD#@uODwttN
z(ev-V_HF*`U;ZWD_?fTJZ+3j}<eY9N`+wydyq{pa40W=JRL%mz9<t8v9fc7C%bc^A
zdFkGiM&*I%W##V#$v-y^=hTfHhw~N1XEcT=F4RHH7Xz89s+mm}G!svDGI%v9GYo+o
zT1=>U`PG*>-aX`aDwiq7L=23EjVWjBtf8}p$<Z;-?24|9*eDd^z6V#q!Qla$cFk(N
z5<|GE0TY=`8>W+qJg-9#t#(cv)rL>pxy<ti78eIGtAsIYS#gPRKQm>QH-*txe)q@g
z^%L2kI(0&m@w#pzE|vC_f~VRX#nK+9j=tct!5fFmkww+<>0g+zuoIRK63>5pM!ndv
zyL(LX*DzIu@di^_$-onHK|nI~hy#Wo+^{amq&XKW`>5jRqy{ok8oItCcP%LhdSFbz
z%>uL6j`-f6+@tM08Bf$O=z_TPDbdCqvx7)J6Olcrs~=e@kwKb6Clr!tV>uoi)?^MQ
zk>_5$MW4ihPqacw?X+{blwcw@8SH2-r5j}`e*xVr%O-BIW)M`*7>q`dkR&!jrmkA~
zSWlso6~uOYddurWF1}_gTIA0P5asBH-8i|-#ep|uLg+;Q+d8Vc-kVxo)R9`FQ&Ow+
zU#U>b-oJkfSxs*Qoh`KB@$FqeQ*%nHj}f|5jMC)A5XZ)qC_2RuGHqRPd3H#B`i!Bs
z_#w;vb2ZK7Imgc(u(*7PpVk6W8!$#GKt!_9hRy)9%xstwy2z(qy+MpEyVz+oOeu67
zE;?Kivzlnj$stg2G8RY1M$roTWH5Qa=1yD*lcjc3F58xCmo_Yp6K=U<eZD2HBHOJY
zU21rgr*sE3eo@hFJ?S)J<G|6R!d6%0y_k&4QKB{!$+b3PvKLoeH5D*kCizx#Y|9-q
zI81UlFIpjeC_mEIy?bY49&3>ZjL4Ni*R-tv!W|b)lsxmX&kS8q%@N89Fe6N643c|V
zRf5tcZ8R{Tv_HB?&p=E<PdtCR=GmiD-v8daRFjI?q@kJC_(q%;t<vaY4D7Z$x?Rt1
zw__N3;*fMuqm2#*OvKlYrw^WS`_>JdGgPjk?FK=p%N5V%=*)X6YF5Nin<J~b#ONGf
z{<D7;<7-SxOcx6=Ou?{Qu4FQ;#T?F&r!~Dd#EB#IH7+J{KafLE2U8tdC2%Q;4n8DY
z8gMD%muo6E)Q^_<x)!~#wPbIw^9nO*u#*Yy_?YJT7vS(1<E#BzDlY7%AgudLR%;t_
z$~3-K2URDOwyeza1oP>FCm%dvKCiJUvU&10Y4wbLwIhXuO$oD@V0S%}W`cDpV;Gqn
z(%c#S|MH$j_}Va<Y+!W=tCVvn+E5hPG-J^tf43Ax(m+^O2DkhBS){0AGge}3R9|K^
zdZqlkHT#BFGux!#8V8UU=*v2-HR_(JZ${UQ{I@Vv<YJaQO<AA%JY`0vgh^%`ER3H(
z^@Ilp6Pl@Ex$O8y|KR_NMOYAv;{z%$`G;2X2BV2`0%b>XvWKUHI$7)YzTglo&;Lh1
z2}^32CWFpAMh>~EmiwE;MLLw8)f#E!vLgM3ePrbtSSyHU3XapWbJnXh*REgV==hk)
zd`dm7NjcLqFv|y6@6<I`{wWdyb;TdG|5=9)j6MrRCymN~5@Hl1F{t|igRmb8MOYB8
zs;OwE4OgyT<;CY;;Ol?<@A&R_-_qeoB4Oyeo@}7o^=#Zm2S<+8`HH^pWNc$4wk)NH
z7|<kA+w$PveR4`1A0IPG4JihgWPGk5*I<)q__bMeR@|aR>#2i8(%1l^20)o4x>wC|
zHBs0qF%HtG#z@?C^xGZnYDJ8LtUGUUF_N7xWNHpCpHNTcq|FEP%O%_8R!sj5VgRRJ
z9m(i^m)JWB>#!stgBAyH<tpF!wKsXPsd%>B&~+<v6hI~#C>|chiquRDEhPbCz3lkT
zAAOt8|HWV6%YXSlX8R9+od<XCQP(xuiTo@og71t4=ZI;LGrAyw7hT%)Y%$(yjU3r$
zooi-uCBrRmCseg389}<e?qNgU6M7i}V<WcmG}Z{)u|k^Mn902~^FuE+Yg0*LhO-s5
zgQ!iS+6T&T4Zt`K>YBxTMvfKBvkvDn(ORm;GnrJZHybwV9$(eel_lrEw5~B3&QH(r
zlbWWP(D#AbWL|seHZNbF;ZrAGEM`10_1M&XYmEBV2P8=}=8vO668wZVsFid2To`@W
zbn2yCc;n<@bd<@Sx>Z_(wHb#*F<ieMdHv^mnrUYDZxhY!L^^E=@kLBE#a3Xwfy!el
zFg}q>JcEXMlN_e+$*EFQ4V~_)AU28SG{;EpI%3;O0k-RKVT+#)Ol}^qI`4S@`)gn(
zNH2gQq{${yb}UzGCWnS0Gz{yGtO%dUEymWO%u8|@I7NS`M5}V^&LP=GB8`|bIV+j5
zY6KzFgNvJ>9WBKHHKox4F3aJG(uatpFj0MqEBxQmR4E#(aSIvMh@}ff;sv9my)jYN
z{kZ5xq*QRWX4kbi=Y?)!+y#nOCJ#v{nO($m9@jW*ZABwnKA|ZrWkK4@=g4NChnm&+
zr;o$FIZfJ>7JK=qXsIIIDJRkM#4MSjDT|&V%V8RGA_YrGj=}Gk&nw!cAX+%64n2z-
zhs>@V;v2WO!ymO}Y1ICvEXXjm!yOvt!<>)?Zk-+Q{d-$d1RIeeIVVUxLlzpM6#a6@
z*%D`B5?(*E&yq)KHQe4Ni#LXky}0A^f9st2b2Em1Ms6d+-N4<y?|A5Ma`pAsx%5*n
za{SyCQV&nRexG;#@awFfowDwC^g~B5aV&nys9Jo6oHVVaD0QOkq@b8t3-F}Q!%lR@
zF@5yK<1pm$3>hB+d*Q!xc~a5^>Z>&U2}B3eNl-YAHb6ODh!e3&8#jbb*%wORG}cgg
zF@2~st1qjOnllz}4K8MEO0uU!hpi=h)LNmi)s=%>lcN+C<(_9XiSY)qsj$Y-ZhB77
zR@#(VLfelBNy&`M`9h=qA^s_<O%$~I`U|&t;nhzOb0)??a)h13Pp3?bWfx(4`jla_
zkujptqEG2-Jy}dGvN(We<-r;i4CbusAPXH{XaW%vdVJsEGE@mX8Rs0ewO}HyF}Ux1
zhohhV0`nJN5(B2fQwB9q(&F`k3^Y3{5lFC~eCz#4ipCrtUE<OEcbU-Q+Gn^pP+Lnh
zh0PQ3hZ7DjA5nRyh<j3`Bwd*MmBL=#eC>to)3RY*Kp4m3P-(Y<Ix5|Fr16%9QW`Wd
zexcZQP(G}=ct0tZ#%dIT1qR*I-JW($X%r3nhhW50D4OMR<@N)n0!Yezl#N|!jioqG
z@|@Wc5t8@Co_+vrc%`{WMuP8&nTPiuGO4G85QU;xJF?D*F^GoG1jvb?XtF8pvg2U%
z0v8|IcR%V^Ip@ULHWQ*mu2|!!stMNjqNzp@vwn!;O015uLO~VgjqwmuA}0CFFk`pf
z@cz4ZS)OgUbnP++M+eO2Q-%<jEVuN1#JPk`UeL(M7$cNokw?k(k13AlUKySUk}l_<
zB@^^n%{dWc7?D=11C?xz%BvC|``D|ryO!nIsp<nQ#`%$2J#>L(Sh3q~IemIYN{Mdh
z*ll+7eJ9SW>R_!96ftDBn;j4DJz(eu4lkWB4S^6M^<*Mm9qL9b9s-GIg66oZ^S!Rs
zF=i~6EcSvNCy7W?v!+tt4ZD_p)6%UsgucTWgP9n7Ok>>y(tO=`;SLbV?V9$yW7rkI
zKnH;NV&b|!v(fC&LES4%PPmC9H&gCDSaBX2hSW;7cq(4Udd6bZu9LCevp{1#zxU66
zk5B&mr^u<sn?M-Ekjq-TufY(*tdKJ<N!wAh*`;?jNgF{m!gIb7nT3&iMH~>X28VGT
zUwf>tM7m-j4uQ~i1dS7_YCqQR**ao~^hV6StQUvnuI;eiGO22OWeBljarFxHTZj>t
z49*#nvYaF`FEg(kzD``Za>%2nr*v)0w1HH4V#@S=<nZ`_y0V1O$?z;xoS&Z3Oe#Vb
z2<tN@vxcL~C)~Pxg^AfHkdXCyYm-{=l_D|Bg(0Zdw=h-yX9V@f<5g(`TFz&OPDhQP
z6{bWe7^f3sG5Ijo;!LKlEUvN~G?uD<M!0gw<O>cPEdAqSOnp_17o1F)j5C-Dt(67g
zF=BD2N0^B6jW(#V4M_f>iDDR&V}Nc)>^oA|lDjRwI;T0DQXd`hC;#KO$Z3JYP#H;u
z7)VM<v&13s^uaSe|M}1Ijo&(B(oER(Eg-a7W1={Q=7bFn=PlM}{LJFI%(dGKLbe1e
zN4h1+wbJ6xW;K>dN)rQ_?l_HOK=qc2h}wc8i4cyQl@>hG%XI3je~wbb$^$I3zY7%&
zFp8jH)P{~4QQaj|mIJ)&dzwi@7<#H^qNZ7T5NM&2)Vv@J7S~usQ=J~H@*@Ro5KHx}
zx~I{c2`NBD>mvPhvB^(awWS&<v)31wX(3urEA1s^ZB7F*h%w7{*KzIAA!i?a0J*_6
zHS?<{EKUw^jR!ALG2`%5DOa|bGdZD4S5+n$s7@N@!vQbfzRA<4E6!J~SmkGlLdwSA
ze84(z*-)iQ>48xWR1g{xc`V8;ncf(V>cGn%JLI)5HMsf#**9c6qdvMtb^A8;TalyV
zpW)~eud=vxjGYMTa_jk9yzs{J{MP^T@3VUMP3BY2;KYHFEXS?UL4}w?s2g|D1WQdU
zWUV9!gVa4TX-+24msIf|!7CRAL2)K51rZkq8M11vx<Q(3NkJ#OE{x_5r93a1C~F*X
zh-}VV`d*n9y3Vaq?3}s?SKiZ9l?;k3RFy;mRMU#-tl{|bB@T`bm^2N}SyD<2F|j^h
z@#OI-%hNUOt`$T->N?DU;{Mup9rMMU5CZ)$kg^?zI0eBgu`Xi+XZDJTY@j)s^VMJa
zWt<a)G7VDfguW;CJsD_@W{?A`F43(wq@fcnm=#o!RLs5f$`?8VNE2LG(jW)B$z*TA
zSM*AOC<bMQj13VV1Cw3Eo$q+|@Skz*^PlGOPkj!zIQr1^-;~%M!`Sc`iZ)yuIU^JA
zu}9B|*>uVWkI$&8H95CfUo$zr1isOxD-hO84v#a9cf=u&`PqVmS_L`Gc>niiNBaUt
z`asbHx=P$t&8QLF4{gi-U4~ld8ECB)3Z^ZlFgiFK*RkjY^=fD(D^1AaWI;2X6LVyD
zwj5atW_%L%#npZnH{<u&|97D&78Z&c>4C(H7(G+7oPDvN{mEX)kIfg(4B2+S_N}+r
ztb44fSWFH%IGXXny?ea#&U@Urc2y&C#4Ey(1kp+nuf{VbiXJ>Fb=Zyzo3Y{l$oG~k
z#xr4%+|N{b8CY1$!O<ZB3)NO?hA51JVwO`DMv56@6LH84CX#b6np<P3>W01_I6q%8
z<j7{bWj>p#w4h?WI%hhY;e3+#Cp9><QO*jl&)1L!ZO-K0S!Gd(o;``@M3!N6&XJS?
zNll$X9E432r3OkN(Nq&2+`G^3{`T*(+qR-JbRu~Wm*0ULBVE^#HX7kp8boC*r5s1p
zIjXy_&`d2|7kKpWF<sYjd~(8cHc<6F&16b7sj<#uhp0}!b{ycCqU%={!WcJ~MHinm
z+gHwc(J!Z=r|WyV?UrHFk%l0*#(@czT8$D*^iw@;xPJRZQrM9;Yu3+}gcOO^UXWE4
zO8%amgPuDkjpQ=O(99=n)?3coNMJ`~kcL|Ga?5#&0u0VtH4`-o>ICM~2-f4R;pq4h
z&1^-#JELp2!fepY&g={}XOc;xt2eUmWYuyTV?_yO4B127K<Ea1Wf{7i>~j{pYp5rU
zm|0a$?)7d%><4T%)JpLdO0$uqjiRBqhB#zmCrzxcbcmJ$&Q?_3bMo9B&foctoVgZU
z$%c=;09Dx{_E>@W(S-TQjIL{`x=1_p*p!$p8X&RVY)CnfgTcCvE7y<3J($F_o^UXm
z@#;&LxHJQLFxiuh7=Dhd5;O7;^^DPG)RcWp>+AoyLH+S~)rt<5X^?17BUsr~jT1VS
zHHC}CsKIeg8o<gy6(h6_RA(?rgm-2{Ucgipwl+fV^$t@R@L3wb3zXOqMzID=lH9PE
zF;Su$a*{$Y^#iHvh~19Vb<nohc11HcOrE>OH~#1YR@;V!pE3zOsWP1H0>O&ybuylm
zJWn2+bA0D2%V%Hb=<>&~l^}>VTXK?uGFh~^Q(dlygUb!sMTS%>RaXydP}+k!YGyEI
zG{RP@6BMUpqs^u%r;|zJ1}fjHAfskH&&Rq8Mvzse--W{UVcJ$wx8o5F9TC(Eb*jk>
zL*!sOXT4fcH52){)?9ec7hH8ylA%I~*jkV&<M(JqIgqRo{bPA=rP+=tQe2sfOK*zu
zD@O?omtT@|nN&-ZViaa~6h~B27T3QZ)WvNM%&*O<tHh9CdgX}8r8(8aV~x1!jkS?R
z5b1>;C)g$ndhBAtHV$_<Aq`Vry>pYd-+E8=O$mq+*^-kOMyYjrG!h4tj1}_-p&H;R
z=3|LVtofC{{TeTP`6f@k_3x<CZCIY-k8b0e1;f25;pz(<zw!wVZ{1+m?U*(WZzY#?
z@!~ap<-huyy!~rO{NOupvFutsv=!O4lC77@KP4SiyS`DxG9s8+qn&cL#O$@H`KVu`
zwTArHSQPFTq8T-Vx>ohiQxffa7&5p(Hio)(qel{1=$2Jg(RLl(E+~y^57tWI8HvVV
zheU9R&_sqDXhKEKl`!Tjm^q0!sp^VmI+46Rl?cq{lCP)<khHsw7$bd%gfQsj)?$6n
zY`$Q5wj!qevz9KvrG;);-lQ!Jo5@tun$LXYOFaMDr^py$=t)Up7gCC(7zAl(d-@ot
z4&<2)8$-X^((QH({UCdmHeKV!p+BQEGs3)3BxlS(W<OYRx<<0FBLbdXu-HD)oF~HX
z{Rx}Xr#%0&U&UX&eIXzA-=BEr{NX>_KagtV22`%Xq=wDvl*z<$`NkdGWTIjQS-<t{
z2*ZqPM|18y+cBUS%SPQrT|qd?eq{C+{ld=`bVp)8thZXYX|rnf_hkG`x-VNNhGy0|
z9p>#De><{9_OxTv@%G^Oi0ij*aBy;o5IWAEKIWY#PpHgfY@YX%|9!i<e}?w&y=Xhj
zz%C=S>b*mh)221;847Bym(w0o8V9KQj1JN8);GS-Y%=9=G3WCDkU($0U-&d&Sgw~m
zdGJWqj7|9L_6VsFJz-21mlA0-)hl+3g*EY~8sd-eCHlBXj1r*{hKdlxV6dud=8GA|
zL%*{OU5Cli%w`N;QT3?O5p2Xoao-NZz+~Ep6Rx#l>edHNpFCsN)YQ`{iOl?P!SgS^
zNHbe}$SY!qQ4vMa><uxFhJxfY>Yh^;lc0q7D;Nr*8}uBMjOf1YSe~Er?C}$xJ$=fv
z$4@wY`i!B|2s>2YnG${1jYc@3IOP^LRf?l3C}|#NL7>MWqR}~dNIZRVM%(rr9vv`W
z%o+Ngs;#M;2IobBWVNPqI*|DA5F~4MyJX55{kh~s3X#;wV4-h!#Lx@7s~7;GEQ_e<
zp^*&jJ1;-aWOhL89}#vP%d@p;qY(yGky7Q7KW`H$Ni<>_pHnkByG<`$<<Waj7@`Fy
z41~S?N%#=`exzQP?^jzq1Hb&g{2NS<uXFl?Z_~`CSYK05XCx=)dgy0}F=3pc@~Sy<
z!e+AG3!9H5Qi!Z<4xc5rR1KF>5Xp<t^C%HXyPeR>G5C5Wk6V!?B_xFaVI-x#!{YGX
zQ&-~VKM*ic1{?LFX1UpM@9`sipE+LC1U0HMNn?e`RNi6}>|(;X#O0$o?Yif%Pvlji
zt{qk7iCthmt+5$gRSC!vB`(le!@*+C&C55qbF;?9)foMwoHH<@#|8Cv8Mm!*x79;6
zkI%u6+v_K?L9J|QpiswNK_jdaD|D;c7;DqG1|%~|P{qnRCOJ_@Lmn1VBdn%`?kXJ2
zah1hZA_6zwYLC08{Fnlzm`bQWRVEGzI<XkfAd7Vf<ROuVp1j+T`WA+kyj@b+bDFEy
zI6E8o!CNh^KEm6~>#xr^e6i+hzxkLS?nFZnvIQorRxP&fIl3&-C*C@-NzVzJol<9|
zkaidn4bHY4-keB7py;Kw;EW;+#Vt__4I#OtvG10LYiw85y%<I~DVGdXBUC$^#jG)9
zjL(w0S*Fo3k?lT<bVNIq7D>6nX<u}X&}I2^{fm9qQ6JXCFzmmVz3_hXh4w;{ltrhK
z5^0E-TBf$nxjp?XkG^ywUe~5PhBzKJf<BdraA`J`{uIO=HJ3&-0zs)$BvL@uL&$_a
z&}DGdKr=HK=V>la@DopV8q1?dZ&E{)y-S{hl(eZcf<{+Le_S}4VanB8Cp^9Ph~;V{
zWwJH0HnV{wlWtp#Q$|`7YB&ZJGEtql$S1yd!nL2dLEn9i$^~qDRSLYOCFWPzp57#0
zzr*6zRbq~WG{t3u_u^)2tmW{z8DIV{|0*y1o45FbfAEiavRo4s!AnNbGvTtTOR$WL
zh545i*=3Z?%F;7vJ@=8nE=F-kGg6B0eW9E#Ap2l6&Y+C0WNn8&VFU}Is@?bjoO4uF
z&5oWFG`qAEDi;o}Y)TYHBJ}d-IVD2h({?S>wkD)ROqp)zna*a^-pLv<mcH#dJ3D8&
zJZHJwu-kNmJ}?Z!m?<dFT3|9=@a*ZC6uqPiN9F}oO)rTpT1E@_#4DfvI3NGq7je}@
zkXb#Dvr)?5FfepIL)Q_9UJzW9q4xNLDW><>T~EJj*=={Epw!^X<9wx;WRGwbO`tTQ
z8k1u4IGh{bxidsXR-&^Q2dUk{cfSX#r#$zof063e3rf9`LfNMBzAr9_{r)5dBT^L0
z*Es6>GI!to7GL_gFHjvE342CO@B}EFu()x9uYC0@{QmF!9`{bS#G>yiI?uASO0!@t
ztoNcH%?2tju^m?2E9LClJMWIMDCK)u)U-&E5PP9PHTxBvodLvHu9-}@_2NssaOZi9
zg>ARu{PZEq(=#dPt6I*EqP;HH{30-Lp$X1PV-=QB9-X8~c7()>d0t9cDBR;-Y$4l`
zI$VmstW@ZB+tY3arj_U8pL&U_*G}lWKr@-JU9~LFmR!1YfXyNg3R(6Q@10Q5b$}_<
z^0;pT#xv<hL%uU+-(ba*7=}R^4x-x=qZ!q3IwuXnI15mO+gYV}>$TT&qN+S~GofvJ
z0Bsa1$(Ia~)#(}UeDC|@Y`AgzHuHl6X*{fBh*4wb@_1(MY46JFDiF9SgKa|``my2e
z`;KngvRSWr_UH-s-+!N{j~>%@9ep2&2{E@JVXUR!$-WkcL<j>RC*6nG%R#bmE0XSk
zWoV4id{L(^)rFXqVYS?_*>0Imr_2{KX44td*@UWYRR3@HQDWx8Lw<2Sq{GyhBn~Hr
zKpY|rL2JugnD#nLEs}@5jv5QLHhk=}pC>UuyQW|7Xxm-~M(M)3*88iBHDg82U^FKg
zm3x?+m>yl>owuKo$T%ZLg2Dm|h(xT=6&Ov7Og5Nl#cnv~m;chwap%*YrQ2>8wmqR6
z<nvS(g3!sc6LUiEYru=^a+1Ba-?n3paCS1(D9pUV_{ce7y)>3NN73!-Fx+Vjk<Q@B
zNWE3Ae+&^{SNM9Ob1th0dLj(HtUH3n5?R)(CAVMwS(a}bOt+@?l}_C<7*EUt$v7&Y
zPlnL-Ts}Bp*{m66GrHY?F`1*;0)K1?!yp6JK2bLlCe4)j#Bq2~apT&8g^f(o1_(NY
zlNvy-vz}9BYt%YAQ-&e?<u?AukJ;-dwn2qFI`>+21uYb&%3BR_7uPVC4PNQD4omHv
z1g=jtKW~$XPY<BJhN)|%*BOilQyVpd&|T}o1RWTW9qWW5nu=Rs2+;SKp(nR%Qn$f&
zTiC6M!xNe-9_ore{*8P1<_0+$yzRLBe5AR&<o2zK_uq^pUyG%DP7H%**PZeD=U(8C
zf9naeqsvNZQ#2v!4(TF!@Hl6vkA&XZ$4cls>J(?P(dN`DYAWr1L5EE)ls>aZvox|N
zD`G7aOgB<cq)~9=fu56~a(Fpd0I^6b#kbuTA^Rq^=ut;Ud)<)AR8oE~3{hO)qL^!p
zZgOhmkx}dODBVnP-xDLrjEQnEMwdn7QS^nnn+S?foO(-xT7tq8c@Pvdg+vOfMb&~o
zC22|-vYNce@8^(6z3NT-Knk9eTIz*^x}mDo)lS#7ASU~!xfGmP8*!_Y*Jx!rXeITX
z=CENFW?a8>o%6RokU@z~_eQm*aWbqRtJ#uL;Vqb)@B}U`Dn9w8OT6;ePRQrqB5lqv
z`6e;UNZkR8b|5*+nq#JyFOdUO6VJ)Q;u3^ak0WBufU7;pByRrP%lx^Q{tCbIcYmGt
zKX}Lxl2XRxvli1BOj*}O&n(kqijjqC9<9w91Mv~RM#SpEf1@PDVoi7fS=mQ+6vS7*
zE*a907_1z+UI$;3*_6p_!s>jh6e%@`DavTAY*N<q6`78Qn8{rtL>aim2r-<o-E=&C
zdQMeUd+lM$gdwus?AWX~bh|+4BXQ88Q`a~kf#dF!^?HZ*^#ylijZQL(@FhlcHXEQa
zy!NS2^870=bLHksMAc-M%aewI&~^;Nz|ge}eNP+)auDaz65T-Mu%@PIJk>BG_5=O4
zrQPlbaVsvq-s7uU^!1cFvOEt?*NZld<+d3{BsE*Z;2<^&QrqC}Kjr?v`WM{!tN%H9
zaxl`kOLw>*T<!0F8CAv*m7Xik$p?ou4^n1+bRvJlf=RY~2a?UC3U0me8eeFay!ox~
z@q-5|hVmV=+_P+@fRqegiaU-t@9I#%I*Y44&iQ=<Q;d)<njl#%R!54jG^t$Bj*T-I
zt*|Ha1=pW{fjcj~$l>9F?e?4}58j0snAA0sYuDMX!brO-*J>X#k;i>%d^nOIZIq1Y
zm=xwRquZriSw+H1bDH->ejd-N{c{L1NErGE#*tFb)tgsv71+U0d5Nr8ua;c7a;WE;
z#oCNVkmQ&W$q53Ka@K*zejCUaKh8(|8odl+WEcX2Qe20AAPfUTFH~9GSmewq=Yf&o
zko9&PFvgh1{LrdysOnm>DZ40Z#5$U3!*tr<tBMpe@BZLz&d<)cbmc0SE?;84IG~wS
zB44VdUMg!!hPlO|HA~ZED>7nebiV0XEmySLEp5A_-L!OFN4weTphV4<Qex<ZWfpUW
zLE~bApmZ@OVn{-%1<f8!W8_OhX|{R}SbO38DkPV(N({sxjp}N(!8uFQRMbt)q?%~+
z<MuI1W#dcbJw<6y$6`dP6lWckHe=oiTgZ5!p&MthWW1N>%~%Vr;>PWp9GqMxryXgt
zVSToy>!To%*^D&c!jMwUgRVoRyOy~?N?-=6#R)(7y>F9@#5x%)F~<w$l9@!nWH(B7
zg0pN^E57uVPw`8C^{)|~!z53;4XoGgD3i05Cq`|&wRv=vr=HX!vn0nz-wlKz0Ky_G
z_X$u|n_Q2q$f(E+if4!JvnlENu=u7{RhSG@$g(!jGsu=9Zb^NxP}LRWz@^KVIlOX%
zyxFmR_f4ji)uz!2voa+@PPh!jfxcZcnH_O-IH8M)%juZqW=-WBeo|9S8*GZy)-m&Q
zZr;Akr2~suKBJCnVXmn>LKq0jgfeC4-`5DT#j0cUh4HV%3GLZHKTfZo$OhFL)sz|}
zh0QQd8NKm%0~Sv+%```u+-LgrfJ8t;V=Yq~MTc2!;jx4E3g+MtD$(`W%8J2|%j60T
z1y=s}s815KR7{N|<|XMSW<y8pcci{0_gnJNVs>lnaE3kTsc&56>;Kb3+Ws0ip(A$*
zg70YWKW5k-P*V}I=K5iYRGi*F=j4^E*x$ZS+#X;W$zc_%{s?$MPmRNOD-N$M$hib_
zJFL-Yn?jK;?WCfG7hN8OeN`w#rI5%Jhhl+J<er#h#$ICN*9Ud+4RVddaWzoF>Wi}*
zAd}#f#>O(0SdT0&fdxb@4QesbNI7aA?cRyk#Ig}+qodkJ{Sqlg2AL>A9H-ACGzY5v
zl#Mg?YJwE?bV$;aMvW55$w&hl#r#K#F=-Bj+NgofLyzerHf78(U;>0LGMP0b>qZ@3
zRs_qG1hLUYb#G5DGXr%ZR6VLRp&8Ru%%%<3u3zHb-N&q#s%=a%jL^Igt$#&(Y?Vgq
zd}Qhd-uUbxpZW72V{v5(;RE9OjJ&;y_Xi|v2`o6vpCUcGLiX2ass$-J?2t&?$FvXc
zF+94@q%s`6dJA{yh~z60Fg-ru&-@pEmEZku{|Das!9&&Bi204NPMUp2s9sSXIphp!
zPq8!>C(Nji{8db6{&ZbB#u4QG!Bc*(XaI|mi7`%bV4>!f1IjszuPP}bz1Jw<G-@8@
zaZEZy+7o|L&Z2!X7Mm?0WWuIryX!E|Hd3hR#NB{IAY=$Js++Vv3;H04o>rQ<kPPw3
znHYoFNK<m3^%>kZQAJN>$j))+mFJi&4tf5SHvmgYgQ6QUR*8KebRB)y5kt@+k5D|z
zz0K-~TaZLkS)8qCYDXO=qS5Pm`nIEMS7b8Yd#bt;s@{c8t03<My(?^uSfX7_B6vfd
zH?+G6&D{?;|L)g0`uxv-n8`VAO*#x=-ykJ2yIo7|J=LTlrpR<rbLG|z$tomb)wpe>
zLuV!M9xS;2;!D)oaOL<J-}=FOEMpp7CMiEwb0tNl;bj79WM7iP*Ir<s$`qrlt@3@w
z+;1b#m33ANW;0T#P0m;czL{|2`R94*)sJ&>{Tj|0P9MI{;|K4-&{I|F@ahb&yzvGP
z?|q<+n<+r-g@;95)%9WyeJ2flIX;p~H`KVql;=DPf?4!uBh6CA(0lFOhp5Cxr5C~s
zYU)Ph3po&Dq^@eh1qr}Kq|6yZWj)EOxuMgdSGW1X9vGQEANd|5_c$gQc!VJkf_NH)
zA&}xARMH?UDT%8RvnN5*?1-uh-8a#91J$Hvz1>pvgVacF@ABPL6;)LUMcitW-bI#A
zPw6&Ws@mhKimLWZrgOaW7$dqsgCO-H71*%bZj?Tp1Zj#&Zq(1^yr-fR>kw1sNq7Rg
zUa9|buICu`CN^1FO4^W$##K?%EJ%Do1;qwLSOLyCb+|5B;kv#MW6X?rNyL!YgdOYE
zR+(daCWup0FsFcra~9_;m6ezz@Ac<cHLBAyTdpm#ZY_9?-l=RwtwSN_3?KW<XGJEq
z-7su+oIP8P{*vR~WyHNV7Z!&pTfqhAZHyXzdivP#!PA}8Oi^SxK{z8wq>M;<*aNb}
zFtC_c{Eh$OZ;`47VvrcS%CqeQ**bb7wVS0H4Y;heWM5Glf1_E%O;uAx@h*_O)O}mh
z?sn95Jw`zWJp;;f==A^<#+vGglMFw?Ag<A5^n2!UmX<Y+Df9tvBwsf71FwAib^5-e
zIXS?z*GXqjaSF%^DB*EHvW6-Fp@VkK(Y#{QdSc&GS5ti5;C)Typ*DtT<2X5}n7R#a
z7pUAIdxh>>`p)GVjWG}CS-!8^#`>*nnA*(evLF1Iy?!DaRHrmVW90&rP0<#I%WUyf
zoYaXoetAv^hWmf?jDF)8wvj|)W(TIz9@|;M_A0r)1&zlz<d?~0XT=M`mIYS~fXrwX
zWC{x5#Z1W(Ng<H?9zsv<*3h@uc8f`CvODGAg-g8u=88x65Ac3UGLe)GAvx~-Nk@(~
z>!&q%jhoSE0#Czwz2f-RCGLFuMZWv5AF{Z5B0}@BDXE^O%8}VLQOz7(@)~0y+Bi!d
zS+_<h3C5a|8X#z#wR_s3xk&fbo1mJ-jMrUH{#=@g0C<v(Vo{$1WTBRgyxnnXq#LnX
z%mbrKW<gPf!Y5y==7JauL+FKu?ev}~ifgRc+nXdN6e%J|u5KJ@q&p*Sccbybo~~w;
zLKR~o^ur!G4T&@))!0b$o)YThtMsgtL^GjgG}4@A(lAJI5eALW2$<O7OMRBdu6$oy
zD}`wMeN&t_<xouGCokx=vl;^d)udv2+;H=`%e=Gc#KTFcE=Kc3FCwZ!{mvX6bo|_3
zxy?&IbBA1Q=<hv-ZNi2Hi6e|@s3sk=wBpPka`!te>7eJN8VFrayMBioFHEtoEQmLs
z#||C2Jtu$g1ao){M<-BEX)ZPV!r%VSIQ%ES!?%C`+w`qahEjrHR12G;xb>Q(Meu%r
zQh*wBf%^PWzDmT*hrV-(_2EKx?s)ymnh_zXu~wp*1pXmq7=|PU3KpTc#V7}_K^rzj
z6U#s%7X&qHPM}cxQxaXVDIJR0XY`hX&J>E#{tT430`eXKHicGcuu-nNF^Pl2IrC`)
zrqJo2@8h28hdiHk1JB>SLuYD^FTJ3Mu0+TTA<*txx~^sD2GTIdv!HI*<(WeZp`4{r
z9hbV*_MlP?Q`O*SwG1zYNVn_QuC|0IuBE=NaB6I4iu>!x_E4l#_nd4j5$OC3XA-M#
zeS_oAe3@)(MNO05-!j5uYy?eqgnrA=Z6SE7$)RX)CywcSss)r8i;%J}<t+Q9@rL=$
zn+)qEFHUMMogDJ@H-Er`<xWsQWq}xHD0)Av#m^fKFP-2j$L8#m93>*`=JPi>x_l(L
z(3>^O^OfvNxioZ&uW6R4D^2Q%#yS=!C%pEFPjT($EqpU0=g5=$@A2^6x5%**v@NMY
z8-^>FFVVLf`mU#*7es)Oo@g$hr$gV;?KYw{*Wt!kznT4WCC`vPTZ&?fG&Nq(1OpTX
zg$`Ab;#Jd=MJtwj5=k<KjUqNMM&=Zjh_wdi4OO;#TCsKN3||Na7qYrP>OICN8$(g$
z7?eg@m;f=6<DibL2yLGdHB{JRG7u}rYD$Tr>j`}?%~exVJCE_;r;d;U^iZ1yZw%ge
z8a1r3){euiq3!9m9n13-XQyWjeW%x6?z6KN>nkSHDNR-5D|!B$bz*84KWsKt)fJ1Q
z19rO|yWN()EAd&fAH<8^4$7)XY46^ja?!&J05GnB(v%h|yABJ>nO5*NS-XmGX->Ng
zXyuDcVIm;UiT4igEtU6p=LIdb2Ino#=}6QXDs7CN6J~?W23tA2v3Te4m8Y&77K;U!
zuHPo7j<j8|IX!2$-cq8KMvrS_gh7y_eg+F!G{6=l;s%3tfqF9K`*-hi+C^dx3Q$O1
zX(VdonaC~V5=n8}4;}ye|HI$LFBbB3yDiCd-2TkVblr-l|K^#{h>ewI**Ltbz>Pd_
z^$8FLL^c`gDw@ipv(^!Djv?Zw4xA;2jP+gyZn>PFvd&YIK~ye-tqjgs@uet37`^|+
z`PRA$R~d$oFm1<t)=<|CDeb6cGn(gLVbu@#c8Q6ZXbmn2qMA~sGBVsB+6_}%ab@P1
z`xzKvV%FZ$cuVCX6&YhXY#6YVTCteum94C7YNK<-e%NM=nkko!Wu$_PU-Ki|X#NAf
zej*!GuTe2ZO_5wKHu^%*a5#w2ZF;7@!vE@Z4i9R!Zzj&)eZu*AU~w2ZdeLE+6Z}gg
zcO>*zCq-|zQDSAhQc_(ZNbC>a44P4;)JUJPDG+0ei9Pfia<{?kcDN8pJZ1L6oU@0C
zH~$b!btKM#rNPUVXU`64?1J9TNp8TnjB}N^;>W<*!)Ltw<rld2`kWvBiDS3iF*}%I
zYfCl}m!o7v=AOmNj%)@oR59XoEa>UDZIAh|dv<}*NRm<rINcZ{**euqFg|2fT??{7
z+=^``XK^Dm8E}xJ(AIJaTCiD(bI?O3tFE9Jdt}9?bFx~pt3gqrm1J<PqVGGZx)z5z
zr5|N=sLN3c9hWe267Fe2A!^fSi_x7jnA6@AMh%zbeJcVQ0)#{yv{;V_y|<VrrBYN#
z^O;i~X}&0}ktkWC#Dq(7h{!e1<Pi5LZRz7ncQNjLSzU+~VNsXotnnf#M~rvm##7B}
zuG~1}@!cn^&bREl3y=jT^f0GQ?_$>T=l{2_bM><?61R`AXOBqBNZi!8dX8J%Af9|0
zv%7~)1Ksw3v-2l}^GBS%|A1L@&QJZ!9n7SWhHBQ328V4Xn6@P?m*g;Du3U%7l$y*J
z{@Sl_?e<N6_aFXCHrtjwL~OoLSmcOT_dlDBqRKi(Fne>WkNQ=NW-hLEqg3N!EF`z4
zn1{)JgubV2i!M@|Y1M6oK9Ex2{B(u0l}x6c5pxv1KgAG8nzb~Vc1lAoa~&H}sn7O%
zktvY~`WgN-HFaD^LjU~`LKH30B<flax~|_TQlUk<k!RUt(LcMU;!~e_gB%C$eBzU2
z;|QUZ;atvyL7Xjj>#gJ(2GL%l2yWKTmx_)}&Fqpk4<pq%$=)lol_<}8ZJGKBlX*kv
zBi*KBw_1yC)jR5GO;uMH=&XB093#1hF=zU!rZpbcXT;44dw6AR;*CXVWuoQff|jdo
zw|HRKZQy7@p^;QoC3}qCm%SG0!axAXP0ixPJ8bXW<=Vj^Uz$w$#y8*O-szg`y|l+V
zfo%>CdHr*r;4@$OGSm5-oC2HEB~R~tfN_b#%O|A1gFKKf9dZ3e<oxWM#}A(AP>%u|
z3fiQ<jI-2>8PC6ThnHUaB(9p^yeGwurw{M)_}&lbH)r^&fyPi=DXp)_IdX6?XL<gF
z#mNl{lct~vWgUlMpxdm4*|F#GYSSl$w`Qg&;cbtMB~yHy^^7RkgDI}RWnd>-(yFQ1
z?OI}rxXO|Y(wG^jo5>!r*T&b5aUNPwW(&rW=w)Bmf#pSQ?MJ@Hl*3F1sUb!}+@B>W
zrLl3+Rf+d@9N=1IY*;6m;wkDtHbr4S;HXo=dfBT&NEjOxeJ`;anXcUdVye{lEtOY?
z)tuB!NCs-spn06N>Qg30dxHr=V~IxH>dl8VqB3NVXuEnUZuNQFLNMw|E7u@tChR!8
z%93$fR>FtKyJlbSjs6p69}`oCmRa}iY|JRz0nwrx*wcdx(rg3~^<GeC`Q`CS0WRiI
z)(E8+V}-@%oFJ}fOq=seCsQUf&vZWH&g-vXs|l%nM890Ie7dCT!st?-buEmm)aV^A
zgY^qTS!={3Yj*v4e&?I-(uYi9Ae*Qtb|NG7M>bK0)+vK?thyb4<v;!<Uj6hZ2{GUr
z0S=b;9#XF=o_qBU$!31{pZ`9gPdG2xxj2LKPMN3$F&3W}k+vz)S!zM`8cFPjx7i^;
zMV0}s3?Y)rG?H?pqSlh)Ani+5axQENy@xrCvVy)}lcU2J%X>d~j~mZj7B}hXl*LOQ
zXY+%vQ};bNMPWuJgN;D|kqka2$Q{>ajx?8namjBTO&rDThSAgujle6E?+X*ReHsMg
z9#vfM<sO<nTTf7u3uDwDuh&mxgIbYsMp8JuiFhsAlOA5qI*5+kRXq7OXWahsW%x6n
z;Ml)~e;5hRwp@96!t`ejh~|J~Zjeo-^j<A2EIJ*DLa)Lz28~k+RP7*gR()I$+Gy;F
z{T4H9vE2$%N9GZ;ThMieZ~oSRt8S3I&~6=!1z>CEWD6_f2}Vd7SWj*&E;*jw9dNTX
z)p5o37Z-f+&Op2DXpU-}lV$Fb!!{e{m#ztVO2(Ts7QZ8Us;;S`MMQO&Ws^@jwUT0S
zRDCIJMkeB3X|jrh7|;q&P|aK>URHkFdPPhV$><c)7_xO`&rx^93sdVNz(;kcibyqe
z%@78>RdX7hE*on{XJEN4hUDz1+enHqC#9K@itAq1VS{ccMoc`4&MGBwT%<?zb5W+<
zVp4?^{85_W6cVYA<ROj}tu!Qv0T+7Q5U@GW#4SF1QW(f)fY?E7gc|KdQ<ukVHl4JO
z8M%A(OnP#mVOysZ<jUixHH(u4*Ppx0cHIeuI%#8S#IwGzo1IB~?&q#?^Yb64Pb<vw
z3B#imE-o+_d_5sguHY8edD?nT7IU_DGar2an=tIyZch2sXKQS}LmmdXZqAGIT?VQe
z>nG&(ium9W_T(z5_GIUI;TL|2qgS8jH~&BXh{xZ*j}1zhF%?b^0LcM$VBL6B6d9ZO
zQD`?x24#ueP&jgo9vE3V+Nc%_-m*p5g1CM7cEy~DeP(sOBc)TCs=*jX%>8Jlv1gpi
z;K~?r5H~T6O=n6aJ5LWiTOw{QOu0v5IAgO?(7^Fy=yMXYICalwhk_uu2Jb~&FJGIK
zf+c80Oaq6DIhD&C9$ytbSr8ME(D!V%JKF6|9WDn5nOKn1k|(I_h!Hg{O`W15*`l9c
z-mleXQrttMU}lwNayX$mXoy{;-F58N8#ZAr6vIh_uN}T=Kw}aL1-LVYo{Bn0c26F%
zIJ_zj(t2%H>lFtFCpsT0^y^5!+cI}G>+MFgSFX~g%aG#$#*g`*`%nR4<Os^)aI-_2
zYXhe1Ij$Q%*EnY1dxv-4f5L#pPv(653!mYOzx0b#^QlN3;y?@o)58U~4_+swfzYnm
z^;^2GCk+Eb7;whYG&Nlx_JcNO@Wnu<skw3I1@3(8brwgL$Vn*Y?Rv@Sll%1DilN;R
zvxI?R-G#o>fJczrr_YF8hifLf73~LLDF$|{6)}jDbD;y1Ra%>i$3TG}%38G=iD5_T
z9~ai?_%M~cr<`e;nr1#_vuo+vo@sN4&rx!ftzkNyjYgU3ULn0q5;lVvx>*v5sHT6h
zn1@|NJ3iu98DJzmm!h64q$JXi7?j$YMjg0u>R@iha30mzS;n169ZTh&SYnFwaiB`7
zo6gmU>}3ptQn;NpB!oI%Tzg{(;~;M6#9cRgugG7dA39+U<wUBKI!<AHC>oj*!w~3(
z{z9v{A3Ueh$i{suI^JyEus);@rwkX$vQcJ@t`lR6$(|M!I#jf(vo@M-TFk_(4zawt
zvwG*KoyTdD>WcZMv13DPouF*qTcI&qi*p|DWKd;YMKh^sYB93B_3G=S*uiei_H@ni
zbR+2iW%Il68ktiUd=6{|lO@R4DgBt)A@AJ1FLd_Qj!ZQ2pjDe>i3=s%k+S2(=T7*G
zf8#GPB=9a`sHmn>uH3%G;~&1u_rCc(mS-E9=@Gl#DKU5C=%7aOu2sGw$qOnIKy{d9
z-RQMXK|D{;TF)6vRv!srm8dU?N#a)mkOc(6t9fPGGnGcrF_j}&#uv77WMk<==JCU4
zJb3pp&%JtE;=tw$W-q)#_x5-3#*!}7Q!dGwZL^q$#uP`yeoR5sYiba$j4{{Qj4+B0
zsV_X&T7T|!s2BcBCJcL)&4>Q@$FD{GiEL1(h)wh?WsZ(J=GV74Jm+^WvFhfe<VX?H
zF7oJ2%gsMolmDXzbJ8(9d&<-`%s&4r;l}fXM+w|QXiwhiR4k1Ha|)E7OVznY6{JZG
z<GQjgk#oR=9VT_yev3(o$WyAT8*+8X*Z<K#%(rxcq<nj$Q7<NAZAEfGia6weDcUD%
zJey|+q_C#Ge9YC`2RysCBK4NI3pkUhCJnWLgXb48apaxD>(toTB*sW;xM6eocVtsQ
z;%5ML%oJkrUT2!dhE2kglWr7BQ%O+`Nz6#4$%=SoHGDz&!sS8F#t`FvDyq1y7OYY8
z2n)HVAgr0nR}4d-GL|@qX@`8NKJhvg#aJy$Wh!S-s$G!H=wflapJ3|Ysg&emv7Vx&
zy$e!m#FaQfN;0jCgXlft=<qALR1*hs%-EP94Wd&V2BxgB_6nijLM`rvei%rN$Ht6t
zY9>;sxu%>{(lB6!l4erESk*8lz0WQ~T~W^)u03~&hj$;cTgvss6a+&C6c{*OBtG|_
zeuWrU*wef8PgbPi3I@;4R-}By!QqQIGh=#SIPYh?_rve-u}^)B;r&w{ou6{8zDaC5
z0KBirNS13>stZgcZ*6r>dinr!=>*eE$R=?3+!6oj|L$+{Yya2(miK@76lWkRl`N$o
z#g}pH-DOKw>YfeoQND^1ldK^l%Svk_OQEIeGo#dNqeGgaBh`(9vG|Vi&lodZ41~C5
z(gdXg8)C>x1JbX#Sh%Qb;+S1o4(mc^8)=fVnD>X|zxT<;ru#!~+6Jc62?vLBycepn
z<o4=xTniqN0$BPmu$b34Z}81j1~x;>(05{za=v79eoDXFk%w4XwDB2B$_6Og?A~|I
zjEJQ$I*TStkxVD|+c+<}&cw)CFur0oubITTXkXi&e%sM+*BH0KO=>31Y#$i_^u|Fp
z^iLm?UlQj_NZ1Vb-g}3Sef&i3v)vb9L)YRK6KTeWh&2_&o@z2754||0a$)dQ3geWN
zIU=94Iy@olw$#;xquTPR`GnTINuD0^Xa4iQ%H;S6kf;x%2JO~4($LF3WD>4!Se#sA
zo}#!T$4Ix`fv<S{@Ck89O6#?B7H-_W$?Ko~3`f^)l8iLW&RP21nkNtMGVE4ZpD<1k
zIYE83praD%o%3ur8-{-1^yx#cKKIfE_gne6?RHBViZPO+3WXIyF;~*Fr051S%BD&C
zrCRtBri9t=pHD$O6Lnp4>F|(8_aC!dt~owA5%ZmF@U>^Ln9J#5N~2;&jkpYE6PP$Q
zEQ1LQh3u`(LdqZg9#i`BQsl<0#y~o#F~uJa&3VU+NN}bY_=!=Pbyl2uof;7Z$pPP)
zR{pH=kh5w<1B=B1Kl9qul;P5VuWK?e^u72f^ns|RO`-3x(b5@<vtD(nhL9t>wxh0x
z3k_fXkkfVFb+lc_X1gV7yj5vVWiKktSk@?+3t8ABO=Mpu7#$*m4!n#RGor02R~Ow&
zv@FP_iPgcbRbP(L#n-A{SE$Oi(8<T`T=z?)*>#AV%z1~mp4vA!pJ}EOs>(wQoUVHO
z&~bQlnH<&(>lK@`CA&>4(x8+CjZ-k7?E9nHj{HORk*J*+fqHS7@BHu)n-IvkBO-3i
zIg0KyyG)9epoTy&mMX0H-~65b3O8*?!xm#IVFfuEyzRC-PM@|sdbpzPTB>QK&x<3*
zsDq4J9K4mLD~TB;#}my_mLZ~dPOh(Wlz@pc=@QdQr88qOPHS%6BL(3vdUV+woDNB4
zAR_N4r9?BC61ssD66@uTZ~w`6Ik|Ge^mvA=BV4{gwLK@?e-G0~OeN7=IU$+9n#pb|
zR12+-oko?}BBe<ej&-A&*c8RzWn6LNj-@A_j*UiNk+PO`ZH@W8nIAzJ^B?^66WO3n
zlEGt{$BxTKnd3WW9N%hK{lNj>{Z7x?S6JimsUof-XWxCy$uIs1;hS%gwgd6XEyCG^
z?KgVr`Aw>Nf~&neB-V^nQxi*JZFFo?N@Jx_WN`^b9qn=okOs-j8aknMnl<+Ll;+Bu
zxBg+K3%5v>q5~$A5wyuzsB*&DjH@R25LB#hNCnODhL9WDRZDdcsC~!DRXBf`rGN_q
zq>N9I`4vy>JB*p&yumqbaB{kE$mg;6D6^h2$2LZb;o_>IR{x``97vvKL68#^JKUV2
ze71rtDy27T!<cnSU797dK}-d$WN#W$G)U#Dqi%M_RKyghjVFc@Vs1ths=Nvcmi04P
z)OeuKl*SyYGKk3g_eA+s?r&0kMbg4cro=f#b*7YTyF3ULm{9(`m%=VaVvvKzg`TDf
z^t*x8S%<$g8KZErM%g59v8ww^2%TCo#`YAZRA{a$&d(Ng%Tv>^X~XPf!Hwr{^4^;t
zK(sO?Pck$p{!%Zzd_>js^vidNXKSpPki6uN`N;v3=RQetC)j$*EHC)spS;8IEb^(Z
zzD9Vo<?e5<IXJB7&X>4hhMP<UWjB?$HJgmMm(>`1g4?W!Pfy|C05eI%9Pl`P?$3US
zfADwz1(t+Q1K5ny3GCR6=>eDxI0xQ-^f@zGbxFmnMt(l~C&cW{T}oqG=!r%t(<ADW
z_EXs$<Xhw<23fn*;=PjtlVLQRF*c9!ImViCeN$qZ)ZulEwkdR?@_rbX@rA=vpWM-)
zXiuwj&N7=fLIsRvFs41>h#cr##AK%PiNq4wKnN>*n2}?m+iut_SFBG@>6ROz{KhQ$
zd{>-`xiAbeqYie@nkaPG@=rT{ZljC}SrR%hbdrr~QgMg1<i=6WE2`ruHYWOZV7J?H
zc7BHOmU=eDTF0!mn94DnJygmmq%>f2=3oEzzvR}<mzf_P%KJ*0kOC=m7*mR+0I?@A
z;C)TISu?9@xt|xDkSS(G<x^WjHZ|4UK-J==SBSpm%YXULapl!d&=2BE08%*TVIU42
zF$QAS${<W9`+{ajgYG9Ys@a0Y)oVQW;w!xWy|-C!)=U;No`3yg+<E<DRFi{|6=RKO
zvsv@x!TZE+O$r^}LNlppH@h(=C`TiCx=zlI<@pk4D%Pu0PP!YorcuPx5JJz;wXz?L
zo@V>PO(J9VoL?nf?$4DEt=PR$m{Cr<)q>D!UZnSqn>VlX=3DRb@ZNo%yM3FkYYDN%
zd8jK#%3;jzEI-Gnv7Qs{%U&u_w2dhelOX>;3L~~+YL{#zNR+8IED3AGv?peHcEmWS
z40wu$ThAuT-qp6CGwOogw>{oFIjif2loH#`j<d6Kd|gphwKS)FAjib<$uY)xhM{M(
z+_K%QS+7>KyPcw`601~I)rCQVC4@lNNyf5hNrUXsQHHxA41_+g-K=T1J7SQpwN?;b
zj4Hp&UZ5LTVeb}Cio*OdW(=BA>BoMDD=~obUgb{;_0$$dfqIsh63e7brx}01evdNx
zeacyBF{)8?Ok71>dFop9rqNoqyN>0(2RwNAl)FzK@qhe3|DT94VEY}r({t8m=Y*lx
zCapw_6;rW1BGx%&sX>)_VgWB8CiUeT{NZoBNsfsju1EcT%KdnT<P?cEVv~dp{`tT3
zi`@C-%XG_UH1kTH*DOqw?QY4Vr)NBUcFqt7O+zSjTtm}L!9W}m-L?bUiS9p(OR+6_
z6GeV)0UNRog2rL5_)NHzNMk-+)+n_LR#0P|DwSVM;_WgFJt-!<bxbBxNim25uyk$5
zYPp7#`PRSvCZGF-&k6IWno!+-jrQpinz$qNS>t=eaT%kHYTmPF#<Nq08L3FCl=WkZ
z8?O_l+sNX&z)nJI%@V`J$fi;5!&qyU!O4$cllc$)`iX2%YYVlpENTzymWi{}Co`ro
zGIKjx1J+EL))lkLlg>A!Km51szP`e`6AnIplXUq8E+1gx1XESyTBbN+3+lvL9E(*h
zylirXIAMg_Z_OA)KT=F`#0))jJ>(XqPguM@<-Ok=IP34=4=PL)?L^KQqGUOF0}~?i
zd5@h1x=kdUXCiZ2GZ5VX$*~(UtEXG8ymA%eTB@pMaiyYtI*3*!cGO2b&EWyvG7F2z
zC$S2*aohlGmI=Hu#rVKrO&X&UjLD)GD-Wt#btiqiB(gCJMK+11V^4urH{Ptxd--iM
zkh2p?FCxA#RNQRM{-H+s`WRw)*e}pW5)zS9A}Vr<#6h$c))n+szmq9Q5jvoiLw?-+
zRPR!bw^6G!Y6tdoCqZiTV{3r8G?Wly5F^??zz_so4FRG?yhJcs_?i@$<equsIDfok
z-B%oR0p}yRZ;6$cNqBUs`_m#mON0jn-OKxXC#Sd+^42?<D|j!#(u;<xFI?rxy{BxK
zTU^eg@s>@Q#=)IW-66J5NRM~qt;geKI+z^U9+UkghQ1=_mh)vxReiuGzVb<SPnUF?
zB`<vG6$m>vkDn6q8RqgOs)Gi1aDbWALRogIL$Quz=j8K&`1Aqp_y{+h(4RgdQ*(6f
zDqag63bIc@6|&NM9Vtt6&-vNYk9?0AJveklQYf^ldntvW`~@|2Dvjm1y7q^y7;0r~
zWypgptdJtHi^O2mxW<k}sN}Y3Q$9vqNi(J_A=O=#xR476WEngaCc}PiJJRk=(b76Y
z-Bc{54b!I4!e1jxELfw4I!N<ooW*3zcC#dfz;?C7z5fHAd;N+u0%0JuTl(#qVb{v%
zCS&nq-;q>5RM4TMljLHAmX(?}YItD0)_YxIxhy(Buo;t_nBR~kcUVoPh~Vpr!@~&)
zVLJ5Njv@4{vmsc+_Tgh}>PfD_lK9uZ{oDM(@BAJ=^|_zrQ(yY&v4Du(4x5z{tp+A8
zCx+1DtYdZhm})vz(>|wz4zK&k1tiE6?VF<`(FbT=c!hd#LfZ{s3|<*F!_X3j9#SBO
z3+}Etsu7nmTC%V=FpN&(H($C<Ool5rZ&OVt3@OnM9WHrNiu7Ga9(sn|7Bh6}F<|gj
zJyNby3P=Z+nM@~ieM>(G8)xV`9^QMOTQ9t#MRFGOG^X)hT0Y!BFtU0JDq1K=R;44d
zxOF5sHWcGNu1Af@4ot)vxO4j!l{dWq?mb@m*coTb72VKt`REvD&6urR*aEVj*b7=t
zC&-L8N!dHmOru$^KN57KoGXTG5&>c76rzkZ7H2X+p&LQd3av1Wx<WBoiVR6&W+b9W
znH1J4eHh5SHa?N%=^4&DX0s{A7-9(YeJ8Os-V=t%db8sE^o-qhE5?Fq9+h)uNK*Gm
z?c?Z&ffzDv*V48veYYb-k-wxe&`r{~Dm*o6U<+bejQcJ=)@686*i0i4+*pl<F(duf
zSYJ*GrMB;f?pd3r^0LlJV{64^5hFu6%U&2^tWg%58P}o5Q9H-PS4^9ly7n07+4Vh7
zpFQQl<1-#VddzaYAzRDwtl`y9ejH*;TA#C9F4?TNgcvTEVU<z1Bn8OuUxsZ3Nww<f
z;c{ehaEbSx1)i*;?r~zwn2IFJ>cHF}_lOBM4rl!NzwwvpcWY923X3c1HPVr~fz9J3
z=TA41N>ErXxi|)UF-*!p9;hY`vL%K{ziWlIYlPzLe5EeV>M4*T8PQgoQWFAPpmwJu
zy6dR=V^Y>=KQWyYy1lUGCbRj-ehKQ~VGLAF!*1OZqoDb3|KKjy@7&<d8?S(kL@MUD
zUt#;*uj7+Yipv0_&~B45`s7G2zPxEaT+0O^mO)%$1eyzHSs6y|!5^dNpXSdN{rq?q
zl<V=Qkd+^k*H31H>Jqa$F`Ks>-?B7U9Nd}WU$eOHx6I81V**v}nIBAX>yF(&@wE32
z$hYTAUzxG#27GO>wZ+y+jAfkH5c5p-Mx4itXd6d|&7!$1(@QKS2SNJMK#nao_T<nL
zoBPaOGi*NSS#Lkd{Q2wnZp#n`>|ijx!KcjJbsQ{uX7h9ElNH$|cK4@PbIdGGI8BkR
zZmGHnwVBXATj4+6<6OpDLo@NruX%Rc4av4#`9vn!mN?8u9fKi@&y!9DCGRwAK_boP
zh$db@H6?07Psd#BEJ{I~ND^`?qH}A<2Tk^;R03i_n@*t=Y4L6fr7H`yLVrd!(f|t!
zWS<0D!XB`OlqKRNTG2x#Eof6QrWp^F(qxu{ozaBCP(tD5_hU-qM~zA1aMGZ9x0tl(
z64azPk)oy)sH3mZsF@g(An8e{w>I_UI7s0UTimdwYG%Ci?o-z8n1hf>eIq%kbx*FG
zXg^|A_(^1|kt(Ck;{xTxddc{*LYr|Uf<aJ^l$e|>xc1^z?tbTe@&G}bTjwpu6U(KK
zF9_Qo2tqYD`aCC?%fy3A<oYshc0hYp^WdA`C*1o!&wu%ooWK2$)w}OOip;KGCC?Ys
zw+=Y}#yf1^`#zH!GiKLs(p<fYIhm0wCy1}+O}K*vgn{AV8TG9z_{Al1c)*=ko+Ax=
zJ2I2S6ip1ckV4$4-YF)I&JI5EJ;sdhGmZaV%pS_rb^Lp3*g-k)C^?o&7mDKQoL$nP
zkHM+|&(H_@Az(woJ2y6%^1aJ6S~N%2g7sRsdW-YmD~IzE!b64-lhCn3$(duMv6o40
zHsV_9Ys=Bmf`j=CUpvvBmT9=jdsC1R#RQAPWJ5KX(5FP`I^O=1KjPBmmzZ3>!s1{-
zw_ag@sMK~DC}c2|qF-h-0Vz>8B-I;HiqtY4DuXUYE$Dt>lz%mQc8Jmf3yNJP^Ah3I
ziwm&J*v2r46%cu}+2Z>R^B=s)I%odD-}{IB=0E#=T+{G_x8LQ{U-_a89&Kjmwm2jb
ztJ3DcBGC=rI98{p%w`k(;W5>0A`OJi8iQf?`;XCBkRehZ-Jr2GF(h`o7VD${ANt4;
zgHUDZ$+07;=FeI&ZV5wA4g;|t==zpn7^E?<7FRb+ria|P^AfhIh#}yeCxpnevlS2S
zKVY|6aqa3Qy4{A_s_|BKRIbJviDa-@w2#gi=CcL&@84BV0f@uEc6G+HCl9%F?K(Lp
z`fjKDV;E7WJTg_1N<7MtQwM;iI9FeI1~X%JCb?*n_k$q;o`juo<K|VKf9@(jeD@xI
z{5yY0+XdF!C13pFr%6Pd-7gx4)%Xr+LK2AMyEXWvw2YJ?ODtP)7yqbVS&e~&+N?uc
z?S$*9q7RWF=ylUV)+$@07?&B!x-T_v9{(QRh>2cI#4=gc&oWewe%I1?%VavkRSj*|
zar*33vSr(jz8mPfRt*njU}m*BkSMrB-}P7=Z6}$hLx>FhK->0Xq*T%3XVu!eqP;B*
zy0V7$IoPOHR*I0Q@SxLSwL}Vf(T8dVaM4;@W7LmAB@xO%vUWe9Fi2FP)n8*ztH`KT
zimB1P${S1NtRSXMO;gnvW7rNo_fDVj<m`+mXJ;(Wcf^pfZi;miW)sgRKKp6rM<<xH
zqFtV`UalE-z1ARlPs%fr@<@p{DU(CSgp7#+V#H<fnDCcx@SDH>O@=g#&b=W943TUC
z#eqC!iK<Is&0qVQe~ZPn1D<~G+bk|0$@TAd<QzDC`jkhH&w2K2D*!+_fAU^OnXGz?
zSVxEw6=N$)T}>If9lox?$Y8u3I${_IF~~6F{G1pC#7Tb&Z8T}`po~G{kG#XOr(W+_
zY{F*P?-4qf*R#ph6H+krCbHW^zW?p_xbeaZOq>JjahI<XT?O45YrK3Lt3Th#pd&k3
zcQ)%9N-D)u34=u)=P$Aj6&W_D3p>SGTGu@#JAFTBEeBc|vX&^CAGg;}WP>_014qXb
zZoFcsKh=?69*Bo)%uAP8T$&Qk4aU`YmpQoXa65>1J#B7SeEMVbZcgkg)oe%;maU)z
zSqqBsu^1<=`+|8iy(k)qEII;{2Mht?j$A)u@iIiW<H7G6PS19<`JB+MiK!)ZKypk{
z#2=k;?Z%3RNAP%{ZI4+!F|5`t=V?tloHN)hA#5?#g5?Je%#!Kt2KP=Nh0JWy;HDd@
zOC6U!;aS}aghBd4>m>?Bi~*8v*19=l-O95DGV9b_sC))eP|A=K3S5~Q<tWX$5r{(Q
z9D@`RMi6MB^Oi!?B5g)cqo3l?d!Y$6x*3cOsk!hg$=}P$Iwlk2ATFqRB1PQTRG2Ze
z8_<SZ8oZS9Xn~)S&?rMv6A%4&QW86U3^`<R?H!~+HQ|C|Z!vsHO6Lth>AqQ9;8M^{
zI6;<-vK$jtY-y^2?b(|9PY3Km;&@URNbQbXS7h(WwI@Yeya+H!i#ibWR1GsIMLC7)
z3m7@f5+3I>)iiVC_D!Ds@DW{T$*R|xn#lFJXIl44Gpfm4VjZqhJ@*FHe1UByY|a8{
zIc5Fk*LnF9x2Sd<t4B|`^DAG)JnOmp2VbK(STVhRi{nqe#`0@#vHs>=;=?WWGoE_l
z$kkK~WlDQJ1Y9+t+wNGs`7Vo>Z*q9^fcl+Gq!Kq@ty64{M3T_Jv*ZG!C{|T{^xI(3
zB#qbJ7-0!$Q7fA6V#ZO5UHQx2T3qFnqG|}kfU}~t&NktE)&|=^GNew5lc)u}wL+;)
zS>-_*Z%|e8t7$6g%HnIoWKvPp6_u|r$<Pmx?XF|j_O$)L&_`m-qVvvU1S;0SY}Rme
zctBk{VhV!FmWJE75ygOlK<I@@24iX-JbcQv#&B!C;E(>^f5+$lv%k*t@(GtNUt!qp
z*sWF!p%WHvG@|tzwRL86N-fTvWx~E^Jd{erjzxi>3tANDT2DC<6AUS_*=#sJea7Qw
zr<^@|#^ziog}V@lxg!puxhtkA&RLwFGxwS9pZ*)pS1mDFaE@-*bNcKY=d3U!hJlom
zuryK@J!lGqG!TcLD%O1e&F^z*-jMp1IIX~YOf|#TQ=Dsv4k89WKgKl^>R1Z>R<!QD
ztQ8D|5ahW|ff#qh*b@iYk7K{4?KW&TJ3>rUm1l8y$id-tCW`~=$pq^wQc7&rYjR$J
zh56!;oE&d{;~T6_pOLerKR@Tn<vG1Zli)K)CnwD2OG4k_eU0%I2bZqWbsKgoiN`2I
zz!U?|9^M55hsQ_sZ70z##o$a)x1uw?h>qo)a5z0bO3YG;lCUbt&?2(X-%B%VM00Pg
zp_w{<@fW_z<NyAD;O@IWBs<SbFW%zXwM$Y=YoS{-_k|{G%D}eds_S#-ouIfjF=lXn
z)UOzWQhJp+fmPy!>;ukbs$w>%C`~Szi{6iH2$DE7C%xXOG&x2bXN{3~A-QD2)^N6B
z7zTzAB$GQs%7Gl@>m{!;jLk!#J7&b_&1546VMe?hLW!XQ>Z&G%L>D_s{GDE0oq=oq
zX&NpJ&@MFAW3w&OF^`_xWvK3qpyJNrz3N4a)3&lr3bj0s`?DQUP}yH)kG1M!P?%)a
zij!}JrLvC3d)Z@~3S%s7Kk(@M3C~WKJUKgKv+am6OTMO=;3{dl2f+1_Fa7Kn@!pZv
zE$w>8Zn-1$gQCh>W9BTbvk8fjNl1ouFp8wgIcL*=uNHjoy;B~Y_rx5?b|9Ke%7Y}Q
z8>8r{ey*JP%*S8ktAF7a=(kH^yTVUzLXPC2qw5CVf9F1r9xg?`kQ3GlV31UQDh<|N
zPU4VUSK}-(X4<x8R#n2tt1MOB;8IOfh7CqTpo~@1qr`5r#y9mCizjThk}Ilgfszm*
zE`FnwB^RbrwikQ}4OQ*ww=GytH#i>Nf5zhv?sM~%=kODVRmJfqf11a?^BXidfOA-z
z#Nx?0j7^vl)u(8Vsz;QQb3tlwfsU;N@v(3aigg~(FL}Qf8yP)b$fNC^d|ng?`Z0U`
zWHzXN0=q!U6YS9uOk2`sAnhP^nW~lvsCO;Z<t_Q{mSNlDt~AtF4_I|Z9A%ROtHu@9
zqJ$1`1+&*<^nz|hmTII}iIsxNqzI`ehaLD8)r~XE70>b?Z#cVug>AcH-R`i-Voa}e
zB8!VH(LTd2w+zbx8&3$~n9a(w?KA7NqaPA6*K&~UHavPW!g7tj+%ma*KzQ2H+05*y
z<?0s<_An6NKO&`Ckjx-%usLO%%~-2hjU$>0#uja9!r<lW0GdE$zf#;6tXbW774gW%
z>3J}aO2%m_`F~oQnDPi0Pxp-E^U0*NP<k=@j5C?wm0Y@VRBO=p`21Q@><Ou-H{d1{
zLXd;H=nAY8M`oFRA*5v0Amt=>1QOgGh1Q$1I`hg#ISitmBV>l{j($4T>{HEci-|nw
z*M}%hyd^6&4MOitn(vi_f@72D+p4sonL0jrxMUe8%mzz{fw{Mk22#5vIZLd)3=5oS
zMoH47o2WT(T4awCMlGVOwK4?AiDbq2%hjIg$&BmIU+2B=y-SXfqXyns%(&_X(%FhU
zt4PBMn{-0+;%m&V+yrW3N^IK|kADBxxc$b9oV@lD{pmBF`}`}I7>NtdtuKFyM}PEn
zPQU#Yi|1}IfB6cb-SG5-XP6rgxprfU2{obiVnH8+uoNaWE?G`LSTQhg{L*bM9XZaQ
zBrsk~L&(J+2a_bv)r=w8>J<19zr4mP7-LkQ1~@Y|tHq4Os$ITS2t%4zyDx&AL=yC7
z7zaGrfVY-l5+=&QA9V0xbeft%#ySbNA9Zsnf@yGG5TS}nXywrmkr4F08)&<Mt{)hB
z8BFCu`Y`1nl)|H`Dn{*93A@+NW~@?`RD0!}x{p|k37MLTZ~m))%NIZO24@deEbndj
zXW#k1@!FsHJg@!g&vE_s4f@@N#}6J7qL{xB1)Y-YM~%cuX^fzWsnDm`BMKK^#oS3v
zMTmywuIFoC`v%{A>-#)?^mwF83*WQ`BCc{&UMaU&$Q3cxfG{pn&OCLIoC6lmq^@z+
zQoD*6g`ygAB=$WfXSo(6^>PZ33?UBm!SL|mId|WFkHxHFx|k8G3DMQ`)s&OlFL847
zHkGT1WV*g*=sL(j4UWXXCTJ$4tSvRE>2|~+P}NgTA3dU8OgOxDgG)CjSl19!AcR1S
zf%RrZUZ0NzP)d^P8gpX3I^*#0su&`*9VtY@#xtATz&T5e#q^1GwFcujJh{fn$rX}G
zte4MNpPyl@V@!6+i5vsTz~lSxar(HX@)cEG3nQQaVL9#dZp+Z}LVnVSA1FFFHYjHQ
z%#G$j<5|4l?=YD=FW=(t{GI<bUEA{X?2I8srQWNtT4_?t^Q$ObNuspLkgcV%(x|#5
z^vR^Y2Osq-DT<bpM85>1?$wC-Oy#{a23giF<?Pb6G5SNR)Wj8osY0!`NF!k#&N{~Y
z$Ew16PZ$E5^^Ugd@Xq3WMO8OsyN@5ULr<_~M0=Aq#5orCQu%j`5gKrgqET%;Aq?__
z=|Gvn{M7Xz`zt!w)6?JWY1eSU4YdrGmF8<fh9LUqr2!vvG>tNx)V$7QLBFgOGfY=<
zO^xU?4H<78mGw+2$Fymvs|sTrZQt|w;WM6|FF9SEv26n+kF_;0$5}PELiTd&jm)gu
z4WIb*r^vA<w_Cbx%Wm6h6p&Fmx2^$gYNUpchKwYs047PzX$=gASNM(J_%;DUj=ea~
zIzdjQCY6lWU?SdnyiNS||MGu@tquKyhfI%8gaH&fQqDYke8%~5%VxbJ6J$uC)Z5}^
zVAaGg=>t?`RLLZqu9X-Pwkm)GqwFXfLr~PyXfAZd(sl!flc@~ZV^sI&{Xo=|TFpw6
zKE~BpE_f)YMp@0}o%bYk5X``C*Yoyw-saj1*YK5rsv@6Ug832M(=(AnDO<?a5o04R
zTC(-z?B%_<GKecK*=96-qX0BYyUW?&%%160npNdmkHAZj7ZhVG<rjYB`^*17ynZ4Z
z)FE4Z@SJ`>uv&T!UpZtlo3Q(q!}<!J9maUvQH#61rhj)wbT!orS4mX`slhrCeVZ61
zk|u!h(wvGJhZ{FX9oLHWJ%yvM6dRMU4CL66Lx;6J96zIep@DbaC+;lG%#xq<OaVWe
z(1*;>i|+KuExC0oay;Mh@UF*Jo|sz#Q`#t6r)ZUKfx(%Y)x*g4%?ZtmEtg&j<bFfF
zSaEdYDV8;9eT&#m#huIuRU~Rbm8udelLgJVFtQraH=Pw3E667)jYw<B!;`NX$63+<
zCTS`p_cW7?HnPa|Bd9~N6$F+sNZY8sMkdM0h!#WWTe49_y$1;(M%umuJJ98bZ-h=V
z=AaqLB*O`HBNzQrF&5H;uGsqL3^4@a5D768`hhqM#6dQie$%maD=I6<WlD;chIm11
zniMtF+*KGSm}2!S6Xq%oG`T0nnupH@+H^$fTYT-AO#*G#GV~7W7TZ@O=P)j4ai+sY
z<Hqf*Xn_lI><}6aUh@S_hU`gICQlsKUb(@8#}7Ff5<m4q!_9Xd!p%Fxw>nNYE7J9=
z)Yq@m99$y|8SCX>m{o!4QN^8~{UWE|`vwOmC%9@V^KqBSM^mnT@e4fq!*6i<tv5Ni
zahb`BmvM8;_T6{cJ%2)Tc!KE%awYGXZXj)Tu-lQg@Zj60_@hH!{KAWT|F_<zO<;x(
zPbiCLQ8G!^gxRwIKgyR-YA+PXPA9##MDFQ<s&B}SzVr$*#yA#>Mk%^@)!gW0-x)}i
z!AGIZ;>0bHoD~Hru}K+XU@#D3rrQY>%KII@a(E{Lofs1_X9hJODaIS6dA^t>X%y88
zF;FEZpF`(Jg+}SD7aFk_>Mv?4k}AW=$tAx0)vxf)U;B0X_a4D+z?h0}{r%tI`+xk0
zeBrPEFSvI5I@|Sz)%jAMfhefJ9xYRLLDJ_v74%g0qmpw+q5CGu+_TDr7y|s=|K`8r
z>2iZh&=^N;4UT$0b<bd;JPwUF7(0~#otmU|A<^d^;|(bUl8N-~KyAQQaP!)nKK68d
z4?2W$9&(mq&N(ycr-q!!T<6K%$J`G)j&D6joXxoYvCr|`>#tK!W(-mCK^-VHcS{^v
zL6d@-Wa<4$LnI6XIVe-0Y1nmJj-J0x42eEuvgsiP)eUEy<~B#7<YJ}(q_Mz{eUAxJ
zY~>J0Lr<SOw%ZNOtRY#;wZrQi92_z^n(_4U6W;vpJ!Z2hmoJ}aBalZ5+o&5J#Ep6w
z0&Pkh930_hbEUhecD!U(N`*!VvCrCwYAq^fOD@C4y?S+Q5sli6XmewX!FmTVGC!U$
z37%vJma7(Lge_o{3X$CivMH57Da^)>hI6rGW9CTAid6mx>c<qLJO}!`+vouKJ!cJ-
zvzS_%=?-D(;PTjH7bZa>3c5N(MfN}3#LIxmkh3RQ^_kECd*8R1PQFpyG)!|A^xesT
z$au}G)oYNIiDB4BzZ4`~8lOZ^>Z;_g`h86?QX?f()8NO3vrzJLHY6Lz!E~Y7It$tA
z0J#{W8QDLKv&0l+y~I5eLw1fLXEKWFN1^PRjL#WIB6%4~T{^yuQNyQgyXE22b574!
zoUL|rL-Bra5}#HOwPF-$r8&=5wBXJe?%cY`wVT(;u_bIbw3{vMt|RI&QsPyj4&t&j
zUYdhz)bk>RhzDGXR43Q?y*KZ(YBSjflIzJyWG*<N`lcuY8I06?zx0bg#VeowIJw_3
zY&XoW-o`{>Jq&HnlPAwuFMHZ95_5@JQQrq!@}Cnqs?njE`ece9N4c+Zu9fY&FzUq%
zfxXwqNNEmuSG-8}{tja<wl;X9Bd%2=JS-UTQZee{owPp2W_(?XW<F>7E^zn$GakHm
zpF3~7B%i}JT>Ijex%V&rIWB58uu+Dg%|sh9E@DFv7Ez%U+TtK@Nk&ppQ5W>A`;agQ
z)PY`+Y#F9#t(`99DrYpA!Hk2RA3yx`C$vGGTs|ZRL&`nfBhT{LoUXFOAWpoN#I!Ng
z*9Y>|Q_OX!@`TBq>*UFdni<(NSRAP{*v61TggQ$IvrnZHkRPa;$q|7V7n9I>#l5%1
z#5MQ_RM#i4d&Y3D<?!WW9^QS5X)ZB!1Ac0WyU1XmHi6r(Z@BvN9pUYkO}dItjyMg3
z?CE!&93^HZ+l+S}Lrogs@i%t7@J3*A^(oK08F?D1ZuaaRM27dLm~bqDMQ0SXRO|U6
z$l>nxA5W+|83?0jJnJDOW+MoF0~9?q`-WA`C$e(&<-jb4F(^&bsXo*|_9F#VTxT;`
z8`0trY-D&y&OuOAbuBHy$~i!{UNJeC5WAMJ+tCeRk{61Uv6$MIG3$j!!;xYjlF31|
zq%mefC(UULK{8B-AdZDmerDVaFwI1}pLnJy$Y~A<f|yi{zfu%5R}UJO_Q<S{Tbc$Q
zKi;vmbG&P?KC|5peDbq%?*8D6HaKEu$yH7A9$Pt*N$NZtC95}>J#`s5II~iq%7$v3
z!x2f&2*tATOfN6E^RXA`KKOtajvAWd_n>-2@86(4{uuY){D9|PImFM8W!k&h;+<jj
z)?2*sSN;OSa?7HrumQ;t^lo%h<j8U3mw%e>?HBpszy3q|^<54Q7c`e|aQ3aYIrdYk
z=dOXPp&N*&TiPc}x+fb}n*r-5y#H_C;l)p1=f$tw<nGs&ocB>s1(Qb_Sk6|5CE5V~
z2=*A~3mR(nlt9C%50oeoeJ0fLdMv>8Whl9HivDIpvSRF!kvv{wEzVZNLC}pshaxE%
zJ^X|U79_o#v@1n`OzOvpvn{6fr~{)J^PDmJn4Cg=FQ`P$LTC3@P)B3Idzq3~Ud+#|
z74r}b_-4ZEU-&8B_}o`mzW;!4`QOF`L&`NVCTy^T@2&Ws{xAOzUi;-Qa{H@4&G!6^
zOr~k10m{}8!=TTZXt)atpg3#WBozEIkuX~E7z|MhmDPH~lhad%Wck9a7x>xFeV$o;
z#CC0%pG<KJ%k;qECKbN6)Q!PUYNnN=G9D82+r;VV8Nc<f{xu)md%)4*lrMbgi#&Jr
zn8oD-Zhh)gblWw3jM$+QL@KI#ZVY&5rGX5PZKgUfH1nGEIrMJA?N5K6=RWpXaE^ZH
zbrK&Kx*gs2oHX=Ab$$n<u`f<OyB1Bhwj=}HuI0+D8#J>+tdj^Yc_5+`&cy)EsE<N|
zUAN|J`IHz2j1x1bc35(9`4aaxTdXy7ea~vOq@G-3y<D<BKj--5fE%}O(VcI&d~%4d
zJt=nN)Z=|ai0aU*(0nRrc$PhKxqOO2GD?#b?}8$SaFi@c8ElR^!kn>IWgqMf$&3yi
zjmbjGw02zg<?j`fGV8=7%X!OmA~7l{dYw&{?CmlHVNa4uGN83ageXywo?^81@1gl0
z#j7Ba8Bq34*-CBBvQMN;1S;b|hi2KR=q0L5B`ccf)%XmJW|O`NSN1X8GUG6a_k^(!
z#lbsALo|t4NBXXpI2s+`hAcE)q0?tVlm@1-k%~4j4q|8%^bE}EQfn{lJ2^R>PZ)%t
zwKpKjT3^KEWVo&7Ib|Qt#^FkiyP6}7eiMaOnG$A5m=v*Qz+1~~S}|)pmABNB8aqU0
zDf97LSNY6WzrdBxeui&->uvsD|Mh>*#%8*hSa*>wM4g6KI47oJY4q<%`yq!MN~9>Y
zRdSLM|I=UjBK4#pbkFFwTe|Ir(D!1tn1kF${fuCQik?J9L<B;>CMoi*uh`ldfArn&
zlbj(8tr{a{U?6L5uQ4Kpan@3qfnWV={|VSc+N`O3g|9188pt8CS#CHxU9xLCqB1N*
z8?5!7wIlNYMVoUrlGUr|p$uYGhJy|oE}Hu3dM@X;{7J$Xr~7?Lml)M!kkqrFG^tk8
z7mAZJr3p`Cty)}mtEOVRX))dsh-`N~@4WRcH=euB<j7%sA|G95^86LT-KQAu<uk;H
zi3Q1w7?-goVJe4lM!>76+GyiOcj#hLnPXOiQ9H6kgt{+{Y>7g$Wl&r$QDQ%o{4qpP
ze<B;yH~y2$Jo;CUd3vwKx|)=xw7ZsU4YqbPmB;%n+&N?RvlVUZ2ww}-FF=3g8Ll&g
zZm#HHjp<|qNU>&g_NvPoiX%dpRj8PCRhcyCp&Q9DVC)taJD6IAZ@&jOuVc<T;>Oa@
zaqHEF=pNF&HKn6r+GMVNuExK*Wb++QH&pZ%8q;87HEtLgnT(s9zz&1~9>3$c(;D1K
zN1k+0w?tZUs_5<+s<|b(QaF3D`A$G@NhS!YgXG;=bq|&4c2vEjpvB?@=rQUrh*EoV
z;S8cQw=5I!6qS;rPI4J{LC5PzCXln=>rxB-N7366oi6!?r9stKQe9~1dn_}$KG1J>
zben|Do(Q8LJv&InPTW{b_9TMP`chVu6mlO`n;K+Lkz_JxttC61?1hZ=8iF2z8YE>H
z1W7GTtMHj3;1oSo>R=M;Z)ICzcHBD~=vYuWK{(&PANlgM;>r!rgZH+CR<g^iugI0?
zUkH)x<vM3I9mvkAkyQ!5M%^h3eNJF8>t(QzW^nW6=Xkv9u$#x2H-3uVEwG2j47-8n
ze)eOyx}sg5ldZ!#L%Z8iT{&TRcFxl`-{k6|Axw67z)a`l%1MDpBAP(`>Itvk_!X9a
z@&Vuf=f6+8dynTncbn<MCseZu=GrAl5T0(>z5AH$*+!7q!Qt>ceD4W&Uc1UmZ#>60
z{>|GAM)FzqG(gdx$hwF>l0ByKa@~v$IGnoY7E0P^%%LoV!agvvQvh6uwU9zG={kxC
z9WU98&yZ_0iXr0$82TW^Ct{jZG-ar{OUVq($!I>TzK253tprK8P7w>MdRldbw8r8q
zDd<bH=Uq)zX;6LTY3d4VGQN_$L+d4c{u4j*i`=^N2CMT^y3LB?8#j6PyYEvQBM}ow
zref7Pe)#L(=KVi<n_bR4Yd2i~*vow4OFzZ67jLsTJRky_^@h#zoXv8{`s|!x7wLB$
zA?lQzVp35M8>xnpjT{^;c<tp^dH;v^I6FP%x%r&M<s+IabJBq!`Y1Zjx|V$4B*?ow
zO42OZS>*C$%3t`4zrf%7`~Q^lCm(R*&J|vM{T1fZnvgdv&oai(sZyY-t<Wu1>R^o{
zXz^|VQ^V}wn3dU4U%kThmp_U16Z&B*rbeNqU7r%VmN>NP)|~c~aWDHuih_cOf0VFR
z4lZA(Ik+m1z({pf!lN+G#+uN`a$!0>!c`RyAAca`Y1XpcZMlBy8u#A+fNmIwA@KC+
zIj85RkRsDr#ic7p4BIVX=;*tes)DW!5;x;2j2QsfYlHKivaOo__41TFpD~%uls2l(
zXVSu7*RC}sN@mY=DS5xvIBi19kWqEt`uPP67fN;{ka1&kXN<vD72@Nk4R1mG#-~)?
zMz*Ed)4`$_+N@2&1o~0XH1_(nJwj^~Nh`5o)?|sHDvg{i49AQ_n`Saz-n)S~1Yv1J
zF(g781EJfR@#m&w_GUF8dqa~6DblxLjE5@Dy`mqKT+}`UWgZ!&rDvj|qyh%YhylcJ
zs$@PIz|>9*fXE~rM%c)Rri-y$9N818tc?tQbZj#?T@%(@iG0cM)3=}JORwMI(xrxG
z(J-0KsiJ|dW7zD*?B%!{=pLQIuET!wEmqGS@tMEym-w&$$Nz@k|FwV0-~GS;zj?Op
z*`<v46_d(fvKj;F`GYZHF`>0dDp|?ev=8l)FMRo@#h9({=(k(8>m8vLw|_y|veXM%
z_Hv@4rU`NgxDX}ICnn6LTm1STe4A)2DRp3i+{00LmT`DNj<e`IzxuPE=JxBak@}Wl
zyTZ*iE-Q%f+WGmK&8B6DK{I&u0ZAp1LeNs<oIXFw9@BG>Bs%0G!70O@!jdTUT)AFG
zt=ltZj5jjg3b<jpuzwVJLsY4l8BH!VO=tfA6@H%XAGR`B?-k?_|Ahy4&v|tBDYvE#
z-e$5t<jQBi!o8>e1m6L^!UZR1PDBkcg@wWpWN-Gu#*^W%NC&b(T<yyc6?<Wbo)sMy
z27)m~f6WNqhykF2Iw@ZGU_U;upU4K4OXnPaHqvbe&K|*FVB#~_KwW|J9nIx|$>+fy
z1pF5Q^(}|LzNWh63Cj)JyXWL^S@=JGf<KrOTSMIKNY3J&k=PkiHcg3{L5oI%=nhOC
zFnLQ19my}Thf~ax2ju<|-Y(d^zatsP)lVdDf3au%;GFe+&vxVR(@1>-QX4qES8=-X
zSW~eGmNrCUuo%r?GX$^+Pt9~{+3hOQ4=3bTEtn;Si0J~(Od-hE$OP6vKqw$CtIk&5
zyUj{NG#bJt&LfJ#?Q>tv7~Uuox~u~Hu`tScoaRfjn#tKALF(E>jQTRGR{37dsAOXq
z<6X)w7c)k+YWuKv8Jy*ih<z)qzz_(zhdvSGAdP1*I_-=~5f)0H=y~?R@hK)y4Peoh
zs-K>G2J6TUV%w9ZgK9>^)faX26(_zZIuX^Hrj&6?i?Ue*6H>&bmf9Jfopp3>ig&e8
zMKZLZ=B;ns=Zn8|o8_aoS??UFcciu^R+SncfJ2=QjYZ}H6*(HLn(HK`U5UAfrej+R
zrUImii3`WgPrOPue+6E9fwb*t?|z?4$0w}vgva+Da&2*dX=YR&l4<FeJN)g$!O>Ov
z2X{$+NxpT1%2pUF@g3GVObT-N&MZe?y3J3&@s~OM_PZ><{~lqpBX4%3b&q+vrn|S|
z$=Bb;T)IN40Mldhl;QC?tCJ=3dd+MSIg1XHoNB>jy(J@IA}%{M7y4*lvuUNvQK-Gv
znh}XG8k~`Gq{y=6;!3fk{*l_lNCTLZkC&YzX~C9bX}pA1mNBH@NU>7XMjUxXBCYXD
zsnF^fi*{#>9+aXvt18QMT61tTqiHI@(Cq@N<&NEMM+n79Gct4?y){6@*qUn62(8j)
zp)F+~S!^}uGe7^!+`9b=oAXm1zxzX$Pfoe{#m}+0bcOHy<9|uF+cEWqY#rS&U^atL
z>5S*e`;U3=2aoyY@BKRtuAgxIx#zfk^ZNgfz5kB&G&}SBu+Ir^_@$exZsn@(st)XK
zvYDE30%tfw&S>&_XLhwRY<XcA`o|Uw*)m|+h5_4vY&Zc13`wwT%VD$&t+cC^c4l^V
zb~&@;kQ@$Elk6O-tGdFin}7KYC-{%&yuVws`nL%g<i3sWuDbCT-gD0LJm19Tr481$
zHrU$Q;E7vLQ1<$`w&vZ}-{syr_o$Z@S&?I`O@pVH=*pl><hOt4*Z6<_2mcXE5C5+}
z{R+SH`A^Y%G-K!aTg3G>VvN*vO|A_Dq0HmFY&{`hlkwFk*ZkZ|ALS3fvfwL!^i?ij
zy-17;w6UBlPT4$n1)XVvcVwB|i{1xu4E3V5b8SU6J)zz{N4Y*^=f*8eR^WYuQIW-L
zLQ_qMZ4E(U%7Ox|ZK8Sxly}0)NFAq1I!>iBMr-S`FC{lj*~g{mQ>0?B8yeD4X(QI=
z>}+1(-uv&O5JRQ9UQ+gQmh%NMLR(b?97KoJ1=V7T>t&cMqv)5Eg<)K`h_t>I9QbkL
zDjqYuB8a0o5!ZT}<q}<Zbe72=E3tGmvN!28@DZm>wTiP>bc3Hjms|O`CKX7AKWZg?
z@H&WPUpMS@l9?;uhCTyMyI}`suAJhL5LXDM@|vhZ(x`s$S4SQ0Xpe%Z%AO&5OmX4O
zlor=nYgSp%9m8owfz1T133W?VH#Buk=$u@;syN*lCTZhk2_(%Ufx3=(?}Wx3K}Wr!
zT)N=#ZKSQ6q;>Ww0xrd*bemxMHCl;@p9*L1)r#Gtgax5>chm;5MB(%y$nY#_ZH<*b
zTWb?zL?@FapS1ploig$}pL&`Xo_U_wwlt^5_;>Cxn>IAlC2iwyjRTd@G&Qvo6gIR@
z8t~UMdT-rf@Z=?K?Ox*l`oH||aR1dioGQcj4wwABfA|%A7SFV>DHc8;l<N@v%0ECW
z85WO5J#Iep48b>q`ILH5Q!g5V>kO<qbAU8_^dWg(Ao8c=cC128zJ8wD2Ne&F7io>P
zcrAl|?WIoBnZkz_WfYmoFmb`J|7(9snyBCa$a_7JCInBlY&khOrLL0)M@lt_Y4}09
zucAgAc(p_k$^CbR^)*ek8ivPiLblS7ud2Bfy}Rovvn->o8**L9{jrJ}BQ>{OJsMX{
zZ&W(@uBCOO)M_26bk9W<$?}|b-Xg63mbOvcedi%t7uOkNLsaHzdm9X|Uc=peAU+Q!
zNzn}Gt|ko0oU8RjC(W_W#BU{}C^lt;-4T^Sb@g>j_t&aUPJ`j}&uTa6B4*C0j6a55
z=D*(Sr?Np+7bEuJn6+KaWa^nuJRU=@&{$PrOiS<SjN(a+*ERa0N9_g_8w7I`Uo0`j
z9Ic0B<qmc<Bovld7{bJ%mNg+)C~IURp_^uRLGc=5H$fJ9uTg=RYqW91hbNfqJj>UY
zoILEI`+;k}Frs&{rl|v4JB|m3fi`4tplKc`_U`xDpXO+lv(P1VrIPb*=WOWFG@Luv
zaDB%xT_{d(57~Ss<<vC+Vu98@JQ=}fMCH?j#;^2S7>`ypTGvFA^b;zBM4O07>nLj{
z<)*5T`2LAr6wYQ@-DJAUg;6mh5>j9PSVPfG&jA$;0e`j`Y8k(U^xAgclei`++mJ-X
zN?KPup{`fCfUzbur=dIRg>IY1RUx!;xCJM4*bvY3AKmo2YjjjXh{I!3z;uYOet(Fv
zal0U4*C8a=xz6yw0|AwqUmF{89IaP8s`^ll$P<bXy~Nbqelzm1U(}qxxkmkZg>MSn
za*iqrX+Y8jsFDT`qtVKvvSgGL!kM$IQY+VD2_uhym8RS%B8G6jPqYf3YvSk}Eg6f0
zIp;6$5}PHx!BASOx}vUT(xmkZum%430BuSjM;i-;BB%_MP1C`M${=iHtbgnZgKJyV
z4<1n+jZwvb`raOoUVepUz9bYidZR$xupKJ&IJh&Xd}^JwwTi{Kq)`w<XRHIA=~WDm
zQHegltH{NMKGnp~;kSwAuMldS(#gCeic541oeQsC6~J04RF-I)>R%`XRzI^?IoWp6
zM+zy}LPvQIXFAQ0bjQ*-p9RjN0%gy#xwXdm3p=c>4e_q!cz?pl(S+GFW4WwxZ7Ve1
z+>#lMF=raB*ao~exBxcG$ci3c_?6#cu(3_OT(I}vyJXh#)JI;xn4JBMH3k<qc=*HD
zx%<uU;ZBy=D6vh;*_fa-gF!*tf}bl6UVp@+*AKDEU`#;epz??<Gm1gU`nhd3Hnvz_
zTc?+ow97!~fwHO5PcK>wfnK5cSO3=U^85ec@3Zfx{PS=BkYD@!OVkg?4A<7FmkU$~
zESiQukICsN<*-Btk2NsJEzUK#Ci2vWF7wt~SNYznFZ09iz04;*`2x$TW^eD9(Z(+2
zXpMS4rYJ|W%L*SOP21Ac4a;hZ#!zGmr!A_mY@FK#9ckMI%jp>3F3{1VZP%D6p?)i&
z3&*4x?%XbQGUy;eJ{-_AE<uJ;c*(z{%s|QO)5-Qaz)C-5tfDZ6bK4hqw09r0Bg--_
zUcSI$yuj;jxMk5gvbH(IW*J3RqID%mN6{xMYTBleXsvjra_qEatI{=e&1%HC2CE{$
zXmpkd9T_hUq#b3~B(H~P5~>uE|3fll2_1zuInb-KxDZI6du$deH1I&}9Ntw}Yg03$
z&bEq}B90Pb8;C2$l27p{A;`HU5mG+}p?f@Q?u8L?=Dyn@@)!%sO7w;?*vuxap2!VU
z6t;)SE#7(Byv5`-Wsytd&?@dD?N@1^h@>oEQdXr;u|PqivMfm=Jx(Z2f_OSXp1W8r
z(b#FCO&Zj0gX~Z=x_GW+aue06IM+$CqtOzTmCWH{G78I$#YHcBYGNqlWl3f%A%>O1
z_4Q$o-~HtCTwTwozWXM#2d7kZgAX-}5HLp3Hc3L@YXXYKL>lL4CJo+2lv~i7*64SR
zP~ivk@{GJ!uytjF-}w0ReEApt=ltLQqrcBLzVjWT(I_oi|4#p_O*a@aDVtdH(x+Zx
zI2z)^9A7om(*^Z>DQihVR5H{S{jnG+rdfdKxr3|?r3)If$v^wnH*q@PgU82~sH3C=
zs3@!zFGF|b9UAzhU;Y9YpSVGA723Dh{y<_AT}=pq<)UIbnbUZWk7w(36?;U)1=rz|
zD-V=T>LD>Uj@C7D?sR#gXYOgKF%8N1D;0?<<@_2;wOmm23ZT<srbbjK4J&3wcjl7x
zs;dDlXB7WcZHmHVhKdRsP0B@G^5DTCmmVB(V>FO~qAD2P`Y^{wr<i3e##YV>Qm!TA
z)fhz57Q8SiyvNmv?yi#?c9t?vjYcJ)ASzamuk(Kht7RU}BDEf?r-ixklLw*xR5qyb
z!3id}tnK9NP9w+8;I$&N6-5>?J<!`TqMx9dquC<rXYhO&!U*l&!J2@z9=p*e+BJe}
zP^w1hAQ_CEqb3PmSdqrl|3gcpg;*15&~{Gj4PmliKGh6w<TUqZ%vrGYLP`HQaR}6T
zWapA%aJ%97RP)}~FW`e^Z>l+Q7M7N|?xBJss1^}7L8cu~J~`vXPeg{cVef~1ru!v3
zZh<!~U{DNDzK3-!)qIH>^oe*W>a_=55lf&KLDd-3$~-4bA!uUE5>L&5>d7Xt(gJjw
z%VV!e_i}W)mkyyMHM}aJsw548N={}$C=b#bsuk@)7OXUXXNQ>*3U#9R=~PG<t?_Mx
zR!Kt;G_j4~Wg-|83fuY3s~Dw`R~m2<Ss*z0$_Oew*wjQM&2bb;F<x+A<>QL_q!Oh!
zhJ>Id<WhR|G}s6p6B=x&F-o&H$*8ju<<B;Bl@9yUMdZ!zJmR^}Y%xB3$jM$!@P<$=
z&^DLJ9V$fy$plwvp~)mDW<^2jrjc4{aruk#d1Ga&9aT<SFDNN7c@H~;$*iSyhZLn|
zJ}ddo*Z!O*Kl&thU%$<*n^y=LOkbg|p2Iga&HgbVO7WZJ19|Q#(=O^1+Nw0Mv}6~0
z*z7#BJNvMA%)x7qIK6v7)+^-z^*x+!QucgCQ)`Y77HsVHDUZiAtjpSur0m>~8pT9$
z{D3csd5MW)o5nF(B@K2e$U_?bq)NFf(nA_JbSP(3F}*!{^-y)9kw*<($a>d|B)<^3
zB|G#XO=!9)zDkd=Gjq}_3Y8|*k<-HoZPjv6X41fTXj{qh><@EXBZqfzjy8IHt8lZ$
z%5<qH3$osTi<hqPg}?SUvBik_$pHrs?y<hL&HBzcw8>bGPnn$@;Z@+=b2r(&b%RH*
zy~{ga`wo7*fM{4akJpCMh$&N42B$POXjBaYr!h|BmI^!ZOdsxZ5FX%&WY(g!p%nlW
z-M5r$eMXxbtnF-44u_O$eZKfxzsi65pZ-H$*+1a=oe9@3T_hf8^6e6z8_*sTG^2r{
z^`0guTGujNG?=I$(>Ue%$j^S5gS`{J@}K-E7j}2Kdg~gOMv+l&DSHL8sV4x9cPJeg
zAMJB^{~@ox@&?P(Q?6aRLNEr?D=CM4ntH}!HpV+~>lM11CbR*(G(+AkX_j-*aw@rx
zGMi(wk`NtDy<|g`=uFnOEiyvXgxKw`ieNgYS-D?KvKa_%i(9rVU4WplalqYoZ{vN-
ztq<Ko8(1zHibC9jRAv~CMrdPbVk9pMmeqn@Rie`#>Y}g<#1o)%H43YJJOrVvOP%RN
zr>?qlR3%r^B!ozDA*vfTX_c5tYK4|}<*hVEXE7lw6jdYVf%8}tt(GBATYKuJ#psy!
z#?^tfq8qOe8=a8Lg!F`@u??N}{l_o_TQ#STna6ZfFx?fv%A&26=#JcyWm#&V14d_*
zd4{&~ngs8u>Y6OqxJFY~2ItJ`Ig%I{b=%7O<GNvQD#DeLJuMB&obwVjlXUmpu&o<n
zYi(ANoX6Dgt7q1s!<|M}b!?{2Tq!z2y_5lRnNy6``PfU(aqZeIUir?;y!!GF@Ie}-
z+$x4;kKx)m8s9QMIpmTJ{ND4oI5(}BzWW~2@tji?8Lyvb>&55D%7RcmV)3o-(OX#R
zc1C3su5D<l7Uwmk%_)W%`LGZt*dXWqVi}0<O~Czt-bX9`&HvHw@sU4%n*ZSc_MZ@Y
ziWnS0DNNL<bm&}-HBDRdxzBxyfJ6C~c2TjI&#9NoWF*-E9dVVVPV%b=iKS%{oEJyu
z`KRA|g+*8jX^VhHOJf}ZQMoj*?hL>*$~1iWZ~l&)Il-e`gUyST3@N(6bUb6eXrxw3
zl&-drYXAU%07*naRQ&FJTj?fK>8I1MVTHP<TC8my&UuQWOc8;?7F!J`V#jugDK;hz
zVvzf_bxmR-g)=ii)XF%hyHxTk>7+}Kuf)eN1zERB1~GY&3xI$a87-GB5AL3D;leJ1
zqDM55&>zvi{shh4+t5a|NuDYlGbToKa$F7}4XT3Bj$`YSM9he}eQsrK7(k`AOa;)1
z^^p7?L>ibj=njbK9KwI{`qQ7z29?soc7Z<D3^#JLk1T743!0cWP)6GOEqpg(ulM2l
zvtWLi*leTQ_fX3UV+&%L;Z2Q-OEf2_`UKUMc;8FIo=DKiUV23-5RN~}qC<m<j;I~Z
zDvVkqyz@HqHe+~EGkkHtylvTeaR_})2o7aE#b(Xc0QL?%@4ja-Zpdk^St^6~C9byg
zvW$>SeXMD?a(&8+U#u8iTfmWL>y3=}k80v+iQSrk&WMd7E+Wb+sHXU!FnJ^vOUzn9
zwzf;m)^KR7+Q+!JQSCm_FQ83Y2p0)ipJ+DF**d7Q6E>#9CON4kkwHwSyL`;1hS0CR
zmKX?05HD649<L0lv++)ovq(A|&tzta7TocAA=y(By*Tm3x<X@c&J$fLa*ohVqC1*K
zST(~cRZo)p+D+Y4v9cP+ry%qWClfADYEmRRMjSC8Yk(1B5W0AXbkN`<+IzIC(YfMi
zYMF(B6p}$qO9Em%U`#~EoZGKfJo7UJ7oOQ*Ii9kpH68^z7lRB^&yrb421s-v*lD*`
zDf&dZn21tF^mbYbTBDI+LCP5ncyyLC+FqwQIKd3o@%NVW+29ZUkN<%6{+y4z@UIj6
z0^2L`DDoSZac{hj-#;dneT<HT?KPCm2(A^Po)&JM))1XS=?XlI-`?Z+_WOi3P!}0x
zXi%z2niFw;v@vJ0H)C^iKtG=_n>RQs0|l)Fg-`Q*ejGMQb={`8kH@HZusTq}s<QQR
z&j29$&NE6|qI{+ILa6LczSGO24>2{gN_5wq*-Tgs$j+#Ft8`#3S#6nBWSJq)Q$$Rn
zU->qooMO3{;DcXL0qLgl#-xa)AWcDNF$SzM<VD13g)^34{MBFJg_nLF)9d5vCCk~A
zbC)jD8*QSrVlg@8{yT4>e8gxgu|d{w?u94WxOSP_-}wO#-*}H^e~Q%_s})Wu8l%Ws
zPu?$?FDp*lngYY1S3(4%ETyqR6?L8vTO5(fIsBuRBln1?B5m~0%V|nbw#Th&&$9RE
zA^+&>U!|vVg3Z$a2hq@bMNR>_6uE<L@mgq&UIlEfajvD*hOPBY{?Xt23L9%Z%ED6d
zNBr8~{F~$hja8B3z5C1;HTQ4t^T+?>YutT!hr+;b{?;#Z{l*P0-*^I|BYH=_$Wfyl
z7it!ZnrggYdN^Y-uV_?@&N7Tz1MiX<nnk<FY%=BK;33trCGsvUHC5Hp&J=3d5Nw0W
zQsJXzice-xTAKPy8M3UP-!CZ#1Ij^8uPoTu+F?2wbMN*&OlHW-5~C$r$>v6U9ehAr
zO<t57pPaCBepAjulSw{fw-z8VQO@X|AvLRAE~e62A{~rqNM)cR#(yg53cKNr3@p#q
zjMj$OihQ7R)z?~MtXV;gopUIPB<T?Jem)s9Z<cs(&W!oG-6D0Tu{&EjLow3cA5+7h
z;@I4e0o{;6Q$iF~iUYGkgEqOv*eo4;in7-e!<ICJlmS&nrVS<&6go6s^yLb?H56G!
z>m)1OCj_W%97eZ9A0KmV<;*lqiL4;1ZV)T14X*?>e4GLbX@)>o6LjZ!mw%hiqsIn_
zDg{@X2%}8%Z~i;~V@74k-otx5Jel)}&wq-%Kj4#3U*Nyck;Ul|TNl@<XBEeL$BfSJ
z(9FhM*tyK-uV18ibeGxf`#hKg;?0|!|KiVb>!}MIA3tKz){MBtg(t4_*8k%lh+Z;i
z+7^fzqyB)=dY_O5TJNYmXzxS~<t#obX7_4(y?cbwI=}SmFY%!lp5XudfBN@$>+U0T
zW&vSiDU+gE&{!PLf8;r$Z=rFt^NQtUfo~i>NT#>QegGen{xD4@oqSGdP`xeQezf4-
z2M_RCCtm`W2A`dt(Ge1*HabmIp5OY--(=_NHPNc#aZN*>7ZQ`_9Ik0OIhnAmU1I2@
z0ZbaO$^f?WA31ZejVfgVBU;&}u8Gm1Z1;7X$uHE|KzB6>dTe$fWnBUz&Oq1ny&ag5
z;*!oj{TQlx#?I@$p4f%KC~TfFpDeL9!|Rs1sn~n}fTQb|DA$K*-4Jw-;@UHW$tiI>
zl{42n0X>YM(nfn>cKATla$X9hU9YIoAp|iV(@9Nbz3jy{fdt+IXAQrYCngb47s2y^
z?=JuUas5;_sNq~6l?T*u1BZ%Yy~MPUw$&Kpu$e+H)~J2}!%>d9{yFg1(6rDVqx=F@
zI8^TdvZdVQdMwBkm+%$D&_hLyj*hr8y^**NqFDC}?=DTTwZnv**-6RC$uYZSMYtYW
z8*h;Blmxv51mV*=p5a;zKC)~E=%8qV#i<OB!3Br+S)z^v)`~G7{#3)@`3lz_VRS~e
zzJWgQ)Nfy*cy0;4hE{ZogqFTlESgiIf&T7!+I*8qtqF5QQD}Oj%P2obnF-`QV%bNz
z3Kj?W**@q~D7H}f4odfhPtTb-i0n0@j*WC~rDAn1bO<f(y_nS@AJV2Ll;VI^IbI2c
zw`(GaiWPe-bf`dBP2g077#vn>e7(e&J}M;CG|?bqeEiSJGCdu_p`&Q3#~P1L0-#p{
zrBpbHoxI7<o<j$W#uU9nH~5Z08dGVrgxV5nve<&LOl!^56+|7RST<r%WswK~t-(e^
z?Q-t@aLSFBHrc**Lj8J0s12wICbL2n(Q0KnVthz36akD*+G24|mcZ%m{?scvS3rzj
zd|S)(wsGjGC#>h_&?7H<Ec^+VUcAZdtsnB{eh)hwVyz=8g&mdztLQzs%lyrIRF4j^
zxy3|-S?{AVE$B+h-vy1T9kFUyoXnUWkD-k$BUq)$6Ot5lpw&TK27%mV%nv8@Hw)s?
z0(wJ<6zFsvsaXv_1_X8gV49HVZqS+N?82Q~p{rfBt5PGXbr*G{R>9)kbho=YLm8p`
z3hL~;DRwfL(474|1*w6Q;@)P4%o@rvqbw~&k&&edZU{k~sht#~+NtE=Ek%&?f!N6O
zJtVkRTPrLFt#L7M;o>E}{MY{)TUVY$mwkeFEGDN6wzesI1GF{FPEL6A&fA2hPGRff
z#2rH*%L~*9E`013H$VSL#t)8o`1);*-haUCWQGB<e#ylrE;Cs!x%2wlL}#d{9&#f^
zdMeblR<tn+$lwE}jbuvE`iNH<lSR#PsVGcFWGblQXr0PyEEcUa1wz}xIPi)(dQ`Bu
z*ryG(7^k%@x|Shal`D=8=iEL##U`z>&f(GVf5?CT-}>)TpFHILn{TprGUK29<8QF6
z9h>C_v(q_N4cOYfO2~%1_R6aqJ=*8^@R-xnW88d!WkhhAhqoUwACGaN0hE}b1Wgk=
zlOao{4c3a$PnH>MDcUWgG_G!NQQXJL<X#Cr5D)_!8EPezQ?qIgm5OAUCC@Eco~3;`
zK+t5?GF%%lnK*{)1t;@JFYgo8h~vW+qczKAps6a9()4;gd<bVbyNMduQDaFo*_eie
z#u$v&kXebC2vMBI6ZNzk@N~^jOn;YxGe>*BqP2JWYgsbl5NmArSfubB403LL=xJ`h
zb4T)QHNqt73c}PBNJIszDlysgrJt*%*!(f*!?f-kidB027Hx#io8`I0y;v(Oo-XTA
zX>2C(EIP9kHYY|wKU*Ehj1lMepwci(w1cj7I0s%ujIt;z2)mEauAEqXa#L350Iox>
zqsE7h5fqc4H|a}>E0_O{Qng}LrL#0SV5M<l%6W~7P_=~VKGqI-;>HEm%pnKg{x;2G
zA3s_0@m<4}&wqsS!ZtB#l#iSo%qV*=V0Fc#Km8`NJNGdct~31gPqFsYQxwBK<TX2&
zcFBE>)lf}lY`*jakG}CHs&$kmCo6jl@&T=P%%_Va4blW97^PC4RvBhK5O43ZU06mv
z#cpqd|K0!L-{KGdv#;>`|JhfWy9R5abwY==R&)9CIX2F%qYQ+qVm=--Kb;esK$oQy
zk)d~VzH7PA!HN5@F<=Vjy5wu$`x<Q;SVok{o=kH5MkxI%NIZ(s1#6>>U;c0W7EuLk
z^t7|NXxf9AIjRs@E-FsO6EV74MF_H50B5FjXGfe$wV755A0-dowRKv5T_nWWx>ZrH
zzA>(}mYqeCIPO}T$#7O{VWF+=6FFmb41aa_9J@y+6yvT;Tt(NU&r4jl4D3UQv>~9H
zhUsj{gL{W;Uf5vR%g}g2UXWdV5+C2jwjN)%DK5~Y|I2+Btgxbjb6~_o$pvZDB^t;<
zfYt?6)5<VO$r_EF`6x<^u#iD<e5`)#*jGPk*H2}GN>LE~n9ztJj>^R(Ad3)#MyU#;
z1HP=`=JN#eAyhknaF^&#!7fpSBH9VqG0Ln%m`lNEC+JW>EQsW(>4MG?Rnq`%LiIZF
zr6mRnvBgg(R1X7jkzs}o+xIB%AXeykL^VCkMr5?BC<|nAZ#9F^;+$rQ7|pmCu_}@&
zxUjC-x}orE5oVciqsp8(E_m;&CHb3I*t*g(yxO3wCNnMN#R=_Ez>O}lbcV&jm`arl
zH+uAk1)0@^utQKMkY|M0hb*AX1x$88IDVbb-bWQRs@w$AN2@X!ScsNWsZ1Jlm8hga
zw5q`yS>S1-5UNy*j#^N=q}h~gQ~4Mq+j~gODXT`f3uczgL<qjdXtOd}5Z5|GbY3)S
zso+}m1PLWoA?YVlWDDsn6dUu$a@Ufk0@XEkGErBZkU)71N#7bHrt4ye$t+29sX9ug
zUgJi@%(A_0#4*_i=8GxT8j7M2g9tphKyWP&->$jxNz2Al=Na$qGduN!WsT2fV)d@A
zXjo!r4x$Oli&L{U0_xdh=tV~lm1n5Yr0fwfflB`_qsPr#%JV&zAu>3>O=fDI`1noU
z{<~jA&!(`pDVl2ys@KErYI<0jw;wRS`;g-5MG75>YbCMhkau~J<Dy}CG+}x$qi$Ma
zW@(KjXNjhdYdpHlMcd+J%>^ve(>WKP*e1FY8kce{yJT+CH1o$`lGOOe<TUas$~|GE
zNYGs<tvV~16=K@beZ3T*-OQA;GyE|}TJeW?%z{6mO2!zWX?FThttF4tX!0T>v(iu$
zrNP)PdLwDee4uRub=A@=MMLP)VXeFAk7gAcU{c20`ngSh`FH*mE?;|+wl(CV9ke#o
zvnhGeqZo_;MKw8P|GjtdO`BqJG&%^y&?#Y?2My)=8n!oJEw_xWZqrVdEDlduOs8a}
zrS&cLbjHT!$GQLdU8>1k=+1RTXj`JvIHd^&vZzvST-pu-#GYYZ!-X5y*?;dYF~ZK)
zfJNPMG(Jvfg*cx_<mc%i$eU3GS+vRF+@OQSs2p^La$2aOs-caMpcDhEc<=uEeD~{r
z%5xuihWYWFcV52DlQ*8mW+f-{DYKn9|Lm)O!5{qb*U38bvY-iQihjXhxXzVJ8*FWl
z_~@rDG8py9b1*6X!I0x(z=eRemi}5nmRs<eX6cz#6Jqq#p`nc}K1j2XXI5f#j7jDy
zmJot8u&!mfn6sQNSxhU&Cv&RBoW)|vd@>dNehM(uT626j!My*7qRd4Ak(y3xG=o8(
z(MF%$3m4hBxPxvRi5N+a!!h_2o7lOTr=ggaNG}=WB-*YuiS{WCpf+ZeZQ2<jrBx(m
zT|VGllUM>F1>HxqA!S`BB-8|4yMVZ0b8E<Gn6tH(v8-C6aml&%jFKu*Xwdl%oKYRm
z8b_?KPCn>M8(BMDd6C(inBE7WRBA!K(tt^8O}{9U7CT~%5e7*cl57UFiR78aIWLVv
zvcQc#3Tz2jWAV=5wWo1DQJaOzC})~W&tI5Dt1_npo0GGC=dijmpz7KjBMk)U-jP{C
zq=TqqYCa~5nzz347xbREgll1Lg=SyXOWge_i*ZG4Gy0XIx4p^0L`*y3@E}sDITwHR
z%M6@mzMQguc*ytu@qf<g;VH}0eVXwYUzXgud5tfA<}>(ZjrM`QF_eQfyn;nNr<yln
z423il-X{=3<)9%~n#D2_-`&TRinW^`rn!HcU;Fh>^U~*kj(_)m^Y8QC;XSMpb_h!G
zxu5%4IR}EHUCfyrpRyRI`Z9!M@)Q!|%hNgJ;$<M?v6zj^eC2C@!Q6>X7M(ad+9ZSI
z1_=R7XJUjH8h-Py|1Rt2&k<vTYAdSoDZRCI(Ym$`-Ur6x3A1@c>%BaV$jW~s#IRZ$
zouSn#e7qa#LmGM{{i!``MnkF)J6dmdGK;a)*}C&V%-iawCNpKH2BV9uQfYYF)mYLW
zCvAP#*d<D@N@Yfxtq9WFnI*LiRu{B7P*n{N9z5d8;WoqVAu&(%^X*H-#RPqNiZU6Y
zZNRq}CxaQ~Bgz{A7mPR=8y`_#2Fud8re&jpc%L*9+e4KkSe@J{tqbo3d{N!{3?do&
zNxOb38&q{@aEFHWcEDADb;PnIw;Jtb;@Ub0R$+!0a10<;#PB+pnG~M3fy@w14n(5P
zfflkFvRVvTf`JZIOYVs=1ax#OY9~Mln&@-LHYj^r=*1ZSpa-`$P=ohqXCBjP{Gn#~
zAg6aRFgWKK4lF@iZd{Hu)0%f5W*j;XT5R%7)MVDs??=?g5K4t!8lugqj-ZWwj*k@8
z!wvS{bbR<%Tl%|WLUu}6H|TLiv$#sN9HUgp+U6SlL5We2S&z3PRI?)vz9APKQI_F#
zfwBWQ`XOQR9x6I%PO|}+QgSTu#89KKKtWWQ6c$biqNz%Gja*AqXId$gm4;Gf64Qef
zs#oj~SrQVdWHLpff^@v!poy-*=A~>_A&PHE2zWd?D!fmWTcucq8>D7QC9MSM<9~%h
zxk6H<5m8pq98)RL`J~4ZR!v8u_a=>DNT{BQvZ;j-X@a7+-D9XMN~a>sL5vN?8v4Cb
zHaZkB3*a@2x#95D3A-OFICsmi{Qi>GYusW1ra&^t5*3Ez7N??6r<F$Q0Ko{oQFR*4
zPAjI99#{$_ts!Qbys#|xDh4lHz{SAYcAvVA3|@Se3txMKSO4@oeE4tt9IL!V1FFc#
zch|{uq`CVDcX)s;BBpPNJ%utQEGMXw3C+QjlXo9bFPAvy(V4{=(XvG}q4ubLCRF4I
zA=CH>)2U~D(Qq6sRRv`p$+M6&I^DqGgKtm=gEAe);cSeYDp_HvrNUBo+Bm&(aqO~$
zy8>55b=K^v?%YpQJ*`%{8r4Y%bc&483Tq5H{oPtErsJ6)QN|jK(iolIOQrE%D91sG
z?kmeP(fMg5`&6g1TMb+kqs<YY{JBr@;-^1_ijn1X!r;<VSe?->7hnx})<dCKPABZY
zcNbTSp4-@r7=t*vDsiNBp~aynhHJ#A&{;;b3cINojtcB@M6+CA77gBcMmKl3^!!cQ
zNyYf#A+wV)^?XiURm|oUuJst7&}OS~0bHgyt(FW%n;brPz{M-ueCf--K)=_=M#~Rg
zdz0^c`+Kaf^|*fP8W>m-P;*aHd7(TmYMRzjH-Xu7jLH;k+u+-V5DY<$2#q4^D>g@l
zosB;A-1C=z`6h4OIp*P_WqNR*x^k3d!TGH%o_OX7u0DB%t(`6MUIE(ReBjZY`#iXR
zz<hGT(VMqf&SzA0Mb$KTCn|lD<@CyO#rD`ax5?#OyX>rQFg<RWEtXVG%c83Bv7&8T
z+E&;GKE<D{oN%KU*;s3_+K^=yqYZhM(Hr$y-`%3$>r<8z!=rV?w>9(0oRfnSjt-BQ
z%qFyLi?M<v9%p0Bp5g7c?viH~V+~nmD2g6y>qEBBZ?myJOu42)&(&RqWr`@lSeiy6
zl|mGQA8j&hW~DJxF-0?+<(sP3CE0*Ii<3~DgL;rm>gcE!qVrQaaBg=1xIjn0ix2`j
zQ}l-g^VtNO;>ON~p~584n8cWgE0<EOI(@em3S<1>i$`PSJy9BrHIhZF47Mo9i-O*8
zK-ufzLZGc`GzydD5}(uQ)rn+9NuFC!iurU(HJ@UH?}Tn0I#=YT#pM}3876rjaCM7r
zTZ|S2&>`qp7cCQGxA7(2y#^hUq(TL;sz^;|Hz@6LG*^R&wCM%U>OECJ-Y>ay`3k3I
z$oqGWFq1L9)cB|fM`Ktlu{x)ECqgx$T{~dnJfVsl9NppW>6{<EdY}EtjQRMK>G1)v
zYG_OkvL3Y`(opdJz1R5I(_duq+5!E+8n&Mkyka?<vz*tG#Tql5R}_Pca;GHAb95{T
zO^s?j+66-8IQZcs4)0c2J*EA@D_r^f&+@<ecmId{FaD?hd)`0V2j75qy!7eM;Dbkp
zhH8Au^!OBCH_1FF8F2ZKvSOhNepf*gjiGn$3g7tAyWHJ7Nba&>Ww1v#1X3Q9LsXxj
zF{}+Ue)daWK*biC8S|rKV$j$u!#hXoJiZAW9-dM)5)Tv7d+kAn7%K{Ga-P+x8%)ij
z3^Ih$>YXDiQUu<Ag<<fRhV(J*cZbZP!DczL=?NRX0u{~b-gq3%?U-(nPN&@sKf+)6
zU0pPx>bi9mv6*36EwOoy#}V)xpG<l5=#<U#+w`_GRGy=v#a_IQa}B2PD3_z^n%K6`
zdZO{@))S*4kfD4l&%*_w{yL%bMibE9<CU=Ya7u=Abx7{!NIt#RQX}fK2sKpF?f-;b
zKa~yY;+tcdeGijDGU7suq9qP2sswEYL|4;H8^XhfsB5<gnL&m7L{$@r7@eUF$x&-b
z#G`Zreu2}qY#wa|Y5~@+nD7z{l3a1U6W73~!7F^}C_eW&!n+gpezfG`Qx~w?8RkJt
z^CQRPy~uP@u>P#0w_(^Q4U9^ze<~8DQ)XY))Ta=$oIJM_I5FpOC2Ahg4wM@b@AWvo
zy+#w)Y3qm!8S}B_=;eyymn|k=2=&7$l$wh1Ne)@=2%*9GoZMt6a)KVAlt;kDfKjbX
zs>~MZ;!CKLxA4ceQL%(#1jPWQEx}qT(gR?6P;3)JNsKkBacH-|lMvP}Xn7@Q?;I7C
zgt!Oun5${hup|wzIGlwQjaCH>&Y`SBW9jm;4GFmkT{{+5lw=h<3ZfVhoHY<q_^&2e
zk7-er29uC^V+__<OhQvcn<|@bP+~ySJzjAgmQA%<Hc<4@L8n-Sv^mF^LamdgP>~{a
zq7b0G;r<Uj=Re$M?aDSAcaPZLt7)T#xikz)8=1gbFNH`KV4mn`Qe>F4!FPv8I5QkT
z;X_By3Dk8<j15-jL>sZa95c*m_D^{63op_hPnf*%7Nc7?i8d4Mwbej|KDSPm8{C5<
z{Ctic_0hdRa21?PsO}yzy?@Nf!wHQlXhTb8EQOUqHmZye93dtrX(Ll+r*aP3A#eT2
zQH3Ge_Gy+2)}o@!RLY(Eu>h!)nLQg{s582g7-;D;WHc4FT1hc$(kZ)Q10;lXbzrBn
zx@%?y`3%X7CFxcZBC8+!*JdW=oU%exP12%<L~HEmm7NYdvl@fNNpvpW%k)>t@}ekm
zUi{R@_{ir!$NI*I<$Q{Fj&ig~f3S&nEx~(iUZA3)nxArT?_Sc|NWOB2V!^M{dX8x@
zllKM`{UOWwOsMlw5Fzgz!OMyb)=+M(Gdy<=6(eq~V(t1Ccqb+#jbk>Ra{s-3GFzf^
zMU6D-cMlGD>ucX8s};MuTl~(y_S-CG$LOJ_p3ivlsU3R7N2y)Qr+@Ax>_DUw1}i_?
zx75p;wrz+}6J2C{d_d!B7PE@kq-Hj0DT{(^(4!5C$!yFYef@Rz9!$BoyU7bLKE>5*
zyR2<&(i>RndcpqT9(V7*!FOLiW-(b3Ylm+|bKlef7nT?$C1o@QL(U*K7%TbRgDfNO
zWn6mdJd@%+gX@~fVvh^wpCCTWn9S#d&`>WM7FCP$ODPzg6po?O*C%rjl3qSWpIpXS
z-HS?VtkyywwuYiC84P<227PvR&vEVMb^2>VvP|5+XR{g8(<!GXCmbIgv#jeCH80C8
zMVZs@mGsJz{&2v#olSOjw&|5Qd12Yy95EQKF&)pTs)nL0p&!X(E-1ErjIq*1N+^Z6
zl(xiB3vJIgqVK~IqsLf<QIXs@bg5`uAj>kU#&NuVK)Y-Jk0wupAiXkku{ui<AKBYY
z8Z;SWgnpdXy!|misMc6amI<}jrYM=rlK1-bdVPvsNzv;OBlzMh-_%$`>srxgJ3(<e
zGb583MjM-Gqgl>pv{j936)swA6dh_L;+><>ilAdcz9a)x2bJ!hlt-E#t5F)QI;DN`
zW=OP8CGd07GO86zL-NKw#u|oeBl7-`NMzPHCi6MR5AX5r?Iq7HJkOX6+s=vF(9a9<
zzD4)IO^=wpdyMxDwRf~$6QiLpC42iCH7tnvI_u{w^XVAYE|HZz*3^)7T6#^7>095X
zj26|~WT6(+i#aYRl*!19X0WzKUgRh$7RxztzQ6}f8zokRD2~G3Tf*@PO4ayr;75P_
zPk7<ChWy+Alm7w#_P_H#!@0=L`hc6y-U90hCo^XI2h0w~!j3YmG^4oKg+rytE7AX!
zo9DTGaKg7=`JwEW-X#`N8mc7vNQ@D*CB%qT8s9AWjo<oBw$E?jmZvo1DT|XCgY7M0
zF*Gg2$b7!w_;`#<`*cUkk5N!Zd4s!pL25RW&azbqpAT|2B<j8}%u;umwCEipp}XQr
z_#L~%L9~KuvA_>EFnJnIrF*IK3?!_+Ej^@Er*(GMp<_a%gEf6`h@IQLtdH16oNZVv
z;QibCT)cb<Wi7o84S64mb?k*3#J%_B^)yEE?&})b7O%y(p^Ht5h+IH9PtXSCz2y3O
zL7as^BCNRR6O}uJ6f>04F|1i3$}xttZho?^pUMVxdfP#zq4fkCF;QUxv|7og-vgZ+
zigtv1Z-Tz`D#31{;~ZjxiY3u!;55NBXd09Xsldw6sz!wtnkmXUU;s*DFobK~(MW|-
zjI&~CJv)Z;*Le2VevyaY`8LE1Q}$3X;EyvVdkcKtBen&m*RZjrv71mn1<hf_=7wfG
z?V$_B)>_MMFVfC5t;=ZdM9fo$xVOgmTLt54n>qr@U{XNz!*?3CUu>|2MlULC7@)N$
zQ+Z-_dbIWwg~4PNa5yhCW*wYlYLX7<Xu)QL?dPz)9m4dGxIBgB5!&Qvn}O+}yd@gp
z*N5IVrk@dAg=&R@s+1>qaT$(E=%n)dAVig#zDUrSpq^2qlMzu&NTlk-`)Q{#T7h?1
zlZly!5_H=bl@zflP9WKPtIqf+ii$m~$Xv<9<1x(z2>N7K4TR8ej3v?aIMdX28@gKg
z4X4fCu^3O2#;O$!8xWne&9Tc+^-?g9%rZLa4^cCllsx?QlFOf1W9R0a)8jc+mD8%0
zu&Btes3H?HC>7B<2xTr!<y2~dL&^+NU6Qs+f_<VGG=+%4u-u>0fATzSUD4}}<dt65
zSl_by!V@$zM>{@YdG`V3)vJ(MLAtF5m1E2r8ik)O@O6#yo;aJ)K00A>Fy{FELzd3q
zwZS-tR!|g%);mlHsLV(~WE9#2qRAN78He8DFa#|_qUbGoQ&Z-e&LSxW|AFr@+Nf16
zAJvMayUIOP(s{DdiE5Qn&eGSc1{rbXg1GwrbZ;uv#a?vgFWv2wT(OB~rY|leGo}>p
zq{WprcDabUGX?XKf7t27Rah0=S_OjBw0NF)>Ir`4OTWVIwW~xN%gM1&t93@PegR^{
z`vzO|FhpjjN1Pn&5t=qpnUkYpLN-ZS!Kfnr%?;K!cbH8lRP(u*tAym>+vR5{L$S6&
zzn9ZnTVt`9VPh#sS#IR#8ADtw8Sd_|esPyzggV?G3>ZB);#=Q-g}V0SMagu&Ao|G3
z=_xi-T)lRMtSs2ud&FWnWovDVJS(u`Vu4aQ^*|hs(T3OtHnvN=3hdoKVB9_+`kc4#
z?{R#vPiZWdE}Y|IAGyltr2)QnU=(*=eS_oEW5$ySRaFzCrzPM#60MSot0K#>(SmA;
zrKc!{l;wzF-XqUFMh9G6(vHUL4Ic6E{G81X^%>7Jy^Rs!QO#s=o@fKZa>#Txr)g{2
zrWRUsLS`iXK__SEP6sLz{SdwU*`Fa2AyE!h$}fzZxvYvywCF|io@Fo?Fj^lnS{v}>
zGq)J63FW(4EE$hag*rH$b8<8W1-IYU3<f>cHr6=5yUn@t+ia|_k!KlA-BLFTB114O
zxi2tQeiurp!8#SIZBx@!Q$kad7gh}AGGPGt;AtDt<Jv5v92E3TpSE=@W=niEr|enm
z+$NJSkpihStkXKC;dI0#UWC#bYb{2np`SMLxIW;^7CFXPL6CEK?nPOWmnB8NM_%Mu
zn+r2Sdt5R+qodKgNQh2&I|1+>=VdRP&8K8}PF7@i*J3c(+^(2k&U@LfQ&NEs(t!Ek
z#T+h#vxa0v+B|zt6lXD7F-_e}6jxv89eJ5^<H;w<dwupFJmU0p#@?d`gtnn|4Y)dG
zi7F0UV5^_8n^{Z&YExs*kH9KI9>{xQ5M&xfGq3TL=XlaE>Xpoj0m_azZr7NU8;pFP
zrl~Q$qG}%U@ekjmdh~!odvq}9x<aW)t_-8?9>t&_v^De7147*pY))1vigZ7;Smtem
zCeZ3g$mHWO=GaNg>wogceEfI++x#1U`(NdM{lERc@T<T38<fKWAC^=nryM-kqncOp
zI=W=;lQP#GDGN3P$*u1Xnd+QB{@R}swA2Pc%dp#a`rlNablIUGrg;~(M||PSU&2*0
z+Uc0-!4##S>=hCt69Un}@$s11yiLxu;jB6B49`~VFm=X$STP0V=d{7mrr~8&ooC3u
zFeN@KulOq+W{1ow0f%IrP8e-&CZM1@bi3hm>~6C(2#IGOBd&hB3c@_<^sg!z8D_@P
z)-|?Q5Mo5f!0FK``wtEoZ1m963FSsE%(US-!g__89ALaAU{J<_Z;4fdasllfAt*^5
zBn`W4AbKM@X0I_O;C#S%i`PE+YbaFGs|M#~XxO+k95buo@=x0JQ`w-lwL%A3L?P6u
z*2>L2cj8c5M9jL!7U`y}k3r9gZVsV`WDfSh8+52qu^{S1_wYGN^<^Sd*U<AshO34~
z=_GV^0lbk0H5h1xp5w+3$ewtXU3CmPmxjnTI1|yirFTwaFKC!K$|2aoOLl70(DxY|
zd4?`Em(K+*T~)++#_{Vd&7B^D_xsGg)92`5L=zQ3%XAV49?yaS<2NGbpEKyTB9Cj*
zylI2V#I;hFJ?uall@Jxm_64~MwKO~Agis1`#8;@I2gL?z>lw6bVL2gIC$KysHZ#$M
zI)iQ-h&2p$Q9X@c%w?*C24jfIN+d^zq$-OF8f6R$X=V)?-_fVL*aFp2eo2mJ5knWa
z-oc#_WdceWyov;s`2R5`Tl}toX7xT26+VanDL%F#Lb8C52JaH3*Lz6O30fJM4C7aN
z5^;}{0ubk@PRHWd?4+An(Q^<3r4YP0=SG7MPH2!J>3xzWfdKb@SaI&Df?{`v-K!1v
z-mGbzh6RAe(nmXmkAa|5khKOwMB60JPofQJn9@)snSMoWaf=xyTVqkp+1?m3T}<V*
z3W{(#BOCUKO4DDnw3`FkM+dn3dt?{R<BA-u99~Ptq23tEbg^lOhX>RLCsYSBP9ILF
zYcZc7)Z&V19Nx49ukhYs%e7>_5g{vBM8!m}fz9!5DKy{!EoalxGws+1fBE3f^e$;|
zp}Rx3b5jnoA9RRxg2h*~^v+&BblfFaA^s}a_y@X4a9B0GAw>}gB~J1MU1XKFCb1d{
zqpf@_QGiwV^E*AAN=yJFY>F5aw#@k1pZ@|M{@mxu2R&RnqneLN%Y~x1MqUnxM6$dj
zwk_k+1185Ogx0SNQb^igtrT8K*PZtV3^%svjn;`VaC~$i4NsJ?_lP1GjVW>l8=DMA
zBcUK?IkWjZNu4_5C}^T)85CRBc5%^=4GOY;533DVZ?5yg&6~XW&Rt%A?QM!)pQoR^
zNYQWk)Mr19iH39ML{HbYEoDzK6pdD7g(j+&(r36<Q7s!D?myzrJA1tT%9|YQoiZP{
zWEw7C-Q?yAH|Q0L)8i#?eD779gT7YSI*=ENrIrBu#?`oBAk(N!W3?gI3Zr4r8(<XF
zE}(~2au2hL<<<i8bj$iPFf$D=zxFy04#yN8)=xmq;QVI6hp%oC-`T=cIg|OA`Ep7d
zYXWV$tzvSyO**&7G-FYdSScNaFRYxRNzoO4<y4(y6-hs)R4ZEdx#IL_hEk7&)^5O9
zP1(;Gtqs}Q*=GB~1)f+N5<{SxFPMxc%qCNg562wtAEJz+$TK#!)>+@!;L?>{%Jl(x
zzohIHiF#UNwN1}K%lnl(^iUI=px@rL)K!DAiY&9(Vt_4e()}f76B?TrxOPkkEwn8<
z6O=O7$rvda)pdKbF_z34Y?fg%OKuYN*9g<&gTAu75Vz$bCo6J{$tdyyTV(P%Yl#|~
zwxM3s1TWc)-igz$Q3{*TZSQ@0uNBcd+PdNR@PP9dFYv@u&!7n$JiJd^H$rtrqk3sb
z76Q>ZL^3Dr3-5fY2jbb@6=T}_)XJEqOUD$vv}Jp1ljlDC485M=-M8N3&bxQmI~q&w
zXe}A1tq&NjSl?J9kDjd_ltl!qDYpvxTRmD;<CP5MoOeVYshYri-V#H`*cc|$C2cch
z-b|>Q22<5cs|pu`G~RW?d&={n%LA5KK*b|sbo2)$d65%+WI3I(oL2<pIjt7FeR4pf
z1JCZP^F+VE_AG0rWIi9`RUoJ|6pl`u<o8c``x}4CXTSX0JpT{=bAIVdzaXe*y=1!g
zh{Fd*_;mh<wB~|O(LTt$SgVN2a?JWU{>fLqPStjCcnP5lQA`oznNxpEj1{9aG1UC_
z-}pT?&hOxw3G?GA^T~vBS9Z}>;e11gfvT!FIzC<Lj#tvPjy<d+U0`QdE9tKBe2l3S
zeV}a`(O;(XZgmc)J*Uf3?a=kE$?c*7h1I4g^MdL4lyWe{7WwLcRPpRlz+=rSafObm
z1hcBn%y6Z1mA{7wE(99aCYo;pAu^jUxPR{+SFWFDI;}WwGK%$0Pz849GQL@27c+FG
zAjvOm-$--WI)Zc3sHOx8TmT&~O6o9W5!BU0td^gltd^dk6)m~<f~I=s3C^!rgFk84
zPi2D|(%opZLX&d7g2y>9ZA5!b7%A)pEy2$ZXPE68=W~>@kQuaYfB{-<OXg@hA;ebn
zrL+(yNO#mvv;q&YNg99%!JY}5LnQhl3963?<pJi24-<J;rt>-wbVD}O3~xpBE?^)Z
zIJ{mG=PgVEW#-vlt5Ji%+PXtM*}zf5=zfp-sK@kAwmEpuQaM8~UUVR_!RQn@5*5ey
zJ?B26pjx1^8Wn1Eehy<amPnpQbS}t?a-utR(G!iu5>ZNufEXje_e2|^0;nKad-)Em
z2hop)8s<mD$z7P8pvOmIN;13xy#SL1Dp&#*R0Fyc^B|SWgjSdMgeZw2M_A>bc1lr{
zB5Ki@MlGEF%nG%oqg<Ye_`AS)jfsLhg@ji56w)3OJvaK~^p<{vOaW6>*KOQB5Thfu
zsbNl(vM51+GKEjgwD;nYSDlRMBFI$|#+$A=j-8i)Mn%Ia19oMSBUND#dqif7KJWbL
zm=FE>Hd|NM8Sfo)JhlXr6PF9nLJd?=rn>T65^*Ty;u%1hMAuSXq)vL#wPAAHV&e>Z
zex15*&}B~?7N=v34H&C%!4WK|k)c<fV|n{M^rJo0?k*|=6(uG`rJS{PUg0Kls^c-U
zM{}0rB|gZzYLKsWBx4hkql*mP?-6w*+KfeX%uJ7^gF05UMrd7xnq1cur4o~<Wc42Z
z#p&$_a4oC)u^i;CMmsxX<zsTl{uSLRX~+=d;Owwd)m6BOR^I*lIJnx`M6VqEIvUei
z?<>s#@-s}zO0?FH<)SalODVFA(PWv#&_t!F>K3Bq=YQolx$^Y0D646jG4*^*Xp#mt
z>oHv4P0vi@swI<?Lzc5y($OZJP>0$o86su<oZetW*&m=YF?wm1Gn%F$I62S(w6gfv
zvax%f?VXFX-qGt1(ZTcJ?)`*53)1gmplv<H<~i~~kI`@qlWBBrWvJyNFa6x-dH22d
z(8}<Gm)|1OJwElxEA&*xWO0I0ivFmeu3FJz4oa+!WJQ72k?P=tcV4~255D^bZ@m6C
z`wtH(tYT|potw{Gpy)*=M^oPT;UV*?rmro%;fPD;OLnd<x$=n#YZo0~{pys5Z(P8R
z6}eZCYe>$t7E6pR{@jG4=75d%5;o+}C{%WVwWp7Gx%xVj@7A=(8G2B#wxLia!dNrt
zMJDez?4Q4bjYHhD&v-n;`8v@xIg7<g11Z%Nlbvh2Pc*YG7OTtM?6ie3u8O?XZLUf3
z3Mn&XM(5Gm$bRiLzV$TA1(V|?5APnLj3zHNqqQNM+na1{ZE)k_d5luj%LS9kgvETx
z@!^Ei<0<#<zR&1K19mTObM5+7uHL-L=Jtl{@2xbV&R2w36Sd@m>r9guf|9PN^a^53
zPPJ`=RguO;jMWTB1rKlE5kxZ6SOdm{)pHCf9?TfcXuVHa^wUtsV3IqjB$9pbdyF*}
zn`adLK3S1V-f42L28C+_O}(V8Dx7OETH%o<#XB!eSl1Y=G~fnT*VK!O+~#PVvA_R_
zJcp-Wc#fv3dH1z9MSm`?JINF<xvis!F31_#-CV2rno3RU8I4nrddKehZFVngqZPdM
z>Z{znyGPvyngDGZFiNw&xkj%n0Zp}>6N$KGP0vQwMlD(e?B*I|vUi56rCMsVuL&Vg
zJHs-R%om=c(~7y)s6wHYVc;EJIa;e}qNde`K#9#7p8WK)OdcIGxqCvH^=N~bW`)*~
zWdSuRnbi%CCR6S$Y8sPsbmxG@`UWrT7*uQoZBE*LqMUh^cN7@jfBE}d|HLQxwO{!q
zc5hvw4h>;^!v5X6ERW|HZ3rZ%<gOX%nl~km{N=`Z{_>SKxxc?pG#%0{GSiOlm6S?}
zZFP2JJpIfKKK%>7fUD<J^C?YLfiV<=KAx0I?gEQN#cVdmg|KS!IyADYJvx5S>OJoU
zt}DuH(l56yD|4qaG0f^P`>VDPapeWlaoj>0$lJ_PR|}T&8AH1vwMx1*&*I$@a@u`7
zox$Db&)9XOCcgW<8Xu)0jLr*Cg2%ar!-GSP504pbjJSXI0oQK!umej}7QJ&B|KK*(
zHN?z9D{LjQAWWo~6I;>YYS-eGFe{_hgkWSiZ8X|BVYV2Ba>8=4O5vSPfQh(od*_M%
zEOP26>H4W`Q0au_inOu%M9(yMCq_B8ZwYG(HIBpwk!oHOM+&<M+KP=hIw!J8P*aqy
zS-AtYOR@4cBd~oUa$_4HM2pyXYA~sp(hWg*nYOe!;lX{>xdOkIVS-1Ip~9SE%}`!7
z_&(^7VRSLDn9d2yNWNaOej}rI0&`d6SwNf<GKbyFm^^e$-t4n%70#Two1qtEy&lvJ
zv4QEVpxWQSZk}R_8RcS5V-?y)tWj8_QJEs339W-DCKX;s6fG)RqRM3((Wzn7V!)w;
z!e}EjwY15F!47fj3Tn1bJbj3-PSNcYl}8jaVw<5{BpO9j1(=ecJW6Mv3rvjCJQB~Q
zb*V+|oNz^44n&EZ(8=r~M4vux#r>3S5~WwJsw$?2j3if3N~lOG;B7ZN*NHfp=qma#
z4WWb3Cd52Mp=Y$7=>3@%*?F3IO{f-xwk7C@^GNZL87aJ18|)c{M)ax)_4q3tY&r!)
z<iVYs%^#j{{+SUMZ!MU9e@@+G;x@_@%04={bT62+#dWa@R+`<Yr8!NJB}z*ctk;%V
z1rOf1!;McoOI<hg%09|R{OJk3%jZQSthDGXRKORO^71Z=H{T&s7PYg6&SV4k<2mi|
zgyrEW?aVVjS};3lXr0CdhgO~tq`{9)6N4ci_Q==Q@tMWv5+Bkk%X*nJ&@I!sW9h*8
zh_a5&5{m3h=c|&g_=CPg!K_x3-0+wUe42Tzwjr;AWcYTc`dKgfm{v6*psDE%oq^H{
zU5#h=EWl%VkzMm^t(YU(Jel8QR;Y+Z+<x<1;%2PT7^|^b*alHWF5h~Vk9_8f4AwS?
z%2QQin)wu;4&R_)ef<K;mNI#+7mSY&a8-jbnrJkgMX!oV8sR)AD|(o`5O>M$wF^=d
zxw<}ky-bE{Jj3-ZE?l{Z$qNQrbi008CANg1RW2$)j5EW<i<fY~&iV7y&Y_cIWNccV
zf9a##`06+4-`nHu(+RJ=_8K35{t744DMTpqjJ1*BxH_TCEjCwp7dd%&#M?i(&3FIu
zRo;B(HuKpMqT|BuHtWL-6+L$z-eXqPc&EsGV5cyA`sZ(O;pq}}vCrs*+dMe^0Y7{I
z@7E(X?31{-1f|HdAx4LZ1<LlAw0GHl?g8u1Dh~d^Az|+VF<P{p^V(bQa1_BrP3ik6
z-=k>)*{Gr*;1*EiktRPx_fM!Nj;5&zDj}W83HOX{B2+gOVse#CsEF$BW6u2MQb8&-
zzfRkg?(uYUcQcZ1-wOC-Mj~fJ7un@QL^;pGIjZ>y$B)LmYrqzkwe<n#&Tp}Mew*!`
zZRYbC^T~|Xx6EcWi#H!~@0|n8pMRf?osw%euW<93r#QcRj$CB~my4^W61jzMMHYb)
z%5KxtxVDj<Pe-sB_?DA{1CH-LpzIeExgpO*@7>V|<r(RuD+?GE$?|Na^Ok#B?l<#e
zfH!QBC$v@&Q=4V0NS3x~<Opb6v{IC1N$?);oa9+sy<+fatvZ(?c|VmR?-f){O+Fg2
zIT$jVobcNBf5;0T|1ckU>63itYhR~r9MxjUVo_1o4%d3xHsBq2r3hMX9Gws`alck5
zWrS9YXXnB>2BQJTM@Rg{w_l;1HRNTEG6hA?vb{D$X-!ovna$=L9v%qA(~Dbs8zV<O
zIOh%h%rc!Vi9L(MF`q!JTNE>bfjahCRE9^#ElnNqg(qr<vJq=jl$H@Xu!@Sl?y<J!
z>Gc<c?Z9CBBJIR8-Jg(W(7(KeYimxX4eqqZUVD#_<#=tWoaW`@3Cb&;yReIoGrVg>
zUl=_^jSq@t6EIry*4O`xPyFmJ5;Kdg8jkPYWAE<XYQ2RRWnL5l9&!5DN@}reu*Sp5
zobSE%2Ek@>l!uU*Um-cZbC#(c&xV_tmf!l_zs7KTlXiZ}{CLi?Y8Y*AVR9=>pcrv3
zFq<xz*OCz7V@OS>N^6NTs%|&2SKY?35-$;hpr_6?>5x-r5p1h`z{glk@vQ3U{<He=
zq-Z6rSuPj!h9i`=E1!<e2dP8rSM6!1G3{a)I@wF-j88{G3xw#&Y)<QHY%xdxvC#h?
zy#Ih_)}CQ;Fy>)=kE_o=jmk<W*NIz~2}gIu9LXA#>nJ5M#GyzLGCm`;Ew0goHrZtQ
zNVHb!Ei1zTty`SYiLsHW%tqX{qmqo-=zj7b)St=*)tIxjV~kpf_q$EnFC%_oQ5C3D
zk2|QSiiYBa0b1ok>&!}&8X(<bON>(#4M7{oDpclB#RgGlqTTeBOkuD@vUCHA*g1Mb
zR4ovQt|T@dwf{D8{TizA5Hg(7WLp6<^mt{7ITTk)IB8(mVxAgOZaeI&f#%KvS5FBp
zpw}&RqiBMrjnXKvYC~$WtjDEKUZXy4dHCvGG#kurYew4-QN=N)-bor;4S6Y=wt9hg
z3cn0!m7z<6$xF}$N*RoIMCSk}RNTyBQb;i>5Q2v2(7r*n6L5!wa7-uz+UDTKsHTRn
zAWmyh=4Cm-B%>wMCuRe%It8$c_&-FU6)Ca&gY;ivO2#w9k|6YCNWyY21%DSAAb4z|
zsF+p5r;&p<uCi9sM6b*2N{4Vv#UDw_7h6ws9$(jl+JX0H8Xf0wwI@2sQdCM2+JH{<
zFl)0^$i<{D?Q&WZx)##(F^Cq|M2%71L{fsiU;C<KZ3oJWYn(r<xc^>_Zw)>=RBRw*
zDfXvEs~m^8JO(d^k2Z#=BUou7VwYtXBc`9RcQ~athf<eZ-^i)w3+!MZh?!MM^OW>P
z-Vv+;-NPS`(Yb<Nj-6M`AD&X3%&3mWRF!5vZ)m*~uOd3;EZf$LyX0VJ1M&j$9zImm
znPxP|32jT3Yc@?}BLP7%y^b+P@M3z^QD#5rYkPZrg*E}HQB2xW)kVmx0M1nOs+AsD
zE4}(!vuZ-mF0DIy5;8nUA>%5UUCGZ9!<(4yd82I71*IEZMU)a(P8T9=yC6;yeN3rT
zM0tMxOTWg4KlL+U49-nxsyXd)iEm}yL=>Zq3*`L~Aq1AQ3Crn}(2DU&@KTQ2vJf3-
zCdITeDgUl>S%ejuy{r~u+>!<-A!_WP;PR7Cq4Sa~w-{xm(8vV|^hm?yqe5AW(NOdX
zw1?5g2DWWh`XyCHOn{&H@~`m+zyJSbdr@)Rc>d^*zsY~^#x>edk*#OwOmXhw4$-$f
zx^u*9FTca9-+!C;?>%BRtI3Vw!ulr0ikAKO@Q7(sVTu8x!H`@zUV3SlFaP2tUY;MZ
z{+z}4?(%5-O~L^5nxdL|4j1>SbIV3=o4gHpuV`b%(o7lMJmvCJOJ=@i_(jbR|KA3u
zdu(2g+;}GP(cK+J7cDWAj34$ne$;1qGGp@g65sPI)+_F3$6R~rkmltc(aZ268lPP2
zA1f~5v3pjKPx*XI!`y_XcC{|)USrjXCd48gu-%XA%t`2eMT&aroUJ=4MH=b^A5dP=
zx|XBmgwulwx8J<O`qqdiZe3?%cbj^-z&VF&TAYV=?$~eU?CrhAx4!-ZHg*Qwdin;>
zzVIyD+Z%*dP~$vP1g9jrCPHRQY*s+@G|M@UZr@}7-Xk=g-Y}ymtRNqqCZKa6NKV>O
zOVc%<&G9~nX;fy=Sw@x>*gWIRIOc=9Y+jJ%8CjM~lbU5jBHl-QaA+X2nVe)A@=|oV
zN-5DNwqDQ|E&c_Phb@D(emTHo8Rf7CG14!GoXnQod+#1Ee)5xCzHx(BUVeqeY)M_$
z)KyE}){-y0Y7Wj^cw=WEWkp^y8jiSl<pTHa-sSB#-ysHx*viX{waqm)cGj3r7aSiS
zQO%bU!w~{HPo!X_Xrn?o_|aZP<19~I=rbITQB|N)hFIs~En*Z+Ftpw?TRLV=uX1)<
zR}rG8m*oUovsid0m7`~R7;D&G@39;ol8-cYYZEtBwACqtUXOA(K<8Uz<BD*1PG*Ic
zA5=hR;Cq(uEyl!2pX=M}Xum+!kw$027HR`t2UM#$eD8gp|3!}v4u5*W-WzYTI9UiK
zICzoaIALT3m6%QnlrD&ab^iRHejTS&io){AbdJ>-(VZ`vGv=2unooT41wQi9OZd9N
z&F1)Jiz2eNwMOU$Iw=QtzL?XtF41G-s=``HpW;e?l1z$JnwdYIM;dXy#k*E&o8-07
z8ULjMV+F=4;$!<vsv*+;5fbw(gaFRd)HNnEkA2OV>vM=_!?RVb)@7ltd=28+ewF|%
z9|JkCtd|sJAuhVn(AEu)9vyJ`>Sc!gA$MMXi=xQbed-p`TJ-u3ZgEQPC-PcpVeOD4
zJvvA|5Vb-Xi?%t=wS?qu9D^dL41B;UFO9JY_|!9aAEA}(aC8Ktll1B*>-wo|P<0m3
zUKWm49&LhTBU+)K=~m(Q0;V<iQ%yCkaqA5|JwRzkv`WyX2#SmtGZN<$rDrmg?v20<
zh!|8@K&XgXgDODhs9=cN3GLTgl+}r6?jdIQy27Bq8OhpP8<1~;DKvgr69br?jJ%)2
zl_7p7N0}qky<=>t=|8Q}AB}`s;ogWav$QT5?5K$H8ffu4C-xc!&unn+xX1F*lIgMH
z{K+=i`YD+m;|PRIqkA5uE1F47s0@J~s+Y*2(Gzq5CIeLqm063ikuvgnDI7i~ieGgI
zC+`r(?+~kFlyjJnnnnfDMf9*B?^*n;#ZOOAjX_Zp`s?UH50^<yfQeeNqew-Mgy{i-
z`gOj%tC~sc<a(<lOErd+xR;83<Ah!pI%h$lOa(%sR3^8zDE90j{)_cTB3lFqjmNi3
znq`i!7HFr%@zDi>k9gO}<kk!IB}T#9rJ*3XKe`n)s?%t!oM96sx3j<Lnxc><hAJqQ
zZNU%zV8$nZZ@|vg9w+-#CS!1+PYexN*BmVo3at|p2AfCl2CUFql=sQ}CpAV|bg5S^
zpXcsd_qg$-#_#4dM|-Saxqy$YtPfJ&tg>RhV>J2Ni008g{`8bMEm%A}VSY4aK3x*(
zK;1-qE2cekhCzhRDW5=KcwviTZA7iCG$(n%+8~q0Ceg<faiz<R>7H28p*!6B$AF-2
ztn~<@nq48ABzmh!!;lr-_HpENg`TPv71pE}i`Dm}##J`6jt-?(6LMj!tAu)Yg}-E&
zsuZ0BL1?61#;;1`DHYLJl$D&|&wTOM*}d_!(6pN=^?Zt}TY?MX1g#=#Tjv>!Ht{hg
z=g+0Qu1ceg#%3B@<f4l+>BxC3f`XJu*>S71Hn^os*h6xgRLXGq`c3lwnnVoPOrBE^
zR>WC$qU2%@25VG{IX25_LMx81TB3&}b2ET%c=l5_`Llm|o+o=FCb!>We>~<N|C6us
z#V>pmYei8xo6ULoFJIx6mv3`2o-&?Q^!t5oUcE%{4U2ltY&NA?I>-b4K@X=L8&?Cr
z^rf3T|DxlqlLu_v_zbt6{vNNt|09$R=uCqf^6}qZ<F#)da&C8x^W_fXJFuu6`|m&G
z3%`Du%O6|kEC1kqF5R?jU-f+H-zYhFr{w9EF3}EdQ@ax$-kUR@m$dbOtc43}JH+$n
zdHwj1Y-7P>895s5Q<fLVd=D?=y|iDP#ZY#!AL=pG6b~`BXGFY?FQ^`i9fGq$QYCbP
zR1&9q>}-hFEr!*0xuPKgD|SG4kEoC&%RVwWp7Y+@x7pd<;N0#mS!ihsOLVYYRyZeS
zK(39<r&I2~`vd;@Km9HjuW$0<kG{aoC$Cb*mY&Tidjq_8v~|VF{vJmU?=wD{V9Dr}
z7HurHGtN_95ZLrjx0XW6Bo-}Mq?*mC5c!a@hCI(1u8k;q{SSPP$;%R(cTT?Mv6r;d
zuY-#zL$*7&T*O+7HVTE6sG34#PBBsG(LzrSQ4FM(^%A2rMcJcnT8<AN@#IrCdGnPw
zSyrCfc^dC%U67{6oY6FOl7!fVbSV?L@#Hm7p1=IsH*r3ot-)%=*3OVCSI;w>E_iU~
zKCbZ)BZW4EED(cU-Gj=4!a`{9s^s=wjc%7*JEw5b5Mm(Hp4^U@Rd706()g0o+M$MP
zxXPim<yU^^qg=VM#$a;`llN&Bfz!ho@4WUZfBN@d=0neYo?!2zhB?~S)RPm6?Gi_Z
zS}#}}PFPMC<fEL+yF=!CW3t>(6R6sj%)$@m$DF&ii=HeY1YBqd(Ga~8HbNC>rZxNT
zyu<bL*Ld)~?{ojH`^2DeL_%=j9X<+jN)!`5P?THS{N-zJaX6U}jTR%Fu80a9>xNZc
zrq&u2AybN?fPdw;e}gQ~Sx%3coh*c<vb9E5<~T1-yCDYZx@Iz6;GAe+y9O|%fpJ{v
z0;DeL&NkA%5Yp$p_Y%eBeA>f1@k?B7Af2UUhs*}mX;C}2MqJT-y$?9==oNjcs**L4
z49TL3tCY9Zk<)2fJH}b6D+nDMNuBLosoD0yqtG;Mi^(jCKobn($(+N3L#|x9NHwo`
z`7gf9=Y~CoS8fos#qM0e)$d~J3Z;xttREv9%MjR#&r7Dsj2Qd5w#By|tvp`qRsE^F
z)VD#2Ydq1Vh`a=L{L38FpNz{C5#!SEHj0C($`z{gLQ7Q^<_<S?)NP<GHTkB+_ATTg
z6}$mrn<g#k@4-Q=A+y9{hzje(*rMW`7(J*yN|z7{R=QNJi6)|rphUqW+Cg5TVvP<$
z8}99_V|Nr$Em21^l*<S<qOTUjom&K^=-DxD3HheRp3jM;!Mg?AsBpJ?=vasd8?ng`
z%7Az5ed}$8gAIzsf|xgW*V614WE<;bw#D^o{6L|H9;GT&-Jse6EI0?+J2LW==c$v>
zI~5#{JJxECRW)k*F7>^?gykt(1+-P7n+Q6&h{m+R1-z~ZTN=H+4z(hjjxl)j<_@8#
z2~`cV0vik=DDW*P-LcYyc1t*$=5@sc5iscJSE!`4v0`X(Mxi{TlZXvd7Z(r`RfIHs
zlaGqSWojaWr>$yy<7k!@%6XJ0xnV|Hmyplk33W{fO5#_dCZ@bm-El#6LQNx5Jtj-_
zkearRu}(ZwYWS5lOtE)jc;%nYc;Sm%T)1}1{D(7|rVuCIDxjUig@_Fr;{z%<eCvos
zAym=aioU~`RgLMi%tqI?4X=LX4K}WRguT1_Y+8>Bf}Th3(~D;%ekeMOg<w4<)3{lK
zpG_FQe@ruvIN#u0qzyt@(a7|%GfrrOP^9d@GT7Xvv3B)dm4(K|R$TRw#vY|nN*eCR
zGi9Pugy4mK_G56rD{?Jy0FdH?WOA(xXk}LBMDhhHA%*IUrmLk<Q%Y?5I}@dD8r~He
z%IKArM%W-7BCF1j?nJFpDeJW3U8+^XkTk856-5&Q#b}K$eCe+<T-(9NnzkBK&8MP&
zN!n)>6>FQj^hR4aA836;bdBimti>pUwl=*7-QnLo<!)$j7QfLMGlk$;F6Z)f+7flf
z?xpJtH+C?&O`6>lDWqVztP}Y~L{@p(XSlXO<64Z(@pS`AOIBuD`rdm`QmlXOm%hwb
z|M2hgktZ(j!+Q_;&W~Q_shhjxTP3f4=MCQY!MmI+W;9L9a4_Qe=WkMGEysH&OeQlL
zHKm~<>*ZY7+F~>+xbd+MbMuo8o16Q5bMJ_2&-@lw*5F%z@dLhd`;?D=Y@OL*#c%+P
zZ#Z|$P-IKiFYNLB&unsd5c!SGpX17njBkGDk0|<z<)mO(z>A-~!1QAko4tMNcAeMW
zm=a@+wVpCtf+<<<Ey#4imFq3H9fVdfxpRm;w}+~)pu7x9qE%<+8b+t{P#PBR0?|Zb
z?PSZbh}?p+;*`f5&u(q4_Svo&>n^?0Osf^@+KD61WECreuyliVxrbV7ybH|66<gbe
z>FFtJYa=2aTjsRBm2);7$=-`gFJ5!+of-G<e1rb_xA@SD&v5<f4(-tard>i^qoT%Y
z%V1QBd5}p=qbTdQqtbWBeMhD3Ot~ZzSO#LE@0K;t8w|w2CCfhWJtiwMw3R$rt&QB%
z$;`@VjYnKRjWrl;#Z1Wx!lV`Gyjy#+Kg2APNR1HizD@l>Brghr^JKl8;2q=fgiF`2
zbN$v0j!q{mPsL?c8!hoC-P|dL<c@D862mAy`O-%@ynm0Ezx{n;6Djk8o%8EF`=M+2
zw&IOf?o!v{(yyam&8>98;dDsZno$WVm1jh2Xtd&;am&ML;QCIFESu8=Po}_*T0Z)v
zXSnsT&rtMtn9Qa;ynBx#4Eflno?>vm#^q~ZE}_&q#GKFm`tR_i-~2X5|M>5q*CX!z
zF^gWs=EpvU<`VP6ihR3Ac4f}|`>!%E1AgJRKZF0HZ}R5d6UmiE5p00bI$>*oo}JP<
zPwQJ+1~M#5k|WbP#t-&6_`wf(>s#Mvb~*uNh*siBa1A~=t;;GgWTQ2XE5{q}+!38~
z^fK#9QX^$p#f>SYlM!BG+UUTiKKn6lKKCJ9v!t2MiH*Yr&tPK%*EF=QMH^v(Rf~rC
z(&3X#BgRf{6Xh3$PF}RSrB)JlVWz1R@f2}&gZC{;m7I-yRyeHe`$&ViLTS4*dbQ=q
z`R$z`sCiLP)fKL3&}Es<U8()f>b-d8&!CctF=svxsU$pWA%%8@a}AlzXsViiCbFj(
z6^lj9!Qm0-&uy`}wa$-z_$J@}>bLmxFZU?VUnJ%|;@URhcn{+}Q77kmrKBcVP5XSJ
zU0Y%@$1~RB+ZGye*>)I-u~I;i7zRX!5!3L@|2XpU{%gK|DjU>d%fd-5dMK^Lbf$<f
zY*D2`8z(tLO9eJyw+*8wa#X3oTF@EMIdrUm3Rx~9hL64_ni<+UV$6w*fhkJofI$U0
zxP_)6B4|=(nlge=VQTbthH}S*r9#^byKT?|1N9PhB#vxx5K*Hwf-2FoJJhewX^##0
zK-0b#sNP<R@cJl&)}XDp7YAg@siP+I5j}R)2dB8)&>K3=@1A1T9E2gJuP6sKZZt#}
z4ch0Zm_fAI5QuF}Y%?*K(hgLOQXw589&K_+h#;m#E#ITM`*l=XW3nPq^HLEK5<NGC
zP<w9?m46_zB$he)$|lqi|KJs}-OI#t7iirCKT**1#O^x<vWHXzLnfG!mua6eA%UzR
zx!=a54e1IeFp<>EcSC=Z4oMjxq!?334I)V=(pk9^BOxeU5bcPoy@WESFzYr%F^TdH
z+8UZ_rN@c@T8kNrO7C@QPF06qL^0n`t6Z=2Tw{l)rdM4Hf~cM4-PZ$yotDc_?XbOn
zhllt3;*J{v+IqCNghpfAfUhF9i0Hzhtisp~ZH;W;%A6S<xQL%rTwPcE@XuZ*f3e5t
zs%3dPp|?I1`mND&r~x`^V(Xw;(o{9oyrSNpP|cyPg}M@zH2NNmQ6W)r13qMg&=PfE
z?cz0Dp3&ry%;u;d(^(mYhSlrPjY}nRrP~2iGVhWDO#c`*_U(<nFd&q?PMzi_r1#2q
z8f+KD#0w)dU2(BM*_3grB37piQ>&7pQ6?147*?hegow2|HMI%VNo)aW$i&eybUAq5
zi&ofqLKG|ex(T%2bK}{M^XXsu4Wc%LSW_>iRI?dv-OBxltgY>xOY}w?xR@GXA2B8q
z8eh_fb{zYTx_g$z*BNA_>3QrZrJluXLS4;MH*Q(q-eKqBMRb;}sA{rbNMQN0S|r3s
zsJ&a~FOc_!OlMPRWP@ZkDy`+c)@M}I5N3Sr)6em>KluZmzxf1rA3mZ|f&bzUzsBY_
z{*uXfPG(Amy)|C^%rle&!<*m#0edH7aD8&y@JoL?a_Z;Yel?K2rwPrN^TjS@`-t)B
zE?0MdgD2J}eD96F!=L=4Lwfl-C*`vY&)1yWIpw$t9Ghdb8sM8L<Kw$Ld*id1YQ*Aj
z#@^nX^H)ZM$u9M5$RGTjhupff%i4=e-hTT5i)w(bhE$U!UR#>n!1j>Cg9kiwae}!6
z8~w<}Fh?<gaf4cjvwZC;DOdrMTW&sgi>IIe5Goq(zI%te@7(41;Dp6&L0z>;b0%)g
zD&;AL5IRg=-q*(}RrPqq`Pj8`u0HEn6V<NUb01>~B)p_68oiIa|K5GBT-(KH!)Sd#
zy{uQ+q%jEc9h2`sj0)!>$|;u7^4)K~&3o@W;<G>d63q4}@`gc~(<~Q>p5a$}xav^l
zm>PrRU7!*oq>~OWBttW_48N4sn7p7j8cHU#(tOaD)&`xLH|3N3BXor~DZ)Zwtf9;c
zF~n1#O+qufA<-&MNS?cx`)M&241qRCqoRVyPv)})<HHlIisD+VHBQOt*fBb!Q8%&=
z7-{%F@##;p_ugB)^`kc!W(B6Oy!gpyxp;ZR%isC|N8?6j{86p^9z^0G$f?x`W6COA
zQ{$s2GX{?*D-6zO%oc$=4;-`Lu%#mR3!ZuD0#AM9L(Ho^dRtrU^fN9!afSEany^2c
zv#wu<VvW4)k(WKR84!rH^C{VU$!zQhv=kpYM|SQxf<Dji>VOzM#>}~X<qA}hW;*5<
zi=6lVy?@A}k@MeXC9_Zwdl{xzP>*YT6LH$nv`#!4tRnM{$^MK7Z~X~J@9z_}#%4(e
z>3qsJ7j3CF8f`7Tix>E_Kl=+>BGwo|Gm)XC)F<gY=?0rhi@%Ci814DRFMkQ8J=J7N
zeL5rfK-n+J^Ni(uLF+u_Ajj2?$zsWT+2E5aWK2x0j;YfPJ*5|vy*+eGyOTtr@U5e5
zn$*~K)?w<e=-lI336<{SPAVX^S|AOTeW0x?8RTm*7i#JTlVvh2;jg;)rXTWHdQDd)
zrTwQv9MX_i7?K(v8m!H!s*0lMNgYj~X<BB}IkV}6!TN~OSl)Q~J=WGY`S8!?nDuSU
z<~jUwhMrH-{-9C8$sW<o_7ZC)D%oq~J+mln(YD1`HP8}LKB`U#w(r0br4m!+UusZ)
zBCg&Go7CTWgl>fXrnSc8UNk}Hg>K@OEi{&1mJ!com`zQ{K&f0DRO5me6xvi`-V+>=
z3TOp2vZqAdpqPPgBqk+7h>`<Hkf;%@ytuU*O|+H}TcQc*QHc)6cvql}!Q>UO34}#N
zElVl%i-00Wxg*-QPf*7N*}A6u_&V{(a`InJ(ajodFoNc!$O0XeY&21c6io-B0k0xi
zK4b9l6S9jr;ci3^2iWa~xVA*An&{Vv^aMQ)N*ZeCQ0;OR94ynQfYOmjn>hFZ<qvUt
zFJnT4Y+dw;)}ZKgv2wbqD2Jr-2o1(b)2d=XyPA+$Y(6B8PYBBiW^@jHp%43W{33wv
zOB0?<0z#*Aih^FnL~9Eq?LpMibf(QKrUp=>Q{j>5p{d{&nshSxh+^n+MkDrkR7$#0
zgtoyo4P==dG)iJoVn~g*bGW)rl)BV`>KvmLMP8(Aued7eSH>jC%*l5X#g(I8BFm-2
z!XxgPK}S?Dy!z)0cCHUOe`|;F>4Ii#aka(-nNkL=#8@Nugt|o+S{hW7uMRos?IX@P
z+N!0Uw1nx5Yt45#c=J<S$Y;38DPi{_!`)pNNFx{k8zXU1;b#kcRnr_TsHPRJ33#7L
zUTFlBTNQ*lgk&xj@v&j!$_0!nXiX&VXN1Og;TEXS9ZX%PpPKg7@F65bJq;@&D{AQn
zd41;d&l8+l>0jEaVK$pFTP|s8N87Zxs8%_WU?eV}bBk9USx??Ulp-^Vjm<UI*Vi$|
z&@>g3@t9@R5{<$bov2?z{r2ADT2E6oG_Auqp=!CNAvChSS}#Trt&W_(c#Yrqy}!lg
zg{vvou*S`gr8#viD1%E)#Q6(1=nXd{LMaA1Gbf$pUEVpXr*Xy$mB+TCh$bp>kjBCX
z&tf*^Wd9-22Tb0l9FDm1<kN!cb=j}!P!`R(r(Vo3MuD-|!HD(kbAV<z+Q55xubuZ~
zd6t?f1x6fAm9g}Cmd}3b)4cVgSNQqQe1bpu^Ka35!}K^Z=x=cA*&AGWVux2>evRXU
zhZF{46FI6+c=979-}ug(y!i8mPyBVwoj*Jz{OBCt`18NV?|t@f&{l!V{hYV<exE=4
z_BR==?@-MwcW>Y2<Ci|hhp+z*-#GfaTv?m*vFo4Z&hlH-`ULA8e%f;7$_r%GSJ>DY
z@cyx5F<J7F8#j6BnJsR=`7Q3>Q}nia<i-;Gf((Odt}^vd^yloIETM`F>XL1BnY;VP
zxb6EC?K#w}M{sFxj?&;C-#=h{{v7Ado#Wb-i<qF%!E(?Zfv?c9#UZGLM9oAQf<+nj
z$^9Hxj^AgHNY{|2eQss&_89%0vztbx##MJN)M+B9K+Y}?4;~&e8Bf{RT4Q5p6IKRH
zO7`C1S5&^vtjRlv(HZke<j?=?hunPX64x(pv-jv-w#z&f`l{PFA;~(TXC<Fd;_3u3
ztfDej#w9_4J!34$UfDi|4SjHzB1lve&_<UY;~6zlMcEUqvE*eTkH>i#)FkKds0;)z
z=bTEJ;c1Z8v4pJBLJ3dnxTz|R4h~pUOSJJk`=MtTPo}){_U#p`Os!ZG8l^Q)Jb9Dx
z(IIcX@){Snx7oco;`vWM$z<=4?|<w2R11X;l4Y&5B8w4k^eQqwMq!!BFfDNH#tzq?
zzRB^ygg3wQ29vsGloym+8JRL<HMFkfq-mLq4Y%K2P>oO6+8r`nFIi4r=kk?v?BAY~
z_eS{5Cvd7yzZd{Dp*_Sd@3VGkje>~tHJg_=0GvQ$zrmKoXz}w3r7D80h<TrwW#o3i
z#@2#!=XTk@^HAhXKG4r#K3%dlT4Q`Xr41%!fp+_2#M#K{@hJ~)-=__MOk468Fl2;Q
z&TrifIgDnwyUYFKQw}CmL6M1=q^rc})q2yqOEu7PKU+cXKK;3uxcc->+QpJ)I-{u_
zsKCzVIoi5rI-U{&*xb@Kj_G(t-8OiF7$>Ey`R=R{qqXD;y~-j@Or<0x@FCLF6~1l6
zC<<0h;_3u`Oe@f#nsG(NJv*m67Kis9lUYy-YjWDUl6zg-uEUg^__X{UZ{fnqyeqEG
zpvR6WX_mFZHICfM(57u$G9|tzF*>HxDW~I8u3ou9S@zg_c*HB;eT~s(k0(F*Nyz%B
zjcr0b$29GVK1}Bm5~K%F>G?eVK(!Ipno0nJPc<+>5uBrK9lCC)m#r|<{$&p8PsXKQ
zcmn(081*n^6=nvNN6dys?I;-Ln8sq3ns)Bd16kAo<PcU46_(PRs3pXnIQ9i`vW-3v
znWNM(a70v+l?sg<0>Pyjb1Q|i5-JteqCyQtLU&pZXrX?FNN6mo>7#WI7@=lU!W)Nz
zjy%lBhdts}1;a6ZI>MA1*F@U3#c7i!Ye}1+!)jxRQJjQhi1>DnGGoHcON7#4#}U2h
zQ2hxyYJ3v_C)5#R<qk3=I<kgnQB(xu<nSZK*{CII{vL690D1$}8xk|2EuwRYcZn{c
zk>YSGS+Nf7Be8POw0Q4Op%GmMId<XU_&vN`$F@7*4L)XsHu>ONjkX!YNE<Y$TxiH%
zVS*sp(!c1{MivtVs+;11I1Z`^#z&k_oPY1T#B+$|3>k_Z6*bOBd{c|&)Hf&s&IMv<
z1pV?JUoDgN$6zfG#gHg3bF$1LUAo-29d$|`jpTU7Akz34W1{<tA%P0XK{f;;Vh9wJ
zrm1`U^S}F$U;J-f;qns?n7w>T)2!j0#aNHB2IT?ovC5$93}wKoCPjtl)!`6gL_3EK
zp4?{ie8cg*@AIH{lXDw$)YL;|C@-&}*K#q>nk}f`e}tbkEKip-Gntx)F0h(Nvi?o3
zn;{jOP%r1KZEaw)f&~rb#yV{)E`utBq!*5>0CMQ?oN#u$bq#)c4mx(mSRd^5=>8)X
zb&IPVb=^`eYZg^a(>i>c^2<DFkl<Hb@)+XELO({A21(*B9gv|92ombO$2DFecmkrs
zPDWud#Ip#Z^ju>QnpM=0dHKA{G(~^NU;o?xI-mIL&!81Ft^&YSr!1yZoJ+<S36;8d
z`4(k=4VT=XyL>%kO`;ns)Z@iOq7kpypj}f!Dos-3Fg`lq<lvzgb6IhkzJBvrOqM;S
zN#x8S8<jLol^~_Yuyf%uz5b98JeylPoE+_w*_?X0q%4ar!dQ$#k^z_tp3i>a^L*zo
zzRM?{f05})%U`~Hn~n1q`1ngVSuBqEi?4kh6G63(#j>SwhJL^1#z*_q?n#dBK7hR8
znV-*jXVx;@xWyz_>}(D>seYetyz%GcJqS%pl}-8b&-^-{d+EQ4Z|~4A98bOUDPI5n
zKPIXMvzgJWuG8NvIIRDi=Wg~{%;4$gp5yxUEl#Iza%cQ!-1aaHIhisH);#Bz83(5g
zRrU_^A(+gus3Mi_apA_#GTysKt1=$-4>@<aCVTxdSuF6u6N9F$8*ab8;PxBu3tcR9
z*~2MwH;B_|2y)MLQ4_21+z=moZzbM~70cvIl(veONCPjG7N+V#gICUB-Cxxi#Wub3
zAw=57Gw~Hw<2g7SQ}nWg5}LC;YQ>yLbmACLF5oK%y^Q;JC!8KF_|S9DbG$ggwufvD
z`_Rszji?w>ey<_Xo>q|;vd$8U?2|E#iRd7@eZ4d+^9&UWY*v2YdrYr4Aj>tg*$f{f
zF2z2En8mavZ6=6mO#7nLElD<zoaChw>;y4kg*rW+@g35iTNgMyJYca{(Chb!zTv|k
zeSxNOymR|D-La{ZCThiC)Z_9KSNKQ&pZ}OE+v{v?<-GLs&-4B(uk+~91d}8W)@VX9
zy^)$n9O|_Zqqnt<oS*%bFR(Vuc<Lj!ICuFHvCY|k=Qej<dzbh3$9(6Xe-pQ87_6^T
zw>6A<3{@X9w^aM^AOHLRB^R#laQ$jQo@HDZT23aLJX)OKt7G!&yX0}kg@I#x(_>#e
zN9!&5+9s{qV)oXLP<k8P+rqbF7K>Ac7thh(+@cvDviI5>%x9IX4?IC>*0Y?u4<2#r
zL(j2y=MjwxIM?EX5~`+G1PurKCoC&RhyiB<!6>pscely>QD+uA9I^hylYHe5{{&@o
zi~@yYwaq5vZzc2S?gecyO3~}V7ryw5SOv?vrkOP;A~w_H{esD4%E{pbZ7svi4Q9)l
zlgXUc`&Hht>iqvY^FzAYgcTCnX+2|V07LL%ewShhW1`9n9+2+Kv_G6>lqYauMTSb*
z*+I0YF$Rpz5g*Nr<zhzBHt56_c>J7^H5*pZO5Gai*gZ!7R~u!e)9;FoEVHz23q}dE
zOT%nlGoCHjt!uW=Z*lML9@EnquYB)yhMSx0KK&u+4Tx(y#FKrL6KS1_$sjciYdar|
zh=3j>iGvOhjUePY$j3Xk<Gg9es~n}4c()+hf7zh^L|n?OVa^Q+2d9EuWf2O8Dg$aq
z6V^4pvBb&}A52koOFPqGYcLB`uF-9c4;qtsP&rWzfquH#)=_>)G-G0{0fUN?H7dxd
zL6d_>;1jjUqBGI0sa`?i<w;Jes|YTFw-V~xMErD)pL=2}6g@(LR}MD~#Qg=~L_<4}
zBB4kmE**%`aCU--e4w{BV6ES0c{m~FeUusyV*}T=h~o_3&+*#x|6}jZqjgK~yFT#q
z414c)I`5g!9qLY1w?<W!sx(@%B+mvLgALeg#dI(U1bRUT33Ss5-N{-Boh%x<7b_3~
zgdP|Yk`NmVjlss2Wg{6)C6!7l)o|<9o#!*3^G<tsrvBr3_WPc4{>dNG@>){ATB_mP
zQ|G+H-p_CNPC+gq^dU+u<MUqB?kWq~BUB87j(c5;kR?L&@HA0F61H)>SECL#%hG!h
zwJ8!0K_PU9AT@!Md+06Ls4Mb?Lsb?UPpB$_bVw6uTtVZOsl-Mc3a!UClZX%rPf(H|
z3o@Y)N)xn(x|+{@QYxH`F+eR*EvP)XzImKfsbHtYwDYK>gN%XhiV%_>&?O`(L_;Rw
ze6;n4sErLF$_;EhT&*==ebN~^L>q4-CEdXwH4!dhk0DXb+KqCgY8&5DyL@tXZ>eVj
z7aVOMwI^Uh141yZmiY2N+vcM`d5Y5q?{NFwCc3dUAiX1am0pV>G!0T|oRmqwve;yX
z_Q}|oWhaDTqp&=B?Heq;|1!Eh!jy)k@jAt77hjdQ$rL*(X^u<E@eE_)`yPFlMS5{@
z86c#<A!A5*Ii=g{(aF11M<x2)8s38;I^wmlMR9mZbeACFPoC@J7Bruh*%O5Lwqz9k
z;y?L3jg7`3*15Sgz6>cVFl|DEkLbK_pJRfPM9Y;+DAPix6W{rHW8W6xG@Fxj$F#Yr
zqy|6562wep8Kb5GoIC#vf9n7Ie`IxImEaxTS+ufDkB+FSIGj2kP*UT8Gv}V6(_2dJ
ziLO<%({eoL)Z(yc4dz{fjLZU=UQheqIBPlFdBD-mHbO#C^eG$9xr>)kc^<znp;PU!
z(l!-MSw^>DDbPj1^6ENLDWnn%SB4xNACLvfY&vCieVrg%<1>+5S)r~<9=r4yr&c#8
z@{Hk{=f(GLu)4Cwof~g5o*a`0m`p~v#^Zv(7E?a?vzBK+xJNzBnH?-MJDf7{hg|yp
z({ze8HrG#c`^Nvom%sLDCRX5=B~9;uPki_X`P9dL8e77fciv#-?329y+MBFzt?}^w
z4XWxCmpd=B>p#u4-G9w9k3B)=6l?j2rn<(1y@&J%JqR_cn*)l@I@j*rCASkkeEvD^
z*b${4LkQH3ptHJ6d9+V8%`jt2KAEz&-Y~qV7(6_Mts)KDo5*ezLMJM3{20BBE|MY9
z!~J_yV?HfUA~R*Kg>NakZ3CO4z~XfpnLP8ZGil@6YgU|?MAIc65uvmk6S8&V7I+t^
zD~nWu+L*Y64T;981UgH(x2fiNV)T*^0#h5bR+I|fe&-geD=S>S{6TKreuK^7fYnZo
zn;s#Yis!r(X~u0AyC4mCXK^iJj3ii3(=;@*n(5IogJGX;Z}2TYV=iC5$lbelus$MW
zS*BB+Z|@P0k_w$=X+RBl;<8Ud%rBpxhdvp~v~=qDYJ6Hm`qap~NCRVhJi#^w5x_dj
z%{SlT$9~W6<G=gce=Ds|Q*;o5OV7T@SHJWnR+b9Z*Oz$i`7_*m>rIXiN~Q-5^{kH9
zS@H-_h!}(81e(Zx5V_=!{JB5MnXL{7cdygSEcL+^94qwK1W$k90xu1RoITZLcXyw)
zv*$RyHeh9I6VuGN^7`B48ykG|eHS@<;WRhC@T*kpl5L)*Yql8AZZg|Hz}~4@-H@Dm
zd4OJ9!fA~QiZXL#7v6`vafMr7{0bYZPcpGJlj(@&)hj%<xystv^Bh(sJ9~$CCviex
zjAv=(EO+027xCiD5%IP)Huz|Olc=hMK-G+6%?6aS)K-93<hhDVTcHTL!}*s#%&Tv|
z%l7UlI@~AuLOU?Fw_SV93lSpaJV>PQeDq@<;rzwNX=XE;$qa8j##oj&*KoFBc0A&E
zf1ly0Ef9*)Xu@P#M|o0W{CFRIG(;K%+bq;Y<P-~!g;c|wwV0+p(SM3XSq>+L!;|DE
zS{iJa(^=c|+n9(*3auanvMgibW5%j3a<pvCk``wPvaPu%V*8SNruU)sBbq-4p%gB7
zoR6kXO=Hm^;8WyY(^!u7j+vdFvAVWIt`w#9JiN2R8(({q-qH$7XU-6YL)>hPa<d3Z
z#RGLdg!_={bdto(m*{N_n2)_`kO}Ga7K*rV5T#kT+<k{VzAFb*B{yMZ9iNY5HlIpc
z`Nkt=a!#pKCLltApGhnp9V&bo5Sfb&bw?ovGE!dU61W@}GLX+8-D3nspe89-E=4K`
zg$OdzKVnj1Z2Hq3DYF217ePhv2I-;`toH`#G?d_XE8LOA)dp{yd1D_<#0vyVs3s?f
zNK4LKbhe8(x12ZVQhLZ0-Ny%nl>x<ChCbc~-$7Jk!s<(ejpy-xjPS1$)DfaTM$J6b
z2IL?O?HMSAR~n&e6b;BaZkvP0OO2X6#2g$T)LPVCVhKKvXq^xdLG}TWbr78lvK)cJ
zH#4L)Kt(`BnvYb_1dJ2dQ!gOSeuTWU5|_ZUit_pu%(XY+cm}mWNr^?n$})5XppbPF
zkpLtC73tMV;gyOS*r4M3DI*o7r4)MS(ts!tE)aqN(izv92E^)&w;oIrH3x`!;5F9A
z<%4yAL((8cmP{W6gXIC4in`g>EjY9qtq?Q3oHYog7ARTLBZ|1{hNO#X&AU8-#wQw(
z6+E~ndGp_lc;CmL;Bfl}dj}mPJ#Y%?EmBAv8m|IUrm$!sV&qH+C{#q5y+zqbYxfQ?
zhRr3xgEww)=F&2~@exORd&n$9N{{q$R(Hl>ZKVCoQ5X`vS0<wk56%jjX-&V=quU=+
z&nj5%qdEnRZ4l1kL%e?^BB2B<s5Eofbg|K8UYZi>^KGEU?Cq9wN$^||+|rHOZ%P@q
zfsh<{)rqY!^G&G`i=k>xO>SGrxyj7Dm7i;(TfUMAtrbzg3r%o>$_f6^zxyZn-GA`+
zK;~&o6%9pnV7h<IY&Id9a7mODSm8N){&|W{Ke;<5dT#5qN_dafbRj-uQ8$xzyZvGa
z(9|XS+Ygu??g5Z@24wkwzRFozTbm;Qv2e%Nbh43YsuF@nsf_+`HPYf$YXqWLT3TW{
znNXFp_@79bVyKwE#~WK))V1T_=po;DaGn0s75VK)+<J5y?|Kj{RULCQWnLgmNpWtM
zjrR*~-h~gp^caJuR&dP{5BDeN&K`roGL!n(c;)(UFjy93%?f7O^W<Y6;rIW*pJ3;a
zVP$2=JGZ~c^41Ekz4CzH_cOm6)ie}NGF(;MJL+@iRz_8A^MUuB#`kvEzE-of`YAs5
z+Sk}U$T)H%`l~nj(AN8z9(|p!KX@Bk3~^S_nFNYaPPUx!@aBD{J0-rbWSbe4A9J@e
z<;;1_=HW#OGoUUTf&pi2bRTyinhZ%GEn0k{>iH9l1s_28g-g0m&8BMeliTZ9wliqd
z7|PUGw<*$)5ZDvKRVi8xT5K$3G9(Pibvh)<u(k2`h@^QC)*>Vr6Hu9tPSRP7cJLCM
z6gUxRY=fKGnCqygly6<-Q=j@irhA86yZ$EU1}o%p6p>V+(m~%+217E7Np#BOT8#`y
znMH4lAh2c0@pSwxKVz<5d56XZ@~j{zh14pkiCdez*kHHI=(!zC2n3fz7m1k=d`Q|;
zpBi}P`aSQWT);aQ4SPbsREF8qAcJIWbsdDox??`_(uer+*T0I;so@7!*VlQwX*hrR
zDNe2TnNBPAc8{4&z*q<(OL}~pVlzZyR3&}05?Jf_7ys}V_=%tT0s3C>wcq#*n`h6_
zT^|!dPI-Ty`*$Ak<jc=-=E6F+u0P=6WJIsOL}@ISU;G%eslW)qmtJ{=OG}#^?(VWv
zZZN(26=b)euez+B>a()E$I5Dfb2CJLh#SN3%rlsBMt@_S?c))zeEAEkFRgR-{8{!M
zJ!EuqgkIg@^Iv=&Cnd%QRH4XqkM*q~ufH;-t}8^Y2@@9`{6nJ2i@2l=HY93w`m9{Q
zVX;=Aa)lGH`q&x1dHn{jeC4&+x5zN3s>VHDx_6Q}o>T#aCaM}d-AwV3k9{Kg8kGh+
zX=1D$Q1trLWy#*o0aaabZfyzY9s7qPDr2L6K|<5hZLrXXF1C=oXHaTss{xv(!8)@z
zd&R=k?>)7m)w9kQwqah!Lz)~#2m}|?7Nm^TO-hLpGD=u%2cMlV;A`bPv}olC)Hk##
zGp+Ps@pM#rSFLsAd4}~4?Oi<Agv2?|=y=9>I%BZWXRtD0yuFVpGj6_phvDi9FAaKR
z-7aEz1>e+2Q?<n`)^Ty7S7}`k+bEw_klQYuM3~3tL`|>9RCT=9#CzV!?}*2D<$x+`
ziyPO!?LJb7QNZ6+QA5&Hc%=wFqE@m2KPvH=L-ZwL*dfzN<03i)mOu_cb&>KsLYzrk
z=p6|5gvGU@UA+b$O(%rVAUr{42(=W0TKx<{z<H2P5qyL90X4I@ktNg)VFbd-WNu&~
zWROMYOs^!eP-q_rR)UQtK%Qu-q7(!R(mBd^A0o3JIU{<fMo3u__6%})A1^J)Qv}gL
z@(|w>h~WrPFN0bq&_Rfrpw^<6RtyQk5X1~YgA)=VM!3UkF}@@;(J9wCq=*Qzml;AU
zf{Jk~!ZwJ)#A0<)<EBz*ao)oUaBD9hPJbMWPINAZ>~|Qx@Dl3%FXN7;?7s39rdM8v
z@tCeLbXHgKd52&u-WyyHkxnFIGbLq)7b+qG$qXn0pAr)UVDUjjgwnMGql`mft5FIm
z<lYB-T}5LS6OnRfliRbak<}PcO~{0$=q*$CRv2z<kY)LN=;XxVB|^Xkk!UJDnPEgd
zRNQidLPiQr(nk5<qh5qaLz6O#=b%?yd!uA^v*6iJZt=xmx{Il^xFC@VuvlTyA-V5b
zh14=?fm7Ic@E+k!(w|wd@%c5T;@tC(@%oqUaPIVwQ_CHEH$!HL%I#WZrA)}7j@LI9
zT0(Ae@wLX#?G<#2E_OD<OUNEuqZWqX9oXoK9HW2Q`WErnxe?1;Q?mGLy6K~4`rB}@
zJ2!VKIe|&UhY#)0wLqh-2PxcquoJC=VLJe}3kuS=_&9{OOvr_e%tA-g{tO|bj>Uuw
zhe8ep{Eff<*BG2x#k-oL?S1-71r&<O@gZeBi`l5oqcV*RaQf`C6rFx-RMWM1^0h4*
zllE)ry>92#^t`-phwnf+8MD9h2s51!2<W_Eb?Y><S<O<vAHT0fg4;Nc)L_TBAwzA-
zNC8xWwXM@goshg_gtWA}%y>M)R8?d%B!ifiDpplB(^24`|NVc48*Wp)w8wsVh1t`N
zvFg&ixz2zUI$D9!gpe~46JGv(TYU5D4>-3vWX2xj!(H|d1BVkheSVLLzsltPj2E9(
z92_~WT|MH7>}7uDhyE01(x>08_{NpbfIQ&n(H`4JQ~uuH{5Afw|Kex(#@qjdDZk32
zdsFUTv%LLAgL5M;p6+t`>}huIJ;K=mu9+e0f>Bv<t$LfKUdHOufV~E^w>TlWzvJo7
za+ZdIC(dmk&rEpd%6;54AUd9-&K~{#fbOV=lF4;hcAyCJ_b?7L9_ynSmvvDGXpD*f
zw+7op!x!hHTcr;n=0VEELNw~Fh|cw?!<9i06mxU0Rzpf?Bf?t1Ma=2G^Lt?KSRSu6
z?_=ICKEw>>An?H^J+{CaN73z&b#yxKB_XI7Rbd<^G!zW^^uPKoe(LxBB+m4B?Q_4$
z%TF(%!*s5xlr8d+D2DUrDx<zrC1-7+lcTT5k*fPGKVyQ<(8SDasnuLpDn(*R1&vZV
zEfrhJb7Euo7|W)R5w-K_JP)MkkVNx!@y|%ZX>idwwXP~caCDdZXf1gE$3Dn#=?u~e
zKL5&>Q7TK^AIZ&kuJg>Z?<3STPdxiLuYLYEFteQLY)0uk#)p_?3j|a=OWNr{OSH@g
z;~8K0+D*E{O_nlC;XK7aaOeI5?!9xs>afcvfBb!9gB9L+^DVYEHK#AVM4*ofh8tHN
z^56cq|A0^at{<YilyUp;kjrb!)a5($yE8&Rqw5qhvkW&*qX#QEUt()Z_u}_6n{{zg
zk|X%kPya9v|MK7C^*6VvrVV9%h{W;W`b~C5CF902vkl$|e*e$>C_n#?{wbMI+<CZ9
zFS$8Kx9OP2>VlgaI3{PW*e3ecCnM@UJFDy5IU4chH{QXe-o%M^?U1OS>AYR!Y^Mc~
zfGn4M?4uvy(&JC!stIm3qpTZ(b@Y~dAOy$96Glf<RJY5@+A;@wV-Am}aVaH;DFrSf
zs8Wh#94Qu#yRA84yNHTr9E9L$>MCAm>Haz4fp&u26c!rRR@O1kM`ckHL%=$Vk|Mc|
zr}(<Opl&LP#-Nl=X%La+)2^k1Xf^W8Jv@BcuFa)gEyZ2N{9f-vikpe74Vgfd5TL9q
z`#Xo6+T3E}^g0i>55WcMQNy)2-etHt<jEJ_hs+CD8p5GPT8ER1&#dK0wY6?uHCq!*
zNU=0+-Iuu9Fd5INN=uA%{{IiC-w}_=m%j#g_sDEySOgc4jRdRUs5Lmqa7z$+@qiE$
zg$tT60F^70?n4%1X%LFwmVkAFTtUc95JLnbf-hpx4Klfl8VC;MQd8*@xltz9S=GUd
zHv3THg^hOqwLpXh8HfpX$(>b%<Tx8VLAZF+WJD`!1=7eEkS(I4ERK}N3aCJ+9bw|I
zeoV32pbCqqYQ%#YuU|!6cn9Q})P5cSS)@WsK@c@T_6WkGWQ;};QV@bB-f|5=7^HiE
z*}jhmJ%VkJp&=e#J*29DCI(h(iAb)dzUm~ym#{z&eL#vBdLHsE<k^o=Ih76(h1MGH
zAmFgUA(wlc`CUH<Klp<jzkQWQpZzu5?h(D^0p3|Gktz{gl`{g8piFG8kjW9z<C7Ut
zAde`jwbW986LAwAblR}TgEui@2wtFC)GkCSqjnC;5pKMTd3c-7bOcJXBL_VB(?5gA
zyD?)j#nr^Z(7NWu!`6qW%M?;a+NcnCZy@-%m1Qxs#7ZE+d$drA5~CqloRNIvUmx&&
zKfB3uAKT>3-<;xo57(Ha3r+|@qP+wYbM91{kfaZ&<gn{~967y`oO$61jvwxD@u^jQ
z^{clzv%JjrT<oI;z33h(H9-oz5O{BKEk!#1)HHO)jdGz2R4$?}(glj{63#jtO+;ho
z2yBWn@(cU+xook`2|7VRnlESJ1ReET_y{Ko>^$10t+4Z!I?`5^JW-%Tipdf0701qL
z%56bAA<~<F;wMo{u~^6oFka$9&gR)C_`m+u{}SERF-FK+W|I+}{yMd-sj69QPKnea
zhv4*?%M?Wq>uf@R7n|*c2~zvg+NHq!)y#+MR)-j@;b?!4$<bcSeDeknY@B|KqSK|S
zJ%hoJHomKMffViVE5M7mcyWzE2#L-!`u*X2s39$AI^7}}pMhx_y8SGg;1CD+(eW{_
z{nl&TJAQ-g+z$6<4>*_^%0h6wIz>LF*sNAiM-kNt&a%3aartu2n_qW~Zr2=(8INrR
z&Yj8`ZhFe;l)Y_1Iq70;!^B#)o|Jt2d!C?~&CqH{&pTef{)?=xKH%ZqjP(o4boxVn
z?O%S4ANt{s@Qt^x@uk<cxq7E!xTg7~e{qi=`H}Nny?Tq6-hY;#_=)FmlViT}+7<2`
zTZF1<`m6Y<=f$(@eC^#E2wAXnRxlWjcw7%D?H;S06>bNZ9mn_|*MZW{D92-HGQ4mI
zp(7#~5}lP0ql09MK#(e;6-q_NW);y6+jyGBQa6^#Y(`nmsBImuv!qiNt+TU;3saH-
zj>AQFOfAUsJk}B0A~r3<q6HUPdSEht3819L1v9@zy!Ww*baBX%I(}bO)u3{ES(m7d
z6$oi5#W9<YUEuQ5ALVoZ>L2oJpZ#@SdgghadH;v`txx|vKlr^*5TuEcix8rg%zA`h
zpjLtw&GB<RZH%O8^|jyH$LTX$?Cu_58;cGKsp4?lVrf!~)TFmJX{!T>XNyRbUT1A&
z2D!K(46XAxq$m-G_a1ASc)vCcgg~#?MN7e%3uo9odx3oA6zfMLI(bgWB({!A+pBL~
z<>&tRzr*#nUZua%LkXBPmdXf9>uHj^y-%iuGProH5zYCmlsK*FW=nkWH(#Xy-uKK!
zg1kegFZkgf|1g8q4nE7dcl(GN*Gqo?&;A5TYrgzzpXc(k&r*+$I6B(qAN<{a$mVL!
z^QRx<`9JidoLXJx;f=TGl!~Ie#rcoE%;5ZsOvbkuoIXXUCg|cUx@d5R?=rsnWsdeH
zeD5bN&{aFU{qCF82*!<JG;63T!>qChne%i1{vYLk`FsB(F4Hu%;|p)T$&WnqG&V#n
zrwsv%jL|-AV5f_Vjex)l7we$}vJQ38=PPet#j7kX4o;Ma1-&;#2qj9qNDCF2Y1Y?Q
zdGW>fp{1jmHdK=s>1J)i#_B1YvmEY@scXx|=?(H+vv+jBWLm}A<o*19^1-209xVqG
z{Wi=qL{lB|?NSkf^O&Yix!Xx2+lHn7_VOD}l<fB3=M>;rL*r7(Ahkv+IgeY*vYhF3
zgtZ3kvlOp$vWUL7q|W~u7QCtxfNhc*X>48Nlt*igvo4u{k<9%h2ZtkOvl*+KD`c6P
zOP7xKj(PhVZ?n9z%&Bu{V6cp<YV>rRGJ>PL!OOs$&;SdRw~e5iKc6_TI}@0VD#}TP
zvvFoIZI1DG(Br#uKy`25L>NJ>WAut@B4rH;S}26EgpS5(NAbdGw9cp>P7qT=wxrO@
z3M*5g!X~O)4;z*dY86Bu$UyW6p-8=)MEQu?I<GLvs6YjWOo6*nK~NsNCRh>auTmnc
zK};p&0x@)m1BdKM2nxTS9B<K)hM5V}K%=q(Hw{n=+$<KOShf=Kkv82*t0?zS0?`c!
z4H$(88S1ux;}K!&5PvEeK?OnZ5??i-JJ4?k7`!qBxr}rY5ti{`24Wfwq9)f6V~c1E
zK@n+KBAP7)o6M)AA(#x`lqq({g48kpvyF-f3WvtZoN(d$F(F$}AAo?wOCK8s9|Mj9
z;Iw1#{F7XM<}!P)ewF<%evV!ta9-mg>>KlMwTc4vcI)p0-Wh_8?uYnUyP%H;u~TzI
z$P@K_PDl_MAvCh+qC&vVO7Mo<OksbI$@U$>@irYBb%AxzY`**<I>ms}xaeAoNfaRB
zrr(DY)slwQxM^5uO(m$H(>Bn>Wk=c)wh;#|g!q0!%wsi<%H(|IpB?e>f9nG4m)>OO
z=9Hic5GDetGDb>hrBL&zh2*YD929U3!Uoi1L)Oj@*m?6BouY%*ftO#{;%|NC7XRY)
zLoRO>eD5=tIJ>sSpr0dk9!&s308}zWf)EVC2eilli>o{ikJJT|#u9eN$YqHM?ckFJ
zHXmtzA#FqBrZ%Bvt#&qmNDhMSHul?rpw6+K#WHkW_~z+Ht)4PP-YjTL?e{MzvG1v|
zVnI)8lgPt~LLXY{Rq#>Y8zhy7kA43S@TdRmpGrD32N;4iWSIh2Gul5S%i|(TXoWYP
zt&NLxx>0jFe+unz8cx2vw7;FB!i&jO%m)fjGo5g}ze7106P%5vXIikdyiRXv86PA{
zXOTtFP8;IHJJ2rF12~s7NJ_D~aR!w|H_NsMk8hQv)9GN#lDaDC4*K!Ar{&(AtJk=C
z?W^RE-KSc;NhwQ?D`-SO4h8iyW5(_l{j2M!-9DMuEUy(PbxcN&N^0)i?O-Nd_IFCo
zUMg@^fprz5vSL;SPM?Q<NAb$nzsl;zFSFL^aQDHNY3ke5$DVhuH{{YW8jaZN<(S#;
z=J^+U{QNK6Vs)dB90tTJ<13%v<0pUgIgWP<zW$BRGoDSTD!6yskV(&Dmjiy%<IA^h
zb5sSgBCwGy(XkDqy9eYsi0+JT-62dZANtT4?!Db0CNMreK!+8w+##?rx<tsR6-CbV
zj3Q~?q!cJA$a0O=Q4cRWf~N75jiJZ|ZqUUz8_m3|qiPJsB%?6z7Ig4d>*@pE2)rv1
zLXc%TW_>0DF>hVExcz%*c(Hl)NU0(ck|HO3(ql`RSVlJ1Q)?O~)iLVQGTq@Za``$T
z-=_Oi;6Q(sGnby>)|>D0(T{$dE}A#rdBn%wf04ZF(VA##=u+KHq$m;m38(3FPY6=v
z6N~JZZ{eeHu(r9*_-IU3Rb(#4Pz5QGA-eYpmFT@bqAoH?3an51QfIKH0TcZ(M9Q{q
zb#c}<(PqFJY-4EZIAr8mhRWg+JOqnUHP#z8FP~?9Z8=7(IM1w!8N^qvyvs{3zn{JR
zhe(-G*F=-R#^a2|xoBobt6@hVGesv?)Fw~}fzLB!zKk~w-}8N+WVn>`rB}Yf7r*co
zKJ?rZ^iFMIYtIWG{xCORd5xwK)T4*k>5#|I5BN*}*}uo58~5nh8QX^w6cfBFxcKzT
zgl3=hCtsrX^oOYGA@%)tdHv`Adp0g_^5FhI=HUJ#j_yn-ddn0`k|&<p;NlZY9M_7^
zzIu=226Qeto-|xIbCI9>vww`g_J8|p>`lf|uPh|*Y#;E#)(l59-Z8}F+{g%Vcy!D&
z6(TCnyg+Dy%QDnpjaRSUq^eztGD^dr4U6(3q7*S^UB~NBYRGiP#>xgyKXsAEpMH|M
zENMzZHEF;EvW}w2Gs@AF*|<h2$MX6Tv#R0X;3y&Ctq$4aok0@mw60yPhIakCFu-f0
zmqLi~GR{U+)0?=EltRvzO>O0Cmzwj5asKtip9P3EWw7D^0Du5VL_t((3D%)>5p#AS
zQtPugINGMd^*U(OoUs&Wb<Uii4=(`AWHT9=2(4Kmpp>Ha4i_R2V;hT7D(yfG#stQb
z3Ht~8oIU**!(oq|?E^#>nSzgQZ}ax6Z_w`*^w-uA!y&FIQBAX8JH2NjUo;G2(F`QU
zmnT?9HJUL!t}u<KF*U)%Y*u~C&zRro$9Ls`T1`AcDDovm@z@gf#vaN>*R()Dk>MPu
zwI1r+kT84_diM!8?xO}B#Bz?#Eyk88>j+tckQrJkLdbxe$aU912f`%LY81lj=yoe=
zg0GN1(o<0mRN4}Ul=CDrq$mN2a5**z^8O655zuFIM88Ax%^BfZbo&ib!;(inU7&k?
zxYJ-CSwtg}!AKlTgz)oCm(YL=WICWb0p~yng|rU#I<Tz}_dMaUBAgx}b%8t*Fj5$q
z5$p`1_7Un3-S6Vm`DnOf#vm(%zlOVe8|{X8XOYqqY(hqai*0S9vc&DZA*g~t6_G(*
zpz<z4IB)_X8r;$akShda44W4sYJWn&d!m!0m$5Nw1^Yrm9$5MChdB4WFYw@(e*yE>
z1BC42v`#t%i6A*ZO5*VA1gQ9VSesl1qsefzypJ5pIlT*&p|S$03w+VTk4ALOlrY+-
zdT<;6@Bw*UBXcO5z)_H#|GpPldG;x$jYD{g^D&$`xQJl3T1hViE{M5SNG3xiogz9y
zn?vVC+8RdcOS|2aKK?A9s4HFsB!N+x@umOi4ln%pdB#UKD31g}iDd35P&!^XBDAp&
z5rOp1l6M@dPj3>I3ihttVf(ESWJNS@P#H5<Au>(Sm$*Ki^6o3!xaJ0ZJ?2NAdxEDf
zoMG4>k_R8Pl0rsv4KJ`E8e};eMIy>HIB%#6$y9V$>gT9nk;2d2_(Y;Jwb5*(*aGic
z?P6;>(7N2Vxh~)O$4U3v`CtW*8bv=JyjryLL}yD|R0D()HcoTl3`xck=24Py^P1u=
zh%SR(fKZ-4^&k94eDX(roGQ5j3p{v-434I*@N`g_z}VQ;xDeRdx<I!-Ob93QhT#Mr
z^PZ=>c!(DKOthp(zZ~uDa=gDoa3(HMS}l!~tZkk~Xq~cDBSIIFAxrv+3un;~v36@s
zM9=b!;p*y%!9U)>?fY32ea7O5dRE3kIw4A<!(-mL`4#NuBbt?W$yY5KoesC}lz0b5
z2UHFtKf-)qgu14gy|Kaj*H3XYX_#yq)K<aZsR7fShSxtkMSNw(GtYEcTPs*y3#`yE
zJ#;*p8Xg_oW_SPR`6It)lSbTTkoUN=JtpLWqUdlu9&=Fa^2Vcoz=h5RMOLtJcA3$n
zLg${PFl2Lhfvv40Ui<PPE0@=J{kKYvcl+d_N4{Nf{)r{VQ;&CJma{;!?Z~?u=un}Q
z;b>y%E)|@848p$Q_=^p_)taGPMY_OjQiD(^oyELF1zIHpTSN`7lriHlrLdrcCeM=k
z(HpE4I1#C%I(-~c33Rt0^on$Egcy6&)Kv8hYvcJM($9=i0BC0Qd|{Jmh01iSvuWV-
zKHlHfxfmgquFeH&^4@zZgaF1dnLeahx<#{g$m35>czC1a&Thu(!8@$4UE-RUA!Gqr
z$<blM!NVg?o$tnVv`kr{-bZf|MDnqS_ZErOvS2WNdzN66=@=yydACD8=wqrn?Q$eK
zi;l)E3&~rs-hoY8Q)l83?_BZ^unA2);ehUvF4POGsd3(sWeMF5p3>Bu+q{U@IrU^r
zXLttL?Q!9;E#A6$i(amy%XWY-eD*VZ_#=OwWAlLhsiVe6dan^Uli`Jplu;QoPerC!
zURkDF6m0M8r3IwLX~7@+bAJL;?@%pw_(T8Ezs=|W(Ldqst5<kd!;?=u&g-v!gV}hG
za~GfFgHL>vW^#?`><(+WVR&gmm>PzwOZ@t;e3}6nonApE0{h!<^ReH1m*Er7^Ug1Q
zntJb;qjw(R^BP}!23en`_B?U^9C}vp_O)$pKXM2)V6=D4@>-w&;4l0!UVrl{|H=R9
zFCqx!nLv<J8AI^!?xP(pE@e0uDeyiaiopx4a7mLXkU_<dA<xNHH@UjK%i*jfBp07n
zSP}PkCuqKD=_zGEDM`0muytySUQzJW<;RgyQcp^nQAt_O@OajiR}msGJ)Y214qX&1
zE%({qKV~v91eat*$rv(NkIoZV(IT6&T>ytUyCIr?M($nHG&ma>I6e>&&$*7fttPQ>
z`?OGQ5A=|VOskI;LPQ5&=VBC#kg@I)BaM{KscS>WIFMOVpQQIF8Ai^ZW1?<_B$1Mw
zaI9`6GHJ=}y+_G--?y2g9)<IPy7oMLu*2!CGi+_G^XSno&NL7-!8vZceV_htz)Ror
zGI_5L!xjAD4$?)lEZLfF`1vOr@4+;;#z8ew!K-P-bTp&39@9iG4{HPC@$6fE#{5n{
zzAFdR+C!(~So(p}h|A|_eqn}sSjLSG4K5*<><7Z^7&o-Yr6pKi!z&G8h8kq(#^RbI
zq`ZleOL&e6VVUGo6ZI-Bz0H~EP#e~edW*m|!QH|OJJ0@X4=N#(j?u@f(*{LwB0BkQ
z_OM3>sJ?;I9(e%amLxcd?|Agt9R1=3w&-K*KJ2%LMwqibMF6y#H@VrWq&L(sog%D5
zmZ>3i5v60?L42i)?>~)L5qMF6UPX!-{@%M-w*no3*w{lqZ3+1*!Ho&-0cvL-G09O#
zf;ZrcNOSfJw>O*sZxTA|30Q)Z(TGYmkQopj#S)EZP)~oH#(BJO1R=<@PWq$t;}Weh
z(iB6?#1i0LAjk9Q|NCj~{Li1}%#vWLRuSh<BL{s%%5an@6alAWKBe((nvBDnNIex^
z5)?5|S!qP3pp&8U4oc+&rLh(|)dX|@3X?nUlFer1LeVJ6?krF`fgP+d%eR=72B|$N
zPJYpm*2jpUAX@Vyp9V}p2=UO2oAGwDDAKtp3AlNrP|FteJ_X8$6ip)&#mjl7rpK%Q
ze2=Y*D`;CW8&?EXBxkvn&YRpgm0<b8D#dcnWcQGRdj~ipu-O2VM0<}G7C9X9FMj2l
zXt9hh1wM2Tq9D@+RaoYit|<Di-{Rl?sSmQH6bcK$pk*`$bGD_$Mu*|Cn=uXo)9o;$
z$B}`xBB!ejTDU}iVIBt&hnn`PXnpZXfA=RAmfsHT#$rADp8U22^qH0i?fu#EsKdPJ
zAbo2PX_<7w!M4?=&{DfdMOw8paWbG$fhS*hp6~yCKS4Ed>AHv!A(DU<9*2o>PC`cw
zu`M}s=5dPda&%{I-^cmD+-{#wH2v>+-N~geISretV*kN?X47ak<8aZrS}Kh5tgda5
zWu0hP7A#V!xpVOXSU!QKrl;U+bnnfJ4xLU9FA^=41r;<9D2f>IVd_Xtl<5$>`R+R$
zOz+~(+{fnz?*p@`Lr6uIIeJTi)%8dtpH-0kkmK_Ak2u9L_it8=jvYZb4nAk;<~3%f
zcyK$X+Uu~f6*zs-QuHl@KIpFJ+2;h+_71PS`uFk05<xyqum2{?D-A2Fj`E)5CqDd9
zME44Xs=0KgU~kvcj04r!@zD>T=iMvsaPFxs)*kCJEiHfG&ppATHyvO8<%a42-Z|Wc
zHNpBtO<7Hu9K)X3XJh#s!(NAuw=~AFvDxSQ-v0#s`)~2ov+LY=RdVO9qcR3BHPTpA
zh|3(|0U6LLLn%q7HCd+V7CE6Ght`hP39^YugP+GHC|d5?g0UWuN|0xorKKE|ts<qQ
zv4+Wb!nB-4#!85vMFeqWlv~s_h8UkBltg7ZUHfi62>LdIwAH`)bYDn?3yxYexF`1r
z+hcBiX~M<LF3Kx(P#Do5&)ui9<e7{P5oXG4yi2YnjkQSQQ8GqD#YLu^8)!z<F&Ysn
z72<H7OzTAW)^EW?2-LN~dqL657_O}lY&30BGDBvXyoh4P@n{@b2aQSSZyHMH@iM7)
zq&Sceh^z_U(x5eYN8xPDR`%Y}?+z%N8TBlqI-1fuRpNb*3r}3+D{oyR&oxU+eMZM)
zWRdaz`CEU3Kk=vkB&VL(X8Ptkm~lmIDwNFP;E)hNEdnZ69Mv<XO@q^dNjasdr~KfL
zd@s*Gy~XbBW4zm;zBl302cPAY-}nMoUjHWJom;&B*;BaAIqCr4{MxUxQr%>IMPl<6
zriU|{(S$(F_dVI+mp}KAbC18o`<~xmZGDHgKKD7!+<t>x&UkomNYN1#dCd5%gJ-#y
z^Kd$*pPlCIodXVLis#<{BLCrk^`rdqFMpc<$6xu|RCR?5abbea@pTz%vw~Zrea<eO
ziN~1>k>w(y+y(C=Jy%JD56Esov381`(sFlupP-{Jh9v!WF3izck?NfwX4B_+#`@|i
z%fn$@?gq!@#~;VeDooulo6VxG+%@$2eeA5FoRpADwk|$~);Y&VN0gPpg%B6c&f%Sj
z_ixI%jV5-jabI$NW>M3k<8Q#&CVrn_&dzA%7;|P*8XQI2ug*c#_RpjArCpp5KuSGd
zN=L*~kYyRua)vbqm1Xg~m+haosBT-UqMydxzoA`lLXx^9SwdPcSr_{TV+=uNG1f^4
ztnnNijyOIZv9h*ICI!|;vq>*Kv+0Dlzj1|?<pGzUe-6<bKsiQCD^!TfYT*~p>Ae(@
zh!_JjWyR!ZMp?%kRA($U`2y7Sw`tS<JNWpn98h~?bkB6)(k8ZW=;4sC?IN<6khEX~
ze!Hf*IYVxaki7xjVTPZ1JT<zrf$Xf}r^mR-6~xjSD31^u;2Q`TL0E#U5N<{YIo?-s
z`-%j5fWrA%%!iaBI^-e2t5nbgK}Ncz^B#+Yr6JjRgD)+?gIet(2MsQO>L`kpE;1iL
zqX|<VFIpe@{z3(0h`e_n0xGVyQR^U|0C%lMN}cGjBAPX64<f@J<V^Pmh&@TZEKoy*
z%zIF6qC*8%;a!D4En$6(=WW#f4ss_t!%}GcxIqPrQ)y}n0pUE@sL_qFEDnN=OM;*X
z7%UYklQ?InhNmgS0Bd6micPMv!h3X{14(aYA`L^33@d~}IzxE$h~t0!OKeQ0>^?L+
z{?R_Vx6ee?RJK7BEAev)a!!>L!lBUw9WY+ae}_;Cr7~pRfjlQ<3R&bxp>Q5DYnj}5
z6L<AZipC)_&2B9yy=CYNbkXJE<}*CpdC1z<8baiFD-qHsooO2b&<>?8W?8y+0Eaon
zb3XGV%XHdYv4jwYh)}medf@}XdR!Li-%Vq%qTu$G8T|pQpIT-*8sqBduA6l-I^7(R
zWf*VR-#$b=@JN*rgvM!wAfuxVIy8j5%Wr(`9)~l95&@y62;T$W;OGDjRe65&M_ypC
zzC>j}1y2}HP~+p+)_M`CWx&j9%y1pIx{g@wQ;)}#RfY8hgA4{T(9r=cVsj<aa1tFv
zT??nO7)BRXKJwe34<k|J;v6UDpQAXj6>c}VF_&1*vr9#kvBk$cUoy-;$F{;QsO|AO
z5beO5$Vv`qXgDtg)&`tPg)Ufx@CYoyI|{9sjb~9mr{LV_r_e<|4X5vUNvX+5tw<Y#
zlhDni`b|_Yffj+$!2$aZA7E`m>eS~*w|9=Lld-zKflZ?fNU}UT;Reimmkfevu$3Zl
zK(e&5+Ftx=fImswf}+R>2yA0;)}aec@P^ml_%ifnltgOtg9m|P5IFmIpf>=r_`76Z
zQ0PFI3A+6mK|SPjcfd!+UG{f+Fes?1DPg1LnM(~B5^<Dq_galCEN9M19(%GzIK$RC
z&%@)pxbif^<%b-Fir@un8;+-+-NL#Z9zNLR(XQgzmwQa7J$4Q>Z+&shH@^86AN%-Y
z^xgaU_{a8m>-G-6@(;er*(dw_<j<YstN-JKy?udiEVHIWR02DbjI3v4RnqIOvR}W;
zv~rwTmAv)#=Xl>o&T!NmP<Fdi(+)xC_R7S#IEaa4)<;uEbSBaBvMi@F&KUGNEDZ+a
znZ_EIqKP~%<weK5)I@jnVotBtQlc_J(Nz@PoUG`h^G$+)x-^VNQw|Oem`%#az6m1M
z&ZNN-&S67bQm9;!sXUsuh)5ru_j;*;mH<jKDYhx}19slZSbDC*Zdo$6o+a;C8Z1+d
z?$b<$^w&E4*+2Xr^6bVk!=9$8E1Yu#l_F<yjV=m`P9GsPGS4aU4yeeGFy%33a!8P|
zzJIG9)`z5djdCF66|%?@nx)X0Vr6wD)<6`~!(##h<5O%<<W;Bmne;ieCB?b4EKaVz
z>HHKzQgkwu(qy^BHip_ccK0Xr4-U{n#n!21>Z(S`sI6OD8!|qc(ip?v`Rjk3Kk=vk
zB-VO%e(Q@MvWPkpB5LRaGM89Wv$na#(y)ub@zN(==K1%%K;Fst^1u25X8Mq)-hYXu
z^OxB)73cat%xC}Er`f);LvM5o8>T4L=gH+h``cr7k8*0AQ4|^}3#6YiST6Xn-*t&s
z-nhoy8;7iK1YUfx$NPWcBfRsKH+XROh)1&yer9obhdj%<vFAA2GpwAc`KkZIpCw-@
z_<#O4f1f||XZ{nKCOV26*R&M?GxI13N(zh@9LyX?vj!E2?5@aHjYpj%YNe$n?+;ko
z+GHjrZ@qIBQRwJ-;T<TM^!$#6WqV{cXeH_OyKJtllNA}Wy2RHF&wua(bUGcHX-PdU
zsq2Q|J>6c1Jk!+UF|}z}-R!WkzC`0a)2dEcqc(yS&f{%J%&l}di|C!wqMY-%qNKYP
zLE>#i)6Am7ZitIOowc=)$nKjPd8gMC7PT$zoKFs^(OEj?4o5ae*7h|rvIo4kn7StG
zc95tA^N0n_w;cjS`$r+^$<y<Qe@m|22%HZ{5l~9gm<pXIDS@y!6lE!S_;8yi&R=3_
zWx&JjT?(Ob!jKg?$A>dsd-W}rmWQ0abPln!jH?W)sS_(j%(GzMvjk4h*BZlgRMJ!i
z*SJWVP745MPXIpOL67gs0kzxFsJN=aP4)=o3>{pGg~;$K=3A=TVcse!PTj^o_BiBI
z>^SC{I$hw!0Ht;j<6VT@#|_U#imI#-E+dEuFeRuGLWS}YA1sb(q$jv3!IqJZhL2`P
zHj9m85QtDjs*>*GbqTq^_JDI5al^u+1`)bs>lxk&f)>>GW(0Q!VS2dhhETSdgQ*>t
zYK}4>;s#h09{N-CRt5(eK@2_)oj_O(h^nR$tC4yu3#_f6stH*aQEcGNF)~yj&ft&k
zAlBa`9Pc5wB^Zf!2GKVJ-+=8R!;FxE!=s^_$9ok`8eD8Hf-!if2tMjOoysE$efn{#
z$qefPQY(BAOr1vynJo1+dB4CJi}V4ZvuHx#qnXshPk)AuV#@IQE^=N-rjK^WLdEG*
z8o$(`8t-v9U5Ym4K@+4(1|K>irnyczd=jY?s^}szjn@*PGNe>k5exp&jqBKVU#GC1
zNyylpI6ArKY`4HRhS~ZVhG*BgdH)(O>VUVAJ}lGdBuNZM3Dz!b-bDy;>z5pcRXUh_
zqRTAmUN|w_qy|LFn5~<lXrfP#BUl$ra6-oML^B@G7z{LnB?Z#r^T6)Gm~L-Cr&F-h
z?Sh*SOp_8~a&pm7NY7DsmEZWv1HSO~4q6QGP9wz(!~~Tod{Fo>=B4L1II~sYt-<55
zO5#?!sHGmFad;E2TX|{?qcyWeu)AF{n;3>08LPuQ>M=rw4-#i;`pPCerKQ;UMU_kM
z*L<LF|K;02zq6iss5!x(SRkaGa1c?yn~)d$NrQuAQq)o&SeUGc){-S{oP$UMI4#8s
zQ0cYGfRvhhZ(YaRnoKI%)Y%XsI#N~>giz7pw#eApI8RpeBYGlFXoF5tjZT<7y@%Z^
z()B15-ZgCBz0dLPPHJZ2^(WL~^X{bMu`?IrnUoxQgA{0~=ScB<gMNbYC22E$<G?x?
zE=N4vM+eAxE?;Wibe@3^IAc;w2GmW>gS~51D@V}J*grhRE6Ih^4ZREmim`P_FKBjq
z9F06#5ImYX9#w|(XKF4!x5oQk%K7{&_jv1?WAC=+;swv8=PHguMJ+5^!rr~ea9Q8V
zsq?@SYftgXPhH^ipTEIltCBZg6@2Prnp@kuOb+iNI~_LG8kW}s`SKo18<LNG;tc24
zmU;7?NBrGi{(FR;V0W*i_tX;a+-wly0T+I_hy9$Uvv!L8`Vl?V;lX&yV0n%Aub$)9
z><$<Y$CjD1bWZiSU+$wtm&Pda-hif&^W}Y9;UvoCygN(=F2U0@2GcZ5$0fV+h%C?0
zN};7f$dHhdgnOij1c=5)QizC_DTUU8EEnV*h0>t&jH06$3=6hSFH=^A@!^!w(U`I<
zF|}DNW>TQNZ#=aqQJF?+i4+p+>XeHduhqsolnkss(eUn9Y99Y+j@+EGd+2dUT&AgJ
z4SC(+=?_1{fBeV)eR>G`#>9K9F_97F1G32IFRxMz22qlr6#$q(XTMVCh%85ES-f6#
zpl(W(pM5)y7_2d5SstCMlPS*n=4SM*kbzm<U>XyVBoznNaZ{z`PDqZt(QM2+H_ti^
zA*7m_5G^SavZqj4K6emrn&=fU8#R=<XSRRLu)D<i<|(xBSQ`xpl~$~6twcf=j(_yu
z{}255PyHU2H`e*gzxY>BOI#B(qjTY63VJuEt{Oi2i4Sq<*-H#M19UIP8pD@v-b3^}
z|N7tD;Bc(zjc?<t`<&fu5VhiXx1y>$^q0UX!>qeSwO?ZD5mg^HFP%oKKEXFAEEl)B
zynF2tM~4~Dy!<#{{*@c-J*pAIm$>rqU2g9>%4)=g^IQDPAOGL8vbn;){@icz7yrxu
zJIA|6fTgKsRHnfN8<`y<fY2!6K<V~$X6gfXM#r4)cM#sjv(!t-Bt@@Fx8J8T7@&F`
zc4jlKT)Tx4DjCHEl+dI$Amz>{pNV9UwYs{(@^Bd+9Mj2^;2r%U<Ef{g#?=i?Rne4X
zbYr(Qt1Byb=a@}OoK&oDZ4iQIJRVb4HFaZR{;5q7dyr_gij?4eg2>Z$F6rF%fgn7_
znMB2pNT*aGNwV7SOs0>5q(K&GMm=%*w+sa$5^Ak|4+`7n5J${KA%w&<HO>Z<YSGb!
zWB)w9C4DlH-pf4JEYQ+^<NY9Y^0ZJgMclY(k|sT<Sf{NI?CczH?(Ac1o!;R7gB^@D
zXr&O&poC=i(Jo*6+SmC+zr*tS8uSM-F2}k^9}saCfHtx%_H52rrpFWNS&cCfF^#5G
zeqJYY^_@4)edoQR{|yh>6NJ*z{Mt7$jU#{H62nfU8_!1vG?}6Ir?|IjRHwm>K~$i6
zIU-nsC=kBH9vvfP21|w@N<^p#DhE-;VN(W#w1AHhDx!=ANT$Ra55nSI%tZ|?4L7;V
zqH?l9hfr&L$AXv=mIEs95*|uq<`JD7wW(2S8LSF)Ca`yA)ME>-L)=b^<nGg2AjG%`
zWj)n{hWs%_w%S1+kMUj-Iv#N<NAx_+c8>K0p$JGF@I*?j3;|Rp(M>c|IpNMZDDNSZ
zK+ZD4Oe1%}uSUmXUssSp&?f|ALLe{%Z(~$Q1Fng5TJLI{>q02O)cA3Y+1pR=TZWfe
z-2Qeu(OgLwnpuq$3ZpeDk2zyfDQFs+8#m~0W;9I6w+5^~@ho=l5RM&o?*ZC)P7OA3
zn=2eYno_ccwaZwaVHJ4g5M6-?IYJdF9w%CngT#7=^O7)|G5Y!!$!4BM4jK=S_p&%V
zT8HZ{BhGFivo5`*_wo9TTfBJgi8yq7H`n!f0@xF%PynB7-i5FTsnYhFh4vCs1WXz(
zygv~mkX&K|cswd3!<-N?Hz@!oF(IQlwj7RX^4zew(Wke$#@_abS3mzvrXfRTIfIoB
zJz0=D%_y@xauqv!yNGE`k#z|{qNPKzj1v|W8a9^{&s}<+wbhK!SVG=G3LVXh1fT-8
zD=>v(rn`*7kjc2D*Ud4@9qiaMJ}&7Da#q*+RO6ba86bm4H-@fGLr^=WhQ;=E?uORh
z6Z0YHTl+X3*D=Zo<`lXXIf{pnNM>9<X{aMK81<2o-)=^Qh`KyM)VIl`V~y;kq&sRe
zBvmwXz(*T<SHeI3`~NR~=HK}vNMb~h2%g$hxXPlljN`K8@`b0!@_s}BWGuuU=FW@2
zLN_D?GP(V>sf#|u`=C4?@#yv)s&W!n1wQ6aq7o5ZD4cXGudUJP_Au7P{L&C#TM&{q
zMt$*nQ_f}D)_WIlHab~mSsn+K7P6k-3&~R;eGt}pT+^VsT_&R=9!>AzPc=;K6uJ(j
z1XAa$cLRrqf}i@{4gS{O+hM$`Fm97emj?_&$(@^rh;fG9FF12?oAZyCJo!CKtjbg9
zS<dZy$8?@L!`Vv}GqJ<H2PLMqJi2EI#!~ISh44Fw!9@=C4KIJs3a`F?gY~l(*@2~=
zV|{gl&9yPx4?UyCa{b{0t{-l5^{(T=j$kr&Sm{t<%BAPea{tN^mp-_`#q*lShcEEu
z{UdJN7&CZ$nZwD1SFV4NrE}0<USj#efcuZ8XrYij#c0Qpxh_lnK2;VoC%tocopi)O
zv&a>0QS+qd3gA3A7cfneHfm|HBt@hI=NWmfQ4;b@rzTaPGD(&z@=iwH$?0^W@s3g{
z=F5w?IFSN^MdyO5Drjm;U7MJz*qDfxCIrbDPt!CJP0F+Qyjvdv0&+OPnF&I!vvR?*
z_aLLL6`i4?n{_xoR6P2|lyCgw-$2wk%SQ3|g_Y=%ZVZ8lbOa%2YJ=?fDDfamhA%NX
zL=uoF@+^<br*Ff6`uOE12tktN1zC~foo9D<7a;`eTN`X{Y>;UkYpM@4b&aWNMu&$;
z87aL&gR^d~g>0QyX`TKdxxuoi)smRm=_3Wd@eK$2`z$XP6w3z`gD%nsq>qlyO<l9P
zvBF@mM4@|hx*L4;3$M@{6#Vqh{VZ>M{cWy({TqbJB$FnOaE_z>5uf>|pWzcf@^Ma`
zUSWA{g^jHtANa8sx%&2PUOf9Du73HKI8z_-LqB_oX+6dE&XO-bjqi22^U7Cw_th)(
ziw@b^7JfQnIyvI#&22vLiT9&Bf$Ws!$sc`^XYNn=`+xOU*nZ`J3+GRB<(_9}yv^BX
zp5@>B3qQ%7gF}AxU;Z-x-QW3}*xJQ~mu(nNX4I~U$a><j3n2;KA$2kpN;&A!xms{z
za!i?+D?&o%AqYhl3e058cxRisG7w^}uF@LPfVXga4RiB3B?X;M&id*aMJK0fW|+z#
zRfa%t{`@&sR)&!!RYhdcndk%1U+!b-2ID+~%^{sZm*eS_+6k(rjwY5aGAM*?&(?Sj
z`?N&#B2jN?*)hy%x539VSA>Ap8tYV~BrCNztLKBNr1i{bx3KXjPeWy!R~~#^(95`p
ziuJI42&9f0dQ;Z~YoN#$b!48}*Pc_%y$}}u!;2y!i2D0@tT|*F!i7{A6Ai#({FKKV
z%XCt5bacq-`YJ^?qiQTc#BAw8cJNkk_xdic>DT$#_kWn8+r`ah;OcoV(Do#9o^S7%
zj%Uoq6{dFa%u5Uv=UQDZ@NMa2erF%wl>@5CeViC>cyb|PY=#W;16c_@mpcg*{-!0^
z1GMTvzmGUm;4ugqLfv4ej$&1(+`*{BffASi20=-XF4BBd5viu$;LQXdG!lhO+sb&8
zw}IPHXG1SX%P!8(2(BV@x-blI&(TaEWD>oek)0E`!ee$C%56h1#awzPTx?|LS&DPJ
z|A3qoOuuUAt{&3!fsiTOvO->5MK}n%lAsGzCLsrvsYu;04y83h70G&Ep~`}AZyCDV
zSi4WQ{|Fu`+_5Dr<pg69$rvhb*^-;1i#e7NLG^e8wuz{iGtq%DG~lkkLnF>mi%uMl
zRgRZAS_@QPAd8#`AvMv}Q%3EF5(+h)G1}Rq@(1+R)-byjK`QcdD~zunQY^2c@+F$x
z5#jJAn@^l(GP+0Dc|gc7;rlC~qwA;;9fZsXDk1?<4;s=yU^%*ei*R&=tsTo~&RtmI
z!6SpnJzXR!#Mq)H7<&DJH?QyT{H3P|-bdHn)=Yp%_w}KriN*mdgcyWQ%Hs2o^hA*f
z;6)P7CjFpD4t$z0N1wcpL#dO|6*#hO;t(Z*$4iOznz~Yq@6`~-geI`H^(4DTGwA2|
z06Wu$YIcmZ7Nv6@Qw4R`W6OXj042zj<owoY&Yf9fxdWk|;=REdPvPAh`D&R%08%<=
zW*klg4F%O~f<Ypz#RbE`_Lw3>WK39(3LPpjr2`1>Q*nt)nRc%`e-B&39lkvSYPaaJ
zpels-ASjE(Q06+FxDcDitjDvbr8S9GpyC#TcB}tsKW2yq8Uln*J6I8rLE?em{FlGV
zkNwn-(_2{~_<-@2sx0IEn+YyI{Q{ZpM(vbR3tC@t!k(|O+lUq!f5-d}LyR9f+TLON
z_FY_4p>(`vTlYiZV}hFyk&eH%y2W@rp}(}wd}G`WKPTu-^CiRlSGBQAQ4iRfSS%7T
z#r*Sb8w`mgSVJ|NF<4n*(C;IqKxdX&k{tx8nbsM{dYRkTj`)SE4L|VA^IZM`h0Ywu
z(=pS7hEKk*MrGm7y9eCg=`z^zT>EUp*=2(obf~4}@>7cA-6@~_^gh?$5oB6&`m*Ny
zMNL^*-gvEIaH`<yt(s>)WRMz`dwrh2q?t}9)U!RVZC6x5lXX{kush-YL5KS{YK#a}
zA;UR`8pAi<+2w;DJ&kYHc=3fUU--2v?ClktTUuhz2zs3<SzfS_Y5GeUV`j*i0bS7T
zNXB(TSN3qZrav5Fo51n(5K~)(OZu%KTl?}v4J9sFk{fJj?=KnBb?Nb5;B1VZDJ5vF
z$O}ccTOhImEoG#Or$#f&GV)G_&SLJMcY#KbB2F^AYw5xs62;W}PK>vy4bIu*MG()%
zx~x&TN{D8_i-zS*!TkrGr6(+nnQ?p=SeIQ?&!LXidFPjF-u=yzJnJKC&$4S+Ums%2
z5|2R!9W{d?FnP3%DQkMGD-=sH;%HIR3{5?yHVruUZ9!0PTz!|U+acI!7E|OoQbmKa
z*=$Uf7w9Zygj<Ji4CQ2!ya^)Pz`H2ri9RwuE>L|kl56$wLc}G(iQ7;J8RZ>Bc8Il(
z(eZ?>#-LO;))ODSC7c&Ly7!3wV8rIBt+?FI6jkYX<@2wwG#v0_KmC)`WzC)Ix43`*
z9@S)qbTM-78((~#$1a>^xY4IuNG@JFkIca7F+cxnukojU_Ioj%(@d)|9kqm--6IR@
zEcMr!H7oeg#bF4mXQ=fGeLv={H^)5Mn*oNiPi>;QOML1N{T#ieE=8WvT))fi*WTb8
zx9{<V|NJjwT|j3V;RR(mV_KFNXXBzcUR|^URvhB>e4`v%9kPhW=dm$NM#mr{RbC5$
zmI8S=1{q@k^E8<1wEU9^<r69XCz5O8=dv^yFkBjd3`}Q}s7;m8Fsz#^o_hQVr1Mm>
z8Fe|M2@Yc$I!k#>5hyE!%E)^;jk8Rf24fY|X_+XbHs-u1Cul7dQb?NMTDO%Xr8!x=
zqW0U{fVBo86<TFT%zW|Mn%;TQE+bnf_^?QRo6A<>dnZGbIC<-n{x;3HCsGT%ge)(Z
zl_k~~w2L*Z{ghf8qB%a>`ott@)X5muwzkQXI~#Z19<4NWQzMl_dWn+`5fqJe?C$Nc
z`qY!GudZ<G&ZCqLBCxhW$&99|xpDP2E2}+T_`nP3UY}qagqQQ)L?(kN-#E%~$#hg=
zEm+&;6NmJz3ySZz<Ba*8etcIBs6j>UzD&Y&FCvjirZ|tF;-MT;prtedv0D=giSJ94
z0$4&&6Y2_SNmQa0U^T{p$P$<WGeXi>pfZqs2usLjL~t{tC2d<Jt>b9hp$Qc?Zb7M-
zOKEL16QU18@Og=;0>MhuMBwi>QDblqI4V-cVhXjVHCBlyr9#AJLIk7@$YZdxF1Fu6
z3`d9!kLVhhb_g>KA&UrQ7Ol?-4)0^*2RTwCvA+)lTOf?W`z6FoQ;+v?qJ!>?@z%rC
z5DEc#e7(VWf|d9t;EX3U0ar&`dv6@BG5A`7@wmI!Fr8<ac9uc+Ko-$CS9i&q4nccd
z2nYpQX9%s4xgvxHCKGH`;_%e>4k;c#Lv!bl=HUT@C!b_=cLdEIgA0%0)e`m18w{40
zDJ~7zed~32utan5Ihw9S$OffJ13<udk8qySc<Ncn-M7C%7f-Hop8bIj^3Jhkd~lmp
zXQ_h2cQP6WjhxZx)KqoF{_ZZP*4LrUo0KBWB<*08!stUtqWN_DrcE13OBB3M0)H0Q
zcZyvIbN*iZSzg2dcA0|N+clyWB4)BB>Lp&|on&Gvrn3W7)*;9t0>esQk!>zh*A0_{
zW17l=FQU!5huV+WS}J(z@_^hLL>)Du%0`-NTY%&L+*}J1h|x=z@(IVyj2$PL8He+l
zAT`o!>N-%UK6Nu8vongm2WQgtE>rPrH?ghpLYt8qVd-y&wSW(f6BFDVIhCoo1s1ue
zsqhJGx;@(=ExF;+8kwj#w6-8{{3)@dhLCi}3!M%^;3R~gs798*@t6K<{=#4Ri!{d4
z)U&wJCvbY>A}F0QbbU%MUubntRO|WB+zzTf1Z1K|?cKY}?*035As`~c5>elfuIGfr
zXvN0XX(rPNLd8g#mafZU4o#2by4ZexX;$|xF19+o-h5bZ&74}D?_3+3p1yO2$!Ns#
z#wtrI>wMznALL8h|Cr%&&I|=sb1;(lT}j7n@OwY?Nh<p~k9KZT&jj7RV(aV@^{nBz
zY;c<mb^?!Xt#SS-$*r$Vu<DSH|G*l!3E%kgjMsmwM?UByoMii(B^Ndw+SFWor)IsL
zv2n(t>?1z(!hovUW3(SL5~~VymeWx^zVwv|Z(MIMK#;0Um}0fRPF7SrxG`b-cF9Ly
zYPffFjdI-MV_`_P=lHF>!zVs?p6_|z_Y)p{iEHH;V>1LJ>O)Vyk+HS4!NaSPywG^x
zP!v6udn;6RgBdl*V50_gZlptW4firGmI&D35MB^cMxbwbe~5WBmvI5-Ev9zNW;I1O
zqubNeb%Pm3$%2xCOc&&tCgdVEmQtku-XaZY@exh*gbRp{KxP_eWkeWE6KS_f;at>O
zOC`w$4aeIfazA9SavvQ8`2<!^^_g6e{Ko%qKzYz5gp6s~(6N?5H)CnJhpj7+@r?8m
zq>ty~@%SD`j~-DBhjf>^s7{U)flNtMrpPjh=yW;Z!~HFL*t$W6$PSWP5rW4yHMl_4
zG)(MKJVzwaZ@{_eZ4uO5-`3LYV{ec$w$u3&#NsT9=TmD_ljJ1Hh|GD1@s{ay!sy_T
z;aUghY|_U{BoRHEPD;k(n+%r+tZ%H-?ROCfjt@q>w)YxYu2~xP`PdJBjE$`g@=Rl9
zmTEGit_|qG?D&v7cMf>!x%YGH^*8v_f98M3r+(-IxWOtbPxR4BQv1ODhhyH^f0gFq
z2e^L!4lh1+nP;9l$I|MM{$R*pbqyahhdVpG^UhV?efv5OZoSLxTlW|zht$TxU}*_o
z<jiI@M+b-0*5J}mDw5nL%->5Ov<P~VmLv*lTSw!UBqyy6T^h|IF+3Um#1M9rl5zRp
zYAh1#Ovpf%Weoa#@;sw9^*r`Srr#+;;MDp$o10rWQ)3!K)il(`;k;pac>@{))}uOI
zWRWv2D=OnCrxnv#iMM{CM-Bn4Qr52!?JUK@cqq*44rzF@K5AVvoyA2IF;6*So^51I
zl(2->H8<5uA<`0be*U-m(`X{8Qx1HaND)G0DM=y8^CH4SO+)Tnj9UuHJ0U$!k(O<9
zIZ5!ORZYsCPm8ytE{W!AA)%&mS?0aR+nC|(qy?uqJQ`6oHCv~*xPA8#&Lznnk$!Bg
zp$vkz-ndS0(C3NAFUI7DY8Gp6>m%VD<*a5ho?&YjO_6*wL5rh_Kf$c~&O=Y(yL3P$
zv`9_Tc1bbc7D}0J5h>E3nHs<noD77nKo`lyFE~PN@y4bfm;)U^6F346i9@OZ*(K-<
zLPlU1O%`N@Xk1zisaP0<pPMzct$Q4%T{0)l2q@?%MBig`3qs?ME#`WIJ@V0LM@U2#
zkO^@Uw1ufPhEekyRjSxz;=7pAq1F}IT24Si=-|f!DKu!Gf{``&NX79UgwhZSTAf)u
zoCe@CW}N|Ibq9`aK@n+(er5@^g(4)8d%*h!Us-|`@n)<Hwuxp{w({62AdJJ=eVS{p
zVmDvHWDP3sU}T4&X6SJ@I^Ai7$Rt{5gid*FTH$6DjV_pu4PtEY@7||)-xD-@Q;z=4
z6}nHH!40~Muid9OvqAsT%a~i&aeLcrJibi-#sRmlzQ%a<DQaaQl=wPeY@qfQZv(2k
zf?V$-PM@XNI?u4T#$jiZu%uWz^&G}V3a_=EBR|7?iOvO`;R<iO`)(A;`#>f&sEk~y
z=oG94I2-A?2BcdUKm}R~a7b3{X+RChR6)craPK`{wp7War3lIOGBuMvI`*WYDI}V4
z915&}tmrb31+(CB^&Zw4JTM(Onvhf0HpcPb=yp47tqfUTJIATj4yv(fxCS4^T+yZ2
z`h-fWNG)YisG`AAVAu&19%dS*KF|aRzQ!JxNTC>Jl3}Mo6J0b#YNO_TLP%RyqIuNG
z%+zmZ4(faIP+L0D32sxXk!wH4*2%E_nvh8e7$69b3`uvC2JIH{Cd5@yI5C{f^Jj!d
z3C-=R*LnR*ukhSQK7_L#6FjHRo=50B<(w|wuQ|Q;w^5)M#!nEuLkR`W^61uW4tBO7
zMB4e>|02BxpER>VD>l|PXsVLYc*^<5pE<ER_wgtcAx>a(n(rd_b&k~esB!BJhP2IL
z+~O~;+xdGb0#ZU_4CBKis>vyI-r@KE#2@9>-+hyN2e0Fok0?h4YogDyn=4eEF<*J*
z(`=o}SwFqWV~?Lf>JnodrkZk8I^KQVVy2qmYQa4woPGhO2M4V61LdUT+La^Ja*y?u
zg5$jjr!VE~j>gPJ5~novXvT*=)aQxE4dc;1%q${Hh*eM=9cR4y#)uma1wnOjb-<P&
z#(})wr7JRy?u{sP#`eu49vqeIUmepg6i-Qyo5J4i9`FAHFETQkHy%v*<XO$GUE<>}
zy~K2Km*K^Au6(oRu!wqojD=9sExPoElG&=n?3Q?!mXAR|<<L@LQ{!r7K)H0+%2puP
zUNw;ho?PYdE?|QsBqvy7BxO}mPAlrVM^!hhtPbcc_0hRPDTPuRDGXA?>_sINDZUzT
z1RG;zgccwaLKRS(h}<ZNa{-iN{Yk}J-yHMo59dr9i#f_!ms>peOknR;AcVNoC7~oa
zaB8W*%nmUf6$fackZHMX11Kl)rp7dT*h5LkB&tv`U%bdrd4|#gtt8*-N1kUuy2L#T
z7jqqv7N5)mvB~I&cA>ODg4(aob>>nmzP9i(1bkZBvsj=Ljr**1IPaKL6;<8Pt!rF}
zeg>@>qZD!Z5F8vIjW{?sq}S_k>dYyYmImZm^ddMu95FebaO>ThC?UwZIYrT<(<@jS
z3|Lv+LaD&s!()bL&Xf20eD%#oG~?S;O)CZHP*!7vu;iIwW7y}Nw{COgjkh5vnx@9o
z71;Q^8tbqwN}jw=+EHZ@KCrvL%cz{vG%2$gf$%;#;I~ZF_PmaDR7!C|f2|g&VWM@v
zjWw<vrj!u#p-&1Qdo#q`J+{?LMgyOi*WT@P=@bQ0#vx3~Bq5P05(-J4Ngh9Uo^GCF
zn})h-FpVQLmZquc4+hxAfmDb>Gb;__vZgYQ@py)*TpCd05GF!90IS%arVB)*;cy{d
z7YSJpI2#euJTKyMT`b<X7LlB@Wqgzcw8r*xhx=IG7X3);Je}THB_uddYT>ORwMMB7
zYno`V=2cvE&guRw^4bp7%onz_wI*$@MVmRBqG-U)wXfD#l#cn!t>j@+RvaE5v9h{M
zr<*aW8qf;sJ=!>gQdD)t;X&Z7*WaeAHD}J80%?%RV9N(bJ#8q*6~_2DkUF2-w>_ko
ztB|}!{BNt%-zksp$^lij;n``!B|Hcb1HMHN_)NyoZ69zp>Pke?!xT#eA?p%Cj-bKU
z24Pb79yAD>M9vL@DI$!4I^Jl~6IhOD9u1+0CO){BIT=DaxRP$B9avkZJtevXBclE_
z>%dxpm}SUrAL*dkFA)tfXEbZ+pOH5yQvmiN=E5(ig2EG|K%r1UAy#?>4S_CUstF^7
zP<f;qYw!w$OKr9fDGXeJDhRHQMZzZSl8}hyQ%I??(>;QAkWcZ`U6|F7<?(m<K(Jsd
zgKIp#3E0}<YlkV5VNUIECP4izsu?mKp2K7XUM)p^o1TJHkVk5-O0fuNiOMsOp0YDw
ze5BB2MNc-A-+Y_y(@!8eeag45(mk_*UOR=^zC}~7qlPPVYbbB;vAQuJUteWu_#&gB
z$23#WId#^d_FzmCuNUcH{VY>sIWm^!u%Rg(b?8wCgAom0Hw3Sdp$GI>dFCRjm+^Me
zO?3)GS2{B9&_N=M!!`}Ra)`3TRSmgp$droLgI~~CBcK*FF~Udf@7x^7JA4pn@M-BJ
z5+@RME+kP|`~8CS^`vtIav2Lkse06Q!nAe-;pr;a7;JEAW0|$3oIDq?Xb6u8Ce|+>
zHPEzC6sgh&AEMzvONos)YNFQ&atXQg^t50lcsxyN4m?U(bkbu+wA9XNI;5D4kSJ#h
z=fL<4k=pa^IojLCVy>iV(bCkM;7=Nu#UdZ(uDz{hw%zEqn!l9s7}Az4>6ai;%-cLV
zWPERZGEx!_ZzX@@FaI_E_CNZ&2&K_pP-NYRtcbQCr)_mhdpa?^&vj}^-z%lU)eVoX
z-Q;M07u9CFo*eo^AS4ZHP=d9!b;_z_JQ}fe`VvZI(M+oSxJbr4qJ6Ix4TTW%UND-!
zI7iXx&2#hK6Xzq^kn@GE$9qdtdPaxG^jDS{u5R#if8amnul~b7&%|kTqbc_+pZu{Q
zqeJ+o|LhT0Z%Iy_(p-FQ#w%ZW3wxZ=S;{FVmQ!bXT)k$fMpNp>kv%E-$v^l!-+cXD
zX4bQ`p3~h@43@_{(d{6L!2U5j@jZQpJ<YY(C%pFBG2KCrey>m7t&v`!vy7u-&zIhS
zD{trMo<^dPSacp;i0#a?zkf^~3JMLnGh}qj^2%>oE<IKA!!LDMx}o`#&p(AJmig@0
zKhOA%=f^*IiI0EJ4<NF~3Ee4LZL{LO!OgF1BZ%&)p^;pC?kRrnPk)Tlr&f4yYlp9Y
z_G_4)=gL>#Wpdo$y~3N=fEr_QHtMXLN!PJVOEcdZgtg3|M72w(opS-vKrKDvqZ;o7
z&OzNcy1fD+B~4=}XBDQ25f@6e2$4?`j-(TFvC&kzt;HlaVT;4j%`E-AqzF09;bZK-
z;W@eqjZw6+i5RI9834*VG8qRfQ`HFP2}&WnN9inT@FdaQ)XQkzUrXv4C<IywP@u9b
z(Tu;f52eyjBQ+r*I_)i>WTZk`X9&iog>p-;a4BkOaq=w|oF|RsLh>tEoHd^0Z4f-p
z+o=Ebp5QE1RWqGb=)8<kH|aBvvBmM+(wUAFa0`=3$>{2Jw2};m12#9;7z_p|9hc`m
z1YGT?n<=iWC=W-`>01V@b@BDfxWteGgp1}+q(RMj$E>O;rwz(_Ot26nnFxfG(>qQ1
zoYs1drZbKX#!RXjV=RsF)W*!uXDMZ@ovqYCq_ZU<c#^2k6)ifN+|HxxZ5tCN6RIue
zj@Tj%tx~q=X&iFr3kBKcKRb#}PN(RgGnFFaVts4}xd<4<0EXp3m#wW$M1Z<B*xKTJ
zq<i;!U2>(V>nRjnf|5+eGiGIj@s7#y6l+peu6N110fdUL!-X(Mfqk24-_n~?{c+x;
zp*|Wb`C!v|oa&(<A(3fmoDk@6qUH!G60JWhG_5Y00>(nPkh7fdJ%Xftx2B0EVR@G1
zDv3EZclD2zr$tHAZ=EB!Au$UU_ez-Os4J=Q-r~KUGfae#SPT1m2duAcu)4a!<o-jX
z6M(~OANw@~vsuO6dwUd}cj*>gR+oDOYZ0!&lnt{{MO_(e6F)cS<6^e`=O}}S6yH{U
z@tyYgt{hNXYMqenL9(FF1tk$hz-v%KB1(Z5G0RN|kH|H_YeF+YW;vnp2&)hkgiwK)
zf#@SJ2#yie4&FIL5w#wIStNCY5+OYzB$|Q`;7n@D6o?qR5WJ5FQ_u)B%p|H~K?%ZY
zk6d-hmX^Vm`0|iyW>Cjsag)a`AsUZ}Mf^r<95XjQh@>4XHTha}m6I0iBLSw9fZ>RE
z3Y~6R+8aYA-9;RzJyCBPMfHLpd$>a4Lyyo@Q0=0WCDh0GSw)Z@q{h_-W56{YTN%8K
zHuk<I=4=}9Mqnl*s>vIaXCx+HLZ}%bTR4AuqHYS6;!iS3-tCg-g8ee!CS5wgp`Br{
z?y26sNxrqg;PJDJ-+GsxGvupVxO=zB&pkzXFlDfjQ9ZoN`sri*!vn5<uE(SPDJHVd
z46weY@l`xTjG)0rZD#;)T6#u|Oo=)OA03??I2VhY6S1KY!A0XG9krV>%gHm1&T^EF
z(KcFpw9n81wCJD;Nk<yGRw8VTs2hYanBd4oK*&56S>nN)si^t2t_F16Y|2Rg_CC@)
z{fXfqq<A9X03p!1X5a%$ogv*pL8iOpIwLCrxd!2Eq(IpOhKXpRlWdz)gn^hNwGr)w
zSODUsD<Yh>FzJz?LLie1%5#wI@De*AnKED_WmL4Q*!bE*NQG9+qfmrj3_;)8hnPF>
zrq-byQ_~-}h^bGzfT)>mt()VJ7K<8bXro29=Y=yVYSKU$t^Iw{C$-<2a`H%<Z>jjp
z|LK3u|K%_LSIE4GIx!;EX}(=P;nF)V`Yj?RMOubP>e-Y>*KaaCK0>yQM*3dw7n|_}
zV$$#RnH(Qc%}TOfpTXLCWHikI=9b1b7u7D72krA{bvH@>rB!lFea`{EK(FKT_k<{J
z$KyEK*=F<XDT2?q^!Ri9nLqTG_$z<szu?Z!4x6WY{F~PgICnbmZ~d`L46{D>u6zF9
zzj%#O1vXDRPCsVxwWF-2y!g9&T>ElG-5B2Yk*65Dnn~!8uWK%TA3S(?Ol3S(Gv(-!
z<jlnZSFX?4-FEm=arx;UpZ%I+a$MqrV`-T2?Bj~8&`f6)N86IYAR{N9XHr>KH<xf0
z>PIEz)T6qRPF~_g2Fn@Ytmo$^*Z5<9>ihYg{1RWjy2sNmXfA#H89x4FKg`iC2stD3
zduVfqCtiG(Km6x^gvn7xSvf8~aha|2r|5Qays24VJ;R4T^c-J!<ug3Ib&p^qScU9J
z90uzQbz?BK#MUu1-rJUj7Sa+W)`?cHCnX3aBVwXtyOzban$@btxai7at;40<L#0$|
z+<lbf2<Gm<K}1J#Pe4eA(vna+Ox>`uHY9I)Jh`^U_HUHje|H?wqdE0Yq<&zLC#eu9
zuW{pmZ!EojhfXJ_b83^7)2r;<dBphM0|){az!Ie;LdAR6w1}j%1ZlqICrt1GmFbuX
zD`gzYj3v1CT!?3%v5sJUJWJDJ-Y27)`FYebW+1U|;<D4Zs7u9$Io;QX*8C?83*HgD
zqplofRbqE{IeGGhc32Y&S-#c^DFxO>^yy%K#Qxq9O2t^H{-DQTIApjqVAx+mqR>gB
zFGQeQ$hq6Az(=W(@MtYTDU7w$vxcUM&fUf}SRZh~L@CO2#_?pz(RfB(HPp4mB_9lz
zBs(rKKD>2{)4Q#Mt?(up*0qRo>!dCc3r`6-e~;%zcJtwtG`z|<sD?$QXiqQ+-~u|A
zbUGb6c^+#=@X<9_rFCvd^-oB0spx9W*)v<w(bsuwW8<*q0;Z|iID3X*1LzFZ?ND1w
z)i{jt)OE@6WEzdITr^B0crvLYdjXOhgd{LYF?eD@^^FwO$e2=6A*D=bu}G;1o+K!V
zb(xT|a2FD!SSH1Sv6Idd@BLhVE5sa*o&SEH@>cUA$JEu_y}h+vY0=w7scyBOi)BiC
z_4;%lv`;CVSTHLkF{VkH-^jjn!66(R9gT55u(7qlz5Clyk1wKDd+sbA1l4rL?Yj?I
zUS8tm7oQ>T_HYL!v*Q_M-QZhKmgF4lT)OvMV#N6v{X^T+e}_H3D+ko}kZUPyq(Xua
zkb78`s61^`>()_I5|l(0J<7d3^iqagUM8RsvPY;q!c3vqO<8hdq&dXzPH@Koy*`W*
z)yyCm5vz7;oJ22i@LQ^7q8@vPZ3H^&gU-Pm;inBkgB&_SPos-fC^iZ15Pd;V97xO!
ziEW7b)t1l?%)7=GMNPFy&WBY?=osXh0wM7ujk_bGTX7a54O6CVl?romene;_!3%<P
z34zP-Buy<ACpG9E-bk!pp=bm_1wxScdKS&5YJ;}{Q`&f-T7h#O(+I2;G_}Px3SWDy
z7$N;lLU)x$Wf&==y*tr?R3VW4A!=!rVr`v5ON8n$9zMp=jjO2I@a$HB+Mm+d=;9B?
zj1L<Ik6&Q=_H7223#y65U3r7yQx_OrzeaaU;_mIR5~e(T1V^_Ya=iH*vrdmD*^P_P
zL~Q`k#SkBG{^T$rP`<&rG{`43&-wr=p;#`u8CnFjkcd#x(mFjN<<@zEOYgZ4k-94b
z3XRSLnd;NgJ^I}qeI3YrgRo<;4H8(&yQ~%iloTlGG2YFQyD;Bq%0%;$5#f)O7iR;R
z2z0tx%Ct3!9&aL*rdAO}ZnyD17L7T=+m?49Am!MGHX@`QP9l7`$Vi*tyna5cMUCk~
zLjs8j5hvn=0?$Pn^PzS=aGl8a`&K`sN_wI>A{_6%|CO0UOni-lb<4<=A`P(6s=Qlc
z2`Lm(#xVD})2(PFsmY!Aw{7Vo$`bEsEhW2mxB1L3{u00E5C368a0?}rSQO*;JjW!&
znMZ6$MKzi5;N9z#qe=QS7Jk0bvOiI%MLc)1PC+>y(NrZu6`VTzI4&N*D8JCWk)F$M
zA36gIT@pTEO+yF{C0o6tSbU6gmPq`0!UckHaXA+P)nvkG|A5WM&f=Wq$qUc&SO3$$
z$v^q!|Ak-t<$umE{`?~@Jg51+Khvjv_>i}6Px#QMR(Nm>SFRiO4oe=pB-mIhn2t)G
z`uGylM)B6pZGQc=WA>+((p2bfpx;ppdVR(dLCE3VD<xG0D_hI-3eBV4384-+I#kXf
zc0IT6muwCt=TGO{>`qwKtI@&H8~PgsD;t(NOv%<Y+GONTaHij-oH{N%og-9WZ|wQZ
zzrM;Jcy7qY&tB&A4}Xf$-T|ZWDMWLXj;pBNd4<u<dxZX3p8BDWVEqbNzJgR8e5eU_
z9I0eYiO<K#;x1~vq&Udw_4=q>5TwUCLsi!_wZqm>PE9l|s$)atTWx%MEzwdD+h)F{
zG^84%Wn}9JksNx{J(Zkim4svj5%sA)u2zL4?Q`N8;jO_~%kt_5S;+YM=k^&NM!IH;
z*vxg^^RpqMKLY4x3R77QAC{QO9)m0c1+ssiUcX2G>?u~3&oSLTqC6hqb5J@`gN2W}
zH%Uvs{njtRn#NEl6)Chb>Ch7m&{{V??|h;GVi!9*sp0@T*G2|EZ@$~cH_Umy>2^$w
zd;G`fF6zC9M3D_LP}P>*-TMfsu_4T#mq>?{@XYHzgk*FT-M@YEQfL}aeK28ve}o{A
zYfauOD6$x16wQXxB0)riF_$`0!L`P^KvmaFCNrjGMcugQ_-tKrs16YU6ftXCrZZCK
zDn{Wnc0Nc4AtD$M(u%QV+@xhcY3<vdv+c4psw<+yZ0kNQP7J4uMXZ?DB<JIxqY`s2
zc#yfK-%E5}l|Y7;?wgRx<gba8kn0#XqqOAg=`&<H=0BSzqQp&YF}7xTWjX4EiyWyW
z(@8_qSd2A{Co`H^6Nl5(!;tEUQYyLHE(`|aWwM~j#<J^@Q*fY@7x8^;%ONN0?g?|B
zcDWhC+`T^F=XE3iAtCl64fgZpu4q?ZDTYi$4ZjdFE*QK|gJJp%Phd2Qt2$oSt(QR>
z5ayq4%ybTO`v;MvfzG*PctxtEp0cc%OeU<Zuh8ihlx2+%0qX_A#p^u;2*xuVPq}ve
zHXADgE<SOIP;{}3qPw<>XsLBJ-rKF0NI0QCrCsuWJKQvWXCL3418TZDBYK}qJX+i9
zC6K3cWS-+^Q<$_|yV%J2S&7*_MlSadc?TghA_IJbnC=j=7%L;g9?W*J;|6=A5m|#C
zOh7w=n<ONuAP5uJisY$KM>nwG1=t4R1ERly3l?D>;HM5}ClsA7WIgC~@IqmQLiTey
zk9ouh*a2%2*)PPVUCe0=$rL>ObNqJ=p@h>O*;lx62m6-B9~ZHQR`I<eg_O_rYVDyM
zT1I6a4_Dy<jU#Sug9>=5L1?7t#5Bh77*Y60bH<?jEN;ANk2L|)#I2?^V2q+Fp>71V
zRnYA)3mG==qgDp=hJEPdgiK>IjR}gn5m*!0om$+ZN6d~I4#yKNtiR0B@oPNTAMxZy
zL9;VKt}0{**t-V|9^YX6?whQA-~)`V-(qy-3jMR|RQLCB9fzn!Y%Uu<DUbNt{YS|4
z_p!fxmMYl9gz*R&V{`Bzyg~#;CVMC!FuDM(5FWBnBSJ$HEK)^XGcH4k4p~=_<$YA}
z*c1~}J7`QpZ7Q17*m)5r+h7BYYbYy2@W-*Z5K%PUE$9?IihRgg21kwLVL1g`v)Wgj
z9xQ`P4M%%_$X0vdu|5u29*+|+n$?J-!1}3Waw+I!n#@NHT6AS5BI9_3mjvPQh=kxL
zr{T6xv_oqe8t3_&BI&9;2&vL@iU>+9HnI7U5c6H<8j~11A<_vhTk34u`ijNL{=Wj>
zzEzJR%M&#$%n`@-AtiG*gGf{^mC%iLsBGy)ErQr?<wXc_5h5d%t8GGMSPXD#jx>n2
z--aNOLX&3_r7}MEE5F3$r!Mf!Cw_3Qc{-VIy1<pg$x#mwRHF&^uV14cjS~Va=FXCB
z%m5+jnSFpvQk4@-)1V5?%GPOgrx$<b$wnnjq3x1nzDzqom5v#W7VAvX0loLRwywSH
zlNLUth@~{>B#riiJGWRnwV9gKz{+rgfA?qpI6wS-Kf%xc>M!u?zwwLw%e*A(L8S`Z
zVa;eyP)!8AQ=Rx33<L#^{n3QG_bYbx1CorQ*P*}Z87x7VNp8Mo=x!AFs^;JjPMztX
zD$k?4d-w))mZew(@G5XTkv!UyoZ72-{=<D9yk6sK&C+SfGap!CIy$16$w;@94z%Ln
z;S@7;EMIh#jUw+Wjvh9g{#~D9W&H)p{XIfwlTLo0>h71Ayn2=Lu!F90!s-FD)n6xj
z>BkTu!<!kC$u<f_(OHGsb2z%qgV`-U{s+%<{qy^9yGLyuUOF1*=~P|nQcwv;@D&iV
zYHczRiTd}JMk?kDVWmh&NQ|DzGDXqR6rCKU1kOpUP5BnBGq;mS>0@Kwn#mz>wl!Fb
z(OaEPA7^VG-P@t6>x4oiSp#kCQ@pm)is%H-&(kdmM$R)np0FhZrt)OY(@Yy?V#fBp
zBP3AB3<^O-!@5$eW<5Ghe9kg?TxfYR=lEOmaMoZNHFrU7&ro9>#+Z1`hXqA4dJV+a
z=tD@8bGlj5IqV53Td}?GS{Z~$!$Q1AV@_&pmRqW!^^Ur>+_`rz#;={Aa<*DZDFvWV
z1XQABdXXH3eWXga%ZfnEAQwszoTM@qAL25kMb|?hex|K=$l|a{jeWp4hp|z+*A7!H
zk_K_%mgz{t_SP}O<7$5bloIoPG01i?DCgy)9Xx3r@Y}erb|7t$>ywXk?9DTx6Do+0
zS<zZ*`hz~bZZ{eerL(dfYUe|hkIzIaiB?Iw8Up=phqcwUILtd2Y3>26Gsr^H>2|Q?
z7~NT7Y8z&C6_-p+&2&7&Sre~@(E8p+#8M`DuOqzYy$-ETQ^b1UUCW6HD5X=~O)Kiw
zML$^px9ncz0{!$Y<EcH$Pb{0`SuPinkP}jnd5uNlrB0?q4c<Cb>K*2vDJPaM^Bw0R
zdwV|bMX3Aa$F}&~rHpRsN~Iw<n%o&A`+Iv_e(EWfhJ9w!5~n3H#LQu-(mjE|G?s(i
zBi?@JCjG$@Yb#6WeiuI}aZS=k`}qIh;<XZ7nAb8+c#nLiJ-#ak)YNB1G(ZG|2B9N@
z$wq-Z(*vgwy)x!?3K4VJLcrWR#O=(m6JQuat3_WUh7!*>Ic$1@*}>V8(AVUhKC-U~
z7_bhSGG?Rc7yyi@Av96nKo$?dv@Jn&2qDJ>P`1L(EY3jQ)S$ZQhMOSWE<!fQA#^Wl
zeB*FO#BFj4c}AWvg$W`W@+3B_@E#!>@-qhN0((7RZ)gH}+!||%Q1RzT6^Da`6B<b`
zs3USFRg@M=O>hPwH9<zp_#hQtDSQLo_nF<BVx6Ts%qY66bkAt?mcx`Kb=ff68!<bw
z92w1QX#=r#9(n2%VYp6E1=e~d&QaF}ckD3D3~M~5wlRb}(Nu7;s9I%t)T!7y^<f^q
z{%d5Df=jCzcK?uKbrp9!hRoAl>oa=uO_rX0n(5V>l-Ktt)-==mC2FIK9(dS3;(fyo
z;o)m!-m|}ShOyE(Bp4gr`8+sp30NFu+&oK3^Z`_OIv_I&P2AKfLF54o95sdRu)elR
zr>~JoQ&mo;93LNZIG#|=YRbAHIFE}T6p{C1<NNL{!8xkwl<9OI1Uy8Lt07yP>uj99
z$f%mJd$`Nlu4W)Ibjnr}L}Q5K+EH4;<Y+{p0_&$wv%Eg!&W)SA^Ytq{{rFRySDOrS
zff7DOm88Z-Chbhy476BgNM;7n6*N+Pl!)MGtEFs>a^^#28eC*c>md!(sFb0)prA#a
zhl<yyXpM8)iW-}b#c=$se>`?}J^kLO&5@yX!}L-A8yR5pO>s2OV4kzH(0+!w*O5#p
zj8v^ACcT^9&ofZxG_pCOoE*XlWK!XfNN-tPUgwoh{}V1e^DO<X^R)Vz*p#&$LrZCj
z#n?NCscRlwdzX4VMv@`|(&oR7zzhi$3*I9U*s8|5n!M;BvkpsZ8|kBMsd6M_;=}>n
zqFM9)dEPd}&AxSHX){01mQ9=Qh{lS<2naZ9@zy4)Z$y*I@evR2-R0clmuYNC-5Bzs
z%em9%_;Ww^=lIc&{V4z7pZ;xb-TWH)S%qn)%q*;K78HGpE)>hXK8Iz_(WqvMrn@C6
zia_4AR6B6vt-!1fY@RR3h7J42P-GH2g^90lwM3ziTC>z&V|TO%eTD4BP5t3ap^j?K
zypUtQXgPav6<aqPKddkzqqnh&MiXX%@$Q&zzroBT<+f$8mXUP_tY7#D({VwuvIXHL
z<KO(BQFkgj*(Iv!0XEAStOT&T`0+KW{1k?aMpO*DXOJj%j&E>l|5X~j%~BuU|B)5)
z3zu2jI)_)DYqzd&`|4H9)Ukc9rjz2Wj2B1)P6(WfjV&$fq1EKcSbMWv(d!qit}U^C
zYL#w3PX;iC$)sd9t8tBqi@G#m2^kqAJ|qtZ7cj}~ayDyXQ)nU$HL^5(T;!yIRth<H
z@oW*Pkc^Ntj;_i$tj+(Ay+3)jEXmG$v7a}4@6+8j`pB*-Gb^h{q0l#M9x2kEC`zR4
zH8jzri3XBsAhS1-<}%InUueLABpO9~6ds8Ok^o5%1c^dZs6zEw*=I&~<F@CV-MRbH
zV7c#e0(!$rgM`v4D~pVX8~2{Gw{Q2gzV$V%s=$}N_&R=8)At?g^(9x=8&a@z?a1gv
zqi)8InXOs6fmw{YPi5~1Yku<kOb8Jeu-@&AYf@qy)gVe6H%QsRS7P3WlgTEH`PrFV
zOl6WAO%pM81guFv4|#f;<7>jm7$R153ir`RpG<l`*=wp4Z!&puE!p8#)HyZF0+do|
zGTRFv$8R}{^~#2*k|5Sjjkii00BcnaWr~?w!WwPLZ61jxQKnL9c8sxj?`UQ-Dqj)3
zn#V;EHJ2)lma~*&CrwQ^l>eAg3iIz@n3hhrKII8^&XO(dP|R$)7-^b@qvHdXiv^YQ
zdGc&Df3pnur}{P1ymOq_7*yKgqA<UGkHvDSUt}#I!I*)E<6F0IR-p1wIj*jC|BErw
zZ(G_;H&IHp5mEypmnXQf#NHJk(CoR#XY+&<M^X$_b**4U$ZIb*rzw@x0GaMZk(lJE
z$_tFAvOZI$*;<#+Mk!O}vOafJ2G-!5C&rN!0@3>&rJWZUv6D$M3VJElQO=9q*K0$V
z%|?wnXvYvp$!P>mri|HSI6pt<EoV77J>v1Br_<+-#$uyn;8>94==;P+A3x>r=v}`2
zx!0L37x>wlu^rSHDS-r?)}~Y%b(x;ADI@kT+3TmbK`jNDNyy4zmzK0m*xNPfq{eJP
zDo65)l9;4LUwXWy=>QW+Pdji4a|G!i!D@jxi^y}t4X`-GFK1XgCt2NuNZFtOs}oCX
zZbU1Lt8|k!6^4N1s2kZn1HVRGPf7+O3ry_PJr|JS0>AkLNi7_#Fv&5$45T&?d!62i
z*%fb)0U<$!>&74^Von0}v1NR(C;n)G85?!p%{dar+Q~MZa=2ygg-CYdbOyZ7`oZGJ
z8BwK%B^ifUuZ8Uxaie4CW<1|S`o1F?gPAvQ^bnT!89vjnaSO)ei2=4lVAJ&s>n(k_
zVGN24O4v0aiZ&%As&Ph2iAd5-a~yddT7KuFC;W}upW|Ck-{aUgj+YDiryH8%1>HyI
zoV<BTf8Ei4{FwQ}1sC6WhH-~9Cl%*EYFWL0$m*rY@ZpA64<p^PZ&D9y{J}$>c|$Kz
zqedi7hi)Peh{PC)l5+B>Rjq9@2tjF4xSWRumsgj3^5~Je`#Fnsj;gMy8^dh1V0Car
z+YMabv~=r^6k@*Kf-#=x*fj&OQ7wdk^ns5aeaL$szsJ%oIXZaAqw`06<-t9i7;Y{v
zxekf-_KK6$iU&u>y!p9@yz!;CSZ_P}cFpqcSK-gz<v)DyJG^!0E`RkaUqi;M^~iB3
zlNmv#@02A&$}vvPGUebpvuCHDL6c0&QHrc1XB(AOJZT;^3se8pW$>gxud-JlFy+Y>
z_v1n@{K>xV-adFC9>r4hQrS*_@35Xu`l+xgkuez~&Q-}i)mL&wV9MpqkvdAfN;+A}
zv^1qj>b0LWo?FYBFT8O=y{h@vzx_x2>VN%z;Dhh{0k>}5W_eGucunTq%Dyvh5}v&C
z9{pyMi@;o5*b<>o8s<E;6d{O^#(`>4sa~#`QPs1|y2xK|3cc9wO}4W>ckl9Ga`s!&
zCucER!hXqN_KJ>t?~Op;wG)+DF#!FqNAJAD;$X$m@e$p+BZfdVn-gN<i?4r`|I`2O
z|HAM8(Qorle)}Ks=;B@OzV11=6{uE;sxc59XXhgyzSDElL$$J84w10YN#4=@Id@)x
z?OD&W9|KZh+J?*P9{i{Up9AUi^SicUeY3$m(9A{gjz>=h?)ob}_csn%`M~DNa`B|5
zKAn?pJoB_-dwEIa3@5jbdHXvT%om=c2XlsNOMf%qeNF72v3dSY`ggCW&m6I-xInn~
zYoFt$f6lEhy~f#(zl*uuVvcT6nK{F7O*fu#6+Yn6_z9zH`2PFvao`{F*MI3vs{R&&
z;ogG>{MxVn5;vPOe*Ztd&G-KO`>d}cTN&~F$T;*wtJ73T<p9omf>lR(XC2L~;pp_3
zlRHN&7Bv=O9HSPw_J($2Fd;GwBO-<{1iH4T9|G-7N7oH>+n%8hnf9CMcS)HX)_}EM
z*G9@`6<Uav=D`>T5}7-LBe01r#}(XcTf+K^dgidT!&ipw%@&&+w(U4D!fDlT+j{B{
zu&Dq``eb}M4Qjv`!`P4d{3ju0Fh-JSB*mzc!{WF+;d<8YjM(z?Hce7XL7wT28YR6e
z{S8K^IZ2JjVpN7gLg?C-%d6`hy`!)p%J+gyE7quvLJ2jH-GxiDl5)MG4XSlnMnuGW
zrTNA@g44`q^^+_4?vfp05ra>HiNZ+!{5dx{CGgfVYi2Cx4c0owG2w?wGq}Anh9-l+
zY+^ZGw32)O0vTujqLx3+-y=uJm@+oZ6a5qe-aC$tk2yLxz*ogsq5NXBE+RMY8Q@J(
zji<&CkU_8IC<!wJj*gCKyu$=FP#n|9IA{ZP`}SQ;1X#@IQ{<){8T&*Ck@ZGhV<i+K
zRbIc=;;c?W!_Mn=4?x^q0d?=yX1VgS3FO4Y7h>)v|B3RfWX`QAjT=no9QnPd9#*H`
z#T~y4NQw#AWP51A*PgKH@-vY#eW|R+-MXGW!StEgl?55ZDbBrRVGELVs5?EV*2Rt}
ziS?$V>v~R)kMS;3*sYmN30(%IHD6X}+n)E{{fOiFoR?p^PqkRk-E5IgJwZhFYurNh
zW!I#BN^G#d$k$J2gNnt7qWfY4@y$b=Z{UMV%;B6kGuTbgOt(C(D8&GJK!v}cjTWRL
z4wkSEuo5sM)>TB44MfrazD8zq#4HIks>hTT6E{i?9>z?KsC2Sw!J0WxsfN|(jcr(y
zq{AJwSa*Y=$5a8aHMkYnIZj$6=@dz314M&86`E_IKMP3oBx@!!m7;GenOO$Fk5ms6
zc+c>`fEgN+H{jG3d(uth>-XZWJn`xBW^PGV9qy8;CfF}wNzM|z#UYF%w8^j$&!wAl
z@N-|{)+;Y@dEIdSY+$n)xw+ZW4_o>e8AD=>5JYXa<7C!==6#AF5|nl%f&@d7m=V83
zO#0t$@C?fp-@dxytB1GvgR{r{lJy+V4cqH${JduU{&SX}xx>xdPdTWU)VCWhKiqJ7
zf5z&bXZ`LKho7|^oLH((HAmk+`ysbXM>u$e3vWqPjk8t04mz1iRyQewS8cCJIB_JC
zG{(o(dT<X8Y*5{Y6+#^7+L2A$adUIcv!_>VN4>_*dnz--34qHEzNHznsOeUvR2vCO
zGcg{U0^T^fFw#B$kjhkiV|d2t;UR8>quHEa`MEDLlgPc#-e+Fdq}hzK?G^Rm0q=hM
zN35?~{`%MdDk~c^eGU?FS_?TJWjdH8kJzL{RwO+*baHC)Ix!OtlJV-SOUbEdQ$v?+
z?#&Km)##W~WMyaL$z9Ou1d|=|sO9NTerKP}o9wD7>|z_@^nR5}x6{s+_dFdy+HLAL
zE6A)Bij2k-&`8SkBU6}DZcy_SLeL^C*J>QC4tV4Biu*4g<7Qwg$Mrkk<or+nd&2RG
z&;G6dJ=V`bl&Vx3Ux_1UAAC&Twz_ZTX^PEFb!kXNf1|7cQ6$|}R$WYurK+=*s#K-Z
zWOR?0OPOx(`zy=Tq>g>hIF8iKEZ^I_hn2#Ez3GGzHaF|3@SqSGqhM0t2mk3C+<o~K
zZr{7j@BP+qlL-9cum2j0!&7{{<gfkO-{lv7?pOKu-}u-3%isUUL_Xs5-T_yeo}1RN
z9eaitu%TjQEvx&U>bL<v&|Z%`|6yRbn8Dy#)eG8gK&<2Fz9+;;Yy;yq(Zw6?EpM^;
zV2dSTP8`7*5;M-P*37GpH{ZO&{BF(1&sw^RhMVh4#`z;&ebdmaI;>f7a1?2d9d-jp
z2M2ubkA4$xJB}V4vAXpVi(mQsy!X%k9kr{dzy39rKl5``u^}}z>-a5r{t-j_F1PM|
znbh|97<qL02b{~hTwGjW8Ckuy;=4co=iKpsh128LKq5nkM3&fYpfL^1YkV3R>Xvb^
zbX|{4*&Z&k=kBXOg~Ukc21tq8RjQd5<(bsJVmVu?v-`MZ=teFt)?8lPux$s1t;T1>
zBswk0ZkH(qZ8%NNUCaetnU|{QJ+mn?9nn~e8MfS7u1E$pvFDZM5c|OsW?|IAoy5{R
zj+|$XWo`_$wfJQ4QKO)8@1^U_d^#Xl7>4}3m!>66uF5GT1q7nH7%L4oOlP)<rLLJQ
ze5OfidEB7vHwC0(+E0}>&qUvr?5S|Q-q3eJyNv8|TP9pZ&Ol*Y!Hx!t$$zwKgi~go
z*y65aGULV@jCHul;7#|cC_4ZuC7rG%$<Mma)X_cjJP~zbt(=<wAP!5Sww4h`F#5By
zAMuU_lUXDC*X)ITYPXB!#>^`4nx}?J#f;Nb@#tv9@zD`=Q|q*stX<98R-N*bn?q5<
ztCXcHPNftgh7clSic|=vr?=766(NM47)H$wb%~?H14vs;wdCsJhHeOqA<%AH)|)}8
z&=RIS-Da~V>l~3hK{X|#G_M)612l@WtiFG@*Hr1(rwkyZRHmZ(e3+ciJ7GEds*`*k
zmiP4HOD2|7p%Le&8Jj&WLKyI=nJE1GZK>qvfB*YV6G<r)ZoBjD%Z5zWTCJ<Iv1H0X
zNHWT3>W9el^K<T;oX|8i+iuVrCr{BO6|hEGJ(3JJS1liW{FLRa=J;TVpDpM&odQ5<
zAN(!T+#-94e;GZJemWb}eS@Urz#KHBb-+{=vTzU+rW;9v{JCjG-X*+O!;)68b;PuZ
zblGG3h!8<Km^o-h#FO1o-Ncv2dQx5OLY@s66I72Hw}`1A3`iU>alrY2$e>^6EOucq
zt6PX!V$vL`ZX^B?MtZC>$PCOKLp?O4O(bk`fo#o0%TpvXX#*Mqa?nxT1;#|$R#@MV
zYSsD}Z&d?m_Rzs%1i%C^<>pin>(zGL0j`D^^h3I=^@)}=CNA3!_r;gE`}r^NnKxhK
z$G`Uje*bsgVTcXZG>igc3Pec^Ni|1N49Ns_k;E$cl-)U%?iER7m${@ml!z89LL_35
z%3*`&<7!4DjyI|UzWMBiuPx!$NkzX2gwep0HPz{Y^Y34C`|}6H<Bq5AZ#ld*V}5G5
z_+a4l;ez(v8%~#rFDBtT=kIeQhT-G^H)%$QQHSM5NJMgvg0Ul*9x*mYE`Y%yVNjcR
zZ}CnG`uTFf!O1P^sv<=Qn~}C3Sl=Yt?S`B7n$4zTv)yvDxnUSPQYuCRmTbMNv^Xaq
zPU+T0way-ct1FDD>B1#8TK@X4e3^MI{LweQ!^dYA{Gb1;|0CVi4e!7A3>I_ZSo7u^
zZ}8^bm+3d>wCgR23X)DRlg%a;p=5G0g<tETmXKs2x*g?rkM_B@s3=;N7h+HLMrD(W
zA%*HPL9Ub2<D@?=6V+Yj(x+lBu<f>!gK0U4OA%QdeaUWog_>HPD~jeT|C!Q%^|V&m
zm}IBlV&<UvkSaM&lkUk^mBxm|fW>lhGUt_77aZE>ERODLrg0b;e)zk*aav&)fv11?
zPdWM8e@;C=$xwmbtA4#<XxEq&DH<`GB?CpXma^tL#xVnOhLlET^-4dQb6DT}`OSEu
z4Nfq@-bq_nbj1J~qj{#GAE<mY)w+^oqQC7X<bp{;dvjCxaQeP8>P(C{;OR&2B4N$%
z{fmFj_uhGrfALR$gV(?Cd4Bb;{5r3_^(L!>L;l7u|6P9NOTWaQeD`<x){lROZTOI6
z;pN*W-22;xC(p0><k^~S>#$GZ=7})gc*M`}p<-McdN<%JSTr?<^BL#QZirVAV;zZ+
zdq+#&?mXfvM(+u$Q9-L<sONLud4J3Ie*Xi0>F*tp<OXC*RR`8rJ=d4erGdNmDsJ7G
z(LS1S_x{VAy!kn1&5Xm<3WmtV{60s2=WjBfA7W-J&ekp0SC8?}J<I!_Cz=uGTAn>V
zW9hGH;|1@Yz01ejC%8r8_UVkP>kW^;{VuPZd>*kEjB&(D!q+o?{DZ`^v&hl$9Epi$
zF{7G!{Cpscn!l)KANh7zH3Y0?p2cF05m;YuuxkZ9Vjt-Hk*muauCLZ?ZnivscFFa+
zBM1y*cKID6F{CUfDBy(K+at=OwVSx7ln|HqNwMm_6nhSuikWu=i6j|$qc+^Z>#pK*
zJ7}|~Lmu&4QEK3B9T(<?0wSuB{K>w?VI*coNt*O%nhzYtsGw}7_9j7}Q&YgYw9`sX
zT;84YwJlWKwCfloPyQq~3n_aRNCtmFI6uG0QYGC}lbN<Oc3Se>>=o@M>|$0XR)sQg
z$#M@P+U!=&;hfd)9#TH*RZ>!=++<kI)^<sA;D;K5bJ!5HMlePEsCs2%4Iw3ssgZ;!
zlBOtl)Hz;?8?P8JDMy9tdry)kFiiGXC#A;JDVOg1gi<34i^Yu7qa&K8$^HXc1AyIg
zx6Pn!lu4pM)bH(_*Zs^px*@V{d#tq_u4Wt`ALhL}GK9bo6GGtV@Q|kV#2$uj;C!<s
zM4=xCZq{33uacmhzERH~o6V6zuCFBDBeO>>Es`0Dfh2*Zu61+H5qNewr%M~Jg9DpA
zDHO=a+R}2Kn<ClCO<c}moqzx-Q~2E(zJqMkQB9+|RUS}f7jX98MVU>*i@)sVsypVu
zPNS+dU74z;7?JGHA_CD_OcKtXJ?E8&FR?mUvT1vgWQ)gGT>Di!o+J^2VI**Vam}MA
z&+yjKEEd$;mVQ`6K6}$t=j<-Q|DWRfm*}-~dDcTrzH<)2!NCj}MvS+}%?1Vyofip!
zW^LgtQYj~2MuBZ^{7k|Gi-`$Fz>dWF0yE!W7Bh^mFk-O@lGBVf@eWMPG?qF$U0VzR
zGp<Q(hl>^lOKe+gy?~jeIzGhA?t`3=!c4Q-W($yOk~5g4N5&D;8{9L9ebA;Mqor<F
z3E~WxD!{!Fcjo|kZZN4rYKN(`(6#x&n;v7cw;AQ{Q=ZyX3TbOG$>z=6kz9p|YQ;U^
zY1cRSuiodg|K<OJm<)gP|NQ6t!SB4!;o<8HbP)1{SrjdFdGcj3xI85aCQMT_!R?Kq
z3?`XfcyXap0*GpujJGsl&i9SOE*@}n-SVxgp0Ar4?*{6*=lsz%r}r25mFMC|TMl2X
z=+1k(F5#o0sv_5CJ%=X^k3ZaUc;fl2-4ZX}RXXv>>s<MYF^t+=SuGIMJZQ&VjN&lV
zm|is3ty0a5bB1ah8Tyv>W`meW9|ECI>hPAb2FqGzhbv~Q15U;VgdlYNhOWP6vu)W7
zmxOWDEXCxoj9NfgOJ$mDXwfob&9D687kK6VeZKvTKV)+~a1{o&+b#d$H~x?ZZ@t3x
zCtF+?xq0*fZtQYEI8;vU_{lk(Mq<8O8OTUQvyvtxGUWp>C8ghqSiL{VPWlY?4i8iG
zkT6lJCSyLErd-j|;O?dY`TiSu0U0fg?WbWbP}j4pG0l#`W~W0d(-plOvZu|OI9*eL
z=+Nt%N&zGK{5oqHL!uia^TT5V;rx@wT%Mih%sJui{S{yR`CsOi4IIQDaQpsUYPZIi
zhcx{U7_QbCt)F2eV$A|~{t@ke{1EfTU&Gz{3}$soe|5#P_kXN3vXC86ts>tksOf|3
z6|5-*4C*{sFB=7|GF@9UEX|HaHL)^sVX=44?y<h60#`&B`hge*d{s>}uksIeO{#(e
z5(2|^3x%3Dd4MVHzi)ef{G+$|@&D}yTwb&o=Xvd|H~7}Kzs2wU&cEZ~{Re#c7k`1T
z|MD;J@YR?2m7n`B`1vpYZ9Y2t9^ZTW_xR}MZPxWA_m5{heCaM{H*21JxTftktV4j{
zsVW$)P&E!yTe`u}Z7lQ3QLkq7L!|8kw;!&ESY|BguLEfosFv``{W;Yp^8TM~5L<C`
zdClzpz_TYK&)+e`&5Bh$=i%K$-nx5-ryoA!cz(*ur+2Zz({@|pW((F5-603}ZsDtn
ztLsaS@4muK_?YG45#Xrnnk5ypv|$)V7R?<F&3nYFOIC*gq2fn>c*Q&aT}w0nEqv49
zYR9};;F5=N&MU8dfpNH|i3UjnVgh4=*=&Jn*V7}JvL@2mL^H3ceMP_Sc(&QHxw)ae
z+R&{#rE6lTDvt?~11el?dA`{ym336N%My>IkF#S}Vw%YuX!4uRHGnf#|ADg*0<{%R
zS1T-um=ZokPQ7Io!i1J6+r2P5a`e;W{+aEBiu9nA;-7+l$uJDKqy@M0d9ox*=S?B&
z=gNdL(*pH5DW*Pq|6TU!+)bujB$=E=JxL}qYe|79WzED8^RsI>J39m8D0riWRLXSS
zjM;Pa&8UI8*1DbRIJ?M?91Ti|4HK*XtrX6zTP;-TsTqW1wfXT;V@{$ZQm9m;8%qwR
z?z7%nOwzB{#aZ;0cDMQ1Vt`~P$xPYvY+(fL10g2sEJ63nl$2&_gjrK@e00oWK4+J)
zYE;@Y2`b5-Eg~u8sj#}1`|Mzy5Nf9|V%u&BF*5UWmIo_V%cbT%4}sJUgg)XNoZdRd
zNgxPZTyN;2Foa0owzS(WPc}6=sG!E`-s?%ms_EIZ6n5uv(SnMkDL>VFKN%iLX{_=M
zlENY?+R_Xrip$Ka2^87oTc?uAn%D4)?3!KUTR@LO{k6{TfEu6I-y}6C|Em1s3E`an
zd|xw9nZTFaRjbig>R6m0T5!4v#TdD|x<U|6PL6o|<oS*<l_FwYrrnqC=ZG=V-fVdK
z)U#|F?wy{}tmcHiCv3w+#3<KAXJ*A$<}Y89`cv7UmUSsEQ%^1hXA)_y4;o!hyxu}`
zx-q2^fNj88?18~8YUl%TOql4PZb*KPi36q&(2rOXNWR5XTTE>bH-p&<Hy5O_m}W6K
z>59y9YD<Ha7U|Zww#N*WQaYT&H4S!d5Z@piBC-VQG1h}WBDr&@J>ut(F0nU(>ev#l
z^ue~K($A95RBTWrvmUUg25A~(R9Xh!s(F;P6I!g@Y?-2FPJY@UN+kjU3^B`W)tSv@
zeMmsC#Ad-KPv7RVzxZoNHS*|3pYZ)ZeT2x2?aj4D{Ft5fJ%zHF>4QbfD=tA)L{w8!
zw5XUw<q|WM5<oM#OiGxfl$a#KLOef|Id9(i3iD^*;}33HerDOQhyj_^Jo$LbgZm4v
zq$R!^Ik~&w!}m6v9M2iY$YzY(KJ}d3Y54f*!2MeuQqOlTKH&B!nA@-N*lJvk5D6(G
z{F6i6h$N>Iau?MV+FHbVD(|rlyqi%siK=d>W;3cPGUDjsKoAHqsGc>Ax`t}YeBQ7y
z4>(0jwe6<ma(%|-^(8~Q!CNrV5ggdD<L>DJufF;^@BHvRzWK*L;`m^}m)?AXhj$+E
z@XqJBX|H+m@4rI|14A6C3&I(ds%osF=z_^qs1oClwS?uc&l{?qTyC;XSL87ln35FP
z&XdF)O>W=(Cx=r+bsJWT&=TmKt?o@m{IS?<+rK087D|vHpTfre#V@@@aY39s2B+ym
zDHyYB@?P<(1~6JImCv6fO;HgNvmV94tDn8k7k}=TdGN-YI1ATLpYbpM*}vk0x4*|%
zzjU83eesmTMMr(}9zI+kJjTU=SRG;Gz%reyTlw;3s4S*^!hGHnyEDc=`!;g=8Cw4e
z+i*fy8~SK*ZjNzYX+<#*gVM8-B#nM4S{b8eP~Lkj=CYpDWF3><f!%gI0T1OjvitM$
zH5!vEbnS*DQ6~t6zFmsw{555IHS|4W*XJp={`Hsw{m}8@hwt;_@BD!6%@!kuWnHrz
zYPQ{)mtK3wSv}*~*%iO>Pyb*1lYjhAIlXh2FMj^ZeD!Nz<;~B3j$i-Tui?zVv+ED|
z{wLq&gAacM>5N-PJz*YbUm3aC1iEhI`gqIL#X#33VjSr%4V&vNm79@B9Nk(G=PgHf
zJVOg&0%ow>u2)p8@b(WP*3>-sy60@W=IWc4K2)sg18&KZ<EG)k{D^n{Y|XMcrm5z5
znPbE<w4=s?S<7N|$Y#4Ht_S)g9Lb8+@*yEcn#B_9YMfXuE-y)a&z<|P^Xp&whdh3^
z<?55~60R(t{7GVcRnv9X7^#@8=Jb{7LKps!%ZsNtr-k+qqJkgR5c-ZZjv5Cd3iA3|
z{Q~=&HS4pEaodrGsH_gFdEhBejXGlpdGakGF?Wtl+h!_zZomp_A{9_-H&NGzFp{vE
z=O`ANj5bs$QkjuEt3#?xJV_B2$x;OaA!VwjOih&(nUL%?C+HN-<V@!>S=%qCe^Rf$
z?{U!+W5QKV*<O-0iD~DqE9^OZmL@ARAjp$JV~xg~m?9e}dCI$HR+}w-I8#bHoom`<
zWnNw0WMev$`;0WPw<f@`ICUF?%MGg<O3BX2T+Gxgn53+;NhX_{gEB3}mMF3vFe#N9
zIfam@^FABKpk_{}-co6(q8YE~R5zuFi$X0^@-)VKjIo({r_|9TI`xY=`Uscv!?SoX
zrjkRe6l+O|x~@1_tynG>YL1!DF^tXfFu~cfbj{S@rlbRgx~_86k~XT9SL4J@+cJzJ
z*49|-I9M#H>xwZ1#xc<idL35Fij(6*;@H#nf$L7s#BtDE*bv4n&4{_FR@Z!IwGIy1
zJgSh73b3lBvEJL_`Qf}P)E1ynn|FhZ9r8$~?&>)%sel(dPlIU^oeJ%O-Pyf2g)bmV
zIZN_^iZz}PTLsXoJYg?jWjSuri|c!z20p!q478U<w$!`Mx$K&)=bpwbYNqpc+tGC$
zC#T0aZ*%09{%{ik>oRDgQ*%R7lecxx`Q<eSvxcM9isk7sF(rmg`vUN8cIfS=u*3Yt
zzkVtkRHZrQ1Iv`ji$$8Osap5Qb;6h$rUNE|Sj2-IS~#(o)?ls;rUuhEq)|Gl-DZkG
z@|ZSZwt`d^>l1VXB6FxzpD9={bEPB24PpkQ@35Od>Kqc1#*ah`zQWGIxMO15lhSj1
zJtw*Yj6Xo5Vk*QeAzZ@H!Q5kSS%M#ME8yb<8;A7@O?nfEuE)<B#5Ty7Fy29}6JBG<
zdN(r>Cro*O^K@q?j3c9*29ht_15yD5jDEkdU$Z?M`N2Q>U2fKibb8FeaD(X@MjH{k
zz<N71@6m!ykz@)YsfDahHY0EO`-^U_Nd~>og@RQ!n{0$p90qX~6D&T#cejR%`m=nU
zNBq&nb6!7eczJHw){ze%t+{uv=JIl&f41fDbit!%TkhPgk+|jJS;z6c8K-xJi<`vB
zk?@vmdAIw3;n@cB;LBXC?lHzev&Zt3QMEO_I#Z60n8UV9REtPr97cK>*=|K$nk$d<
z7H8&I=P-C`tMyYfD1AxL>{XMnxp}B&jyv-QJiPx|hQ8;ckKSdyeuj<b+`aP(Aq;%)
z&%TAPjz|I_ZMfG6>4OjX=!1_C859FGnKF~<mL>x?l0-oR0l{UvdGf)p6He8r>=lg3
zXyaNmn)z8Pq2t*aX?NJ9GJUm^JEATT!?Zz9_d1iwCTcBv2xBp3$ZuvZYw;)bS{==&
z$?arj5c5GTI`Jtvf+;HEno@R;OokZItlUyug@`v`tfQJWy!yG%^2J~LdFpw?&DCSV
z)fGb*_}Q<#%+I|s^6+GfT>L)vCgMogy1}_EbUlPKqMVYphFR6a_z|(M^f7L)p$9LP
zbhc(DAHp~z$}9BOmb0$L9-J^gI?h>#dhOykPWnHW(f(#N%kH9r6{j>l=U%uE1^h0+
z!@h(-<i$cEB_W1^p>L_HN)Pq4o1AFjyFc+UwCjx)i0lCJr%#^ngYWzqSLc^V)M+##
zIEOkN#}Vh?;B-ka3BRq_u6r)eF8EjP{Wib(8~=*Me8$5EukzNLZ}Qp~U*U~6ex9%W
z{NJIf9T(3Z@#A-Xz?0`6aM4~6?TFk>3@4G80yxL@^%d*tnwy(7T?{PaoVf8s+tH0Z
z(FG1zlC~8eeX;=`IeOXP8^ierj<JV>SA=Do_}XVb%UjF){OIG4dFB3XjIrE5y+hqB
zi7C(xH#7$;npp!T;b${uuBJ&r8&cmeYZ}JTV^SoDq1|*Gua1~6PCy3!XMgkW^MC!v
zf6Vzi6(7AbkYYpC86-x=IO6Q6;NQ}*xxAv9P=*ojgo;R|A^^f>Lufm?c1sv~B!o=y
zEQUFjF(w9UkV<7CMT-_iFfkF)=EfPzAbZZ1Fcp>iCO|sX%9?cyCTsZWx}hIh8ZX>l
z9?;ZoXHF)<+=$9mte#z3Bk!P-ly&XXz9P9vHRXL|dBdkLK^@0&0=lYHVVzY;OP+$2
z{ibN)&EB)2!=U^*CaPiq1WjhOWcS)VP`ZG|Q*=;Ny=#nt&3Z%E^=gC&Sh2gfkSX6<
z8&92F>b-E*s27H@%EoCL=8FY~N5>o;AF^64na>u~O@piSbCP3eMAR&59D0Uvpz8<C
z;2pJTjzNu_bYNzw>WcYn!E838YGzbTL*;9#x}tIwRjrMFlEj#V%d<0n`2Fwkqwl}Z
z`S}$?*X4L8hxHXHCWc|8?^|4^kB0)MMWviqmE&M}#B#Ax#!;RY?vp=?3+)(Erm`{t
zmzB48U+GyA1n&yKV%YR8+qP3c*C;{uV7Va1kx_)9k8C?-qaCi6G)+Uly<t75MmdDY
zwry$KUV;1=cMT&N0cE{MtX0_*`<@LuxR*Rlj3JO>V76Ff!>Y2c7g<@6>EyLoXvF#G
zFA|(GM4dQ#l7W^9Xex}GJgYB5m-2RmiGppcO2O1^H|=Gt=6jI#xPbdTbZKDAGlS7<
zAUm*A3>c>dWih7gEFMY5Vlj+k<l^#z!=nS5Sw*`UCN@D#1?<YET@oM}#$Xs@WPQD1
zy_&IF%$c7Y5)sA_V5?g4$!jI=dHW8<e_38Xl?`g4P)r(7Yt-0f0U|BwtjD%CN3#ef
zW(3g~s1xR(LXIkEGMekvbyZx_m=(~*Q`AY;Bp?}5FjzBUqebkgy4f<rutIQHC)jX<
z>06{7k?69n5}gn&EvE8d8q5$#+cVP93{z`^n_7dpzQLT<n9&oTwy>R(4rWvjXH=&%
z`l}mkr-+=mi19g4ylF^oi7<i-YD7>ZJl2%mW6}UBe|~2TE`KiQ%UDZtI{DSgA|YZ)
zMp$28(>`C*G&Mi^zyCJ#qeJeW+~dP|&d?_o)e+7dAa%uH3=s%15sVO0LKT&w(FJB?
zB$1-QNWfuK11kUt8*9WW^TOs2R8Y{Eoql~t%!BYmEdN0s@>lNP<-1RQ$n!z?^2ve?
z8$N!NI5;}s@}lE*4WahD`^lEizOv-<e9PuixOHUNjFy`%+&zLcY&l4l?|tx1>X&{7
zPH*9Z;Oz|K1lJhwa}+6<Q6ZS*NhV;uQd6p`p*A&R6vlC+-*#MYZ|M6iZ9ilVl*5?Z
z3^<)^JFMoIie?FFaQ491k5~h*J^U=g`j~Bh!RG3ms;V*WgmG*!M7G<=^^-Mo@i+@&
z1Ig;CX~JF(+q9tzAq3rQ%Q2P@BFPP`qJ-suQCgL{7^aN=%cS#`q?<j{<n%>kw8S8o
zePa)OsCUVnQYO`9>dFhQeM$Bjqo43=v0M^j$jwtEhM3(hlg1zAe8_Pea}hNv+UskY
z`HGXplB44#)x5zWTt0cq`tq8?JGXfKXTC_?EVw*-Ml+w`ydwsoy}qJupYd>YhV4IL
zupQ2?AfAJ<I0oYU6w`Lt_#6kM;TCqdB&7|ewwOhOv61n*BX$?WaE-GI=GzasBe$tJ
z<l~Q@vOT}U;`Uvt#he&ZHsOexla~#bz4KF1Q402*=52RF^<I|T+CPT{mtqYC$@;!y
z9D0t94%v-V3Qekfy-feTy}rp)bbSvo3E%mXZ_#(Hu8|BBN%kYCE04s0^M>Pt8Qpe+
zNgit~&Q=&nY_<dan0V*C_j&u>w+X*BVyxl#^n};nc%4^2_d1_@>ofeq=Re1CF{fYm
zoIQTVlSfZ^{`{Qv%@v346!);<^yGwY+hd7bUO(p-UYc=v)YJAG)>muHdPI85y;ql1
zYX_}mwbdNyyX_&bo!;lp@&T<e930$d?kl!k&)5n_#}62S;pq4-2dhJ3HV={{*m{O{
zFbv@R93mL;L_8@(UU}{FTwiVpV@KCLXKoMqD{uXM!ePVu?2nLVEAGDZ7S=*-JrW1P
zrbRjkT}!{YrrTW7ZmvjSL?q$G(j-;cl_|!QJ!*7cA+zt2DC;CeQQrbFdd}to)*6EZ
zp>~c<-%tAFGUHM(Vn=(EOdGYR{KJW(aus9WvYJ^==5xG;O=3|^sfaN6ia%<5=HhVC
zV#VSJI5Bw9ea4E-WTBMlswPWLW%7yhpMr~T-w#Sdb{-LfjXDJtgb-ua2bO1~gk}^R
zdFSdsF`o>%IU|34W|(Elye)fslFU-lCNM>H@_l}OuC&Gc{)!KR*rMGtlQE;qOq;5%
zSS**!XLF8@kGcQw0k3`Tvpjh1We#s2X9s7O&pG|SDU)C0m`t9Eo%vpoZeGwk!leCA
z1w057Xi@a(lFt?cNC=E=$1nWa&+`6zPx$!VPdNMJF<7YPj@fd_!Ri1;!`ayt-~7Wr
z;5*;>j-F{L;VR3)e93aP#5<o+iXHf&3t5{JiOLdEwv6U{I%BCTueDBycyDo5^RdSm
zx#_kHW3RD(fHOi>Sq=`C#HfZ|+qTaUT8@*`V^SR1v?J?i6a?x!Hk+FlsNjXxESY+2
zte)GEl+7M@z{~FJRdXcOuY$Ke@56-^1(97NIAO<R$L1nd&VQ%(bl1U8I?H`<GrQtW
zfOOI*#KJ)~M(NLFz#)tyKE^#N`QDP*FCk#>4B0iZ*z)k~mYK2(bWLicoEk}$7@097
z&Yquhdh3M6V$NpMBcjHa#kg<!{)lGPibiscLAbeYIXdoXjt)7vbC+Q|aJg+K!=!=(
z?j$pRxn4h&4XW5IP|glT27^>!7KXIlBF}q>m74ruc60*+_=IT$=767LgUuQY2gzZg
z)mRr1Bvp_`MR$@#B8a!7USNorF{sWnzprQjn=qB&4ipulvxxCX<G?o9g;iI&-eO~b
zuvLUtfQ`V_My>R#8MdAgJ`PX;Ck?T$F(E4Rf`n-jm;lCO+!A34RfnrOlCy}*B$w=D
zY)X?}4t`^D4q=(D?wHW}iv@yKt13XnSi{A$$8;&N*|ZGXguU4!AAQ1NwdCd7cj?=6
zuAY2EmlDZ&q*-998tZ5Hs-emVZLo@3IaKE`T1ODUNQFZ%#^Kz8=<;+ZBlKcr%Jnv4
zb%GTj#z3D2ertWj&%XQ>o;-b<A3YiQ)uRLE=8EgS<J?%DeA4s!$&7el_>&I?zVexd
z=g(WVR}IGpiHkwFyms7w;25qpe6H>J=+QTM)O{9v`U-L8FitZmO){8VoQd%yCx~_6
zb-HeC#o#^5Q%|$H#j0trV!1kB^YKSddHU##vD+|?7sSw^W*@Z{O*UY%F0+~mP2~vv
zNW0l^-HvLeWCpBp4C6?m)d+P_^AYbHjERsb)zio{scA%kEK}ahW2V~KoV#0!pHxN}
zMPFo<%3_j3hlg2rYMipS(O8BOZK56~MOUKDugx(Adn)MCljI4%QhS9o842&2(*jXs
z-PNaHk`!X5A2|=H!V)n~bLPfAWLjIraye(dTHs8@$(=hitAkAEuNnH0F^<$O^5ngb
zSRNd4eCsx2C$!fW%vKE+%eZba9%?6W^C7mrL}Wv~SmMTk)NgU!4Y2}FPB5z@!r3G2
z{L9>Y|A$y(sg8yEw8z{zAsu<7zQkN~(6_kSad7v9WnFU=BeSQMeD4R3xLCi+^8Rb!
zJs}O$b%i*6o|?K*y=j!}NLeyk?cOFKd-cZdP8O|N{^N;mh#0n;HDTzOEnb?;r(_!C
z@2U(f`VyP<H6^m5fGUz*m(&a<gNd0o=$&P3HF|5YYB*SF1FrhnikL=*QHWy!I4Z0O
z7y{SlH~h)jpBrb&!T<n(07*naRPcxY@ecu^ni-C79dqaQeeU1C$AgCtc=?StIKFdC
zT~!P>k+UaHX>Xo!eZ665XH;^J*B&mh9#)Gv#DuTGS32zoV@ETefjGt?XyF@XgbEIh
zZqazp$?XMB9M3-P=vt4lj#<6NSDrWoTwP<$Mw>x_&<<R$J2t~e9JctBI9MFAJYF#j
zEmzwOkKcVlHG7?T)9_#Z?Yq4DqYM7+?|h4^PtI64A@v<T=~1JNno_(G!HM9p)K;kS
zS(<QD!z9-1=z7kP`@}56NK@>DiAi(Ije=X0C%?G7X0G1<P@!frXlar<x6Yb5L$~E<
z)o@fdR8AdhjoF!PMI$_Qp5N#bUIZslMG&jHLnpeny_k&Qrkv$$GJP<cm08O{f?xXy
z?=j;z>J-K6G|XDNH;sw8(AW44n;Xz1DbrQWq{p|}sT+fWzf+c{vOZG5ekgHm8r>sV
zXBR_c)3)5)Y}6NF8f6K_?Fg7QpZ_O!Zt>aI-{AGP-r&{GyvFk2fTr;@iw0LYH5vkS
za<-o0AY`1^T7@Dt+R-~HAl6iT?5uKj`+}RA*<$L5rj<_syD<hFq@m^N@;Pn4B@K>~
zxx?O{GaKiuw*xmf8?G;|xQe=0SYPwyFMWj%A3b5axnee}sVbK<<IThpQi9WtCR<`t
z%DqUE@RfomA_`)9=c%1euLDBgjf~@f5sR;CLR4U|swyh)IXFC0fMV3>k`y9~)q?q=
zVY6-NB6J}zq{w!?rE5C{8!|PVJaxAwzkj2-?U54YQal7oQ(5Fbi4aD-ucp+8a<uBK
zXXnSTM-JY5<?lVsD|ffGWb;aa6$^taB}_Ixn!ty0jb#Up#+Wc+P$%oGjg{Ob@AtfV
z{_IUi_Zsj5EWE%1MrrVc=IfkJ*M%MH=j!U35QXJx$=S0@U2joT_9lsxOisVa@S0<C
zif%U@+ilB1j4TchS)H76@$@OFg#DDHfS>T*^8YQbpUMVxqN_^LQ5wVtOn~&b$95*u
zK=bow%SL7pqgLeDoYxt_uD1|Jl{v<!?zjn^8m2N$)x*(rU^WS<t}t%I97M$RsP}0E
zS0R-r9T|)_aFIx+!5(|4R!kb2mDe~3C;HxE2Sbl}-Z5?i)mwM54;Q2-E$L>&eYioM
zw3seizq>$vzd_F09Ho<O{hc8><<RdN8j!=yU(k9PH=%%~pfG!JA%%A_5iX4(8N>GG
zO3|gH(J>*YX5-@K8hLVqx0Yt!usm22B@l-j#?8nO5<@a1>o8S=^$n();jKeNbFrK;
zRE#9!h$a!uhD4>NNmLm_wfT^B8L>BDJkA;BwxK$H$lK$F*Iqj2)%Iil+siZFlo>De
ziCf1@&g{T{y6U;Nu*~l@eDm#&FTT?7bg*2XjXXRyJh8$DmzKMS3l?=Et=DvCKjysM
z5I*xJ)$ER<-o{S`5Gkpq$vF!_KZ_j$5)$iS;O1(><CG~lG12uchMu|>s>&0?lC%w^
zG$^HI=oJ+Wf$eo9B%SydXJj>!(R@~GJS0b8#CglCssL>|jmgFs;)*mM$(d%RM4%W$
z3TZ+?rz1wRNTZ;qM${P+rDi7!F{EsXU*ZPLbih!Ku{?pDq)D=y(B?`iA6j{JrnE<S
zk<#guqckVDl&<9`k3HVHa|_?h2?Ew6ypfEo2Z)Kj3v|Os=pzTG#~j_dqmHwK;Jh~K
zb>p#v!Aj4=H@`sT4fSe)t7rJS!di7FG$s)?Tf~mos-bW5grdKI@tXMbl73SWhQ#7C
zbKKFPy6GNR%>0D&cP=@ZH_Q(f$|hOPaHp@5-uoD58vMZlX?BD)Gkh}Kp3nID{FvYU
zcR%3ihaYk0b8oS_bBj2RNYIwAsh64BsHS8#MvYU>&Xc<<F8?3D?%g#6CPNdFuwAc7
zF*2Ji^F(%UGCS=}dbp3nK-aZ<1(C&8j?cdF7C-pbx9~>SyDm?QF^S4}#_fP_8iu~-
z*6{)Bb*s;u7?!Jwq0{FH5@O4Qz+jAiHjf9BNFzLdbi>&vSA6GBzemgsjq?UyDZO-A
zFF857#r;<v^6=G%oZPv~d{z@fhe=S^b0E?WI;D+qpzj8dMAKNpIIupCY`d*C3`0*-
z8O9+X>5Ld6m3O(}*Jdxo2*FTQ9*p3PCqxZCY^nw$aCm&cY&l~bT6GQ|BQ`;sMxudN
zUw?zoe<AXFAN)3b*D;fUxiM6Qnl}MLd9q^!mtdkVi2_+A`c|<ci%F_iH97CsNCsq4
zw|0wB5Z7c0K+LR-81x)885rH0qNpF7Q{7(E;D9l;Jh*#{g>%>xkQ7K|T&M(MOfvY!
z6Rgjy4CtHyPN$OsB6c!Av)R#8jJny|X%X*dH~-{bUDs1LnO&Jujsek#m8n@XyWM#I
zQB!@sO!UT*Pif4KZmX(@IPn}4COZOWIYNj?)_I<vUlGObG<&6Y$y={U20Xm>=4bfd
z{x|;(zG(=dBaS28W=pr}F&9;i!gI`*3s#4RFsm?CwWDulQ>vo%WgqaGdZicllJeJ*
zLf%{41XD6ig*96_jw4YbsSnH!PB^-IpJKoi`;PY64ObUy&OUrZdv#6QkJ!rc;I&sc
zymO!Ju;EX?^-WTUgg9gax$K@SrE$t`zeewsL^Tw&MrfK!Y0Kk?cb=-wXRZkSIMNRR
zk%Y4j=af;PfQp@(#mX6C2*hDz=sP@#lY<4s$hIHoW7gq!JvW;h!VpLy->8%(eXYrW
zg6_e8F8j2Y6y+(Y5erc#>dkDH^{2b@Ri>SJpB9{(I4POm1wt&*EBgyJ<!8F+k9W6z
zQi2p@sjO{^URCRbIF5udP*uYIA!C>b1o<t$I8v3puPeVVVgbobg0)totzrhcvXQ4G
z%XZzd-L$NhOT06rI0+5%`<Lv}oLQYN5mMy39~g(8TWzjwoYQG}(Z=q}hva{#;s5{r
z*H3MOYKnn^%?%iq^mq$F4<OvG=+S`XvZ+N7E#AdB%(8(vs(HyUXfSdho3_O1Fqo{d
zC15&_xlW{Ji)jFVlczqG<Z7e=u?{~Dqz25q!W<tUzD8)l2SBjZlGu-!I3k-%{b*7X
zKG_oQJcU~a*w<>@dldvrT94S#AOvLA;a@tUdm1p+k{6uGTy82{LAEodm5mQx1SWIt
zWNA#2kn&_zwJnMcC7ZP;&M<ZZ(Wte1LXB=jG(Rp3hMvf#Tf_A=#^@x^H=bs`VBwb}
z)8G#l$n1c2vsMSYK@E};;F6&-4bfSW@l>v1u#RN&0d75l%|<i^lbjYid4u;d5^dlE
zsc6j!zw((QzJC+>{<Ehn9<{u2wB*jgj6c~X;!WU-cMtftXNk%V+;18#pLQJIPQ0{8
zeEe)nwOVkpH2h2r?_GVu2k&i3um2p>N5rV^t|>VT*^ROltnG=(5=3&|tAN*o)H#=p
zI1<Mv2ZYem_ASH3hOTev$CeNWrCt)VOJ2+<q#8UJYak@mNqb930TV|W>#%rct|5#A
zPIi<h4YSt<A14aHSVPQT<MQ=4*~zj5vzO?Y@;{RS4U_Y1$1$jJiZ$7&!eAq6&gCS3
z&xxGIgd7P#+?^%U31W7%mr2W5Xl>cFBOjPf_EFNG+$)V8)!`uqkGF{`4*}b;qwfO7
zIh?PUEh`RBZc)t-A&tx{LsKi2D@nqx76^gE(-R~aLJBxU<pK`;%qzY7_y`w9&OZ7f
zL$EZ<LnQP_D^w>ZEbbf<E(1@$^PIb1SwVA3ynfE?K0N&FeF&F~gTY=7*eHmbVdg8`
zJ&QfQjh){j^aJVY66Yc(uioa1Uwnx_`|e}5AH2==C+~CX)i*Fzh4nRk+p?%D#$ixR
zyQ`)>q-4KMDD*zCQ0Q9I-cs(AC4`>s<{D!Rbv?^}x^sD)_N2T&ZLV*SF-!o86)iyD
z_~M&fK7YdF4<F;KC#ErjL`pT^ZnhlUS}?Ye<;*dgdAee3<qXTinz8S61`u7QuBE)*
zvi4D3<v<cvbe8WeTC4^k1R=CNYw39U^f^Cx`v(fTSZ$OgC7iFdsrN!vHO%UUs%~hS
z8O!AX)x2TW%&F&=D3Q~n+Z-Pq;GN@O+2EWfM9IPMUMHH-lZ@)vX7h%{(TwF{M&&Ei
zP2OqlA-Y!^3;{Kr8@kB##Wf#({A2#jzy6o}=-WRcb{*ouTktkvqEcj2=C)c<4Whb~
z?rswqTrer+<5!DqCkdNrz<LywB5?PdiggoUHHNSQn+k2ul&PS>JBMqOZQ-mV4Lvyc
z>X*Jm>bBb04u&`wMy(u`@FUv%+Da#fDNlkWSF<J+*Wfg1(291hOm|YwIkd*|DQq$H
z!>BQ3qSI6F1(%y1gVCFk87QUtHdrEQ0tqL$c~3HtlBiTE?SRI~vL;5I+Qy*Gmk689
zmaD7l48-r4F4hR*viE*wz`Xp*t33Vq6E4o5(rpLEJ`ht>0Ks{1FrUqsH4Vq7C!F5B
zL$jD+Yt8Yt&S|!0W@HqH;Ns@Jqh@9(a?iMN9~9F`YTWU@oPr^4rWv_1Ujm-Q049(^
zfZDORcSy6WI69hf{p^~@AACZ!TJXx3-eT-}{@4GT{}q4mAHK{#`)~g#7Z>N0<}4;{
z@=}geF$PS`F0R&6drgUmW5gIs<Li7c)t8{_2SNx~WAU!a>s_Yf&P)@@gmp&cJj0L~
zIEKY?Mn4R+<H$A|h8Wqj9qYCwOu4~jnyV&;)+x0*k!z5mvnj@Oxlv7dVjp9`I?Zs6
zF*23BIUXiSnQH1^q|#~TiL!nq+0~b3x0aH~L_wZrtNE-*Nw9v}SY9~a%X^GbO|q(S
zAf_2EoAs6V$W9F~{cG88kYU#km|dw+zW+4E&EO5>JtRfCzUTV-nr7bMy`u|pa`LzA
z-t*+FZ7uC~Akb0I7g+DGl_zu~H<uTxyUj5`g)LEN;eXj)Ka~xtnWlkx+qJoH8bHi0
z@3R;QXlA4ZA|#89f)%fEG1g%hHL_`;3n)B=(tQj6y%HpEd>|wf!CS1;&DnH4CM2k8
z>>QGvVO$_O2Zs)OyhQ2)5;JtPsT3)LQ)k`K!}bQ#Z&f$C6~=eA)L&oVUTa7<8`5*t
zau_`L5&OWyyd{P}HO~{}BoIT^jW9Xw8Edl+HRZ#=P7~%`QD88I)|d|pF$S@gJUY>)
zKcExlLK!h7S5foKB9b>{13~TjI})i4aM^>qqN-=i<e1~*V_yIKS2(zHh_jZV8##Zz
z=IZj2?e&)aW*~Nf5S?!JCNd%f=}0(2)akYfiWEnyu64#*ys_9QT(RKKM#JIpj92fx
zj;}X-aB2D0yBFMjC`dZsHy=Mk>O@?(G@YZVMpkX&ByRco*KT3!p5e-J`N^8&xucIY
zA6$Gwb$f<0F1ywiz{HXaq+qcrp?(lTl!3n2(D4v8g=g#rZnhh`@rG{Ll7fL0NvY2$
zo1(+X#L4Yn$QzmTd0Q;>+8*aDF*d~RihK8NBe5qWmnWrKGU|{aYiSHf%nhmtF>2#!
zcPC7Kl=3tr&##LCl#PjG4MRUrSB_M0Nn>7M-D(E!j`mr&Jd&H3Dc6_+=#~jTQz2Y_
zX8GgkuRkr}Mi_}>%khI(=(>?%+hMcj*;d+U94;10@3oeCHKSw`N(h9|YtEkp#<7Qt
z0H-i8hTODg8k?kuLl~Z4lBD9|tmDpnNi`Qt?e$*Ah`Uqq;NfTJuCCaAbi@2$NquLA
zKMEM*2{$A0dcbWxQV+!D5Py6ZaWi5+61tHT4YjlM*XJDEIVQ*_G$!D~mdkg(iJc$f
z5ARS{_pvTVJ&oFMS2?q>G@S*3e!)1a+{g0!T#zIZeZOJo+Z=mPDBimF6iu==1{#=t
z_52)(DLavp`E`PfkzfCt{~5pe&whjT^J|<~Le?l&&Xb0Lx}LFKZ)w|>gVl__TVotl
zGs_IeeCdf{P!pJ>6SXKYH`lw&-LgL#B*#<a^=K?MoBvE_jc9SIowvnVONfCGVTggY
zk7%yhE+)f}bb4V*TvE}07us^saa#qZ3QC{M#EcLUDR1#E$08MGgos0of@01&omQrZ
zH6AO%%GTISZS`U?84#OTL`IErlIXK;a-@`*%IQM?)Aw$&4!z`Lx`~ltre`&#q!Vig
zLrmZ*B@(e~RGe2MK-I1Vj8*#KOE2B$3!ncYDfYB$!`QV%uM<{jyGavkozTS%|L}kL
z5BTFh`Y!K&|2>c}<rZt-BY9Aib?;`cJQnhK%e1%uq+eavY4eaOytBlpMqd&I>wK2X
z$z;lBP!qkPGu-z-Ex=!z?#ljnYR^SHrJOOYfaf>{o?l!bSVAmoQDf3d*I}*h|MS_5
zc|GUdAAX<hrbD9OeZ_3P#2QNqk$E$tww}doL0!!ly8$CmEAot~Yh{+1k~fM1FSslN
zdGSJ<vOgGQNg)Ab+@olgM%RL=bgEgdM;d`d8hZNemi5&Q&ptY1b9s#y$LZ-QFTeF!
z>eV6FPtR!E4HwU^IX;~85C7|bz;FKd|C}HF@Ex6!mhYO~Vs&5^bKbdELnlepzQS8I
z-ik&+h@l@a#^HTko`ig9ciobqNGbApi6I3@y-**h2~^wl3^CFMVcU<auh%*SPa(@}
zO5-C~XEU=-IeSU=3~bAq5j&l|DP<3eN@X7Vrp>zvUz}u|<>pM@Ss7d`zwWn&Qnd5=
zbEgd0OH`oOkVw%^L*)I)pXodiHPZ@VB;_Or-E(&UcA5T{byM!z<kDU6@fSQVlr^Zv
zUD}XZ-0mt#Vhn-Ht1Ir_Ii;S}bi)?%&y@sXbN!Mdox&IieNR=@96vZ>F<*ed_1PIW
z=g$=wqD<p+Qosuh_FtaYPi2FuE)9DWpiD=dv=fCnh|+8&JEBsuHb)J077|nr7Bz$c
zyBUz)Kroq#5TS|KTMNvs1yWf|95Lev7}8u3c8Lbp0fQ&mgsn&H7}YiP^Z@EZ5)DFw
zF*Sx3Rf}kDRvI_hal|E^?0Q!do(po;V_vKAFB|A9H4=(KTqgW0j<jhpsnL&Y4Q|^J
z`T(}djcD9ap2`GSiuOYfGD#M34w7b|=Ir$>8n?Tm$y#mxEm;Gj`a}ILiDXNlr9rI2
z#{AHg32l;WoB_kobKRZu@h5A(^^Nai8jqXJ@V=&M7TBhynm72v6XtghSuKw-i+MKL
zu(<$=N)^iIu`v!CqfRlBU@fYn^Gop14T-82>RHcA(XeRdY&VwJj(lYpv!iarHjY{C
z7~%knnx_}f=$pucmk)T^R9rsZFkbX5wn;<gjnK!DzUyg+j=pah<G|QPVxJhIW|)nl
zLDMN1V#s#yj3f!#9A@;hP<~Qbe<mx3a_mk)^^`W`u}-rJozqQtG;40p7QA}*KK-M|
zR8BPJ=mok|VHRkiT_(ISsQxRG&-+xu*wbWI06>fwW0VpW4BfV8KA+)oQEP(5R(7JQ
z*-RBrNTwW#Y8xk)z&#3a8DW&D3796hQ0SsXOCnhp^C`T-NPm9DrfO)8AJ7tb>E+vu
z+pR`8S+DOp=kJB~3X5P|m4p3s0zpz|SxG2LYZ{bEx?_GwO4=AKN4hp(!<_DXp!v)(
z?&y@TxgcyWA+}UiNByv(d2mSV4OCIJ_Q62o2&1JR471x0aYqjr*BvgZGjD%wkZ{N#
zfz8DQ&puf*J6P(JRC*T6Io_@rZ(7!@vGFq`r$C-FmTErkLhZ||Or@rcmnrXdpXQZP
zWN2H`II=o8(EX_Jy!M(?NfDwD$C2&zMW!0sJQ>Py8_9Bg@tj}&^<U@r{?+fWy;<vY
z6-W{^D>!HVZM(!`Q8SxWjOzhoELG!JtSW{!l7@gbe?{MxDa|SC^9|WdP635-p4jYs
zZgRYmu_#?M-)nWTwpgo~w$8a6Rxe=l!JYR{lhC|BJIxKWz>V2eI~P7F>Aqa1&emFD
z$a|erjiz&skoSkWu30uKT-NRhYP8nMi1Q5$Yp@Y-EgrBU>fpK`1wqmKq0?0(*rbsj
z`|j^j@)e6=l4L`jWYsSqo5Mu2TPIpv8r9ny)oxj%`@0bcgHlk9O&lF8`1;p>k=wUV
z82XMdBqY^fBhu#vbF>gO{=wLQZxUbn+8g}*-}nW7<NxtneB)pJu7ZQ2EFoj;RMeXc
z6p7tU9VI1g2%~*re)p4lwcCy<)>(X}RJ*Kg3^>$<6<sog@R+}@M7fb&ehV;|)QBe)
zv&TaH%x=KBU=Kmp*Tuyp>zfS;HE42_Ym()oqD}J`zVt=D{&Qbrb+lp}2JYOhsAdhe
zIR1J^5*Ye{5F$5SPZzFmm7}h`PFQ_KGha}7kFPzxazMoU3g=yJR@CUpc&~e+^H}3^
z6S`*;m1&@6l6wIR*{Bav8rj~g`Q)R=eEj1lJp1IFizgSfo3%cF#$iq3%U}5{Kl95!
zi}RM}kIvY19aondhIZg*zV>xit0TVgjX%`;pJUj(clnr7#zu+}ho!12$ja;?M7psf
zMWOQb-tNdGb1Ai}HQB{h3@IuFK-Yji^z-?GB*G99AsNCr(%o!nx7(agEz>lP+>EJS
z*b*qihP?)8dIn{h)=LN^Ni<m+RQ4du-t)j-b2$xTcQG<{AAinYYpntX`u@`NwR@fN
z?mcB@&ZIBSb)z+zvK}L$ABf|K^M3ja)9v4rC6r~K?FwF)s7};ev-fVD>QGbAIxLBv
znPMy{!TS1!+oz|@=QFOZHi#HaZJge^f^x-`dr>vqy>rU#Tep}m=WH)8xq5uTI2k1A
z{fasFSkKB&3vK_4d;OF)r~s8fYQK3!W^*57_VjGIc-2LoiwI}X<(5&V5g2={o$Iu-
z1~VT>e#96(+>KQ#4D$+eIwPK}NZvylFx`krVbbQgA;NY9(_yL_69bHbIFG67+?;k8
zHW{565In*$m==@PIihq1BKX>4Hi7X`i+lMPbMFM6wYasx%z--&f7>y9{~Bi+peFVn
z+iysn#hO|-g-J-lL?<adKxqu74ZhgNr^qCMvH1|UWNmAvzs;9WS1e?~&2~*nmT3PR
zGGz@V8AP+A645BdH6e^+Br+myBm_s-1;Bw_lU$=9MCG&ru@zJvT#cKl*@>&>RP_R1
z&G7XM?;YMdjd1Y}TUEHw;bg!jrT>^N5qg7(h8q)bK0@u-`a}!^(kHrBsW>qS43=>i
z7*gVU-+o3ncJxDF42d!HgqVmya-2v;s11ZT>Xal2V;pzaq2#=lT*=gv{q|9Nk|8_E
zsbjMKnt=|Jjbe&Xfmp`T;AOx$on}^5&FShk_3(_tdCk)p$xsT~*yqi>(57-xo$@nd
zGiAz5CP`WMSHj3A=S9h8Mw;o_Z9CH8ffjw%YC9WKo+f7+U7n;F5sf~;OnvBdP)u}P
zGo2d+P$>soIf!)q+TCII6Morj#MYfZqgtJ?TrGI#hu`DPFTBOj4@m5ls%@-N0F%$w
z`k4+V0=~xDL`)eRiYZ^;lqWU@Yc&>u6p(dG0B}p%XAx5ktm<Rx!`Co}w;4AdlU$EA
z1JWf@CHP86(UQ!J>g0s688~?GHO!q~g4sR7$KU4qyZ-?$20}0NK`>Rr7?!l#mgQ>6
z`DI6C8$uXx*0HDr7q01_yiKf@_~Sd+x@OpH8P*#Pk52L=YA1czW9H_4MyBk6ln?E;
z>lo9>Y_-}=PV@JO8CX&xNZ{=8#|-N=b?rfthP4Mp@tq6=5@8tln}6$X^DqDTKcii1
z2zru)H;~4ty0ZlR5SiCCogL}BgtvyptYTb6#>-Ksk<R4_Q(=u1caT!g?L~2A3Tc`8
zTBDN+N4fud3#sVBjK*=8ghm<U#cvBwY^vGJj$Z1t;j)>2td5)}8?tKySk;;bYO>iK
z$>OBMl*Fu`cKJOT#(eTPRm)T5sfbE(cX~!KGMT@7{m9r{v{}*rHljdPaRzm=H&G=w
z-dSs2a;uHajW9w;k!W;}bAo~bNcMpMh|Fe=FMs)KeC`Wxl2CIpF9z^bp(Z7b$8iCY
z8Hf^Ss+#lbCp>udHr2c$8sUHX5B?{#*H`@Uzx!iccB9u_xM=e=?@=Sqat)Cp#)|G?
zpF)E=#z@?>0MxStb(Q13^?9lgJPu>sv`6b>V>2s3pS?2Sqnz8x=B&}&(B(<0Zml{g
zOEGb?-EeVv$q)iDDp;hGcs(CdGTgdz%HR7h{^u-?4zWPrwQReNp}iu+s77#uHl@0!
z70(I>#$rp}XXRCsS=Cgo##bKSSbXJ~E#}N-OX}KF`HIR~Osx&7b(Yy|&V1S6n@WL>
z94jUf`fkhEcZ^-cRW(5@&mKMJ<BuQn^vP2$o?I}j2ff#2!e|m>K1cuf+dt;I@A#{K
z{g*ktdy6N3_9NPDPv7<Q{T0W@Cw%Q|U+0hh_#0rg#*iG(<DI9Pnrht(VoY>>&(H^)
z_f(b3&#Fx4*2Kvu&q+B0^FBBXfe;hzpwVzm(=c=cLr9Dv(f5&U+cAy-5&fAlg$z{W
zDmdrMiWriW2^h*O+7~#klCxgR8Nc7~6^&$ROts9;KzeQlB~R{+*<WXQ$dk;}kd=B}
zUs?y5@^3R3R)+p+2BzR#?o~-q>$DI?Dw(}dZ|<5-lUXe?HMp{WP0F6WR~qZJS8y>%
zp3qu50jx1*L(pMh*3>F_iwgOd7(J5&;}pPKEf(B4I_3WDQ<ZS_J=?2u`s?i!M^|d%
zVrnG_pH?ElU*zkjvq3EdK(f;|z0Y?yIjbs}^0fvd5nWOq=^_k!#R6Urhoy%$k;bG5
z@q`i^rIp}w7U-zCCqeT%C2G6~(PC~iCl4I%aDi~3hYKx+0C)@oZ~#<TJ0PZ2E$gy^
z^%{=ARt|Hqh3D7s(o4v_4z~^%Q!(DMB$G&+fHO-%GT8HLnAezL0J9)l2W8!9nVM!a
zO!BmOVlHK^9{^4@RD}^t{`jKBbaavB;T_K7_psivI5=Rl-jIfoC>bro7DIU5yq(Qc
z2ZNYIDn)#XSjnIPmS{aGxoN`^8lCDUMSu+>VYG;?NQ@ZMD)l(qgloZH)RLJ>ooQ;B
z&#F;ELY@rR-Nsh1u&C=v&$2)eb-~r(bS<7m%}qgU)>WiwQk+uMNreb`$>pLe=p9KQ
zjJkg&pLI_WOrJBl3`Tt3(5Eg<3+Eg*E;2Hvfb(;fjd1_|E3{9a;HqQ#@tk=~wC7Lr
z1SaWJKvH=WvIZ<=8-Fdpq9k0>*$h+ahu*VD%FS(QS_>^n>-f+b`aVpi41l_Hmi3j3
zZJTVC_36}yOmQ(%D)ik%>ILQ01b5~?k#gvKdf8zvJNda7xOx0Ow_f`^_38+c!Qv2`
z<8@r2xhi9!7(JLGQgH_7e7==28)|5gX`RJ*hjSH{hQ-lsZmL5<f5q|1Lx!i%S&uFA
zhnDJSjy?DccJ8orr0oT+Pl&0p)d}k#{Fqb;$6xz3;^{Atnl}*BFuy-1zW*T?@Bavw
z4Bk{=XIKNnFwms|TYJ{)meUijQ+p>EGcv1sTqVS9Af`mL3m(1qA%`cYRP!b?Q|tsl
z>}Xwi5|}4#64Z^FnDAI;i<M4LlY$s!3gDdO{OMEHXBRyG<P${HLERe5&~J#b$5~;!
zzQ9`p!SP4m_;=iSaF6$X_%3!b*AcGIuUM>B-1Gwy9rK05*M_bSqye4mEF5*Kxv2(^
zwLTY(sbHIB$7JFZVltCUftIqzu~(GozLI-%Z!symX^KHy&T_V{5GJN)JYT%Lo>W><
zB6RFj$!5cz!q)?e8JRkE3KoYG&6>1EwP-#!s3L-wgflgSZlcahX3v!UYN}HArY=QB
zQgSs-VKJoL9%^%iS@w@z@om&VXW~K_x;&vV*{n^XU|a$xRJG@GpZfxDeeo^oN@KQ^
zTAZjVjleM8?VclJw4sb6#=@rC@^}C0e}-5{T}P6{|M<W9JN)r~{3GmOrtD;A^8Q;!
z)Z`+U!f?ox;ZI?Yi7_D#wr#ibxbT&pM9$}ZvrxY^KE^n^YjO<v-pQ0BYmFtQI8Dt8
z&Av?h0LC$LesRf@CyyD1A)oCY@2g2;nRJQrtAFip@c7Xa#*aSHY|$}LcvF*EA!ORH
zeg<34TWfIEF<P)rr=&hlQgz>R_^QEOT;ZHHu*Mji&2lPl@ZRIBV>WA894@ieG4?%U
z9|$3^x!JP0*?_gg=(sw+;^O>_zH1Q)#4)HpPZF$TQz`3+3}79xulVuXpWw_N@bzE#
z3U}_^p=}=l9mY6@K^Y(SUwWCRPaY#lLEPnRrXWE`xGaU}hK|1PF<9!Rh7whnpKl{F
z83-C-G7~FK{RKo?uh(?*ny&3}z+yIIjFBLTe(2e3J2sn+kPWXSsC3APJ||kX>f}#c
znrfSU=F7g1faY|EJeAzvj&!e!<&>S!;a#IQO>8HeIJ4;X8gir%q<7{(=7sN+&-#fX
zygTzJI(x1wLR776N)cyW{)t()ZOHohxc3<|7{42(PST=D4V&u@>m8;qK+w+oDM?}&
zN4mD7t{c4b3?(v0R5Bq3Bt%xTIY)~F?yL?tUM-;?Y1do2t2JYv%`a1CFHX7Y<yw9U
zVatEPub<8a)s=!FPf^HJF$zYMC#OXXUEnEWF;PgPAl-nZ045-|0$<_k8gU^bB0(oz
zL15csW@LSh`VW{85u+B_*38I4Na!8(3Goq`c_en=N1R+i0GldA8jST&J2lt=QnkPy
zG)O;S4kH8zAJ(|{Tl~Edx!b_?K-%`WBMU>1tPRHGLweI8qlKti(J6<e6j>8jUJ+w!
z3TCX=O!ZA(E$Lm(_R6|dV{&5=1@V^S`w#daLXgBDk!n$4Du;=pE_FetuxgaJqlrxP
z5R;USW(>iOh^e%Bx5WSn#JD`!Fkscv+ltLaahVE61q$4#RAtF%J{nPzm9hF>$h`mH
z@<A@7<jGYuO0BX8y_)|dPhy-3Gi`{q@lM4}BZ2C9OF@v6!m_RaASTt$ij7L&5w+Q$
zbS-%@TC}_-levVV_bVtH7!#?z<MjB1myU1q&L<yJ*ELnw6D!ZAyQG>eu@$Vlj*y}P
zDoM3V7HxEQmUp@~l#wV{;^fRpE<7>gWC%b;Cv9=vwH6x;DXP<5)hvinHL}JgEtYL|
z_f3+GnIg_vrL$yHni69oChHF;BsERb)1rNu=tbqwlj(!}L|@YZErN7g*3X}D>-0YF
z{pbh0_QspUWH5HG!L})D;`O!DiOIxPwxR|nf>LH(WpUo;CO$DcS~0tGhhe>DefE@D
z(-5!V;_?cAU|GF#fIV!ePgmIG3CZc&87>po=L=>>hiuM-*=d8T=SV-&o;~6EYS5G+
zJSkdir4+g$!qxRP)zXu&oL`LGzU2uiQhSAorSGU3u<c{Qla?{{eDK2=$9L|7w;Y`w
z<D5pJNS+WEDj|96)qOP5Z#Kl#)65syj3Q9E87Ue3tfJp;dH;LgqQBe{`;OJ&0jr}G
zM|X~S^!9u7eNW$R8OM%lzT|@+e1~UG9uvu?R4L@_YsclqHTUk`=W6|wb_~Rz%o0fh
zBLZEb@{ak!6NbRh>)ID<a-Wh}4lsdx6GJCu7kGA{AEk&ZjhSW2adcj?i*Vcn(eg93
zSCf?bZ}Ku>&q$_xMH$a#7Lh4a`&<y(Ty`mLrzS(*SzMmfC-Jy6Qai^$&K(r(cSHWR
zCgw)dmhV@xMfaK=$sU_Vjg)lfk{$hc0+Y&>$x&*_U`dQP#$+Z#HfJrquDNsPl$Tz9
z$esIlsB4EX0=4dn69}RpQDr^RSw<to7-+|i`-g|T|L75Q1FwDVb%rPy6Y*7~Zk$e&
z^A0=V4%tA)<RaFSeJg;J%rx2mNxwGREsMnxA+X-uut;;N1FwyK62uqjNA>_PR?Qqu
z_JVw&<zSk?6}Aa^s-9BGH;#0}$dkv9dHndXQb_X}Q7U@UIK4b+43hZp{SWX?-Lu0u
z>UznL`y|OIXl0pPv2#R@PGf7F>9z6n6~;Q<`;4cml&<NmGNVY6HL9w=Hpbv^cwfQO
zGi^2vjQvO$dQvc~H*31Cqib6vL_~suuKAf0QIU#}@<eWjoJtyKw}Bu0@DoyUy!F;+
zIXT@hi~~a&kQf=qf#vd$Fn+{pF~d8Dq)1E&YlUtc>G~d#gwGUN1(k|nob0YmVFcwX
zs4|*t$cbi=u5Z??=QU&3a(a53%2?JzPagtp=-9LyhR_pIb|Oz8akp_;tJgfKb|5>*
z>J2i}k-1wrDd;5Dc|U<{r74t@86c$}D-9KUKx8psq5#YJCfTxw?i&`B^KrM8SyNb2
zJLPF<h)sSU+I&L_kvNQ`n6g$^b_oIH-<n-x3wa2Y3d@ogew|FYP*5=CA%nFhXY8Wp
zhGU>>I}TP0>bj!uTh$>;0%x!$v1l4@ADwdl<b=ajLmE1^-G~ewaR@p!O;H=#Y-W{m
z1FTeO_mkgb{(@gWoek>5SKr|;yN6%DL|_svQ$+M47@IJ`AsaA5Ko)uui+6jCPf%C5
zXf@yRCa7UUP;(>`K)MlXkEuM;D5aNqg}4TD)e>*kq&nb?$M_1u8e$Fg0mdC6)e$yj
zB)IlSWsv2ZG+SaSz&on>iui+P$j58UXI8``)gZbGWFtuHbQ)%WUhVvmfRL1aWi??`
z5&4uU&t{LpkvAih(x6(L{E%3|q-@Y(G-o?lOC+$mbqlecG@@>nDXKA$0W-5yQemTE
z41pjK8RJA5Mduuo<1Sg(DntZzw^Nf4rwxEnEu$Fy`53U0MeGv6c!&n;vOcq*cPW6?
zX)MvI3ubB1a5-M0Xpe+kp6ObUY!qZ1DQS0MOa-D7ds8N+qzSE)lsCInk+Q33%!iVq
z+z$2A3|jb0V44V$izEag=%3`Na9uStRfD%4=MByXZr*_R%#EX3&WY_nH$=LApictQ
zvA#LSq=uLaW+Y{IW)!I_(;p!l%t)rSf(c5S7s>hRNeg!;25W?vH+BhWnsi=WubCgr
zraDecQaV&D21`uOYDA0bT*IjIaW?QGX174QR}*GbH5P=i96S?8=hL9ZsBSij;6)g&
zpX2U4V19fXNn?h<Qr2LlsfRG4dToQ6o)`>KVqRCG8pSdN6E%0ygEAl{a`MVcY`ZmA
zn`_1-41K`XGic%RyKDR`(wsWRan0iHEhHJ@<-lt7dBV_f^}{C&oBzbEuXKd<8JoAi
zPk3~O^D895I1bdNB6X2@<2XFI#rgGf>Vt;O#SK?$;rPIlEO@KMox~nzB9%nsV#~$#
zW76k-j@6x)7-rR^{h!U}7+WdzIfd*Nsx-Z^8`)l+QDHckuON=p)*?eo?0VMEuNe9(
z4rUeA3`Plr%{fo6pYiN{rFS_ZHjdDDn8vfdx}tKPFboPLX7ev=4L3Ji&M(ind-pCs
z{QkSlW;3eV;e5?@z1DWhL}rbrubsM9hJya<GqPiWm}yVk8(U7Sox;Q@rX9uo+U)JI
zlBb!H!7W?XZU%!&?7{B)=3>(3wTPJ}rPDAsQ^`%1VwgNVRmPgevUu;+3D;VzQ=MQR
zdY$}RWf>{dm)RX<pO%CrN44$I&vsIp@;8~QBZ-uoRaf@4Jyw%a4fEhv^yjs4;PB*-
zlfx5Ej!rl_J)v$q&I^)8;;8XkYLu0!hbiR@&v9zTjR@VvHMjhV>t|Pl)bsVf@>Tq-
zq1$e$>YA7Wbi&y6)TW*&%n_r^g6unN^1V$HNI+&A{p9zU$4{Pe=gu9Li#eupv|R^A
zn9b%?wbcd-(FiY#np>eX@06!sVzq&mDO0%IoqVoku(a()&d;Co_|YT!zSjx9d68kI
z)3BtDsWFg-$iMil-{j3V-r)H7i1~bu^?BbmS^j~WmV-4(8`u)tVT@{!od<77VN}+Q
z4IqwaMp6p6P?4-B*+HY(f@ob(_Ik4e@?#LfIFf=e4lUz2(06&N7NdgbxhYCH8(pLr
zgYC?3${ru{w74Bd-hKZQQW|;b;XT$jH}vDktgc8YYTk8IQB{tVoyB8}bi<%Irq*e^
z7(0`}KMx*EkM{)r*j*w;zNR9HFpdmuPvtGACnt<4(v5+(@7ZoTx@}8{Ba%V}QPfYx
z6wt-(VtA(Lmb`u^womq<NTPIQjYYS64Vz@vSTRl819aHi0@+O^MUrOUPIY+p4Ty2d
zkuA5h{91B-r%7x+Cv}Ri26+-o>6$B~Fp}bkulz*cowU{z*KnOC>krzo(IoFj`}=y4
z6&ag{GkbqV()STVVzb$BcyK_|)U0n>QqU8^Q?Y7h+&*0K;P{Zk+F^!{K{U39I1uBW
zSrciR4olkQHh(&G%wO#5r?WvVXizQ+3a!9c%{iT{uCa=OOjJ&#lo=VJjhOX-Js62j
z<H>Q^Xu#%aYeu+b8^H{i9#UnI=wOVnO32)kW{pzVk|>4iWQL4ATt(7wLu_W4s-`-A
zh?zY^VuSdlQV6J_dH}2m@d7!WA$AGW$l-H*JVV~^u@8<gCrcQwG4=r24p?cxJB$es
z3t_?TQG0d`#FTHAFQH7Sl@l2_7uj*r5{jU7<y1--!8y--wIa>tjN^tRj&Y1St+OCb
zCmG2asFb!MjmJcd9~qP8t|g;>v^v*wBxmxW7u1b57d7DanrF=$BTrjZ=hEx6RA7ow
z0g}lkL>V<rLUuAnKdZ^r60TquI>j}DM9mfzBZ%G8f3jm?XyKx0wl>gtvoj;sNp8B(
zM%3u0q*g<U<Rww?0BfnN&GaU(fQC^-z~-W%xGoOkh!AnZz+)np-In?);QNv7xTdoX
z9~1ZHOTrj)@hH1{n<VRs%|wqEQD^|s>_BT!4Ok%trK0mx6Ae8~Og0(O!^uXt36Z1i
zsAg5x?yU1|)aMFv6YMfAFlAE!gSByL1tb^q%BRjXcv9LOg+^vg$$-$qD}4&%jnYII
zND`^T$mJ&=a`@n7-u>a9^5&O*mJqgx^n0{L_B2wNE<MF0NM@QVl2f*=6m#Jus7XjY
zXB!i<yLX6J7sL>uPf#_?mNQ}$)*o*$-ZEaEV-onLA*M?JW=BV)r)!@4%YVbLJ;TLF
z=rz`(o;5hH1{%p5?%g@z!ykRX>g0s$i{~7i%sG2}fsr{Pj#wv}%3_k>y+Ot;o{?Gx
z#vlGR7tdaWFa9#FZW!8ugX1H{B;3AtCsVpLD|Z|N<Mx{N*~g^q4Ih5v-(YB{XO#jG
zmb?}t3mfnY2)1Io?(hc2A;6TAX%QlfBXJCbaio}uNsdvf1a8j{c=zq6JbWqf@=N!5
z=lzeF)fG_!#yHx|K-FlD=wdk|#+F_pHYS`v$P+;=yt3I`NdhPo&^<KNWd8h)imMD1
zQ}&xOk+*wmwb0p&$yu(urkC=wOQR>d=5UH>$Uonjodc#eFlsck-=KQu^%K<&J9N6I
zI)fxjFoupuh4$dy=*-k5jn^rp3!9jl_i~;{Hry+^SghT1C^pjsJpn<?)DZM2qvwln
zeU4@}%i8t85QWO3YX=3$Y~Bxy*_&${(LGusH_vbIy|8e?^VCrvcz*S-{VHP^amga7
z#~Q=>=^7ISV+Eh%H5@8PDSEncWvwknQAU|opTY=rz1cF1A9Ju;aCCgcayHNR+%k@V
zs`AuzL&~|@rmVYZT9oOM!ZblD>s7KRLfdwnpPzGn{+zDu^CEFmLu;*MPlTK;o$_=%
z-@|c)?|kP+4C6>lBbbQuj>CfkZr{4a$>9m}#e(^)rmD0_a~XNGUL_2wZ}jSVy;lc#
z=Lt!OCJ>Ao{*Bo&*jk^PmQ00?fiWtZD2{;?g)t1oq|-V{21zcz_uaHA6=R+x1Q~bK
zbDP0>m*q%9Vw1MK{m#d1H!b(?-=QA{Hf>K;RTyJvnwk(IQKD)>$AGntDpQloB+(Qn
z;qn}0HifR|eX6bOfvmfA5d%03cW&KcHlH)bfo=@BNge3NfiT9xv)W~~X7GYC{mV5L
z1?=6S7kQ#+bK*k6dOy*Ar^YrnX$ft{)p$#$hSX&9t$n)g)GQVte5&Izn6_7o7IvBr
zDsp;6-fzg;p}yC>^i|OYHG~nLn(5!}=DGU03*EecHoMEa`+Z6|^R)bZrkBSUl&xoW
z)cOz-?Y1W+VLn^n?S|+gRpq#Ka?I_6CCAGdtJ+{fPcjh`RR&Zh$R(>c<=Q6Y=Ts!+
z0s5y<$NUAqekvPO&8jO60aJlcv_gsu7W8*FF)Z^cBOV5Ub%brmnpL&Xmza^3Dk0wJ
zG-*uGCJ34VX`>nl7~fzI<|Nk;<r=fS!Bri`AHZ_KV7F8UGpt`?W(!!JB66F=u}%wR
z11=#_W5P%H>&J+l5f2U#UlC=%*Pe6{kf-OE`wi)$!<f6s<_eoUR7vTUu{7XC|CVWX
zqDZ94MaAS3nQ2SKQLe!9M$D8!XS1ZsS)<?I3Pi!z6|=(=Zm!lu>xei)4*pIVBuEm$
zXj7J4tv}UUa20lDHM2}ALUKf#gXlAqlyb8srplUMtCK3@GD2&!_EZfdjB%M7yEB47
z6m_;KYicG-donQQvX)E0dW^(LCxOkHD5GRDLw28(T(H4JUn^g)R2wq|!e=#vHhrQ^
zu&Xqu(3vV5xaRwjV<$?Gdd$?1Fa|Y0NQszwQX2Jh#K=W=PVxiobx$Q8sT0Y7k8nJ<
z+>DXhF_{$ERGe`GSf%!v>}ZlCsUDb7544QT+fuQhxuyn?Ol2)jz>)-$)WqZb>Izq%
zU~Qlhn~xp~VzFK)_{C)zw;wfPv<WC|fS3_<iHg9M<(X+Z<r!6T?)2%ThozWZT(fD8
zB+RIJ$0xU6A%;twn==j{<IGk;FB+_?p<2<kVBH)ms%<RVWK*18^S<QBAO?w;BJQB(
z{?Gg@H}AZ|HUtf$8UraAhBm4$v7QmT7QbvT5n?O2g(Zn)*luZ#kBRF6*E>S5Fi<_8
zk*sHSv?ADq^%XN8c=hG`y!ZGMR;vTLb&Id&JUt84#=}aDZ>UBXBCf795IzMe*CEfh
zT>YmHv8xBn?tX?LZCKp8%aga?rdc$^1goP1osfR~HcK`fK0M;+WPxj%T*$F2I>rDg
zVO`QxE1R-;OTZ8(V++M6cY1T3CZ_BhE+i{dR=71^@X>pZIlbHQ*;ilUhd=x=*1Np6
z4E;7xH4bAV^Q9xE$e1Z|9?ce(@<i{`&+^pCmUy$m<SE53JM+1ssa7}E4C<7Kr9ERg
z?PK@LC^P-Ge8x$VZgx#-nbJ;E+uZD$JjE(as&i_TWUXpaRc~uGJ2V4$qcPOzUg=P#
zgfsb>&F{pOX{yoptHh<E=h_vUrJ*apVH6mRKD48OEovg@eMLyws49SWj+E4a*)|X&
zVAR;kX8mQ6ili9mZd%&Mm%Mp;%EwR7Y3ca8fA4>Uuf4|8;7B1+RWttGfA_Dct?K)Y
z2;S-RQTM@=fdDmvax&coJ%5Nl@hwK1FSxmG*{oZ<cPv*kRtHO}x}mOW7V{a4#R8x8
zfu&ZNa$94<=7<_WwS;}wvE6LAzP_g2c7$wHRW!(x=*>*b(n;s4lO&~{#*lpkoWa=|
z6N|<huCF>SE<OZ#f6_?Tm8Y529335Uuv)P?Tr!_6n9pmPrdAe^vs873t14n{@RV8_
zu+e0VZN4~Jn;D}T0Hq{IOvDfnjAoO@n4eGb`6Ah{P}J+9#7AXE?4nn(S#w^5*NKsF
z)QRi+A3kQY*>d;ZZTfyhyrbK$8M>Y^1p0Bn6xwll?oD})ZJs9YycCeq7G-%rW<N~k
zfF{p;4UiHscFdcG!-FG8Lf0m`exU1nx^bkB>ZYEI)(gA7Q5ikT&d8I%0v_4jn#&uN
zf*4^0)ct#!oaRQd9K8FQ&6M}SJiAfDA4yoT*;k`9r@IX7RJ<@sqqc|g+w4TW54mXx
zus3lmx0jvyTNp#8;yR|;Xz^J|JFkj89t6AUSElQ(&GudsYgN`)SRX2hvxrsw?=TM3
zzM^)PAc@1n6{o8Ow-!s5&fr7PS~8g_)+wRSX}R|zY4S=aR9|6NqyA-j{ZuxniU{S#
zU^=Wz{+s^U4z$S!xv@D9GH<%ckk$ic+hV-OhApzzJk#WWRRsqPGDbKj>pH-U0zn^9
z84PArU4^GX+Lm~81#`jL2Dhj%vqlY%7KeykL0W=6%nNh_HjvUeRanEdCAM3IjGGOn
zyTsH6E`<2uK>f-Qq3W@0Nv&>*#>8xTR9wuo*-#B&HawD|mCFz1o_4p0(-AG~FsfOw
zMP~_$NaZHcSRpz%dHFS-e)Ngva9M3=lUPL<4cRp=g96bo8o`7fah3W_Ii*xkd2C%{
zjK_MVEQ_td*NCmMF_0x0rQa&&G&ic&DW$}yA&AnzvR(m2l2c0g_c_MJMAd{Q10f@>
zCT2=sE*z7tr8H47%tSYFnUaakIkH)2GEMhHU3o)x<5fCJn!?dz$ZI%(kVDhcKuAfA
z5XPXk<8dT~sJc4Q=0?Pl(x64Nm`oK;%zTBFO3OzHG=5G$4j@4r%cR4fU2KNK<$D_i
zrA3Ll@(D_VHmMZI8Su#g-6j?kpM1}?NfdFMU#>YkIKUfEiO2Ct5T}j3wW3a^*6e;}
zK1|CxB}mS4jJa{owp;li*{9`5(R_Xi$YUhpvum-*MnDWb+vlHfaPKwV`N6;E^FQ<R
zBoVy%W5V@0<LGgIMG7aF<~82`@%waJLw)=@H`gDsI9PIW`VvR0TU7M|TX~`cQVJMJ
zIA;+D=BVM|t6$*Y&Cf7AdCKPf_lXzhAc3%Hwb^tHB!@qo5!;Rw5_VRhmp<yV;47Nh
zoD@XCHt!j&p_(s9uHopVJAD0Xr_BHCJN)q<{4R_6oWrK#=JJxKSJ&KPhFw<FM7$mG
zeMEePcZL`ZE)LWZkeiWi`;4KUbKNYkvlac~2#jU(QN*}N>aSTW9Y^=?QY|Y5f%4vC
zvJBjenM#IWB&e%&rV57K_vpv+7$?k^HC&}BzZomIq?6BRBVKxLujZURyP#_azVziU
z^Q~`xizt@LY2(tjk*ZO!WIk7Y`j}$&BC<O$Di@#<MYa4T;=?M_#ui4w<RWP&j~$!Y
zIE7WPqwtoDQ+C7HOx2L-&vvx6VyU1EkR3H}*C1<CTAJj_JL;yPaxQC~4biF|)msn7
z5z)^YjVEZJ6+U~-d%&kD;7OZAWAM3|QJ3G`C>gt_wJQKtX^Ij7L(Jof*1$SNOyw~~
zC+sG9j3h!zD#tK}k%WlOvaNSJ$LJ)47-+XWAAa{kzVhmAo_%=1dA#PA|MRc$(i;y*
z8AuCbpqbBj{O%{b{iol@WOrGNg0wczWW50=35Ea?sYPg<;bhrxdQ@|3HUD&K8Iv3Q
zlo*D{aIxm{a*ef`1zT61X5P?L70&x?I7l{I%T!uPjQyxn&KPxSBYB@K`~C~%|D=H2
z(LKw0%Z)|Z?^VlcaYdu+cGECFH8rM`2@Heq?8zlhA6;Y(q^e1pnP)zqad33V$?-9(
zg9BFc8TEWd<txob&NOr)6j$QXXr#E)F-oS3npl_@>Z+=1&P~&)m|~tTl7Uf+^%k7N
zh?}BUtRZVuO>WwRCui4Oty^y2IVG9I^Jiyl`;IY0O<Bmzu$jC*l=3{8b*4RFY4>>V
zUi*)Iu2&P9F>DPim)h8e(9^}9K8|$bKs#&*F%nXq*2Xk>UX(o8Bqr1IOD1z+eCe~}
zGLs_`RfmikU`}sJa#)V2k<SEZ>@|E-q>W+Mc;+>*Q`+eSaYtRve7zj+R6Ijm{vJCa
zP}%beZLRoT=-(wt#9UXI5;a(uDSH=tn%oxsd)|+d>_BfR&XGtiFYWaCQc{MSF@i~n
zei+$oTMp-Q=JN{gXRKxo%gQo$ISaaE4C|<L&od*Zf)rXWdyzd!vT5kv82vBF>!-6p
zHRNWK-Nu_IMFqVznXk^?gEyhgdD=xMq-WQdZHM#$vrWhdW+up?Lr#{^2TT;W2}mWF
zrp82&egHS>bio*;nZsg)i!JGV0~MIn3>iC2bAY8%rmPrX0K3KV6ykfh_%S9qNHbg?
z3C{*(w#Bq9;wroejAy`Ah4ll*Txf2e(_nU`US&#hiT5t%Ey~lx;4(^@L%b24-WJrf
z<T#l;fz1;}lZA-RC@ojbjS~;v{0u+*!#_aAM7Ge#4W89azGy}SOzer)5RHK8H8a&J
zYHHjFLyh=G9So}qyv5-V@0G6RDy67dk8zbYvvrO4HNL5_O^s_Bd{a|ZmAbn*tCPqv
z!Vn2@B*s8Yqv}8blFNC0Ldj;HuHoLa0+{mgK=3~6Oq?afMBDas+fKDRePrynh$Q;1
zQ)lC(Zht9Cb_9;ZI1)nAQ0F)@#!)xjG-g}&5hP>;ui%GjMzdO;GHerlc&_W;jd{9n
za1t@0Lrk3yZM`RCMu9a-wgN-UXmiZ7UdZ;uW%r&aWM-PS^8x~&r}jFeBeZaSy}{K7
z+H7R=lqmwi>n~2pUioJxopMjqCzA^gA&Ou{5y+6y&YfSVPJksEJ`IMiHD1{R<f)j`
zn2@o(CdQUKFMWZ>?|zGWZ@iA>6<o7nbM-D^9P#yi{LpedtNHHxpODU;vl*Z9>Kkuz
zeDX8QnxmXc7=R96F<tjU2a^hto8gzQaPsOyXnV%9XN1ce!sQiq><L4!lcs|uVcY5R
zVhnCJr&=sAeW&JSgCe?axx_CIV6j5xbAq$9G4YvS{5odo`QzXH_pIuM`QeK7#SNEN
z>PUHDET#gRg3`T`1(zplR&Xv-`w=GtGHeLp9M^jAm7>R01u?QXI;5G;^S##PW@2%M
zPI+GrI_1y0Cu3Eo>zrh#U&pM`h=JpyhR2T;jkY9}7a%!T*5GW!xWLK6b8&OS_rCot
zKL6HRoLyY-@duA_Mhonrjrazd$|DQi-=tSy#AOH2l#8)a0i`^xwURSV_cU|rX1ych
zmZH^8XVVKu*{<$0h4z-e)$|<h)m{0*mBM(MRqjnrjWIY=;Vo3&Q`fqGVuTnSJ~ukP
zt{CE|wA-Y2AfhHgwNa|BX3L7QV)D<3#%at<E;fsSTG4G4TCbi%25<xeQ49e?YYaCj
zu&QgN>JNb!1I{V6Sb~}WSp<BwGr-FiCW$a?d*1oRkNNVe_jvNrbDpG2zW%qq#Fu~R
zi$oGO1dQ`Eb<O7NihuEs{s~qxozuKX>8&JSx|TaL$Jf95GIx(_4wg{c2r2JZ?o%or
zi)mJ}yZa7$j*dbY2HNeoqfHm{Eo1PN)fhH2^_DqXdjAiybL<oeQW}ghRoh#0xsZn3
z)H<7W$?O;l`FJRkFHAWj_l5;|TIb8(i4Y^KZ+fn;p7PQAPbPD`#d5~Ma>>EbA*ZLO
z%om!IYMoANttoiN?!8QGsic#~lI0n*W4LN3Ra;q%k^ro&w3e|h4=ge>qOgG^yYnZF
z#2bcztE(HvapcjH$D|Y#L`Ole$-36;h9XAFvrw|f&F*VTfiX33yNS5T-WNs;K2y7m
zvGl`0Klb!7(GP)PNa}@>KYw8mrWA2bvq5*fCwBVyslhDtR&)|AqSvGBRb?|uyM3$Z
zLw6I%3=n6^wWNutH&xnG^Oa>pX<C29mAbGnGVq))Q<|DmVOMU*3wvV9)Y%jh`6Exp
zaxYAM%?|L}#mbn86;~?ZX^WQKXO@(*OS?=zH>SjPyWwy?XEv*;o#SY}VCgGdNjfO{
zb&=iA&R|d8y!4miB3hU^CI9^|+3TmXLA5eHhH5^u->B|w&IOGxOR+qlg^3gl+<;v7
zFsKtOmBSnu?7}h5=HLX|38rtM0S+6It09c)UT78wp5z33v;-S4XBQx9x?^1p#%XSn
zGhj9tvj%gCi60?ynGu%?<L8+18hbX9OpEP}=HR7-yXuig&oQ?bB->*aOYr${QLf{z
zd6;-Rn*Un#JCl%nqF7`X;7ps^xtbMZUyXWF(E#L$tg*uB{d+X41zmp)L5SIPHcF;v
zB%=*&Qk_E(A(X?)=4{W>th=6Mw}|x^?+`n~`U<K{ne-LLdz`H>w#L;p)>Zhr#&vUC
z(@-}Nvw&(@QLP&4WsQi?w}I#)W(+V4XwaN$0My?h9~f?b6D-$~Dch7JIr}k^M#;Eb
zqTROieUBt<PKSOZgq}W(>hw0~)HIGGA@)QO!Z;`j7$cI#tWy|s!;<z8<f47mgW6g{
z-7HwH?$CD^gcxajgH2jUI+Hl8To&Z}OxM-sH023L%=cF}bc@)ijdd!8TbhhTb-0ss
zE$6({2>~r`krVPM*g<%9cE-WsT&JL-g{8WqCT!A-TB2mrr9FL{v-)pRAW76|eiRal
z2un7Eak((E`5Cqo@bVLW<w@8yEzg?lDkU~&k2!wuI@_x=By36U72JHya`}jx$Cuc!
zWj(yh?AASQ-)ZQ_8W-jqAKzg<|9`XhZ_nCg*LffI8*{Gp9`@efp}+1%qtQ4KBuEe>
zD2Nm(iBepWsmPY%N-R4Sr{Z$#yHs+YOaB?Uaqg0Gs#29WzOfxUR%|P#M2R8=5+|a8
zMg!>cci!LL`#r2R=NP#dW3Kgn4az3U4FeF@1ML3x;XSOwT<e|Be4g=)+djcc9+Z`n
z*A<gie!S~EnM_?YY!`6_^!!t-KV__sBX&HXhaKhW2C?dPe97dqji2AnYX~K~rB$Uv
zbLRn9xr+&e(sk}G_}pLoOB@E}YyarK<%Nq2B(J!7_=L-=JvnV)3%M7xc*<sqg)AKh
z5+!Q2M^rjYGet94Pq*4oMupt5x^;o{z2B*m+uf5!fSS48G6P+d802tEXcSVJRU{id
z9m9Cw<FCBHn{R%fqq<en>o6^9$77O^lnufQXKOC^1Ap?>uX69@m-y}9{>yypPhRK#
z{fCGMr4E#?V|#u^mwOHzIUYxRIm<kcE!-5Yj>NsJRBDZs*tGV(wqZF;F}qGd;GR>f
zV4j?{{Dg4#G+SVE2u=6(ro=Y;uS}-Qcx}A3hX|QO*C$r%p1#XorXfP^I!{v-W8H7q
z?QSqhln9th$*=Qj9?p|d#*`WG25FrNohV%+N{vAfHIS!XWSj(4j1nKe1z~52p%%K_
zGY%tTDM(Qgo+7Hn1F>qYjAPt$veN>w#IvVYyzvje#+N_-3U7b^KKJuAf9+rYO@84E
zpGI=j0VAx|EB22s`LF-){|SdDH%J$xs=6iBs?u@fH$VRpzxJ7*=X_IPDA=)J`y-k&
zIrUKr^3mU8CKakpj87SAvwY&I((Vvw?$8KI()QWVQlX>QY;#7%J$y=Dwb=>as=nL3
zr*aLM``At$5V9w5Pa#af8m6OROyyRM0L0c$uQ8Q`1ks$<a?IH6M-IoQJb3t&@4fj0
zHk&oKE-tuz`!*Nn=d4zJw4>>LzfvMk^{GDIW`#AB&?@NIKAW5rwfl|R*(KVMH2Z30
zVs){I(F|n6vGA>Lex1W`WR)W=+bn8ZTYJ0K`lof2h&ohj84&H5+Z3QbEh;p}Un4Wg
zE1|koPtn5hSU3(NhvUHRc%T%U?%jS*@Q}V~*npQkrFLgX6G^r>4-y1gOKfX2Jf3Pk
zW6tqIsk+llyJeTyhpf9yq<yZtY;*4TeRJ_O_tS?RBaMtsx4n~jS<J&s)!k*a&I>3f
zbv|>AzC+uSPxc*}Ixo|?Cbin+=D_oXyA-8ZwZh@Br&?jPS+VJQHroxWl#tdsaJrV`
zyy{68h8VMQdYpaGMmXJK@%<M9t#6sgyu#rtw>l9iznQyL5M<(Htsm;N$})IE?o_g`
z(&lz!O^B>w;<W-f_&ariZnv0jz12j2=D{#}gpDG31$G-53|V(@c809hvAvfF&oDb6
zc7y5-kjSfhU>)4uqgR)tCwmx;lnW*s#0t3z_TWHzHB<ABwAlvXbp6ZQuLtXD#p%q7
zvf0SI4&Q7H3fk)7TFs?Rbzv|s1dzpvMJz>io1Pb6`y@AyzTuPJl<_Qyb_Z@br<Iz7
zQ3R`ACt0Evv4zz$4Ahh{8DS7K`kPkiklcAvaqdZ}C+Cjbt+3paQb+20a<?LPYjU?H
zbt|Ovm~}~sE=4`7b*K%PRIg1j4}6*m>uEIruGE+*TC0<#P-RfB6IAt^`&dTrIY+^?
zQjY$dQICv+C+k%$XmR3M>gb^kxQeQ=YG$pzsi==P2+xvEAHc0n>io_f_Bv3@2Xtv5
z8))^KTq`5L@bb&-ZwjovD8DjKKH5aNs*$C|C(S)xYw~^6W>d5|0HLKu`r41qw|^tH
z-fPot_JzlnSFG;cak7b9ifSix@nMrQp@{ex%YEm4VrAsxpZyr0_@$4tA8vT^g9ki(
z>x%u2U^T}C%E*x<?7<G!0*OCTJjWwaBM?xvP!E^X!&5%?>0jjW_kNF8zxa8|@fEa=
zlxhr5AHdm5)Zsp77v~%v3~ahvsI4fakfcOS9jG4Mrk_CT(VY%$CK}|WO%Wtid|h|g
z>H^!YNw@FJDS+xp*M92M$wrM3w5d)oPm=_Rf^12i+WSkt_1C$6@|1VJ^G(h+TkM5f
z?4Dln<Ytd>j@8P#E38yWEAa&@t5~TF*U#9Tov}T;%ed~K&)!=z0@iZ3rC)6!cZekK
z`Dd|+h1)8LT5R9%>WQ9FZmuc2?Y1+dcWeJyf5s~>pYhJy&!QH|Eq0n>@}!j}gJ;3p
zS{&f!;bZ>j^7r`EXMT=f`|_{y&98lf@BiQ}#&KXAkKDO+oAY~jxVbrSb-g2%!iYsZ
zY==^qEogpJ%dt|&9ev-k-fZYn?`oFn)Th(ORZ~7~B6iaREmA%Ilux`_6^ofzt2u4;
zt*uXBijcBCXZk+5dfLx+WAN5Sim_g=!if;|?<rHvLsLS_q!Ul?&Iz(6R$}zAs40;&
zeyTh)uZOpQ^aU!DShPf`i1!f0==&AJc%Y8O7b6U`D3ZNIq#gksk!IT*3QrzA<o+A)
z@yoBi%v<k$K>x`#|K|VfZ}ZYCcaW7}qle0^Hyz`<5BTH%_&?|A_wNS@kZGY-P00cm
z7=QWmpWu8olIW3DX2?bz*S=sUAe}Gp{Wxwisz!=17N57bb~nwj<JbPwaW`z;lch3*
zmsaaMrzf}Zaq?QNtw-d{iX?IlHECi_tv!p|MpgZ**=lEoHEr5NXiu%?b24iivNnZx
zc`dXrwoKzEJE1uoE4$re?mu`;%8AW-&Dr^u^Rq2y+b!$OnpM}+^-Vep5?~hV&K99b
zjquG2r4oy#Q$BZ-8S#B^sufmDglEsL`R;eW#e@4FAOc;N$XVE|*R1;$x%K>}CD6{b
zl`Y7tZDWdjk3X#%#eb)Mb)-z+ugH?v4+Hyg<T#8R4@Zu}k!oWQHT?c*5yUgOoMckO
z+v59lq8e!wx5!tEmWmN-HH|l63SEks^1MJ{vkR}0(<yhO^)sfmR*UDUp142a_2V(^
z;J1a3Q<30Qt(_JSV%}P`P|8SeYrltCnkv$sQ+(t569L)xNo}fkZvC8FMt~&o4h0h1
z7GzUR*&hy`grEGgPpDpQ)<lI!9d7ldO{sm3Gq?BS`FX~VWug5ij6atI6=1UT(iTVZ
ze6n5q^Oba3cYqQyew~W%4PQZYCCTr!k|RW^h+EBQH`9zA24qz1EFpd8zJ}a^7A%#p
z0`};IVaE+=wSsN~dE=H`6D$?r=p;2Kj9b)nj@l8qlgZ;#^lBuHqi@IYfaw61sE;e@
zFrfDiq&sIm(;w@!?MV!?g3(R;+g47!xJ^Ne-%yj+QDDWNAD7b9uPPGWoX;khqW(*B
z_4C<Z{uSQ(=IbyjmJ`%ESyI)^EjL6-r?mp{=F<X8K4C$kcjEN;7U<-?5?vY@TmeGd
z9vt2d>3jd6O?}vZGtxn888a<5&^fVc^|!GC>h+~T2B#IfIcULK$>{VUs2X{^QD@Nl
zInK6@R?FJ{PTZv1O-W;d8#WhhE3B;_s~hdKO(JYgTb7)VZpH55dsw|8TL$T<vZCvZ
zet*rql=#8Aiw{h}q?;@*Mtp#&pD%6UOy-s*i+Yr%>OfnSm#L&o=LOLCP?|p+Yc|Xh
z4=$f^=hhkYU`0`)|Den>0uvB*r4KuD?-RHA3%~Q%=`RLq+0i}9^ywbAZr$beue{4)
zU&F>8{Z(^9>ye-T?Jx82owq*v+l#c;9oZ)@W_0IgY;WBqtun*mDQVSn`?W7~`Q(aQ
zcfY{)g)cMizRGY|vD-aD*Vpv@s|ax3y~Nd%57@u>F1K#o_K+$WAdR7LR#XO3?tQ##
zj@8%J!X!<B3z8bC@rkoZTLq4ZuDEleDju3<kt!KB?`9!!#nDM>ig^9s{8#>U?%#i(
z;rfd0b_I3K?)t#x^$y)+=nYvHQcA3IMN47x@+;`D;oX1sCY|hA-P^LcdrsH)<nt}`
zE96c`y}rg)XQcJk6`<nRfCq0>B!iUbyEz5=7}i&^^i)+(6IP{5%4Xa1i=TUqr;nbo
z-_Ht<MEIE{zvoEua1AHMu<1IU9Y)@H^SiwN&Rcxu=YF2Q{8#@fU;pE;@%ne(;KBVz
zoUO08b^A7(7jARCKX84$=UD4x$I|KAfN6?ajbj}-4oAAaW3%0|={HkwNo&<Gk=6Kb
zn*crk*>+Mj*c4b!7AuH_leqR$q;sbTL+V6WuY0;K(MSEJiv*eVx@ULXvpL&3Sv4h9
zn{2rnK~6QSjv_*5UL!b~xp60glw|rWjdjt<m*<NGE$Kkb2}uE}2x<do=PU2683BH^
zdeyHJrt0abb)Xz!H{Njn-S>F<;2EF#*sHwt^c}wVcRt52|N1YnIy>`J-f=)N&ZOh|
zAAEy{|IJq@&z?aq)YewaLSa>3iXF$yfAwGd39sH-u{no3ch}s0VdH%|SDAjZqU(k3
z$FT}#jV!m?>-cr^Do*#{P~Msvr4hj=Ef6yLX{J`1((KnVrIfZ&)719Vtmg6f=ZL8I
z&#kVp_0w(lmNwR!6xh^zX}##DK|3q4t&13hv!JD%pVYJxYOqF9pPDj`g-4H`@bJN7
zQWjQy&)M0A^Yb&#&(7JbH({~&_ta`0sOTSd&s(48i^&PfM5<$<Ts*8##dz<%clq|~
z-{9){%CGb4JJ5F>yWNq#>o`B#aJE@PxAHnxX;}#}wJ?g5uzB9ouG`bkn#o@4T)s~k
zlf>~ja;$|ztsI9VW6QIc)`WZkOq=^Dbw0JpO|fp)*;6vq6lZTAle#=FAjI~x1mel{
z`rMbyzieZ*^)7GfXuAbW&X4&xz^tk^$#_zc<9Act<_Te*+wj;t=h7Os>n`rg%#Uws
z=KSL%o`|&X!zCAgPtv**L}p1VO=@;_70OUK91g6`w)3^q9E&h75}oXeU$fC3E6DP+
zKv|nWBAg77e_`<FlAubLrXEo9eKHzTOTu!&R-vMf1skG=nzdjFnQz`H>|ni-&U?}}
z!!|>gNj9RgMtZ+Vr%{lDB4-C|y@`=uLy2@-tm=q#8A)zUrRyNC5pDykSUI`{Jn|25
zOPJ-5_$!u1)JBpf<e=1>N*5QA><{GQBg#g}s||&OX%{VF(!3&QBcgR$ne&3S!O04W
zXiFqaK94W~aTUu_j!+@{P+_{wsgj}~m6t#BD!1?5Wjx*|k6@A_k{|qoO9?&^H;J~o
zXA{|(R{BEGZe2~<YUb3BO3ZzHDM1}$aUyvjlw@iiA!;;<HbE&`?kLeaRlo~~V_U5y
zwTZviWcko(0i&%XL)2pFiG3x$bo$vRtX_kaVzHp@Xq=pA-Owz8lMN}DpG+&`I)6x*
zKdSF~4o@$s*AHVQx<^R#oWb~nU%K})Py2J~dPOd-P_<r$NE`QQnsj(@kuT(Q2x8ft
z;(p4pF!hp}>p(!=vfGjeC4p#mvKb$MS&V1<9bLCYt9ek8DRem$U!@ZvWdSMNeRa)m
z{ab$n(Ho$^d7|5|;qe}%a_7z&yPKy}O=vw(O*y~&BAa{4D}UiLjOpehzsIC#T5ckw
z&6-=Uev0j#7r})6?kSjZ7>?Y!`*Hg8B8rmF{}p!E-(bJH&oCqompkr$I&=H(1xUrx
zz|;Hx4d)jhCv~?$ULe^>F%U9cK7ejQRX+#qz*d-LS`Ro8ZxP`&kYQU3d3^fdH=8Wc
zG_GK6!LFsfHcMJ-7vvT~vhX+m-v65a?Em^7csHl@hONQja_6?W3(v7m7zcV@{_HRC
zy|2H=AN|vBa2TNLGhaB|g7054?v9+DZ&_cQ)7{z7-+q(3pZGZEuYH2;%P*17FMRRH
zqJ@E0lKLW&^qHKz)y6m!L=v?K(s_Vat{LILYoB_NUwZNq-}=@E9EaWwB;uo9n?kGJ
zHX%#(31CjF`;MCd#&P70uYZF#-}p9P_|mWOZ~ygwix1v=z}LR=HQxLF+lUyO%^5FT
zTyV1+*bM`vcujP(bE(A&ecUfl<9IkS3<Fm@W7VzMZqL}PSM+^HPI<bww#k_Uw~%I|
zl!=DUwNul-Cq#gmoaU72A|1QSnaySut&+knjgh;=dYgH+AL&*XT--Y6+2vJizs~on
zHsKF-VkmC;4JVw3=ETGliQ{a96;>iRD%RZE>|5QkJ(i=VxvIf&fKHH_$Vo|J^qT1e
z^mt%bk34<yh(`||@X25LJYW2kU*K*Y__g2qO)g$}fxPMwMvl8ZIVr<i_j&jK{fG2#
zT(PPneV52dCdbG$VT^4WyUhDX<LULDb5XJ#Fnh|XhwXV{d!D&-d(8{C&iVXDe2-ag
z){Nsg-EaLiJrloE7BjUmi5d3Py=7ZlZPztgfue=t?rv>yhvII*-7UB~6nA$ouEB##
zf#NQ~r4ZcRWpiEk{XBcW??2ezk{oMgoy(?-Ip&xv!zDy|nF;stZ%I1Z)Lj4IkdpFu
z0!u5CAJm(4TZBj@=bB__50D%Rn|J+ODHy^$u`~P1f?*b{?<K9R+L#x;Us;NSv!cEM
zwlnyLI_V#ue~*p%!`<e_IT_V!I{CWxFZSUbUPq`lkrzpV<X2^dsSXGzbjpuozX?3%
zyFhw;n|8-eJG!;)d~%tm16Na4hK)#X-*OBk>5T)kt?@ph+)GW0*1KJkFW<SjLyz0D
z(E0ScVpF`SE1S$!l6rjyUbyUW86H`_vk}-dcwGjQ<YpG7y%f#%WNw+DaZ8qsy%z&B
zHG)Y&jm$K!jS1g<sy?xgZw>`KnWXb8y)1+02KBvk5pZTNpOa=p4MeEZRgR=cxin4P
zuC+S`KtM+E#z1dJrtH$vF|x4i^6tx?U)d`dOB(SSTbM5IM|Lf39~<jM!SIc;;aKN*
z(%IN6Cg>zfg0+)^RAvMDvW)Jo6)zN>y-!=`yNSQn9z5J{qMPG%3=?PXVHgQq7zTqG
zNcfaZBxlAZU{JqHcg$l+liOfeW2`LxzMnfz$jq4|4{E>g9Lz-IOm-F^KNOH0C(B4f
zG74-OPE6*EmXS_i@5gWc*cih*{quVOze8}Po)C}culJiiXpz6cq>uPMzv)-tFC+(Z
zKk`sqlLnTw@#I<Zcj*Y#&Jo&a^{LQ|<L<qiZcC*<C<mMv6@4FM*_@+JZOiXOoJC(m
z_PlHP=95ly`zr8kfqCg`PN{%yTa+WqnQ1vrIJQbwcj)a%Q}tVJORgs)J1HdyI^R;w
zCnc?cCE6~IE}mNx>Zs+AS-n2fi@&Wa$b1pLZS-PLOS(G|{60;N^L%F0joNY88>7ig
zA%36PsRRDf%F$>)Odtj{NZkD<>27q47#=&ny8X<xept#WL(0nm9jv(3+g1`V&*n)G
z_ftPd?Nl?KE=HaSRmf->Irh~fokd<sU`)vSJVXR2NO!$}LP^SXD@O@5SqLzsTMPwN
zIDB@47^u&5Wm8m>-qLvgt|zybPpy6dNkpOMv8D6U9X{g@eEHOso`zKE4up8@hP@dy
zk9Oz0ufwka4}S%!eRpsKFT6(kI$|B1?!%U=JNs{n)XAzlEEPk1y@c2e+ZNvmj}!k?
zYgKSMt@n$x>v|@sJDF%n7U(}K+}+&9iGyVL&eXU3tnzy2ObUldwwRHGLP`8CS<BXV
z`|F^U1AnSza8sk+Z*;Kn@e-L9oThf4sUDv2ZnCklbl$bo$-HEy27P~Dhu2f``hD9|
z@9UG`hT1yoUc7C`hNmCtY4muMN;0#}-RkQeYV)hb0Jx_LzwMAn-^){ko}xTFb)55Q
zxZ~q(j;*E*>vMF^HtUT!!&dNerQi+u@+GgcZocB696MHDtL#CLz;)PzQKX4_gyXSJ
z`YDqB6J#O}>Tl<_ODDz)Z>p)R4L<|#>15S5H=OoMA-sTcj?STl<gQ@-b-?>M_xXM9
z)9CI$+5**+uQEh-zK?C*mv4B->&xQ_<=`3yv&2|2xT1_s-3FYk7%;k!${*VMV?4ZS
zr1D|ZtlqGGvF<=!4^=X=oi%<2vm?$_OXK2@DcERBI;td#)6|t5mz*}dmR`(I7Qd{b
zXYmHxD0LRl>~>34cXMQG%a!s`b=V0pD2tw1mCQ8P`G7Cv=>-SM!stXNRL<USczAkb
zN@Psrem>4LHy5vpcD%1#T>YV=scIql<0-#y?z8(}^{ywXm)+MlpQ#>Ob@yk0!A-B3
z^tvfO_AfCJYSih<Zuk1@r~T{4m8BLQ!5x?lL}_=wm8H9nOc^5!;XmY~`X;Q*YY6^I
z3w=deeGJZhJl206JZ08@iC+JE>vu|`fRZ{yK0=u@4UAm|dOu=XOLykcTn}Cr>cazg
zzB$PjI@c`}-=BlVB<l}>NO{(=mdm)%GZm_$MQ6(c3%;+e{hNX2h?OznZSQB(Mjxfp
z8ONOtKWAe9bd;YW9s>OZ({s*y<>=}a(CR8{OnhthISg3Fanl5;!KeFnP@85GQaBTR
zvNcE9zFWLz?YzvkyIg}im5YRke)8YiiWHo3tW4@DKNTv7>b|&I<Zqiy8=s>y^YY^@
zW){R~x<l1!Sv+nQAZ%VES|GWJGX0^liiHtj0=9@;z;0<zeh)gCT|T{kY$%s8@^9Ww
zDAW{>bqTnCp_-A*+=c(HkXbyma_PkZ%L$ulR@WwNMdzdyyFPj-_dRVu3WK%%>0zsW
zEW>AEIj%@nXUdWXo7F$3UbWEYYmV(HTdmdY=?tYlPR^1}3HM%1@1VwtR!5V6{Ux$)
z#4U+&H|<W%@#N5cUc{IAo#$wm46oehBMINwdnxDyjgZf=+oburJplV!ix``BLHaLa
zw<hWoi}<K2hm#$$;~d#^K`5fGcoWfn(Py2kg3L%ow&xF=^erR?#~pFFKR?#6SN6hf
zaf$0E!&3}rOQgFouz`Lu)x017F^dHd7p*p<is(Kz>X?rn+Rk~<C<{f~9}V}nog8tx
zIA6L|?pMTfnw@Jkb(4n3;N@S3EkiN--%a=VbklG$Nv}18Kzj6LdQo^La>^_6(b<Jn
z1g;snemA}RMWsc}ord&~!p?|-4}i1=E>)3M^amyw%;~;wY^~pNVqr^R%^q1Cu{jLW
z1QRZqBeQC>Vbp57X+GJ@;<P0d8YEh1QFOAAd4jqc)*QOf!ijkklSq&y5HiXcSlZ?@
zPc1Wl$$3X-LJ42k4=3qVNQeqoN&LJ+jo5hpj0@-c0?pmBm{P{>cJ$WB2{H!{i37_)
zKRB;tD3aRH)iCvw&PZXsb6uPr#GXG9$hW}%x;8ng>`7xG(i?H!mDO+&#;>^57?qaT
zEwSf2yX<;Gc({zIod2gLGUTS$wG;U|{g!U+HWIG+L~4IqHv@O{_C9jvz+?v3t(`Nx
zj&zNzzR_C`y5rx9;srFN-@H%M7xnlHyIgP}e>4tNBxbj<nocT9jjx?|#+7~F=!qn+
znJf3B>WhMR*zD^y9vj3>0#gpIx3H3M7dO~E0%S!^J-}wM3+^*r=U3{a&mPw!gAeS^
zXI6kBa^x^~(v)r0$^l$z0yoKqJRH-{mscN(&rtX(%Y6(!xXYksst}^;C<$Em>2H8J
zyT+bRy)%y&Vb$w;r8hM3G(QIvvcCAe-hE#4XxQC+%5t2G=SW|F8S_i8w;l1jd3}iW
zxUl}aZ@6{-);`|S_U&xw7*GAtt>xMNZB)~VqfBts2%p#4Y~c9GO5f<9PqTDof>S>A
z<kYXmMtrROXqDh3p7C!iT{k#>;gst;W1PA6jK0-YKHHT}hx)353c#u-bNXjZBi-QR
zVyGD=it%;_XJ_B?x=(hbSChv!SmC_-g`lgqM!oYHGXFN2$|o~4RsCgY6NbeKrfRVL
zj3Qx$!w$P?QW(_#t0xbhGxVcoPP~)C`fcR{)^mBuMAgeymK0D(l#IQtE{#|Bs&L(y
z-^F>a{ik@=o~2{%-lOcvT#^x=kE2+wS0j%0_}6i|zT=?*sQ+_Kfl4(fw{A_DS~C;A
zKS&F?UK%2)+bD{3m`;d2Bts6fp%IIlj59tl1Q?RqE8yF<2rijd*EzP^n<L771nY0T
zJvSa|+*8aKnSR&mXm|_v7Ywe?Nw-}ux;amQ)sudw2<G$BfXjodpj|1->Zvib3p0mx
zxepL3ni$`CCjdy8Bo`{xajl&22j_Lw@ANH|deVL!@9HIao44CzSrVD(GUlqDqe|Lk
z(91X{DUT$%*)r7erQ69So}&K%Gb3=ly~KdRl06^$_I>VANn;%ZUNs|mrKK1kFFA+M
zZSZm=YL1g)S1lJ*3Q<lp$C0FHrt^4p$y4q9V{Qx3x1X%ZpO>Z!UiIBNSfqG^s0Ka3
zJDX1dk&)WgYMFGQdHQywZngFs$8e~R6sMh+#Hb{K5uFfxGILR}IF^i55~?8E<V?+<
zNo}+h7UqXn&c2fSW}R#$UScZ+9Zi-4P;>PaEkP4M+yxTxoa1V4QElu*zIZm)q9xpK
zStZ_a92n^W?~QaCpz4rA>$VlSc}K6FcD8kNmu2ou8j*UTmglMtJ1Zh=`TCh@>U8mo
zAtzIEkTSZXmAOQcFK(xzpC__0+Hat;oo;aUhp@uvHr!Ot%|DrDmdChbw?~5v`HRtz
zusZHc70bQ_lGwNQl{w*Q|GKfXN*{UM#EF)Kag_A?0NW48g^Zk>dFkhiuS5EeZ|+dg
z@~MTgbRX&m+vXeiFz1J#g3u@OGygX6r%P>3YWwpKfCIRcvvrLLtVM6x7Qos)?zvE@
zbdISRK1l|ZJXYU2{%M2JND!C#vGSA2jA7_U9(SIKEhiILONh{tJBShs7wyJ&=wlFp
zXztIG?7@iJ8d)eii+IA{>vsj!;!P9e_3?EKmBgw??pbPt@#y+WnH)?KAEwFXJZh4z
z9GG$|A7mB+OVX^13F-=u5V}F>9++h%Fseye=!Y&lk$&eRPiubn`cqndzk=ZnVZGVs
zma*7ANPlUwupzyKYUwjgyh|I>tH84`gi)qx+Bvtqm9_c9c;K}3Svf#-%J$0?(Bk3G
zRSAKz>?|ho)D9ws67`^K8=80=;BndHK70Dwx%|)qsD{K~F_6D#q613W53F#Juj3iz
zz>!*WVjyB>*IRpQ=XdCy(c&UToobUYe#tKlPWFPCAp}Q6?|G$RFg>hp$4T<X>N!sv
zP4Bj9<$lxVO-PJh8X;v3m(9fhtJ!AT?ePjp6wnL^ht=u!TIF^I0&s5?KfT$C)y=-#
z^|-49FTvLjhrGEa*W(x`S$L1k6ruiY)hpY^)$M<->k0Jk-Yns`xxj7wy75=N2TV(!
z@sl08FgbROnYZF)%VD~8Hnnq0?@^Y*>rU4|@99gZ&Wb?E@=9@Az4&(6K5MJ97?Iyk
zB4Xv+d4s076;_=t|Kppe0d41RIrXi#robhNA14gk2R73?Uk3(a_u7!I&OcTOy5~lI
zd$Q8-vpaZy6t*AVfMsg&o|y%b_pq)F*lV{nQHCF}dm8H+I@J?|PI;Y*tk2jOtk9`z
z4OXyohDxtpO}aTZeZ8}KT{=Ar{^k>2g`4|Sq;NUlcSX~F`OxHX%4>JlM#8tXJ%*Rt
z)wJ_%wXLu1RIkpX6{;*jl4%Jsd>rP&R?Ez3*PF_<k<4yN0!YJLHk+zHU=<O6uPChA
z-JDB#14?(fm%U@>yL@tc4bX(m(kRQDeVWyZ%r5&cS`rj_f?Tj0eT-;(e80yOIJ$ce
z5!{koMhiZP%<bY-PS{>9PIUI0117d-tcV3XMz61vX$&~Q%>99lT(iX{2+Hm<pk(~{
zUxPpJFEDMB$XA$lW#y+(ibwL?Te6pFemKdM$F11%LdrNQf|_|h8qi7(Z{=(7e~+Ok
zipe=*2v5pOm0iECR3~UOH#ZP2qv+M(is2nT{|Q8*#k30!UN>G!1Kyrutg*s&mUVw8
zY<6liTBd1TYr5QNegha-t492_zhzxs>bpnBc03@cF&K`q>px9bpB>N$j%h^qTFf@B
zZ@iE^p(%7;Fg>7FHTx(J_Jxbf`J#f9LZ&n8h=YZK&{Ll0hK0|ZPXd%wrKaSIgFX&;
z72x|FzMgeKM_&88?%$rM7jW0@?cL6NXaDQA;GN*iQ3^&a_q}X}#W>x0Vmh^KUGfH9
z|7PP3W?5lpv%N43jVYMU?wEaNsjDTcq^zV?XiS`MqRF^a?1%g)Ne1v!mGf8@HMS&$
zft8vi)afLtCV(UnV>(aHNA4wkG2UKCu?zxHt&rDJO@6DkwY0WU@5*)^>{BX3MkrbZ
zW^S#%a}mc9-tg)lp%rKqT?g6F>wEy@?#~gZlY{Hls(SMdu3LQ<bAz5oB_6nCDLcXH
zNJeD|%}eX{{A=6pJ_d{wq0c$14nKXxn%2a>-Kj-dUUmpROpS=D80P^pD*AB{fJ~jg
z)0LqqGrMW1A2U@H1(&88?HybD?u*94cf{;7;a877aM{?f1%K@GE540<98f7QHw?`Q
zk;?$uWwS|ynZ_Ms4s@GQ`(CV-1Z6Lzw8%FQF<1pE=3n}&rS0@@DmTH1;BljT+&xsC
zwKk76!g@VQGE)Awx$mMqG=|Eg?0z@>?FHnQ{`R)&Km4zSes_=aVzagBwBY4O((=TS
z|8bU>0gDKuOl6WeW0zXAoOAQ$fRZ-3Y7x1WsRs7Aqf1A*3HSY}KB;DYZE81{!Mre$
zTa*OV=Z|W<4@hL&#CQcaDxO`M+Z2P!LFUa^8~9~`G~yZ*dU&6Dt6aaMtTj@xYTLjJ
zV#P!VjO}gvJL?#_{9IuzsQ|y!2yioNx0D03=eY~f*xI5O5tBZ8`usX!(9)W=)v&5C
znD+YCnl<GevdvzqbOU#zkgJ5!&yai1EpWeodf9)tPCj3%)S$==>Ng?fu_-Uv=D5uZ
zaw!1myfxZXj*9apl)Y;5Xp%L|ow9p26>|u|bnQL2?YmkyvH7-zM>M3MjT-F?h32s%
zNm5+e8@;#vlMp^X77ON@I8WP;hKdz$vu@lccPM{aw|@WP3Heb83h~-(h#&MBC=qzU
z*YENhn!KlMmtna+&3fn$cb^b%-4_u8khAmZCDf#r+>BaYV_CA3T?*M$WWD1SCSn@|
zN$_EXWN@YUKR0h!*Kb;{&V0^OD`;<iD?YAKx%tWc_3rDK=p5|>d0Dr{Xf;;a`DJZ%
zi&XiYMJ>BcEb1R?q+cwhzJAE+8Xd`fWZbRxy%}(g<!*C{U8&|esuuP)TXH3_toPw#
z-!up)#&JK#d6fhBpn^+E^~inl2-l02;E)x%0*_q@b50~HkQ9xZ=~q^&+pjJOrd<cd
z5?@gj1l@b?J9sI*{v^(K4J~%)Z^phTywc_RY^#o&j3DXXnY!D+xt)5pfrdD+!4R7I
zLMb}Sl&(c~+zHF^kL%x(g@=i{&N*L9UpD;CU$?t??cRsD@^kV`Qf)&fI{NPdX`DOG
zQ(teo#u;)SJzNKdJ#yX8mA_qZ3S4I*x2;p6N)WeS60bi!pDH$7ygv2XIks`?X;TqD
z{moc5x8hiSJM9Oq<x662eO6cdl`4((mS0^?G0xdd8I#NhL_T>_rw)4B9<JuSIDXLj
z6%Wg?H|V;lxg|W~hBC1yDS;FVs7msPjR%vm=QPlI%B_t6{l&M8%10nxz0G?>*T!!b
zdwPDgBZ+40@))rEZPl!PX>u_HnrH9fGG@q^zf**VPFM@6kd|jEV6v|Dtpd`1TB~1r
zd?lv<1Xr1f@;G)m^zm*FrdTGr<<=nbK(ai?3>S7QAsq->tDhAal|)=<P0!TG|L#wV
zK3iz-ejyu*sE@?qTd=DnMD>cm`o<qFRm*-cNuB4QJ+p~zMZ}QVuoHZY2aa0lYTsBK
z$5_^XzGHamvC92B*TJ;w)|izGx}Q$<jh}cuz|pAYY`>i2^}Z%~{(AZ{vo6u;lp9H}
zLpr$kQAcdWR8xcqO|V48l%hxAJ|MVnOTE^zRvZv;lyVI4uItdOzB{UZ<?*{*e_p@P
z_B%a&UN~WX9q@Zy-&OCtqIw~D-FS_zeyOki9wVX?XTS77vCHPSmc>{;-e{{XtE_?P
zG=gt2@5Lv#DS#^fNx3aGclMS_-WH{n)`zDfH@|u_#|TY;owI-sOuQd%>cqjUP}t1R
zAy9%}mUHn9A1~t6X|@gsjO>bc_&Jv6$Bu1fgeL45G3S;OUb|XV=0a^<rtMIxo7iN2
zmC2-#D-b^aUDWeIKmkciSH7@{B2y`HO6GLHi6@38Qq|RQ&&Wz!AFz~eIU|?$&_p~C
z-|H~onW!lhYppg|&ro&i)qqW&gB-0W(p2T?g&FrnxbSnFW;|OdI3HNMx4q5hntvuY
z|0agZne3D>P%?ya(!fJjKBH4SW+lIrEq{2);qzgv{OIZy`v_Maj$f5HP0mRPXizSo
z4;h?J+vzx#Q;^({(ml33xV&0A)~ZeXTd7u!zbRf((0K5CXqvs}MN&})$to9J+pg@1
z0P`ifGU}Y|JMQ%5K5`2@&--R7JR?cUKWAn42Qq2Y7<31~B4IQB=Yt^G6K>rE&VRrC
z&(9=8NJ6^**M?Xc@CW~&9p5&+EBh_1_<!xH0E->a`hOe&$(O$7%k2Mq#D9))rtBa5
z>+Aoylw3qc7?=O=(vYy9R1C;U|7T7A`G>zHAO3&8v>&&LYd`-vojV+?$7igzRVt|c
z@6mVRU1_iLI|Ge&rVkx*sBx5Gl%l^!pO|UmzFc)V5j@pidLf12r_qBkN0C{D-l3&8
z_HA}Jt<Sjd2Pj%?A9zkI+X-&*uKQ-5dLPQ=@Og)6ywUS05v0)JC8Z_3+%Vo~TTN|7
z;6=yNK6Q`%?bH?*s}avkE?(A$wI+~FV49FuG-c;AofG9b__TB6^yS&_xBlz0Uuu)@
zna|qmn)Z<`%F!Fwf9tvFestF_(QW<iqch(R!03f{JFK5v{X!=Ud77H^V!SeHf)u$V
zFB~~MmP*=7Dtg1I*q4M{ilvqa8yvDCQQ+%NRN5pJS&^&+wcB?Tir!}EI0$Dy;q_~Y
zsC0T#P!{j2kF>L{L@KN9aADTIBl+%DtW5aIz1Q|l4#GOhP#B(BKNm4~SF>0`kcv={
zCBdWWgy*VLr=G*DQEx|mPQCnI-SlqE>@{w3r~)xS@SeHzjMp!@JYb8ELQV+TT69=B
zBr{YpU<LGpL9v@nJfr8GozHE8;GOyD*&OQ0W%u%FZCmSAA{JD4wB~*@%mHZd!6*#D
zECR1`&I(<zKi+(Pf{QTLdjqF)@bR<@L3iLyH<;6XqbQYY?a2Kq@DxzxmWEgyQI_iz
z87c8+B2SXMDCiG!0x~%f9i1grrET@SjNf+C-Akrj4T1Oz#1HBxCPkEcznSVg-IVw5
zQ<2`A|DTs5_Wi_Hl8dYJN8|zi;%Lzfzr`WGWn|H8feg7=W-I-3of*qI+nGccPeQ*X
zoMXF}HFUv8FND%phI1{pIx89i_Y19K1}@s47=&mEcW@C|-ebH6CZdiv;Q~9NeVELV
z)$Shbii$9-imVNYT84~gH*gB~5?zn6_fM&vqlp8k9<RquKcch{2cA)+h}2^{LxFFE
zy^!RCw{h@^0-`UL1AlJkNlFYzDbYWuQrB%Vz}nD2xuhr-{#-(@sRmt_g_L2$<d_*O
z%5_e16agD^t6r0M&R7^?Wu0HpPND0MwO7oL@v;Z5a40Q(SWZ;p{vCaYk%}k_2X1+U
zR-bI3MU#_(ifV|VfglPdGb0rvUP2Nj(YLNUgsTn-@3R5@OJ+kzcsrJ?#5#v6AZy+v
zBFyem^i-}mFNL|T?=D`nN2)#yW3Mp`-3nI-8|ocSVoVdm@wsQ#rI3<RmGatTUs5Yx
z>zCShGAT~E5YeTUtbt}f5@Y?H0a7fOx({kn<j$)h@==~XL#NL)uW0KJ60g?^4{8nn
zm^VSE|C&SZKMSYT&7B@SL$VL|aDxc`G65PHzOPSC0fp0;GrM|5zAIOKp?F56Kk9k#
z2|s5Hvv;WHmxa%ql2N~0KC+n(i9#VdbjUn})%>T`a?6evyjIKP=V~lA6@8y%zNm<N
zzwGG3r|)`I=#r)((DT{G>U>;1opQm6qMS9jJtE6}2-e@)Pn6J5T`#5#qcy@hkZpt@
zv!@S67m_*)O^@nOC2tLg0y5lNhyL7`1lrJa337X04q5m`^?prIOe#@F)v%jbupKWg
zj*9}4@WkYywARsJoT~(vM;r_Ve3EB$(OURD5f2kD){X8nOgQ4Ng(&GIFjM&hQVK*C
zF?C~HiK6JkE6Ckd`H?iQ_e2@n`G7rd*F}J6{>1=sFii8@sj>vRcVC1YUdv!K@fk{D
z6i(6^vgDDrc*th9ye#=-Ej1*o5LR1$DJ)B|OH+NiyO1N8Yjg>Aud^|e?izOX2~C@F
zNl!wz^Syp6S9RFc|La^{j{f)ZH@A3T?<lzYViT+n$@Z9#o7Cr1Vn~sjZIm?ji(_Uh
zMjU#1Yjk1CTplVcZ`8ZKe$Dw--{JO<<6L(wcW6ZZ?rT3@e$}V((J?Gx4E1qQ(xP-z
zS(7jCWwPo}dW)eL!aGmF-|4-t5A80-m+dYOyRLEWZ<@M9@EAA;KEkOU)D%QfkRt%!
z6MSI^Q&T4b<#YV<6m=mDPgoY>5H7npuF7-d@o;Q&ltzG%HTpGX%Ma7Z0vpms+puHP
zCw{*2&GFB3Dl|JiQn3&fC!1hJj2j&me$6R8;R)Yo?XJ69y)rHRfWO=Qt}a&<gOYnl
z?f?m<ymk!u3xQdvLJ0{O_9oE5NzoOq&r#uEJ`Ozvb0k?^&prChd<*`$;T0ryHNxTS
z9t?A~37RmD4buF;QW|vF6fGe?Xbe9`6Lc+7;ECTg@jPxX))?>1N`jgv+qMUUKQOrw
zL6Vv|l<%rp2zC7&T(pW_J_%BH?jZS@L{@}?eB!rz_-ec!@J|r&*Bt-94mlPVYX7cS
zS~-WtUGBXT*cQ;4p@Hq<Ecea|MWJ4tueX1wZ1b{YWjGUY3Go|E+wQ)oUQa(y*BjMS
zA!t_01-#pQ5me0PApGU2s29K(5}re=B>#>^nu+EQewK!uU`zhp-19%0<9>ry)xH6%
zCm|BC-^z+UP;}6nx=D>&eI*fm*N-v1mtvS{YUL<hTb3taREnL^r&&op)HD^)&au2_
z5*u^L{f((vwI%*~7n}BNUZOC%YcE!4FT0y<S5R5}G@X#$D~GBsb46`Oy}V7a?ImP!
z7*sr~Ra-6zE=|9-k;JB)U3p=ki+2>3Aq%Fr=(g8AV*`RPRbvVqoRphei5H0&8c%An
zn9*jV+Q+>QwkOU(uXnE!_^Z-h^f(H=&ni+m{lv$z_iG3<r!+_|^+nL2I;LDUaz3<I
zT&qjrFB%k8`7d)0F|scB(D1DRO1;-VWPJKTK|~!8T<wgbn;H8-;qi3J1NPr^N~$fM
zcWbXT1jGMCWl!9_|B1!^7W6wY%A3K$>x@0x=VcCx4$GP??~jI2GG+*lqP${uaw$}T
zVB52}NE78KRTKPLek~q#&|pLNA9o>Cs0rM3a;8;*HiZqx1!2E%sfF+Y&szHC^0Dhb
zG2gDIb1kxB_PKi!-j(Rz^GFWh_+F=aT=K3L5XVFr!^$M(0Yc-iU3<^XP_;U$(4*fN
zmI`}9tXkeBvH?E~X|8jm>+XZ3w9URyFP}T<P41IE(do_Z{|*In^j@DJXuUB|^m}F}
z7$AI{w2bY*2+L^W&l(qBNSqkz)5VWzXGs&Zk^vEZB=q(mEUT1F1Epg8iR!d|58Thn
zPwW){h`tDzC5mETW6%({Wi6JkbA>r=T0bkkZEXnjxiI;n_A>TzP4Lb~aF<?@9b3(a
zu4hXHyM$SZIU6MtL*R4I+TcrrGn(;>sAe{tVQCf1%qj;8!TVHPeLW*y&1oA%7o%3a
zoVI`xvFw(dR?ylgC3_f`bPQ~$6s-J+iq$`Xt1l*_+t|MzJd9b+RsmE^E-xVUJ$N;X
zk7B6sjy=JA;+gT1P;gd~*FzAs%VAK8e;@NGaR;H3QUnk*EIX(7S<Ie3A?x5niBI^g
z`bJ?8M4tNL<z#}+XXaEMHAbHPZ?5Ac^|y*VOxBk(hU$L=`+d(ZyGEDS{W5z`_BvIR
z55BVyBO0r!9e$og2j+EEW5h?i<3^x+d^9!;<Z!YArAVGc*U%=Ht*LxJ&?w$oqkFe6
zdY}1{ZSo_)57tnjA98IB13fxAaT2uw{8Fedv+v=8$YhNFX?RO60IGCgz;KwFLY@nf
z{p`t{ySyctw0xa%ToonJ5`0J5Hv|pXiWe<ax*|uy<mV}OS6z>*zZuS*;v5}`cA&H<
z>yxa!)Si>*CbT+bZlc|v^h>Q`PMCl0q*%%tFV8b$4GH8*T=xa9^Lg=NFGo1xgz96g
z2z-<$F_|nFv5k<6VH560mL^S6Ffw1ucEID=l!A>aO&>~R>9z|xy8q2zy8iyxDLir6
zqcaAHJ}(zG6ru8~m?<HcMUwkH1g+$FC8l%|RHy$aqpde(mr_X{4u^0p#e5O^LC!4<
zWD!OQhWH6~*u|MCgxOHmQ+*smJhfglmH%uTbVWIKbI-xse<{e#?0(y=yyp8Tpc$0<
z#=3G__Y%c}3t=Hg6ziD79FpH|pt>1W(NFvHyFAg6)U>+e`i4gw5|AvNAZp9rYFUoh
zU#aMVEbN&v=<Qg(>~V#=YHv?q6k)Rcu!EpGU%$9^ZH(no+W@1N9-CTiMa$$FA$6RI
zq^O#l4-0~dYKisr=Be<vaNVV*kT$W;{w)vs9{DTD(Rt?gNc_8ZKi^4-3aJR=pXL>X
zBxscaE!g{O?fD3sI|a_RHeyyG@h{_ebxTH6d1gt_7#uy1^xLkBPY-*a02fs(wA3^M
zRj{_~RO@N<WydCj&z2OO@h{?MH0Vo9Q?gQj&h0JL0T(W$(J&iQ3(>}W*>mpM#hy#;
zPvm15D?rndTNiD6P{hW3vRd|0ztoFIrRTrd+>_=1-jy@5RJ%;t5tXI|AhJ0DR0Rwy
z*(!;b_IWYQe1Ms;YAH~(^r2Rj`I3#%nFaVW^OMqzr12*%P1zVW;k9=OJ!23jAb>{G
zK2Xy?qiH{=GN%FEvRfe}sO%=myIHjkZ9$~$DY4o+5PA~w?PV$T?qFWuoHkf1Ixnhn
zE?t18v1z>4C@NHh)&K!RJ)R$a5C^^iSCgmiN=P0Hghe!9k(el)hmD#M4Pu1GCz-!j
z$T<}A$`6nSm!wn5Hm;e^E9^b8XO3lK>^dtC(mW6gT74neF^-gbFPs+CF;5GYXD=~R
z1OqZVI6XaqTeRS{CXK@pe8d`fT|GFo&yEBz&2s@-4#BDn1;pG*{^o5xZ)Mj%zWVzU
zjfGXlTxviXLSp{y?O4A6PQTlV!8KABJrS>rkY!+0rdX3M35dgm+*z6G993V@Mt4hu
z+U@>(3*>&c@yM_r^VyWf_~L3BoG1ykd|&o#+bWQc1#k%fDjhN^X@AA;hgJPgx}W<$
zX?-ldvh=sh!bi%&0lyiu1xI0Lc|bU|j7d=4ByovyL@6BYSa@5@NTr&US|;n9If2?T
zRvE@FJLoHl*U7HO^eU~|LTm{S-?nov`@x$IIWYHhpGSgz>_DYr;4zM<xojAuGGkK3
z%l0>#bKTU>XY1*PpP0XM>*1W3H_C#pQk}>rR|BNJ#i*f<H$ZWC$(~`4ZW2;lb<XoE
zpy8zg3*xVZl19%B;&V}@!km+p76yGZlb+_w%?#%XHhnBq<BUO%Xo_lS_Z{`@FompV
zFiyP#Xg4S*Z;dj6g@TgOY2{o}IObDQ05nt+HXT)-o{9+}J>^LUvDHP>gdmVDI`WY4
zylFr5h4LLewjw#g!f_W?g&u2r*W&qEH|yHd0Wb2$!b^6j2DhGSt2Rzj=0}`NZN3L+
z)Bd2c75<foy#LTwer@mFoKA<zCHDNwb4Bp}#Eie^I0=8#7+U(ooOSm6uJ!K_Irgc<
z)rZ1w_J_d?)H9nw-&!S~nB?Z^=<a)dfWxgSP+lq4fBq+kBKY4RYQj=r0vsMsLSK97
z0UnKwewSj#+CXU-J33RiKyyh}s+y<PG+xZO)md=J+uEN0HeP+)HiaD(GPUsAjO(V%
zJ+o$l9yD(Yq}4h1vT`ZlfLyZYvz~4uJaZus)L}Rx>Pb|nNG8uUp=*^@IyiA%fCcZr
zLT$H<bKS$;)?R*|yPFV79Jq^C0tP-n5E7FS7x(6^{Gz!|gnc>%lpr>>E`=#*MG0Ef
z%Knt;j2`6JB8!R2C^ob5%`+=V$^nvcLZY<TTh8F68ogD=J{A>r*3%J7QAxC^CKa=~
zIThJz&g%iy3Kb$HP$FYF{+6`yz0iAfCMI=E;PqOmDuXFwyzfy2aCsBr87aS^j#ZET
zUJX<f^Fvy1HOpWnR!8r|{qWkYHjBH<r~jO*hWChBY1}T`;dzd=A1-2W>9Dw@FWU7f
z%HoHNc4k@_t?aWSjbXAY-5At%I!9s7RX)N$zEJ^J<<+Aue*j#P2;g@zrjQHq?lRL}
z01*7XybgfJ2LX#Un9iSN0%EKGapoFl=HmY^V1qivXIu`Bj4Qm^rM}8?oWG%|fF}ZD
z7yrzv!SHN73_qY&QcsO;AC|;U_!~6L0>m1MK=Epb$(x34D$8rgZWxkGkjLykC}g5a
zFiW6Bb`UN^E5_iM3$NUaO;_QOzw@wi*nHzKUC{;>l~rO79AD4-{dUJZ>AYh(?YR9}
zSFNEW8*9NtXdBi0@~&8izx84lxVh1GTBDb%jKXOlT}qE}9O<brqgj@il$BJBf>9uY
zS7MV4b&!z8%gapA?ISlCrm~=6noCH1kLkGiutVq5WY*S6^b7#0GWWYn&J5*RuL~8b
z6tkZ0N_Uv47UVEX?Q$z{V>1*N9D^%|%D;)1Di$VSs^66Vh<I0;zct>}olrB(A9_fj
z=5wfy|6bEbgPn0efI=h0ME<7AP2%LAer7$7_$0*85&x$i?2azz)6-YMyN=VdOTS$l
zMFJn$DKywt0kuLV`vtx^rsv5%!`i{=1kVnSL(FGhU6%<#yT%C`%kXL2p=|YV?<oly
zSXVVeF@XK5^z=9!u%b7oYF-o1RlP+{O~1GO#kuzGUtBRZ=)VA?L^Su$t57^z8unu4
z)#9I!(%exEx+DkWk-S@FS-O^<VE5I$_vjHR!UsWB6Q#{SC?&JfK+iRVHDoo1C8f-F
z3Nflkk)8UKo6JO^HCa~6r7%m=F1fy5biJf3W)hTkp(A<TP(8M@{K2kdeMYC8`QA;e
zE;6Uk$xhAP)#>k{oj1u*){1Er2}!p&JuFDRrhh{c?^6SI194V$5RmK0R>MXO`LQ6;
zISL5M-3upD&vGxx5(>(C;3^)rC;Sp9%UR}Y6wS3WloAyJs~r%x^=Iq_uk4VEw=FWA
zVR`00#@LvKQLhA>Kkkf!(+q^I?7g1g29GYK!NP`Q6rmZxw!{@<cyDXj08%h#Qu=}i
zBTcDP;Yq6k2pR?Ya3q#*#rnfXvDm>;(0BTL(4yyv;q!7GJMiv%@E+TSlXze-by`aV
zO_wmKuXk;1))PopdFM`#Q^#g(h-xffJ0wWk*jNvG<3hP7T%Nk6T)c8}wEUv?2{mU;
zm)UX{)Ow5rI!f$Bcn}aWe|=hn0HvhKVf9@B6*FbvyH$lVz5ie-f&Y`t&!!2hn{7J-
zF#ROuZ0a?u@0CGG4p)V$NOgENT#oW8sZ-Py`R|K_S7VWl^G4ErRrgv^D(zi-`eq~)
z=pNA`#Jj}(jvdal`pt@xD3zIX4_I0b_c<B*mvOSPb14?zx{cW}JBmfo*>fi<ij`;O
zl;a5yWQGFM6KW6m)99!Is^Qn~5;>X2tZntr_V$%=KUGHkTvjPt<Dkl$^$a@LkT46J
zC!pp!iLGUdo@>DGnL8N}iFcf5?BEW|RvL~sTzDH8TreJIx%}+1#Es1BdR00`=frp7
z|EKRpZ=u0lcmWX9X97_6X>JIDN@h|8J8Fq21;wBw21(ECn2ES%Z1W`)o=NdqR%!Xf
zi}3Z$+gDWOn>aXTF&P?NRs7LF)E!pts7$M9lgcQkrwnetanh(zLhYY%BR;RqeT1nv
zj;L7m&*Hqw9L>mi)Nh;UtH=$lkC4hvGOxYo`Cf7IB`hC6@-05XJ0ERizfY9H|MG<s
zkh8UAD3?AkFQ-yK1GFM3aG2`%l7E&d`Cc6K;eM-?gB4K6rOvU~rL9)mfR#Z>tk(D+
z1ZMPqZ(5iqxUBT}PXw3TAkAUftKNG8IAECu=ofLwn*?)Sq&-DNQl6qJzl$ngHa5f}
z+K7}Lc#_0}Y~gY(g;7rgh|W3}NGp+6R{mt=fL;-74*rh(@!55R%X1+Ho4TS}P<wZ}
ztEG0-M2@;~{gx_GbgxZxooXEg84J3<onzPe!cS>KN`n#z7}XxIz$+^^pomCR1BoB-
z6w(_T^31KB&VH9mq_^0Zk=q%d*cDCqy0kC35X3Oh`?=y8QFdj?8(j~Qw2sZsFeLR&
z6ZddXBX*_r`9k(ALl@hH9P^+s*U9RfH!*+~4+xS>ZJ$$#lD`;Yj=p}!4oDc1rXSjy
z;Av2)2Fw1UF5I&0a{<;*4=eW})X;13cjE<StqL$oQ76#sQ@-a3|E5Q&z6?uSFn=Q8
zv5U4#(iVAy^!WVVP_9ebdIlD?^Zb4F%L5Uk#GRs5i3r@Ch7-#eVUjaIdnq5x7zT9?
z2U|(!Xk(D$?|eabX^=x_yZ7}vB#1ueuDEqzD+!J^n@GiemSK3YwfrrY%pP<U36Ivi
z)%4+9tmwa9tpC3N!9V~hW`351dp&?U?+ZAz0#KHwH-i^dKJ_4i8?wbDCcVuS6$KsS
z!amSg-2_&RQcJmiF6;uvw6@7lvsv0nu`)@(mu)G&hoy4V8yrH2t55VdblY<0`#!sd
z;8?bYTUfDOC>}PglennTe>ZQPZzW{_mfDgZ^k}s6JwB{fW~}Az>f5fjs!Sn0L@bJn
z$O9W07XihT8lNNbV&Ex^zzzxF_RRouS|^YNb)vGu@RYr1d(4e{`;C^4T$~fHRf7O6
zSzQZ$s*#@vivS9o?W$*dAt^VyK_I`B)nS0_z-xn#4aH?KqEsoE^hav;Z=S{BnN~Q5
ztd8usW$D=YkKA5NbrA`S2O)A&=t!6c1ELs|a%mBXPlXmK@K+9wsrHWF42R@xZN*FB
z)4$R<%3H8U&~ZNck8Mb5nTHsQRn{!RtGM5c$He-)5bwK&U=LH(Di(xbM<;E}JWOFs
zdk9a`T;P()I11~D!GI1LZPF4_N3|rAPpgNd4k-@U#Ft)jcu2IK-^g-4-39FBWCwB2
zZZ@9>y*3}tXAPD9t*HH#=>G5Bq8MPS5MXv>No0q>8(0n_>3yV2&tT|b<+u-{dy_$U
zBs#K9Iz040%;X<{{wT|vF8ySf(DI4Rx;t?*x&52@ID%9-<C|`YBiy0hVii&aN-K=6
zT0%4VylJip0g;HN13O{!Vm0>>A~f$iO7NRhL(nW{Z6c3QW|I~DDH0Wfxp94nFV)sj
z;_GcLA9Q)$i=-awaEv>aWaTzPIWi}!JS|#<iP%|%p`sJDPA9OPiiVI$mIf#w7R=ZT
zfZv*p@0nXpaJRynWSPDuh~sNALO86pdzCV>M(RfAB1W;E7*Bs*;Q*7=)_i?@)i#jl
zv1D3?HwZe!4BoN{ggI|Dy2O{l5IprjJUe$#EfVjVk5Y;40nf?{e^3&wTkcmXq)&lS
zQao>@ogTEv@mh~)aOJ@^1cVE4b04!@`Q%f_3Qi-YqUJap7A@CQTM%&(3(YY+v~H|c
zcTSY}K0<M+V&V;Kgz_45Ky)e_jcGIO>l};D>C-<I$~A4Yq|^9!9Jnpw#wf!nhd_Vw
z9c0Y{WM=QAk+`f1xYH`dqW5Qa+0t*9!Nm>z7(J`J@du7iQtNU5iRcmkC!)tM@i7Ys
z8{hcFF`wU`%3(?ER_Glw_Ysi$E>`mXlZgnC`kui;EdV9is0w}>-C#LswMV#I9@s!n
z$IY$Eo<PekLV{3ksk&H&Vm*(IP-~0o@?s!~KI)kiae{JG)r1@xvtj=$22K_!fhDel
z&A@=1<HZN&BrQnTYKu`wUIa}hz1sKKi!fw#@z_g8z-I?stW|3=%R`>m2sy|Dgv2Qi
zICKR{hPzNz(6t_bJkxaN(0O;+TQ3{SV+c`sTYb0bTAIh_G22^g7xLBI1R!s-lvEW`
zIZe2w@}r$7&dHD26bdCWrVl9>vVZYthAGRa2yj>3<h{xr)No(RX(@Sp%^M!lKrj4h
zl1D#Vjw@VNWmbVmZrg`FCZNWmu{+F?!GA=Dtx-ukjo=dysZC&;^EnDD20D1Km}3iJ
zP6;R^4nfO<p6&hc0T#Bg&}G~y7KKE&!IH}XuB9T2@4x@aj{-+bXfn}YV-&Mlsw%n=
z*cGZMTu;`1H^64OnGJ1%HOkf*DGBcuS`(Sg;kt4%;lyUsY0+C6{TDTg4cM+~`NyS^
zFw2*wzviO_W77vs*tp!_vfM&+;vd^Id|{K30O~*Ct}cScJWcB5wyHkvGw*-jSK3$H
zFEwGKD(^dF4Q+Nmu)SJ=L3AnSA5(Fpzw)4zaX~6kK~Ii~|D)4F7#kJ(kptUV@n9-h
zcJ^sKQ7@+iaaIMe$3qx^vEvJgr<~gU(@Uf7z3WBti+$!zRxL-bz@_m97=2qC0eKIi
z&|Pl}sK|SYmt+CK$chTvvuh^Ice<9j5!hYe%gN%!c&VpRg#u+&LK_=-S%uO)ZNxkB
zG)-5l!_BCO88J~=EQO>E&%kVDjh|Tan4>!js}8nT<b)*Gv}o66!%_sCtWp}=LX-+#
zlnUsZi#XJKU)7%353an_zgmUol*0nE6jZZ;5!#MUAn9mh?Fx)TRn%{tj+tZB6uO(9
zO(odX*R$P+K~z`?W^4Du+LB{AbB76Kw|gKChIl!AY(b>2{+w_Da!~2QZH7>$`K6^O
z*%T9i<EJwG;nLs-+qH6M@=Ff=fD$Aol84)Sq*ZFbQG}ffmJU|B9wYy2z`qoLzg;;0
z`$jR1rKBk|IV+jLUe7Kq1F4hM)SJbobpgJ3tw@D!{c3P(K87=@t?<8@_iNijY$LA;
zDwho`gSUvuKGNw_Rgr6<GX_5-k{`1Nf`5>~U_E&p3@6;qs2spUX*E<UQGR{Y=m$EE
zO|*9u_sM5^TvRL$VRoU#N}eTuH<gxunwnqx>sg((-Z2Ipxb2Qm-cOq3Nl+=%qGbp8
zCQNR}u%H-xG-N5d?#Tke5on^3(c~R^K2+s;#eM*zB=N*Kp>A1xCZEt0+f7_j+2FEM
z)wF9g(Hbj|2Z;e34;=$ABAqBwA<0s9(uZpET(6dhVu+6xl#7jezoIL_ud3&W+IB^(
z7&G`?$}<@G<!kDy9jLZUIbk}GF3w9WMRr<|?tZflm!gH)a2=*kSIEYuE3wuSthahC
zzi=k{JC<}=Vm?|XqJb7_6JT6ZgL_424sr?)p}!GqbZ~YG+v!RJKIyy2ToCeY?-pf4
zXUy2lYH{-s7+c8c@yPlP2R4O(^JbFUH1p-V217%_1K|%gmpwncT%(h=)KZzs<4@Yt
zApaUbiEhVvoqsqav4l>+D}@f(FdpN&%<|LH=a|Y;lSL?rOtVPRzVWu##LFiM%++Od
zQ#1;?>^wnhL)7k~RxqhNA`weNgn6<<;rO8b?^NYx5w6Ipl2nQK_tBr((aOzS;qp{Y
zhY%^u-Ox#1uXL-bP*-%0R~*Y^w-iMSOhaKb&mJb{UPQ_LIkscmNCcqQr~E$?_SDl?
zpRF8+3suEKjf0piX@5olBv&)?A<6WR8p}DZ-^v0Pm0QJ_KpUJttYn)~g9C;NCu*P`
zw6|&yi?$3EU#@)EN^5qTXg*RK6UI6Jluilr27T+ekG{4S#<XaZV*}=et?*PUB~XKD
zSW51hu<>b&Q0-L8fxYRPhfcr-ub^-XXrZ(hKnk>$!4fNP;G+9ZYiyY89aZLJN#8Ri
z-ZVNK@!Uy{YTJB0j7Q>i{S|esYwJ5MRd<bGomTa#nKnK5+t84uH|dCKzYm8xyVc+V
z;MF}C_csR|@tc5`!%Fs(j6k#;BBT?7R1zea@9qQ;#6crI>T92q{u6IfXgzySUZNdp
z?EkmW=koy)-OLXzJY!p|aSis;h*FP*q?U}OYBj-qg6V&YO0tIk;oqF0L6dUi4W(xI
zJac9_g2(eei$%OX7_*9vE1vw?N1^7e!`0QBatlw=Ayb8!N|UwvFxCk>ogkWxT%>lG
zULQI`-93oRe(5|vMOBn2I&29b?5LVjRtW8;+0w1DZVXq~^NN2%qy2mHF77%?+AnY5
zl%FP&WC;*AXRn=x^Bu%cgMjIR4+Wgfi=VCS);G_gaC)4Q5(i08uAkTqn6&uemEz+>
zP+wHA`<S`RLbRL(U0?Lq2zcGpRbr4hmO7#g|KHg~?V!<=vBWQw&gIcpZ1atQ;`1OQ
zH|_Rn*<BBy0|8@w-)Ha{ZT(Um@ENy=uWS{h6d|Udk_juIUYeP|9HwaP_Bpa9eX=Z6
z2a<Bf5KtDUL}}jSP7sFUPfm^*CC``66_4BRzv5!3Kc0t3kl*h>$c)i&6NWk6HlNky
z9Ld}CPXkkbEj|yi`?+^y3^t4kZZ2MLQJtrufWGnd95+N<1$ji)2SA0bEz7vHBPwUd
z*zA5xGt(2Qo`IMYA<6h;Vi!!I^qqc#a!^;?5w5Ixn+xdgl%e~sME;qZt!%LhdM&!b
zK{QWjSVj_AC!jwH{1?6|_%Dg-%(zE~%pG{jUlt#<yOh*WE<s5Fh;I;Y{(Z!;Wu9ak
zT&Z@o%tRoUWPHu0U#_rOL57Y!lSS?_OLHqpPNi_};xK3hEk_)vHJ267#mI)&{REcX
z8fj)yA$6%)!#~tchuo(?JU_BuZb4Ij>>S?;ysHf_HG|Ke%TVF?u$y?R743E+xDkE2
zT7o>>cbxP5++k-v&y2!LQw+{8R$-6@0c8URm%?qdzDpJE)h}+UC?}LvIW_7Nm<1d1
zB{=Sy6lQ0uE#ofVi%<m(O{kC4g;`CG?)%Hp*&sR^k1LNeW<Bx7Gn{W}p}Jn+OUX>V
zx&qk}OCFSIvew?Z&qcyNnMy16L8b|l<Q%3;%))?-#9_CRQ~^n&OucIob3;p9*px0N
zSitjIWBBaf8VoGLO9<gPS6d-tiy>pK11G<nD`E~uN_5)o;XYN{CAV+B<nnDLc3q>V
z#8`k-n5xdIS$1#7HP;vAugB7iRHd>fZ?jKn#1BX%@@pu`B5y6^(6M&_9uD5&P{Y60
zZF%dRzajMu0Fqs<BFXykr~&9~t9Y@)WYW=IO!<a57lL92<B;uSZa_h#2kJ5aPVzAK
z3S{wbnX&l4e7d_@gGRbW`@(PyHb=zK4$S4-0KiZ}TzfwB!;Ct&w1Qkn<yVa&q_1lH
zo<4sM4nlZ{ErfWbqLOr7WD)|hq`u6&k;Ku!#X*jHLt>M5>Xi>DLbx#$YUQ^x)_$k$
zHiO5!O^Z3%iHr_wHXe6p36Bano{kmbcZx@aq{IM>vfqfv5=F%^c0P|bwjuFj_uT3e
zIRVtNED-VGb9k_O1zv0ptW%~19;ykQO8IS}^}sxV&oqkV;o+hi5w{B(VB00F2sy2+
z<fc&%6kWXF&9O6B(PFEb&lGogT%7K8I!Wy&KMSVnZER$T{w*HCXqIL|Q%ch~H)jW7
zJiu3uD%kp?lF$^!JR_-#E!g!5OMORK8jW-tIzG>Aa?nlh2>@!NgJse@1$BmmrHOjE
zCU5aZh|5J)TeI4UH4+kMMb2T2>6;$@ta>%I_#PtkqSfJvG3d@44h(A~S4hD1Kl4w|
zO#+PlJVUyh*K{A_PZDV0)W4tmcF7k(k|(|jl}O4vGvpoesQC<(A7&YZ8f8gZb6Cw)
zFqIl)8`3$nbKB40adN5;p3E*z0{<wFC?K-0b}0-^gtM{pVt-JPX{fS9`hfs?&HDM@
z41J>Se=>CcnMRhKoBos+9`$O6{MsJNdSr|}<>Co#lnZpYF$>};2)7k)pq$xg_-c(g
z?&qlkQOdDo4^|~42n$d;6BYoX%H!GJpC>YJ;;5XrQdC@2vc)9xDvkftt@5VW=IF!U
z{8jyzHiG$bPDKSzhhN1I<pY@Gu(q6KcDUZ0*Z1t^V~EJcTBJ7dde^B`8{L~=WRB-g
z#rI5U;&E{w*d2k|9TtUX;lZsukN|u-QyPhh;ANG5s6VAge!^wE6&ss|&T{$;<vJl5
z-yAmX(S4#-UU8pSPUlCE*3}=BqjjEcHl3jc(z-&F#5s~#P>o(M0ito~mYySBsYTG2
zY(h`NazBvdtWiHVAS`efoD!yfNnM0Ga<0gbxfP`IS(?30{8aNWPG=F-P<`HNT%T+A
z$+{0BM@6%-GsUXseNA=n7y2uaJ@s_Oes_1y02W9FQro)5Q$Hm_<DIVdY#l?akS0P2
zqjULx*!s$_D7&v;3#CNqmX4uo=#XY;hEC}ix=TR15r*!DnW1CoP->(lh7P400RaWU
z!#|$)I_JI4xBdCv*S+>yzgp<j4!QYDud0Ol3Ka>{b<g0I?TLI(v-@8vU03as9>KxU
z7Ok68;V;q}zFxC;<n7-8GyeRt=X`(n;7q0YoB5XvVw_h`CK7zF;W%VNUnly4SAGu+
z_cpS23(kQDFz$|P+_?M);?(|!1SR7k7dAdj;Ca>3>$jIUVeOr<T+!>xIOid)?$-Z>
z?$rWB<wVFUJUQy|zLBXZ|C~ZRkwXnPeU^QWy`tJ?HAV@^_&bktK@Ji55L5(I!^mB9
zoBejA)3v*Qdxj;!#=29olnO^g<g+phowP>@iE$N5?Q7lqN{H7W`s9mk(8Tx&Mb*xU
zAvItVzv_YXl8s5!{*a_255@>r%~`t4gPxNNS=(0Mtf4ny2LlNn;zhc{rZm$#D`1;O
zw}NM@lR%iefJT5>ixq&y?3<y8L~(hdY70-Ht$zX7(z$jjk>A!qQp5AclSB9`IQ3Dw
z{rLWXFr0-p7bw5@F`rpl6<@6w-Rj2j>a8+8Os)v3uv+PrX7SeZlr3l^m<po^8v3~Z
zxe(zl-IOU!yJy4p&Q6>=2O}@uyx0Q`d2oW+zIpFU<FUIdiR0j<%<<KAVli*kBIGi7
zA@Gc7!?qz&KB;!{r!@FBeYv$W4<ypm9XJtR*Y#!Rk9_qT@`Eqir7Sx9Bf^aYN~XJy
zEh|<6zqc4~#49-0k0anJ^9vw@jKY2|kMvt|jg0|dWRNhZ1d(#{C$)$A-@55ZqW_AU
zX@Is-vQ6<5Y%30TOZ%QV!FGi1!@#MO;gmMVIsYKYMA_QmF=OQ`c|Lh!)2c)qj5@S*
z4qMI-bMkD5N_I~<94KshcFz%VzEW$*TOj$quk^*W$kDY^i*ZNu*|QnAqT-P&EZQ>4
zfzEeilzaU<Ax%h%)Xk3XB8`Gs+XcaC*Ih-cc|#b9JnHexV|3Uo?-M5zJg4YstaYm7
zS)+D8F?{`K&!jCgaa)5iR#6W!pR&A7q^*b4Us79|YJaU=ubh&**abNbyBh+;4pvAC
zi5n5>y8!K#z_D-ATC5Gu@Jz`dXp5v~RW*)(H&<_JuOVmFF-nb-1lLLVS}m;AEQM-0
zLsJWa(WtqZRiSCg`TGh%VN>lDbk)4PVD1nh#>AGs`PCybak*7LFaL6d3cFv^@9G;=
zU($C75w3-s6#M=BLp8SbQ?_ZA3D@pV^?_lAXL2TtNg)hLDbF~w8L34sUAEP;OWYnp
z;i@V#e^vpF6si-f2_Ytgn?1A+2}@32jX=Y?n<SqEsGZcG>v;<QQVw;^<hdlfDzom#
zPR;%Nv!5&WKfvdD^gmQkLPY`c#~X95pN{w(lMOSe8LWxptUMGQ>-<!&i1PcRMa65<
zR^Ggl8DjP9u6lkv?8mBZ?~&>gFs+h02QJeYYT)xZ!Q!-fY2hB1_}Ohb72W249<k|W
z-PHc$+4r2#jZ=$aQD#{knD@7%>P_yy<=p4-y+1{2qxu5>UOx3WS){qfMKKr-%mf``
zmVeNd!=bp$G067X8x9Z~e1TL&(|!bm0;<q)3ZETesi@5YNczOg1L<*8VXk1F{+1^}
z<R_*&(KaF9@=+H7vUI#7kf3emxnf|x!+SYw=Ozo42r(dWona(ri!2kJ2noF<ei6AR
zVN{C`$8aokLy9GW;<JirG9014!zuSm@R{S0eNB4W_+jwq+j(amGyq9Lpb&TS&A^(R
z7+lfY^ZorylNgagBo|BV8_6B3!r(}j)3y2oA(I}@!T7>X;)8~Hvg=&V!zNO)q>pf0
zFMx)_AT%Ov1(*!2{84d4a*`(o^`g2?N4?@2UL^G$+Qbx)Zz^HxLq3}|iPgZbXKK(e
z<3FAc9lkgNX@^rf6kDHAyiMg!%4#)Ger{iKcw6|B)8YSZcX-Xg{FP=5;@s(K(xNQ*
zY_^j7Kz$o!UaydE5aC8`8=lfv;29%WTWzHaX@Su^50Wuk)@@0yb1|Nh6i<fIS#qes
zBN?BcEM9#pL$h#6uy&r(=hm?$*ZAjZlu^QFQS;FX{9cz}Hq=G;w^+z^$cUZPHMxsv
z^{<`NpzzUA>MC@`DC$+p6SlP%bIP&S`hy5MAcU6za|D1;M<ES;SvH)5RXqqgX2<o6
zA9JQ#ETn>kTK|gB?N0)RSR64QHVMU+12i4LE|en9Wf_ZeFQ<l>hUFqnXRJm4j$7X5
zjR#tOx+sZ)N1+9De%ss3SUb`c>)lbS%Ip(08@^Aa&FN-wnBeRbS{l!~7D=47qR)aW
zW59p<V>~FcrkPl*pWioOmK->I(#x-pb1PSL*&V3RFnKl9K&CIGXeRpRA^e0y!^Ov>
z=Qe^=DkwLt^t37|t*n~+#GEfIQAxc_QjvQdcX-$xc=}EhA|EAlf}NotfOXn)`&jpd
zng69Om&^KgqsdJejyeY?N&IXAm<T-rS#4M=;HCOV<@+?M?y(w<y5*$lQ0-oau2}ie
zW#_a<4loa`z!E>``|q11`me)7(o%q-j?PmCu^{KCK0G?+X6rIdL^&voy%OW0Qy5c;
zI6D}Qm0|pz<&cc~R84Zx6|0Yp@T`$7^_RVL9z}5t1JzZ!kwV=J73E_bn})?m-Uoe(
z9qG03^4epS2j=3t--GNbP;E+(i4IrrN9+An??7G9Z~YZBF}y#Y=hv*?6`w}jjRO_r
z5dkob8nf4ij<5=*7zdRSZ@!)AhK`aKsl-Bx$KL|H1(_NkPPgvDZRJg#Hb{6yWhmHl
znMMW!gT)W<Wy@*k!7v_H=983rd>yT*5GR>e^9C6yf?sB|A?|PLo^xd!Mr%=<m)qfz
z$Cy$fgy2nc0$TyE9&4Xo=Fp!B6}-|Me9H%^t*<zcaI3k%cny@UPG;iG2%W9NVmmxG
zDO12_Hy~;Lb#=l?C-g$Esz4`^mu0$0Ja8AQ@b`8g6-pxWBRgu8A)ib%Nz2!yVYX>$
z?YS7VTG+wN!N9-rq{SWxi_&{VKSNs+z4~BPkhBPq@VK4hl=@5m=94ZvdaOSaSI>^J
z=SObuNc^?g#!1yR4Am!e|Lvpy?XoKvPr=s1Pb034MLYf)9QWFZCfcE6h^2fr*pKnw
zuxVI`<bNMSspqOu3wrAI#nlnhi)kd^BgEtKlV6rwZ+>OM!z?A2Pp=i!O{EP@NwK0}
z90;`6OO(?}R31sw)ODqvHY!%aQO&_Re2_oc)V?F`(oOWo$rQxb5b@kS87)f#l)8z(
z;a#?9VXj2x>V$s?IznguLiYY(^03n+co%r_bS?NtmJ8Ct_mYfS;t>d~q|1BRQ(1%W
z=(tZeeUsnvQnF?wFFf0bW~TPgmVA*2-S^#3cH|CZpC+zP7&l&HU9sL;WRe=|6(dlx
z672Ff_5ppG7vt71t6ysAP{<be(OydupYe81$$_<QI&C?FAor8UYoF}dfZ|9d<Z+zy
z>%nFe{c0=QS|7$cajj5@S#K4^WfqViMfXcvw;t*t8`6Q(aW>3=;;XO0OB?j8amVbz
z+yQJ@Vy-XMaT9GPw9<pz*hj~WzN*~^TMEAc=xR~I7eO6(3d%C`HC<7H-;bSCjWZcB
ztLo+>ACxdB6Ou=YU+tU(DLYm~&XS5tcdQ({Pks;f{!m%*|1NmDTCnN272kWt<nnH1
z=kV{Zk5;N~1O;33cAdXCp2y_R;TR`7gz}~>|CbT`M>5PX1ghYuyf>qpoyuBZ7UxAT
z$}Q+~b~a|<9-EIT4=xAHLR<oYuNsC$=m0ZIADexVu)3;8MlA8JRh8QO9klx6wFkMc
z>EuSN#qz_<bP91E3iXXQ$`3XR!Hs^3Oek}S6hMN~EQeh0(tAg)oMg$b6LNmB^!^o%
z&K+5-;eTUl+JS9kN;i9OWu_lY+(dz>3CPX$;TR8eY3a~#Fcd5%shxAofx@M&Kcb!@
z((}X~H1o&Z?&=iZufrFy`FA_MR`^U<`{^}{Am5D+9|~>@$!8^Hy|@#QY82>ReTO=9
zM(b|xZHmGX0L8*-akRj^Lf9aCqRrCV*vJ(7k71UJfiQ}q62{q@Dtdz_$OwlmOyjcE
z96oT+khuamj;YJsBi7;}Wl5};abF*v`1PJNTK~2gw3gqWJH_`$@!S_1IHZ5R-M#w4
z?MsEHI7AQAWs^~NSyvbx78#!DL#36AU4}kH0#7#t>4@o`&P$N`x-|8!c~YUsmWSuL
z7~OyWf}ZU>l6Zgju{R2Ys^aceI1u8&bI+M>p0piC{l35psw>umRcK3DR&BEcsTlnW
zRxJelQ%xbN2aBH5CV$wco>btcX0t|SzkbFx<-rurd>RTz!^%r^L2DwBEA66?F<^yM
zGoax4CW+iAFW{985}ly{sRPM$$9;{hN4k_uO`?t3J{BW4vK1ovk}w~}^s-r}N}2Ts
zfAm--4?9`5P{@xDr`@*+7sn#EtJ*(>p<}G9vvFFyRR(Cx@diw`Y4{1aVqkXYbVJxU
z?nd7VxnxcO>U@M9-Oq*jP8%eZRx65ANpmymkec6qPil+djU5G<#Kfut<ZyQF`+JD7
zH>P&yQaqcv)<&?GflRcB`N47bRnrKAWlAx+;0bjumS;dF&0>Rc6_JAsc4e7S{Czzo
z{Av{~=Hn|jU<JurL%x1LxpQo-Yyu~gC^UXPI?kGgax7E}`ndxiX!8yy6XkjG{zt+^
zpz*IFFp3zdgHY(M$o}ZbnLHNwEw1vZ3Unbv8p>igL*HA*1ZFp2t-vz#+h&ryiY>JA
z{T(6nE+oAF{)0W!ZiL~^rR)2TxhReH#Z}G5AARvOn}@DJFDVuJYQL>Lix;&<wyy&s
zyeT2NP$tbikLTLw;Qu%yr3j|~{>c2V{Sr6V<GrmI<kNO8Ag>}Ne(pu8QVlU}r7FEh
zg8)n(jy_cLJiq@4ogVPm`C2Zrrx>oO{#cU!Y;97^=D6%#Vt!YKg9;-_^FbPs$w-V*
zv5_=d_cLRxb+Q}uW>_O6ujx3-h#2_GHRPJoM6z>dqh$AHpCKYF%2<0WPdxVtgJ#9j
z(ok<nk&w^U0GFdG%Bg!P8MH;Dr$T`8D4NAj;R1W+US9d@{`}cX*q$i5&YZ<W(a@Y~
zG?SqOY(>Wkvw_9uu^fMTFiC!3Fl;+k5yQg@Jdwy_8oE&H7Su<<rqHopjNiL=n8K<m
zcP!o_Z+_DoVQ1f*s^xZflo;juQt6gM^5?LliZ!dW5a5~;guT{KgJxWBuvTeOPmxFc
zhynJta1oldLVWGCJ;05Y^!uExngood2fxu4hNawx;2vw9<C09$e%&{x$D%QywKSq4
zT;r&lRL(Oeb3mADY&^l;Qs%}-rf~9Xm|Dv3P@+I$sp(D20;`E+z>)fMq{I5Md<m%X
z0tiPuF-lYIV=K#pAz+-^HUBD+zqIOCfd4YMeq%s^98=&!o_chZfAU2RS|m7kdZ+({
zssB_`Qq$zKfjy{{0>B_I)1)f_7KcWkK0X#o$JgkqowM%d8}+f+^6V;a0`K@S??T;G
z%j&n}?A}gn75rzjHm-Nl*q!k6!FP8u3to}eCsNyqLZ?koV$|Ks4sK2v5;$=d=NN<}
zJ)3h6aHY5#)Ev9edPkbj7qI<IZVYXsDstj8MW`bTc$%+cht;?l^HISuMR@!|oO)z`
zK)qXk`Lv#}ksO#Zb0(>4%Q<1pfOJ`Q*|oi&rmY~ETet0Z>2|jBSk{we2FI)KE-T_s
zebnoZqTUrbC{gp4Y)S2az*r&P6F0nM@vVR1gD^658efu*HpDx<7r~d7BAPU7B$5<q
z#IB1o-(OH=Wz$C0-q|hdD-2C7HqyjrnMGGDS*5f*n!>a67`@mkQ@600-xo%nnZ=)f
zyMOAV{|auiJFlu@^E_>&s?a}id?ydj3MO>KRhRPl>ypkl|1%hMWs7}ydA*Mv!t2d!
zSvuG4!iAvFf7rmuxc`~zc4Y!8Y4bCdM6Yx5uvu8Vbt`D|iH=o0Shl$Qjng!3hqCtK
zG217%daNvA^eonnEo4H2I^)E={PN+S44j%{=Q|-VH~Mob?k3F@SQK6y$*X1W+sb6m
z0?6C$IQK2|$KEk_aW{X@ahjlG&Not>-v=|f{ce618wUW7MoWsH9a!6>(;n2oNB4Pw
zxsD1!8r?3frvV*X-^Dp?T4zTZs4DKWwKrU0xpe*=rs$6($D3M6%ZM71hm0b{oURUM
zEc*L-1NP#8IQd?+M6WfH+D(*jWIDx5`W#!NKx_);w!n}wrN(g(Gt;%4zm%>|ZV3Vc
z)6+3#^T?0QEBw-)ui=-?=~ScZ*j#pY_F#dLXRjEX42i%4M)syXL`qqvyHmj-nx7SL
za?ie`T1<{_I?lB#My0GsgBWzNncrksTO7NDf(2cTW<TUyFQw0+ThWCRGMxx_Pvq|B
z7pLwkr(gUf-_^chZ++L*Uqfof=t__gako7KdpGC*7^{N9g(eW7qbtP?Q2AL2R5iY<
zMe=R`x5F6t2hscxTUxLbSQ#)WMa^k~sbXIiQITih%va?c?Ilsc^9H+L7_6I;2Wr1u
zZytUgt8fe(YF-tHv166y44pNqCeW5BM#f~rL^u!1N=eFLn(S0BT+v!cFap|Hkk{Dj
zlsu&SDrh}L>HTh}>)El;+UJo_Np_T7C#`&J8L&EAJ+j8T2dm^TmoijnoNHA4P)ava
zD3@R0i;UxFqtx23v&6(y7^(O3%wZBXU_r}KWMrICc38sJ&rNX7$ijqORjdS4azSIB
z40;ZL3(`0+IC}#xDC2jYx3-(JJx*-(W;=aX)8eU2m{eZ@lDBJ7K=s7`=5+!#)JV2C
zzkBJiG!eqEX{!LYG2(dSieU}W;k2N<dWe-~4~uK2&>KX3WJ6gdO!4hQ7Mhl6l1qL*
zbyE7G!+u=WetGR&tQc(Yo?~*^-gyvpyo~>L8;5Gjw}rWu%U9AxMNC``@S@S}Y?^aV
zNXaO$D8qjNA9>Rt<}Q_mY#8uFwW$O}SO84tZ#NG@Q1ZS1R_+C3swn-N-g`e_|5zyk
z%7^50IM+y~C|qFXU?geEV0KImU;+s**TP$ia7Wb0$zDa8uFceS6RM~aK$7U;DcJI|
zn0ZEdtT97gq9UP?%Q8r88UJVAYf1HpRy13jZSyCKcX?mc^){2Je60A_D~@Y$o_!zQ
zWY@bR*Eo^-=7e^?Jep=XPGl_bvCGz(Nu6UC*0^|7rX=b@r_Cu0qGfn623*!xD%d8W
ztjd1+WpVgP%?!)nYIoGe3WJ`gK4A3PL--y}O6Q)UDXkz8=gQv^L^;vBMAQvw-S=oO
zy{~HFR~<L(?rW;Kz&5~!VC7teoMQ@q$_g1bA9p;@{Pa?uhuGb)Y-Cyas!<a;sziaZ
zaE3<^Qlq6O(VH>1C*_C@NL3Z@)>~OZ3<gd^&QYTC+*3mei?4>Zh22_~8w{O==if9~
z{ZN&4N@%vtl__TaY=jvYP#yCO{}NBC320)4PnVnWWmX!XTj{<T&lmir#g0-cP&cK4
zz8zIU{1K5-7HKvfFNU3}MxiTs_{KvfwD#R#mLVAWU*`ND|Mye;9R?Oq4bM#UaMXfK
z2y>uAV25giCGwy>C~y>qJFjWg<@EsQ0G;G6CFhFL<NUo+_0>qQWoceCfRa=-oJ`2E
zW^mK1R#)OT!U$lb1x!LQ+#XW4LKF}@G1q8;Vg)HQ2ldypz`s8W-Ni|2U2A{;a;*Dt
zv|nCj0aC(Z40Kgj!51Kcn9tC6<JcXVZi>x5iO3ly=5JZ{l-hyk9_3q!C<sqGtZ~(<
zr+%4=v&QCb6w!nspk)lZwl9;2Oux&Cr0x=*+inkGc`__vDd}ylYZ^10!T@!)#AOp~
z?8jX|G`|RzM$e<<>FRIDN0kaD#xzFtphf1ipZly4eTb<%+s2ZB)UR-EcE~JP92aEu
zo;3~J$9E!EpH)(>UZdAcAupljONFC{5HhDWP1i{{>`&_FDKRp*UiC781}J+aet;P+
zeoVpV>n_|)C3<I+OP*WDjjSJK0jsBN9Jo@Y2opWHDRlwSR$H>ftvo_NVG<w*#Jo~;
z=Kx38NTEY#W%_|^&Fl;4`uU$o3>u91|MYP*A~_@A?&HW|)e7}l?OdHU1&V1^+|p>K
zjVG#Wwfh|fwUb?cPh@Ldd|-*V(V+oU?;G=M2dmI{6@D-HEvSYx7XvzUMX@}v)4!w<
zDOWK6P)EpcDgZ>$L1w2{N7srYnylVlVGOaG-@28%Cm|c%*EHwdr`>}uK7M>y!Syr>
zvJ!kI7(HLN=rn1b55N2p91@#ETpZN_g9)cFT(OXwV}U3xOR^a~gUku~z*r(8?E=87
z==>SY3v*iisJoS9MyCQ6i1R|cE)IYRK_Zp8v^5XnE{7+ij$pahrc({n6X|P+NqtgM
zpK0<`afG<KB-)Gaa~dTxZmpy)%P2yJ^~768Vh3@j$oOl`Z%z}_2<5&dM=fEgn>$|q
zG#k$U_!Y1_JdPF_(o$)39<d=Q-V+84gVOV?A?&4kD=t+4DvU7YCva&BjD`_9EFmEV
z06Xd78Y<tPhEbbVk$Xu~CJ|PwuD}(GhG0B%sIbgniS|6qjC)JK#8jLi1CO-**Md+`
z@>qEGfyMg72`HDzM81${RXK63xpo$3Gk9|F*f==vK&dduSiw~0pMUEgU1e1B6KR!H
z=O#FGeA<Bp7|sXh)xKPxwd19;zWA2!lDdD+sV}vz&Ba|<y!jzxj+;o}#%724B66&3
z@T+@SQu=$b&`VAqTYAyyEg{^gt!L^I-Sday{7AORf)%$F8uF;m>w?5@#SPnJPWZ#w
zg!G^G-Uhz=ep4jg{g+7K;>7qgyrVB$w0Km)Zk4(t*j)Ss&|d(4>Ga9a;apG@QTu1Q
zq|AgVp5b9fPf&=pI-8*Ox&jeGtNN>o3{2ayRvoL&0*>dQhc>vBSVMWp^I_?p%U(u2
zaiGts#q3;TFBYd1A&6SlJB}DVlz!@P&!WayZE0z}&){z$(~ca*P@e@%25HJ_B$*fL
z1?CL~t_PUUDiwAT0@l~rcU|b$1j#ees8Ih-vMS=k#(6~zP<tF=YM6pY)Uz?;hult@
z%>-E-kqN1I{#ZuI3e|H_5sxWXrQyRAT^w60SQ~Z*3tz0Cc_swsqz`R3?wRPFcsw)L
z!KwwZ=w0W<O9{*8L;tw1K;T+oi8V#@7ZZc>m;KIwi7Vxp(y0sTvW-v#&&tvMi5V@t
zbha;m5I*Oh4f!_(^M9%JD&{KMx*IZNUb0uq{m?oonQI(hGnQb@W6dpY+!=tZ&TLCY
zGz_?N9N~%i%y;g+b4UdrF25%9JZs_6OWeH2Gy=$eL=(e3yC-D2`ROIdzFI3e9}DFZ
zGlEu*UWo_)O1le=o}(c2$#u#nu~0;9e%MM9TJwv(|MLd58j~&&6o3WF1}jd(Fwud1
z6>?@ZK`5MReY@m@mCI_dBK3;`$={DkB%6L$sO2ZP&Kc{ZGuAxUA|HB+%&7PEiD6O(
zcomc6M2RXUrwLyz9mu&y`l69Wt8nAvydkOqFKjA^;I}kZQS)Kv_KNg0j|e)Xd7+?^
zt)mN215cr7H2;M#mbP+@-_&MV+9dNk$NrePD(z<;SKsKw8MBNOrf)b7^i1i5oQJyw
zkq)47sa$4MUEwAfP0b{G3Z}CWt^eDUNY5e+Tf+kMa?+NBfwoV4+b0XM1zV=whuBBd
zG_)-DZlPlE>3WK=M(I$)#T!J00tDV6Et<<z1hMgLs>DloXZ4gpj_($|VH530C#N<1
ziKEWBz%~dxFiszPJ!}vu${KyAq~0T5Ozfct=BwsF>-;Y{$_KcH)u;j46@!i2CHQrR
zHwrQ{bxEF{ak5lhu4b4Dq9dY>|LQA2yVCJGiF<8le@3O2tFE%6Q;^s7n67(bT1#b6
z!IcU8z=74IR=pa}=R6u*Z~_VPcdTgRn3$gU-~0k>8tioteHTdXC~%hwP=f<x0ePk?
zmO7gUXR^&#TWJeha_S1yrgYl)jb1S%Y*7bqIT@H^^8%l1%{~`J?rkkXZS&tM@!iya
z62;=z5vTrT>q|(*PFK8OW2kkIlGmXrd1uf{VV+I_EObDqB+8P%lfK_rXcjhQ;lZ~i
za()MKcIdlr^(tvm50%$0uhDv`+?JtlTA?8_(JS8uHb&A8jX9m=>&eSFvk+u^3K7m4
zp>IPJUg^w!Ez3$@sL=ac)wBc}Dg%@+Js&25Fa@0_wzOvmM)I9bY{#LqH63xzz~R;~
ztZ))R+cMl;xLtJQ6NL656*u{Ks<$B;#s@}z#n*G@N9Z<opyxRcF&K&wRM(1L5uyRq
z4Y5_dSTL*Yn*$F7Q0=^UX(df&+`Fo^>WlMtnb*WVw+w*m4_ZS{rWp+D-S_$b9lIfg
z{SW`iJ7&@iu-U4Y^5}ZQiX!lSuOMWdP&2P`6`Llr78jtK)$fC#0Hf1&@EMsrG7Y^Y
zmXD*ndo8~(=n^QutR)BnrTQUUF*wnw22+^8bEAojEu4Y_cH3gyxMg#~tzZo6+H0Y%
zyKvw3o}RNC`AA9M7;F*mwO%08NJH{%0wRT}!Sq@@@G4Cs_@pTT#h{1MoE3lX(3}Dx
zuJS8Nz_y_xvl%%5LaTsCA){hMua2r)wk$gvz)Dqh2xUch(n_x-bz7@jt^`a^Oh1oz
z&j*dnezL}1*au%>gjF;Hf%S{VgNLs)Hr*x2hWnI_!{lx~^CFBflkfj{m3Zj^;{rI3
zI^H!vK;2_FWCVc8R2!dz`jV+7J3aSGsVWQTAK}cD+2~avm9mup12oZ%b>jVGgK><^
z6@`Yz#LF;e1x3qhWn^5LYU2}lp>u<B!eGYN6&*W`q*_FpD4#7|t_O3ipni>(HU($_
z0L>qdeOS91$K3_n0i=OuYX)tj^vWyb5OK=#jula*bTv0{7gGGF{*>HY#{+{+33@pM
z@fxBX-38cZ<IHuyw(98Uv8-OZ_U@$=t~1uLsP6lBjMuLwtAE6O{fC<gV77-P0R7{8
zceJw5YGl+wyG8E2+2NvXhqeq{)jI((;zl+sl3IRopKLS?-MgJzD`!xP*qA*hFJtD&
z0gxjw@R___gw%PEL?C>Q)JE56xcQv>Zzt4s&qdJM@6X0ZzGAKU%!OTb^XwCXc3#qx
z%D{=!l^tO=L-W<^pFtZnr(F}PnUWKTQ$YXg{mR;J*~D_=EcbIa0ddhs3rc`NU2yK9
zX2Zzh6$eBoybX4Z(Y<JzTJCA=t$cAf=-D}|4RK%gq7oXcr`XaWr~npFZcXm^<Tkgk
zkVwc!{@!K8EtOHuBBQsMab`52pBDm$jQ=QT)LW=+sb=(Fg;ZhWLt=y8%f;?*y>`)C
zhat)%y+G<~d`v9WdL8IEgq1;})#I`x?bbrTZ#~YIDH(h`z8VS%eI}y)_U#eab98ul
zwVua%DPoIcrCj~$?ho?jT`_$EB+YuuvYJ!-Cr|m&^V<vFLdG;(?_KHGL6XE&zH+)-
zBnwnY#0s74G;7H!8==N@p!vSj12x{{=%X(N0k~%wn=kH1uEw-0h!re^sFgj-)Tr~j
z*~8K!py|4M#N+xBxqesX3i@|l<&XRyK!k)!JF3r(O!z2CvpXM~*FwMQXRf4<mh{+D
zGe)p7;qal9JCo9-k`~jh9Oux9+m@R+PlgPD0l|PkgVe&(!uAeUprwa=&>~}-$rQNY
zvWc<%YXxO>wOKZKexdn?VD|jevsKsL!Etm!+FKQAXSvsAZ9(xI<P6$F%4=Rq2OxOI
zwh)#`UHipGy!fwgk0%Sd6ruJZ1Sk%}mjs5x<Y@^9k9723q-w*g0hl>c+p4{5zk+i=
z0NDE6sK}y28SfsJuL~?ZOW??sIkQ>|i<$))s%JB1YbolP(hSU?zdJmpTdWqH#;iW9
zM~jswhYM`47;Om?r-4K+e^i%f!mIV!=im%80gfGLfvu@}g4QK;`{n_W3*&`r-ip{Z
z$6(;xm55dq21p<wsid@$Mrf3+V6J*9z=poJKxymq+hUFz-dK73s@W$?<md?1f|bo0
zq*~@PWQ*U%@K{Mh{tnxzypfjAHM}%Rcj>qRA*1Z6!dTa7i#Wa-(PU`t)gA1(YKwD_
zL2ckgq<4}C5FHw&U$6iDu=)L)v&~Z92YD#Bj5>KbjP7Ghli*I*s>aUFe(3tGYp)D+
zEbyA<tj0AcIxq2ZF{k$;wFmGY>B;MVN-uD%9+3uSwXzY$Gk6(BHa7xc(4F5pAvr|H
z5yh7F^E^@JOc&Kd=*9&k7R6K^WN5YbthxW*g67)IKLpdd+7S?5riuT8C81+0JDxC^
zD)0<_ZVTvOct`VP<6%c&B5nTl`~WWJ$cd)kk9$H%ziT=X8_BDY_~O16o_y8$x`k7T
z-_3g4e<qw2_&c}`WX6+cr}Fcs3fFCdR<x7maZ%JpAC>q#GC{(i$qi)c5L;h?_*c~R
zq_(+d)bZ7rb3U_~YgSQOirwU!RPe3?s@PmmX=^6OFCU(B?Fht*Bjof@1!9Iwa_~-g
z<m|2B0+DWInqnx?mUj=f#HX9>K7tTUh6W$BuhT`GlY)!hX$IbrM*gsyHrFOvUOnoK
z1w5Ukg)!>^^GM-bs)&(G^s=dbal@4w_D6jwkTsJ4S&kK}$F*}DvSnSU3f~#=Lc3u$
z_ms><6}@2a|Do4J(&-%}Q2y<^@^Wn{UNq*Nho!R`S5l6Kh7<O7dk3en?yy#LPFPmw
z%eLw5qh0MTiHC{>`p)|LTmACI=j(MVU;jkC(u2KI2ZfP)vuwEo+Df^?d9T$tm+q3&
zz?-ed%DJ@+aO352ilFs|lYj~9%hVo*|0OQs)dA}0oYEP^Pk`X*0P6|_)KhVyRuH6n
zv*w8H2`<CgAga-BCK>_*P*1o0%5a~;pz@mdr*!bBIf{cSjg{`9X8A&F+>!Cb2um>=
zdbKOGQ8;at7Mmu)^+`~IOa8tnOFUACm0CYD(53d3^c8kBffsdtGle39E-e<y{Tc7v
zaw1Eea>*OQ@+09B7uql0^iv?8^Ld?tE$n6`g=b)8dq4~s5JK?~ML#d3uXI4~f5n(r
zvB+y2RL%s&EQic0icaNVI#WsMs~HDXs%}k(wD15`e!jSDJ-=8Alv)rMB7xkgb#EB}
zw4!sxzE{7ybu3q!mZ(w03-duF1;n5MBKgGg*0o`oMzl9D!P{2G+yX~E@<XyDx5RY-
zVyza&Mi!JnQ}<YJ#;;~>aXx(%a^PbaNkC!|ANipr2%-!XaZ&=rSR>Fd{it*=#&CKq
zZOD%I1PiEV>d}kWzN9*rg8W)U5b0*q{Ti6APJc(w!GhBqm2WuvFn!lWcdOs+c&msB
zwYU@DmW&i13~YR?$0|PzFSW<GO6CX19(+HV%OExD8ueFCh-P4d*16`d+jifB)>Hmt
z{lfXjs6vKhL+U95?9V{$%0F8YyiB-J7jH>56w|Xg6d;bh2gzHP9)Qjp?YPE$eJ_y|
zPunIl_m!7veBi3qFF)3jrJ;`A=dYguvIizzfcatGa?B;^ec)WM0#m49qah4btkd{Y
z-0v1^(@hrorYbU}FF(_au}05)O}#AP{$7^4do$Ya1DGPvnyhbQCB<4E`2aKVQ~9I?
zcv&8pjkJ@@((bC3acDtoIG$Q@7O4+$9JS~fg-jd%$eopLh^=o)rgT+-zI}#1m{Uuw
zcavO~REoi4cAqfWCXOSi21IQ*E9GDx>`F}MPu2I>PUsuo0LT~m-sY^Xu@z(YmAR_t
z$J&2ZXv$%ac@TfYMWN-s=~S-(%I)!jqJ0KKe)&Z`b4o=DH8$AiyP-ysEdoiI9TMU?
zHiK`ciLS~T>_5C^)`GJbJEf`#r}k+j@8RmTTcwNQ^{2#)(W8}I)Bz+<j4ac^j>V8f
z`D}5dy~m-?&jj7|s}=IyUE>%xX3nASwv)d1zj7Dc&6jTtzqqe3ajc49nd0^^W+$nR
zU>y^}RkrX1Tj$<y5YlYE@BF^F4TO*@TXdThb2!`n7m51^El_|*T0A_cR|);!;vlTq
ziMX+!+=M+=Xuhoda92hq&J~@t$tlP7Twr+~;37vf1rr8k9Yz-a7IsVdw>Xd2-qx?@
zt+&pt-4eCF*X^lJR{HP^^!C|X86pgkFx=5bo5;R<LaOgQVkhmJNt79@Dbzk1Eeu;p
zloeEXkxa`aHZk;Angr5i(z9RAZ7vUdt2`&x9D_VNcz0XpBteVs$=%5@<*HNM*ChtI
z^pLK#XNkP1EPGF=cEF8&^7BdZ9;I#0VoaYWYysTI`nngpVOzH%a&D@gCl#@q?lPvo
z_g)m9e}A!V4GUb_PZA{=xrf&M9W1o!68@T$xST1}_5MEYmDEkbV&2s7BkDoo2b9Y~
z@I|p~d+@I;4GjfHUkCj}D>f3(PG=Y4N~*1NMhzLxP9R;Y{?=9(j6-k6Yzq2Wi&=%D
zX7`K@0jgzCWj(N+NKN0g=TcsUmQjZ6I81n%D>iS@KT;u1Y*(ZKBvoCwt?<*wEI?gU
zCZwPvt?^v^C1rP_P~TL8s|a?1YM8L^j5CLzy(!e562F;g&P_L|Qmm^I%lN3*AOgpN
zVAqbWywRjE10~39WY7ERproyTWkUqa$SgdQbHc^@WaWHm;sbm~oj>+9qb?UJRKIe$
zd;{h$`yn<Pkxp<Z1ph<n%=eyj*M@aO7&pxVGrmD?-qmvx(keUp>G+s)#-RkbDHife
zt-?;I%lqz8MaH}uj#$%xWhBv8wzD2H^0E(MNs%2@xTNV-ETml*+pX<A;`O(`$i00(
zpAD*T)pCjCCxy)YWprMsx9BfqcY5aCAN7*bjDKflY=0-~*<}o{GL1HPjYC#3aXHtx
zAm|(*)c=TN?&}<dY|U)3hdA+&h4sqIHhtw9VT<l(Fl6I;;#m2>VMqBB$})Lq0%*y$
zd@>heV2xyzf5r)wB2|vv($><%D)%Tzv>g%v9$9z&`o+|HCKggCdX(jgOImuA*8BTk
zan0W|gG1dD(iZm7)V{v_>-Rgae!sdu?G05fUL51WjZv&mGGsM}HYN11#yA|LsrPvV
zd*Dtl<k-|Zv%SAYXz7i*w4#S5g>zTO*izmk<!S<oWG;qSy=wwQG*D`d%jn&Sz*X(r
zpZ7*~adrgXV#2U7&OBE5V@eiE%%HkbTZe_Y4)O-UjYf(5&qhi^bmKP#-E&+PPwN2f
z615FqHI?&n7b#FaGST>qc7m%~{Hj=em^z6gnR|~Q&4Q?4c5)J;X9O?2U$S%Z7>^Yp
zt~{YgUhwo#9%cIP{=Nv_Z{vQOH`oGej)LF)y83sOY9$wAODIYja0YRlu~j<aJiPBd
z+zn!6;61;_6ru2bE9ZZCB;x6qccR{PjauIcKkY(=kK%aw)N3YH`X^!F<5zz&!y%Pl
z$m~0cEM+5lIW4MprOz<`vCi+ux46;Kr)oHzXJNT_Uk0~B=XRtHL!g3A3-#Y8i3P(t
zEFLbrj@)#-y4=Iv8~@#_NKu`R^5!%dYHWF&{UW(gZAS-8uOy*XCAgCNRWx<~>j?Px
z(7ph_8etPsSA=O8UMnpMU4@7FTtnU$4gjBv+EBk$8wBi#O9OTEeeJ5BvgH3xT)#mz
z1J;}?v|f$;Fn`$0i+pD-j+=eHEPV6j-V*ikzUoz<Q5>qe2))<tWHC<k%H!CjE8vVO
zSA#Bvj>Awo-@u1{9%ORQwWE0JciKmWZ#rU>0nIk+OyzE)>MUQLExl;k<8mIho4ii{
z!8PsRGU>#xWVoX|xf^&ZXc$FEeegw|Tx+@~pg@d>vI$oE2|{2B&X`Nm882HLJKuKc
zke17&TD9*6jxqmuoT{A5%=ZMcuYSBtF1SwX#7X*^YE)-eguoxr*0?l0Zcmi+v_iF#
zaMOZDagJ&-cp4fztU{amd^oZ6_3rz$M%EE|3w`!h)X<qvpSohTuo<yD`QfMSV9T73
z0*)qTCj65VqjucCe}sQrFr^}c*o2Vr3hi|6F$Z4_d;W2;U6C+fP(;+(u0vI$G@?Z8
zBcJw((Z4^t$<c5<{e?dvuH#fmr0Wwsf(}=?$`l*w|0Iv^v&qDV8QuRyK<)qcE35nR
zkK@K^HutT-WaSZD1E6RM(}ZyV@kkM7<i-#WuCy(R`lL^QT4YUqrAO3qE~^qV-N7@t
zYpx?Ki%!H)67V67)|#mX0lmn3&FIqrFVN86xt0flqnUsM!@urZL%s<8-FiVa^g^{#
z0X?#!-gUOqxw2Z2bxelXgY#0e9Qb`}{X2X*CH|$r*s9>8KrXMVc>}k!_P3pFm1=p3
zml<@jDl9OYdhL@_%WySa-gi$Y9PFm)unt|Mo?FLgM{_c^Sbs|@ozBG88zcxT9Xhqy
z|LEPfR3rMeQ}4Dj5b7|xwE+HRxl$?_4Xu{LOxwK$RZj1wHZ|%5ag8|F)mY?H7Ay_y
z3p7#Uf!a2I&vB3dIQG7|cMQhcCaOJ$w1=l-x+^=hzfK386o*dK$jc`yMF0%PLML3l
zVW?{I?uck;H0|w=+cROxtg@%C<acgzcADCxJs}%RBb{{3?&d9|INlPVXhOdBu(RtB
z3&T11(b!Y+o-K{0@8QD);&a(F1d(Qn;oVAP-r`){ro_inf8^rwzunKZ=WHXXA6iM+
z6rWP(FDD;l*e460rVzuwx}|=_t#G2AxT#sMHKhdzZxo}VNBasrNQ-@W&21)KdP`M}
z<4zw6wcsCt1<Zqg9lC7a|Jr=TFEY#jO`(nROzirB0F5|q#T#+yRb+y?oI(<ph-1ln
zHuserwTL$(ESX0C(jO@@adhyiETOeM^my&mMiGeGV$oGRN}y29;P}nzajd_~SvTdy
ztnixZpU<v;u@*ki*+1skao#1rpOX56GNEGIQ$BeRJ*R4z@tU2mQkCx5ib^};D>uI$
z(>xmETVOtvdymE17^&G&$Dfb{73pyVDKpc7G?tH9cJkDe3&%}aohG)z4E34BrhaOc
zZ?2L)sayQGl>YtAAHwg-`Oi`>vUv<;2ijLm!2_&!RJFsB@j4x>C3oIRK)pG&WF75z
zCG~>7eZTk)&BNsUeCY>|jpdtkOia-w-2SBLDyOlB4?20AD8y@AXC;0Z2|jt%>0gSE
z<Z~|2s>%|c5l4Beo3xVUB@uSYE_ener?e6(M(fWE1c!u*nk2kf3qIsDa#Woe*k8yp
z+7wU|j<WyRj3DvZ{NzHad?%ctsp2apmsYIfrSnEI_=`oW6_w&hQ5wMoQmGhdm1FDT
z!tCc=@~~3q1Ta$k2Rm`H`piRggLvW8UE6v#OL`U1dG|$=y`P6Cn?FN_^2LxlO8AfA
z-}vT#)#H$!di>dkOLg)a_xp>YsSXtqxwc;$UJ$*2Wv8*T&4N8iH0QGAU4MVURKZrK
zyjp?ktzwFRl{rPE5VLe(1u+LSWBquz4lySPOs$hL%PmnZ?4YUgIfp;6pV>;1!gumx
z*UeE>LlVAbgf_ixR=m4nuKLPi1c`K<<;$LHlvKiR{XD^5nU(BH5m7bcF{Z1{X9K)H
zvWCS^uaBtnpxw38glRW_;DzHwBLQjp@owTmn09)IvW<f;9t9a}WMO#$1nP}qULsDT
zP7bU{?c9FCZog{&d_f22CYWXW+9^2_yb=~+E9!pK$$_HX+h4SN&g;2v*xtm9uP9Te
z*CvslB@v!dHZY^hlVH2}5l7XN!wUxVB74nE=Bd8)oQ@v{e4<BxCO8ZS#TOD@ay3O^
zw6$+PQGWcYDxm309kGxO8)_dfGYIM<D<Ec7ceS#%Vone*;>}^Q7rJP7<vdW^u_-p{
zu|iM0kZ;7kDeU2owCkk-Jn45Ky;Rt`d>7fsKHKon(d($Obeh-j&Iy~%Sg#;6hy{Ny
z>hh$`IITD&Vi1Ow@CXxWnWUF6?F6dG$J2Qik=b){sEIw?+r#7PX~6*d*XhSnvBhe{
zQTR{!c3elRicg{j@w01!Hp$8w4orbmp;Q7U)uq#RuRZ5<0_p2)kL3w;)%dFN7aqh%
zx-{7*gqH&FWPetB=PM*vr9Ak6g+`)3HySu?7pHTgE@UFrrcf0|>3N|PnUA5ot<iJW
zjcZ4s-dah!<~-y)^xiBi@(cN_%bmb!#0beKdp1OsWoC_qncr$&pQe@@v2|W5Y)e25
z3u7=#j+AMXI};@ds#;`cYghXnM>l}rUTxUmX~p7X04q<N?Ob6sH(w>me&;!YZ4kXZ
z)m@PDg>5Aqg72D=XYPkU-8Q<z{Vt9x`AMvAd_1rSjx7yx0RpoL6(h0QV<eQU6uVu&
zJ&wKmky+MZu8er8y5e)Yy4}2!1fIw@SUunlQ-kc~F<6}6UT3PB%1d0b4P|Ys5Q!ff
zcsW1_r?7GgwO^#DBE5ITBL@7uZHd?pJ)eg>0bL)`7d}?ybG+;sVQwEc3oKT66N){O
ztfW=C^!V)x`k);0h6Fh?_iN$Br9aX%lT5>hsuU=iJ|sQGeJYBJi@?35P8;FD8?F_4
zxG{_V-9ZJ-f1SPqOXnJWv^{KJO33c2S=gnQt7BwFehAQ*XeUi*nqX3XP6=XjI5GG9
zS#;o}QiHPeG>r+hp40GYvi*{nZ4;=pM9J^(Q;`4LUi|N`hZQGTvgFufeXD0lz9A<n
z22Gm8z#ZYz&gLJB7rXZDMMP?r6oQtHBxM3V&7_}fbDY-)bq$f~N4I+pYzZD$-s|$l
zw%S0f{KAyCbDKj52={?kK{(OE+(o*cv&dmuO0%P4uk%u9WMr~%OWF4hVSQ44otW7*
zao3z@^Ge2bS8H=wcQ2U??+u~(DldVug=TB3B_+j@nT@~VHvMQ8Pp|USwexmui->(I
zm4~egE}DIU6B2g(Yq3YKu8iK8WbGb(ERUf4gaNHqzlgP<a`$8w8r#8?;Y+UY!*@~V
z{$163KKkcO>NoSu@mL!n7mc_=mrdp&zSxB7Odhcyr%bw$Fm7|`<b$+Z)8$3^9zf^)
zgZw=umOdTvwEwk_k5>8Y2yNU;bF=m~;1Lm<ABO;$<YX~Ly!-XJu0f%i!-*gJ**%|T
zRLa=~^z7!XH*8bMJCuH}>punZm%~4m^4M)I%iy<N2q*j0&yb-joP2|QXf1_V|A|TZ
zcKp4low6~T;|~5tlUJC}4)bBId_a9g0EK4)re@fJS1<?D+TSZQ)&oR4ldyS40(W!w
zj+q#(ZpRvI`D!AZZ+k_Wq&!9+CA0j&${;InzS1Vgxll1Z9mWN*KgbH^9=<-xs%)&p
z^dwlK?Mq}ftm%J?rgQ3yrxxa@)&+j4F-Y!y&yAM+145LxBnS5kPq46J5GJ;RuVJk2
z#h|iXNpTatA{bd*$r}{oqB&c~>RdJa!;(i*raTV*fVapxqHk5e9g+wonJr;@0%w$g
zM`TyNqB&o?J>4X@D;vb9WP%?nN^lO2(m%3iTegg>RxwS}HQ_sA+Yymnb@IygEwHV|
zvnZRRkb6^)YXP16de}$#&M#c3pr>Ezk61{iQ1F#$@bSC9lmf1B7^QXOw3qnQPJK+P
zwp9zDc7{Zm7Y{6}MZ&Ix52=}A5GRSc`|uFalx~ELOw&{VjA?JOhkX7)LL(Kmr~NWo
zM(43s*r#~M*ONNB{e6^xPnY$Ihp(TJVsP$-toA<sPjURq@;{Tuj6;V$KD{A%qo8p?
zm0xy6?y~_8Gwq2T{K<xh1Futy!d!8c&EvUATD+>T<#m4f)@=#MRTZ<8;k0`o1K{OJ
z!TQt#)6c**;zY>pDz$l&$TAE5TCEGvO2#UAf3tX@IvBagY{cKCW^wAwxuYo&UfSqb
z$(~<gH{4$ESQ5bR>%J7Y@2LNiKt)%XWskm1XoVhg#JVCa4l-UloriH^$5Q40Q?)Ij
zHBOPWh<xK&(xNJ$Q#0A~r1|=NfS5ZO8|mfSOU-Qjk3S9Gj_HYU2WgdX3#JW1IU%Ex
z@z;2IHVS_J;$Z7XzRJ`k+;cDWZ6JzoOpX>EVBfzR*DA5c2t6C*P_kjU3(C<4(Fu}b
zQVq9*Q9%lTta0{94Vgsy39pHTV5F550UW*6Rx4Jn6J_*Zop$%KYioOJ-=OqiXt&QQ
z)O_XTFU%7wW}IlIrL2q6jjy(G0}049gS591qIkUF9hCHMN2s)9QM??-iZ=C;C3W-G
zfD}CHl);n%jwFNTPxMI{GSn};q-gse172ic5NGP3{&z{V5-k8f9?zc1v)fEi;>yqS
zI^SH~?s(XPoyh&W%@f>t-({8Uxy^ym!t#4$@sJ*N6$j;K_h6|RIq!rH?C%WXyD5jV
z^{wtF<t|h9+zibx9>0L3(CRFT8I64M4LQ{Sl!Kq!xM+`IEKwa~k!zV)svMlq$5a3>
z-(P1q%pi4{gSmoj;V%s|{^&Md5WjEW6x-oU8?;QTEhAX|D7KNf@jK*W{@AjpQ5@zD
zp?sQy<aai57h>#c$1Utm8u)>Usd&-rQ>Hgp%k}3JwRRWfAp6&bM04ffs645Yi!nUr
zzR_i?*F2=m^ef7@CUqi}zoY89?=HHo@1xr?nCh_$8?T$#D?7$$U(~g-UYtc)EW#1~
zD?}GjG1E>m1DgBOR<WeIrQ28!#@{FykFgai@vxZ=u}uz(OQD2aN46X1pKpOua#fn6
z3*><d3$Y@HZ}6Uf^XUseE-5eSe<KZ$joqvP9munK%XMqAe$jmGtohueWK_T6Irw#r
z$a;Qfge5uGTxyB&Qh8*3V`hUUN=1$vzlwv#Y`^30zxsctUtRw*3HUu<{i!!0Kc(U^
zwJX(`3Fh;!toneD-8@kP(r=wWBQG;Y-{s@_BE6V|>;%DB^u$>8j^<!}%g&Nc0&~Tk
zzW7~kEqEG2pwQp{f@*5$G92dsmLMyi*#<7gWsti6L0CpKBFs05`erWJzrsfpd%NEm
z>|^o)S*0_5-tp$eJ6OvMxR}t_L}rm-I%zSrIBw|sNmJbu7k-P%mxHbnE&16$C5cGn
zfsJLh2-Y=@Lt|_u(B*hTp4ebwRX>w_-n2tp)B#IN#q;PLHk^F|4!dan$hl&rF}WdY
zo7vNq2|}TZ6fMXz{c#0(nM;oJ881}7?Dye*mHT+tJ6<E2JPaOrwfNn<y2F4!Du@X!
zrkNk~j>0n@Oq6QOwmWSVz#YC)AEBK*c_tkX45Me%NWpMxo>VdLa~H`VlE)^td&TER
zO__THOT^Nq-Oq=2zK|>MeHc3r;EiwB)`}yFY5>2mS<r2cQWi^k+8;a_+-AP=psSN1
zr>^n8D=MDtECsux8HZ?%irG)-?a3QRiZ$5c((p5%@<cc<bUH7ejPtqvtJ{bQk^H9!
zQrm?Wd>>V{;U~}<ocyVso=)PxVlSedGeS>qm!~#i_V+000YF`|4=fat&{mYdDo|Px
zSO#}{DCk_=akP#<t{AG)7n<#mmCLGRXl>;tf0ceWSWr^5L4-c?f9be=F<DUBp9E&}
zZLRB*T;jN8a>gP$w;@5Y5BE$gH$qRDSlNbKIN`n3)`DpLo0R>Q-GU?zj9iRTjACHC
zCObVNmg`glpiyr)51xe}h;4Q8YpSn4NS?f`|L|mD>_M#aveV{`PS<jvRTBT&jJq;)
zh88h6|B2ZhW;%#&)M+o!l<Wz@7kprVe8||R6EYYbbIi_2d#;ENidNJUtMZHd7g!2K
z(j+8I5YcS0WUJCAc+F86bc)^#`}!B}l|1d=reMw~XB#r@1V@H0S{9C3f-GG4nV&XC
zVI}mx6&vrm*%Ik@nY^#x6kesn8>h@Fs)e7maQ)l>?MaY|Qpp=nkdsOqndsL5a|CTe
zjUWFZw`{*-A{D17k{)qRYGN^(_uu1xhyrSQK=soerZPokPA%Uin#Fgfd}eRx866fB
zPZ2sk^QRB<S?Pb~Gnaq5j`6licP@}r6^4FSG{xY!x5m;-ONe1Pfd*73cOun9BBZCR
zF0M>MFXLoZ9%Ee19XB0{U3N`V`wTb3<qIv_p=af=eX2Huap2AANqxK4L4n0<9lm6c
z9`bs}Z{q=zNf7(Q@R`(V$nDgjHgQd3$<Dl-P`x|kZD7u^jMFW?q{{zc>Z}6V(3*DL
zmIB3#I}|Gp#T^P1cZXob-HHUK#ob+tySuwfaEGG7HMpK^`SySAlAGMD%vv+=JoAps
z4dvytrax-x`$;~0r=n0fr6gsi6Rrq~6~*e>`^|u)_XZjgjPCr}8oiE>iRYg!XeM^O
zqufPRaeb;jUb8YO*%^J7+V{oEDqXh<vH^I>(ezs=-(U696G+>L(8976(ItcM0^HpD
z`J$Nbwt8g*-LAuR1%BhuwzWUkn`hUHlR;{~LGVVF_G`@!9rYLY2l?EU(g^*KFI+)l
zkyQ9vJmV5|s3}g+JTgVyhO7_pzPH)!CcIQa<8GD4O-Bu~%qcehMM-r#wGk{clvt0C
zZ>8DjV2R&@pT1VBOpr>Z6)n~0g)S+>K$JqwvXHD{>!>pWCQ?wTfJcb`G%=V);asG3
zM!By@Q-Um-k4$wkfK6*4^%9`}=O10fQ}2J8HdkL9Oab;(_*GG0Xn|Gvh%z(9-f(BJ
zKhvR^f;|;u=EyyNi5Z3su1@b(iIs=Ux_Rs3C5}M6MKw0H@NEC`k{NME>&iakTuZLo
zHuwf^N&uSQ)%U$d81q>y^6K90Z0~(Y0OOGH+o0yNKM1o@ar*`0pf4ucC@swWNH5tT
zFRch?_mBgXH?s)Qj>e3=UrirJp|_!%8r`0kD9plTKC&QVoXQ@%vB3bFLi?I@r|394
zDVH7skvc`wVEkJ|j$1M{y*bJIg!pgu^aU96>$bOy0`Fzgtv~DzeAY|1rR8P)Fq=bE
z%UZ-yWvS`W-}w~QIhIe*c~30_wr}#s+3D2k(AWDSPF-Xn3Q~*P&tWWeKXx0>|6#$z
zZVI>b3(wXaRKdVq=w>+U+nUYF+So)ADgj@N)8yvXTWiLA_n`m43ml~2b>ef6mR|UD
zi5^ImJ)IcH(O=(v?Dm#U%qmZFdkHChr1?fHN%5Xo0x){=j#1t!(p+OF0c@~o%V<_0
z!?d29o3n_FC0w_mY_#6s^E<q%CXE^K`JWfe^S>9&Tw(WGuyfvw*O7!ZBv~?w4Zx~;
zHl7$LR<EEP=&WH}KUJZ|V8$*ZOk?%VHp^>#R{u<?y5W4!Bc0ju8-IxY)1ebEoY85}
zY|jZOK}?4mghUylF0;O-zt?{D$~~>Dma=Cw_OXE$YMPv;xkpg#?Cl~Ib0V#wzQMq`
zS8H^VVV()Aw8j@n4}<1hb`*rc7Lj4!&ofPV$8x)b>Fl9#aCx|H?~poPr(-Z#mlBGX
z+m)0lxTz?@l3p0nx*anN%N!${rc$E{vM<(|#f$zF6_z_&40t1o#7_IPzpX$%1RnzX
z?rj;%II79u$y(vlGN^MnZYp{+l(aqB?P)(XO#@ie%b(G-eCAi|;+&D*UMH_ek=<xs
z@QzwaJ&Dq+8qE;@*%~FAOz(%!7C+HmNSJ{{Exv}n_32bKp;UXNpj$x7<y9D33O7sE
zXYh=F{k{l?BNTG7m7#Nl<4aMp6eh@~9F?stiawBblEPMbXfE)t*GtF`Td-~i&sZ`u
zA%87S<<W`!NTynCi;jd~u#(lCz{v%^k5}IoVV@>?Eqj$({_71B()iz**#?)97gsHt
zNL{85%=C|}?8zIX6vyvAs*PV!DblDdrI$VpW3ALv)R&gFZ+&ho)^=q(QEOZAv8k>B
zG`{l6&R_`?(m>WiOeb8G7WGM#X<_*3xDlm`18%*qDC%gadfoXkJ<G>08a-meeHz3I
z>2td?A3E-GCdJA2o>h&3H8T+F{lvM<RzRW&f~xX--gn%Hyg!r!qw#hCLm3hoU{s2N
z)fo4F^o`#5(sT+lzgyRwBHmk4dgM^zn>sjPA!emKP}VUw5q;)U<GBAaq$HkdvLtTq
zgF2Jv)f0Xp82J#{F9*=Q$v!TLZ!yETV6?6O5T<W%6|&bzA}kN*hg#=`s%=Y*6JUp6
zH{zzMGR`v-Xt|u{==_y~JYu|tbk2*+ddfESeItkuMzg5(^Gv%{0H;wc<k(N){$BqD
zmPVW<8yQBl+#t812;Op@ajHRQG_&NwB9;8BMQV>PeE=<EM_5Hi9(I<l221k@fF27T
zz@h4}kLZP=9Fty-0;^NYD=p)%6*5r&$86#ez6ePCPo4n^iui5&SH@u_-pM7f_pNJs
zq1l`CmFHDZu4G3*PzJ>~kfgb>K6HRrH)3I)ZN;vWN3U#vxFc3@e?<RfxCOKE^y@@d
z-n@Ml?J+lTZe|`<k#kp@32ex>X=`ZR_%tQ)DG9lE=;DSmPYXJ^*5)9T-}DNs=f369
zJ!!6wv92ZR{8aKxEA_riI|+m!j7C8oVsUDo_w|=~UZXT+Lkm>6ESiH;ev-vc?5`kp
zH~9DiI2f$AP^f{+-_e$Xu_yZNw;0HB2HITk21;3d0kKsUXpbdS<FwH@W@ZT^iTQpA
zff{Bp7lxqO*+sMdJjK(&4}~1Nh)bKeaLY5_ojqH1_EaFMG2Lz3`HaO$L@C154vh2H
z&iLNE%mthtY_W3v0%Ou?$;>czT#5ZuEE_lF-`$;O0IPDK+9-7q9>$y=iL*~Ok+F|`
zZ@@gAI2jM=Jik$q4GzGt3D$<A?|&yeU|62@tF2?4yjiP-?WbaoSoZAoYH6T~uvm`w
zXALVXgda?)-|FHPn!*9Ki(H1J77RF>FD#)^$;ybMN0MrJG|n`$2DfI<3e$gU`prH6
z)1i#Yk$9-3Eox6JE#O8L+u8N>RYyJZkueWj6jt+qk2?=DFUQw~P!pW;ZU%@$%@<<@
z=PzJdo8tBuEPOAe;#59xXybrGNA{`sqzG|f9sVP^N`q;g8+h)7_%RG!sb3zYnn$fs
zw#8ZdIMYm)?SrWX2FUbB|NBtm=MQE^xobP=hlQ0c<_|3B`dFLtihTD2qQngTv9cQK
zYq(){1d-c3f<a=l5wX%~_u(Iok5C=&rV2reXdV@}2}h&EK?Xs|D(K(E1$^VnPMa2^
zG#zcD8q-frQCoHWX;KUj19}ww5jNZRK+z@0>Pxh$&ZpLfJw**>yzN^Woi9xK&po26
zcKCxZov&-8kkc8iW;gFaem)5Q#Rlt1`&Wg8A5t^Uu?<FiKUnk_97{55YQnyNBtmKC
zKhsDZyEoEzW6n`f0c5mV%z8;|su$m+%MTo!4r#oMfD{7>1{*5*oDzJLY&p!#9=Aj?
zRn*`l#ROxk)F^&6@8dtK>&K>Chugys!<wfoGaO;3IM#`IxW;}mk<(Q_@f#6d9o_><
zz%;vdJJjC&9a{>1{{~s-o2#V>=ZZ_6VG5Mel=&9_U8s#!+0OZD3F6wjqrE?{o>aM!
z48KZ7kN4e<!n3rswKAz?W~Z73G=oDY_ZQNShj#lVW%}8zi_9Jwd(5)D#iw*XY6J4R
z+!v+B32n9L<DL`t;ALZ#B^4^fl-1bGGkAwYMq96c6sJjrRo#BdTOEEJ6p)NHy|q{@
zUhrS25cLTB7E@fKyC$9<xJjpEWH@!?4or7*(N?r663{+w=5t^IQrHQ4cHxo;7zp8*
zv)QD^@PEeDz#U^-C5l>=r-m-uk!CF7GG%Uiy@Ul8LT3%W`B&p0@AVf8<2SYPT<m;$
zD(}1x3tne^=&Cge<I&2pz+*0KRTE~Hczg8N#NhC)N^NX?o~mEQz73SP9RB6YQlF-;
zCs*1OhHiDLWxZiMAY~=~n%(VV7)3wN_$P|Ci-}gnuZ-^ss!05I@FnAKY(oH_7Pt!o
zeL*tZ$zLfMWSWqd%gpCjLea0;+%onJ(ezL5o@)!tn}!%?A>c`X=vdP1$;S;wA>7E_
z)n+FdcUBvv{Czjd>wof$z5hw`sns;jH5mPlsY}BLtj5bQG1G>v9g0u8_wLU(ZgOb}
z0ZxojCw=9G_cw>JV!Gq_V`T*gm6#<7e;Pj*YHG<KjGBZ=M~(K1>dB!bWh*q~__sF}
zU9O$IBpWOWm!}t~6gA>tGpz$i%jDoZ3oZ*PwVZ~}##J}U)?+w|n@WF^XG%N#Y2-g0
zeKl0KS4tI_Ysge?drxK}7InkE2EkwW#MzM7b+fafm;n1L$KdEH)T~^VmcmFWPp-`p
zlZ(`Az=PCt#9PDdwjAE`Vv2%kq-*Wq8;5AAJqMP&clRJDYr7zP0k^F!<8EWuEPnIr
z#jmin58Q*8b@;!Po~T~u|GW?$-|`Gv!2Ef<8Em__BF5U>J#kDW)aaG8Y0$|!6p^9Q
ziYr^oIK=}#R^d_$GrI}!Dj6BiC+|_TK9kXON|>PiXt;|-VvGZ+hDfIrpc=BP42<?9
zz4U;Lb1uTk>!^qrVm*>xl&H(s8J-LKf17-a4WXJk{w04^kC?c5lunHO;44lu@XQ-6
z`-0NX6|ab?9al8t&4>enn1*GnKX$pj@)wSTaa~6L&8Gx{LjL!X4X`F98r!4HUVDU6
zc_*&5I`s_+#JXq7dI;5Wk$Q^{&7UW=DG60ZZ>EzlYga9Xw5F+f8#=xTG(d(OH+^r?
zNElqn`U@w~l3=c<vEvp-n)dF?mvKzZUK>P@!be^ht7q;VANMcuZh@@*oT-hnHB`qF
z?wp@p?VLDD6x_QMkHa&Tyr{~53rDMQoTNN28V`X)?m&#QQZ5{a8uq#Q>>49@9Q<N5
zTZ`7hPGHQ4k%}%oVB7s2LM<Pu=M_um>vPoo44NR=?2qS&{zDJnabAyw?^3e$R+Y~P
zn)lV)-if=LM^Z9pc)6xUHN=r&1%AGDB|;+cJB`=nh5N%+G!`BuX)$2Z(@WNip#J0X
z!rhF}GYKdXh)jN5D(<QXCrv1yt9bd7lf{(Jp_@#qC07}Tb<YNo^$$211v>zTOdJL#
zHKR7;BO(<Jb%|oVArXuJ475RxR_XQxUP-QV=1eY2wCHfzDmaX|a@$F#isT3@k}#wy
zxS-elP}8DVPswAV^kad5x~F1o=9d_Bn;zNt#nL$sI8KcN<EO_*RkZUO+je~+;PZp7
zN5sEUV)uWq+PG$QK(GVOp(C>muzKU>o=aP5J4K{q^yuN?scbEJm&2=B>a5ST`QWU$
zL>c<kn^pKWoPGfAP0yEk<O%1{58H|<CRF1Bic+z}o>#-v$Uzc{bda^RX)jASSEMkT
zGvO0RX~V=TBQ||Yvget05)bu34Wb5J_GBz1&up*?R8-&}YP@By>6Y684>K&C<Y}SV
zab$<<vI`DmaBK=FnE0ZOiVH6fUPSi8VLFUd(DNK(=VKOp_zC|Ab@z?Ril<=nxq2~s
zf)>3`hak_CN<mLaNJ_ivSh~Y2hc^W16NO+9t!Pw=Lv#KF{#Kh(_(-hpwigRUo1Vlx
z1(n5U!9=u50W+TkU2iaOH$@N^{g0Xg1)H-;I00;XeY9o$B}!a*sfNI@g9grAa;V}}
zdMWD^oT+O!odE6VCJ^hP*qv|nQbg&=dOq7d=hqDl=-A<X$mtm9XW?m;_5e7&^r@Xv
zeU$Ks3E*gA=DuoHOn8)kmq1kxxJcHTYl0i20_Q^V((u>Gr8HxAa;)rZG|ckP*h=aC
z?A0a#ivH3Q7tfok7?1vryUsTZSd$vDb(-3(W3_i^+m$5vSm|=VwTTW?e_YTrC9aB{
z91Ed2FuRFpRI3*cRRw&kTp7XN=4+&Yzb725is!VGB8YHMZ3(9;uHu=n9=LE4tvofi
z>1j6@ui~+tkN6-7m|J!1k7c_)d2zrLqNe>}CzDZy&Q)6*U9!q~WDL+8U_2N}{4MiI
zmCkMnB`=e%da&9hY+c^*i&9?q4E#bkqN7S5VxdOkkcH`_Z+*zNlV}=YlCqnT&v&!P
zhZtplQKbqOk4WlWuGcG%r?|6@!Rs^y*Q>i&YCAzNuoEID4}~RbFgK?RqL6#c8XFhc
zFTlH9D+>uekUw{XMWEwBX+}jL?$bFu$aOoT@qWhcC3-2n8k@<o9{^J=Ms_^Ep83s#
zcbz3sw<y`apzWrYH}RkaID<Zg0ZGV2EVP`zH6ApN6;Im>evHNELx2DUbB-k{I&Vzj
zO!BE(<5qSb>QreKteQX=G8H7|-*_t%?cZOQc^ObeC?Qc`DM@<|QDXb(x@$8&Dm+_%
z=2)~XmX`7@ks4F$^{t@8Qj_*xJL!pynu3s>{Bv}m{(^r~{=l%zW+$&0s<Y?)vS})x
zx;v`{oMib@@0$4j84^!YnaoPb0!YPM5i6C@E00q)K0`DIX{%!%6f8WK0A8F(l@e3D
zR)BDUYg+-!d`8*|Ur8i;y3#Kb@GN>3PS}|x--PPChp@~<Goay#(bu&kv(BVqzHMQ5
zU*w63K<g~V4hX~M4qq;*fdJW6DHfL=OJ-u)=PwW(q!=?NSLGl%MGU(9sgc$3-Ev_2
zC@8*H6nVR!8br&td&0$(a&}4+M7({_hkd@}u!WPKICAYL&`7<#Li)$yG0Hpb^~LAW
z4g!3gc@0;{T)WsfB-1n!LeBoUJvK82v%%c{_*4I$u(NAU>@G9xr=-}xFT1DpM(+8T
zTOqH=`Cl0{pR?s}^0G4R!#>mo@P>ZVFKPV_8@eOiAOcBIrkiota^b!B-X^Z%kZw}Y
zq~bem`bLES{>Rksae0rIP+%a1{`1)jK?9Pub@gMzjU>kD&Z3Bvv-X26F48k^nR~%%
z=2ErF(rg`siC;-XfO=1B`+>@)^?h{%I}I*uU_Z8!szzfM#>aWJ8NpuN389nHB-g*t
zOHNOw{|Q!X18ZdEWwd4mv9oFlxKXGoLMo`l?%Uhsd%KnDKOW*pHq#^9k`z~l0gbF+
z9lgqEgYxx#b^U}|KA`51wGUx=&!!>hVh&w41uC+Z64x$b@KRV@Kb`kR;RO-P_bww?
z80^`YHc9;m(X;7^y>_xgVx_IO`(>$7W1KxbP4C)wR*i#XT74W6`_#3i;~&7AP+9vm
zQ><P4WeCHi-d`%eWWj#&*T~q4ouDDJM%(KFsY6dQk6hcuU1;aapC@NM{?^SXpIhL4
zz5cVC_a327*LV<V--YdYZ7=K}D{5P9Bf6fB56PEPgIrG%ub@tlhmd#o?#s`*l+v&m
zheo5+jipgs{v`yTlcbIbjm{@d{rfG#JBx{c017d<jz>RS{>}hWpBr>!bbEH>1XC5&
z+~jSJs05^G0`h>g1D>F@U;Azx;>_U~Y+W&BNC3LZNKlH6DUFfN-ul-<O>qYp#$e%t
zFbX&tEQhB#lq8{7rblLhz-5*235Kn{y{?A@nwb4#Z_c2MP+Md{?V9gep!=Mn0$%{T
zcfpZbyJj>8sB<ajAk05OmsS#6dic+ZWp9$>ocbHB#!l+5w<g)+B@KVUJVM0(llk!%
z(3o=N`~A|dQ?YApw>iEPniQJl1qLF{hQv~{p-|*dnMK^IDqv5xW0t1t>gIS4Y03D?
zrZ66+X%+>AnJ4rqv-%ex1u;!gmGGdsK5xV-2t8oYj$F9z4BJZ)|0rO7Bv<pd^SWJ`
zHRzHy;mwF;=Nc0@&d>PHoM%^0^9MK8ec2LjakBJBi|+jx(^_$zxJg*3sdT9cGhKGq
zZjSKB=o-3WFHQ%Rdhpm$3JPzmHHUdM5+Hprh}e$5d28y$*XN)g_pb2lIoXBl;w7qM
z(C5kLC3sLFE2qTsww3ha?x3~J<0ARY^AYF}khP|3$ZXm%`YlfJ@kXr46(T3-{*u0C
z3o_uc&$su&!)HeGy!9ZxJbeq@-t3cj6tv*^Z1zXo{?N9|I%nj~W6+b|pC=y&4@=p?
z6kfX4Ho-BY_386{!|^U!M<Yv=UJ(HsHhHm9!*%n_n>mbnUS>PHuj$!1tKr4tQr`o>
z6GhoR?j*qA>t8{;p-H{BZrhy~rTc>MLm~`0O=TtSjkh<INk=e=(bXm{TADZpLkovt
zXu2HbH2}NeZ{Sab!jv2!W=%MLTJyyztE_I0ZU7T&wwBV)s9~^izGCAEp#i)E>oxG7
zn6>>cW=Z(l^0bhqEh4$1nq@|D<bh?d_z}&rg;E%*d=xh8oj0<P*8yV>my7c15D)I!
z54Q`^s*24?@~tC&HEsGI%8QYUnbjALf|B+XtE8Hnd1?rOV^lB9lqKKO0dztSQxb}B
zmc&i%Uic*i_?8h3MI#ov9Z4=N)<mT^iRyaAt4FrG{OiRtqpS05^(r&&M1$ppXPo4#
zg*z{B)wfXZqBU?E3G?tzn&Ag@c)A{_)-!y)n>qU2Rr!)oX%B4M)fRe)zjrmPAr!P}
zjIX|2DiqqtIlbUiQEe(apYP9P*3rM%8vL`%#>K=0T!d&dxljOMZ<yMg-8LQ`VhtMi
zf=E64Rut%8M=GC)9!mv;ZXNag!C`coaQ%Qyi8G-+KLx!@YdB*6Un1oJGelu9fyQ^y
zuqd~`cPizXdBWjU<4rM*iktXq{fgKcF>!-r1U+uj`O=yQBub<y?;7gxsDCLCM41~{
z7V3y9Rxp!(7kY)QSa+jPNmaqreU~<YV7ce+=FMUHQ=_nJnX=9G9sAs!ssu;KAuDc)
zSqU9_IMyHSXych{wSZ&=HC-HE`Fuu5^`Vu3oL*q_IBEoCOY?J@|KCis(Eq{FNlEZy
zaldbV66;IlZK4wx%cuF-b6PnV5efFDw}uhA%B1DB8qHsN=$F+9vwEs&CRO3brZ3en
zdfBSfn0~`mvHFZ4I?<M?<`8V};MLt56m@rZN^u(5Sl6r+7#d0jIIsn8I}BKgtyQax
zUK#XyPCV;9N>TiVp9mDtI5L?)OC8k7U#gp!bVm}I!fw3Ao+xXHnOI@$V-ny=MsBCI
zI4UzMv8En`gQ~3?cT12IZ6Vj~h{2>|G?>TdE_=26mDdq{aD>nu3+dAxOgb@hAs=-0
zoY>;y*LK&TxeX|Y^)xnP^>UfCz5$=r1`^_4w>Ot=<)17Ff!@5Ytjt39;d{`XTbpt8
zQ2sxhi?rM42zt9`@;4juW+*!3AvP(|H(`yqZk#;}y^v8CCll6lw;JSqClZ(jhTU%&
zp-!nlhYV<XXkfrBxB#_nK>Vk``~g_Vala!Gv^VvQ*fhJ|g3L$!GfbEvTw$vQR;4N(
zb0ON@L+8_iLGa0G*n6RaU)LndJE7BRqb3}FZSjs|CMffWe2vV;s$#TalR3A+M<uSy
zEtKTKP~QgAjwe}}p(@jf$#BjUL*XPr(ArT<3!tP=M4T6i^V#XIfc576FJ7gUW_u8u
z@=jdBO7_aqg6I~&IFJ;*M?A}kVlcmsAnKVjd*XWz#`e?E@H_{f_4)<xWG()6_1F?B
zD;*ppDx_aX`CQn=#>K3NcAXHMEU%~VnW_7$T!s4{jn1YPE3qDVhGlrkY8+{mHa!^d
z-Ar7zaem^hBzUP>$4mq}FMt}G_eiIsPO$eVG0+^LCVGdpM9>o<k`9nu4ywxRTmnc;
zmr6=rG6TB9w*tb1U7ftTD{umJ4KxsOA7h}zj?iF`kZT;NHzbHClEO|$-%*P$>NbHm
zA_K(F&ktQMrspVde5ZZp`Ag^?41~I(ZEyMoja?75>U;VX`W%ctZ3K~C4ohU6p9AjF
ze2zN1=b&qYXv8GUvk7s^&0*6%3*Mcxb=<xbrW=E3CiLJ*j+yU{cg<GQHcyJDzp`zg
zI*l6m1bcB5q8ee0;}BPR8~R-Q)Tp#|<4_2u>*XG}z&5c(sWZfnrWDL&-LPF<Yu`HD
zdW(yO%KvbPnb`)x$=pF#ulMd>j#!x+gB)zge?1`UOCLwtj;W)!DzRmRo%7a%oI&2H
zk_R-=7YHY_{nB_Zt@A?+Hg7A8OVG(sHMY!@J9fbAi@NE1yM~TG{|G!f|7%K^2AjvW
zvb7pkj-4li!rYoG_AR2NZv%Y9*ctcaXJk*GL)bT{#D;MRE6t=&V<*#S3)cxGCrudz
z=Gd{vDGSGQD*D?oeVrRl$AP35QxXbL++A?n4^3!0O#hh`GK)9(9eGei)ah#-s~n~w
z8_J+fB+0D&EJ*Y>j)z_BB^!CQrgf48EYHUUHjK29O0)8C>h}0590_TA`Tdq19m5;_
z&-f0GG$^slH<OeC6gXQv@YaD?y9aUb{7o(2iQ<L*0Rez?2L_^!o8-c%%DZEDklV#N
z+TvrM#`@Dc{bzci!7AqSeP6A}v8>gL(;4cgC$tU_X$zYIkbfEF6SWi3{d7{+WdM-0
zW(DQv`QaR+b6e;nji0iSeBBR`0V{9F>*dN_RiW&FCS4S%0QmGKQ3Z+5?jTR~YnxJ?
zJ^y;P8vbFhYjj<zGfS|cjo~7S&N0*7Cfc9EO&#)Mr>~Gy7E@I$XVI~b8g#IB)L^Z}
z0}|r!%9kzuz-<4B2WxN>b6&ds!YFgNM@f^|paR`~_AEL2&}?}douV3TfJbfNQ0g(#
zl1ZwtJbEzpW3CjZ+V)HgzN{<7ySTQz487=@K+j^trY(ZJSySIGTY|qy_b1i=PPZ&n
z;dLFS2b**G1MMNCU~PFh=2xpsPG!>{3ad4eR{su$`Pr9-Bd1KK`Z*NKF-$rXMvM_<
z3>dCn-DK@Ct<c#|o;e21PDVuV9Z$9T;5Y@r-8r)}$dVbz4RVoNWu?A(KdN1XjcYzb
zyVk@KMA(-@nm(86RvDQ~{g636aCq-<HSu7RA<X{=7!LpVt38rPf!el^!cqJJ4tIQF
zM0r6P{fw<p*p?$aCs@10W226eSDZ~^))jIA<6>k`6rdbH{sk)=M;MsU>%H;Nor!y<
z;|SSgdwIEC-&5$ke%pC>&3pl7_KJAux7s;GN6VrU^0*Er^?dMcY2D}!sG>NMon1P)
zjR(-=D;{RftjXg08{pv$2;?;GGqklhzMLy~`!5jlmGgT&`ENUoLKQ&@D&K;tK*?h=
zu@rF1jML867KU@7B7P8=Z!IO(EZzCt13Iu#BJ)pMUT_W_G6xPI5)zU!04to@zr~G~
zgPuv*bKLlJ{W<}?DERE`dDr{jN!RMcH`~daInG}sU&qD2yPFQM5}+K*@eTxh>f~?m
zdh_Pz8>w%?s!`gs_=l9VVBn?nYuhT?^5HM(c1JZw1Jh@Q-7RTAj$cIL2kjo5oRgGK
zC*-bSpt?$y$$wx$N5g+f>eNlI8_XOa+foiQBTM@as^b>Z(p~53`)N!spPXEl4y}&k
zoAvswt*1`m5^~wuZi13X$&a(Khp`@%h^^@F{e6dmTbODC-UxD^dEV1j>b($kK1O9-
ztb6>~8|^&DHQ@U4yUmkB<uDQ)F<gKwnJOA1rvP3`tb~{Kba-^BX~#I7n<h+M;&p6l
za`b==$+&ASjB;xuY${6N(|8$%{z}|7`o|GPeCd$*O%=c6Qj|w~0E=VAlo<K`08va;
zg!F`6BzfEY4%uTiD^`Aqa+vZr8rxaJaNLp29~%$DT^Hp7g_RxM7rhjJ?slkJUw3X@
zA>~K|Iq3x{m-!M;36*XAEBbC>y%gD}^yK2SDR~SSluhF13bemP#YV<7{oP%={yg(_
z2ArYoGw^#|B|}L&LHCPya}}DVzm*F(w|k%ygU&>Y5qy1qiwK#C7NUJiq%JxF?tk%j
zXe=o2xM!v1Jnk$6e|gMLjdAlKR#hfw%!CBtYK;9D#eLag{>fgZTV`qEt=U$nrp-`L
zH*XFZ<5#!`=GNc2joEP7kE-r}Ft2G=lGfUeYdKun6KzMM7))5SGH7XP@eGW$VPY%L
zs-}CVE?rc-9@h*?%=wnsXGO(pd_mco_Rk>N|Ce-)9;auav&!_>fkJt$Vk^b!Lkg#X
znFNHtSYp48;9h!k#)(Y?|7qS@4B60Plt_zVH7im=204e7Y10Ll1jj3`C<8DMuq}MD
zpT-tC`vyt9yU<>*(C&}ppu*41ig~%`-BeJ#^5&vBqpAG3X*&KHLdPghR#<UOwC%8y
zDmK}B+F?1+%i<GX)PR<Fq>^F@53j<;gc{@1Wvl)MspqYL6L4c|%eU8daJ-c7^i(ff
z&5tPLV{ag1oF|pq>UFXuphj)8dp8n(!NZa<<|-8YM?@;1qKU!~xfR;S?8@@>x%kQ_
zAkGl{S}v!V=IZfrT{(YujN(H{fHD@cDmFeH0LiaM(Fo3|h&nz-3-Qn0vz_C)42h6;
zUsR?~U)aHrA{=V(M}eHLj%K!OtXQ^s>xPJEs2a#7d8UTtOUSiKR{ec$kPM*f{b3+T
zuj8vs_NQ<_aY-HisXQAwj0yLf0O_XuQo_n*^md~AIiJTr8{pH=@h%GaEPkym0C(?$
z&<}uwwI<O~F`Xd5{OB5Tn%VMkx;(WiM{sU=N!gA^pOyCKiyNjb!O`Qd+6)61>-R4W
z$ccf5X`BbHs@@4%aryj~T0bu(UjsfIrgFjlJy_~Z@L%GU_$4<{H}a!1o7X%mV?>~V
z{CqC=?Y?p2-Lj>hz_!d0%C%OVB6U;O^mWsG%g^L(L-ty#i@FuuH4etQ$QDzvpn?*-
z>@U7b-<mvKH<DhK^}Cl*pnVLW>tm_D=tn*N&TdEB7#=BTF-S0PWDlBewHCNNZqpq;
z#%9mJ``&P@kwtpUP(K^9*T|(aLPd^p(W949_y}{B_4#nMse}LgaQ6|Jm4UWfreCU~
z9Snz`iHK)5+vm1D41;Q9FsY~@gF!i`=B9GN?Vai0NEaTeXLV{l+rv(1KAut_u4PJv
z&4?(2QBJN#qelA`AJO?^337ax%nRgogY?qMXA{@;b%vr7l0hn>#B>PTn{5v{jC4sY
z!hQUd;kfMt%;VU8=oV#SuH-Qn4r2we&G1i}Yo^^p+oFBBg?8uq&+|TFm_;;m{<>?w
zB&ZJ^GK<uDG1k)Q*piYoB-56x`bp_J&lkM08)@G~M%Ri3=y(pX5g}UYMAdpsh&-}}
zNl%~w(`?e9?a?4I9Ogc@3_vz8Iu@a$B)Ct<VJtf_86Ue_TRZ&&#FM<HmJ*{jrN@$?
zJ%U@MnB(P?xMa<RA>-EaOk1ta_Mds-|Bm$q_BTAXFio65)llr07BN>e886wf8k2RU
zRr@&4I&GilW3b?()MJK6p+#xpq1B|absxcJ$|Ko~8M8NpU($IjJ$RMGv3AEBnkf3+
zo(4prQ2MiT$F+pd5H;yjaO3(tBC}pw*H|(j>^W<OG=&aFNJ`d2$LdvP;aeUF{B8|$
z0h9WOn*E7CKfSyxji^BpcDZjD8JJ%%+><*i+0O31&P&p2z&2RNK*bj?6Ma^2CG;0=
z?#KBFQ&c=VLAN39#G)@#BommF_1ZD{XEL@K$z-tXQMuca2x^o4_^Q?3nya+=UlFM4
zQJqupD@wwRvJ*d54{8il9N$UW-7$CE>*~PGhSQxNFVT+~(7-PDova~XZbScYfg)BB
zxXk*$adDB6C6%l*vp?W!BzYcK3H3gbdUh3p1oO>NRvgh?T8Y75>O+<SM2pd5DI*_4
z!g`0I1fQZ?4|rbZUpIWd)#AJtRFfH|EFp3AV6MNHRa!u<!U!Rmwe7|^dpikBW}05#
z2eO$kHbq}07)aL@JK8Oa9YvkqQ>eud-*jpb&HsWds#)edbn8?{M@Pjtav{aW)>i+i
zmecGH3y;~lO7Xv1l@Q8*qLtN#%wBv0jaPentv2C;{7iB_pNx)HPjsSI3$l32-fF_5
zfpEh)gT9%@j0i2yD*-||&f0GQsYTer3|J<U&@%zpIFm&6F#G47*v<ziig9s@`u0?w
z8jMgT*HkZwG7()6V4VrO=7i|e86bK(?rXUAa4fSp<(|F}wCQUu9I8l%EZYC=yCBQF
zFKA%b58m_UY376Ii0!E2qA^0{VH&iBkx;h?fZ2Lw&*JYeEejwQ*1^WHveVeN*XjXY
zRxw&O&3{;7d9Re;lxvuLBPf4a4@)aeOin3<4K>MmUqlChf7yX9R<VQ5>U3VO_=wQ*
zq0*uInxJ5tU@W4*mUOKqqqy`&`YP8!12Id`XJXU-J02Z?J`$qgy<7R?`YzSy3eB4y
zOm8C|1T)7UzX)E<u%(49X<P)RKUW#$**ikg*Myroyn{nkzCqGgew<DS!0cg*jyg|Q
zHmZ&$(JRe6GJjBZ&rlp$#8<D=Y3Jc0=nMm?>0AG*o7v;BqRu^r>#O1X9QPX~mqRo;
z>49w6O=Y&&@3%?P!6Fr*_0}@RzpUf`Hw>CgMaH2u2NIlVdP%UfMB|eVDb$+KtXU=m
z1@AYHYs{Cew_OO<zMCm8aoDyY!TIy4I&$)3H5Tg3R;m{Pz3}c!I`vv8(mZytQgcyK
zo4fPY>8Yd@thM+tp5=5qV`%(Bf4Zk0b?jNKCM7~o3dw%J*Q-yow+&;JOey`R;iwb=
z)u_UBkrQ8q$1E(<0o%W8#vm=OhszVci?LeYyUFIu>WUQiy4pK$bkAi1D9VbM5DzVz
zPn|g(mZ^@kSv?2e+u`d`g)%_m0syhT3HujVl)(qKL%;K+%XI~u123Q%%bq|aDd~NG
zrBB4coTGWbUZtM>LijT`YoW=Febc*EvD0v2e|0zj+l_Njn1cuZ#ScN&@Db+MYyHj5
zG_?D3cp-3al;AJc9p|Z{eDW^qiM0d&c<Hxre(l|9cJ0De5~;m@YkrE_Hle6`^C?^0
z{q!?eY#6wO*9<?42$$9tUS`ve;1pUPmPCT~wP=*ljAN-7wG%-x%sUG;;*t|Rar167
zINFB(tfy0FE0xV-Vv!;9a1*k{?+j(?|E7Gb@1FjvW3Ij?day;13;X4Pmg^<Yuh*$8
zl>9C+`_B2FXzXgwuotfs^*a|>w~vrZ_fQ(GR!3U%Y^I}E1Cy8Ni+#oFWg8#XkysBY
zS?5^K^Mfwg<=EN4^X87Z<HhNz{f|o{Q)=@mD-&{5d%6p+*G%d-<h4#CU_nOOk_@@U
zF_Sem9H_elEy%B49OhK79hqDOG4p16!jin+Mf0Rl*TPXW*ejJ~-SyJ{xr23H&je$B
zWxX?nB2OyW$wYoM%U0pUML(o16JxEJE*mc_G1-QZ0Gln}-yL8T-A7JjC!Y@1Ch53<
z(|;~K`vHZ5dkdqQy&QU}v*e%dT7|%kucNQAqH7Lr&fROJ8ne|Rw5wWq7#itua^cS3
z=ZPT~k!PM~k5AK`4_tSkmzh7mXMb7ts1kFS;YqwtmX{z3QKTcDT;iGS$?b*pc^dZ_
z$qg&iIX?RF3^W{Es&DDz%%W1sx-T=hPe?C5whFSWL(Lff{^+IA;`-jEd1T8;-zsF@
z9ZxFkmSD2pJCO=oKff~S?#GnNt(#D`Vdmayb)ryXRqVeYUDW^Z192<Hcs#9>Hv7a=
z8JcLMgHb|bw!zwmJnc^!j^zyM{qiDID&9E0^B#N<Gp3{Qb~9-^(g*sK;1x5iHL^#U
z4`?M7e6!dqo>2h+7nkCmLXe&ToFm^f7@<0mj&;<?qdn*mA0fYUEai~q=uE6mW~@15
zO7yVe);DL%*%Z~UhR~mv#xZO`Xc-IAK29@a$b}tMw-zjD8HQT0{%2w;c3zcHlkZ}F
zCJ4Eo?OUj-!a^l(EM4rvSB!<HfMDbAXrrsSNHR1S1PGnNim;3ctjJRVfVkhN6!AUd
zgUojeuR*Ukl2BWOT_}Ox{uHj@bBCVgz4y2(nC7-UJ{aC%)Y7`G{CzQEwW)@be&%3k
z3Z=S-64eE|fqT0)N^tx2>g@SZM^AUAFDAD8+x+5*L>WROkXlE2VnrPhQ-MUl_>xdB
zL+|3c6xg(Odzhd)nnCsJ-ujvfwAq3;(|p(i+n>}_29dbbzkTL$^JnAgx$lwpIp(cr
zoMt>}{u-AR<t^|ak`osm$}kb^{UYIP8S$J-sd^{Lw2t-n*!2HR1&;3%7sS$5l?Jf%
z#;`Uwa=>7wN=(M|JzEpVgUX^}$wcS(*dV$O3})2jVf`06M&qx+TNtl9t;8&Bp~mbb
zNI4W@@Oh}_#TK{MRNn_|ykK~8$1~^X&f0HtzjVv9nKD|}m{GP$@M!&MQ%fDUxp9V{
z=&Q>obw0?5uRYl>F1bkM`>x>+Koq4-=Fp)7s=&C&I;JOKXtx)oW;5`?imfT417eNG
z3DAXUP@_n7%MXjCRnyJKikaRjq->OCL`yFRs71%p)}}t{80l5V6M)nLVrfSJlK_n$
zysFa0k`Y={HOyZ)zz3)v_xKM>j#mv1P3^k}-h+O<RJ+@Rf)D#DsIA1;TfTAl<=xlb
zJ4?UsiXDI+qn;Q}ih7xmIs#4L#dN#Z;l{s)veSO7nAWMnF>0ozj1_S^FR~nVeUe!B
z+@|$;fcNRaWm?~=TDd4zDEgUie3%+WX_o15kHFE+HNk!4ray6u@tbPYT~_DT{j4U*
zd4mAE|A#mCdP!$yd1YFO?b&-c3DqPJZe9NCtjP&Sg2~K}juspzB1L2iw!x(o-Nn0u
zzofe-r~mH6rj==vLvF*P;~f!R2vb`SWj=_5WC2^DQ4CfWYcz|<;)rWrh>X&Csv2}6
z_A$<|V6jZ;Ug}&Clcr>r^~*8pEUU4^u56M9mC2ej>z6rdpiY3D_oD}TxG_p&@<RB}
zJaKa=fppXC1f-a#Uh-9nMXBQ-np|eEj(?Cp@~O!C+4J*+A4{0v<%H(TevykX8^jdJ
zcQYDVurU1{s*<QRpori|CQSJ!*O!Z^Y4xnBC9)+^c|y|iG%m632?OU~U**K7kQKnL
z+9Iog!Pw;DQR#13n!RN)wNEmKfz9Updp0!wEhB8<Z=?JP>S$?t7*1|9(qmUhH}<ye
zAa2&``P9td(;l>9b$d8Rw>ztJeeFd{w$-zr99df}`Z+6YC60M_bx%zLv(O5|%F*_1
z@MyQH(0ud0G)WU3P9u=@Yntoj0H*Dpw(Y6yT|Tg5Kn9yB>of<#Rf#WEzgEuY4!!3l
z=@X2VzMe7rJ?(7uZy`sPez7@D*9y?8AeCEj@x;*tM?D^Wt-yUFHu^)b+pvQj^Vg#l
zQ^A%FKFe?JF-N4rh6)4o;BPhO|A?-u{~euX<C+ULd-8tj@)3I8Y*~M}j(S%aa`>*d
zPU6sX?N=(z<MKS^R>~<CwLR;u8OPJv@su=2ULL`IWaIL$w8sN1%78)Q64<Q5eewKq
z$MW>jEVDf2c&KsVr$<V{L!3jESgYFeYU#@1dxX~ACD+Xn(b(<4=bQ8d*JU~o<D@TM
zoAY8drkOsHt?#%59GhOoXC3V8dgwZb6#Xe#McX`g>E(#tOuF^N6R)T9)aMFT)$!02
zBH|MtN)KK&bs0OrPZ>ei`wUZpcvt_?&&~PTfgH1{N@8K}9q3@oExHFN_$_RQ`Xdd>
z?&hdY`sttg8Kq4!qjWkSli|-g`u%_;kW1(-dPDl6(s2pBIiX%Chhl-Bu=}KHQaHFB
z{Zx#&k*8n|Ck(iJ`37CSp-bcAjV+P2LGEmzl}_g%6vN!T{PIHud#$qMNBXSu=Vj1z
z36)u?0QQ8V_G5dPZ>?B(rnwyN2Y3CmX5K%{AY?gK(0c=j2NHR<x5T1SvB^DAs4J85
z&v*jpN*oLa{hvQ47qK_lIh=x1PIDqmU!BW4|DD1m{V&OvaYn-Fo6?NW*yoRsm%CNH
zbhi)J0$6FCjh^V>TxWf%y!h&uo;LO}?!lJ!lS{rZg_A>m5K7e%dUhCnq`^KcniUlr
z)LryQGBya~S*DU}I$Q7(J7QNL+QTe=%RXi!^${m3T;;a5V!q$fO<b6jM00}Aj54_X
zMzWxGdn+s_y<5&@YxDGND}jqlEhbmvM3mm@57r2nyau}Q%yJ6lmIlJJSb_XQe>D|Y
zHsO9($bpfIk+51Z`D~7qAeE&)yI#KeSW3yjQ%ONTuI`=!8CYKKgBpj2Ag*!=B4ARv
zS6qh<ApPV`M8Sk(fG1u?SHY&1z>D*A*>$yreW_fmW^Mx>A0<ZX+-kd@(1$PfJc|<C
zSVHjL1H%vPs?APgi7Ck!BoA`N?Ad61CxW-N)nh4j+)Q{|SadNTSJ-xUE(+(T@4j8y
zbQ;6U7GCq-XiYoj4gplkye3ui*<wxm15f9UdylG)zVv4k<|lSstB3ljsO#yyw6lD(
zv{|!&unk6I9ksX2!ixRAO{K;oPFjKmuArZ%>ZY4FeV5Hn#$*HkoEhXlc9jLhGnj~&
z!hD$f6(V~U(Wm`jZQ>)V0sLg91_|__<-QhEJHf`^DKk4mz~%=FF^Ys1<T`kG44$0C
zY>&sxtkEAkNvIhRC4@=9JZiC2%F3O~ZegFbwMfi5wVv^=9CnSLAV93ctlx2NtBz^5
zsP}S?F8^xzl2l>tQflktl`FaHY3yK-xGrG~YlklHq!lol&01vSXq2ju9!cHtx`i`^
zUaxo%N~1(<SQG#-!!QcZd|#|B7LZKAfowNGs6fJsEJB-YSt3nW+@k3f=BhrpgHCTi
zf{m@iVTuP!V-OMfl?3NU%UCtec;<Lu{JVN`=dXe2b)Ryul-jtr40vv&rU;Nwm_*jj
zXg*vUCwFe0_J`!0?}VL^<7zzpc{%kNw(T^smG19C5ydm^l0IHZ-!p9F`_y%E5m-I1
z`4Qhm=ZxuYOViELW2AL~-}?wfu%)%z(x7O&rD$?|vsonj+gc2xneJN|?^Kr!=OM^g
z3%yz8%%0*xY8I*%5rPzIt1p`3L}>hkU*d&og(HiUmJrOyQhnj5GabRro*4BwsJ5`W
zeP!cc`f>0-`Y{ts<wkWH!uEc6hG-GfBO`f92mGbBQ_16e>N$I67tUjt&qR&A_1$g*
ze{XGSz8vPDM&oKn+pa5@LrZG_r^7<zY<A8zY$KP1t)5&RCkv@UQ$~?tIG@&W>D8Cd
zEzjuon@iNpAB#0x`)yEvSt7i>NpR|(4sR(jm%?cS-CER$DabjZCO9^~o@J|~`saLS
zC(+f};06LwG6L?Mwt_&muJfjzH0EcUSMGMCdDIVqI7xl1J>^c-?&51>aY0GQ93sg$
zhRbGRY+t0R?)XGw>wuGzA1%&8I_Q*qV)>W>TP(D=ErdB#Fo_}o>ex<r-xn!N_Z2Dp
zOY1*b<cgOou!Qot0Q1&J@GXzST462H?TQpgAZxcmKc6}B`z=^&DKApwj{~H;pZogl
zH*UZmOukPFm=Gz2XDyxGH@?<)$~CudLpAHDXh8AU-EZVx`-TtI%TBFZi${j%B-XPq
zl{-&0uVxV)_?ndKxa_Rg%{%usn#jVjyZ|&ZsI!~5!!m$=X70$Fc;CIe1<5eGjLGHJ
zgwpf5W%Tm!LMMbIJs;*@utV@ay%KCXT1I>upHUBCH&p<M1htv1VX*l)WzJKGYqhF@
zdW;hnbPstZ>~^<#z*D2#8J`Qb{m0oj74n{jpaSv83mha$k=PvKpNJVnqNh0ewm9{h
z_5}E4-aggNXYY|4#R`+jF`C&rr5t4<!<oWNA~&o^W^#(vFjZX}dlsbHzpI$Ix|f-}
z+C-xv%bb-DQE4*S(FW;H)+vugBlG5PPstXKHZ<{x8NMY$E^qlf20n@qYwF-tbf(G`
z3t*)bK2c*+p68jR)ZK#=<vlwM`<{Fw4T~;7w!&a^TNib%2e2J94lF>3`s#5eQ)t%N
z`>^7~r(p6U*=g%=nav?FgZgekxTI|gX_0NW&wH}RUEw)Y$HfF8V}268C&Q*~_<l4-
z_^vLFqnhxlNR6D19LS!Vo0u*OxOHxZ)UW_bY7nPGJs~7@*loq>ZJ(P$zp8pZYi!PO
zChQ`gz)6^ZZK(o3X||ZgqX)^%r-x0&Xr{P=ehmY%dMB~RW6xVFk;kBqIWJNzD%qVr
zI)x_9zfH8hVEhp7S+K8!CXP^C_5Qz^Tuq6I3Hb8~fLz=r<8#qyuo)WPJ4#bEt<3G=
znQ8g`wa*@jrSu&%JZqdp^UJ|<HZ}$0K&#~nzf^l@T{Bl}w%Xp(ovK*k_<Z-~_{(x^
zQwc_c?8yi&s`A~o(DS@VgO_Qs68iX80{&SBH0Ke}kVm5-lXPk(k2^wG)$d@l1c`}{
zF3yV%V+0;)w7`K~lg(Y7d+;F0^&s+4deMWL!C!#OHPS4pjA`Y!Ye^cSAjQ^k(68Qp
zNm^;f-7-UQb!WET2prTIY(+I1;l7rU`6Fo^*1RCfJpA8vMU(4YP!pHKH;)3W;RcWW
zN@_av%kSGZGt$+5tB+{L^rCil<h8dLR0XY`R@|YzPT2hp63Fk?LC!+Xe#5eNs4M5Z
z_plYZ(n{EdS3)6|{_NU??tOJom~I;xUn<b_Jzt3lsV-&hg?!ILt70b8x-@hC(7Q&h
zi@|5ztaKXyF|HDjq!q8F$N;!+InpJ0{UXDW9KyHk$*~GG-bc?t0-dn+#3IoOo2#&#
z(3Mx<>)w7cYilY*V-~CM^7zXa$)NYYNg1mFR>$F#YrBw&6Asq3={w9{EM^8BGVaPg
z>r2N6Zst(YxU#m0=f0t+HuX78jO`8WNd=1pzA%@IdjLo=^oN_ncyB4v&GyH7YM%22
z?Dc%%>E6(XUQ(Zrkb+aF`;f_pX`rCTVxxmn=2oE$(4^5Kw5<M<l%yR0P>{KLEQ4Hi
z6MbB-6!lG11y9CjZ%>a=pq<_Z0b;floRkH1U#R4;WA4{nhTO<(C!8SVH(}dA=XxWk
zEAFABQk}25u7_fnxQP9PMu9%CI8N(0o802dN$4f7RWlJ%6oPo(cQDpyh&VdlZ4>g{
z6Ws@UjbxP=4j6VR&1y+h5;~p8_`wDKcxH%1=d*4kkfoRI5KGC_-_51YSBgj<+W?5i
zlNl*1sB%l840rftnkJa`3?u;tRJCe*(y(JA!e-EKZ~kbHghXYIt0dLc-5x2a7R@O+
zm`{-{^P9HVNP#1;!{ynj5WGzlGg1(-ro{D$V(WI9$~4`-%;P7{{;B(xmaTm0W}H0j
zh5Pl49`$mSag(4_wRth`_K(TmOxu&=|HYHCwF(&oS@y7-Y3=6F;=Ui+QI0x$Mj)&l
z6)62pEpzNxBEW&yl=Ob}Bpl$cOrM|F1QajoUpfhR4)_u&qGkZ(ws*B3e);jObX`W{
zq&=6#4;NA~I5T!~La*bJeZx51FO1mdPa;hjR_#oMawJ%Fyr=bKzy6dZWwU2dDS+VS
z@*;F5wEesXEaD|?YcipG<R++*7l7`kmXTAY)Mh_Go;@gTrDM@wrsUNj6>#fPWcwt=
z5#n{mnLxEqGo%d2zE00`NjpffsmCM$eyf=fmm$Fo;v@rH%rF)N@_TV%1i*LO+HEz(
z=Lspb5q|S9XbDArS1XE7@eHhJOYabk@eSQRG=P+E6HLy5WYBGw(Z3aM2hYc_A}1qZ
z8x+AbGI?GeAY~pS>Cw?)7YDT7C$9Mk)DIBRbYvPaXM2YQjM$}aYMV~o(&4;qk*Db;
zADb!BlP;hNJNm)?_)e}V1SP@MOGy9YVHjL|6Jj0atoO@|c)Dfz&ZqjOXnINq+eF8`
zFkd7Fw$bNn-lb~@+UY+v*WUkH<IHaigs(WRU~59R%FyaxKTe;z%g<g`=F|4#Dz^xl
za(oPR>gRlrPYP;+!O1J-&VN^dmHsT>+=j)(9R93_2j5S>FE84ESxa~>b~UD)n11i^
z@<e5dVO<Ff90AbA#hwVHU2eJS1q0B=c+%kW;&h;ynR;ghV*Qe=6`KUQU{jjoah14$
z+-&8|{K(u0t*`r7xcl3&BHlMjkX#viWd-XD*M}XJ{HvdNzBnNJJjp8*=qk~&NyS{M
zR=Km{K{F)G?s~`9Jo+7!`uG#;Lk%pK$9GG*Ta}qJwr3)n*!{8T<nO2^ZtuhMkV8?i
zmxvWfQ&bhpv6ND|R1M+OLYc@^-_<pX(N*mTmWu0_4LGYC!rbNBdB^F$=Ia~d7)S<q
zYz>`mVv91$=qi24cR+6~4>etMB^1eaySl09oe0Em=CPe(ku6Y#n#FzUCeI$AoA}Mv
zyI+J9p%!9e(S2ei#DuZn?(K5*#Csglqt!TL77y%?Ax}mcPygJ2aW1c;H52TNue|ox
zP&7&wJfZO4pB9iC6TV(o#@pit=2yp+*5<7f;j8ha0kn0yon~xtF&0Xa1Xo&Om1880
z>%p2<V>99P_-aNp6Q3r$RY?T!+^*g*F`EYd=+WNxXLEeBB0*kO?INC8oUDuO4)p~j
zt6&cYC)RqBPz1aeW;+2@_b<22`d97TRdjQVGT2U7W#+>V;w*}sn{uU#XMf9=X%wI~
z08(Mt>C#wq%%xjISS*QXcv+xK5qQ?bA<?q)UF;0Qga($-VAp|(EKmHw)O1Fy&0HnM
zmq)vI{emTY@;6n|jYR;s$1$h;iMsD9y?O$L{Qr-wvx<sy+qU%z7CgASySoH;FDRsN
zcL?qf2<{NvT?%(6!KIMk?!i5{U)J7duYKFO_iaA?FMk`ejnQWxU-!kFlE;^R))x1Z
z%ojK!=&$PcweSdos7{e%sha)1-LufSJ{W2%m63sRde2KSt*y`1)L3X6RU~&;U-!9;
zCiQ<N=RxM1I6<LT?poY8-FXBmRh$8Uv$#BuzSbYYmZA-=cBW0bXMaYKnY7(EfPA0y
z>b$gsb@|}9;uys=0?WKW)h$$44~H0sE~^#PhSTyJRS%KuIL3>>)!;UMkE^k<$Mgf2
z`4@@}n18E-hyUDcwNhKUj2Z;Ll5>@LI*z}{k&UBK=vW=wV4v{b{65YKOUVhqzTu7?
z=e(LtD~&ij>%Tk;R-x@D*!gQA6*{h3CPzr4nl)CV%~U{nRjH;mTSl2q39$)Pmfn)#
z%V;b6$#Bb$8xnA26Tkhi8@jaMrP}#ZG5TWCNWb$Sz&t(sl=mS)l*mkJeg`HTYn5Ox
zbetA3|0_%R&z@_YlicMFl@4r9yB`ePBL%7&Xh^`ON$FdDRi~Y;8)_*^<Z}3g$CYo1
zRQtXiRh)9`7@OatP!mU*Ao!S4oL#(w6h1JTD&`LO6vvzS^X`JJuJu`4qD6jfzkO^*
zu2)Y~=Zs;1!GU)ok7)oi{lCN#Frq$CBUJPVKiQBXoZTK5LpN(Uj1IS~zlF=}aHD3y
z>;dpX+1zcl-nf%&OXD<i#~tg>EQ_=}DxAh8R~Kn;M`l+W!L~$>9#NbpVzX-@ZQreF
zacmWJKI`9A?(R1~n$MX%oKRFWW&6X~OE;-%;GwJ*5uWS_3;5htSy=qbl?nQ%*=BN#
zw#SZ}&-@aHKlgcbubfIw3t^YGs&oH#@kaC2Ull^Yx{Pd>!@ow%za~{R=IWs!h;QFs
z=vjz`_whYfCO%-00{M(>!}yiRcGT|UCI`K=491*>?~j+YbMm8uRf=#LbK1hdV8lh{
zWc^+JZ*DUgaxK>NblwcccUb3#E%?1jL$iT<%C<3Lwlrt*WsQC`XcT56*%ba3yC>(w
zA5@``0X?P9hXR;P+b8n7N8Tz^wQVVYKga9nE6m;2xWcsV5H;ODsiw!k`R`VKp7kD0
z_1dB1VwIax#vbTd;})xgNgooRil}vo76=?`5R2h-NIViA8By&wUr4)E$|JcYn1)H4
z!HacDemVT6$!@?~pGjM0@nY4`UMfUv#T`!{Y;cky2uE)jMBd1k{j~oxMS*cp-n_{@
zcQST)sqTivwUK7`_J@)YPqDc)AX*{!V<a=9#9j5_?kv;He7#=jWfo4p-jyPD)Q%=%
z6T{4JzqR7?)koC-xwmo=|GB_;A%ws}H}gTQpuMi6DFi7eKa<1?|DADgmIZ~CmfzWm
zw_rZUSeA7LKQZl@!8*s9(%1|_CQU8EJ1Xw5DsY@_0Z*q=yowEVLWnedg?UjQhMpKz
z+g9<aGgvUa&(-cdJt+%>MbD;GL}7un<qo8JQpIJjRX^Pod4cmUc8WS`rSn{3PUkVE
zycJt3Yd1`8)9|FWA5LueIqf4msnN$*f|3wwmaULbkJhhEz*fy7<KB@`-c>9!#4<;@
zwqDVHg|n+SDUDfIy<gJ1cV|Je;F6PbB*i}2Hp}sI3+g01W2Q0$kZ~S%U0aekdW817
z9#s{NLb!aZ;i4z(+yzJ?sveIs$(nG~ar;u~KIa2udhlSbqf;$noA*?i!RyW*pHll}
zEW;^MxSQfK->khRh+wEG<jj(s)9m}Xfzi)NBE@l)Gnx?^T&a7|AmOf7IfKy&8WUVH
z_lmn>C@n6%&GBaC`X<Qyj!&i^G^!7!bo=bX)4+U}wEXY6#_ONkimwH_`l`4hdf9RK
zWh|%_Bb{^SopOHL*5xu#`lMsrMt=4|@oF_K7ISEYa^^C;C3F&#fqB>o+9JwzVIJTf
zgBYKWS4fn8lH%1Zwu(3O;SRl{%RIx9iDcpmmz6yeZ}h81K33F{OpE!GPg<hvSb0vH
zWU8QNLNX51$Jz=rKYwnCVk&Gk&cdb$fI?aL=N)Y`SX%Zev%=UsKnfWR_Q-?+MRAMA
z7k6sj2vF{T@BKYGwI#*Vq|{R`S@BC_rAw40&W^)Ua$-McW#1(c^r-lH5vd@|9XzKv
z0j{AZZExtFU}dpX`&}i=767pchM&2u%Iz;=QG!+uVwHCVWam%Mx7Ou9y}I)V&nL?Y
zx{fN}&u+ER^OFcxbb|}z>7AHVEi+8WB|jE(7k>X?&LB9aGkJvLEC$^ez!#)x@l%U-
z<mG&qQj`g+tlDM63D2f`D1Aq?Ucv2VuI8%oMR!Ds>QagwwT?TLA%k7;S1^8lt8;yt
z!kk8xANl?9Q<(!d=f5V+8KZv;iIA^l9yu6qbvJjm6sJ0yM}UFE$pr2#9x@2ZH3wnp
z`JChAS})I1#>`$iPxiLO+MX&zuWsqG#4|G=VI8?L1iT*#R%??`p5$K>8mcB$zw-%Q
z8=fVQhGFY!=T`%wqsX<E&5YG!CL5R{=5!^3de;>{4f1EM1asDBeB8{*Zm0kRdG*=D
zIby9bb2H`-KVaDHRKSH6w1&mwQAz)~#|Mu_JH?BW_a==|18ll3=QCBpOVU2CELB*w
zL&?C&C9BGZ$bRWg-D804tiF31Sy9rrq1NktT&i~1vmm_dV(uUNRXmY6*7h|Ix1^v7
zV2cJ<%Lkv!6~_A7ajY&*K8l!K2s<I#@sA}8t;ETdWkG~W_QXyYTtjKfd-K?TR0wZE
zjB+d+u~r|)ZrWS7%ahu@_~61>^e3wqNbs8ITDQZ)uyr}*z#dE0$O4o5OJQKf3&!n+
z`Z2(e+)*GTUD3DszE;RyNzdSRz`U-@2NJtRp;5n89@XQWqS^U(vqkus_a7Pi-N<t!
zz~4n%_E?~|)<7&?vPQ!z*wLqK`kNqkmVZV3YwCyY1Cpjjg8i6)T($mhFdf{L#QFTw
z1nU-h%$_j<N0@E`&Uf4VEx4?$m6Uy|<@_CdKBnE7bj6$NL1yuCO>(YlOpf>8^wM@;
zL9|7cSb+8}4`GAEBIhlz=v8RP%wEJav<uT|1B<ARoI*>iO$(wA)c^$&dt>9$Hgmg}
zUy~mg$10{}-8*{rKn?hbk-mOeUN8=|!9IRgxwEUHejU@4lCG>`u%Z{2TnW`Ezd!jM
zqd}`e|A{n=$1cbdLpC)hs?psw1!Jv37^4;I*EB2Bm;1quw=lhE-0-?!ROa5B^smH7
z<n1Mokt|fY2y=~L5-66~0^rDey%YCiC^0w-H)?(?CtdMIoQ@8B2Ya2#OQJ!XsjMp{
zj%>Lux>-=2!^~;J=s0>+ls-uCkb2*md9NV5tQa9VTd4E}q$AQt{y1ofkU;iggD;S*
z-{VjLmAT*0B5aSd@%MFg|KHx~fWBKTu9ZPB`>WTFhm?jRJUJ6@o~P#6?gwY+)9Y@H
zIOXALJ8Y$>varPFGCeWW0j@_Pm%TZvaD&;Yv3nu0x?Kz1wO{UG>%N2IL!|0QIWK+C
zebvsq)Dgo53UaWOxSrPwXcWPvMK@|vlw-0&*2ZA9^4JFs7dbRGZHH&_#Q4HVXQ+1>
z0Qsi&q3gGfF-m1b2710#{u0quS#4Qv9d+bb>^ug{@K0BiO|R#RM9}=K!J*l^I-tVf
zOJS}=V^D&>ptJg!mQmQ{qo}+PM+J~V2hUZ+`Ep|A;PvYa+Kk@CuzXslw5&-tU1-8G
zRxp6!7xMxf%Ij3XZ6>lCypKJ`)2^4k*V$7Z1hRj;dtF_r0hD8jCgVa&TjSFc)+_G!
zSGvww=+2wY{>u_(0+(9o)9jK1OLefB|F-dzgG(zWa^I@6vKvUl2EFVkNvxqyQwwm`
z?aKlLz%m;n#OY}2+Qd@rYokvtZRmX&KBoBmbi6O*erG5BFSF(Uy-Er?=B>|4n5m|q
z$5{qO`d|d-X+<-2Fa8dI+855#4?JaM2zJ&SLKjOn6CcGgD-flpp3TAOVUr5tYY!pd
zpF~jz+Mh?GWzgzZvuNxx@qa*P_4sJ!E5C%21hSg*ytw$ln)|~D&rOXlTU9wbO(t1G
z9OyGQA+VC3VR6#86ysZ?u2ag_5ylVS*uuX@RxDj#QDU=(AYvOEoAvRX8ee;xv{+<W
z0NtyIUR;#1Vuj`5KG)VwP*$wU6>%u8O(Dk1)SU9WF*o(Id<O|M7gXjdtVfL0UqO-~
zkEs!zeLr6>)m;}qI)RGD8+5dgJ@k{5CMTAF2@6>t9Gs||2FJs4FZ#~?hrdBe>rm=L
z5!$XF06)$2nG%$xW=JwWYDgBQlq4amCTd*AM5KqskuZH53@U0phwS%W%q5NsJkbZc
zdFFdrL-dJZ_qTM)gF&C<DQft2PdmFxhEa0!l{l+tuUQ;I=D@;}oDLIgk{6ib?J!BB
zXp@kymF7IlzSKKso)i5n>Sd6i-hU77LjQd%U53N$PV6ra>t^E7oL8G6BN9Hbi1@A}
z?yd=Y$TR{c)yNhtIOwFZD1+pZO>aJ=`f*GbtmX0r$xVYeRhqLW<5$KMO=0wUsUD}f
zyYw>g5i6hYQ(9`%m&<!{*e`CkpKg6$j(-fF2kh`pIpsxTuX2FtTDoL4GPGev=<<W!
z{gR6N2^x@;hhF3c9g|S!&kaq2tJ(R{azaGMHq7l~SGSa<AUYl6nTRC$;LCoFK(&ZX
z3Ur-iKp)^(Pyc-BbkxLJ|62l=Y^pZeoOxLOoyBd^`saO{{TtrI9)We7dR(wWzMg>^
ziQX0feS)cmzpxX7#L%wtVepQ%|MPiNpU_pz+ug=Ko8B)FgwsqLd?q|hm#U1DNIFy$
zh79YqKs7K>z}pn=n#%qc=iuBonFsBk1=X~JwL1ii6ltRZseU{)#8<6tBK5x(4%Bw-
zO4PMiVUS>jbkVlgdt<|Hc<=h`vt@+evbL2ob+1<^AZB-eyoByH!fds#GUya=&mJCH
ztVdzX|EJli`ww`(w)!>uqc;n?G^00DbRxO9@KkCwW{=5@aDZlj6Sh1tp1V(|6Ddro
zRI3W{Q0-5UZs=!fY{_yQ{e&|j+8x(mP#%^H_x@|6AQSO&^v37?+wYMd`fh*>DHoiE
zuR8wM<04zPx7d>+KC!XTfTU@Oh{3WvdPL<OB3}a_HM-7ewf>yOKxLh`BH><4TNC_d
z;9{In_N8}$@tjgf+s9LQOGzq{x<!}1P~t6RYEp$nX6L=j7+z9!ZVQ$Ud8XfbRo{#H
zFO_S3<dqrX@x~?{@oqo5w)82Shm4~VeBi3=DUip%i~dw&;*SOK2TaUUpD_9HB#VJ2
zG@Dh$6(%~H@zsa?dX;0|4$hy4W}SwFK7J_0PqF@S#NK=2Uk!q^la?=rf{5fs!K(e`
ziQtaGCcE|4&UOJq%kTwNF?HCMhN~ZXyX6Ag{^v&`jJ`TJ_Rhq#QV=c%52TAO&mz_N
z81209%}t>@VYWlNX2EKxLaom?>4(ZmK7VO#xVKcp@Pc?n!mCZU9LgsDf2k&`y+?~a
zOt?~!Kwh`wh|W;+A!{|^1{xZj<SQ)IhltGU^&dLFsxN>a+#*wI4VS}pAxYE{3Y|HE
zir-z5dYwWA1f?u(P}V-dF5oY)CzZPQI_9`QC+&GMt7#Pu@?mM*wIWVB<d~WugUWY?
z=`ec43b42pi69IaWo(Bj6GHAwQUV_4Oq7A+c}<uO%2ge%ZG+uuT)`Q)Q*$W<G6Ew3
z^9R>$0Fli76RzUPQVQ|Q+NSQ8Tf+1OhA1H4u&P|33T}SQu|+nXhqDI$!%5tap+eTK
zzKi=0RoB6*4%Tyg(!g@9PezMX4I@?)C?2<q5}_~)IeQ&&M!Re@%27Y5IL$s^_V8}(
z^pSqvf80_Z%Wq@;U0n)AXst8uQZfyl?OL}dUjjGe#fXIsW&n$&(D$Bjc0^sC5J7kQ
zqds<)-uSiQQYPJ+?2|C-%N{Ku#vxS})r(MY86`Mi8tyAC44+h|3?A6OeW?bWIH&B?
zt>vs{_n`4?@9LGZZZidfG~XFo{~FtQeE+-sjAMJ|HAE#`#f2QOM{>gwOi+gHJCV6m
z`Q=1D{?X?6y86|cPGQf;p|MSjA+CUIj&f^upKJ_gEff7nfU=s)MfpAYK`F@wtE%Er
zcHWUmOgXfX()}t(2dP^ykLRK%E<myzJ5%`t{O4jjQmch`b$z|Aue?cBK}o?9?w2Pm
zd3Tv=BDq{7pm<@5@93R0GOIjxI;=VrVnwm0Ogz$QIk>+rFQGE;*4tbjRmDdphoOK`
zFQxuH@_uZFcEPeChOw=!#@1uoN4fx~d;5gsY%0w0&*k|uLdQofqV9{NmzB5+ze5BG
z_DQcg85UH}!y5e*e6u3*_3;9+2ygkiyU{cXI+QzvI#r`=e2X2C=exI>ckp!4xH2TK
z08L+aIj)1DW8#~8{Q>$+us4i6eFh>}RhX$;m`}UW2+9pFbs?Pc``)^;rtsXnMtv12
zpKgRfuwWIrboO;NFZzIxqe2HSq(D_fQK4+{PhoY|qp-;`=?}VgZ-+OOUP2sOL&J)S
z@_KM*+ZNIk3gMl_<-bJ8T)6)X^D!sN*A62mRDIk@qMT#P0GUFrMuuW|DMqjtW+i`1
zs$7e5JgS;bCHPD^^PQ3eXU8*J2yt0G&DU7=?ajatw-IXA>K!}F%J{;IWDYPY2{)|@
zsjm=I9Y=PinKS7Ad1jG8>Up~rmL%Z(XXW66%an-0@a1t*U4HlDxnFK7tHaVM>Q}Qr
zUna8L3C%-g$}P(<8TrG>N+YbhB^m*E(>Pf+S(D|zs;RmCSk!PywvDZm+p|bVhRPSD
zB4R<AeDhLC449ut4a|PpC-Uf&m$jCJtfbTyD3{@2>jjTkjtYKU_xcmn`8fY>5?6Um
z@>av&@E@C->_mzq0aPRXF$zOT-#eyGQlA{nIBB8oP{8Grgwm9%gk|SIimK&Gqy6lE
zPWc+O@d&pNMw=!$ZV{apF{kl$oH<T6^e<+dZ4JzZdFf&!=A&?T&|<Q(4mG1uo{d<T
z_*F#OcR0PNEL;PkHOtSQr`v3GU#5oaItD!Wb85A>q_|F|AvThR(}$cj-R>DIIA_)F
z4Zh?roE^%(tYV5zF~xozBL3sr0;J#dRl>jAeq$!H{$G07SiKpCE&b{rnvE>49P4*y
zuNYs7SF1SHfM!(NAi=0cE)v!!#>!MxZUub0;sOq%aV!7GyBrip<tpeKwoQAJ9L6j9
zJ@tY81Ac5e6Ef-Vqa$!&;)Hn$OHYTaN;U|&P^^W{2aYX|8n*QG&dcY9{n__GyC7A%
zi3R_?DBnwnVbRf#vAZ8{rmyRc&%y>>?NSg~VlE_kp*^xxZ-QdYd?nf}{J!*(_mx5z
zEoov2SZeV|kS*8rGE*~#hWx~biHhaJpp>Q|&c?Qs--C2f&eCIkkyjVaW<(J2J<1*M
zB!Y){AtSj?Wmc$Mgo_2L)P+VotN*Q1^@d;X^A?iB`cwG(K4izNN43|zRzS*Hb>1f#
zJ)5$y6k~Pu+b<=(bkyqJxaDhV$eke!HLzx}_BqLKJ3pSzUHF8|c#sCxVAKZ^Ju{`a
z6FQjtCzSHv34PTVgW_LxqLek=z|uAjKVthPmhEfAb$WaM2pAUFFGQRSIIr0^Z+$2c
zOnth<PS5~Ci^<&BB~TjJH&LxTy2QBTLHgz&ek=Q@8s6X=e)?2vCV$SMo<m5zkA$<l
z51zEt(ZNyw_k-vDKh}@Fu?w7hp%EB#BynS!MIy=RjN&eC``BVNoA<zR?fgR<QY?DD
zoFAm`JDtco;f=M{gZEp}lX6BwP{3OHY5OS6*$~jJ#?49CwWqSJ;%67$b88E_(b**F
zV2X6*v<)f0=$_xooI875PVHS%Zej5&Wb7(!8!=YtzotF+k>c(lZ_|dmgd6wJdn0_r
zDvQ^MrR>P;jEsP?CT9WS^wJ2gj5a|fV`^+;I&NF5BGTU)D$JHLPm0D`GeHb8GS;-Q
zyOp;C>=6TB9n9y`x(B4n`5e1$g2CwtE67|ynMC?nt3HR2ZU&bGo{Dl$>C19#8IMTZ
z{1haT&#xT*oGwqVGD1&28=FVCKSsahJ~24^g(Q9G$hPKjc{)nzn_+Za-2stVN=XUL
z3Tx?^ylO<6jMcGg-oI=-6Z_wJbPo*-P-gr*G5*j(GD{_wlgh@bGtv&r-r}&KM8TWs
zqPmD{{=UH21kzXg=0wC6WmH}O>&U#?Axz*&{x#Uyy6rC)3&>gXa#JYHW@XJ(RpT}!
z@RRPn_q#DX=lIyw_Tk7bzMH0w=fO3EYoj#G!3}OrJtEcP#%<XbeR8BtZ-uG*f2luH
zC1f0cAlEs4I`1p%$amz7tSNy#WeCpDe)@cncDNYkkOEaH4)>rKUMTTXUrygvka5}K
zs#PiGDVOPZk8V6ON=G%8>1uLWHa-QjCbx}_T~2yw+1JZ9jyBGgT9*Q7^OmUP7%f)z
z^l1jWMxA$)q>InciN~gSRiM~APFn!W1_(!=b<#f$(X9=98Tc?@HAH#p+9$NeJ{hxQ
z>F~mnY&l6~hLD_A7WE$Xh<}vP#)7DcT+1Mw27gdR_cmt7P+4Bb?Zb&Z{+Lv2l!|!n
zyTyEgU<afB-Ey4Sl9cn?5^`$L)~uA&9N7dU+J0<<CA-w^=Uu1>3BQnUCS-*-?X&lQ
zCF%2Ly;IdCHInFAUeYf&bF3KBUpUxC4xeRf8&K!)g>HK?{yZ-sy{^4Iyv0UAK8`Wl
zEmsG(GiG4($yH1k@EY*dD7)xoQ(!aagH-GbQhM`3GOmhU$A9&88$@!VyA%OT5{mD$
z<O|t7LZ;u*7y8!84%sENSj>lG5GGz*nuPZhmMa3F8&3nVh=ES2CwQQqu5fy8vSB$x
z>P2r>-w@t8K%+J39R&Z|LoP8zapM0qiKp$u=im?Zo_SB6v29bVu92{xBHAueIrUU+
zO?5$8a7Cdi!*uc#X^wVNV29?j#R}ebB!Czj6s<yAY&2uB&@ZHkliNx0j)&Ry6e(gX
zFnPus0aFxX4aNnEGm4yI3g(Ma%!qvN((IXo)v2Uy&^U_i7XhiEos*coP+2ry>|!kQ
zvgDo+Ee~GbcW3#cp|EJ#qAN1SZY&N0HXBfevrtKW%&!C%)I>X4PnT1I!C8|vUWoy2
zD&6I!NTm>!lf|ypWxG12h%5mwG8e*Q<-W@q6xgbtrsrTBQL3=LwJa0WZ}h&@%C70t
z5ELua;%8w3sLo)S6LKoz5O)!e3L)^le#hv}USA9D;nxW?zo|0Tqfw01P|7WuTsIPE
zmz};vy<Ps0*vhbmDm3O=g(Nnwd}K=<&Ha3w{zZ4XJ;<zWYE?8j9}BD-OqC5w7edG*
z9%aMay5u}IWTSV^c(vNvX1_WSq;R{!bhM)b1#p<lXj~0gJ~rI;^al0oi$0m+?u)@u
zdeuN7nl|pWA+22KFl*$hpJLq^{N)&LdAeop_tgzu(`UQ6z(fI5*QoUG=hgpqHpm|D
zh-t&qR)62|E$28w4Zny<PM5@dhZ(zdI#i4N6SByucrd?Uc@hXe6uPh0l*rwRJ_*Ae
z<R+u%RD~4=$}+~ouV6zKXMXVKG5$P5lNQC6OYog9^1Tk5+hx(Vo$^k?$fVqULPW3N
z^)fP@>?T$T(0I(rf;{Ru>rp8s)Fc_+etW#e8+#xCYI&wgDAhZVDh^k<tC4*(zWHie
z)?G)+#tR)6{Y=wkFxICLMk+*Snwx?D?%dGGRYTvIorFS-@h!EH&_#9MgjLAfQ`P~W
zn^(ULiJHTJvQaCl&6GIhG@iOvCdF&?nWg#@dM9cUd6E`(2tq5L5%5y&S08HT9vMgn
z_WN!l(jkhnIlVW51?)oa+g`xqk8Q#8wJ&b1a9I`)2jC=RSBxp4?i3g<xpa?L;@>r`
zEH2qIT%6G#-HSQ!%9`NSVhfe+(LFWV@zT53NPcn=F?6)mRY2bij9q;<o2y{y0bcI8
zmjvEIr*2S}r!*uPjPUDBrf;w7xsv2zO~$g7In12Nm*!^YkyY4YBr>g#+rGp|PCL@J
zINu5=?5d9BmCV2Xzw~g6GMMsJVnkeA%J(pEkjvgq#0tS$LTGV{X@a%saVHT*ib4Vz
zoJ7_8QV>%J2vJ<3Gl=^qKA7zwexw0f!f7yDy5K0@<qKB{Khb+8@mm}J9xSK$I3rWa
z5IpQCzYKkAFzjNb)MN=Ei`b-42h%u=eN0IZZ(&k`$U7DK@^XlF^vejBN|v$=D3c?5
z5vK{U-a8CaYE(Q=gR3gB>vY<hC2cYs*?8rK^Uu;`PeD>OaXJHI&p}dq5ul`}9+1to
z?!7~gJ+gx~GVDM?KLR_}7pl*+&g?sk0{J{|Zrd)9epO~W9uX?97feCTb5M{sU~Fhy
z`BgoXpeEO)>28sLpnUi<InYVH@$BI}lIhLo^;N_(PDU%v1ZH|a>n+-Ajz%0?AAx@9
zY?^pRR;cE?0I-(rkMjCEdjPCTm*!Qr#K)Dkmu_lnC6{=(_C=7ByKfJHK4yD#Ztmxr
zCGFY7y>9LY;as89uCDrbO82r`2jwDQX)JxJ6P`oVMi{q(-y<Ro1PFXmDfXDiSsF=2
znA!bp5u?a@c{;}dMa_I&;Iux!y8XXjq{7)KjqyId--c^^Y?Co-s`ydPU|2$qn0cxK
zucxdGJ}W6;0%$595EkfEmITgA;eB;MQwTBsnolQz(B#S#q*-b;ajpaLP8~f#X(AIZ
z#;bz$in)WE!jQ*eSD-s<Ey<{mcS#$!+C$HvLdit7)xU{+TU*cZeEky}@ErFV-Z$_*
zd}Cg5O_d<9^%HRE0^HZKM=V6kyO*6OCEU1v@FFp~xJhyS?jC^3z!`hRbTf9BN&K2K
zz%*rOfWTO{OH>HHD)!hI=LgD#ECwBL_ms95O$<6^(_^s>G!=(<c!h;vCG^iqPMMkT
z@Pn{MMVC<SRpXt0Hc$=$#BjLv6zD>9_%Tgvpe=l>wS_OvUnXB$o@swv%IFyg^;!<i
zS&_J8^c*6Ow`Wre>rkD{S8`@LIk;461*I*mvU#|J@QJ~6)OV1?@Cg^BqtYPWkB|^l
zAoA|*T*Ajs;S=+kd?yS;y1H=-9AV2|d(#V8%0|bzwf^1Nd#V!_>Q9g?R>|<BVl8g%
z+FiFC&cejovoE2namV9=``ElTe#w;JG|8q0vq(JP45*U7k8xjX^?z(b|Cq?@U?$@4
z+U2Sv89D?wP@qrX@TekE$*8Zh02Hcn+`S2O*i~Z63fS^+r=FRr%*eo{PrVLUHq-!1
z>SnRa#u|&nvX%6v2FI;LE=-$LGem1nMT_md^#s5LAYDzdKx=l*x=KxEsLMQ2akVVq
zaf#9Ym^9#9)G_PD_8dYkt=(tX2=2+vyymgjbi#YZ<aWCaagx*@Gn&UPbopJ8RQBE3
zi0;Rh`9(^tw7sJ?4`vgZY4_<Ck(I5SWiEGzrM{2Zl!nhyB9KE@iM6=l6Hli!;?XRG
zadUF<J!Ws9O9_XzybinpiB*A96JwBB723V3k%&oK?O)~ycTTKEB<`DR*wC0#L<_y+
z`a1zHA`gatVgepbQJ)y)myFlhD1KY%{6)XyCm*|jqoZ!)b91PZjFhb?s8~e~wxVZ8
zI&mV#6MH2Xjld_A*UsZ4SP-6Q>9`sV?u!9b1PVw|+gi9H3?_0ywU_AKOivA%#hbhv
zov7o=X<bsOa_VJs(v!0H#jZU3#HiOHE+uzNR@R3rF{Z7q8}95YK9OI7$;$_!K0@?P
z+O_Nd{m}K_eT{j7tHbwHt(1<H6|(HJv~jW+j-Ra+MpO1+iX?)5@<apfhysfMQ?V#G
zdNi3idTRLNYQ(2(j#%+2_y#qnO7nWKgsrrTiZiYzh!S9(iNs7glU%L5=batl<(^Dq
z?ap4w_;r3nXZ5**u=V0&z(r}&3#CJ8C?|UhCXbXzr7o|w`05_+n65^zRXoQH@N`UW
z7G4u0zl}9TFYzx)VZ~4ex1gyZc_;8NK5^X3bkd<lqU`RT-U;Hn`B_cz{=nnM!<+f}
zq^Z5a<dyQto&S3#{j%lfePuV>Y7^{{ZN<nWS@kQcrnP!A%Ft7)X5B-+So>CD+JpXV
zu4&PB!UszDL^61cLc#FtqEkt$l|-T8ddZOj3;8Vi#BGf$&mot|+B<{er*+<d&JEC0
z(zIygFf20kF?#h`*M3gJP*c>g&SZb_5b5>l6dxK>0nhB(!U3WcGn+F>X)>~O0BS-;
zjF}Ar>Lvp}aLJCK_ZBMS_Yqjb8xzW;7(>=+s^+u59Lv$o@<r36U-sLdw|hgEoq16^
z_xNpGA+CF-cwCW~l|17@mPp2|FXR<EZ5eY>`1xmJ1iw;q79GB4$g))(ajPN{z;K&e
zr#Dxu)PZMM9W8LYF&%O$+d5ZF>NHY>NDy}B4?=#P-FM8H+S>eD6Vkl6^Q>2cl$i5}
zf~<>7;ed;LfyeB0uWbY_TeBY4O!_7o28`0Ub>RW-RSt!r+9CnbI!jSQtI_zJ7Gs_j
zX$VJupH?lSl^QF)yG*nQG}5t4KHe-xg&>PZ%YF$W_=fhaHIe4kF`TSN#S!}|pyJ31
zewzvyofaKm>5tM95WCbRFN;DuEaf*?k|fSD%7)kHeUXmq2DRli(H@6j#Xg$XG^R-h
z!x67Q{wW;~(3FV?c>ZmmNRQu-rHf*AiXTd%K?ke&@n@z1I@`F_NRtYkhFgUN1#Q96
zQ8JEJsKof)fEqcWhrW!hRjX>aBzdp~uvba&M79&E9{m7AugzPu172BRC3Gj%VR|*#
z)-ix^*Di+p10aw|8>9Apds`Ysl{Q?HvqntV_9)1v$uaD=jTxfsis-_l-jS@QbP(qw
zXgt$fB53+9jXNus@$KULb>aEe(Kp!+g@V~e{(wM*x5Ahs(QB#9%fv0CVWKnGS_YPT
zE#4@YPPu)S11E`xyNGO^i1^4X_FgK?q}S`v`p$#4<2r4le^AznZ05^xBU(_Z3;Cha
z?W*^kqYYk8(G2g4mqc~x5^i~B&$bT0loOp&-jDd1e}s#ECl?dn1$-kulzHahOkkMC
z9%NljgD5$rR05PXUo>jIpGadj9c&3n&->TGPkP0*=m3@n(tvG&X5!4Tn2I6?OXJ4|
zPu@yA%N&(lsmKrw<T$2gB8QtyfHVAw+%)dGT<vOc)KOeKYbecbo}KKkI{za2W1`ZB
zHZ$$JSrMNcfKH;s@Dp-dEs3|L&Woj{@UgMdg4DT9FF11dXyaE+KB4L<{^JJ-ka^}q
zrJ@9J{zMcOp;b|vQ4|g>R#S{tmV#PkYp<;V$8UcK0D&k+1|=V7W3dPs4xuR=oQQn^
zXI5TBN=rtM_(@N&$eIiT9x)#QFObmCFJ-Obqn`IO!ssI^JEB<?_2&^w#ePG+3TXq(
zTD4@<ZA7mXGI}zlkMxf$)<&H8662}Le;UP|$Cu#pwIMYxy^dEdB2Sq_8z*8oY4kS5
z%Y6g7vBDa9rFuulbUU)6d=UlkmqA^3;)L}Y7>@GuL2~59vVRf$Y|O);x8UMV@@WP&
zv;;%|B1K3BOV%FmtBhdk5wU3Iyk|lML6bv>h3J_nt4MI0y!#Q157(N%55HW<rL%j3
zqJBs--j_6MRS<|Q2OY;$DDxb%VN4_lL6b)}a28{%c*8`kq^!LXzA3%YS`WUL+iO*?
zqN?Rs8vD*?)4GPdh=llUSAf2A^;7-;a~$^%+C7DAn`*w;8HRPl;{eoTS5M5U+>@wB
zy}`wT<0}?Q_Nr0qCan~Cz*XM0U&%Y4+kDcSyB2hFeH;i7l3++o@J~}-hN$8_=J+@5
zJwHd!am?H?hWZ<=eR8$$vnlU3w<!j@sy;>|cVX_VO0WRe#^-Vl$9f=cSnQJrvH1N7
z-ofBu3@A`mhGYbxf{l^7xO%B|mWQ@Jt`jWB{0M$Je5^7<*06?5KLz5G&r0y1;fh&d
zBC#mp#90-7_jQ=ib!^KFj}KESubfG6T3vjW&Qa!Rb~h?!JXXTQ)euer@A)7~LNZ$3
ztWX)j!6yI8Umz%Yx6bQv-1~s%u7JUXZhvT5S*u?K{)$wxO5maQXaD14^v?1{{_=`H
zR^_-<s|7YZ28hOh@?ry);WjdRg~$Xm{P$019pP&w%YIOvXROu}gbKiVT@O`_Q-F<7
zN2-~{F_`p-9cbfzp%0*Hqvv>8g?XY3b{-{zNcO@y8sl)bDDF(UJ=-ZEagi$*%~?hs
zg;3&G4dq(^hMpQC8Y82FJ+HjuCPzLEv<-Ot{SUi|$6(z4KAdshe)4$f7lALgw_3(w
z-ixX6`Xa6)-<0KTz%mMZ%BIa+g8x#b%p=~!JlZ-{CQt8h#XLYaJ3s3Us9sv}aZz)w
z{_U!-tu^N=Y&I|9-r;D7_ozt1Vammd>$pxlFKj;idS{1qlb*IHiTWwdbN3{4W1mhM
zh+#!ZPYQlchcu=O^zu)neL_~2E-tzM9h7MI>Z$jFh&wD<W>c&jJXmQ5uR=X$%Kj^)
zns{(LUbUeeBdM=TxVb|z8m*CGrIq2hd?tAJC&cgt?nx`I_4f5nHPq$g87DA0UXMK{
zIR~t<%^Lh0mwAwfv8BM_q(FHyTeLh$;9&;6@Z!<($3(+Mt3;=Jw&f4EOq#|@i1zkQ
zu!5v)`DJBI^sbtz!s$VJ3U7H4@SS)I$V4K-U^E_N_5Yoy6Kro1{n~omIM%ww&?I<d
zchKhhLLR;CXnKjnpvfv3%$7nDGwI#HE3oXy8`DOZwgkKFb-B;WzgsR^o`Nseb-|th
zYB<t}j}Z`B&wj9EgvnP330JdQUc21U>1&L7Dj9ZYTGJx-Z(lT?nE_b57n|vz?$AGF
z_hv<^s1}HYIbM+IoKcJwdb25C@W>^N47|iDw8n^3=?_MOkMK^ZFt8~)v%+TAfG#%s
zvpL?*;lMw?(ge;zE&laM{w$`O_4?a)O;{7ch;lKi1|gBH-|O^Vc)zw&rV2u_2O2#m
zxL^R+np;?)f<~dYXNYXs03R%^82ErqwQ)8*)8?G3N=L0iekW~-uvEbAN^tvD5xw3b
zNgUa_FgS!=SDQw#ac&O;q%0Gy6wKL9*>ak;peoBmH4u%4XvNC1qs?LWqZPh=D&Fj<
zbnDQ_fQ9(%cV3UET>gC8Q1ZBkdAL;wCcbhm(5As&Wo3wVUjhFK2@eTAC>w=kqurE3
zB4>#=>AV&U7+w=hzrAdE$ES2>0+PB48M9VDOH=vWsmPYm)6)b@gd0|pX*eHQ*c@R+
zr9VEY(wY_YKN@I{JH_H6J=cq|q+^88G)qFsZFAMB{{qX?!ZBCk@pXEqQyg8+@}PYq
z{ZuFpZkp2?-;aI{l5T=hH?&I&Kay@*)%LdcRyjI;XVDGZ`!!p?5H~4piM!N~49@HR
zV3)GW6i*Gd8?7A_flfTSDOA%Tj_493ea2y7s)%*BzQU;eyMal&oM*?J{rl89xhS&e
zbb}O5Kh4EFE3Gjuf9At=XJtPzz>tq5-y?g3Xb>$=A%1;QPPT5sm`)Z*L_w}j+%w9j
zY_jJ*o)}Xws2XidhukNcizLB<r7n)CW5*hfp_5asox{9_A!J62%3-088Ti|*Dnd3m
z2}5b2P8*r78`?&U-kj>AvK<h+aTa4PA@na8abU{eo<{nfawAXC^5TvJi-f&UtZDnq
zT%EiT%1uFOf?}y?e96T4Fqe9t22>G@^h!w{Oh#IpYoiLhc+!DS$_{H;N4<EsB8*^C
zD52pa$H#?gr$hb8+V(4gx{<V9@ZLbg>iaq$`ZvqZGSf741jrC+c;Y&|n&$~#{!`4u
zt(F<#Zn7O*A-0{Kq1^4?^OopqX7oyVa{6m)L~d#oC=GL9M!=hX#h><(qIU12bTutp
zMmMd(Nm~<4>mam1tWIXFKmcb>WIaoIV%{3)3u`V~OoZ<|gFdZWlL5aZ8Zl>mvl=I@
z6u0u|@gCpO;*TMIaym>GY$52*bj&}RhpMIi&o_ubyrx5UaeI(*BAd5lG#-W%<2yOB
zE75jj+Kf83N7o;}YPTudn_@t-p{yZD!1Vnl-9WYY_u&qIMK_8b8ehD~o<Q<hlJ_pe
ze`$tw!9fa0S6INuAP-{jLNG8c6a_<a22}oJ6Tuhf;K?}6*)=kP*^?<z+SrhDgo#4~
z3-HnNSB@6f)+ywv(!R8>)h`;v7o>8dcfrueJ4^9KV$PBU)1af_u+O+f7^98#^BNs@
zm!|ZLMB$r&sycyQfNZ9+G{tR=nB@6eg8^|eTd|SGX>>{1?^JuEE>ntAC(S!(2}v~O
z(HW;iJ_cQUr4~d(P$lwo?l{CgKDxoR<c{PB7(+VA+NZtTpNPZ?FeDTLj&XSX-#GQW
z&wUW&)Xwgo6$zqB2Ni9oiwDSB8SDXQwh>(yN4U9`Iv86C9wzAXfYN|j4-hvjiT5-5
z>jhFSY=ndP+&1$sc&Vr#EF8-z>C1Vx3To6&ZsKqo{Bw`5;d)eGV1d-B+$(E2Ez;`Y
zO{WW3jXZx<;6v&QRt!}2q;s*xL97}p&I9%b?t!iuqI14=mknRob>*|6z?0OTef9Hj
zCR#-ybeb-!JM0Jiy7>0H6f@CN_v@70KXi2=!C#YuLPWzO6T$)UgH}v=;4*LZT9M(p
zP3x>m<Eu_-1!wrTlDF?%fB(-nO=Fs}ziZSfcZw&%X@<RD_B(~FSw&1`3&xr|#AYt>
z=|d)1(H7@QoCSzo5JW9f7Y8bp&|pb|;DIwEGSt%5)#WuK8T#1RuG72CA_<E5`0Y#z
z><J2BGq9{W`qz6%+O%2}OTZ=R8jw205RpQ=(y3POT@Kc6U!QsnQ2E1176Q4^fK6Ye
z@!tFtZ=6HyE{{AN$1eSgnF!!d22})^b%rQ=bYhGhbQy+(M7XhfBPT|@Q^ChH0h4h^
zstL78C|Z#7%>jo!VMI@hnv8-Y+A3TOW-(f4Hn$PX6_0rYLQ49I;UnCq&a2aY8It@3
z!TvIBtV(pNpG8IhS~n9Y+wkN5Q^(HHaY5<3-<-%j!xHE27R<o?rgG2ytsznh9u%)J
zc2lnM8%yoXV5zdI24zXVsJW6gwel<4&mkb@A8EX}3pJT-Qdt=*(d6g@nReWS)h*JI
z3=F-8HUq^nHSzQ}`@2dp9k25-0iV>3lNk#gW~53bt-JC*<o)(e_=acmLE~bZ{ae~I
zlzjd#YHDtA`6<?5EFX22jNez1au8{ICHciA>XJ}j@1UxzrHX;_LtdB-ag;g(7{CTp
zbA25SC$VW}WQ`NR=I>s|qSNi4t|x;=*^ENIa$ePHS$2k}QC~(L1tT%QF#COc@uP{1
zr9CND*3`C#<}TE5b6Wlqow<!WiTjwPI0DN&7$22NoGd;83z3fu1SEBnqJ0BICxO~w
z*JY38ltsgBzEATtBX@*o^9$=1!rP?FQY`Fg&eW3&P$Z3V4?e^xeFLL^waYmmJT2c8
z2bI620tCywzlqTwrE4Jc&!P_n5yN1^18fjtaS7$JN!s{Y4-H*>9Ffy)7h*fLX^WO=
zHRfhEi@3ru-EyIvf!x|;k@1ng+?XFm-3*?mk%&5HoerVn@@2&9u&KnpSte3*Nm5j#
zo;mqGC3Tbr`tFKd*9A94B>I>jhI{#Ab83As{Ygh;z;85l8vhQQ%!HT202ePStaUQY
zy$Pl$D$OvPsLTD5m6SPdfN_i=;mnfI`3?{lcjXneDc9FS&gWq=wbD4NN)>wPfAWI{
zZqM4-mfXlDk0*_LDoRjKsK>C*TG)gsryExs+47({+Y{do*%XViRep^m4JbpGkDEl)
za@I|w$q`rG7+zZ=7yd&<O%$(eX!)|5$`-|qDvYt@bOHRDQOI*uU-sx7i>oq-V<M9F
zcAz)$_YWPrfBR>Y3`?ymV;I1i%P1szF7nd&BmvXFrb9UNT()VGpq+=c5fk0?!c{hp
zd36w69cl6zp<b<M)&?D*7TUwB+Vs`@7t}!X&8UY?6JB4tW!FT~wo<)Rmo=W-Q;8;k
zoFe!jM0Ov7ZFql@%Gc&INa4oxl%IH7=QLLpmoMPem9AAfP0-g+f+0>#XDa)PP}`5y
zC8WRIu{_e|7K7f}s{eHG)y0%$>ixgw01=XtLh8D-eg`GbZO~T!lH9s5D*fCoe`X_>
z2qOWw?pgv<g`ut9GAbVsP@K+%dA)|L*!i94clx*Q4elEl)j%3Qu5g7h_lIK)ANy2l
z25xPJ`zL7gu9FeD<clHfH7@6>aMOmd39q};*7<l2A70u|iVkX@PGv6ic$Gz~v^Q(f
z@8noj9_4IPp;S)X1($EiD2ORp4~$qNHj#~p4g`SH9&#zOt{JO;oU%uPzfVNHsH;{T
z?kw2X_dIQh?8(Y7-tKLfQH26n8gCHU;dO@2)hF+9C{>?drW9ub&C=^KKqYX7wc5##
zJL;ix=jf~=q6Y*mX12gh2rOg$U#45hUJ)rv_f}m@BP@%FL#3p>mH(ClTE6{97Z|kR
z?ko?)TCGlNs5iJlt}tehC9sdCG=^RXe)jbiidX<$soO_-&D!w50D~$7Oxa@{3pFzx
z+2unMk=j@to|?orEGw}>yW^ko9a0GREuHDha6E!-F86?XmrOb%bgcd_I%n%`-^wX$
z()Mg(8X35}=GoA~VdU*8P)!5JH9+djkj*1A=4SS2lMpflNP0dUJKXXw`&&V$WVHeK
zy^<M@U2OdjRC12T+@VC0mIrUA1XyE7kId5$T`6V``e^d@U0&`4Sy76dJ1qD_c#Lpz
zv`opnU*CT3VDDqO^7eAZ)oVy;Lf3Om&*QivG-JT7p*1E1n}pAamv7c6wdQs6i=p%<
z@pbwpFBp2FO39HwAgeJ?MS7Mb*{H9_3)AWtYXE`lkgrOHm}!v#q4i*%eMyY`<18!d
zF4;ureiLoFO0^V@FP^)I!nQ`8qIGTB#<+aP_YTo<_RBb3_jPUe<4wg2gb7W}JPSyq
zH2nwjPUw^zXjh`OIaJ?N)?g-cA0^dS&155Bm}@yaPl4~V-A!Wzw~G+C<fe6?R+V$P
zX-sbL;`+X_)WSw=7Jl3p%>C~+Xz$JD-`dV#mF`{V!Ek54jBTO)$Y}eyPEHlSh(7!E
zlCDFWkq-*cp-PI~;1)CcS9COLpKyaR9QSZia&STxL9rQaDK+PB#d32w2}8QJUo{KA
z-3&bdNCK@HpwUfm4M7@X3e9KTLjFRNtkrJF<*A@DRTRd6mAX+fCwuGbVe^y{Y9m&;
zyIcUONwZp3kV=1=-K(XXnqpOK37*oCEV~z@O}4nxF27(x#gZ!3+*9<-@xGTrfKp$Y
zp!tlukE^Tb44g@Hzs+efUOu?An#gCJt2mTSS)+W^#N&Zr<MB_6tjwqQs@9-zc1a^z
z466I<$X2fv4Fqyk4TK_ra{zP0_7VF{8lGTmWg<D|&NOr?tqEZ!-1t$^ymY2(T4dg8
zuWqhjAX%Y};hcjyt4U;G6`qK@;$a3Xz2tIIIK9b{jWPNF)vwK=11|%N6*z0kMBXBM
z=Z|{fL@L)BJPS_J97i>}!c<4+GgP&9@OdyiLpaj2+e3L~1iBcmWPsIWMm@43|Gv+6
zh1yMsQLTP_f&2Q$m)l$#6@Sj;UE)JQ4db#6=3~L?E9pSqT*6FU-+u`@D?R@iF?T{N
z4SOq9BLTB5B1+@BDp5e73+EyGE0oljN+xXX1s*M*_{GbFRge^(i}w@^QsGlA&V<Gd
zFzRtkaiZItAM4TBNc+=uMc@sX_S3h0{;0sch_7k*b((Vm)_)EOY%x9J;@VywmBcZJ
zyU@X~chKj4ksD9fspu5^me1sERqFk~Sv!L~s{Hy?dDKM7dW%HK!LCs~Z^z7J(P$L6
z-QA$w1`y*n3e5;}Re5Te9*J_vws?{3Bn|j&9i@K0#f?iM$3JQW_#RdR%o!Mz?6CXP
zBQE9a(&F$%H1$`Tl%3a4xxbGn;}Yx}@i^BOLMzpPEtikq9+ab^aTXs}SsE7!>f3nK
za^F|eQeWEUEVf2?h<lS%QdD)#xQ*u8iI6FJ6dJ+3{NU2pj5b6OWLpy~fGiAp`)q4e
z^Lkw5MpQx3seP=9qtoFic9LFhC25qpZZ!f!)7o9{8ND4w1}--6%Tvc}FLlZOLGY}H
zZr4vnSgWAvMNxH47+s8ur)h7oAe$wUnlQX#<xl)NZgOiYfqow~%N0}D$v!(A*i5K$
zBqqL9`rOd&x;B!3#FS?)&Un`dqVXMxImNR7K0QVA8u+iN`~;TjOa4q2s=$GeDF>Nc
zzb1dhMCeN<Z~+v5sBFFlTQtJc!!bRaM5!34GS?Ia?J)Bcw{mMK<Rgb9ViOCcMz-nW
zP>>_0VN|xUg>is^nsdBb-wb_j;2eeC7?frbT+Y8ULCMGv2i*R+kq~;k`1V{Fa2*w1
zwES0Qd}3W*FkV;j!fZn;P@M2iQz2Wd?yIeCNn>3xLiyPQp3M@lJDzR!w2533QjJR#
zlEJu!7_<N%V=l*rE^UJ|1ywF*cYYSz!LDp(;<`=xjj>cNo~Qk^#YxvuHiCwy=o|Dr
zMBHUppbew*%TzT%*^;T_7;#jHbR<tQg7ND*yVI<E9(bLRP~O9J7cDA*=K(7LW`hq!
zZxN%)%Q8@)7!Zf4t`U_g^uVY#(yp?M+(+<T9CH~Ii{;B8_tG0Q|A)Gdu4mXDGQQ)}
z`SLP3+q`$ETG?ygR6<ydy6^{#V)|ovi=}aMK8k`+klF2Ro;5tU-8QrTz)q=9p@3fe
z(tX_~kz_=q@YKDM6C=HT6w^tQ#^>RlZ)|Y7x^AShTSS7H7fBORyb7zz?OeoA&Er8e
z{=f8bmUe5Zs?k}lOg;ybL3zM;w2XNqEv(UadT_ENo!j(Oi)uyO0#<=k;(&lTW}05`
zHq0CwYXy5@*~6^Dtz0JE2Wt#{=wAlI9|S%`<TL1iF{fChye^A*Jzp~?XBwHQ?{2WV
zzuffbzfP;XVRguKJP*hD;)*;qh{U0yWA|153RfGG(?s5eRYb%%#!^uN4aU=iupDRL
zeRHgz*VlrO(R&>N5V-Wg@~OXqH<#qtK|qql^{v{K9TyM7FpDt;fvt<&i#Gg-+O8iV
zd*addK;`VtL9)-Jm_d#Mk>8hQJzX5FFw5<&t3`Tn0}c%v{9f??Xr%XDho<Sc*%{Wh
z!_f}8_l%<K5Xt*dR$%cCuwC}aDJ=;vQa7T|OFkMex{`0TFt2~rVD15}BxYxnC(D@w
zF#$wCZAVj*(MYU9!=Lc+0IFsLW}fn=oo9K+ke#`V?>8BYaY6_7?WT4vzOyBYB4%4k
zN~44e?TS2mhY=tkr~R0#fDVzR>EM{1SzaBrTB^Y`wI(FznIdl349EQsy(XlQfa$;@
zM{clyW70iLEx!ii-fWOG;s#csCaQhs%o?afJ&UBoDe<pDbo}^FMe5b72htnp*tY;O
zwP|Z?B*pWr2~F(k00&jgb2yG~DAAee`8h>e$xU=ivsFI`7%p#~2zg0?YoJQd2w1=v
z8G(R6JQypxxu4G=%bUlw5xy|~skdRnJEX?<)>d}rgE)(P^eCo^$j{t2h{(vqYY>fS
zbcD$5*4+i;j~DJ-LG}UhvJeGf1X>>pSjHQoZU!@@CV7Uzy!7V7=e9I7DhvRX<hwZt
ziF_^szaeNhkf$%XDs<@flc!PL=8a2f)Ks+goK*R2bX|E1`}$?V-PKOOrOMT=iZ4be
zH~Vb!0@zuh={1xrI4PcP=65s|IZLePWs^Edzz}LheNHPRjgfdH?ka04l-UY+)(8$V
z#jL>ut=Ro30}{4W6v^hT8Ps-lg;g%(OeJrh=X&3IFdaG-XMm1=ALZgm^j9_HlYAEw
zZWp{(pmpqN0E<D{F91{auIL%0D>)AHpj*yK8k&2H!)o3qR7LmAy4~TSFtGD`1aNmm
zMeMk2H56n}#qO26XttEHpUuA+F{UbhS|yUYByCol>QbB*;U@b>Szcn`0kxsAuTWw8
zcxbqRssZ-OVz81eLUO=HnU>0c;c!v#<lhw<-4gqsR#lp$z9kof41bhQ20#;p0RY>V
zf8LrZ3PztYLIw=~AF|%EE3QR}*4_zj!9BQJaCZ$(aCi6M4#C|ujXN|hjY}Z7yIXMg
z#^vp^&)NIE<K7Sb1A2_LR#iQ7)+_+oi3F^e{g+hldIOuZ>B+}zY1x+A6JrP5`GVyB
z^lb@RiL2oREExq-=&U@peHaFp2qER0my?>CBPG9_DTid0-z<z|7G2NbLcQ^C!xx2T
zIwl;l#PAY1-jm@Ep@z@o`VEqFu=@=pR`ypYQc=T51z5~sz(-wnvP>F$exx!TP=*xq
zLu6pn8Os?N{+Y(^Fiq~hpQBF}x6lnt)3Xbu7(>*`)>+s5gl(bMvMkfu2kX4$a~@&C
z8^M61$*UJ!q4DVMM;|>wK2lhUzLlBGkF1i+%D`g}D-3tn(D`*$cVh39d@QwfE+1AG
zn;jA}N*S<iGPvF07eEO!GSnv6eA^{Ch(N#xqQ}TGnS-i@=Av&HIUD$z=nBCF!6wCX
zLWc@+-^Jk3qs-Ni6yzN;(<0zq*SU%{P2?l?bcFWqZZJX`8dSxQ?d%=-AFCe?E0V=@
zWSaC?*p(I*Bav1(>OwS5s`P6`G_QFTOX7G9L?ib;*KNcz6>d1bN)cxI4C+(@mEvPN
z+}`I?S6LDlxvg{VEUJNT#;d-$dZdkojFU?&f<h<%1+p6d0@<NrQjM%{^rT`~%-l07
z9W2r)2-65!F-DQJ=xDV5Le<4aTxwMI$iTKXwMT<-1r2N!ZkmyjC_uq7ZEIeukyjyX
zx##;)<D>OXj0~tL>YJL^pH{+`yP5!4TLKf=+X^cF_Qc#bk~bfL$GbskI-@kY&Low=
z&37N+-!yOYj&(~Ll8eFI`)N7=3iUwM(1N~QoSC|_4EX<0UStGTP0!3Gs^YLF>mdzk
z{#AjiIMnM~cGR2F_f=s8JD0cXikG{Ct{<TytUW<2zYaOUi*1Jz!mbU(VzXab+z!5k
zdz@cCzzOx5UOfKQeUJLKskgpff&gNawUPVo%*l<|*F<IDmBReqdr{~19U}BFh%!+8
zA+Rq1=gbcfEo@PV%tOSz8eVlou?`WFug{JkYRsYHZ&1neT_;|asnH=CA}K~_CKBOd
z0|ya71gt!Na0D~~;Zzt^Tu*H#>#it`A5J>uywpsq=y!tgD!_oL?T~d?T!;P*iQca&
zyh-49sS&OwaNI1@+?r*7%GWBJ#(C>}BmB&v)_G)Ss-vFlD~AeV!(;`w%ZyP!Rj8V8
zRb%@k^=Ku7aG5iuN7?vtHC!{tb7PbseeeGp{e*Shvt^^9fkVtch3%=N&TWm5|4bnF
z`{KGTlC0B+_d2B-*2`i=g$gADr9)2F3gQ?LL1Xs(sg_lzHK>O_v)#18#-q&1838Lf
zwS`hztoGaX`^Rh>hDw>OR$Rk_MSt+3vh5KdPN>Fn@R?+9^SvS<u0%oN%7h#VWzf>&
zar5oq-Ezb83@6$mbMG-K|F&^Fs61EQ4<l>|ZH`)Q&ni5PRPh|00K-IVm2u5srAg24
zl$F@;iLMD16#-)?x=;9V+3?Tg%Ut|ym_&2uxQLTK8bMT51lb2Mmh~du-%|OiF(qd_
z!Xzz^1NJLtaSlE`_wT8>`>6g`hKY{r7Soi?4AG?Y(wdQ9S$ntHHlLoFTi)v8m4P<;
zHfb|+kZxLH1Kw^M5(a#eYMMH!MNkZ!SL9b5U+SxpidBQ2m`vL>zC6JCvAu~7Ow^En
zb;DyShh!lW-=rFMkp97B2SNe+Hv_ILEF`oUn~Z7UQG^K9u!qqEa4VP3mZBx15(Z;v
z2~%g-i_p<EMdzgktZ?HrJLoj+y-Us?I`hoqw3Yvo8oiUI995gKi&?M}xea2{JU&5R
zrgZMavnHR~*{QG7UfvX{;<{DtA$C(Hl*=4g*!3p+{kjm8TF3aeLT%BZ{x9g`z;zva
znGaJ#2!ml(Ku3|L<y^o`puP1|n&k)q-eTXJO|oJ2)idO?8nNcFek&s2=r38*6Ip8_
z#AJmfH<0E~T+^b83Os}n)Y0{>7~92UuI?ASuS$RHH+-P%V+w*lF&>V8jl9@VdVLiB
z;3e59x^N&T1U{O)M16k~F4&Do3o~BFEU3e7;?d1%0D*Q!oW`Z2%t;em%C)~zNQ*@0
zX(RMRM;2LI#eeeLKD*cr3@0wpZVag|)@HOaDYE~#x~l8DjT;bt@wl|Q%-1G61Z4u%
zg`UaEKxvIIF`A7Qr4$i@CPJ>Tn5f?BQD4yRlnF=N(W?_;>3^$q`J^c{I!xl5^V?x@
z*t!yb2rIFl*u8DH(h##Ptf#&k*VME|BcO(7IT?+*f1Jv8bxBTAk5lpsysri~AEd;u
z_(KggU)Uq2(*Fct%?}S8@6ySV9u#k;;RMafn;?U`=FXO<*-K+3`aag#!rQ}XzdRH_
z)Kzw|65>Wp|A1DpNyMvVqtE3N<e~xaA-t$C4eubw3J)s3=8lbl^khm+DBh2)maETx
z_uN#INpvq6!=&#G7O(|$Mo>If7Mll_YispIcBs28RrRK(M32_K`3@Z6v&fF;E-)FE
zQ;3q04>!!^_$a*L{bzWE2&4o5Os}SR&h`l4<W&|KR#UDimjtqfAOKpVjum9S<}~;r
zVU!x3JfxCmW`bo!K)yj+0ia8x1L4s~<(rEpC#92w!rDUX)3##OFPy0h=t|@BTj_<*
zY@=R<Y8o8izCG56@JWQ{)K<K}y(f1MtZqJHd1;}G2S;7^PX>hMK0m*QXd1MQn^=Z-
z8Me}|{s5mZuB|VdgLLYxnun+`O@o0KzruJ*NV7>(db+9WjPJMT&rG7>Hs3f_)8lFf
z2;Gxm011a}5(yDnW|jf*xP~3$aPRwuyZaY@KOJ9B4Eq(+i_mNv9I2?1`7}TSq<u>Z
zNKk<VBkQXNu16Wcw&6n>+fUD3!JUP29WTR5ms_a}pe)XWCY-*nIlH_4iwL@3D;J>%
zu<7RYZFbD$*Qc<Wr`z0l_KladB*$01PH<@mk0d`#rz&+HIyqY+EN^<`k^%|a$T@2e
zwq}Kjk!Kysb#>NFPLsbTCiz5(ieE=VfzVs*o3mpusBP=w!Dfh1&+!I@Yy1=)-pSS4
zYMg2%GZjvzu8iyTK!>hK9u=;d;3El%!E9Z!2S_05YGIP0X3jj*5ubN_1sg$8)BT8=
zBKGk{<?vEH1BGt&S*HObwF>q(scmiQ=u0!jrj--hOh+(_2jFx|i|UT;{~;IRoJusa
zA1LA#N>YoWTGrXoBJ7RoI{VKzgdnd{Q3~GESZhw<NzMi`Ycaa1=0|?IDNY9>gO4ZM
zC^ck7e67YJqEtyN5ea}WF1HZ8I+I-xE{!!^j&5H>2{K-@WA0UCz@v<c$58EQ>+LV9
zw~2te=4IhE$!8anW6wX22M>eqlL3(9TzBMjTp=Z<Zs!uU5&7n7w;?72cMG2?{P)ag
zxC}Trmc7Yj#?`|5_D<rjmfD3Nol#Ba9UINyft|g5T|=j2GEU<vlLD@FyEG=BSV+F1
z(>>$mwXNg#M?Z*Uc;O#}EA*({^(faZ=DTB2T--+XrFh>Cp|4u<TW-S#SK75szX20l
zQCW0M<AcQJtEGFXxb92cQ|7x0Dqixhs3sn%RkoTPF+lnZx3jRiF!)4*$P}HwcKa;J
zgTWVWWp4B}MDx7p)F5SS6|D31HlH4XlNxXfH$llu1;Z%-N#`cz1BEPQnAW^F1th&y
zOud0DM8rmCWe%<0czD%zwayB`m?+Kn)Rbg<H1`#edAG)?e7$#I&279ZuJ4;|@+A)v
zdlPMYPusSFTNkIQV3VQJrpc_WxH>@c#jF4D(VnJi-ay!S><Yy3vl>(1?2UY<a3)s9
zdnE^7w|byyBN~5!MPWANhZ0=>4_L>8YT~ChAO8Qa5B3fEe?{2yrdt`@3JC>D6NNl|
zN}OmI`vy`e$7y{=_obC(<2o(0!YNGr-lVe7qR@dDMT~aQC6r!wyy!4Clc1sjxeRCy
zT0Ufm(F2aG5k;5Z=k)XxBFt-~)Q>>AuWn-6600L+Obwlf{(!5B+hX%hfxU8%Zb+X@
z@{Y1H02-Xndzle_hVg%d{C#o+=RA_EEsGdEHXWN)VEAWchBBPCCQ&5hlMg|Ol<1g0
z&;flWj=6=9N8wWSqIh+k%xbG=>iZ5DS34WE@;<c?Yx%fITEEB^)86jo4n;Py!{0^0
z@FPDzO1jWB@%F`ATskW@m8V%_l29lbT@R1IQszuf9lZ?|1NUZ&+$^=G>-duYwvz@;
z^U<bfEYmPnAQu1jInMg|mtr&5`qGsWDlMt;IT=-H1TkQ!a&=y^nN8{Xk^#52TyvBI
zAVTt^hvTL@CyQ1#K1jCr<YEWMs=UdNDd$4IXOoa!49)pynwD}c>B}@yaht;`jdjsC
zN^k5MJeyGR+UKDUi#fg_2L_%$<@2>EnWfrTcG(kuv1WQ1@5}*p_$GciMDQiUcvr>{
z1W4J0xi&tqAtsZx?74U3Psiy6m}D6t8fRTk%jB<z=*+Krh%?=?WxJafC#h&gxV3F<
zImXN5n#v#J0Ovx0wLcO6$G`CZnYNKHidg0SG^8Y!R`kCg9izjgqb}*#k^N#RTh*b$
zK(WK4)t*w12g`|_TSw1COljx!NSE|&H*HE`ql}x8eZg}6a*HU56`3e$6DeUL-x&Q#
zlr}#+)$*@G9B=Z$QaS%4T;bvMWKJEnN#9Fx$o<I6ni;nb2qU<{uT*ct!VkO#0Y`4N
zFvXSYHuy35X3nfZcbstLR%JNqH*-dRsUW&WPfV_wn)!G{yFdRZWLP|EBXS@j!89f1
z>~&wQZ+D3-VRz`dBN%PqJb_w@XVS+|!7>S<2JN0wej&~Jl6qro#82R}SUuCnfi749
zJu(_hPCWt&Cn1$(66y6|Z?Kp_YrR?_s&@KW^*c;}vGun4{(8GAlhMvdOY-*IzEZc=
zZ>di2E=MrS-B@7rh;<Q=TB&V_uE}6N!oBcf8bN46VVp$_jz&}ebi1zmXJg&RRL0Rh
zkEm2V<FtZ&8)d>&iZ`NJ2#ySJas0_#CZ^mWM=U|YhQK6KN~w+^6{vN&wr4H0<Kw>@
z&u(Sm=z)t}e9~6##XVP9nLJgOzmzIV=+det#uvNJQ!X)JS|XG?O)l(Erif#Ikc58h
zL((zj-Ek%YT8Xo7&++4m^zY8cFsyKso4*zqc~W5}mj^gzpIpW3EUj5|qEX~4Ld~;L
z?|tOX#{Sz*^U%cdkFN$EUK>f{rO4;NDn6XVC*lNEcxdD8ljz4+oAKr4sQFIpS0F_0
zL$<IQw%RN_kekVikSth$N$bJu<JO3VmA&QKR7q{DOu&xv#$gTF3Y8|9HrW-Tu-MH~
zRuyyYt^LwV^R`#x^gO{k+F&YlI~{-B{I=6Ai97-{yuJ~>xp}#x@ej`GvBC>WT~$EZ
z^(Q?MnF88}{;>W@O%^=AoMcmuLWZF_dj3bq=c~`DVOQVA+lOe2c)7*u>w(zS&%~ce
z@e~%nzvkPzDs1nMa@X1B=|nh*##Bd8`F^5;)*O&Gq8+Hm3lC4X8f;oLf@arL?vKt}
zsbX?Rb+>L0odilSM~Z#a7F`G>tl8X09JqtvKcslSTR6`@mH|2E(_w`UJ<IE+CW}+I
zLL{aS3l-^9bOaw&LJ~SiWlTiGpp$`@<(ctSX%q_2S&F<n(*ECrN)gRzv$As(V6=D0
zOVcr8L$#e&dP1QWeB5!!=9%{dNa@h4$c8f>Z<mQiyB;wWB0dDs(f`!@xIXVk?Dd>n
zx%e}<P%flNRCQ;;U2GiS5Cy=~)RYJ2cZ~EGWQ<4|Ri1U_prcH5;g5E5)v{h3C8|z(
zDvmOP@A^h-^<<jLLu90^R|bwUfTIgYGA27Iv!0U<KZ)&^2oKydG!$R?uhzBRQ6WJ%
zO+dB(8`Xs2=g5rxY>D`6(A5XBMg8tstF&5d^Ff2hpq`JvG^?tXti4SQ5vw1TYCk4}
z!9gdc%K{^uHU-+Ll#C;?Nx|SpGP??LeUeL$#E_(RwI=PzwR~;Tuw+FW4Kj@Xb8>h8
zdV{KlWvTD&b3n-Z^!v8slsi+v<s<VA>bs-xZw6*RB=!7Tv1wvFylatRM83JQJcl_U
z=xGrkC0W=Or3yy$855w%-16c_@GI2a{oRo}pF7|a<#c0WGCwzO+gqN;%D}JG6U}8C
z56@SSrgamA?kbT2CIW^G>jH$tGe$6YN-l2*lSWiT&r(zI)l=PlieK7TwY0oQK*PjH
zc8nT`Fu?*ZUOi0UV?eFK!6jgLm73qLU~7!Ja9DCYYgm+>nl-i<to=m65D8_&l3z?g
z>*VAPlih-{k%}>Gs@GAhz~Ea^VC50yExnk0ZH@6Mv`ZQO)@?XXOPjx<671P<BkdV&
zG3gMMNA}QU8`Fn141^>gFnSeMeM;EcIL;t>P7Vmv6KM85IyMKC<WC;Bu74a4Vx6-l
zbOCGhRc)#^F#qr_adln>ROF}=bAo_j4kOt`z|1;~ugu=gVn?{Rvg&#t1_VySwxXaT
z&?yxWYq~997~3$2#6W0^k)N+<kF=Vo3X^03Ydx4UoK+W(q1lU<CEb5R^_b=#{~)`t
z@`B@q5iU1KRiCzJf}h5&Ns9GfZV=!%b@Pp?_NK&@2*y<748j9)PFnAn#U@9Z<WSAw
z9X9ccEusaZsU>#hRm4<>Ta$nczZ|6>bj3zhXhfh#cDV0+n(TTYhm^~`-<x3Zm%Z*s
zP0zhpQ~xvEfY{B4ex<1cs*;3oPVwO!V<K3_>JD1zB&ER!BNbo}uJpV!p|~6}iv8k@
zy?sMxr;(#$deu6SyNCOd4S!p$<SeC%yqz10LR97CMV-rD)MRS}q`Y$Jr+Xefrt=60
z6r2!rzoS_PJ;X%t)eV*G&Wp6Si#Ig%*Ah&X(|?7x>BC!E-u%FB5U_)i<A3bLtYAY<
z7DNAew_sP`yK`H~`mxL^(#B!vV6||s%cRigpB`l@mY^^N@yj6tfp=Q1%hwe&v22&}
zsB(gmV<4G7A$d?ygcbF6ja)=^g!tB7?5fae;c=pj4SHhgQ;VRfP4PaP0{n1vxqJ*p
zJ~9*;WfG+(Lb7h$?83)+k#Eh8xU7`8G4fv$??%w$g>I1fDy=`dMu)ABhOY_n(<BZR
zpdFV>)+z)$#DM0?Cy0L*0N>W;Z^|@0tb8r823Q>M>?h|6s2Z$uqzjB@<`6zlPL5>I
zfa>DP$q_lwZP(*ItJQT4I)k=gTaA9EwM~z@GLjZ*2zGUcEqtzvtfM;tjo*iH3kLlc
z`QiLqEAW(h=k07Tm|9tGVNhfzMvgjFC*qWjGzIIhzJYo1!5S)>IhV+`>Zu>a@S)3}
zi=Bw*tVtK1S=FNmPO^q|q{)(8s58*T&QcKM?`Eg$Z+>pR3X~2W_-?iPCwTZ@?!1g`
zK5M^i-Q@0^?tY<SrYkn$o7+6uY4{6pEaMZHN36+ll|fZK5W0nUuyDuK-VO(OK<k%u
zYM0I+ud%tMWlw=zqds{Adf`RBLA&$N@Z+yqy&0m<ZJF%9Qp4l4PB{eLPa6boay;ti
z2W%Mb#>ltbN6gT%O;4<H&pP$b866dfD)SxUZNANrJH7rnk(TbbeL)hqy(~OEyYR|)
zNTP!R!iyX7bi0J@5j9l{NUWV7E_VIltwdU3;4of!$l@7paV`vtn(WOB>PNykq@52N
zOR=tuRHY396nL!(U>kTHsna-t{faPz@!U*mp4DFjqw}dq{Xh4GCHa|ZCLOQ_&mosW
zn^?gk+60;erl2@W;Y8{VGWc(ozE@5@hdkwk_Wo897`X7=Z*W_j_iLusiOv~omMr~V
zDSG={(chZ+QW{q@FU&vt9jE~_S7zfi?opVNo|=Z{O}X%y)y=eJ_sAZtXrK@qf58E%
zX&P4mhb|Q8{xvAVaIQ8siNCy^&7m<&rivIhY;b)}`I@+)`9BUn)}1QTf4qLirr*Xx
zjI()Led61r?PWK}r1Hz0{rx8r*k}5V1eSwVT?SG5kxDyv`BX6qje4rWfAe7mp~>9|
zl>db60dCX0rN#_2krhkGJ2=UgWJvhma0~zLoK+?EJ6_KM{|tC)5Ej?Y4f><4#o%B(
zua}Ew6_(Hl3^<k`6KS^M<ykto-1-sRY43jDeAJfKq_831LCnM3I%?BtQs?>O_bZJ3
zViF*%O7Z|&fZF7eX`<>Z?#9gmq|@N?T|^gQM`M*-^kHAKs~@g=#}5>_FXmtCOiDI&
zZg=ww3f1KqoS6|M8Mw_HBW-3|Cl?b4>4hW$!1Og8F9M}Br2>~zZYG2X!|YvUXpAbd
zzq_As-=j!g%Sqg^zr~hVlfs|)|5^Zn;44np*t9*xqa-*BBsGWT5A+bbK2(q!JYgo$
zsJ>Fe4_7T|SVs1Rta6`wQq(L+X%jKkxFJi<-?RYFKYo&}{W1Tns?ID$pfHUff#_5x
z`3;ItD|!lL;M_<HYvLv<`0JOOrh9>&qhb9J&RA%*(671PNdb?MN(sg0dH0YENc?2{
z;TsZlj8GkVO15QNs+MTdjDSxP^N|OQdTRD#rs-JwtCeJEfEDM;4~$BPjN6Y^V@BWJ
zsX(72z2CvV5;M?J$9*Sx5ap-<rtHj4hyX>B`=_5I2UZfu6_5WD0senib^=;t-3M}w
zkaq41QH0MVj<c?b1+v=rRuhV{vrDKHS`?~rE|g{T8uxS*_*@MX)?*{F$m!+ZUDX7N
zfz9N363p#&zbYI%CZd_U2hOM6&ewIF-kxmpLRBnHWLYcCnhm#s-?1WMl}AKY5f^I=
z_7cO1w^yJvd0QF>O-c*x2>@SaI8f*i$|D(@bZ}OYnsAx$*4muH9eu%GX*36Vo&MQe
z2sjjUymS)!7Xo^AiT5#k*D*`yXr_IX-_Lp&T=RF99etxcI-ac~g39>lB;fZ|NyZEQ
z3|r>x%Nag=bXu#gj7kGIc|QJyG=(Y%=|6J(7kye>Zg3`r@5wcZTY8J-;F1+I(YxNp
zEgvT?9!bsJ2G|-I3E-ncP(s1pq{V84Cgq<DMWfY6%Pq;p(J0$SaBPOvr33L^TEoeb
z0TL<j(<O92(QN%SpezRm-+b*&6Z_}vx74ecERfC1e#%ua^+T-Aa@8;Wz&C_IS}9$o
zzLXD<BR$t4_O{w#X`1J~0T%%;2N3PV6i{Mmi9VeWS+{pgv8aXnXMnKz$W1ac%Xe@W
zXB)yWo!EH4<P(Uv)(a*H&dMw!lW(N<wcL|OJo+LB%7`{a;C}h3TRs>K&F1AYh-H<h
zq4emd`3$zghU>=5-ws(Mv7mY<Yg^(U|1$@>dF8&-Pi?r^J!qXbd@b-_wt(@kvq`G?
z5Pmv37K>sJKC=C<zkd8BBO$7GAG+Fnj&ZRju0lmU?|c#zb8s~9S+|U4YDY$J8&S7X
zn3ViYaGOhdR0^TGXs>eD->B2TX9Gmr&jy5PIPFXABk@1d#)<ZWoOzi@m#h7VgU0qd
z;nggc_(wz@00F&PueA>|UU&=~US01|E7yGJOHg7Tp&*|8Q2}##d1ZU+Fro8Re<f5n
zetwy^Vh=z3E8NRgydN{*Pp08BHG~9=_w0>w=<gun@K}pzNf(`Lz4%JnctAcgXiz@7
zx*absw!SyElh+d-!hd{<Rfb*RwZ1jEH+j7b4{DvB7F4(d(HC>o<1h$Z&6&DJ4>03d
zkzuslZ9#q3Z|fajH29N>Yp_2ezIeF8q(SB4=r#g^Bkt|}b^VZ{5|YGz@qXBTlpZ=6
zaD21Nn0bWpU0w4<NCBBspn;Ig(ru4J3bvjhuP%+eU)n~%2cSaEB_@x;%8~VX6KcGg
z4^XhnXRX4XX@SM*{H4(zc(;DIY#u`m?Wk5gsE$@~T&YZH3hPs9>B<l+xxKL7oBMPh
zkkl;r>MlD{qy^7KdW*&I`TXqi5;J~^gv7<w9(9ZsKvzLEN|p?_IhgIPUx>?9JV0YF
zCSSCx%1x1<0<>{pY%`@eU#hRLicl9MKD$H=ja+M+(Jr+=_OFC4-;>{<J^Qp7GLJ+7
z43ggIl=*hH;Gmlmg95)3uB=X@%f+gZ>ZYa-wcD{A{I>xB-x1*-CK0pV$>glq<pP0d
z15!&d;5lvd;H|r_jv7SwYz)RIp5<{8%$W$)t_rc$umvAgY_1ZUVGl%)7Hz|fFZb10
zRh(0+Q36wR3_OphmqXrMjX1N&(&#KQU}cFrV&|ex6m+aqcAtKuUe2j1ecwB#S_w^-
zputo80hp_8Zy4QUfD`!s8u9zL-wGFbqD`uTB0M7Fq~=<uTlh}9hu7lsrp)k9NB>7c
z<O&yeW}<xRJ~TmgS}{xlfRr|qCK_MUN=Rd`s$)P_O~4IIj?cMyj_);@2_onHVIQk2
z%IY)Gh4<pD@1KTTisc6a*cpf2au0!tBW<O%72`u600v!si8V#M&?9`s4R-Uz+V1}M
zaBhd(?>7SqUH8vEIIBb_8(P(W9v(v0Uzw_fd?Q~ONDe%NuG%Xnvfz*Bs(teJw7zIL
zp<{_x@myj2j&MusV=tkCPfKNk9|9z|58hfHNVvG^MyJDv*7a-o$+^;+t6ZafyBaRU
z6IVz53GfTXFF%48)iO8r(P|j76|-yAp76Sc>&_Iu!xWC{Pn*Lkz{-Pzo+ke2d-~j!
zUK6lG_%bxR84jqptV@y~S1bXU$zEky(}gCnQ`HQQVvD=0oM7_nJQ}!~^A6a&<A_~m
zIhX0S6QqcVL`q!Gnc0D^JQ|!DmT8G7dI_*~U5DO+hfI)kb4@&y>h}KPHK;vbXw70Z
zC4iC;UJ#Qv7{Y?NL9I#NnqFpE_4%(E?_W7%=ek(`Nq`0UCuGH^GkG?Dvif`4HsUwd
zMyx1+$NGO(n`daKBI)e7%~vbwjZJp^DKb)5JPF`?BFJhFb;K}MsAs<DG_}R?(?D@=
zJn&)YroGY?@5)LW<1Ii#(I)tq%wG2;quxM%E=R{yIQG}&aR3G^IXlO*`heQWp6O5F
z;q9mAOiSzb)+(20)3w8VVxlTW2fyyW)}W%gc>9h$9?_9JK7@OM`_my1TyweIb^Kn;
z^pczeoev{ga_f9u6;M0*=MQm7@AKxXyAb$s^5%Gy+jDmsZVk)+!?<iRhIQh@;r%PC
zf!7~i2ys3M70id!udgkxM{Fm5aD2kQe%|c-?Im1G$lnc*26>5K(!U7zFKZk7T%h>x
zEDHS|z5?{$;WQfr1c&GNhZI(P&C1>|@p2z~E2ijr9uyk5csssu@%1@#S5noSADF4w
zy7-x|j(|(MpJ<Gaz*Q}_z_n^3w1{E4J%3;*`TU_VZ=2FRtEf)rP?aU^Hy|NI%~4aC
zGAwqUt7Sg(FXbIIb;7hzLDeQor|O?);ni2AZ`(>_PH@p}%G82ZQD&0(55IhJ|Ga|(
z-fJG}gs*}f+SgBq)mEF@_B3s&-BzNA-cOnOzID*)Af=(m4qPJ=%7WPM!8a<>qIG5|
z)KfJ5Cf{NBH10*qaQ#a@m#Y~%W&b%6A@)SXQ7Yh=S{K{pb#)tTty-`KIC*eJ;2Lw<
zQ{``LaO(IJbD1cNcSBpPjMM(T7(eDI_wS0lUYGN)se9yBj5$4e<#A-*+56BepD;aI
z@hi51k0pvia`tR)aHR?%7AcB?LI<%RWgrR%S09=*YhuA<6bD@rKqQ%5fAt&XReSgC
zNl1%Ax6b^4nv?mWZ%HAtAf056?~}d2%M|Aw^I3g+x47(dL!|{Hj_xVR=Lfy9ILD*`
ztZz)aqIAKDCgaER)T*U0gfMt9CAK_m?mu6H1r;Isag&GOcriox%OB00;&nb!YMN+P
zDR?n*r@fIc_YTawwmNrBxP50~7Y|#{53fqELi&r&;A3?bGD|A*BCO@E#&MGOS>YbX
z_ZjA!Cx)8oro=(wfSBx8arZPqGMH+i@3E`gj?*=o@F>azMJQM)iw<4Cp1V(e3p|V(
z?jd!*n|ED&?TWTVCHemN=VaI9lp}o2^NP3aOuy;A>EQu2fTkN<BmDbpaYArJ%hkpi
z<P_K^o8488V}t^_((Zhw<tZn6|2)<(T+{WkLhYjy)r_&YU|5AI>~+Y>s&WdiaZ|l*
zE@$eG;YL`8ZVhd+`b|SHngfl4g0-G&Z}K(v{nL8!C|3Psr`8A9G1!C{1T#!x&r8Gu
znu7?+1mm^@GEXj7I}muxma3Q>?Uj88M*KLQ8%8H|s<nWzF+u~0Ls#0KB~2v{8Co^u
zG*n(YUo#3t9c)HlNiIc65E8-p(VApMd<^G@k(21!N<m}k(g@Q%^zkGD$2jD|5rWuG
zyx&Nhjo^ko`3vr7%KsKlfB&PW?i?D08(VcMH$$L!O66|PyTI;P0-W=gH;_uCjdB#W
zT6R&vK4m>n0e8~nYj2`SL1bXY$MU9$0X!=e=V&u56|9x&or8dL%#bOzN?6BiY_fDv
zg^an#4S*)cGlqsZAjP3>>+qVa-Dg#cE(5<E&gb;(Y|1sZc~;g~t7aH5Rgi#$3@eHM
zB(}=wfMAdD!DTx}m((xvH+FB!;ZnEjkT+<%XOP6)BOsj_qR2q6;hf&^<hL<ErPrK0
z-|Qo##Rkjiem-S>Sn}}8Q1UyzKU5if>-j)+ulP-c{0rX~LFZJGm*IeP!kVrLLgubs
z(|F+!S0<mYUvK-R>;7ENGcYPCYB8r*J_VXYRr|zM>sz>W>>j6%`aVV*?mR<Md+Gun
zMP(-{(@P`hG)a7uG!4De!ihT-+gGmzg|E^5?=^)-Pnd~)hLhUn)u`P%4D9f*g+xyF
zOIeeADynLufaqeshhzDn8L*fl9`(kgqdr#WREKfuOLN$4yb+r322P*_Z&uHCe~3n4
ztu;2%L{z|S(L@w}OMhovMeU5eQ8`RnwN&tl!Itz<gJYDyi@{52?moaj-{3X2?4}wj
zqaFaDd;%>EXWA#8QhhB+W=dBENkrgQ)_tgYE&)B_!rN5WCP&~=hw^Y;u{Av(H~x-V
zSd8kCxik9iAGPN5=?+><Vgw;8hy3@CPdbidZK>QG5%knz@*g4*Q0n*yk4-@<Se2&9
zH&*R$+^>d3e{beABmLX-R7uJulS^j10s+aB1qMT^Sw<<_8j?&>!#SEvIvuBFSb5GV
z(_iQmC0qp%aGg?FV8ucbd>1G~RuCBi@dS__ikSnVxg9q8r454AVnmTJV!_8LRg$A{
z-6#A@$2gz$Tl=6;^@<S^=e;GHmrNWT7u8i_jPZ=nXg5B67Bgc?Y@p0)JZNktT%k@{
z39F9o@@qR>+E}-!1Ld84th?G*rIAZ$Yf_Abe_kZs^4mmzxW;`;5TWdI!k()tCfIrk
zPH%O)$?_Sr{H(uXeDOUK)$al7Wy8@g?7s6|(2luDx(?hqkrAOkII%Zm5|I8i$2-WO
z9R^(`x%MxG2o43ay7lSY8m*k18B{^PBd@{s&3?_dtrI(Ykni~{a{(ZdIu8mEyOukB
z?%^6OXyEteM(q2-SH~{&1a)!1EqtZz(B*v`+ql*DtystRD`<S4yDp^MPDB29E)u61
zZnMoN9Q;V9qbG{3S`ZQ$Ll$VfUMNDieodIEmMXD~g<6g))6W{bBJq@O$zOFOChiC&
z{M7vRtmm`eX@zX(mH=pTBE%qLWcj!Nw=kMx$8z{<u=}3E_xN6@|NedGz7@1Qc3>i*
zzy5j86}o~-tMFA@{`HmUUPkhG(Rp1Iyf|bNwkV~J-yy;{@N{F^lk_CGu)4A2Ft|uq
z8?nY4yhq}l*#z%hopZ*uCRGK4Fno{#2%Cx`Y3?s9rOpR-tJNP*;v#|YYzuS#hwH=s
z!L(8lJ@&~2wQ3C|^FUqM<+1`EC(Z^LCMGw-1}o9=yq0>{)Ug=JQkL?>z?C-w--+L;
zGD`kodAp1+Pc#8Y^DrkJMP=kG1$-=w=>pe%;vLVO7e<K$#QxjppPD@Io&x7>#(Un#
z>@!(Zor!5BhyC2=tL;hzRK7>sL}Vf)XMCjd{Hd}^+0?`b5mO)=;e9=OP&m3sVmo*G
ztP~)kiut5!>t#XjANRhrS8%y+<g5l&3N8CgL-1t1zvGFW4$kqL9oF}bq0(D?9}^&t
zO6N;Yde;a=yusH26QXh|1$)wx0S_)!kZkx5t$ckB#h{>RP-OP2zAN68RaSO(U1#&)
zg2PwB_=k~$?vZDqC;!@<>JzKNdLjOUYtSg9Oe$)vwG955r?4~jy^r^W>wc^8PhWMc
zM4K6!1cGrIbv(pb)annS`jKkIE|)Q*rBcFhyRJ}N4||-mbw5m!6Mi7etpDu2QqnW$
z>%%G0ygy<kno58B$of@Y0-{v)7uezr`R-1>QhvSgrGW~RR54L8QKQ7ekX5IKnt^qV
zI0Vv2Z}og7-t`ayTd$-D)N3CEKp$l5z&4V+@JvGW+o+#slvme8*L24`=l96<0_`<u
z-E`G$S(6s3v?mAJRCL$61l0dRvWbe}U3bDwed5)Ab}|CP^%hVy8bK_S9;+<XF~<q$
z3l8ZxG-py(yG=;RN~RiXU(mMy3vSc?1-DxF;@AS?N?8>|nxOr^s9Na59=eKomf2G*
zM(iNB&q0nl*)(-mFU>hiav(8pWoxQ3u-T?kGF107fA?gYne!%l3bN?XV~Vlgz_aj+
zXpa9jLbb4GteoZ-bO~*y$)Q%rsYj8RAT>#kL!U4q9~^ZC{8%B6LY~%ds>~_qnw|A(
zp)HfhCxH!+!|xk>`&%=_n$?CohsR{&8qpGtHTEXIzjqa_E<>c92+NIoh^$-eIQ`{Q
z0_7~REgBW&&h<Q`0irVSKQ|}#za^^{_U)^v(ce1>ctGj8H+B44AU_TY%PR!XEJy02
zi5b(sJArU8K4^RHU-lp5Q1meY&R{f&+eS9u+uwh@gWES1mo`gcf0{dJp~|UnZM^1}
zKAt)T0IWjK3hj#`AxFUdLkwTjy9YaUms97S{Dac5@n`)J(6|&DVacO_XsWxQ1H%vj
zQge|;PnVj<hbmc<P6N%=itaas6kw9~2VB|gujSBFhzb-bU_j?56-0rjtN5!vkAN7v
zKrSp5E9oBiX}!qQ@j7!D#~Mk21)Bl5t{p#V*}OMB>yLCIQPbF?RZO{A)0|#IDP*eG
zKP+;^RBjln%32lhviD^N8SVQ#0<lGP^H)2AQi&ND#NtS|qiD8(6}q1I>{FoFMbo-%
zRfW`G#3niko6e$Wm5z{@+Mg-IFsjV60D#}g!Te6+aqNgFbH_hym-OX50Q0|`m;9V4
z*TwlFE4hKHv{X)Gl=Ec%*A%Loi%V8-*{-TBS+w2tPgXT-Hdqy8j$xOQolRB5BCOIe
z>zL_x;yxKoS1?=V<?!YMQGG9c4GY5UYZCY=B;Nmh(DR=CVpspdFD#tIFGDh2D?2w3
zF&&k8n)94p|K2vul3_wgbJzb;f0&g*i!h;MtUT?0t};i1yu<=4H2)V1UWNJod6lYx
z*SVy@+d}kOyIVTj!<)sbTSx}p0s|%J)Ud5r^}_cf(S*_1jk*f8j#ct^@Cdq(&sTx>
z*n@9@mK#@CUivJNAbq#260F?wNOXiilb>kYMGK)ok??P;Myk6T*}4!~nA(J(*VZvQ
zZ>xKHMtte<&K;2M+$AQH^J-6?Z!LnkdG#m$GF8|!&=(jXStR589`BFK-;O@75?wtY
zU+2l1w3W=BBBREE%&ECK8K=b4>H=4B!!`$NDWBLj*g64aAF=U}>5HT`umkLZFX1Z(
zuu7S*u9)@V@p!I^Lp~)hJi++Nh!`=nvhbm3Y#1Pma6ylw?N<3vMXNu773m*BcWbPX
z6R}>le|y@Mw4JSRMOMo}%B0<y>*=W-#1>^_X&HT%4Wp}z)YczVVlBgpUJ<G>PP>nb
zwQ>?~gc<<fc}Hw_@Rzl&6#*sX3xO|mtV4yyj~_Zz#ipO|azpD#t>Cc+kQusA2{Ds&
z#F}B&xf+oeztm1W7}UUQ8ZsRZmcmXJ+Wd;j7x$8TU;o?ZkN>aFe;_-vbgk26V4eS9
zt<z`V>WY_!<{&Gll1Es(K|r@b2c=R|@b|aF=hI{k-sBstBs0PS6VKs^P!u)#K-Sd+
z{o^rdWQW#Rx~n6lfE@5Q+D_fbXE3iM+{V@U{bfF?e}+frefH<mGYkn{!7jOUooZrz
zY8ldlv1_~jqfQqM%L*G+_xL(BLh2aR&u-6kEBJf8#Kn>F<TKJEgF$6`%e)637p#nn
zZg6T=ZK0c*!yy(u#o@%ePo}6wUJ8f~rMD>KF{$!(oJs;$EV}JqJ1(m<6G-&}Zl`Xl
z)KVK)SP?*E+!7<-(5{~HzNOY#C-sS_5A%t9iKz10efo6aw{J}Bd5BRl{|hqJ-Wz^@
zS<yFX_<cU{sl+z#o5aKAy>Cd3@6HMI5ligUFVvUe`1&O<%p6On;$IYA1gT<G?<b7v
zS<wRTRR^{CG%4Hm&9ptp%DIHcH$RGgl%w}cvtsl!HL0msa^@aAmrBK;g#}hs4c&pn
z{U2&xs5l0I-@JKM;YFw_9R6@oKJ-TvXv}-pP~*fnOy}`ps@aS-qEVrlo>^j0^#OP}
zDKe7RN&WMGS(<#zN06o{;+nCe0#wDhTV}dj#=Co@Tll0exz2l`C7vT%zb8~4>n&HL
zl1g;L8>{oG3nQXr6~&RTfxDxFuyBPtXNQ6HI!wQ8GEq#GyA9Y3D)EJ({6$1D5<-gh
zH*W)y{+5q6>Bn$3|5KA%CO!Xz682%qu%fMsL?u#7tx#jA-fGNaanF+AVH8_@WnOX(
zo<kUrg|5PgVO?}Lww*bG#!9gg8<PE!X!x>cNS66fq)?qUF?Ed@a7@F~);Q^&ez+(X
z=eS`JV40qs&GZBYBsZ{6D5l+><kKe@Jdv|0=6R+~BcvWY1$n-??F<(QH7Ps;5mEq|
zpQ8dCkNF%iCVnn)L`VQiM5ECO#KnGt2Z_x)>9f|(BG-e+zsZu}QmGvNPO{tpa|zuX
zF<)OZ)HDQq|5;+s9Zv=?YO7#(J3>svmS3?fPmk~umIaEY*}>WMk?r*eU)SCYJ@nrH
z(7t1*GyamW`&!rc1T}ZB`$706{iV43dUNmciOk}ls6=#y38!vvr5H{tCu{obEUz`8
zDTU-EQh0#*J$aK9wVK8IkGeXKy~|ag?2B5g`}Fk)cP<TZvVDk$x5$7XML~N{-j3Wv
z3KLT%2vc9_lpHH%T*wl6qp72Fww6chvh?lFuwO-_A@R5le{ig{%u?z1!%{~G{0%br
zA<zDrsU5672Co2UGQ_5c#j%X6Cf5Y(56UyzK@R^O?_$>T?FmY*1reHwcDbcDy;AqZ
zy@9zCOhjB6+(0f25J}NM`U+#GcDbbg-iARvCj}B?6!}CC7)c4ei_HB?rM7%pw&b_L
zSR{Lo%*Ef41fD1VA{WL*sQk$;B}n901WS@!u3Tg}P>ycKwXW$3aa@f})jnw_&Kz_T
z2g9_1q^i`R0AGgNU-m8P>S*&FxdJRg9AX!U^=G5g=OwoLm87=2dsL%2?;;&y?hOC^
zD%gZeWO-n@*6I>}<HIo50aW`znc2YrQt(*g0Asox3!N;{Go%`Aji`=q_ex35>mcv-
zrYSt<U{K6(9NKUuF3dFvJna_x9yJ(Bg-J*g!KU;y{AjI=^ht~lxCnnJ4!F}z@mHsj
zqYw00#(;mr$-q@scrL>Dq7|h_*Y4w+{#&k{HdNZEu6cBPadT@wmQdhsH~2r=K*#k<
z#f#zF{X5N@<@-^9gpCd*yafzO;$W^9F0<bb1LK`Lx=dNMHBKbSi-+(8^ZTr!_()Jf
zM=cFnx#%)dY{Oei_<YnXKeu%Sf>r|kCqfMJ6aPz7(N`15a?`uM7!$NJ1}&zO8iBgI
zyUzC8r+|Io7u^)DJdFtOD^7*iI2u<n_pGgIRpov>MFg5e9chM){V8@XvO4%9`OCB2
zm`~cue)K!CWGR!{w=%QuS@z)jN`tUSQt&Mm=PGi|hDe`>qofHFBM?``hBd`d;VPhZ
z*S?3%pjHGeHPPpJ=MuQp2DzAhb@(k%C3uT$jvFYCYev25j@DQIKJ-6cvifg89)+!w
zZuz5E3mUw!_zH`7KW#9nN`fA|pSoN>lHY*4UZ2eul?c{WCiXb9dAU#%L<n!ILQ|pq
zbN|TDeMd+es27v+k*emgi{$MCvG;>$TU8pk?O=kCn|E!sKp~KS(4)oeBm?Y~qc+Ks
zvuJ3futQR@yx6UA6IU4ON=a$@HXEJIC0A{e5Iu7srYs4XIuOGpq%pvdR*=JItCPu~
zoVg1s-MFGOe4TM|jSg`|v4I)i-_Fc(I=U8uVDZi0ldr+KU!%FHo=>Hj3V|cJXeEzn
z-s5@n4vKd!-B~*6Fch3bH3IMCgEW>!b9D{#iO-PPN1#u*d->)$_r*`>x~?>`9nMoJ
zCI7=+je#kiw@FM2#<y|13y+t}+!x~i7-Vq@^g&$dvFdGIiXS8?FV_}x`@`gwoA0SF
zA5%uK)A^>T1J+dt*Mln17>#=D%b@eiWeqW7n@(!i<Ak3*-f(!m5YnLY!G+kERlC#d
zeNK(3nL*f-_~~qMgAdPyq$~SZmZ>X5yL+-$Oz*&vJ*#+-08`pW{}<a|?Aa{iEg?_D
zNhmUII8sQ`)Nv?X<QZ6Brp%btRI=d@JzoG3x(lU#D<hfX^wR^)W+s0{&W7TZ-BjtE
zO-#!bh%GhV$8niDCjHlW`hO%q0byJO8|Y(l$iU<?0177Ad|>?i#F4CRhF!;m=0~i>
zrG&1et{)+%H*9RA<@>Jy3Cxg^*pADWhODgI{AGADleA$h|HJrq+B!Kcjq{E>SbiTQ
zm8dnruE8_@LB>h%Qd){Xm08YeVqaHTzb6(e+5q(^i!C;ybk8a<7*wk$2kMi;duxe1
z8m(xNMXmeRV2P3^uWv(_sNo}DE7EydZedG37K^YFu}<tFlY*6HrIMzYbZDx$taZRn
zu#@TaWHlQ+cB$Wg6jV;6TWP4JVqroAo4s9#Z_5qKnOGos8S9H->sZa(tl{1(iD!o7
zWmci~2ZU1+gZ%~Dnp%&RP>XysT4SRNt7ufe(`?^2k1o;1lC*p8@`OJ>l`szhakyP~
zC`lG#>boJ^%vLd#noz?bd9pEqCFju?RY}7@^3oqR`N~$fNUn~V0T;rLxicpOvrbCb
zSzHTEdg$^3?K%}9XtUOnYIcPWVNaI0qDX1%WL)3}SSfQiO8=cSM|Hmbj~1zyakyf@
zYx-|zO&XisK~@=2{Xr7)NaQf|WZ>g%cu(#0K48`&TqT!9T((#Ix~@eITvHv&NDSq4
z0QFpx4JY#o5NGQO|EPsb4fUbU&S&Gli}jAE|9~xIP8hJgY<!e=xh&exwI5;0vY0Nb
zCW215M5!Ek83*t1R@wJcPLnDO`I3o@2G9)zEGakhNdd3Jn-c1H&eX+2Hf{rkZ?*xU
zQ?T->{M?TG2v|E4ou&|MXUa3_cHKvmXp{`(0V@54OKC9hI+B>xE8S}jSi<ifd~dW4
zl+3$eKk2GTn;Bv;1GE|AQ_l2BbQ@66p$AoHD|MdOzbw@F0@hc~5vwgy?HCsYX*}Yl
z=j#VPQpC)%sb$jru%col>~~c2g4uc5%>p~eehSso_)L<EVOj*)7F96%*)WetHB}5#
zv*{n3g(ArR)!jWG8I2;UzM*;2+f7O~apI$8sb!y_=Zk&(<uy`s%k#=lUB%$dfEa(q
z41BL;7v~!se>qrcj(uo4+kEEW19xSWD9qs|zI#&(XmZi@g|&0n-LoOXXTw(_jT81p
zF2cd5F1hULWvzm8b20P!C%_YRbf(iu*pcUDG^U(p705z(P*AO#sQIz&_z;)<n7VQW
zhTJw)Ua_!Sepd-P^+^zzvj<w~(@Wh%GfCxTHp?$How^U{Z!xNHuekCo@+89u!TQE7
zCLe-_kea6puYN?r4)Xs?0aIY-46UU1pF}>E=KqG&`h~@fw_9MbN0krWWofD$-<P$G
zgRq)ga2T+Z*+MHrgA5+LG#^HI4ootWjrxal@HiUI*$tmwEVXNV9M4gtzSz3Q-FdFA
zZbW9(@$v8Zddbcr+)qrjxID+~SsOaG?~9i*O$?u&;ESy8o=Q<1FYObSpkTBb6(c8~
zHNR;wDR9<{(e*A5N2wq};`)RP8w||HI;SCERJ_ShTzv{qy>mrF4AHox&e;Lkh`TC0
zyh3}ot|BTsHtS{5N=3s6sxfs!MY1hoOee~rDu>U#o&1)_qd7MX*sWMJjR+G9G+9iO
ztVZ*epFW}T>e5a6@7bm_H0U&{rgo7sx_b--A8<uuO$}X}Np7P0?wVfYFS^i#3c2f2
z#E4;1O#0nu+Z~o_6Cgy%<Y&yF=ETP8NY{}`UymJeWQ*DRe}N8lw~X%S+^TuEd`GAN
z|8yKNnfJ5JyjOU^?@Q=F@H|bNQI;_Wq$3z-Ng*cllfv34HWm=e@o`e!yxZf~5Oz5@
zRlnnz*iXV-5fKaDos5S);n}$oXNVv`#O^P+57r>EcvQ(MgB{knkOHwUQW}MLSme}Q
zUN*%zv_AvBD^#Rvos3OBSe^Fz%|#QdAasu-{;i1l>i*lEm9=p;mKBtMCA5a!pwy!a
z_Mz1jO6}E{r0BTDb*-4V+zNE*m8-flH^Tr9*KT;+j-3Osx-|vnq&|o7S?t137cW?n
za2%HB8y)=%cR`R<&h|kXh@5UkM;oOh6>L&~S*`|N%PX)-?UedT!&ZX!6FzZS>xXzY
zx3LW`5i5YfMZ=j(qoVl0Qg!$AG%E>72a#ILhA+&&Y?nvG#_OegP`i3Ui+OQZXmKsK
zUgs_XmQqSq+Yw;iGnx(zqL)@D)JJHbiw9PQV@q!>teZ|KP7eE3qcq53>V|dwkd(mi
z`quQRd3kkpJJax$ez0b(ueg~<0Z@j&`V5!FtO?%*<?YT~Sj@6J#(v?yLXsHTHi}4w
zXg_<^yV#{Z3W8>J6Qe_z70cC>{$4B;nwYV0Y6UNiF2+ip*f^iG8us23@I>O&#Rs;l
zg8q0Uc66OVQ*}{(6#5gJOTxafNNHV@OX1)lDL#>f252mRfjq6k^jk6KwXACJ`k^|)
zt8U2(H(rQ=Lvc3-*TN=s##LUMNsJBs{bf$jfXI1WWFA0>tp+6-tTc=zI^R$h_Dd^m
z_qG2pG@8IPP&Mk~@YEef+dWrQ<9vxgm%+{nZ8G2gHh}p4-2mdE&v8#ITEK)v*0EMe
zfE4Ypn6Q`>9pu)l)JlYvB<7mU2KcW7FvaJq>J8tY((97&>Q(w!+?1|n9Rs+<l{H4$
zH7()xZteOXmy`5hX^KE0mH?D;=w7+FN{l=p+rl5ko5Tehl;RZtx4%L8!r+zVVAE!K
ztyE0Dp-YzloRO^u;e1X*12$JiMFM$WidKzA@(dnMqKCi@^hwwKV$%c4C-HnNiV$e%
z+*5?i)5=(PQ%>fke0qaoHRZ`#Mmr`QhfO}nZo+Mae9?qXkfZ&=1+fMi=udul^-U_X
zUpAgG55Eh!U=o<LQD07;iRwAWA|f$4*<}n7VzNM^Vz&K|=GX*;B)amnx<%&&AE=b$
zeE;aD5k$!%xnhHT-;5LutCLVrgl3J4WagOLeU<w@i64d?F|U$u(+W~(a2Bkj*CUf`
zB#5ctG-@mq2T_03EUO422x?Y%-XW@*iyPum*zH#9g`sL{`V_sH6Ls))aB=4B4CJTD
zO~Ya;ny!iHlb?vLESJHBg&ig3%=PYqF8k^-;jdgqTh<1!(U@1R8tm{&)a;4;KHbC2
zq346)UM<pomTR5drqr?Ayv`BxKi_uLKY~D*#^|ouD3iJ}5w;ScO!-q<Db73Ksm25b
zlI~Hw(~S%61(*<GRFrAP6RdT)1<y99KKUz5<UFf#Rtr3^oI$4D{qvJ%X@zaOR<lkW
zRgHdZ8pu$4r=C4w4SG?8Zb)15gsqj2?Xzi9-WXs!9+EPycVB5-2de4<akQ8gcRPxX
z_}Uzz!8XG-oh8Wq2<r04!3@7LF#85`y<@tBO%LJ1y7GdRoyiUa=z=JY%oxQ1*Y$I-
zS|u|@b<3PZAu#4Xi7iggA+tA}CuFk~^#-WYGDT?cZtOR9e(4vlR~K6gRXOS1V8*=-
zR#WbT^uHdTn;JZRLHwg(dOv>I9E~{2qVU7%a+>Ots#9K%+%Stzya6!(KeFB`D$aG;
z0!;|Pf;+(-8h3}_?(V_e-GjTk2X|?taR}}Z+$C7!?gY5K_Bv<ZJI)>b@b`O-`l@Qy
ztT`)<Gigc@DScpgIJ3_dzbouC=o*)NK~z}|So`$%_WinU4vbaVcA(N_v{al1eUw2(
zsIFB-J15f~B@D_UzS|w!GI3kDiKnSxfe~ps635l`m%PR^{i`0I60OR4CrH0U-_q88
zI;}|(cO%na6^q@1`bFUyjcHm1<6S-7$$2I+;$Wu-=SI7fb>*OcFw&^Sriw(vSEL-*
zORIkaEd8Wjobz-mceNx5L^^_{p8OYyWiWZGJNRc_Ubm35uCP<MEAzFWtkIFKT9vRO
zT+mRh6ko1PQvG6jJpDXcCvT{V(1vN?d-S=9#!uU%cR?%h*Nf1j^0oKm^WP9b9Gb|a
zFJ)zAh`h{M3|!ULzEma#N>q6~W{<~so$r2XyYpBI$D>~5Ar@)}rP(!Yi-}EuoP3D(
zonj*D^2&xp_{6pE{{8BX3n`1(;)$F36@r-O^h=&`n_g0QZ$J$jWrr#C3|xgh2rBhS
ze%fNBtK}EjZ52mrMe@8Dd;c9QBP1RDQybT86f{G9sOjvtcuEX?+?}*gLa;$am!Pt`
zn|W1~n>NFD3UJ+qBWV`183~#tHj*?{NqJS=pf$GJ<QxRt6@_Pk3Y6M=s`HE1Vp#b2
zlZKQ^W-4!-qeL2M#=58)py5kKBV)>e>AyYwP9gL(7Z8{OTfZ})GxT7?*3y@~Nm`a=
ztrS()Wr^1~&5^H{`kpxQ1Vq}MvSs{CXM(5EnlRm2U&i}Jpx#xJ-Hjf^^<kKMrJ-zc
z2Ix!I-ostIx1~FJbgLG$))@&R9f@)O6D#!n27KkTDEk%tBnGLrGHz%~S$neYda-KV
z=YG8UVxt7J1e0^jX2Z8+HSvEAu>Y%2-48foXI%D?9IsVw%)8T^$>OUC&|E98R<8iu
zfxpzYKMR&68*;RK%+FchJ<nZBDfnIr%}Uyw@he%8f6Ccy?C45g?)wM?p~tH7pZwnM
zN`|DCbXj_z^(@(PY2Q~yuE^2E*=YRJ`Og#U^>XYpuY9!JONBrFE33=JV@^_f@nFzK
zuwNQjcCl(RA44W4(Z8#BZwF))C`r_T?5@yb&}dLMYIa#|cZ8NggOevwK3ybOYJ~fN
ziIvx|7VxAQMAPxYr`P44s>^j=-;CvT6q?@Zwu|i0=@id5{15zlfZ!kqCKtN2x9@yJ
z*n0Uyyo<l(8Ja!qAM5Ko*4n!p+mx5T-r<~?oXqBorxdeaY0)gWJG)Q*Y;=UfK7iHb
z<FKA}ZyCy)Dq>#piA-Ucg*<~dkbZN|=i)S_2KN4`0>Zv-yB;v<bh^pVb;xl9-}6-6
z$SH?reB-y^NfDv_q@$OMJ-=qXaJ*DC*j%2DvarKwJ>|l`IIMTHOi1wvjG-larZU_7
z{X7%{GT@Bb*9yh&bQ)E;1QYGL@<SAQw4$>HtfetFYjvf&FgAXKsBPUbC;kBrRJ#kc
zd0PjnSof-_8@#qYd?8ak(rzYtQE>b;D>v=pj8BtX;@6+`Kk@G6XJh+^9Q%NelO<8c
zpFiSyEodL+EgesC+=wTv=ZCN`#bfX4t)Xb6Ec>f)v9q96d=?6F>C6l9WT5R%v@+d%
z*hINmjm3db-*<0MVNPx+&y4@<fj0%5@hnCApN?}jHvED@tlrEhoaSvS^tS7Z0Dy%z
zx`n=S^)sY`DJ5%l1S2v_ll$LRzqBgIQ>xefcQb;E1n%Q`-S0tdKE4y9D?BVP*lRUh
zT{7R@+{f<E8HEv(e|2o#Q5gPRcL}NguH}PPyC`QB(H!dTN)$iHLHcuLR6Qo~W|s}r
z!lt_h&)k0YYro3Jce{W3@@U#OE#CiczwdpcVX^*cJ-_nQln$fe%0Weo&{WNmfmwh}
z_E605))jRA+oU0QY);YXwmZHao)REoWMcu_xLpjnEl_fvDt=XC8&v3?R!qAcSA{wr
zl%~WGF&?pqz$ylv>Wp^(!TV%U<-&L&H1OcNh0@^m^ka?M<~*lI*cu;lW35WTfkZ2@
zT9ZcgIg1m*XTbbK50w?Gm0|!R1|vpNA~gq{RZ7YUG+0TKIg~VU@S84kELV;Y!;q5<
zU{bKAQ*LI-=HFMV8q$f!m`39BT6h*PuPp`aWFDcy7Z^O_KFD^NO~|GWeW6~6{ka>H
z1gC-crO4RZ>1(ONdW;M!kwdC6#@1~?ZY<XFM1tBwCcW)}g+!+NzalGU|0}z}MMdRq
zM10+ME4fb>!zoF)KnV|xiN%Dk5{&CBDEc|bbCa!Ho_Y@;aSm(Xcc#uxSNZ8<KZUcW
z>&RKMW@5d$pL^87$3E<nM#`%F)d%uq$BR9pmzVStn8VOhJ%KjP6}d4vO40e@0N^cL
z6&pd+ppvdf)y52`^^sW6r#a-3iUxyPud|Y+mCY5hA%-|)6tBBG{9@dMpX||}X+b$T
z1aoc-EE4nx@ttSN-H$7Etcd>knAw<&A{z@iAbHzo25=BIWypZHfTOdoLdP;S%zNIe
ztWW2|>&4FfS$5~`dgI@#05B}Ku6bA|1?BGE6nu+;e`ry^Kr!y(=kAkm!KrD8H)ND%
z&*Nw0JYVXDj@^6RCW9E7Q({8rhesmJB2%+wJ+_p*Cc;rl3^@Mx$vUs@WO=@ZkT4?-
zRMFcNteb~@)2_Y5vb=*_MgEWa{nE+`212zsJcSmVMWnqEv@@=eO^1irgg|LmebG4(
zJIgF5lKk3lw@8q*f<i~yI*s3ieI3M*r*)r00okZ#ts74^Xz)kr;jm-5X{|l8HgI(m
ztCM8Kus~-Ps}oI=vD3_4&P@~@4!3uGG1~s#t_FSX3M}^*Dq(<KyF@aGU_i*G*0=qx
z1Fl#(v;_Ny{`y#rXg3C!Al^<dFxMbzR2Y>rF695N<(cpBkZqF#Fq0z(SSl27=E;P<
zcr;5)DCP{FbwD#E0$07OsMMQXv2La|)~eGiEb*+*=OchdZkvr=%SDC^N8y`_P0`Na
zM@Dg25ldR}6`@XkJf<4^k(j92<yD=1x}i;iAuubzWPz0y;=An~MWU1KQ>3DOPbDk$
zYNH-08|dSa-gz~4qUZBS3i8l#$7X=6?4Uygq(kPJt6`@U6<0Rx;a#O{y^eF^Zx)z(
z<iu$Dm8gYe><YS=+j(@Nt2pLmsD^%|^*yab)g*^Oc**bGG^Yl<+u;F!!=L&t-URbs
zW4HGIP!k(1pp^g8XqjA~TU)Re_}v8W{zrf7N!b5FQ)p83!cPd6_;D(0vgUsJ=PB{o
z)#`X1up@V55I1bXxozjFMPQKF{|di2!}BgQJrbSSg_3(PLR&)JiUN(I9u!!GA~4mF
z?}@H$yD;IvfPet3JXaS;+16Mu+$#v5<Dgd8L5|ri@fRv^5pULJr^hCYE+uyh1W7m9
z$*xJPa-wC2QHkfnMu|iv&OiI?#eKXIDKLIpj!s8Zc5TPKE}fQF&kmkf&sWZ@u7$$5
z*(#_knV}99D{G)^<i!b5({=7bXVsZ2x~ReCh$yNghCAM(*4j|JBUGoC`>a!>kN*kj
zjs@L1tOfnlnkwxX@8KeUPVJu_@9OOPe~+q;?OKC$Nj{nA9v>#w3^Ks|OfIynOvch$
zKRNL((&#8!Fx-hs!dU!eQ`a4m+KXpk7F}`H>hgdLn;x%Kqp_D{mCX=lHhpa?j!Bxt
zEyM@@14~S0<3uSJ^RqcZVn8>jY>1rljf*2G4nIp{O}#7<sj>z=XrcP2XDw6Iym9tw
zHQ($5RLf>-|Dr<w+aIhr#xo#)Pw(g`tvvqnN@Paw(-AtbZ_%D4WB@{M532Kh`s{3o
z>s>>svJyFNsoMRsnpC0OWvaL}G)+YD&-oQz^jTU4y19roIqB);74ZwHeUE$a4aCNF
zD6Hs2lbcUSGVOt;KPrg~laeFy!T%PbiK!TnSoeD1a5>}Am7c^a7S+>(tGlMFfDVze
zJZCF=Q1IM-Ru_D!n4T@Md*1b7C&P65tUtmYZI|ZZfL3+<_SmsG{U>+{n1LF(8A4}b
z0_?$IP8&H6rN5MzNvk}TZ7wpO)S9YBJI%AB3W6%em68|zhq(l@>`0pzGycRvsp3fI
z7-dhcK%2CB$hjjfxg?Q=87Y4fk}b0?TWf4~ML@3;-|S6bU|K`^Jq?KJTUa_Pq7Fba
zQ(=tVo*WwR2KGf(q9219)!a6i;KbqOaP&)Ksyi@;tO}zSUxA~`LBzViG~vk5E7jOa
zw1cdFDS)8=5_EpE%Dsf6AJg8NY7$6WZ`6i-;Y;M*R}crVm`y0fQU98v5AsZ$Wu|ZQ
zJHakNtR?zGJr5DZ>F_we97&F6+53p7BDdb=GwwmiFq8G$ZLN0JSha{YsdUI_C-h!_
z=<gA($_D#d>&*-`?+{bWJK0W0lKq{!aYUXMqS2m;DjI6#9@e_=8K<4KRm(j$|LQ?3
z*Evw97V>^l{eJi)K~+3bXl2==d+)((pO3hwvS!W|54muOsk}VghIXEE)gSm#IA+~^
z0J%+&O-HVJE+0tw29-|i6+y>;dTh!<(VSB=CB+i>mGYZhkgd4jtpM?kN5Jb%=Shd)
z@VSRSO6efh8n3$Z(-<1q;sFl*w=sRXY^^}UcL;bFDN#Tj8uqgQ;2#>CD{zigFX4In
zy4AaLiT;)BBPr^!k9tM-VW`J2Q^H6nW%`v!;7G=ct87!2Cq$Y)3h&hibFr=70ASIu
z6PGKe;eaL@^uSnxX_cK;YG=8Ywk)QCP%JJx$IebV)5O36V~lPbHGnI1_p*v<^^Y1~
z+TS+VDpgJ+SKN6Mmn^!<$qAjmPjvXQ_RZUs(5~yTHEm$rV=!FL2U9W4zoKs3k9Xvq
znae`tyhNh)nelkpp;Zh46HR0yN}?7Y|Kldq&G5es0GkXEr(NrLM>T}B`N#?=RZV=J
zzdagcF)^PMIm=0vp|G{3<B*|j_c&iS#Z6Oc(3QSQz8*AAU*j?We}Dg>hcL^I*#B##
zYwpOY<|d0yvo6u)o+nvJHlxzmJQY`Wg9j@*6;aQUCRe1H6ANuc3xQ&1<r~}dOqPX%
z>=kV~6|@K@wOhf|6f{$xcCP99di&%)ESZmFNA;2bxc^t-bV>n9@8QeD(J0==6t?(R
zBA;tvd*6F)nMIvV)$HKmbqXAHQPPaW@KYBmGE9!^pE>Kg4UHYLOCW{6P~Sh3LIM^i
zeTe;KYE{6N2i17pHk>oj!}qbv_nb>`y3GFo{M75wgR`>fbmGzcWZ-l8QShMx%+1eT
zjF{<9Gf-NP0s(eum34-qVS;{=4GcAArW1LE)QoHTJ+M96=;asoys;{*KZYH;UTyF6
ztc6CCFEF0R=uvS!lwac1+@D@))%5OKfkEBxehxoB!-Ma%s)wbRRmkl>*Ftk})K+um
zR#nJnM?gpwsY|%V3{KWk^2K&!^1IkOoDfy62z^Nm;Vc+<v>BtU)q-<qUE2B0(~3j7
z`6@CUrt%D7I0YRBwei}q!=e1^67nH}=Eb$7=VyDt=><Upq3G1+|HymCv_$+bEF(cH
zsxI<*EEHoIn3~RvHa&yD2hfQz#OZ_s_hDDiK~@rhB#HPAojPD)iPCNA_2|+9L^@DQ
zi;_Mm)OeO^Y(?;Q*J!(ugE3VBBOEL_%*Jg~FEWz6h8v9M8S)b+)eit5$jU^la^E$l
z@T!bxURO8ztKBEsP#J}j(YHanc^|s=K^9oeTIZ)@O8ZWy?8Ziyux^rq1oWJ|y5$R4
z6&$b28CaU0_i#%*hyHqInXui^z;+AmnmHYqmam#-a_U+PunOn7v-(|7!oO`paBcoB
zmZlw`l5H$vgTayCNW(&OBt!?=!ybIWK2P&ZZ@lrZHvwa7@9Tyiq8ASEmsdA;Pi+U?
zm)LGI_q_+Z8rfF61~zsf_M~b|3K@zMN;Qq)C_=)(Z(rk!m<4;7OCyHM^mtS=bTYS^
zUU&+y)ngez<VUmHHGQp6y5eFjRq>Q+CJIkGHJKlP_glQebIrZ0op{U7I?*<BHpC+8
zDs<ZNkh?#0c{{?m1y%wR3YY?E1r-QWjVduK4L1~I6B&!c->fg968>W?v}j3I7MCf@
zlQV7ShNZL}SS(9Hu|+pj%=md}*JsQkoz}ye2&=9{+U=!<Xvrk`?q3Rn&3{31OIio(
zbAr(d4EaxF*&7&}V&8_74VjRmZ2N-3Vs5b9040c-yL>GL_LX5@^Hn*FBTVTANK94b
z*uW180$iHXhi`9!ep|a`({5#M6s~hI6Rr5#lnX4dS~bN*+?Xr;^E35)iEC)hQnWba
zkjuN0FsdPIliKd@4xL7av2(kW(KSBw?(y9GyxT)`Er;nc`HDQ;s}M0YXa9CnYF=$y
zvTC;v>SGDgc7Dndq29&j>MT<{pCHSr?gs%yM1_l77snK<HPEVL)n^`wY1pMT9oH`k
zqCrN|mz>08AJzYiKJ>%}yt?%Ka(Lej=)Et3xKD-W3D1L#MH(kXQG}*!m^#0~B{ndw
z``&SgAg;IhnfSK-eINcHzYW=$)eIt&Hm9(N(iR+KZa4w-Xm$&+tzvPI`yT7mfkVyW
zxNNh$XxbcG&+Kklkt?npwRzzcs9q+97A`sQdrd@`8a2mb5`$^SxR=v00S+@Fh6oKr
z5$JG`U*a=1WJV_$ps~k@3#2NYgjR@vV7wT)+e4V}!cy`-k91aRleR#2zD~AqwCF_v
zrN&pzHPv_w3QBgk$8O|3HBM9r#eh2hi8#9@s=OuSqEBA`KkLP?O5TEMqv08y))m|?
zt*o6i5j?gmM<cQ#i&dw@FY<*P_#PLJA^g=DP{9QN)6X(2O4HvD_>~Z+reO<RYu%x|
zaKA<CdOA&xOQcQ?X{bMh7*Lq)A_L@%neGY}@Uo&!9VgZox8~O!d1!Pn;U!u&(4l26
z8kxe>tLlIT$T{l*1skh{M>_Wp4?JD3Up+3?jXeTz)U@(yAnMMc-%p9p9eppb(Toni
zM2mkLncL0Q(Wt5{Ux8O!qf$`Rx6)u+tO}g-p(Zd86fwSr(Zdj-T_8J_A0ri4k+H1U
z7pSz<_$>lI#Mk}u$@l2X^i^J+Ut+l6C3M3wC6i*o%7NX5KXv}gnWEGFHJW$|AD4Xr
zo`QYO>}MvlC}_19Iut>X?zy6kAUIX8grCqim6lA4&MMaKf5e9ws&_Rx8~IGuPPt!q
zFWi37l-~783Q#c`Se0|M`xl6n)r}I<{?4-9zRN&u+lEP)T%Q*69p0Vb%$*JSF|b|d
zMHwEJh}8f{iY>tsd-m>JZSui~7C|h=^_3|*vCI~0TxjT8E;-T38rtel$T=OB`62oX
z?DYc`qe42Q^eURZnE^i}>5FwJ?kiZ-O;qk|#;9oZ{}SiJFy>{%BxSAa?y&k&Y^t`-
z?79|Mk2=X7Bij$n<JBah>Q_uVJ$!wtF>0va0#zdSCUu9u*X!k~I#SX+pz8ce8P}{+
zk`bQZXtL`5G)NCI)|mXgVs0{Pn=@5!U9bU{-+;&0r8L&KQbO=<3)6=A>{c+|o!i#Z
zBYOU2{Kn-vf$qj_-`Pvh(ce2h_m%FH$hNVoW3r{&Yb^CdRs;dDKdaC{@b=II<?63X
z9R|nmI@p1Br06`I;2|$8>T+|5rRHCi2m>N4+-)9H5Zyi&o*CilZYn41y;PcF&YAqB
zhq_;ooeVyg!T#g9z~;+U{0+|A{Z<&Gis{D_M5pKiLtt~uFyH2bzdipu0p9NB*_vyG
zsL#(U0@I{0QhJ{D`wwLV$qooJh|(20?p0*GL{$Fu>Swo;1D0xU6V~Qf*$tJf@};))
z)$Yn<!zcsFT|HWH&7*kn))flKP##@j-tThA_oD(N8tU8LHYP(4#+;za7US%e=>Q73
zfmRJnyXtYM!TI<`J*`^rfvMx&>j#;j$0?g^DSc7h01jY*9dFC$pO9ueFae#GGHZU;
zHE@}|n1bD^D(TQWu(Tan<_vw2C-`4mG3@{Dv)cBg3taNIEVnp*s@WV+7?G-ntoq6`
zJ)6CX5iENGHqgEN{STs$`!JiEA_jgvpAylscV0c)5Euv;H=m;glaZ9#y7A@YZBb?{
z;t?>he2tvcLSQ>!>@P0XfZ|P@i>BVX+ZsC4G$n(c>si{8k9|k+c_*~dPBVW9?73Kb
z$+5|c#X-Z%#5<~NXt32(*HmA#2AL%nqQ(9mm`Ld3c7~S8>UD%cE&g~5{$L`0$s{AH
z{1wVo6jYu#@aKnR`+avuyRVyiNfrB_{D7q#m;24FzrY8c2Z?6(YUn~?RPHJOYhK$y
zD?YbP-CfNBh_EV@bt|)hY3Qw4U+;4|Sk{=PF&?HMbblf1sI#{Xa$I2uKI!4xp9wap
zCDbocrwl=+j<d+~i^7B=MVPb>W!~TgWq$x9%8jzED9BYJsHAg@8q3=+A(c^66X^FO
zp2$VzvT4)b&v$YqAEkuSj^D_(qh`pNcV0&-x;R3(Wag7a$*`T5fha)-0|uI2^f2g<
zS?>{1=(UYWS&?Az^E`U#(=#fV6dsn#AyCu({-ISW^8Pqz17FfrUR7-@Bm)0*bX8a0
z&c(o+t~O!R%3O7y;2;`KETW_?H6?YgDUtgNQ?tP~eMl3EYLamCS-SBv@Wi*1t4wHZ
zUiRJEA^psv+Ra4he8pB_U;|MU{Iny&Ze|l${GMWK3~a%*dpB!C9%G4vuZ|621RAjG
zlFI0GGd9iuyxtag^n~7ZG`TOF6OzX_>Q%e|Zu?+lL)y*w79Dbl7~CrZ=C>*y<ADe?
zMO<^QAyv7s+qu{p&2KwH&pMb@2EAiZ)_F$w_N%n;qO9y(vo&iu=4(}%;*qRm=6wT8
z#lNp>xVB8+4}Z8X7(pozPz9Fi9@rA9qQk#G5(*abpi0wIliuBrBCX6bXeh-Li6W=i
zMSRcyzMkyL86Q8YRineGfO9%wWA4LudU25)<mA>&Z}p4k)~f5!QbX3fF=|v{^2NYE
zkjd`~f1=FClRBhyP@azJhBkS1&Vaod`-@negPI@$&+(i;w8~glb_Xmh|BEe2LW7;g
zujo%?K>a>b1YGR)9BX_s(jnW#ILF8NIsVrj+47rWOl^i*Za>HPm2+e!zr&(|1QKlZ
zbwtBywD0X~5GK<zoC8Dyp%_=%PK09`pAURw$?biB9r=x;LJ6#upXYda{UV)89p)}8
zv;nQXU-x3aDo-=}9mpZ>t!{9A3M|<i%XYQ~-nmrbH@iN@@8PM_`d{kJsm%8anpSic
zArg74JZtwT{rZ+OxIc(XXwFY7;|J)K5YJrd)@?N$29hdydn&y9jrAz%UWfzX%bW!$
z)+lc7U+B=sCtPbIlW1J!p}tO#RnU`E{FsZXyymVe$H@UK6pwvW+*Dn_u<yFAdp0aV
zgGBgo36@-#kOdN9>KN1PmGRyk9XN}8f19n+q<h|!#Hhk7kU5o4P^H-Dp7ly?1a_EI
z4u4TrDw8IUu})E?o?gyejZT(&wt8_2cdI``_@|N(c&{*)Ba<4g*VBqfBvNUrp?kff
z?=NR94)x#d-3sop!KB|T`a_VUX0-Hby>7RYayLF9$GNWvt(F{pZFhqnl^9*ds~X2j
zK0>Y>J4?5}3f+35(u~IPw`QMA8nunXFcQpf<ktpjRdZda+19<XG$$qLl>-)<b}}Cz
z8bUlPIQ6&@`Xfp8OnyJRzs`oKJ3}cnd|Q$bS!?L*{J`Rl635-@6`X*spC0zX0xdo>
z4VAWE>}deSkIV424~&Kbr~fUYZI-%s?|WFjM)9>TM8<sZbH&M}0WgD4kMgByJR7sb
zNoqaSh_p9syO2HP>s_BOAig;-V+Gra0a1x4+hJq5ttHZ??Q;AfbYLv(1G39_!xOo2
z`QhTl3v;2vI{~ml@C;lG40N?OxAM^ydB^+8*l|sy8%?=pGTUF>WD95azH+u?I`F6D
zK?<%AxiRNH$Cyl}P~W4ZZ&*tDaiZrqAO(_|EIWqeF)Pmh$<mOSB{|a<8;zo{luaYX
z1(@-@10E}r{4*P&q{JxWWR*VST1%^rTK@3AI+SLbqt+HO5~t`+8)(np$ibE!9F4g=
zENQnIvZj+X#$t!TiDR)nx9V)68K&2>;iSFaRABPz;YnAWZ|%{p(0`C)hE63GI(c>P
z^}b5F>VId=2et^6Ie?i*T^df*B|CJCpv^ltE_|Li!C)JW6R{41{b!tL$hiWls&ShZ
zjxn23B<YieEG%8xrxHC5%DNRmB~cDJbdkOXdli-fB3Pid5*0R5YRR3Rv96U@A9eMY
zt4DlrnL}g14c;XdOWJN(Xo8HQ-1t+PEpVg9D|hq{l1N16{7)}r9=s*{4M1|LNYjX~
zL;f;|UNP=UUAm<?1&4n`Z%hSXQAcf+22Y|BWmQb@!tid#6bDZ3APws;A<4D@>D3u$
z+jzEOw`??1X10NQ`|p5**j%K#`=_=g+75z5tP=w|E7)|Azh9|oOG302!Z>C7-4D)|
zpKB)~sm89R^GD>?&02HDr^o&8(vBwS4z{vVpRRJ=a_BwkBpP$VtD~gOUH_A4Xj?oI
zLA?KYQ@@&L42nFf0Cf)V!Nc>CN1r^uw9yRI^f-Yi_I*~oeMuW=e}YI8{OoJXNkV^e
zr(?ui(^nVTL->#LgA>WBR3e{_XmmTe0%u{_wO64D?as<phdTLaR8@{;pK%nf?S{+b
zXzv9$;b{Agf?X7dbI3gJ?@`JtEBQ5k5!hj9*Q|D;U_V0UnEjpGm|FO1%}nNy-@ulh
zPjHi!8_*heqaC_l(mZzR@cI+_sXNy`6|h%oH-raEaT$gk$o3*MwEd(!V$_dEjE()G
z?hXVjYir|od}*koqd)~h1wHtW=yI~?c07oFj@VFJH!F(dJJKan5=Fu>a%FtF;VdjY
zSD2M?b1j>FyEptcz{@mp?GfX=KO1BEL{pAQA_v}8E;89(%4h0Icv61ds>}3BT8U#f
z%}V9EUJQ+5GPDn6rT)NADqm|aCv8vNqa1o36PIsmRu6atUO3Qaj7f{LLx0B3I`p4N
z=YQ+W75U5ULRfpXiIi}N5m!S&;BZiV^XLX(1G3uoAO@=H4jW4eD`bZT{qrc|UJ)8m
zl>eJqM!EsZ8Wo3NC?B`x2@2#E5G!=|t*Hrq*8&#?tkAkAwXKqW!)LZ6xTi3_&-4^Z
zq$O?|J1W0asJyhB&m4};NQg9MgD|pvo@2d1&2?+GqA>keN;PwIUYw_Vy6!Xn-mDm*
zzu`T$1kC-Ke^yeTo=@ETH4;*#?ektiUt&n(y+?V-yx?u;{LR=v$W<CQx}GS-ZYSDc
z*I~KIxuDlk2;j!bx{JajXK~GxLQ>KHqnVX#?8e&5lm5a{k`OSCk-u`CZRnud*hpH;
zGpa#EZkl~Udflt&`@;<%j#ODQxpQkKih)~b3DMm_Z}>Rs?&)*%E?cLi_OFo;&N?~@
zbhM@fmyPlgaZyIAuCDer?QcrTIrhgI&zRP0?UjRjz<i`s(SC8X8?RS^B`@Qb<eB${
zs&=g&sYYcJF658+^QtQ<E(~|HZ2~}o*@UoZ!#V>83Y8VX!B3<cdZVw*&4}>*8d5Z(
z2+d}i!6l?xW8QfN_X3pURO6|N{{(<#hi^zqN%EgR@cs0#SlUU(OAH9d7s@ntxEvcA
z+CDpMT0dUOZUd##9BXM*#S1phY2`}-fU!tEH$1?j2vQ6U!v%Z}%ocb_d3<wz+h|*B
z=P?*WVfIgxG%gisoG$UdlY2#SN~odnIUJNGmuVUzN1RujFQz_C!~_?S0}Rnb>pJ1(
zF|3vtSekGnA^b)!#=2Td%|y_V6mIRU6Q`Z*5yHv@=UA9r6oLXl)&A!sGKa2bUj4P1
z8a@&HNPUvfYPg|{NdKs|U3NZhLNY<Rx?@x)CHeuftjzk>b#0wlE0<n#QsAI>4DOxn
z*y*C1%05rKmq5NI$82SP2d`$18Ws@wbfY!0fdB7ec7!zi4}`X_G=t8J-c|Mi{x8r6
z1nUdQB7_Q--lyCZ_HoJBDs&C<axcI|c@hnU>5YkkDEr!YJN(wWr8g73g)lq|SzQ#O
z+8TrI4gXp5Jf74OuC?qIX_rl~u+s*Ab}w6f^G;HvEwB#9rm5Vv<mKL_%<HMV<s<c4
zG10JTaZ1bR>6Hh;KMe!gyK@Y*8{|u##~gM^(r84*P@3YPrJ9*;qHBC=*Uq|*OU)U;
zX^1q#{#bBb{BERRK1p5+!=mJ~Z|VQ7L2e+22C(4Rn5Sa=y9=g*y-FmQtf|HTiQ>-{
zHVRvmN-Ne78%-!@Lj5EgNW}R$|L&szyaJIyeP`=0ML?eaeI|iKG6iu@f|g!235Bt(
z=hjQUMOSyBtVjof7JIwBC_U$bfNtpmD|NPXC1mAEHuN-@6dtd@!>3!X#aX>X2t0#3
zAuzx+mOT904l1$H(2h)lRuCrSOi=me{Ppbq4LJ*FTU{3X)oN&1@D6RltFnuB!8Xil
z@AUkWp0nrOPl-kY7QWSC!1R7mIQ`VL<E#bI%C|S~yo@D)D;s{NIBuhJ9&5!~`&*9G
zTamUrAvQip%4Fq`;W+h&7E!WA;0;GEj$)YF)|+2ThwIDdoZD9Pm2h?zy#U1FF;;Sd
z%}+F9pso3{UQ432a3emDATZmcp^ec8%d^q3{{V-%7|vowz|K#j!8=Q(r<DbA@BdGZ
zbbbHtyS2DxLQXpvcqvYI<MNvG{W!JP*QAb;+p=Gu>v;!sYzlL16Q(sI;{5W&(>O5T
zWgUpghn=4OWX?yGQ&pb=!!f23q$a~<8{Mikmcx2STgPe}9GUtf%byY0^0mx3pI+C#
zE2x>Yt)l+e|DB7iEi{PwaPjhT6OR_g9Rk#Jy2s*@4^+!>REf<|7`K>%*e25!FtzJ6
ziWwVp=#|yQ>@ozJ`&7ij4U!AqsIWrs9Ua179^$-iFH<@^;&}uG!GE25+qLJKwL_8R
zNn9gwofi~43}!g!Vze;(`Nvl=7B;o`%Ex4s_nrqMQy!3Z=kot@t8)y>Tbpy$0eu`J
zstBWZRYk}LK;)rfYP;L3qyr<tnuaFLI*IhgwBMmsCd9g^or>8LYUtn6>TB~5k$u^a
z=!gJ8X<qXM)9xfsPce43CssfIEK<Xv+|0hNoYieKiL1Sceve0+`{OO0EuP1}WF23T
z)Zd(5GV%AV-{e_;dbHi|QVYOV4QhsCvyE1gGV{vYpLTwK8eg*rmRWNsTOP8`kyNyM
zJk@DHaw7Uq4sD?gWo7<Tb3FS-$kTMiEHhT<dT>Z}6}i-jdD_%L#G&?X#RrVigq5N>
zYbvlTa!B*SGICW>Iw@Wp`#Z-;$@dI`mx=K|U=7oM<rU^(&tNT0>oq99fX*U$($#1%
z(#`Upc~Q6~II(H%J!^(VQW1Lb^agRGQo*UuOCtW$3UyjJhV?$%ZCAJQj7-zoTnq}N
zRAyb48FKFR&|D4#_4Z4y8w7G%j7y$n5pE^z#R=~<?0*V`#qd%(a-Wd5+5_TikwS1=
zZ{u>iXd9nmx^m&w)e8XSl=Oh9#42l*!brMKkJd`YMg7syu_VrvS^`Do$y-W14PJp{
zN_UidN96eh%d?DU{Yvj3sv<tYP+n5n1bSA!92cjZLu|UZt@iQLA;u}%ozK0yHKI1@
z78c?zN%hz#PRD0|f~3B-e=Etsm@P#gvKm7n#?*Y4G~4g7*m&honF=^JWH@kJYtM@s
z*OKYGpn`8tXW()f8?@y^T}>I=wL59QFzkPBZ1+0*v#Umb(646wls8$mXL0|M%QRK^
zZSB&_)7F#Qo_>JSR#C%`i`d=g?=$iLgzSIXw;q%P72%n!*fPPZd4kLevl0o)<Q)32
zdyQW;GVw-JwI~Q<fFfSu9Fl_NACrlz$P~&89N2uwakW>AT^>iP8~_<9!YMmUA{8oP
zG8vI*EEl@!(h-SKnbgRqVTljYFb3h;19=L!q{4ph2lqKg-<Kwc*8Im%f9r8oXfx<C
zkq@IVW}KbwSh(bMztV4cpUF><KvlVo^G_v!_U~P$FAuUG!tHbz($qFFcFB?x3(C!0
zJZqS!2r*kmOm&P3C@axTObOBDmsoYVNWxF&CDeKJ&RFqOg+exE(2emUjUwv+^FAPo
zd65H8xi+jLEOFAoZz{vCoTN2m1zCmC5i=PI1OrqLwkp{CISm0f*wiRQ1t($dAJxys
zc$9)@KoejMUk$t5DpWU062MwwbAI{;;^DzrL3y}291ykr?RlMG@@*Z5NK49dWr@b?
zO$$S(UCeBJbKRr3@@^@}&Mg}u(N6cCYN8?_-T;x9k-_<=1}9XpvCrl_pEi$s>C+7@
zsHE6}GX|jqOD4=tZ7y*A;5RLdbDjzSbi|&Zdm{9{f7327B=AqMB7fV*g+GRTCHRf@
z@Q-RTmFZFerZvDG6D;XL@71k^G%(8h*`36hVfo(1UiP!CCWja;VNAX5f)>JKUsx8S
zQPP~%4N?oTwVl`Ku(@Q*-$7BoFl0dD-Q-t1zma5nbmy0IS-c1<s`|7Bj@JB0Iodz2
z$<}-mXjw@EaJ0_OzP-&w0?1St?)UB$)EHS*KPW_)v)qE^QXmrEDxR=PDiPJ#49Ql5
z#wGnR+Xv;?Pz&$H6wO1w|IkSyFjQoHrzK9tL?5r>D26PJ1nwW)Gs^z}l%hYWV~nSA
z7>gW2p_3i6;t`Cf!2^?0WIo9P38;6EPc!<9vVEWDh{0R|PqM8WBcnwf6DK@XRalLg
z#z65IBYW5OstFAaKw^_l;yNZ+APLbJdOVeeMNBLeb*X>ZCI9zY*;<q)ba-oZd(=$?
zV3Z`p7D)jc=wI7%%9K<`NxR0O_jS;nq0;N)Pra&9Xi5PIPSv)z!6<Xy1oe&X)hHdN
zS4Ye4uUtt1oD?bZOn3u-4XRp1zL}A<cC}mm7_^0rzpb<)zNHw*^G&0$jMj)F9@NQ~
zp`7GY-g>5A8~L~M`+uui!4Is4qz)xEvFFU0>SH@y17b-<bmA5@8D+bsW#Y2#(q^+Z
z+Bi8X4pL~U%|^bBKh92(Re*%3-EfrU9+0Bp94V!pnsO3Fj!Qq{ca4m(NjD-oablBF
z+UXVh>u@eClC@PG(yj_>ghgV)uuT~lbX~c=!7*Ey6SuvYeiwJ-`>SO5r8s1-huoJ8
z7fY3Px8i$vO`g@?+nr@cWGA&o7QBNk>i%wXDxVuOOE_qAo6;OBo9x0LFjTCMf>W+@
z$OgXa>DO99x(~Z$zGXAcs-Mo>VAdJz2ueR~F$87BpPA65-S2AGP@;ljQG+Q9XBlUk
z`Hy*umxEhgsrNfU5U|9!ae(hI>jR?^P#86bmp8cURbWb?ZaMc{1Qx(EsI8AY*^oD%
zDI!xoCr4zGmpOSe{ayJ-LS(`ClKt4KRjr!zD?gLvk^7l9R_ks3L9uaonow!x?Sa!0
z0g^5H#|^R(%$3<N=<l+AQJ41|nrYZg?;*Cd&AP%y{e3b4M9(W{g(;=}gT~u!*$qXf
z=u<Am{j@UKp<h*`1F*xa@kdg{-$kvup4d3E{!d5p5}4Df!b{_`ufo}kf7|Fin5Lzo
z6ZW*AX1_gi{XnHI5vAW$dqSW=A!D{Jy)8fseedz$_3|gT+Nxw=WfXoXzg<`+&{XCW
zL+^8vnGaLf*$gXX8v1$@*6OooJtDsS?~a7n6uXjF2Rm}qhsAx<Ox$eyAp2*{Mv=Ui
z%>J7j%Ji~yg7w=|!c&Z`c2&3(a#wh)f?7a*M5dFQQ|Lg52T$mtjOM;O)Jd8aQj#OW
zWMx5?CJiUW=7AV_*!(W&j9Wmz!Jh9P*<*Plm!%?PjIPqpx&B_Yo#(M)0!7xLijh<E
z424D_2L9r}RUh(9nt1t3?*n*S#+F2wlT;}Vgd!6#GLi$i+}*8{nz^e;w2`@3Cp#5C
zqov1pGTm=b^amykBg-gbVMqZ078BOC0KzKUrRG~jm!Qgip7?~hm`{k!yvH&@O}z88
zqivld+Z3=e%C;b~;4@IZm)|K=3IT6%p*ZcvS1*71EKLyO7(=ud&z0X@3=)n-Ts`B;
zRQ5M!()<=d66<mQPSR#dq_rr(<bl(oLu2f}ZT#k!|HUP6`%f)1156(U-&yH2MdhZI
zG-re?*uFrnkw5`ci}+$}$KDNCLwg@!p_)BamS$j-2;j9Cu|2_&1;NsxWCKZp0G$yl
zQuW{xPrk+lTz|gx8Wf=w1hv&#H(IWH+ENjIRpnWkZz`mHamGlD$0=z`Qs(UPJ9~e4
zZe9s1y@yP7Fhn#AO~zPcS0oTBu~3LMOYYfnmPiR;ITtq}We$5<4|a|nsiRTf_e@qx
zK|>qQS|R4A#Ob0d*e1(ezH!qmARzszxUja0Kra-tnVlF#JTW8eX|B@*b&!*So06Vt
zahyY4;Lj_uf8ZSCSgWk(J^Amt(BIFC^8FJF8Fa%`lqvvgQ<+B{%Z?8RGUKv>>C%`|
zlr&aSkz)?p^lLiBVxx-vcbaBKuEso%Bhs}`M;M7d<F<WL`V`V+Gf+{gv;}gRO*$tH
zoNLo&q99bh$uHcLougj)V3S-?Kf$^w9OEio-r{26(RFtcMCp0I2fGT!V-H79y(b!b
zLs%(@Qn`^JNJyPm%e7f+&&SN~P=+Btd{c%I1Z1wV6%_rS$+-W2Cu0*h8&aeJdlwf5
zo;^zvXR}Rvn+P^tj9n0WQ^lvmSVlF)V-c-}Su1XpmV*jiMJIQ+%!Ni{Lj5`j@^87O
zj?F3SCV5dS_d`eBIoR8b4JnmJgVo~(qy_I6W!l9|`dBz}N8utntum-WHi#s>lp&=P
zWSc`a>MT00;fQ2<cfDggI(Wsil%MJB`uY{+Jb!N&^X8;dbprFk=6thj+Ld<@?S{UH
zl3wCDn5Nw?=O#VsHwRLLvL=VLvs6M_JqZUbfAf5P#BWAEg>08BxADz4Mm|0ls^+rF
zUoW;`cY6~T&gWkR)t)9+htC5I@?OoI#vTsw>4KN^`L8qatXugv5HW(3o@?U2`73Mr
z=*9gi6Vv4OF%<}@9(zp%YHBw96j<Z#V_$HOZ;0TzS0+4UHl=fx9pl2sl_}{e#o<A$
zt?N}GWa2KT@Qz0fpUFeh${8XGO$XH&FcglaWoaC!;^txGLdt)nookfwxQ_A6cMt#$
zq9k%Z>!1qfjx&Dl0Y;+$=+i8s+-!RHALz9phQ!$eMk_j64S~q%csD!d^@4WUb`H$J
z%^t=70!y3B==1;S`)8T^;kE?D(#40*zD5a)HIsUp7d5RUl(#eFe#K86?)VBO045+e
zk5pgF(M{*xeeK0a4itw9Bv4eH*F6fh17VgI)Txc-Z}#c>a~3z<i=_tsvQOEL4RpWR
zdVAQ<O){$F=`7Rac>$mjB9l_uM3qKDPInt!>W4Sqh)^!_F*g(YgDxfZ39b8{g4rOB
z51eGQHypCMvnvO#h;Edjh~c%L(9Gv(tHO+=jee&z(?kfgNk-`JUHeKmT5F$iKo&F1
z+433lM2249W=usdDiB{%IY(PeIrQW`FV6|{ybrYF>?bk_Pti<<&nNNKeE2AgflE-W
z1R-Ll9!43-6F13bp(Lpx+k*TwgCfQdKSv;2)x8BzUA}E<@Vz>nhAprk?+h&YqP?SL
zUTwxEgcNlDQ6|ktuE<s(MDV9<7{(?som-S@IB$IDj|Za8>6;nKP_3Mx_ESbU!t(xA
z=iGb0YC+u`byJcpQK`jyw*uWQoRhj+mzvkzmWUJPoA$e?C$khml6FH=G`xR1#7><5
z0vl>DI$Z2x$N|!ZzB3pd21W~io_N^HQE}IhwO88<m|S9$E6ufoTqt1j+Psn>?xYc9
z6{w`L6klBJzBz>?U=1el0xCPLvZ^$G-FdagqqR2}J#mE&U^BUBe4lhqx86Deh$1qR
z1vOSTQITs#nMSfY=*DEYh%QLh4UHZkX&cmV`*}{(fbCV6uvo<YVdZ&ZUZgNWuYyDu
zK5cx<I0F*WN|>*qGSHZ6#~zvXwZmtxCi0I+%b-yQS=G^`_b11gZ=kiyW|{RvmZSIX
zU^xp3eRVCNWB~q7+-(}zxmfj^*rk78e@pjZEVvd!Gz8AV<3icv$Fzne%(x}MrSHj%
z3t#|@C}YK>4`S1-Z3!tMXyEu=`<As1DjYMnZ&dWcww1CxkKKgWp#PN0q{f~h{b8LU
zb`Gw6QHMQ(c-W@0E`1CPJtc_vC{nSFwou*88(wFf<Du3)mD@B3MX!K%MH#5|a%!nE
z5iOo*R0c~%pVgl?V5MDsMEK#=ZHbtOHl&lkkbcpZ@9gvilChwC8^Zqy6ZL<?<jcfq
z2m686R?}pUnG|$d&8datQa)SC<*Z~AR<BAP5h!Lp$1}e0m9&8?n>pJPBJPE4of8&9
zrab%#>*0BD&(>rle|8+}w&V<JrLZWU(3B9x%4W^z(0`c`n`HUAO@_#|BOxLeos=%E
zHjn-{MFsdnL@qN#97h^kdXRZtbM1Cu9NJh7VtGnnU<eGT?;kC4kxQXKOO#_|QcxIw
zsjFaT!bH?K<}a2ZPA~5Fb@b0i{q$k(2)e_Tt?MYDrYkIsVUQ|?amDwP#(im_vgAZT
zn?Zws=(N7ErN6lLa>sFpb(B_jxvHo1H-~JW@#Gn}mPKfp6ol0JwQ(Te#_)~sAo;5-
z%e2q^Qd;q>yl!6vsCuidr4bq>Rq7z8t(hhFjVNWTNbrxB2~*1-@3WpQP?avg+k1sd
z?nTxufr9HM3!fuN@G7Gl#a#=VmM<eLlwj1Dg;u=XUjCQm%XA#JF0j-nd!BAeo!fBt
z1OUtZwEmH9?`=sgQ&h0*maW{Yr+GPgp-tEiYa#H5=#l(}GdG;T=+v5Bmoek}>rwZ=
zB`W~;zuU%Da0l47?DO&j<2~2m;*#|*leAu8Ne1*y3D1+Wj7;Wo_=A+(qZ|wcN8|N&
z|AC|Z&nL@UGrD-v6BGBbTs^(f7e1M8jQ~?IX<!03ZHU|#9?o~WMJ+#DVxKjKV+#`>
z)=fI6_5xu)4u#bQmwT6%S6gd2?Z9RR-6rHDP5*chd=5YVpUWY%VU+0~&=sAAn43?F
z0#tWRG@h9F^uarsSiLo3%Wt9bQY^_&(r1mK-XsdTd;%1g=6#aVxv8v@w(#xW>2YjC
zmVgGeD;wD?+0*4GU%}2=iQK*Pzx8s8$R^yW)LQPPz9A5nTU;IH1YCK%^@OmMC!y(|
zb4^N8{LJdFqqq+rf0m~Y;k6Sxou#&(irRNCLXA3b5ezXz0=u8=Id~a`w|vb3gT#9{
z2`gqbhkhrw$S*6sw^%ylZgi$1gL~={Rx|{;F4!g_)0E1M;CrE?f6vTnXJh0<>)8X^
zu*<K@Zyqa$UcO%Nw-ydmrkqHSP{gQN2U5W5-r=9%-uG4%P8BgOfip~#YHR0bV+8)K
zPI>=Zo%+gikP9TFo!z33sP5cxuTHRr1C#CSn_5049THpXJ{)B^q&++w913t}OevM+
z=rc+`vP$B@qEq3H--rW$i5#wy!Y{ov=ojMp)8R_X{752I5lM<__P{7kvC&I{H@9Ej
zG-#W+BVB2z&bpSf6$ZgrP;v~yZQ__kZR)n?@&peK3?5NRayb-sYdYWfFyaK7uc#<m
zg#NV``O%=0-qZZ2^}sr@GoA%PJG>yIbuZ7ql@}7fVz6C+@#icLeeHBis{UJQ*>n;Y
zjd~~|yT(3Yf#JKaBs;&UtoY}06<Lb&ByE6Vg74h-m7)8Rt*7@mH!Sy%Irn!0vouYj
zWu9dm4TH4ohm_WW4$#(l=lQmj?UVy3>>*HSe}_(E)ri!zV;6MlaxQ*m62#do@cIXH
zWdL?Y5?(vtgPAI9quHJOe!FYvZ=`6R{lEJwhMY1bwkvk9bmv3f>xQZtDz2&lo8uFW
zRKhCA7Bc8o)n*%fP%_NPRRqU$mDVErCwjTNGEw!GwezGMA1P2!O<2%6(%^pnOd_sQ
z2U49Jxcmo@`u{>3o?sXt4Hn(*J%wpEm`S>pSxdVGtki<xQ-ey3tCyrrt5RMXSF}Nx
z*Y2OYJMN+}6NR+koEm`%l?)BGG4Q*q@_i)V5})JH=8+@Zj&^G;*Hbx7YWq;ybFxM8
z_x@U2nV8|OxMOSDckV)MeBD3+Z|aV&@4zx8T``lU%(S&vA$wF-A!4C$8J_mn(G~pM
zQH*^vlye>UsfZ34r8ZF|5%aV0F-a)@u#H(*y&+tE?0q^sfI_w$z~WXe52NemK-t{t
z)t+J6Ty4Aild6;--?}7kaP?cWkda57u?k;j5siOjhKL6K=nD~A_w$Y6pK}|OKi04<
z-F=Zs)x{T3<-;muNpv!wjM#kIOE@2*x~v3+7^9E+$zb7DABxg==G8_lljaR8_82wE
z?}qKU!*oS6`sq31834B-{%l(S-6cihS+!aVWk*z10JN70{o%^lx5AgblO)_~QB&8I
zgEzl%GO@Z35AMbwkZR+6WZE|Km+$&NwcVRuK{CkZV+hsTox^<bE>SmFwUw7s`%jwJ
zzEx2L74xm7Z#V4!L87`b{#&;?Z**(N)zsQ^sPIP-rp*_pK$!D2n0kb!i%6u~YX|C-
zioXT!=F!qZ#>Imxib9^rbY*b%U%0j&sfb?=%UE<jNkjzNwKySvOp1+Kr7JO0Y@+tq
zdZWgp#cStC=AD?w4oYH3h{T*TfegeFF`4!1Z8lU@>SUq;77jJ+Avei%wL&hxtlLKJ
zw;3J%8@4Z384Vh(BRqrl<Cer6T+Ed$xD*UW-T02L-6K)krPS{NxpjF#tRu<hR!WhR
z=+^MkXz5TsJ{(I9MD+o!!>&5X?Z_uzDe$BPA__J|5mHKZ8K-L>514#|?}f)D1)Z{)
z-jbPwBeY7mYc4}X3V+Ut$t8}2$XKh)NU39{+NQ@*h#VcrtycZ?%5OqhY2dHRQiDTp
zMBf~yaY)>57*G;v@^10a&N;049Sq%$#oWYkoHcq?%61F<UFlp~)6*>ugXMbpVeKI2
zF1AP@bcjDe$tio~+_|s1`08GrTbzw}#N9woT6BBvz<W}|R+8#5C>84?IcMN<0L}As
z5fq>eVc4g$>e4MQ*v($;|6)(dUM*<2HHaoUwW}jgIzr(q?;9AZ_~%!RP#REH-qA;C
z&!WJsEXo`Y0wMk)c~nv3ai%srREA?B8VhO*IrF>>C>AQ_@FVvRvn21>l66R?90BRf
zdgm)4g#%P7fC3rX=`AzXfuCacl|@ve2PP+AOtV8X!da7&a^=v@y<a6eQn8U^>RK|c
z`r$=nO$@Z+YIMw!fXey`Vt^J*k`QwlYko$<Bzsu3w>!{mbxi?{CbuXKNgA7tz6teH
zqYO81mdn}dM~Hc3>ng@tSgRh;ezE)}0*)ofx*NuL?sv(r{ih%YzbB%TMvggk)3Uam
z({sNa>p+}I?XJ<VK^WmLc-REB%fW2Iw{Xo>hZMhcL@3Z1z8CKK9^@&~8i&P?K`TZ$
z?vpD!djQg=eWJW-K^NB?AZydHJ_z*z_w1~5mKxI`mL4U<@gxUaRG$5vaZ*w<rSmsw
z_Ge_ekNQ#-0D!Q1Fj*xFH6>1x{h&rCY4Pi+i&t!ojZuc1*@;Vf6eur-hO)S#)4-6}
z#B5O$_Qk19f?Nx(3ER>%^(1l#@85w+@xM6lqqRy(Zg&goRtB{!zRGHHSDs>8osVwG
zN~z}jvtwEfGr@?vl<d%g@nB_aaVw>^9mI6;IQfG@3g1QN<cpz~5Sfm13UrHE?0ToD
zIbP6rX&4rdM#TnYEhH700UBs|%b4<GbAP2#b+%136Vq}5+M7t)MD=;p3Ox=QN0?9A
zN0F7>W^H<N9D#R(2$^0vn%L2CqyF+Z!mknxn3Lv;gkl|=&TT+L^1Ww5miF55qxxHH
z`N>NC<AhrAovf}l@aiOwq$KQwJ2+yGXW;%gYJO0zdZEa;7<K0QfF}~P+Z#-0@X~kE
zG)y=x<Pz-YJ{Sy(!~fci&;NL!U%2-)NT!%PU{;teD=(YmaI`^B-L7E<rM6f#{aYJq
zw5!1HXTpc97;Wc?ibKb0)nZlhLAlkUAC$Bl4Is)LoIVMm-Huebb!x<9_{Ip~Rju}y
zUBp*1VHTIN>rSI{?y6(2zpkSH4_9v))`qrqZSPWAthf~`7NEGh754(cf?IKSmm)=r
zySuwP#frPThTtjg@MiCG&VIh@`<<Uj)|zXKG4Fx=s3-*s58U(c%mbrT<GS7#dQ<+o
z$<EkdVtPhHwX`R_P;08*tREBtnh3d$PGig_%~(;X@f<fUH+@iTyQcWhSg0Q!n-ldB
zc9~)SKH2&lKaIoAD~|~T6s~bvvf?@>HCZ=!+{}in&&N+I*niM|8*}fY8gw}jUi8!X
zP2P2ERWLkyBa6f#TMrOWB)<ACC^Xz`o$F+rTC;Ek*mdD*k-91GPhZ-o%`{HVLaXil
zC_Ig|5w6x8I<!;ovZNLstSWiQ<rK(DC5k-YNdL64>PhfhbTQ&Rdq~M{rrX&?F4U8t
zxVJCfi7Ck{fKgK?^IINl`loUTVY?mv!IF3iT|k}iT7gkSDv(2Lf4b1qvAaT|`ifn7
zm%4-9Stv@aG&3sL>^PE1eo>Vso4dcG3K6SJ=e*s7!JGBlMlZhhT~3}|&b<Uha6=tk
z7by6#@!Td5t&}*i$VngP6wmO0W(mIYaU<GYS?-2sbrcn*6^t51pA#Cuz<OgOv#2m$
z)52`^Q3{z8n?q5guDHROV$$?O90~8N&W4Y*A470IFyeB6$ZsWh6rOKx%lN(^iBrfZ
zUo<0De4F>tJ{*(v$?C(y)L9uT){=R_U1m{T)e*X9ULAANejQL3c4%VKhJM)u#1{&A
z(`%fC-i*{*cR$$+YSYzEJ-EZ&{Ozbj-n~Zu?+cXd^irA|ilI@8H>6GA9fzmgXUR;M
z)xJb^Q#5@{Uj?aZ5Lp5}Imc_{5&cKYsce$j$eFkyMD$gtmsO?iA^B9UuF)-8OsNWV
zbG%6r^f#`isRF{1DPHfGHw_@CBjFh~EhIi{wVLV)h;J~X-7ur+)yFElJ*vM1YbKjH
zs@TzvKYk3rED!BNz0B(*{7h9b<T1W2f=N9q*!Ab$ugA>qPHI^&<=jwzJA+2^TWibC
z-c`Z?_#JK5zE<qWwvUX-yuz=E`MN?I{59_f&8jF;GVbW5Oj@tSV&W5lU>=wSc0h=y
z0r9B)F8>f>E6<vt)l{AowTzdqTj_Co-QIE2+B{u8bP4U2c<wNE@tqhhv~NT%DNm&f
zqW%U}qXhXUymbp7<#3`*mMJ!B$;+erts0byEl)`oAom01ja}H+0(e$}ePUeEMB-2O
zrm8?3VU)hS&>KUEJTW&$u$F8o;5E%CEzzFFS32bro?1c7+66Xa1T|5kix`CoB0F&8
z6`{RWMMSNh%}PskzGD*$d*|ZGLKaD<O898gMa&juc8%~MX`A?_+bn`-)HoNo@7cJ7
z+P%3H*o?2*RBEaI^mmZJsNXH>f8Xeja=O1hgb;~>yuYdxY7mdw>(>cLr2y3hB5d$?
zFe(y|`B_HyxCTN8e&tII1_7KaV=LARN~xC%-fEL#;IIM~O+z79cL9g9QYDDTMTS5{
z5Ho)8)J;%A9jSy%ot<3RX=O6_@mp&zJS$Kdkd133B9~Xahw&*wNhBT1G#4Yw`rIWA
zk$k6|<I2qzXLsYwsw6OTIA{)O5S>{`O%&1N71vwM!TlLSd3&E-G53gFQ&w`3E2s(<
zo)<+-p`vyf|Hek%Wc9K{`YNWHh#@LPq*awhJ=yZ3MkYA!JL@idKUGGH4md7y%7sBg
z?ySQzTSNF3j~{sX-K@&6o)cmxoOg|@OcLP|7-r1nQX(F(PcSQ!IT?^TRY{qPe(n;W
zIqb}|3gkC5qt;oCIPuKY{jA`uibP#jF$OBOW=0#hIM%Iweosa)3t))HkO-^N4;@NX
zq7{c5&?^s5Ul_BDhBI6TOW<-+EK~V|-@Q`{apPNZ9uGIs(s4x|qP9O~ZFlLmo4NhV
zs<hC7hjk8#f{6w!%Q{y!HQ?WfNw3?8Lx15K&?I!WGQNo7vtmD~YooNxJ>t-c<8pkw
zecz40TDq3s<My`)x<T|mg`cpHPGTUjD2bT<+d$e9cErZWv20vrjm$(M<M7B22sOhy
z;Ob1(LWaIBrtx9ni5c`WoAx~))n`39RrpNxlan(?R|nWi`ZvBzhykU8Hd5KM-~%;B
zsT<$^DTAT3py6rrthH6q93pwVg4zHsec56M3GXjzOby=oMUL1!48U}2D0=M3{q}$a
z?KR0>89Kt_;*G~?xgGm?djXEjLdco85z8-1*8059<l1bQ9n~deY0FU`Amo!NvPgPb
z^?L7M>BB+VJu$-id9Dya@FMQ;+XPyFO{ouI5mJzj`+4rx+apTblTR15knG)~yC4KR
zp<f*|3iMSgQFG!QgqNDVi(yLpp@QtLW&PK;(CspDkYp4oLkrI_;xIUk0Ew8G$9B_o
zjfai89byn2BZa#j_*T7KtRZtpM_eof1w%D#po%fioPuUP&5Vg`Vs--7P^8I?>dR&P
zdT2$w;hA<kDKLKa5}9fB*q>M<;1PNmaEa8c&AwVHGdwNX!qd5Tw;{$9#jL5%0E?EL
zwox@O))wrPdBpAa>q3vc6GOg)e#f9RHKYg8miHHV6oJO`=~Nqm8fY|{Vo@^=j(9%^
zra3?@+(P<93A1_<>m{W=GfvD!zK?d(r&EQeny04-c6tW8-gg?$=rezx(ND+0|NTQl
z5t>dS3h&^Zs+P=Y!`hxPAd*&xHb3heh)hFlNb7Lkmi+ni^0o~z7sbQ2UZX(v!nRK6
zno|(ZB>#vd^3Xa&D*CI((P6kH+xGM@5(58h#SfWmx(*XHVo*7nFe#j0JSw}l519%Z
zBI|VU7c~$0`giJTPJZU(^*#JX@}NjwdhRcf1iH`Dk8Gpn%L{Zs>(+3m-fw!B6GU#c
zO{N~pTULs39Uo(KsuUd5)VP?*3w@<BN5ij8A@5h|Sg}Hck&Z>1Zf~{)e0skk9VrJ+
zap`30o!^v0|NPcC!<c|I-knJZT|A94OjNl;h}>dEL<9>XzSo#zu+zDwEate#fw&8P
zvuMQjrgd6*jdds~5OtI<Ws=x0>N+}ZC_$_4?*`qLvNx%i&P0=?@!_!kcwuMQ#bOt>
zu_R^IoFz0O(pPK#Y!0Guhb%3p?^HoqJY2Jv*+Yh+qyku1Q`whODQ3eb7DJs{J{~YZ
za}nfDN(vPMGzJs}4X^(KD4nkNcR#&xzVIOvD({SAj|u4`;RkAC6@;bCdTYL;bY?Vz
z;AgD0PiAs=4FooSvUl<Ry6WscoMZ|e;mjGCfOj<;@irT|_v~E;zbjo03PA1Af6h2q
z82db<U2DisQqlZLxXOEEKl$QL@sh^G3zRr+^%LnI@$Ec|^*@foosYjJRGwj;m`h2#
zjxZlw#Fd|od1~iS_#`zpqT+}f&Ks(`b@ddPLM96+Y)Rxv<6?I*@kI~9VWf`+P7nM#
z+u$?`C(afyvV+3Bp3U7A`f3ay|5(t?r_C4Y)g?5+^|+WHgvphA2EXDkjnBYW185B2
z0_a?VFH@z1V$4_2=yg+I3iej1=&!`(UOli{qERWXJjLeivf>5z%j|2o{?qFd8?(<J
zs;tK;r#1I|!bF}iEb=A^-qS>OF1-CNg!(DB`k*9ULpe;&P$rQX0kMb}`B3uO4r`vy
z+n8p3zjXfU_29(an{guVz0_Omtgkwmx{kN&UOd(2;LxHe*HC~Zb?AEm%tUeI_w;W{
zrwe$3n>2SJ?FSu3xBKK$?d>{`OosCu1@6(;{QA^F9f9qZn9YIZR^C)g9Lj1V&fGKO
zk-GZzS~Lp~&e24g0`JO0oc&1!o~J#8rN=G$gAI|UsAij{+VHoPUj$uRdaKT2l8RN=
zNqToaii(T<@)$_t#V(C)p{_WiT!}f-2z4MW8RgV*bbxiS#;Hof^#}EHO$H#}g*hIl
zie-76(F(o1%^r5DFXyDW{pxWHJKG*t7C~!%(jX>}vmyu>oz(GCOsZwwM}3gn<I!%d
zFiaCn9e29l>4#5w9o~ThEEV0H)Jk)FkI<YLoQHZnXSm_1q#lA`J-6t~)b5Qe757gX
z9g6usZT18EDj*upM_P`~t*i5|*;_x+Rz+%ro-4KZR8S!~F`}F_$A*xv^enY#yp;$r
z{K{?PdA$B(Yg|FIYLe>O{e)01%3M8f`y&Feu$H~&6JoWf{D7}jE46XD^-OVl98#9_
z$Yl~(5P{YG*e{GY07s=iZnDX2TznqZ4xEyP)%FD&mjP2H59y5sA05QOWh|D1imXg=
z(jm=EFxz(%Gen9X_Lu*2J=r;;;d@M^$`9+jaZWeO>K>n-4H>&JQ%z*PjNUUY%rPyP
z3xOM`W3IwL9bd2NF<(HbnqX@2drd%qZa_BDZ;=MA_7fqg#o6F?{RgIo8;*z{u!LwB
z`BuOE*N|}hpO_rj1sH^u2%==v=(?$fYHtMiv{}7uL$tF!!X6gLbG0$Y*~^PhPREoV
z;T@h4ETEm^Fob5Y*Pp-I`F`{Dcn43=ePinz-&bWm+b!8TMsfd3fvrf9)bZ3yXk>zZ
z(s2F*Ond}WGUynM*V*&F;TCy<zYJ;VMBhOZG>R?jq|jL(;J#Jf+;)+`7i9W){S6L&
zX28n45y6+#B`O6TK#mtX1+HN(l^&*~U2SKS2z^sQRTO3LMQ||1!3Vv{?}%3D=KO72
zy<P3yy5Gpp!_}73n6GXz&i&g@l-_%Ri+8;mieYbJS_lX>fE*Hgtn;c8??b-OIaT}E
z&tTw5roP#HII#sb9SVb~2(;d(J3tTq8-V+Ff8k^4+DTg_(Lz>==}4-bb73|<iLE)T
zttjCSJ`K~P9v}HYMUs$!5H&gA7*UtuGH0NUE)~9zO-wesNlSY7?1h|X5Ij%d0$yHk
z|2@BqG)-|_WTM`F0ivz1@yKS2bp>}m2#y;nQpHh2RqTdIdcVP={FvAD!{tC&ZLyxN
z=r>LRaWWnkF^<^eV4G|8XoN4utyn#ukFUn3>+t!Wpg;fhdH{VfK9Ws^aRSvnuDLh1
zoL=`XQ&2)xiukerj@F-kVp+~)S+D-%6p|L8`Y|>oN1MAMNkb<~zmBIXPcWk2H8>)x
zZ#lz?-^<(n)?|Nj!d^u&oJ3<3_jDP#K2%FWchP-Z-JEJFII19knzHE)jf#=ELp`6i
zL4020j-`R)aER|yzQA?us%m??vo2t#WWPPMd`L|gDf9DXa(8-Wa#zH4zMI3KT^@NZ
zKc@_F(_h?|Eae#I#AAf6M3W~2X4?mZrm~<<*y)Qh5h$u;lG3bOos%MBSS*_4^JzK7
z(v5)&?dF_=q@8dbq_LsXz(RUJ;xc54Vx3Ti1jr8zZ#jdXmoxGqvL?+HF%EQ+EUPv=
z!<a_SrSqv~EC@AE7E5HJ%xxWISTi((BAZqB%)^NWTZ(p299}*Rq5J-@gy3~w^H<MQ
zjU-y9w^7HAyV#_WAeWL7aneVKnq+x?RwCiJG2Aw-X<!#97^?n|-I(=@P8)OR|M#$m
zzO8_#w6L2cnM>HTx*uiI5tHCfq@&|uM5;A8V>4XyvhVdIIam>Ap-i+)@#SFCB%O$>
zNuYH&ig#5RoH*m8=$*euQY%!wMw|Z@V(J1DI&R%JQ6`h+3*{@gwJp=>FjlX^&}b`?
z!dmI^_D=~G2Fn~}s|!^`GL7<1Cd?Xaa;zwI$I3MN54;O(v@EXyE`cU3FxU_vre8|v
zS?Fc8dwsex>41Z6ec9~wz(da)jH`-;g=Mp9kXxuN(N<2M)?P@|<ZNCh3KpQtYP~%<
z;^b+GIti2z1MG;G93l7Bf|zAS(d0-lFfZjzo#q6L$S2>&E16-ui};QcfqbI1rXE*S
zZE8;PN9sEihGhKc$3a!}?zGdZAjV2|hGNSUn|jf$Momgf!nrx%;r;CD;`Rfhwa**p
zR?lE8cPeae-QDaF6au808=S@<S4hF=rH)q+rDbqm&4l!Z@zHrw4hoM(mGaM&#?W*V
zl$z{3F7kKLvVo-(jlDQzm@3JAKUa*C&^P*ZRgRV)r<U0Z5@rjjs#1dxKw7ctB*Ujz
z<yIxxv05VSP6anD$i3_OMLt((UxQ)mAlG|*Rhm2PRt*~HUwnh1k3FQjhqrzKemCsh
zqhO1OU>yrU6L~5)ZX=GH>a|FJ=@6l`+h)rdWKWk3&W$p)dU7_8JY~E!j)eX_hIag)
zXZs;Myrs1IBMvZe%|sIiA9vi+g1i`xr<ezdG0Zim!)Ww*WZeyl8INfU2xeNPcx=$U
zkT=1iltIV?IK}W~sKlwCjNS4I3rne4sLQDeq_Cxq@%TEAJ?gvbpM5^HtGCfs7zt~n
zE`*cxG?9Jr@{UZupZ^B5?x}0c23u9TR}<^5nyou$Wa`F>HobS7nQ|X^<kBw6%-O)s
zQ@-tA`%t<a;=A*B|I_b<2!ul4N~R@ci9g34jV+~%G>5B4U!XA+U^qRp8JhqnDu+zO
z#+YR0#z|{~4Z<&XnjsWc&X6xDL!x_cBUPjJb(m_gMmqU)k+wiUOj|8A$18fvK`mH4
zEU?&0=-$jX0pziB&d*^(p=**}GlvaC&DG3b$LeO6ViENUF3*-M49JAk4EvaCgn~V@
z0m#=lX}Aj&G2f(Rc|B5Su>>Yg4Q6ujlu=Vm2j-Kzl(0ki(A7|ZiQXll4;e|;(=6OQ
zlhI@xB>;v7=}-Vo%b2wxG;r*RY5ksPNnplBjsfY2ddpSj+U3K*6YcplzM=2CS@fMU
zq;fFpY#7;~UF^ge#)i*NSH_c*wcnLY4vl?41+B^$Tcl{;h-Cdcz~n2x@UY+GY<J&z
z{@)zVQj3UZLHegwFAXlI0vtFk>t8;18r4x~LCz6B(Jo8-AO`dt?(M}Ee?Leiy$AoR
z);<Y+Y5Erb?Hk-|Ia}v?4ULTnPhFY!*VM#{6uJRA?S1NMJUJ#Nyaok|!l;jMVF}o#
zazl|aRVCAUjDc}!>F!4hkBIO|<L~O1Bg-*B8JX<s%OV++r8PR$De6%Qk~7pDVemZb
z4SpYzwEJwXb4PBh%dQSPJ%szgOq0Z}1)*=1O`6MET%JQs=WDep!-fywT1*v=aiPHt
zQ6>InvL%AEgc<A%?_gaEzNFnR_1#(VZYqGPWM_IstppH2-xPHi%6wod)KXHSM%(*A
zYsR-U#vG<W9IN-w8AZvvOHAePpx2HfmzUzGX*g=x5{28$ZO?(9Z#X9iCU`rlON&Nc
z*rO(&mPrj8^`-^9(?9++O~s%dyM^SSv6tNJiys?g+E0k-7@d8nr*-fNAHs6@l*Uxv
zBxJIWqs>7``&-9494;2eY7TtN63msYPpm9B0l`wm?a@IkYZ->0MTXE-Xnx_8I?n7+
zih*yS;0@W4E)_Eh0Rc4OD`W5JTZU&rmxzszC88fbm5DKYGT<qJs80G4c0LiF@5r&d
zlKJulMr;;(+~S)nLTjCDLNZ(4Yfm%QdF5zzD=b!h5%j#Ew&@VfYTcWB@!Fh(zWgTW
zz9g!4syzTPX#|1UuwCMRK5goQ)rX3eib~|BQf*E;I}N+C87H4he0tf_;q5NqLL#yL
z326V{8Nn0Y87D4FMOLahwCAkP_@h@KgqBW|)1?aJ=~c*zDw9XnaXGO{b=@?KY+08Z
z!J+-prlsf1OIA143Kh@e=Ki)`hcOJ3PM<yXbSkI8#-K@sHS^TT)nQ_m+jdGZa({6n
z&VMXsV@D3@hYXU+Iw<)ubT+HTQrIbNpiBpoqQt2LueYT4I^C<pspEd3y&}nKEXXv|
zzTva1npZ~pmd=;OEvt}dv|>rlOYg~OZ|3gji0u3*lc?m-errl<M48K*!YnE24QVrf
z9xTma*N^1yR_5cT&IsD&{`yP1Lq=RgHHi44Aa+EtWId?uh71j&v(a3T`mvOPcJC5v
zI6R-Q3=IOH;kHr-G*(T^7`e_~)(YQu>JV?{xAU2`vKcw^K$KBAA|#rcztYX{(Jv3f
z)dqNK^&IN*Mf7vQXdQ&P2BGEWaBX@RN#zyD+Eu@Ep>vIEB=oAG)fcrQL%7x9YHTqr
z-$X;h%F-}a-^*bd2Ft<;_c){Fj;b4`QfvfXg2w6#3fAh&uIoWAM4;F-FKH)qGj!O-
zIByLBt3pjCdj5GvLeib*4~EF$N@(#)dT*cI^tum;4Iz+d`as4^?i;Oqme2Q_?Nxi1
zXm=0MG-vNU^e4ci&%wK{^PkP`r&e|jP*m8a2QZT8VhMY`(yIBD`dUtKwMV9Jkes>h
z!-}P_RTpmm9P`pYvScXq!+&2Xo9I{H{Ig)F|1@io1S59%;y7%pdGaa^9qXF@qL@%}
z2T#W|iKQ98w9tVnORCDb@TE3dpIEH5izgIPQ@+*R?NvJ!HI<c*B}cq2oTDy0X%Im2
zs7vjYR+Elx=M9Tk`*ZUZF5~m$X!Z{JM3RXVSeM!_E<U{8SmTc#CXI<Lo5OLbF7+(F
z`|h|~3r}}Iin*H7HHfB!(J7>ax@-YcJ$yCQpmZ1gX?pTcy7239iF~+cxj6Zx&+iI*
zUNr`Qypg)AZZJ<zu7_=NL`C|S#i|3Kc-;1L(6Dn<wUw$4Qpd<3H8U1~L&J0b^80?2
zDl>R#-zfr7PoQh$GGV^!PY<a_@NV;FPOZO7FO0gk1`nYJGPvrfW!%mkO^qlkSQfb`
ztW;v+8ZG7~(T6sSJ$^mnHB1ak43>`EnuA<wi8g*lp)n7Z&T`;l*C}i%qyFKqiWc@J
zET9ek6pJAz$XfTzF|V$lDt4_pJRqoOtfS5Glm>Qx3eZJ~|5VTyFH*!loSLH;b*q^d
zd^&k2PYv>cFL#qZeYc)p&92`qvC1_k)tq{!S}@w|;Gd;GL(;6uzi2;*K8^Mbg5l;G
z{`$KbgWmXdKYJ^l+cy;Y@ItyfU+tafcLSR`&y2#TzqdHhecP!LgSDq0Bu|u7-m8ps
z&`G%91N`(tmi5Jeq+B=E52X43_z#OH|9iVeW*O|;apj6UKU-Iig__i<dqm%&L3OgE
z7!%o20dR3GEggAQZ_CxxeSsWtCgrBEAXwAk%@`D`2dC6B%j+;ur^%{+er`U4j3iMt
z_0A$kv-(gSC616BzQI*{yeQqj_+*~3#urbm7QX05A$;2P_FM?}wTL)HC=ZT=?it&u
zU)H<d86fY=hm3qH(w&V?><!;t5>XD(RSG0BLrhhyuBroSq*IFcQe!i_-|WnS<KT5U
zf#t6Z+9Z@e;@CFn>$`touwtf7%~VpZw^23e>WSG)a+ao!oAF$4;mlnK#mCGs1NQce
z0l0pyO)L@e_Le31W5g4H;&HydQ}`;M6GAuAj?6Fv8EHJZf);8nR3O6?ovgW5#YM}4
z2tsBBNRRHi;<uVmxBe_eK?Wa@HgJqw7ZBZw4D7e?)58kDWYpG`a4M?|Guk;271fbI
z#-t44CC6Qm$f2z^T)wa&1E|ue(#um6JiUZUQM;fSA!9mVilPiFsTdvke{l>Mns~$3
z9;O*QkRphX`}=Em@5Fd|#V5(4&1adm>Sw14U!>IRo=#(imJV~LR?+O0p09ZQv+tVN
zC7>b=`nfMB#WjZ^_Vb@eAFs@KyDvlapJ}?oMVlz|2ga+?xi(+gXi?<vt$xg%jzf4!
zCo616r;^hk>R!-)risYAum6f%0XA_Ob=Qhb>B{VGE@dUR?ocQ6B5Uh_Qb1HV{+Y15
zv?vUD$<zRnEnMgeH{!eMNuc|=Mqn9tHx(irXg&_Uf4lD$(r}tK;x(3og0fKlZZtx!
z&2^vvkEuFq=Z`^giAc1Y;JzJJ%4*smJ@-))lH`$JYWLU?P;q0bqKsErl?q#Z96?;p
zR6(sLVp1P+Q_s!I-@0>_>BUe*C|YcQnin=gds%A6R>>+S{?q&9QEyx0W!uhlwQR}2
zVxCREq^xf(=Q2vXz(S)z!wSfa7;!hEJV`z08y?yRUjPp@q{SBYPE}!8uZ0OJp<?zW
zvvE5alNN>S-jzcg#<%(GadwEuXgwkvV^m5ZL~Y!K@VbfiWhs?|<PsU!1@vL#w%@BM
z+xQBz00lqhWgWD{;=RNN6fs@ookeN<<vhNVQHj$5M`XKrh69{r<;|R=NpvFt%m>~9
zL7|n}k^Ug@O6co{l&`FnWX{V%RsouUmFdJw-!Rjnm|Q|{KVvp>wh&-#1~|()dSMhH
zz6mg#*gXxu1}EGj2>&gT1lvXlJriGmvuX4yW6S-U!j`k|`ug7u9VP$K4V0%lfP1l(
zyZv2FNqUE~4=I>BubuS%@bizh@M}6i4TygMTR4Z4B+W{gi7{V*q=7FPwc|@n3jkmb
zpHD35&YQI+rvKO-*#EOTKK6~%Mwc`AB<CgRsTRwENfMphq4rwVK{yojNI><esOad!
zm8g#tumSy+*O}24=zVysGjdY4z$wL@4Zb=GD05uaE0@)gNQ#hKI7PCl?-l(e4|=~l
zoi9av0{tuX3E4pj6=AhnsbT;usPJRDQ(pG+r6|RbDiQ~!z!X-qmm}z4!d~3WYq0#F
z({pwI*4FP2Z5#FZLb=_aGwo;tf>xjjUpDJ%*xd@|Y81Nnfr)RbKC>Em=S5-YEEZW}
zyvaCK8`BIE45{<wFSAb>uuVd(`4T58G6d;>QYlgR#5(twu(B%>;Z9lkuic5j^L!)K
zE>Wu76$5^h1X?^dC6|Gt%&CM&Vbo#SYV0}Ut@<4+4|PozAV&t>=}veUziFFyMCRpY
z4_-9l`!;r|8#`=Bc2W5Tx;pmiml!-3P9Y?tssaWnd7R9a4t-WKiXhW4o)8T&ju1ie
zR6vAg7Aw{5f)<NCV_sImLLWs06{U-2E{9A?Q_h<(%tNMMG;0s!j@O)H8cW-x!`V4=
zN%tnvb6yzHn%(Dk^Dj0F5$r{q52y_mHGII}sJmVi=Ticppd6n*8NW&b$r$Id@AJ`O
z?Xmv%<3(wi)K7u~w=e0;e@e6OlzsP$Uy1Y{*OD<xW#JrOml;v$k}g~<YnkQOkJQFp
zZQ}(<*Fqw$JIm4!|4D~@JkK=x?}~JRassvoY-hyyah#+WO&_+16yRpU#qI?>?9-YH
zDhOMWWMZlURWI~cQT_%cw68T<c6wUu@uf5<+g6hZ%B>QhlL{L6L1T(Er(jb@7BO}&
z1p>I>FIsI(#=k$>yN6^Cw69t6!L!<sOAR90F43>9E#1mmPREHx^6KI*J1fg(IVn)^
z3`aZ7M8n<qB&O!`XWKi^A#jpDc12%wX%KeZY3rLfI1EfoB$(QT3^%|<E}sctPv?EP
zIV}5;gVZnrdjeCed;SEC;C*wD2A?+$KGJ227D|^f#pBSk|0r$d!^b)q@hvw>vQJWC
zi*=dEExKD#TC~?<<avF7)Lwm96uycTXg(@nU=Zh^;!sO%Ns>T}4b>FS5G(nD8xcFj
z&?@Bh#plWCF5WJ-Doy7(4>OhS;t!^0d2W}W$+}v9Li@K&`c){sqp0ebU^^`{5ELo)
zEB-QTk7RyAs1&=xsy?$F!&$0|bA}<zIIRZb;#?ZFNyi{@5Ql@oP-}OF@@j{(Uzbb+
z`fGS~X`sCy`oJ#zA*GQFbxVn2yj(wbwHwtYZ)M$>xkGYS&=-hVr4>MM&F)k-g6P#Q
z-59Nxfni2M@urkU^%VD%Utj!2IJoWYLNWBB{B?bE53J~uBzzC?3wQIoLby-uensc;
zIIe*A1FP<{8R+7`Ts@Y?E)mO==?T5<i+iSPPM_zoE!)4n?@-K5%zsY~wvuA5sTq;s
ze8r2zjQ!)!vbuKAQ_^Y^UkoN@{2&bTRZdoM23$3)rKdsP=(a#aDef$j^@=c?QODXZ
z1G6slkwJOlRtWm#iCGIxHZ2`mESYn>EI%A6vkHPvVjy^<T?GJ5b2?XJni#CRUaNSK
z0}8JAo-=wAr;u26TB=VzQgqMXv82NXo_zHZ1GOKYzGpv)Y&@)#uX!AQDt{I<<>#F5
zhZt+_Q!1c3q(wW)Zq=_hTeVrIqtaqGd5WIW_aFax0Uef)dxC3hd=8@$arOm%5ZbrM
zyB@x+<x5J2fKEMLS|LapgKN5(4yUQwI%^(@v-ZDLnEg^zTZwzID*@c}RTINQ6sZOp
zTAp_D7XhO3{Y8d1e-=wl<btH!kTj5_L?LJ?Gm<66-|ZnpHL`b*mybo<FrQOPO#5cF
z)IZ<3bE0-m<OCzR8a-AZLe}T!M<!SV6ta???fvH0H{{xZHnp>IR1D*QGvWv&7ZL_i
zgEQV<{1=p>aomn;YUkvjRw=)O!P>rC<Awd*bq!qIuHl{a#U-C3s+D>tYB%4X;Abal
z;Xled;I~DJBv>th^40l|lQ|9wY5Ied&ZlBe;^b91YI}P&3|uMiUCYo~Z_$KroBh((
z{cfC|mR=7O&mk|jez02IYQHf3`@z>BORZd1@+6g<LO5~_*09gY6-&QIMc)%yRpZ#T
zCq(W3J#v0L|KA4BozIu5w1e)AH1OJ|H3sfG;Ez>D9;~;@!Sbr40`AfuVyjNsSWg9m
zhTnq|MAIDG6c2@Q4V#?A@kw$T#*tlcX+L&l#fjIDeMz2Y4>KuJ<K^WA|DL`c1bSae
zf|nNrLRPSgt=~-O6vm*8x^9W*p~Sk(vZ1IPjez%-vih|rCUIA-xwVmT68<Ve0~(8y
zmdH<Qxo__>v!^Py`rR@;a+|teJKF$UV*x+hd*Chgr-PHGT%1c|6poS7U|CrsPi+?3
zSn=2;nv36YQ*B7y%FEVf3F0+T^Y;N$F7mhgYwl<7xwAaJk4F4D%;z?=3qkkUq`7<A
ze3<m`?KYh6=v&u+A+YtmGbyuL4Bd!tkSv5dvX*HIQX#jrYs3Q(<RN)nsjX>ue~Cfa
zVcRMXXPOhZGs-#q0Qq^`YzRrkbAiruJx3hW3yao}tV~=y2EnUjA4h#KTFqhoX0m36
zWtls!TYm~yo0MQwDm1dFs&AJd8hEp4APvCjnD#zyUOvAKEv}Bk)95e9^YcxfW18l)
zSB)3CwwOW579ETI_Pw4kk@x&XgXgu;uYbeLH{;Kr(UVVz8r|1KC*Dw+t&@-4O6qnU
z!%vU;)Vc!x&pvsV^1}C+3&NL*_VGwqAI>Lp)<kwG+ajvt0Xukh=dt)`4z)Xb92c{%
z!}`0}epiF{VXwp4+lRNCJETu$es7UjL)1&jtq{suDWq{Ki9^Na@gJGBol4V_baBUJ
zCjX<1|BJPeF_u0$?VjW=p-U$%PQbR~F3Ts{m$q>Sp<R18Uwp=%{m3SKl2|Sz+n{0j
zXg8`|hM!R#My+Z)V(B8HCb!>+IDAhmyC;Cf542_-FRi309_+vCS{v8WV~?NDv)0^f
zv<#$3tIVb?4~HWUSPX}+SRk2oN$<;fs!2@6p%Yvj9<d;0s(0zLGc8y^+)zqZB8FL0
zT^9n^I_6CHW2WpYMT`J`++K^8vOEi-w4wp!levlfF1n*eBI)wPUAq1}e~F6|8wya9
znp>?&t(HThmI2@fR-<vVZ03ISRxyw+BA%WJs_S?^K^xy%d%MP|cV7$L<$mqiH0T!W
z;efFxc<)E>TRflLQb^~CJ1>Z5i#MZJJA>uXekovP+cYJ?S^khiTEQK4>z2G5VP>mj
z0$ByNgwKnCPxO8r!b1d)O-4KI8c!-CJ>7cFPGP2KteM|m+o1($m$b!TOYyl_V7g5u
zYs?2D^DhF*K`WUQtX~%KypD^UH2Ou_sY`Qabr~2g#%vwsbpxMfx_^BorFj1-`b_ew
zwYZl57Kt0p5K67XRC`aQ*5W_<3k-eYDtFU^9EAAA)wGDKYCexZmS2aV<cwdp+?YXK
zFNCwC?rGPHXKX&(1Tzi4b+a(9{&KTHXNn5bMQ}xjU{!Sn`aE}wyV7-3@_MV^UtOEs
z&)8L#?p%7Gk-i{T`<$b`^k={7pF>{J{lZ=s^`DO!6q#baIhWE)2D(reeRV>pvaC`B
zN|aY@dFg{E6ZYQ4)gt^u;rYLC&qr-cdTm1)i<*!d*9{y>^7lG3IwAo{Uy?F7((lXc
zJv<~%M3ogUpDctSt*L@0R%0$I4ztHG@xy2u5ym<*M(L-dRVLKxN)X3j1!NAJzImgb
zM$cE)k&MlHcped%%kdy3T23Nb#S*kq8Cu2stW5nlpy+a6gjfceEVj>7jCJe@IWMOM
z1Qa<%H_Tf==hT*`<9EiDd>gHNt|QbEB7V>&PJ&f6LByJ>z$54*T@O-vMCwZ06W4e7
z^3_Vg8I&9f4~%ryCHoP@C6V`CHLR^{d2@YtYm2Yhw)=}<WtcOEhSkRNZxRj>M$Rh7
z7OfIAYVQ;DR~9?J>qU!=dn`4OlbXw2dH;rYz+u|i1{HMU8F^0M#J6Ge<y2eVuG9bI
zQ%;!&R=p-R)2CX`Tc-0D%6;;hG{Bw`NI1M=g1)0#21|xEL`1WM*9Rk&nQ|d(W(bCG
z7*@%?H?Bz88^k6@oIk_S5VO_}paRN+D!vGcN*PH+)-s6iNxY-cLRN(f9S!?+T;Jik
zW9i%!sBlwj`t#FRu3Q1rBvbKcXT5VTuL5qp-KP`Am3S0&Xa(YE|8}v^(6#W>AR@i{
zqm}+nE5uCR_n;lojkM<Fh~()g;QrP-<~q4+`0D;P_NKVSt^50_cO;J9prBv;oi-E$
zr@wOQYc7gvbl2B5`=JfAcoq@#1azMVf9BT}R_0UEKWE3$cj>Rm-D8T+?XM^=nXd<%
z5WDAMzxa4Js0wy{%O~swnU{<pkoA!XuLYI#;Un%fkA`WCOUFFn-xJYoz`6eT-}jz-
zZ;jC)N396rSU<<+gk0@kXzO{V5<PMQTxl#$sb19inw5yU$^4RXTDIBoR?*!K_Y^9M
z#h>bf%JR$TGt~R1z(o7RwP_I2$7Z|%HME~u34BqCUt3(=Sqd(0ps{9bjU_Fuus|z5
zC%cinrb&GT7L8p0M<ntom+$j!yY_zBeSN(Amwe?)J|cr#FO1f!9K}yb%PSwT>TE?U
zBgMfzG6UPQ%^+Q-*2@QyhO`)l*Z{IbjJ9|2hd~g1&oI;JX@)BEG)`Cp4r6kE)k!A1
z4pj;ET0eW{DX=}<lFtx0ud4R5P5yT}21r-8wr$K%9Ziu@h*bI)uzx%l{b|b$yG=#!
zarre$6NDM&FBR|TO?yHzo`Y4!v?G78q3hrYK`rooi<zJHdCTEr8B#G?5{Yim1b<c3
z&;2}8pkfba3ZG~^$Q9{OpbLvHI0Rxf6J@r!ZgySC+&4@5){kF5a07D%w~U2XAgF#c
z56`Uy&~F};f)B5g8Q38IK!q=s_TD?#Jh(6hUm}A7&_<0F=-)Kd1Fs3_^$gJa$a)&T
z!R34SdauplK#INl=LaOv?@+P+_It58K4WpRRo49(w9scaIpu5O0JBeG8P~~>`B~T7
zNr7Mi!M;x$J+A1v?IHS^eT~IVw-uHSB>v8F6(dKjE&<a03UG^ER@udR-Gj^N5S`Wa
zT^Qw~`%&Y?$@ggUIs4r1b)fs#=Hv0}yXXxc?nws9t$LD8?eQ99<J68gtZA(Ns4d5C
zYvFjI|E$2U|8E6`LzdYhYoKAU%=7^x^Ceod_P$7UMjbGDRho#Yl7FfN{BU0@m`>aq
z^D$2Hk@>J)N=*iyf$)gLg|qOqN}6^*VM)kF<BKeu4e8m=+~vVl3V!dUy9VS+cT|ng
zr&0kf1AIEcwWi=$OMrnb6Ff&ylv?0w11q}tyI2Fy$yTTP_jJ#vdd=W)+`JViY!6D;
z*Aht~AUKE<4OdFFPvgEeK2UJuHaGfDqQ2+PD$VaMDfernE3R|=QO{QFdk<)NaOQ_c
z@L`e#B_*_`y`SDiP(9IpGig=O?5P^hO%p3H(r=8%s$vi<>A8*;7_85J!05iYeL-$(
zlL}|tA=aW=X)=zzY2nSTL#Ah-)#WG{k*V!c*NJVFa5BJ5vYCwv;xk{2o%`IVV$WD4
zS?8~e`C+BV5U<BeBrl&o7^QXRY6rsmpl9gWVBqPV`PEH1U5Q}m<qbfwu>;_0m4OIT
z@DyeUYPCg?Sb@o6#g3h_y|B1h2<~}Q3fcG#l1PevI`%A~a!=d4?Gt2eNv4g%yA>bz
zvjKu;aKK@-cZ}8Mt01hwk7_Ka&x*9a>zemQ=!T)#?-<J7=@WQ%5>!5^>u`mjPjRYI
zq30#-V}<60ef30=;hEPBI8%&`FR!p~l1$@q-sw@(T*^k&B3j?Uob+{^(7yfT*jTY#
zUOeUewD=m`-Sc`S{G@+r_S%~bu71&fWn1S%(0J!>{*0x9CQy&%S>$mpQ}!eXnEUUu
z4Ek=@;2*_Lz`}Y7@_S!vZi?L5oXkj*ljGr6{sy0%I!qXDm;T@b`7HK%=0~njlnuRu
zbQoZnA|1l`T$DfkAhYP0F9<+<dvqYW^YoP0VY=CvHu6+@eYQ>v$BtO>jQ<>IS?VEE
zh|LaYYclE7arE|{Shd@PthdAB>0JH&u7D#`zi=O3Cq2r-HEcq$vF`%3*z61!JGUME
z(a+I-aFy`aS@7cNMwQc*gnSe-6w)L06++u|C7nthxn;yv7d-liKR%m^GCY>@bnpQK
zF!)xMrUp`E=#amPP|gdSzfAhZR6kw%^(<=$OPP@DV`;r1l+g`T&#Ka9aftm)GR-|y
zP*g6Un;g0y`W{2(Q(RldgHDuFGf>=8{--o-55o3?qqA-v`x|uFwX>&m=?r;OrL_R<
z_$uHRe{*+SP;Xb~c}J%~;|5yyia+!YFDI_mHo^-%jXOJvJZwDf=fdhEtdO8-ZK8&H
zAVnQ99|NSNPdZ*H4_=stE{E<3@sj1EEP>ee8mNws{=(P7uYum%6SVsLry$>RmrqZZ
zo2D1N(aMuTWA~FU9QB<;`Q5Mj%6@k-<q_;%*OO<nI&(oGa^iUX!vrr|e2$(!1hr;{
zKWOW+d2DNDAY%<#wjs-t?88}<4SD}iqSP21v!k-@-egbxmgr5<+faJf{yOaU`@}xl
z?{e~m7j{I|Cg2=D^{9p3KYaZ=cDm@w0iJ9-1vQ?+Xt}Zfa3uBVBa#2Ze{#S9SBJo$
zJo_)w@}oa(n*x6W(m9mgHLBiOQlR$NKoZ`{tv`4Tos$gdQadlZhKh>|1~Aj5C&H`$
zz@Yiey+M^B-MF(6=q2Ft{N7et_Nbk_aOr2EfKIpqs0NV3D=<tp*?zPz>jhkuP`4Ne
z{CEi5x|v;Ub!91bZ5vJ{oHOltIGT3UVcWh1g@>w4S<5{SMf;`H&(aIMvUdl_1z@pW
z_ZKzm1*itNp+RwDk>e=CVj1G{ra2kc>1g0WQoIBmB<87agVlkQTHBr50$Yi}?*b%5
z**-l7W*fUeNY)ZX(=gjZ;mIWjzj9Kk%F<_P(f3P<!uSygy5jbss>^wdxMeD$vi()T
zcpQWQ%){DYpoB5;0vjSxjcF>8PVyNHBlYV(HlWw;(X|^$x2pB#qF%n2cS6ykU2u$W
zW?r{nqon#U?MozYqucWVs*zh%`LDP=PjV=k$-F96oam7H$@RNC+B&DehJgL2^tnfN
z9fWDuQuKrcLh4!Fm!r*l8$G*3jL2jrLZ+@KdhcC`Z!9;o;{KQK&V`0hND=G`Y58`)
zl9m=ee;>Z@K4RNExVJ*?>LJ?E(c3Y|OuF}VIb9L6oQ<2W(qOQULqylOp4fb@R*DnJ
zIT?EKL_hgw;TTn}^c)RQv?WOYX;8Q?`e`_O$EW+LyyLvkZlB<I#}{LIO~yKXa&n6e
zu@v6_=YM`0{~z$fk5m<|Mq0sbtfswRMyCqO(s7|RGX1#pmWDEq3Z?yfcmXL7-1K%7
zT%px;;hNMbw7=KDAUzuTrhzylf0ITz?xRxNh`LhyR*<8wN6)oYNDAHX2S#=M!^+AX
zWX_GH1SiIFiXyUBN6CjG!l9XRc3^me!svOvQ-+GSu7izRfUFF{SvDZ<v|G@yG39OA
z2ZsKND5^eUgIM7|#X@pAZr<1UzSzZ4UwdeVFN<`0)xR&0VLG;`$f!o))ehx^hY_?0
zjGFUg1V~Hh@{0eI#I|E5y-2k44Q!A8<WSp|M->MT5a;mdpvwe%##w+<kI+KeaqDH5
zeGCMgPGkDk!-=pBk0!&E9gF~QtbB2FdO=ePQ!f5oG>n1Y<uFi-aZ|9`lep+)NJ>#X
zUNq_z_^5lraxtI2O*m~{3JSPGQcPWXd&aeOGCD_{T}IkQh9?o4vm~C-q^Wz?m*LKF
z3y_RBn|LH3kDx^gv<lQhz0dmkrJ6C$0sXknC~er$jM{tVVI*Uz-hU*bg#E^tJ6ytH
zn3sl+cETWRp;u~V-y7qO%iu?EFp=3>1)=FIQXQT^?^#xE5>Gd2O19J$?cl>6ldoH}
za~nnIpe{a82m2Xk204`%M$e(fv<xXVbos@oHz*p+kR)%05_Ln2K1rVu&^i>Q`gA1x
zQv9@}_^kaDoBg2j)L8vwVni49cIc756UC5)>cddgeXZ$#{DuDmCb4}^30&qXPmg;&
zoAsfj#UB5h;?lpTlnB&So@+DrxZx;Nh_>{_e{bRHWm`X18IBl#|B04br}j=V!X*8{
zW4>#F8aqXSKk7qXS*ks>M-UDD=DdjaKIz3x9WL@AOO@+pdnVPA`T#?wgJhas0&^l)
zjQ0J6B6I4b*@--jD!Xuyq|D}<vMPZ_cseRdstaSR5=xr)C;-iKvDXE&*4qieiH){V
z0J2Pj^B~;*NR#c5#r|!08U+OaAw0opM+wR@&Yo#0$^X4t*b^4&*<rr8c}4&*viD!j
z^9Qt~&};l?FnJR(WTC#Qid!u!cs^FnJE^bO(c;%X>J>)WX+)?+En|^nHHV{4{|gtT
zF-}zPuM}xGELK9gcx&TC?=E9RcL#vn+-!nPpGBBD?xf#70*|0?#C+gmOKDA;j`Aed
zr4=`#O(4MDHz0hqjK>kYR#kg|F>cu;Jw}o{P?q(<{r2*%Mtq+?=uu;+Qa1(3cQPI=
zaX4ZzwcW1PDnJr!spjB<=W%s!hoynSloX4fl{%#(k>SbGVhzXUbT|5=MS9BFyfd&O
zE#vA$S91^PKw0AU2Cnga+h4k89Rm~#4569ir{Qya99d4!`%EvDQPiH&nBpZy1Lyp9
zj(1mb0=F;8cMIHlqd!~3CLL}h_WbKN<<zB~o&Rx~{#~s3c>jLs2d^san9b0iL_MkT
z<~mkM>71nD#Il@Dtke<XO2v_2RF!pg$EMz}5uIb_rue|<xlmdmq53wG8dcvvpj3Ig
zVqEamPTYW_NTZ)W9z`+jbzxy}#>g(0ejvbuYudAy-Wu^ZMwCHJT6Oi^#01mQW+rRu
zC#6;$hZ!0&h-Vg-tV(Eu)`&O@)znK0!On+Fx2M`yaCeX5Yht%l2O_V5j0)PbjCM#p
zhL;43L+I$9hAO554^-(3$1B>V!%Eft{&Pr&14L-vnUuM>f7@kfTjZlNZMN__s95aK
zVqCUm3b0E@Jz+h3UbjDQF*+IM;Cyhuv>h2eK@yT{*c(pwtFGHql?985fRz?QDe#+B
z1u74G-gG1R;)Owqdk1$KI6T73j&+_8DmZ(){wq&;9@`%7t2FHnFd5fGjiKK<6Zd<|
zFJ1{zeIp_aR(T(FMgC}cnu6?CKb3L%IG(l9_Pczsel6=6vi9Deq+R2r&wGMhSHB5<
z@_cee+Vnh{PYa|3Pf!jn%cd#>g;Jn`P)_-3dF;pu4!?!nrOi2;zhi3`(dzzLUFG27
zDpDUlBr|B;6i8N3BdG(Bq;Hg-s-(cPq{bFoeV50dLRV{(a+tyV{9t$9=XXTqyBGaL
z{&K^uFQz8F=b^g#aB^4!-_ZJ-KrHT`DMRpIaCoeWU~`vg`D{Hj^DvCqpUTKkCNH=F
zRUM=RvlbuB3mNlh(|_eDpqAz4$rm{R<pINL%svm*(#n1h;Y5dhaTk7<Q0}+ctRG=s
zl98Vqx|8iL-I<F0<D$5<gb)1Gm{N1A=L<hUNlp`IJ-*Yyv}5c3iBZqlSw`S<=EYNk
zjEMSjj3A@>j*uY_^&GDfBVBo|pVb8V>pAJZ*~@~k>7W96(&7k~ww?e>kE~gRrQIlA
zhG)m+7ty)U70irtgX<VuvkAqgJ<{!Q=FT37?{PMJciU*0O$)N9ad@9YgSLbF7NWwQ
z<#a6wAxRgGM!YmM33rl7R9X~Jr^jrbMxSxZ032Vx#O>Cbygw_py{O|i>(*wA2wNe#
z<WnXPcWTh(^}<sA0KtJIR#{+JGKkdpSMmWX-Lu`Vz;=I5Uf2st^Jo35Ek$L?KGCFA
z#~OUhjoGea!4OzrlJ@UMJLjBuv~K^(Nu6aLdvw9+?j918;9L_T;Pdu*iUz6s66N41
z+_~usHS_}dI=DIMcZdIqd7B(2wzXH0+Zj1l#I8P#Euq<r%0EkM1KREfRl+yy-lZ24
zcL?;far*AWW)jqTg{JEjx4ZS8w_EL~jpKJOz$#v<c-?D_e0599(8_W2)k+p}!dmp;
zym;W!J}64i{J<U@&VOuKX?cE0b{voM5QoFj5ZB+eDs-_}Rb#oB{YOBt`*%ORziPDg
zGhGV|`1qv|p8R@SIFMKAhld<(e=SW}S?@Q#?VZ}2S!*X;FCE0j!56#?bzT#0Qxt0O
zGh=VFT*>O@HL~4{h`N~K1ar%J6OS+nVo7RjztU`;B%wL^G<8aWHNtqRl0UBKh=D}Y
zH_h01(I4%<=D~57<+zv2LBnIXJ0&LVj^V1L2_0e5OeeoC*2+PC4OpZvx#$<c`p?A0
zcAqGfWqdy37Sf47*!(q>r$^5NL(&q6!`aN+7xV9<KOOo79-hR^?qVmczjO5nNq6a^
zpxU>r7@vjD)#SXh$`bzbc2dQ#pruIHC4;!y{g&fm%X)mdEk8`wN;EM46pQv_t{jYq
z>0o&<cW!lzy|J_k4$e%|uG{k|?F&|A=tHX3yso5Eg2@%Q!k7yXxXiwB90%EU<o@3=
zE`d5iJ7<m64!g$HTOBl^+jRD(1u215a7qISr=xfdA7$W_30Zm+z8ripO>vL>0SG>L
zT~mx#F9vWMUce-g4lfCAk6`y}r#ZvmbdCN?sU;Qcm;Bc|guCunQonb&xv0J8T1%)T
zkV?m8WG^@y>%*}a#0CFsdI&oH0Y{yi6#{p<;b&XuUlVu&4<mwAOp#%?PY;W5*{=_A
zX4%SFejdH2=<+F_B=^}<RFR-oC4w5zCTEL~&%ESX^udVyUI+$~g6CvRx9KYj#R_#W
zFRChj`Hv={)PJ!P|DGeYT^%q)#yf{_>;aFE5+dI!iUJ{NnP1Z(__)C_qIj8g_4T8D
zLTlz$8_w|uqj>CzSdxH}y)@iIF@Mih!`Gr<e4}R_v!0?Bmy!mH1rjp|WXvr7;*Rau
zk`Lr|$xvcsi74Bv6o%L}HXzE1NGZ1dl}@*|YAk_TeUu6xKlx<BDg)N^+i>+AuiQ>q
zDdbR(1~7c2rx=8tqh&I27)kod?ibU2>-V^-$~8_qLP145qU6$N<X>XTNR;rI!mL5)
z9K2f%tKD+*3#&pB;OR1G1v#Dn5vq5NQovSAT#6ROrd;4{`>H8~{B)LlcG-n4Aoi1n
zLZ_Qt;1cKaq6I5n`4Io@r2FMYfK<dUDp?Mx<O|+bkUrTL`~@8@PJvg3V=yjr5bh$U
zM$g;?czKyc<e(&DXUseq312XRi6!+rK*Fj=Q%a{%;AsX5JpH=lcc}S|Z}_PGyX9A;
z1>b3M%Pe}_FY2KHaRzMVauiw@0P()Y9Mm`K=6aHR9ZOwhrCG0d^^s+MepS`=%_-IS
z25y}rsCnn075|P>_zCXy%1qz5yXsG)eXZr6>mUP7)F|4R5lg91*;%A0f*Cs9*<6U+
z)3>i}EGzuQ0(_WDrJ5Dk<=+ap&d0eaH@)jkZjLq9TDINKpV@6Uvg+7Ti3MDKUM^rh
z!?&Gq*>7N0fWgt;=%qHY*BGq`>I?`@s6q~Pf_$WP0)bZA<<kFklY0L}L3+5R4VS2h
z<EjngNxSe`51Q14o2sd0_Lb)BDA9lw(d60K(Qa;r7j$4Sgqm8soBR^(GV15Za?{2+
z0Cr_Y=xRCNp~En4c$@Z+gYamXM+!$#|M?-U;(ANe0$<D)ags_vynM(5A%Mah4H39%
zosJ9lnQWhhwzT>_QF_|EeQ4mG>T;dH7g%4cQV^p@ySK#}M+f%t0y27JsmlJp*1j^R
zuBPiUxWmQWA-KD{1PL14f&_xQ1Pd1p?(QDk-Q9u&w~Jid{o)MI^S$p>%~Z|5`PJ3u
zoPGLFSMRmg-m5pyZ^E@7yWc^OUnyS49w63LxNu^1;I-EXr7B0!8XpmLGFBFH_f3-V
z&eRLS8}Zq?*s!-nt6mMi7_E`;wj<wSm1`Ejj0Tn#j?yiFKKd+wD{I3y#WKe`{BVb;
zw~C)`Te3$tziD7NM`Rn!MpA2wO^457?7g3<P0nL5sFba?<vd@99uQIOpAfU|7dP%U
z9FcX^XRwS}3M)^_Dl><)RaPYHBx;%zIF|=<Orj8&NM6S~dpk<`MCo`DuqyC-*+#2N
znVl*KBv_S!B1@WYcZQEUs?b?o=j?x;QQ?1gp07*+;#z5tKJP@I7kyk<+?w&VoigK~
zvImZ#3UqIqv7KGHK)V%rrlhtol%otQe8@@BE^8|`Bvi#*?O85BDeENK@RZED@RNNy
zBXsb0>aAOrD6w(%2=Oq{#gXa!+jMaz^otMF)^|wT99`UTWvJnZuqlEET^1|2RVK0v
zvvEtg;s^{Ovdmzw5!KWWGIf|rG0Eqjj`$yyA9a_@f2mjY^a!qj?`L(c>0fZwGRx~k
z=G&DTxZgkmq1rJnqS*5a*OA4HpGlvD+6xmM%LY<;)Q&^fo1Oz-P6{ccqSf)7>gMMa
zfTvAxm;~|O<mJ*V$%`jH{y3iw&f~u8qZS#B`JK>ZBwhMy(ouUqC6=X2GjFNb+ho{M
zjQ;gVx9(m?iWvGa<LLZG0M|jXD<Hwk)gk4m?hwN|m14E*cj#S5F<6{=KO^=4e!ks!
zeNR#)<FZ9rRJ+g>yDWR3&hH9HeF4%;YB8M|8D2$IcaNJxelFW8)a%u@30mY^wxks}
z3zfo}S|LVZO6H@<BLAv7Otl{G5CS)MU*U~EWES)rR-G&JuVv)N^J9>H8M);QogJ|h
zYZ!<4dXDXb+BVA_;F4#+nun5wr;%(Ow|L}jw>Wx+V4udl-S5Kz#`PM3h0-U29g0ep
zAvAJI(}~<0Ripd!5noTCo2y!$6pbk=+4z6#`nGf$rYf;V5Ja1!Q&N29z`^d<qfSoS
zGY@%0wnT?)dK~)Qk#F@wrw<&iDyAy6HW|#WJy2zmC2;4d#aTi)l$YuT|0K?5wV%wl
z){PJvG_s5zq&DKt$Tesp{YIXN1mxO9&VLCsB6z=+8WDf!D6iH)3)V&HJ=hKUBw|6R
zCyfOPC@JV$Di>C>8Bk8Ur=HyASvdZAEz23h&~ZP5kBR?DdHlzWc}n0&P0e3AZpfNQ
zjGWEylsGl<f4dby!v1>?glrboyIO$4O_n)8IE(%hjs}bSTz2jc>&o9{MEf|E4W-dQ
zMb$070VUGpzRf?lG^?zKxXQ%pehC)1Tm!8Bm8e{~zVT;2vm@<a#2>8*dQ>G-_(esw
zOdR+UTJtt|HX<{E*IE^t81+9(Flr4~Sbq!s_L`RLrx}=6!1F7~NN4NFIqp}vU_2>%
z&f>#mYdQ`crw;2l6p?&C7^d|h!iH^ab2p~DFRR*zseD#6UudlG&XZ1usZ>Czj#e;8
ze37=T+~uy!x9d5h`%eV<i^OwwZspgMUs+kmw+QkIR!NR{@`mqin+pM-pd=F@hub?r
zDbb8wJ`R7%>6p6aDrS(@1ebPOOYS_698+vUy<4&jb>MtFdM%Bs?hh)H-(zN=M~aLL
zzS%r&mperCaIi$c*#$4fMzm2Mdlu-+RW_!|V}OQ-x+2V!nJ<<8A%9@3u88lWd8P1u
z)S~W&z}B9ek?G?NPg$UdW^t;gI`cjbZQ8Kzgmy43B2y-pCzy%b_};C95!Qt9Ju7eY
zbQIolyyV?lB&53?BkU&^TSKGFq{KDjdx){Ijgfw(;=h$RdYk@{D6~G@V%D_XD?mzy
zBL(t@J+nOZL{r`(l`w(}e6|`;gm}cJ8;q3m)Q6-!!*Q8Ss>r{%<r6BGI`NTYt*v+R
z&vE7oWeSR15il6N`M8zeYor^}{UTOtM8=i&*$eE^rL-$TIoKic0z~Z>#F!2>LHU?5
zHT{pYC-Q#sf8QdQnlv_ImEreC`lE2jUtRPZ2@<OCa>jTJf+1;Z?pT-*(}}U~+<UX{
zvWsP81jfToDXP;w_+?~)Q^^QTg#*)%rA<h#pCk$6$CH;OBuKG*{V~jFDKj&(E>C7v
zz<XQ`u#~@wgu9o^3zGM3aG8vTjSg72YJBNyIbn&@ENCaw02<QRI{P{X2A32v%<$+K
z7^&x)N{lL}z97>Q&(W9E&hRqEeuIiqb%QZ!LBHpXS1F%k7vt?<Z6i}ZU!yr|TDQ3C
zr;?7_U<b#yKiI5O3j>L>u`*aw3qqM|tyAmkeEe-M$e$S*1q!n_zNyQ6tm@a64P!aA
zmf_K*ro>v2uFzX?GpUU<Fd*>mslZb|)lCCm<%6$FV&t#F@N)!q=tQ4Bl?8^MSr6{+
zQlq=GJ%BfiFNuvG-Wnmr9LYrxuZ?rVN$2LaMvq(0+1Yv?flNwEdGt?l`7&|&BTID_
zvv?$<&Oq3(Es7C*e^fx=WHT&cBEPsOL0G`RPuy|5NQyd>utaGED%$Wu*?JYo#7)=S
z-^TT}ae@8yt1S_Tmc}C$G|U9Mr@wJqSzFtg`aVc>>Ot&nUtl}RZc0g-*mGCdVXbYd
zKE__{roBT@bjz7xlxx~(n^A>Qg4&VFq)}SX)Z=?GI|xg<fjZP=G+?<=#NOeYouAMA
z{@$xC!zMA15=TJNW}3n+liwS2Hfq-@=ctkoa}|R=8D&5nL!yT~rUnFPC>C&Ddcsu~
zOjKQB&MoHY(|cAYFQHW@y}s<`+nFek^1I$x8-NxmO4XCFgK0%aw62q+1WJ99_(E8L
z?b={|ll#qkk%M&c=1D|}-V>4xz6&Usj#u|?gLdA+6dAmTlVkXykElSjv9(hw)w2~S
z?#2y1M9O&`WXpG*JlVn1GuRR8zJ9VxzZ8ZdlD>#AL-Q=vvnJC{mZ!n`MIK6{iv3G^
zTST_`3vGBy#sLyPhWg-o_eXI}%j@7q0*b=I!V?^Gg{{o~V4?~M_~E_3vB$GnymngK
zfl|oZu6X%HO{5DG9-owXB=&cyN{i62hr9t-uEwriDs*JoWyoZLqxRhZn))$EPgnVB
z?Izw`FKc@ycV8v|65_^65P8`grXBFir;X63otk*th#sgl4gcO<1^yeDu>BP(sundv
zblvY0-_)pY$O0KTte{2-!w4*m{a2{$^PO#$Kq%p-LHw0{Yf-4^rkTbFn+mZz-HyvO
zpF?B^pMB)eC+zStbF-Epeu2G<EGB~0ucvEp9Y<zk<J=3X$xp6hKLMn+X*1l&%i?Cm
zEkgyIF|lx-7kyk@WS??zLIRM(Howy!{DvA2v@meQd5H%-!v7iNDCj5l*@FACvWnny
zp1wN}m`JoQT)nZ9cfF%AlD}7MH*MA7cq7>}!M%dOjE*z1>135|g2V|R3Y1YR{aK5i
z#pGI$wnm__?w62Xa8TuXkVU9ofx++yr-z)#UV0S!xCiy^WYjr;gnpW6**F?<Y9a|1
z9@4#HrW@}mgP%!2taM_ea1N;B$-@hh+&&`QHzv6<_CGOuetdHd_HbCRMcYoC4@E8_
z(L?aT-DY*o1_O($kfWoCV@1k}1N0nklfRy;)~$>)SREuAx_ijpH0Ia)Wao-Tx{!fk
z=S#nt()80xl%#N62lPHDi(d1+p@Y|t69aKSwnLV?T_SNFw<Zg%SW-&XTQB?6?oqIx
zrK#fNpTDRXzdbH?zxr+bdY|9FVQ<{u`mcu<@M1Qe#%OY`;3$6}#>oXJP&87FI}3KQ
zz8IwlPXVwB5h^5FLO7XPo12vp8HsfD{l8vta3p#a+qDzZK~<vEO;YG7OEnOBlLPX<
z0K0IL)f*C)z1IKuG;_VK32+q3n;7&z9x>{b4?>#Xw&fUI0b7(P_EvD_LKawdb`!!`
zDZE~uueCm<XCK8K$@KX=Ems8>Lm5T)h_V14{skcp4suXX+S;4YlD|C>0%O{PBDz)<
z+)0Iv__bIoE_a4=atW;7BMXjt2E89Z2+7&GY24242R5PHMfNE1N9?v0%1ip98JXy0
z!4L~Cj4lbAn0UO*Li=kV%Hd2J#S{+t>(XpR6&h0C;9(^&pP2x4@gmQpQ@g&bWNnpi
z4&60bO@I8Z7b?cU6F6!cJ^~vYWtjb0{&{=pnu3RmlAVAEd|?C*%0H3U{G73gq5wq3
zI;A-q*t{EV$&%`uWZgPrU0WYsC(E;z0BMXUQF=o+&QpecY=ggJDn<w>1LopnK}(Y{
zc^Ce^s0ttbL!`+ClSIua13*T;Us7?X6+F1;JU^p!GbuuqKOEu45s~3YL|x$eh~3`{
z)BgfShsY7##FjfuF0EhNW)s&`zyyBHp+@l)qVgnA?A5GtoYs;3B2zL!U>=@dp4J90
zM*lGNw(LK(bMWDr;M}?ElK3rP#t-eZ_IFTZH7+Io@YFL<-g}8PdOYVT$=J7hBN^-c
z<e>}s&k3|g{08YRzFonZ_2@TUIgrUkh7SEe3s8u&h^Lx5bQg4OgKO2g%+Zf%M(ZO?
z1Sl2b&X(Cf;jfYnHQ5)tH8zotr+z8SZFYvYF+5R0qX0%wX=R4AOFPlyL%aD!i{Qsz
z6g;_23jSD+m*~cKP%LwY_D;uQ)=Q#~;~>RvtdSYY5lNZxe`v}#)S(#kDPzK$r8i^l
z73#R<x7q)Zx};B(g8+w^mp|H4yQGU9{G9?`#;g0DpI`B!mJ@D@ih~aTG0~pN+8P~K
zarE(iXHw!V2xZc^gg|Dz*|}q)l;@i|gUi5alDJ^TGrFdjFN$cB6dh4b=KFD;0OWHs
zQVb(N(DtEawi)}u&xkx7Do<#r->|xI1!ES1GA6Vcjt*1ojbxQv9f40z7%k5?^(Uw2
ze`nJt;87_wL(s^MNwp9&<0nx0aSDF*cscqFIr5ZrR}fTKkD=qNXt?v}R!Ga>BS#ew
z?3u6sJVUxlA}RcMoLs7&pUfAlsBi8b0zi{n9GYO90(GW;w-^;d-cnvuuA-dN73Q5l
z1xDfHl@V2QcA#YXnkK0DJ*pYKbRMIoAcu6KZm=3!8jhNj0=#+~4X4WazNuE!`9p%d
zhePDptbeGRj6;<@Ax^!VR;yPa@QdcsaTitU8m*hRt`<?lt@};ROiT;ZPgK8R7?=G1
zMsjW}wJ&n#aCWi&-b}f00tr;mAdMWQ#gU}X6qSyU#Sn$Xh^#}%2J*Q~@!x&%(^q)+
zU^xWwgM4xFG$1q6x)KMLMzw}dd5A`UM9H+D*^u;b0%7W+@b6s;)QRE(0X8c3!5=g?
z`|-&we6BuffRR39h4soY4saV%>MofF$ER8=UErNRn*s?#E1l}f_o`OBEGE{Hm#l3V
zdfTTz6YfgVJ$?{s$5_(Qm)dpe)hu=Y(IAiOH)2G*mmf3e%I&uaj%oldHv_pr!_)Be
zjo*7jO)3Uqz%#J=gk@zt+-SucMPfn1@~+*)B_Z|j#)RfoamblzCFITPNzzYlHv|Yl
zX&a|nr%W=jSX||$(L=&bwE>-ODRl|6EI3jLTz-x*>&HUo`=XL1?cskWXYQf6t{Kah
zMVxiNb`*UlvNm&+kjX0{@j_qU90|&ICY(f2O}F>2@vQou5Y%`9yB{Z6aK#!}iE-U>
z06t~ux?I4di;~zANOJQzA0zg;>Lau#J*`h9hSwE`4aLQy^tBDcC98&=9n$cU$iwNB
zkhLIoW@D(&e2tgnCSzb$K;~o_bx~em8BvI-hW0ZDNW|H>ZwJhMP!7z9ij^ThMkk0t
zrK1C)Ou4td?xE7zG?D#5^eluI$m)~mNx@o1IgrE2JK0R6*-^Y7MS2--zIS+C>>hV`
zo_#||*u_DP<W}uDGm@gkMJNE~A~&6%O(!Mev*9Dct}CjDmrRwis+LI`-ve%Q3@zNe
z(31ITWv4x3j2M^5RXlez$hY?2#{K0*h`CnUKWF2TYzS>`yhUt?kJ~pSFtxUrQsYa=
z&*YQuxy(L@mRYfdzwadJ?P{g;+>cnl2D~Tu-u_g}jgw>1%4F8b2Ta$p)w!DhBPG><
zV>~k1vxkYUW6-o_;kGr=Cd5FGpLA0=>-qh?TVFHr9yMU>Q4zwM3mRqV`$OLwva5Ze
z{?fYi68Wp`I-Ky5T4Q$l@q1EqRxSsCpV{wKib09M@&F=;3jZP#|J&BXI2cfgR+lyL
z_p{P!*NppTFG0I3e`d4hW2%z_%BS<^{qa!Q)^~Be3&E_aG95imQ@~}QFYp$z5(z3Z
zg7V8GT;`>Y83(+9-NnLJpUmtwfMao14bjv|mfe{z#LfO0gdx!J1Dm6gV`y5WKKkgJ
ztKy&ne8PG=NzO!#p4upsY-WKyjhCC!M@GjG6Oze=nS5rSxxpZ<0&kxhag#lk7|}Z&
zoJSC~{tVc+Vn9uDyHYO6_eU!&MzRofpc8r#=f@=4alzCw3H)#}GzEe>`=5Pa&CGl~
zOvJRp%jC~=1=9jd>y$%86LVWb${jT(G-Jc%cxlj)63_yKdbpm{04|kcZYp_$*pXp!
z>I3w%Jxc8^K2zWF&~XwO%H{RYp<R+QxIX63AUD>FD3c$<rsrQ;5(xDK8Zs%K&|N$+
zs)eWgPi5@8@6ZcQ)AM6BMJ@dow&saL3IwIXP}xZP=Oei+;cb0)7(kxKg5@f{k#cN?
z-)^6|l-w~0)?4})mU%wnD$yTg>4QOcl*W&8Z$I(7uCA4s`du4fluh2wIJ#fh{82t(
zmZ$A$ytu=CnSfNTip+UNjxU&!i6u*3%y!=>o<mmE|G`89v9Q+fW5@6_sQOJ%eTbBY
z;g{CDd-_3ep)+&@1>!Gt=pruX%0qXBF{&6hX3BwewONWzyC-p&@j%v$avSb1!5p}1
zx?bkYI5MAlO&Z79-tgak=DgP<B2{<a3N~SJH`>g~nH9+yNJaITb+wlzN?RhEH(bJ>
z`wx8fvF^8n%dcG4*$`){1*Bh5w%a%Ck3EFp{R)H8*x7#C9_<iskdUc+Nlz?EHlxf}
z){+1Gh900!1Ptp;)}5SrMdGXK`eV`}Idi<(SyGFBOcHg=9o5eZWz)lhzR$wynF%4V
zspWMaOn*W0-tf&R(B81{xhA87Kpd5WD_Q#BzbVUj`Jv=60Ke-XC);y<<IMhgxOL6i
z#jjE@si8tQ!d^5%<V>q7!CE|gNr1$^KXZEB?cu0oN6gE|2Y~8u+uV2G0?=I#7%thX
z)oepJl6F7?gFi9>{6s2Vu3%I-IXc@rk+d7v6dYZ2#L)_5*ene~6{8_oGfUEhWt?*u
zPsQRM73eWU&Z4*|x>+bg#vqK;Uq#FcH)S*%B!HnW4V5|iA-m9d-l?m@2EM0IGL}6%
zo?}~xMt&xj>VNeQ9X%-?&W_P;^JIAg>X>R+9i0I+a(FJ@K^xhs*1@h^6=|0P8=i5m
zb(@W1UBfS=!=GrEFa(qVr6vL{vb7BrsgsW#^-T9G{t&Utk=ZE?hQXOhwAY&r-w3*#
z_ZApz9E#-x^VCcT&oBOuDhHKSn1v;RqUKHh125%}`-Sco!=Sl)-XaetgJyhV#FAkv
ziUxd>6%^Ui#UKi#xqmP=hdkcK`{)^?Rwwc)35sFAYn8sLA}18;4@A^r)qxQomn|Y%
zV@oAayhs{Kp=VE__i=2Ndx$e_e&QRC+qy|nes|XpUDy4rtzlbO{6%B+3-sI3+r9GG
z=bW}X0H=YK>0kQ{UPfH`qJW<sc>AcmvTCG#Qs2Ama(+{+n8|_D1N)_#CT$0lwr9W0
zXia0m)uti<-W6_p83ncVZmfP_vlzS$c9l<dgH^T=(<11iJbE0NDnisBun6rQ?)-$L
zmunGIv!@UO73&TUCo%S^%lE`UOnY`En#S1A$radz=@p)sq0A1ajLSdX6X@vt0)$`#
zWiwmXSNM@=#*;0~-LzZY=aCt+iKCNPA&Bv|iEH-q7~D`+W>I4foJgMFGt|Qka%<Iy
z2qKFb|B;C}a@ogCyZ&&;lXn4@+@k&U7QWb|i5n6vtf;(CSf3_xZn`Qdc^pc=izVY(
zB#*~8vR?zRQ|$<JJ6yfN0r^6zbG&2@32D7LgQIG}DcSS8z~2pXld>~kq|%tI-^=Sz
zwHI|(Rja=Hz$2=3&Cv?wm_;)VD5)8peRYWd=>$e8>*&dn+Rgg9W}WKhE(=l7Ii3%=
z9B^KZ)g|k;*$}nrU!8-h9-gHyY%6v2#z&ukPY&}zdUbvh+tI@Zk3qI}Lni@WU#_U1
zuev>>#h%69emcC|8A{_jXVo`o{?(}AQSEqG@%8y8;Q3?`vd>y`eFSN}Iz4y#CgtLx
zy%~<?EU**NedWOqi5q5DmZ~B;>vn<BMgcu+eE~gQGacjyy5Z#&%&&)fO*a^HXcdt6
zG$+i@Qq-{LR1^dL*=YB*^3Xj00<9pNFaRC!az}GeA%@pZyGd(dI-HG`#@gO3N^FZ;
z6>%o}0zL?q1x{~hYTSV1WLF+Om!@mc;$gHzz~hpNa|#o5eGg9Yd@Uf~D@t*QJw`QS
zQJ@EUi~vr1B!*r;TPof3x24_-@j*m!61>}}iF)*=*0byu78m36nC~p`W26RNItaoF
z0d<CnXaN)j?`=q2P#<w8u7BdARJ+*JpqdBgYna~%(sugcQ}PrPV4s0HBFdRWAGfbv
z+eo8TWJyReHzQ?!13zo2-*KxecGQkAOCbfQLmz_!G3A}{Q#+qo!9wjy=yG!jW)*2$
zSu($V4w}KyWBfTK9a!PqU4ic<Dy%%@r9-_6>Ad8Clz3q3<8ut^x*JNg-oYjFiH%yF
z<EB4V&me`m22+o7<DQ;NA{_~C8V0gc8$AY!QJn8)`rbe@5$h-FhM=KQ!A9G6Bi|zR
zDk#8+xm5Lc`<HyfA(2#Q8m1;YWV!qc;?VaEE<>9i_}kFlq_;<~=;&!a+Fo~h7!@%(
z834m*d6)>a)urNeY%EE$e%=bYswng^M@qA}pW%L;yel4AKMt$=8HLJ~R?9}KON&J1
z*bn;+#AU#sx-z>&h|iYT;cD`*{_<0%zL8HV8MsMP9-R9dwb<EQh2<-P;ng3}1)+(!
zt-*K4s*#ejxA^*ivHass*xpvPA4-3!d*tousC(Bzq<x@MyKBnhb>87?mc_#aG9SjU
zt?(TIo8Mu*@OALiOe#FBP4wCN&W_d4iFsd+{}V2{hn!Uw7>1q}pVEx+e$a+8Oosy;
z05G?a=Wi^W^6jCj{1=MF$Y7PeQwo>aT<6e9r>I3y2-Y5RL3YQ&WGgem%99qD+%$XN
zJE)LB^kp~-!W}t;`Lrz~GsOC+i%qeNQkb!NTaugAZ=KZ_EBbGl)AG(VL*G^7DQH2w
zOJkb`%8t=ZM7V@|xY1o%`D3%M>V)S$h(Iuya9B>x*0#b)(8z-0!MxT(&^Q$hWkw(;
zbBw$eHnjDcz>1|)K@}(6s5*D?SanaJi9>j4)xzV6z>Fj%#~ayj&(`7Gsm6ce^-dHd
zm2An3_Gp`JSVHe23D_hhW5<)kW|{I-x!w%AY|Mz1!~RxJ*u-lh!f~%X@V&)Fp5<WP
z@#>6pCzMU2B1i5hGCu)~fzB_K&Yntoy*b20ataE#PD!n-+=4}&e8$;l^(PbSn!GFD
z0IqxApnT#ZNW?CMuA^XYqI7}QC2p$j8k`iWo@KyETX1!jVPFd4ny8?H9c3rE9<4w2
zc>*Cdp#=!mp?%x-yoSo0&y5mC32=2n)}PNsmcYhwKhiz20>bcdg+)vLA!%bvn}f<h
zsH1RdlFW*8<#lbJ7N(wqqU71~jMQO5WFqX-GGE7%4E2Te_!cd4+%nfJ{3xH}$J_t(
zb0K<o>f7_4;rLk<*haC2>g(GGCNC~i@V?0+dl?3ysQz?^OT_bbLTUX}?DyV>lBegt
zJiX_B=kWNVu5nX7hXP|pllWq{l<#1pdBcZTfJ8lljLvU_?HO#m-6tn1?cfVzhI)JO
zb9M1-b0|9Nc-6q`lriY%7-t0p(Th=%c18eT*AD$geFtg6Yh<CuJ3UUGoq9+7RF~Q`
z|9Vr;7~O#nNQ5eOGN{cG^cAp`Fk^5Xk-f4CBeUtL{2h^<MFV5FnyV#@1-`1tm<fKX
zzltWf%pK$%>=Gt(H;NIayoq4CRG*vQcDlD0Ux(jFt3!4e`yWHv(zlcX>(L$U%It6w
zjDq-g?Ns_iT-1KYN(_YK!47$z*`C)lb%k3eHN|doD-+-)EY6A-^2#*7ytbbC-alYR
z_+!)QFpdKR3pkQ5&4;!?Vc-`eN%@dtsZ3z|s<-fx$e!mp>`x^oK?_-jrQydObN`@g
zD14Gh?;yCC{!o6N$swM6=hRVDOBN6zkcR?CQmQ6EiQb1fBu}W!%h7|_!#%;&4dp7{
zAPHv~W@ihDl*{aP6Ws%H#=a~Z(RPFLWn}5M$a;~Y!RufQ)iQVw(lxlg=ocoE#y(<t
z<d{d}Mab?M$zz2E(o-^vRSwV~)TNveqzEpV0n*hWY2Ue=Y4ONQ*pV=fKfH5FDwS5O
zXgG$}4T$DM#Yv{FR0Jx*oaSp8z0OcF#?%Uqjk#qs`LQ@JA9PJ>@)@psP8KM4tr5}~
zZi2}a7@pHayP6{pAOJc!SW73+wI#S`qW|03!y1Yrsy7cOY@`N1n7_Mh<Q(A)cJQ3c
z<~1E-M|ab6r^UrM?<u^>IjGs4KAh~KlTWo8lna?i2y(%{pYVUA^xMyP&u?UHH9EYC
z@>LV_wkdXS1q9X%fK!Y%uZ>@zV?;gTK-Qs$TS(2uUMO5?CdFuU?PgWokGu#77}IIR
zQH8R<y$ao|AR`t^9`+dawT^@X_-AV;LLNvH7<)7-|Iq8h)c@IKnAg}}&bO<6!)7rA
zy(cpW{Po7Ep7V>T8uC>U2?w)N%RBhv=Bcy{n2Sr)a|@{w4X+w`ekd}Tdy#p6`lX>l
zYbx@od1HT8>{{S;XSG`64`cd|@cIyd0gizm=+}ySLX+gfTopfll;{c}rqeL5e021X
zQi(>$%d6>QD`NatkOvW-B(7CS@%7He9j_jWt5A@sk?v7NNfpZie?@KNFxL?_j0C0g
zui9*H5A@$d8fCSE-pbj5teka5z2@q*&Hj<UStf}xn=gw=V39U^e*`(<bY|M-&lE=o
zNb6x`QUgRrC7P!y<3~P+W+z3GlW-p6F}wW;W0p&zrCKeSkP8dsz(=<vIXS8I_na^{
zi+CMW903u!H!}&o;ATHBzeVnMKSKoC+=4y2A#!Ui&bT5OwQC89e`iN=pfEK;oFeUd
zh=V!fnK7)kR6QFi?*~?pS3SNFU_^1Cw!;<t1|Ol4k?IH*<xiM8rrZShlrG0ou#jYn
z!<)l8=Vl;<f6g_y+R@EzXC4i|U)97q>7w(l2t<qqZ~|iyjj_+B0KwwIZIVV>H=k+n
zwQzF0+lGhI``Xj%=B$X>xg_$01BGjjX&hQn1>7<hlfP0fcarjNXyeFGWCreeLa)ql
z4>Nb~GEA;pyu`7Hx*Q_;T`Z94$Kc*1f?pf9CoIK=+5C3PGuwTT(iHC;jKE`%GZQs)
zvb6?Nhy5_`b0axZ2EpY+P0f4vQ@w_Q+0gx5yIu9LMj4b|#=SczJbZTmg;e~%-VprX
z&@!yGQhNxgG#=|Wv?QFS>Vb@`YKG40$Hyi@>5Eup)lw^$dG%Z*D?$&tqR92%5)6I0
za<xrL90!Bu`k2Ar#5YC446be7uHQm3ZsYQ?S7kbT;E3tOD2w~YwAk7DVh5!9u*itu
z9u^pxF8j=)HeX*1le3R%56a^({>bhpgF>|wrEDTlDKqsbR;R!&6c?f<o&5qTUsh9V
zR{52{;2^D`=&6Dkq~be%Fa@k&7Aq(Ah&pE_<55{(8la@BcdNsO+~rBL*|0RIM_O=f
z9;ztW9Q)S=Is*lLum*B9H>-vxLoJbHa$c~x79kmT@>gvxlB&3O>hU{g(V@*Rf-i|~
ze=a8tqvb+L)^xu*zCxGdPjV0u|B#tXi`sv9*I%KMSKqZ>u8bQzZIN@eIYqJZ1ghs+
z^n412#1DB!BF(vp?*b*i;V8INv1gv&wCHNze;6A2k-t91#X;DGQhc`~E{1>5RWd;v
zths5BIj1ipC#0ptla-gNbLAQW_fYpy|0uo^eOi*T9>|_e;exh)-;OUqo#Nb4i^%pc
zma=&N3>%_v7(M~S<tILl)ub0lj{>YXH|aau?GSt#_1$mPLHta_j~uX01g~D}L4lZ*
zD>R)+Fia@cZzyDbJQIG&U9fu0VdM=QH-6!c>9}B4+;1u!(g6A0T9B=+NYMY%N>^3+
zZrz`C+)O}Dl3=3@sEwB&scGSN5nRSc{GB>%^QXQ#@WL)PC3F05E6XM~!C@Zm**lt-
zf1dv@oDb*h*F~Ya%SEOINMhED(NbmJeUzo!?4J=oMK6Z_O~W`l0vJgWhfxR`w6|0R
zTmelj<Q?3iIu<1u=fkU;R%EP<9(!%V+G4PYf+Y5dPK98>NRzU@d!!s5+-0-V3kzWj
zH6(q)QTN;}-^o7gR#;k|L#4196*9Z7enIwM-k`^O`Yq0cxR=k3tj@96OetI3#v8{F
zBwhn;c4SqP1PvURkKRP>Rmg1IDT3a;P*+<-oBYvHTPqse<j);sb$SbseAe?xM2Af!
z`8IAC?uTH3Dv8{I>dD}wuxXK*`Ea*&|AEsg^74diUNhLFRYTprf-;FJ#fhAseDUCS
z9v2xGPxkVF^XZvy+x`^il3@SqWmw&ajv=}8YcSj8eTiT8XdvlX$Ae(A{{>y<ns<ud
z_rtw-rN%}9E8Ex!+doR4K>C&z;hjDAU_kbmF1&648k*Qrv%#9?4uPP&AdE^k{z!1i
z1m(H;0maA#4ktAetS;t(0nxPA50pnkbH2bsRF9?2609k}y5V4oh$CItd<oL<6D$Xn
zHp~L~Y$9vHUA8y{lVaKL{wR_pLU82Zapuc1k^()6V=kvr8Qk$?L6Y>RKiOIaJnSR5
zq--NHn+JYUO#%Zs5B%j~*U&llb9D3z+G)H22tunJ&-&d`A1P)@6X#mSwi|X$ykiQy
z+wX_e4}9W|guCr`_7@e+bx7(jX%~%Zz8e?JY%}4#^im+&9pGUg8Dv?UL-hY8x1{6#
zLv9&(9(}hr^g^MemHz~Q4unNe{=j!mZbuh&J+tk@rcuf(F0WX)NKcUv5J`_EwOUT3
z8FF%Wh_QHZ^JuP%F1@wi5wjj8d9{*bJopRxD{1d1VM?vy5mYgi`CHEL6|I`azcw?0
zW6#{8(|00G36z8YBUNP<1O$Xkk!C{jRjHK*7RT>m896)6QseGeNBte)C8s)3-wy6O
zf?80BD+awh1i#-xSwe#pGP%t4a@|6vvB;sjI6aA1ek3o16GDTAS^jF(V})%aZG3D%
z4$v^68IfcwF7Lvna%x)$2VjVL`Y;toB?((W&#u&h{?H@91l7}1G9*gx5k8a_YfP0!
zmJlZscDh77LGB2$#rlt~1QT8e`8K-#{B`^lRmmlMso@{E?|<jO0b1XS5_>QM-M^Bn
ztGMe-62k}Q&q139++tbs5Ep;rVG_7e_95$#d+f0<SRz+^^rc5pX`ZdAQ_Clo@@S>e
zz)Ba--gOluI%n2>PclX33I&~@=C4WTw1=fI1$~sdo5Tv|p`qP-&DkdJ`0Tz%*q0?%
zFe!+d(-zt47r1rBxu9ojrb}q=EWEyZKfN9BBxH6LEt02t02nFU82};y{j?~GwhMpp
zI?#%gTk<_Tv-$kO7kxsS6uzZ70D=*QJDjgYSN(6K5~nNJZkJCWsifSRUu0)c=58aO
z9M%iTlt+@Hdq01z65S^*JcY}34`N)oDFCdzZr+7u%jJy9l)0h4g(o{<YLvY*ru8bK
z4}RUWwC`lQ3eKnhknSkPuZ-`kBOId7hkfV;1X}=iuiC1WQ`Yh9L3yO4{Z}N}pIW84
zYOtJ0jM6q$7=WMwMwwdSTp@$J5tKxAwJ;=c<c^5hem{StJjSv^)D4}AW~cLpTh7Un
zLbYc`t0!#<7P17y65jP`y`NG{Ym%tVyLVabw%N<l@&UFQ-e8q%6{FRdRYMI#4%D&n
zr4;1*;JCU*0R)K)vY?)))FL9EhQlBG>`~PG;$Gn$5p#kdKH{1o*-J!E@>tl8m}WxB
zrxJfFYvCAnJiEn%K9Fz@RXt)qQm<Sq)^<U?XhX7jKr=9KNJE*WTtvBfk**&76o!$@
z)j3L#qjanwJzO1MsXTlpN`LLoS#4eGsELcbMwTOZ1xb6ORaVVaSKnwiZt9gJrTZBd
zz4`p|Y`og>ElNA>6jYePcjKa*=L~Pyn_!cE?w}1z%RW>ybIw`C;_q6$_16W?tGalR
z>CjaHpnB&V{y6{#idMuzj7}#)D;6yokU<`d@x~ZUi5?|QX&x<(sT!24y5IEyEvVmP
zq)AlLw?)zG(oD&ItsrAJ3>4dOqPE@|I6?EbzhS}0zd0{1o<u@h;v=*&qGqqlHVKpB
zxX<?BzSmj3*kHnxe`4<=ask-P{oK}Nm)Ji%gHW(DzNiR2g-tZNK6!bRGL-tDlDRe?
z8nV$kIC(21Pm=bRgFtZrf3r^u^cua&a^j9m^!lzobhj2f7ZNcP!9{cu6v?8~6@%0p
z`*d%_|C>P$|8E9)dCE6iXtDYlq5Fm05i!$YR;ROvzeaQgaZoB@*6XqIFIE;44HPg^
zLe5AAimoT4<$4d;V<9m37ul;iW?Ha#9~zciBQUT>A>ub9uNJR>@Jg=`IZJKJAmZ#4
zUCCG0zhqc86Ohx_KJ_d>#EUZdcI1txrjskTsjr=d(}LfRALM$9)m9>j+4N>2luPce
z%#<y6cdM(`=P>}z_wLC2`;J4H2JIe<;)=m@@f1Ed5es0TLrI2{_H-lkY;*F~!y>pJ
zaw^Y{3=Y<C;SjO$S2;DFkf}zzRGbvBMf79?->e9J1bvOPoK8Z05@{>`g)7opNY6<i
zWNuNKS$JSqH+LC%#31M4Q19_3m{r6&&QxAeQLRyyxo4hZgCN9z%<v?|>U(l63`t`H
z{$B3%3ofsIj(u|IMpCG$?HDjQ4|1{+XCTQo@Qx;G5eddHJewXzwb0Ik7s+&Mp@=P#
zb)&5-$IKVEL~irl8LEfnjdezn#*-@8yR)JEfUx|*Yio7_y?SO{jAAzY(pC0uj_aiR
zlGA3Y>t_xLRbx^197*IR#JIA>2}_kSH|wjfMKE2?3qZx^gfm6VIc-?fISq8hN#~8>
z3ArurxCz~$VM0kx*3vN|yIKpK97MsEmmM|(`GSdF>|<^%4yqujBucGUW4L8AiNe@H
zxvRboi&}jRvyK~|72xb->m9t!bWRdNALi58*<#X-gD^2pb>6IgPYicG<7PkJSc1-*
z3;*FYe@O;C2)VvPA{vm!>xt0D7Do32T9u)1Mi~dcN@h)-9w|+nLi{e_i$qFNktox+
z$e6Gm#=31dH_w=^xg$V$QUHj}PZ>8}ZtJ(>mCVOG;ykbQm3dnau~#FC>9yMbf%t^n
zt9TZ>Y#DV`r^Ah7aB(<2Y15ZjhkK*lhAvf7dY!jAmQp%y8D5yBOAfgZIKNAl^SFaz
zC9=_&bSmAH#rp{$kqwf*I2G{M@gX~Zd+sith25H7q0i#dv2kQ&^sVHUX_iv6*8KQ>
z*7WYa^nsZ45F9pLe;fsd0wD0K0>_8pC&q|IhR-U5dt%gMn&<nb__iP3B?O@tWM9#s
zBa+b+1#5u$?9_?Pi2JItRE(ujKVLb~l1;r3Tg}(zY|q6u3(LOSN*UpxhA84Njo&P$
zjI_Z-CK0UD-P6-NDTdEyo86o#cU_bF$LM=gSDl^D{Nd?W%3Jw#8SFD8_B5=&(?m5v
z)D}o~<ry-;t(z8iOJkuOciUFlA=TuC3F#99zJ>-c>n%z#O#{U@KS&|Me~)qMGIhUI
zsd;iRJ2I<FVwv;IxEvST0d*o(ckq6NOtXtFtdYQpUV8XvTW0%5E0Vv_EMHlpzH;(}
zJ|V2Nc_mzept9^!<}2lvgk7@jXG*T1xdg>|vF$h7#!t>jKY!$k<lkAg6#bmWV87wt
zqv-#POuD6e<f_u{zlwrI88jH@{#Yhp$0pk_`y71mJVSFNqu!W$UK-9@D`a{77c6|e
z=odm~{5<#c!khSBN+;$-6OjH-N!>r-`hCEFy?W__`S0wXhvNS-ng720XWRY!pBV1H
z7NUUv+x}m-_@BQ?Z^KCZpXTEKpUt2Bn_BVcwcrm!K79B<>nfw;YW~&LLeR|F;(hqQ
z$<E2i!p_ga$)m}^FUZ3o$i>gZ&MwH#{`FF7@c%Hdb1?t@&FlYd0H}EKdN&~aX9W$1
rZ>}C+oh?3C+B?`;u(-T0W?}DQ<=}b_(=Gb`(T7hmpQWoLO@RLk0uW1v

literal 0
HcmV?d00001


From d598b6c79d68d532dee1616ec2419f93ff8afd6b Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 15:11:06 -0400
Subject: [PATCH 28/58] Update README.md

---
 README.md | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/README.md b/README.md
index 15d4b48983..189541f95b 100644
--- a/README.md
+++ b/README.md
@@ -31,13 +31,7 @@ runs from the command-line (CMD or Terminal window), and does not have a GUI.
 (ldm) ~/stable-diffusion$ python3 ./scripts/dream.py
 * Initializing, be patient...
 Loading model from models/ldm/text2img-large/model.ckpt
-LatentDiffusion: Running in eps-prediction mode
-DiffusionWrapper has 872.30 M params.
-making attention of type 'vanilla' with 512 in_channels
-Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
-making attention of type 'vanilla' with 512 in_channels
-Loading Bert tokenizer from "models/bert"
-setting sampler to plms
+(...more initialization messages...)
 
 * Initialization done! Awaiting your command...
 dream> ashley judd riding a camel -n2 -s150

From 2114c386ad7540f17ba7793eca98f9077af4fe15 Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 15:27:43 -0400
Subject: [PATCH 29/58] moved index.js .html and .css files into
 static/dream_web/; changed batch to iterations again

---
 scripts/dream_web.py                           | 2 +-
 {scripts/static => static/dream_web}/index.css | 0
 static/{ => dream_web}/index.html              | 6 +++---
 {scripts/static => static/dream_web}/index.js  | 0
 4 files changed, 4 insertions(+), 4 deletions(-)
 rename {scripts/static => static/dream_web}/index.css (100%)
 rename static/{ => dream_web}/index.html (88%)
 rename {scripts/static => static/dream_web}/index.js (100%)

diff --git a/scripts/dream_web.py b/scripts/dream_web.py
index ec909ec5bd..d48d5ac9ec 100644
--- a/scripts/dream_web.py
+++ b/scripts/dream_web.py
@@ -23,7 +23,7 @@ class DreamServer(BaseHTTPRequestHandler):
             self.send_response(200)
             self.send_header("Content-type", "text/html")
             self.end_headers()
-            with open("./static/index.html", "rb") as content:
+            with open("./static/dream_web/index.html", "rb") as content:
                 self.wfile.write(content.read())
         elif os.path.exists("." + self.path):
             mime_type = mimetypes.guess_type(self.path)[0]
diff --git a/scripts/static/index.css b/static/dream_web/index.css
similarity index 100%
rename from scripts/static/index.css
rename to static/dream_web/index.css
diff --git a/static/index.html b/static/dream_web/index.html
similarity index 88%
rename from static/index.html
rename to static/dream_web/index.html
index 4e7af0f771..5dca10997c 100644
--- a/static/index.html
+++ b/static/dream_web/index.html
@@ -4,8 +4,8 @@
     <link rel="icon" href="data:,">
     <meta name="viewport" content="width=device-width, initial-scale=1.0">
 
-    <link rel="stylesheet" href="scripts/static/index.css">
-    <script src="scripts/static/index.js"></script>
+    <link rel="stylesheet" href="static/dream_web/index.css">
+    <script src="static/dream_web/index.js"></script>
   </head>
   <body>
     <div id="search">
@@ -18,7 +18,7 @@
         </fieldset>
         <fieldset id="fieldset-config">
           <label for="iterations">Images to generate:</label>
-          <input value="1" type="number" id="batch" name="batch">
+          <input value="1" type="number" id="iterations" name="iterations">
           <label for="steps">Steps:</label>
           <input value="50" type="number" id="steps" name="steps">
           <label for="cfgscale">Cfg Scale:</label>
diff --git a/scripts/static/index.js b/static/dream_web/index.js
similarity index 100%
rename from scripts/static/index.js
rename to static/dream_web/index.js

From 652c67c90e97729d76404c04f2789c57af567a2e Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 15:29:41 -0400
Subject: [PATCH 30/58] Update README.md

---
 README.md | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 189541f95b..5c049c4d6b 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,7 @@
 # Stable Diffusion Dream Script
 
 This is a fork of CompVis/stable-diffusion, the wonderful open source
-text-to-image generator.
-
-The original has been modified in several ways:
+text-to-image generator. The original has been improved in several ways:
 
 ## Interactive command-line interface similar to the Discord bot
 

From 91d3e4605e01430b0472e699a0554d13c345cb83 Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 15:32:48 -0400
Subject: [PATCH 31/58] Update README.md

---
 README.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 5c049c4d6b..315c4127d9 100644
--- a/README.md
+++ b/README.md
@@ -179,8 +179,10 @@ repository and associated paper for details and limitations.
 
 ## Changes
 
-* v1.09 (24 August 2022)
-   * A barebone web server for interactive online generation of txt2img and img2img.
+*v1.10 (25 August 2022)
+   * A barebones but fully functional interactive web server for online generation of txt2img and img2img.
+   
+*v1.09 (24 August 2022)
    * A new -v option allows you to generate multiple variants of an initial image
      in img2img mode. (kudos to [Oceanswave](https://github.com/Oceanswave). [See this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
    * Added ability to personalize text to image generation (kudos to [Oceanswave](https://github.com/Oceanswave) and [nicolai256](https://github.com/nicolai256))

From f4c95bfec018bb7b822d0c3726bd61a0576c7342 Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 15:33:49 -0400
Subject: [PATCH 32/58] Update README.md

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 315c4127d9..271518a67e 100644
--- a/README.md
+++ b/README.md
@@ -179,10 +179,10 @@ repository and associated paper for details and limitations.
 
 ## Changes
 
-*v1.10 (25 August 2022)
+* v1.10 (25 August 2022)
    * A barebones but fully functional interactive web server for online generation of txt2img and img2img.
    
-*v1.09 (24 August 2022)
+* v1.09 (24 August 2022)
    * A new -v option allows you to generate multiple variants of an initial image
      in img2img mode. (kudos to [Oceanswave](https://github.com/Oceanswave). [See this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
    * Added ability to personalize text to image generation (kudos to [Oceanswave](https://github.com/Oceanswave) and [nicolai256](https://github.com/nicolai256))

From 888ca39ce23071a54fc32eb8689570757e5a01f4 Mon Sep 17 00:00:00 2001
From: veprogames <veprogames.gamedev@gmail.com>
Date: Thu, 25 Aug 2022 22:29:12 +0200
Subject: [PATCH 33/58] remove k-diffusion from repository (git rm --cached)
 should fix conda environment hanging

---
 src/k-diffusion | 1 -
 1 file changed, 1 deletion(-)
 delete mode 160000 src/k-diffusion

diff --git a/src/k-diffusion b/src/k-diffusion
deleted file mode 160000
index ef1bf07627..0000000000
--- a/src/k-diffusion
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit ef1bf07627c9a10ba9137e68a0206b844544a7d9

From dde2994d10f751c57170d750a9557236e701904f Mon Sep 17 00:00:00 2001
From: veprogames <veprogames.gamedev@gmail.com>
Date: Thu, 25 Aug 2022 22:31:24 +0200
Subject: [PATCH 34/58] add inputs/ to .gitignore (a place for initial images)

---
 .gitignore | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.gitignore b/.gitignore
index 992cb2be90..ffdc22fed4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,9 @@
 outputs/
 models/ldm/stable-diffusion-v1/model.ckpt
 
+# ignore a directory which serves as a place for initial images
+inputs/
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]

From b3e3b0e8613e7f11e0ae3e5f722233290b0732bb Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 17:26:48 -0400
Subject: [PATCH 35/58] feature complete; looks like ready for merge

---
 TODO.txt          |  1 +
 ldm/dream_util.py | 21 +++++++++++--
 ldm/simplet2i.py  |  1 -
 scripts/dream.py  | 78 +++++++++++++++++++++++++----------------------
 src/k-diffusion   |  2 +-
 5 files changed, 61 insertions(+), 42 deletions(-)

diff --git a/TODO.txt b/TODO.txt
index df9aea75ba..32475b43ba 100644
--- a/TODO.txt
+++ b/TODO.txt
@@ -2,6 +2,7 @@ Feature requests:
 
 1. "gobig" mode - split image into strips, scale up, add detail using
    img2img and reassemble with feathering. Issue #66.
+   See https://github.com/jquesnelle/txt2imghd
 
 2. Port basujindal low VRAM optimizations. Issue #62
 
diff --git a/ldm/dream_util.py b/ldm/dream_util.py
index a1d0d3204b..b69a0c1367 100644
--- a/ldm/dream_util.py
+++ b/ldm/dream_util.py
@@ -2,6 +2,7 @@
 import os
 import atexit
 import re
+from math import sqrt,floor,ceil
 from PIL import Image,PngImagePlugin
 
 # -------------------image generation utils-----
@@ -24,7 +25,7 @@ class PngWriter:
             print(e)
         self.files_written.append([self.filepath,seed])
 
-    def unique_filename(self,seed,previouspath):
+    def unique_filename(self,seed,previouspath=None):
         revision = 1
 
         if previouspath is None:
@@ -61,6 +62,22 @@ class PngWriter:
         info = PngImagePlugin.PngInfo()
         info.add_text("Dream",prompt)
         image.save(path,"PNG",pnginfo=info)
+
+    def make_grid(self,image_list,rows=None,cols=None):
+        image_cnt = len(image_list)
+        if None in (rows,cols):
+            rows = floor(sqrt(image_cnt))  # try to make it square
+            cols = ceil(image_cnt/rows)
+        width  = image_list[0].width
+        height = image_list[0].height
+
+        grid_img = Image.new('RGB',(width*cols,height*rows))
+        for r in range(0,rows):
+            for c in range (0,cols):
+                i = r*rows + c
+                grid_img.paste(image_list[i],(c*width,r*height))
+
+        return grid_img
     
 class PromptFormatter():
     def __init__(self,t2i,opt):
@@ -80,8 +97,6 @@ class PromptFormatter():
         switches.append(f'-H{opt.height       or t2i.height}')
         switches.append(f'-C{opt.cfg_scale    or t2i.cfg_scale}')
         switches.append(f'-m{t2i.sampler_name}')
-        if opt.variants:
-            switches.append(f'-v{opt.variants}')
         if opt.init_img:
             switches.append(f'-I{opt.init_img}')
         if opt.strength and opt.init_img is not None:
diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index a3b4ecfcc7..fe0d3819a1 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -20,7 +20,6 @@ from torch import autocast
 from contextlib import contextmanager, nullcontext
 import transformers
 import time
-import math
 import re
 import traceback
 
diff --git a/scripts/dream.py b/scripts/dream.py
index 10acccbfc3..24dac5b927 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -153,54 +153,60 @@ def main_loop(t2i,outdir,parser,log,infile):
             continue
 
         normalized_prompt      = PromptFormatter(t2i,opt).normalize_prompt()
-        variants               = None
+        individual_images      = not opt.grid
 
         try:
             file_writer        = PngWriter(outdir,normalized_prompt,opt.batch_size)
-            callback           = file_writer.write_image
+            callback           = file_writer.write_image if individual_images else None
 
-            t2i.prompt2image(image_callback=callback,
-                             **vars(opt))
-            results      = file_writer.files_written
+            image_list   = t2i.prompt2image(image_callback=callback,**vars(opt))
+            results      = file_writer.files_written     if individual_images else image_list
 
-            if None not in (opt.variants,opt.init_img):
-                variants = generate_variants(t2i,outdir,opt,results)
+            if opt.grid and len(results) > 0:
+                grid_img = file_writer.make_grid([r[0] for r in results])
+                filename = file_writer.unique_filename(results[0][1])
+                seeds    = [a[1] for a in results]
+                results  = [[filename,seeds]]
+                metadata_prompt   = f'{normalized_prompt} -S{results[0][1]}'
+                file_writer.save_image_and_prompt_to_png(grid_img,metadata_prompt,filename)
 
         except AssertionError as e:
             print(e)
             continue
 
+        except OSError as e:
+            print(e)
+            continue
+
         print("Outputs:")
         write_log_message(t2i,normalized_prompt,results,log)
-        if variants is not None:
-            print('Variants:')
-            for vr in variants:
-                write_log_message(t2i,vr[0],vr[1],log)
 
     print("goodbye!")
 
-def generate_variants(t2i,outdir,opt,previous_gens):
-    variants = []
-    print(f"Generating {opt.variants} variant(s)...")
-    newopt = copy.deepcopy(opt)
-    newopt.iterations = 1
-    newopt.variants   = None
-    for r in previous_gens:
-        newopt.init_img = r[0]
-        prompt            = PromptFormatter(t2i,newopt).normalize_prompt()
-        print(f"] generating variant for {newopt.init_img}")
-        for j in range(0,opt.variants):
-            try:
-                file_writer        = PngWriter(outdir,prompt,newopt.batch_size)
-                callback           = file_writer.write_image
-                t2i.prompt2image(image_callback=callback,**vars(newopt))
-                results           = file_writer.files_written
-                variants.append([prompt,results])
-            except AssertionError as e:
-                print(e)
-                continue
-    print(f'{opt.variants} variants generated')
-    return variants
+# variant generation is going to be superseded by a generalized
+# "prompt-morph" functionality
+# def generate_variants(t2i,outdir,opt,previous_gens):
+#     variants = []
+#     print(f"Generating {opt.variants} variant(s)...")
+#     newopt = copy.deepcopy(opt)
+#     newopt.iterations = 1
+#     newopt.variants   = None
+#     for r in previous_gens:
+#         newopt.init_img = r[0]
+#         prompt            = PromptFormatter(t2i,newopt).normalize_prompt()
+#         print(f"] generating variant for {newopt.init_img}")
+#         for j in range(0,opt.variants):
+#             try:
+#                 file_writer        = PngWriter(outdir,prompt,newopt.batch_size)
+#                 callback           = file_writer.write_image
+#                 t2i.prompt2image(image_callback=callback,**vars(newopt))
+#                 results           = file_writer.files_written
+#                 variants.append([prompt,results])
+#             except AssertionError as e:
+#                 print(e)
+#                 continue
+#     print(f'{opt.variants} variants generated')
+#     return variants
                 
 
 def write_log_message(t2i,prompt,results,logfile):
@@ -209,9 +215,6 @@ def write_log_message(t2i,prompt,results,logfile):
     img_num    = 1
     seenit     = {}
 
-    seeds = [a[1] for a in results]
-    seeds = f"(seeds for individual images: {seeds})"
-
     for r in results:
         seed = r[1]
         log_message = (f'{r[0]}: {prompt} -S{seed}')
@@ -275,7 +278,8 @@ def create_cmd_parser():
     parser.add_argument('-i','--individual',action='store_true',help="generate individual files (default)")
     parser.add_argument('-I','--init_img',type=str,help="path to input image for img2img mode (supersedes width and height)")
     parser.add_argument('-f','--strength',default=0.75,type=float,help="strength for noising/unnoising. 0.0 preserves image exactly, 1.0 replaces it completely")
-    parser.add_argument('-v','--variants',type=int,help="in img2img mode, the first generated image will get passed back to img2img to generate the requested number of variants")
+# variants is going to be superseded by a generalized "prompt-morph" function
+#    parser.add_argument('-v','--variants',type=int,help="in img2img mode, the first generated image will get passed back to img2img to generate the requested number of variants")
     parser.add_argument('-x','--skip_normalize',action='store_true',help="skip subprompt weight normalization")
     return parser
 
diff --git a/src/k-diffusion b/src/k-diffusion
index db57990687..ef1bf07627 160000
--- a/src/k-diffusion
+++ b/src/k-diffusion
@@ -1 +1 @@
-Subproject commit db5799068749bf3a6d5845120ed32df16b7d883b
+Subproject commit ef1bf07627c9a10ba9137e68a0206b844544a7d9

From a10baf58082defe05311c2d90614c387651301bd Mon Sep 17 00:00:00 2001
From: Kevin Gibbons <bakkot@gmail.com>
Date: Thu, 25 Aug 2022 15:13:07 -0700
Subject: [PATCH 36/58] factor out exception handler

---
 ldm/simplet2i.py | 248 +++++++++++++++++++++++------------------------
 1 file changed, 124 insertions(+), 124 deletions(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 0ec3d60d98..157f55fbcb 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -52,7 +52,7 @@ t2i = T2I(model       = <path>        // models/ldm/stable-diffusion-v1/model.ck
 # do the slow model initialization
 t2i.load_model()
 
-# Do the fast inference & image generation. Any options passed here 
+# Do the fast inference & image generation. Any options passed here
 # override the default values assigned during class initialization
 # Will call load_model() if the model was not previously loaded and so
 # may be slow at first.
@@ -70,7 +70,7 @@ results = t2i.prompt2png(prompt   = "an astronaut riding a horse",
                          outdir   = "./outputs/,
                          iterations = 3,
                          init_img = "./sketches/horse+rider.png")
-                 
+
 for row in results:
     print(f'filename={row[0]}')
     print(f'seed    ={row[1]}')
@@ -181,7 +181,7 @@ The vast majority of these arguments default to reasonable values.
         outdir = kwargs.get('outdir','outputs/img-samples')
         assert 'init_img' in kwargs,'call to img2img() must include the init_img argument'
         return self.prompt2png(prompt,outdir,**kwargs)
-        
+
     def prompt2image(self,
                      # these are common
                      prompt,
@@ -219,7 +219,7 @@ The vast majority of these arguments default to reasonable values.
            callback                        // a function or method that will be called each time an image is generated
 
         To use the callback, define a function of method that receives two arguments, an Image object
-        and the seed. You can then do whatever you like with the image, including converting it to 
+        and the seed. You can then do whatever you like with the image, including converting it to
         different formats and manipulating it. For example:
 
             def process_image(image,seed):
@@ -252,28 +252,41 @@ The vast majority of these arguments default to reasonable values.
         data = [batch_size * [prompt]]
         scope = autocast if self.precision=="autocast" else nullcontext
 
-        tic    = time.time()
-        if init_img:
-            assert os.path.exists(init_img),f'{init_img}: File not found'
-            results = self._img2img(prompt,
-                                    data=data,precision_scope=scope,
-                                    batch_size=batch_size,iterations=iterations,
-                                    steps=steps,seed=seed,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
-                                    skip_normalize=skip_normalize,
-                                    init_img=init_img,strength=strength,variants=variants,
-                                    callback=image_callback)
-        else:
-            results = self._txt2img(prompt,
-                                    data=data,precision_scope=scope,
-                                    batch_size=batch_size,iterations=iterations,
-                                    steps=steps,seed=seed,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
-                                    skip_normalize=skip_normalize,
-                                    width=width,height=height,
-                                    callback=image_callback)
+        tic = time.time()
+        results = list()
+        def prompt_callback(image, seed):
+            results.append([image, seed])
+            if image_callback is not None:
+                image_callback(image, seed)
+
+        try:
+            if init_img:
+                assert os.path.exists(init_img),f'{init_img}: File not found'
+                self._img2img(prompt,
+                                        data=data,precision_scope=scope,
+                                        batch_size=batch_size,iterations=iterations,
+                                        steps=steps,seed=seed,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
+                                        skip_normalize=skip_normalize,
+                                        init_img=init_img,strength=strength,variants=variants,
+                                        callback=prompt_callback)
+            else:
+                self._txt2img(prompt,
+                                        data=data,precision_scope=scope,
+                                        batch_size=batch_size,iterations=iterations,
+                                        steps=steps,seed=seed,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
+                                        skip_normalize=skip_normalize,
+                                        width=width,height=height,
+                                        callback=prompt_callback)
+        except KeyboardInterrupt:
+            print('*interrupted*')
+            print('Partial results will be returned; if --grid was requested, nothing will be returned.')
+        except RuntimeError as e:
+            print(str(e))
+
         toc  = time.time()
         print(f'{len(results)} images generated in',"%4.2fs"% (toc-tic))
         return results
-            
+
     @torch.no_grad()
     def _txt2img(self,prompt,
                  data,precision_scope,
@@ -292,62 +305,56 @@ The vast majority of these arguments default to reasonable values.
         image_count = 0
 
         # Gawd. Too many levels of indent here. Need to refactor into smaller routines!
-        try:
-            with precision_scope(self.device.type), self.model.ema_scope():
-                all_samples = list()
-                for n in trange(iterations, desc="Sampling"):
-                    seed_everything(seed)
-                    for prompts in tqdm(data, desc="data", dynamic_ncols=True):
-                        uc = None
-                        if cfg_scale != 1.0:
-                            uc = self.model.get_learned_conditioning(batch_size * [""])
-                        if isinstance(prompts, tuple):
-                            prompts = list(prompts)
+        with precision_scope(self.device.type), self.model.ema_scope():
+            all_samples = list()
+            for n in trange(iterations, desc="Sampling"):
+                seed_everything(seed)
+                for prompts in tqdm(data, desc="data", dynamic_ncols=True):
+                    uc = None
+                    if cfg_scale != 1.0:
+                        uc = self.model.get_learned_conditioning(batch_size * [""])
+                    if isinstance(prompts, tuple):
+                        prompts = list(prompts)
 
-                        # weighted sub-prompts
-                        subprompts,weights = T2I._split_weighted_subprompts(prompts[0])
-                        if len(subprompts) > 1:
-                            # i dont know if this is correct.. but it works
-                            c = torch.zeros_like(uc)
-                            # get total weight for normalizing
-                            totalWeight = sum(weights)
-                            # normalize each "sub prompt" and add it
-                            for i in range(0,len(subprompts)):
-                                weight = weights[i]
-                                if not skip_normalize:
-                                    weight = weight / totalWeight
-                                c = torch.add(c,self.model.get_learned_conditioning(subprompts[i]), alpha=weight)
-                        else: # just standard 1 prompt
-                            c = self.model.get_learned_conditioning(prompts)
+                    # weighted sub-prompts
+                    subprompts,weights = T2I._split_weighted_subprompts(prompts[0])
+                    if len(subprompts) > 1:
+                        # i dont know if this is correct.. but it works
+                        c = torch.zeros_like(uc)
+                        # get total weight for normalizing
+                        totalWeight = sum(weights)
+                        # normalize each "sub prompt" and add it
+                        for i in range(0,len(subprompts)):
+                            weight = weights[i]
+                            if not skip_normalize:
+                                weight = weight / totalWeight
+                            c = torch.add(c,self.model.get_learned_conditioning(subprompts[i]), alpha=weight)
+                    else: # just standard 1 prompt
+                        c = self.model.get_learned_conditioning(prompts)
 
-                        shape = [self.latent_channels, height // self.downsampling_factor, width // self.downsampling_factor]
-                        samples_ddim, _ = sampler.sample(S=steps,
-                                                         conditioning=c,
-                                                         batch_size=batch_size,
-                                                         shape=shape,
-                                                         verbose=False,
-                                                         unconditional_guidance_scale=cfg_scale,
-                                                         unconditional_conditioning=uc,
-                                                         eta=ddim_eta)
+                    shape = [self.latent_channels, height // self.downsampling_factor, width // self.downsampling_factor]
+                    samples_ddim, _ = sampler.sample(S=steps,
+                                                        conditioning=c,
+                                                        batch_size=batch_size,
+                                                        shape=shape,
+                                                        verbose=False,
+                                                        unconditional_guidance_scale=cfg_scale,
+                                                        unconditional_conditioning=uc,
+                                                        eta=ddim_eta)
 
-                        x_samples_ddim = self.model.decode_first_stage(samples_ddim)
-                        x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
-                        for x_sample in x_samples_ddim:
-                            x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
-                            image = Image.fromarray(x_sample.astype(np.uint8))
-                            images.append([image,seed])
-                            if callback is not None:
-                                callback(image,seed)
-                                
-                    seed = self._new_seed()
-        except KeyboardInterrupt:
-            print('*interrupted*')
-            print('Partial results will be returned; if --grid was requested, nothing will be returned.')
-        except RuntimeError as e:
-            print(str(e))
+                    x_samples_ddim = self.model.decode_first_stage(samples_ddim)
+                    x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
+                    for x_sample in x_samples_ddim:
+                        x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
+                        image = Image.fromarray(x_sample.astype(np.uint8))
+                        images.append([image,seed])
+                        if callback is not None:
+                            callback(image,seed)
+
+                seed = self._new_seed()
 
         return images
-        
+
     @torch.no_grad()
     def _img2img(self,prompt,
                  data,precision_scope,
@@ -374,62 +381,55 @@ The vast majority of these arguments default to reasonable values.
             init_latent = self.model.get_first_stage_encoding(self.model.encode_first_stage(init_image))  # move to latent space
 
         sampler.make_schedule(ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False)
-        
+
         t_enc = int(strength * steps)
         # print(f"target t_enc is {t_enc} steps")
         images = list()
 
-        try:
-            with precision_scope(self.device.type), self.model.ema_scope():
-                all_samples = list()
-                for n in trange(iterations, desc="Sampling"):
-                    seed_everything(seed)
-                    for prompts in tqdm(data, desc="data", dynamic_ncols=True):
-                        uc = None
-                        if cfg_scale != 1.0:
-                            uc = self.model.get_learned_conditioning(batch_size * [""])
-                        if isinstance(prompts, tuple):
-                            prompts = list(prompts)
+        with precision_scope(self.device.type), self.model.ema_scope():
+            all_samples = list()
+            for n in trange(iterations, desc="Sampling"):
+                seed_everything(seed)
+                for prompts in tqdm(data, desc="data", dynamic_ncols=True):
+                    uc = None
+                    if cfg_scale != 1.0:
+                        uc = self.model.get_learned_conditioning(batch_size * [""])
+                    if isinstance(prompts, tuple):
+                        prompts = list(prompts)
 
-                        # weighted sub-prompts
-                        subprompts,weights = T2I._split_weighted_subprompts(prompts[0])
-                        if len(subprompts) > 1:
-                            # i dont know if this is correct.. but it works
-                            c = torch.zeros_like(uc)
-                            # get total weight for normalizing
-                            totalWeight = sum(weights)
-                            # normalize each "sub prompt" and add it
-                            for i in range(0,len(subprompts)):
-                                weight = weights[i]
-                                if not skip_normalize:
-                                    weight = weight / totalWeight
-                                c = torch.add(c,self.model.get_learned_conditioning(subprompts[i]), alpha=weight)
-                        else: # just standard 1 prompt
-                            c = self.model.get_learned_conditioning(prompts)
+                    # weighted sub-prompts
+                    subprompts,weights = T2I._split_weighted_subprompts(prompts[0])
+                    if len(subprompts) > 1:
+                        # i dont know if this is correct.. but it works
+                        c = torch.zeros_like(uc)
+                        # get total weight for normalizing
+                        totalWeight = sum(weights)
+                        # normalize each "sub prompt" and add it
+                        for i in range(0,len(subprompts)):
+                            weight = weights[i]
+                            if not skip_normalize:
+                                weight = weight / totalWeight
+                            c = torch.add(c,self.model.get_learned_conditioning(subprompts[i]), alpha=weight)
+                    else: # just standard 1 prompt
+                        c = self.model.get_learned_conditioning(prompts)
 
-                        # encode (scaled latent)
-                        z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc]*batch_size).to(self.device))
-                        # decode it
-                        samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=cfg_scale,
-                                                    unconditional_conditioning=uc,)
+                    # encode (scaled latent)
+                    z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc]*batch_size).to(self.device))
+                    # decode it
+                    samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=cfg_scale,
+                                                unconditional_conditioning=uc,)
 
-                        x_samples = self.model.decode_first_stage(samples)
-                        x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
+                    x_samples = self.model.decode_first_stage(samples)
+                    x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
 
-                        for x_sample in x_samples:
-                            x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
-                            image = Image.fromarray(x_sample.astype(np.uint8))
-                            images.append([image,seed])
-                            if callback is not None:
-                                callback(image,seed)
-                    seed = self._new_seed()
+                    for x_sample in x_samples:
+                        x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
+                        image = Image.fromarray(x_sample.astype(np.uint8))
+                        images.append([image,seed])
+                        if callback is not None:
+                            callback(image,seed)
+                seed = self._new_seed()
 
-        except KeyboardInterrupt:
-            print('*interrupted*')
-            print('Partial results will be returned; if --grid was requested, nothing will be returned.')
-        except RuntimeError as e:
-            print("Oops! A runtime error has occurred. If this is unexpected, please copy-and-paste this stack trace and post it as an Issue to http://github.com/lstein/stable-diffusion")
-            traceback.print_exc()
         return images
 
     def _new_seed(self):
@@ -476,7 +476,7 @@ The vast majority of these arguments default to reasonable values.
             print(msg)
 
         return self.model
-                
+
     def _load_model_from_config(self, config, ckpt):
         print(f"Loading model from {ckpt}")
         pl_sd = torch.load(ckpt, map_location="cpu")
@@ -507,7 +507,7 @@ The vast majority of these arguments default to reasonable values.
 
     def _split_weighted_subprompts(text):
         """
-        grabs all text up to the first occurrence of ':' 
+        grabs all text up to the first occurrence of ':'
         uses the grabbed text as a sub-prompt, and takes the value following ':' as weight
         if ':' has no value defined, defaults to 1.0
         repeats until no text remaining
@@ -523,7 +523,7 @@ The vast majority of these arguments default to reasonable values.
                 remaining -= idx
                 # remove from main text
                 text = text[idx+1:]
-                # find value for weight 
+                # find value for weight
                 if " " in text:
                     idx = text.index(" ") # first occurence
                 else: # no space, read to end

From 078859207df4a6149cb8cdf7d4d9b4bb1fef1ae6 Mon Sep 17 00:00:00 2001
From: Kevin Gibbons <bakkot@gmail.com>
Date: Thu, 25 Aug 2022 15:19:44 -0700
Subject: [PATCH 37/58] factor out loop

---
 ldm/simplet2i.py | 226 ++++++++++++++++++++---------------------------
 1 file changed, 94 insertions(+), 132 deletions(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 157f55fbcb..31721906d7 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -216,7 +216,7 @@ The vast majority of these arguments default to reasonable values.
            strength                        // strength for noising/unnoising init_img. 0.0 preserves image exactly, 1.0 replaces it completely
            ddim_eta                        // image randomness (eta=0.0 means the same seed always produces the same image)
            variants                        // if >0, the 1st generated image will be passed back to img2img to generate the requested number of variants
-           callback                        // a function or method that will be called each time an image is generated
+           image_callback                  // a function or method that will be called each time an image is generated
 
         To use the callback, define a function of method that receives two arguments, an Image object
         and the seed. You can then do whatever you like with the image, including converting it to
@@ -249,34 +249,40 @@ The vast majority of these arguments default to reasonable values.
             height = h
             width  = w
 
-        data = [batch_size * [prompt]]
         scope = autocast if self.precision=="autocast" else nullcontext
 
         tic = time.time()
         results = list()
-        def prompt_callback(image, seed):
-            results.append([image, seed])
-            if image_callback is not None:
-                image_callback(image, seed)
 
         try:
             if init_img:
                 assert os.path.exists(init_img),f'{init_img}: File not found'
-                self._img2img(prompt,
-                                        data=data,precision_scope=scope,
-                                        batch_size=batch_size,iterations=iterations,
-                                        steps=steps,seed=seed,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
+                get_images = self._img2img(
+                                        precision_scope=scope,
+                                        batch_size=batch_size,
+                                        steps=steps,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
                                         skip_normalize=skip_normalize,
-                                        init_img=init_img,strength=strength,variants=variants,
-                                        callback=prompt_callback)
+                                        init_img=init_img,strength=strength)
             else:
-                self._txt2img(prompt,
-                                        data=data,precision_scope=scope,
-                                        batch_size=batch_size,iterations=iterations,
-                                        steps=steps,seed=seed,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
+                get_images = self._txt2img(
+                                        precision_scope=scope,
+                                        batch_size=batch_size,
+                                        steps=steps,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
                                         skip_normalize=skip_normalize,
-                                        width=width,height=height,
-                                        callback=prompt_callback)
+                                        width=width,height=height)
+
+            data = [batch_size * [prompt]]
+            with scope(self.device.type), self.model.ema_scope():
+                for n in trange(iterations, desc="Sampling"):
+                    seed_everything(seed)
+                    for prompts in tqdm(data, desc="data", dynamic_ncols=True):
+                        iter_images = get_images(prompts)
+                        for image in iter_images:
+                            results.append([image, seed])
+                            if image_callback is not None:
+                                image_callback(image,seed)
+                    seed = self._new_seed()
+
         except KeyboardInterrupt:
             print('*interrupted*')
             print('Partial results will be returned; if --grid was requested, nothing will be returned.')
@@ -288,84 +294,41 @@ The vast majority of these arguments default to reasonable values.
         return results
 
     @torch.no_grad()
-    def _txt2img(self,prompt,
-                 data,precision_scope,
-                 batch_size,iterations,
-                 steps,seed,cfg_scale,ddim_eta,
+    def _txt2img(self,
+                 precision_scope,
+                 batch_size,
+                 steps,cfg_scale,ddim_eta,
                  skip_normalize,
-                 width,height,
-                 callback):    # the callback is called each time a new Image is generated
+                 width,height):
         """
-        Generate an image from the prompt, writing iteration images into the outdir
-        The output is a list of lists in the format: [[image1,seed1], [image2,seed2],...]
+        Generate an image from the prompt
         """
 
-        sampler         = self.sampler
-        images = list()
-        image_count = 0
+        sampler = self.sampler
 
-        # Gawd. Too many levels of indent here. Need to refactor into smaller routines!
-        with precision_scope(self.device.type), self.model.ema_scope():
-            all_samples = list()
-            for n in trange(iterations, desc="Sampling"):
-                seed_everything(seed)
-                for prompts in tqdm(data, desc="data", dynamic_ncols=True):
-                    uc = None
-                    if cfg_scale != 1.0:
-                        uc = self.model.get_learned_conditioning(batch_size * [""])
-                    if isinstance(prompts, tuple):
-                        prompts = list(prompts)
-
-                    # weighted sub-prompts
-                    subprompts,weights = T2I._split_weighted_subprompts(prompts[0])
-                    if len(subprompts) > 1:
-                        # i dont know if this is correct.. but it works
-                        c = torch.zeros_like(uc)
-                        # get total weight for normalizing
-                        totalWeight = sum(weights)
-                        # normalize each "sub prompt" and add it
-                        for i in range(0,len(subprompts)):
-                            weight = weights[i]
-                            if not skip_normalize:
-                                weight = weight / totalWeight
-                            c = torch.add(c,self.model.get_learned_conditioning(subprompts[i]), alpha=weight)
-                    else: # just standard 1 prompt
-                        c = self.model.get_learned_conditioning(prompts)
-
-                    shape = [self.latent_channels, height // self.downsampling_factor, width // self.downsampling_factor]
-                    samples_ddim, _ = sampler.sample(S=steps,
-                                                        conditioning=c,
-                                                        batch_size=batch_size,
-                                                        shape=shape,
-                                                        verbose=False,
-                                                        unconditional_guidance_scale=cfg_scale,
-                                                        unconditional_conditioning=uc,
-                                                        eta=ddim_eta)
-
-                    x_samples_ddim = self.model.decode_first_stage(samples_ddim)
-                    x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
-                    for x_sample in x_samples_ddim:
-                        x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
-                        image = Image.fromarray(x_sample.astype(np.uint8))
-                        images.append([image,seed])
-                        if callback is not None:
-                            callback(image,seed)
-
-                seed = self._new_seed()
-
-        return images
+        def get_images(prompts):
+            uc, c = self._get_uc_and_c(prompts, batch_size, skip_normalize)
+            shape = [self.latent_channels, height // self.downsampling_factor, width // self.downsampling_factor]
+            samples, _ = sampler.sample(S=steps,
+                                                conditioning=c,
+                                                batch_size=batch_size,
+                                                shape=shape,
+                                                verbose=False,
+                                                unconditional_guidance_scale=cfg_scale,
+                                                unconditional_conditioning=uc,
+                                                eta=ddim_eta)
+            return self._samples_to_images(samples)
+        return get_images
 
     @torch.no_grad()
-    def _img2img(self,prompt,
-                 data,precision_scope,
-                 batch_size,iterations,
-                 steps,seed,cfg_scale,ddim_eta,
+    def _img2img(self,
+                 precision_scope,
+                 batch_size,
+                 steps,cfg_scale,ddim_eta,
                  skip_normalize,
-                 init_img,strength,variants,
-                 callback):
+                 init_img,strength):
         """
-        Generate an image from the prompt and the initial image, writing iteration images into the outdir
-        The output is a list of lists in the format: [[image,seed1], [image,seed2],...]
+        Generate an image from the prompt and the initial image
         """
 
         # PLMS sampler not supported yet, so ignore previous sampler
@@ -384,54 +347,53 @@ The vast majority of these arguments default to reasonable values.
 
         t_enc = int(strength * steps)
         # print(f"target t_enc is {t_enc} steps")
+
+        def get_images(prompts):
+            uc, c = self._get_uc_and_c(prompts, batch_size, skip_normalize)
+
+            # encode (scaled latent)
+            z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc]*batch_size).to(self.device))
+            # decode it
+            samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=cfg_scale,
+                                        unconditional_conditioning=uc,)
+            return self._samples_to_images(samples)
+        return get_images
+
+    # TODO: does this actually need to run every loop? does anything in it vary by random seed?
+    def _get_uc_and_c(self, prompts, batch_size, skip_normalize):
+        if isinstance(prompts, tuple):
+            prompts = list(prompts)
+
+        uc = self.model.get_learned_conditioning(batch_size * [""])
+
+        # weighted sub-prompts
+        subprompts,weights = T2I._split_weighted_subprompts(prompts[0])
+        if len(subprompts) > 1:
+            # i dont know if this is correct.. but it works
+            c = torch.zeros_like(uc)
+            # get total weight for normalizing
+            totalWeight = sum(weights)
+            # normalize each "sub prompt" and add it
+            for i in range(0,len(subprompts)):
+                weight = weights[i]
+                if not skip_normalize:
+                    weight = weight / totalWeight
+                c = torch.add(c,self.model.get_learned_conditioning(subprompts[i]), alpha=weight)
+        else: # just standard 1 prompt
+            c = self.model.get_learned_conditioning(prompts)
+        return (uc, c)
+
+    def _samples_to_images(self, samples):
+        x_samples = self.model.decode_first_stage(samples)
+        x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
         images = list()
-
-        with precision_scope(self.device.type), self.model.ema_scope():
-            all_samples = list()
-            for n in trange(iterations, desc="Sampling"):
-                seed_everything(seed)
-                for prompts in tqdm(data, desc="data", dynamic_ncols=True):
-                    uc = None
-                    if cfg_scale != 1.0:
-                        uc = self.model.get_learned_conditioning(batch_size * [""])
-                    if isinstance(prompts, tuple):
-                        prompts = list(prompts)
-
-                    # weighted sub-prompts
-                    subprompts,weights = T2I._split_weighted_subprompts(prompts[0])
-                    if len(subprompts) > 1:
-                        # i dont know if this is correct.. but it works
-                        c = torch.zeros_like(uc)
-                        # get total weight for normalizing
-                        totalWeight = sum(weights)
-                        # normalize each "sub prompt" and add it
-                        for i in range(0,len(subprompts)):
-                            weight = weights[i]
-                            if not skip_normalize:
-                                weight = weight / totalWeight
-                            c = torch.add(c,self.model.get_learned_conditioning(subprompts[i]), alpha=weight)
-                    else: # just standard 1 prompt
-                        c = self.model.get_learned_conditioning(prompts)
-
-                    # encode (scaled latent)
-                    z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc]*batch_size).to(self.device))
-                    # decode it
-                    samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=cfg_scale,
-                                                unconditional_conditioning=uc,)
-
-                    x_samples = self.model.decode_first_stage(samples)
-                    x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
-
-                    for x_sample in x_samples:
-                        x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
-                        image = Image.fromarray(x_sample.astype(np.uint8))
-                        images.append([image,seed])
-                        if callback is not None:
-                            callback(image,seed)
-                seed = self._new_seed()
-
+        for x_sample in x_samples:
+            x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
+            image = Image.fromarray(x_sample.astype(np.uint8))
+            images.append(image)
         return images
 
+
     def _new_seed(self):
         self.seed = random.randrange(0,np.iinfo(np.uint32).max)
         return self.seed

From 31b22e057d12daddd9a9b79c8d156288dfad3b95 Mon Sep 17 00:00:00 2001
From: Kevin Gibbons <bakkot@gmail.com>
Date: Thu, 25 Aug 2022 17:01:17 -0700
Subject: [PATCH 38/58] switch to generators

---
 ldm/simplet2i.py | 30 ++++++++++++++----------------
 1 file changed, 14 insertions(+), 16 deletions(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 31721906d7..d63502831d 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -257,26 +257,25 @@ The vast majority of these arguments default to reasonable values.
         try:
             if init_img:
                 assert os.path.exists(init_img),f'{init_img}: File not found'
-                get_images = self._img2img(
+                images_iterator = self._img2img(prompt,
                                         precision_scope=scope,
                                         batch_size=batch_size,
                                         steps=steps,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
                                         skip_normalize=skip_normalize,
                                         init_img=init_img,strength=strength)
             else:
-                get_images = self._txt2img(
+                images_iterator = self._txt2img(prompt,
                                         precision_scope=scope,
                                         batch_size=batch_size,
                                         steps=steps,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
                                         skip_normalize=skip_normalize,
                                         width=width,height=height)
 
-            data = [batch_size * [prompt]]
             with scope(self.device.type), self.model.ema_scope():
                 for n in trange(iterations, desc="Sampling"):
                     seed_everything(seed)
-                    for prompts in tqdm(data, desc="data", dynamic_ncols=True):
-                        iter_images = get_images(prompts)
+                    for batch_item in tqdm(range(batch_size), desc="data", dynamic_ncols=True):
+                        iter_images = next(images_iterator)
                         for image in iter_images:
                             results.append([image, seed])
                             if image_callback is not None:
@@ -295,19 +294,20 @@ The vast majority of these arguments default to reasonable values.
 
     @torch.no_grad()
     def _txt2img(self,
+                 prompt,
                  precision_scope,
                  batch_size,
                  steps,cfg_scale,ddim_eta,
                  skip_normalize,
                  width,height):
         """
-        Generate an image from the prompt
+        An infinite iterator of images from the prompt.
         """
 
         sampler = self.sampler
 
-        def get_images(prompts):
-            uc, c = self._get_uc_and_c(prompts, batch_size, skip_normalize)
+        while True:
+            uc, c = self._get_uc_and_c(prompt, batch_size, skip_normalize)
             shape = [self.latent_channels, height // self.downsampling_factor, width // self.downsampling_factor]
             samples, _ = sampler.sample(S=steps,
                                                 conditioning=c,
@@ -317,18 +317,18 @@ The vast majority of these arguments default to reasonable values.
                                                 unconditional_guidance_scale=cfg_scale,
                                                 unconditional_conditioning=uc,
                                                 eta=ddim_eta)
-            return self._samples_to_images(samples)
-        return get_images
+            yield self._samples_to_images(samples)
 
     @torch.no_grad()
     def _img2img(self,
+                 prompt,
                  precision_scope,
                  batch_size,
                  steps,cfg_scale,ddim_eta,
                  skip_normalize,
                  init_img,strength):
         """
-        Generate an image from the prompt and the initial image
+        An infinite iterator of images from the prompt and the initial image
         """
 
         # PLMS sampler not supported yet, so ignore previous sampler
@@ -348,16 +348,15 @@ The vast majority of these arguments default to reasonable values.
         t_enc = int(strength * steps)
         # print(f"target t_enc is {t_enc} steps")
 
-        def get_images(prompts):
-            uc, c = self._get_uc_and_c(prompts, batch_size, skip_normalize)
+        while True:
+            uc, c = self._get_uc_and_c(prompt, batch_size, skip_normalize)
 
             # encode (scaled latent)
             z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc]*batch_size).to(self.device))
             # decode it
             samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=cfg_scale,
                                         unconditional_conditioning=uc,)
-            return self._samples_to_images(samples)
-        return get_images
+            yield self._samples_to_images(samples)
 
     # TODO: does this actually need to run every loop? does anything in it vary by random seed?
     def _get_uc_and_c(self, prompts, batch_size, skip_normalize):
@@ -393,7 +392,6 @@ The vast majority of these arguments default to reasonable values.
             images.append(image)
         return images
 
-
     def _new_seed(self):
         self.seed = random.randrange(0,np.iinfo(np.uint32).max)
         return self.seed

From 797de3257cf3507dba8672f4387995821a42adcc Mon Sep 17 00:00:00 2001
From: Kevin Gibbons <bakkot@gmail.com>
Date: Thu, 25 Aug 2022 17:16:07 -0700
Subject: [PATCH 39/58] fix batch_size

---
 ldm/simplet2i.py | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index d63502831d..3187bee090 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -274,12 +274,11 @@ The vast majority of these arguments default to reasonable values.
             with scope(self.device.type), self.model.ema_scope():
                 for n in trange(iterations, desc="Sampling"):
                     seed_everything(seed)
-                    for batch_item in tqdm(range(batch_size), desc="data", dynamic_ncols=True):
-                        iter_images = next(images_iterator)
-                        for image in iter_images:
-                            results.append([image, seed])
-                            if image_callback is not None:
-                                image_callback(image,seed)
+                    iter_images = next(images_iterator)
+                    for image in iter_images:
+                        results.append([image, seed])
+                        if image_callback is not None:
+                            image_callback(image,seed)
                     seed = self._new_seed()
 
         except KeyboardInterrupt:
@@ -359,14 +358,12 @@ The vast majority of these arguments default to reasonable values.
             yield self._samples_to_images(samples)
 
     # TODO: does this actually need to run every loop? does anything in it vary by random seed?
-    def _get_uc_and_c(self, prompts, batch_size, skip_normalize):
-        if isinstance(prompts, tuple):
-            prompts = list(prompts)
+    def _get_uc_and_c(self, prompt, batch_size, skip_normalize):
 
         uc = self.model.get_learned_conditioning(batch_size * [""])
 
         # weighted sub-prompts
-        subprompts,weights = T2I._split_weighted_subprompts(prompts[0])
+        subprompts,weights = T2I._split_weighted_subprompts(prompt)
         if len(subprompts) > 1:
             # i dont know if this is correct.. but it works
             c = torch.zeros_like(uc)
@@ -377,9 +374,9 @@ The vast majority of these arguments default to reasonable values.
                 weight = weights[i]
                 if not skip_normalize:
                     weight = weight / totalWeight
-                c = torch.add(c,self.model.get_learned_conditioning(subprompts[i]), alpha=weight)
+                c = torch.add(c, self.model.get_learned_conditioning(batch_size * [subprompts[i]]), alpha=weight)
         else: # just standard 1 prompt
-            c = self.model.get_learned_conditioning(prompts)
+            c = self.model.get_learned_conditioning(batch_size * [prompt])
         return (uc, c)
 
     def _samples_to_images(self, samples):

From b49475a54ff3f49ff68d129cea6146ca4b55996c Mon Sep 17 00:00:00 2001
From: tesseractcat <tesseractcats@gmail.com>
Date: Thu, 25 Aug 2022 21:06:17 -0400
Subject: [PATCH 40/58] Keep a log of requests for dream_web

---
 scripts/dream_web.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/scripts/dream_web.py b/scripts/dream_web.py
index d48d5ac9ec..227e8acdc7 100644
--- a/scripts/dream_web.py
+++ b/scripts/dream_web.py
@@ -85,6 +85,12 @@ class DreamServer(BaseHTTPRequestHandler):
         print(f"Prompt generated with output: {outputs}")
 
         post_data['initimg'] = '' # Don't send init image back
+
+        # Append post_data to log
+        with open("./outputs/img-samples/dream_web_log.txt", "a") as log:
+            for output in outputs:
+                log.write(f"{output[0]}: {json.dumps(post_data)}\n")
+
         outputs = [x + [post_data] for x in outputs] # Append config to each output
         result = {'outputs': outputs}
         self.wfile.write(bytes(json.dumps(result), "utf-8"))

From c38b6964b4e6c350004bcd4180caeae3b77f8e4b Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 22:19:12 -0400
Subject: [PATCH 41/58] improved inline error messages slightly

---
 ldm/simplet2i.py | 1 +
 scripts/dream.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 3187bee090..21f973988b 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -286,6 +286,7 @@ The vast majority of these arguments default to reasonable values.
             print('Partial results will be returned; if --grid was requested, nothing will be returned.')
         except RuntimeError as e:
             print(str(e))
+            print('Are you sure your system has an adequate NVIDIA GPU?')
 
         toc  = time.time()
         print(f'{len(results)} images generated in',"%4.2fs"% (toc-tic))
diff --git a/scripts/dream.py b/scripts/dream.py
index 24dac5b927..b0a31b63e0 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -252,7 +252,7 @@ def create_argv_parser():
                         '-o',
                         type=str,
                         default="outputs/img-samples",
-                        help="directory in which to place generated images and a log of prompts and seeds")
+                        help="directory in which to place generated images and a log of prompts and seeds (outputs/img-samples")
     parser.add_argument('--embedding_path',
                         type=str,
                         help="Path to a pre-trained embedding manager checkpoint - can only be set on command line")

From f1bed52530fdbabeab9f9ef029ac6a9b479ff8a9 Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 22:49:15 -0400
Subject: [PATCH 42/58] moved dream utilities into their own subfolder

---
 ldm/{dream_util.py => dream/pngwriter.py} | 100 +++-------------------
 ldm/dream/readline.py                     |  94 ++++++++++++++++++++
 ldm/simplet2i.py                          |   2 +-
 scripts/dream.py                          |   4 +-
 4 files changed, 107 insertions(+), 93 deletions(-)
 rename ldm/{dream_util.py => dream/pngwriter.py} (56%)
 create mode 100644 ldm/dream/readline.py

diff --git a/ldm/dream_util.py b/ldm/dream/pngwriter.py
similarity index 56%
rename from ldm/dream_util.py
rename to ldm/dream/pngwriter.py
index b69a0c1367..aca7b47c21 100644
--- a/ldm/dream_util.py
+++ b/ldm/dream/pngwriter.py
@@ -1,6 +1,14 @@
-'''Utilities for dealing with PNG images and their path names'''
+'''
+Two helper classes for dealing with PNG images and their path names.
+PngWriter -- Converts Images generated by T2I into PNGs, finds
+             appropriate names for them, and writes prompt metadata
+             into the PNG. Intended to be subclassable in order to
+             create more complex naming schemes, including using the
+             prompt for file/directory names.
+PromptFormatter -- Utility for converting a Namespace of prompt parameters
+             back into a formatted prompt string with command-line switches.
+'''
 import os
-import atexit
 import re
 from math import sqrt,floor,ceil
 from PIL import Image,PngImagePlugin
@@ -105,91 +113,3 @@ class PromptFormatter():
             switches.append('-F')
         return ' '.join(switches)
 
-# ---------------readline utilities---------------------
-try:
-    import readline
-    readline_available = True
-except:
-    readline_available = False
-
-class Completer():
-    def __init__(self,options):
-        self.options = sorted(options)
-        return
-
-    def complete(self,text,state):
-        buffer = readline.get_line_buffer()
-
-        if text.startswith(('-I','--init_img')):
-            return self._path_completions(text,state,('.png'))
-
-        if buffer.strip().endswith('cd') or text.startswith(('.','/')):
-            return self._path_completions(text,state,())
-
-        response = None
-        if state == 0:
-            # This is the first time for this text, so build a match list.
-            if text:
-                self.matches = [s 
-                                for s in self.options
-                                if s and s.startswith(text)]
-            else:
-                self.matches = self.options[:]
-
-        # Return the state'th item from the match list,
-        # if we have that many.
-        try:
-            response = self.matches[state]
-        except IndexError:
-            response = None
-        return response
-
-    def _path_completions(self,text,state,extensions):
-        # get the path so far
-        if text.startswith('-I'):
-            path = text.replace('-I','',1).lstrip()
-        elif text.startswith('--init_img='):
-            path = text.replace('--init_img=','',1).lstrip()
-        else:
-            path = text
-
-        matches  = list()
-
-        path = os.path.expanduser(path)
-        if len(path)==0:
-            matches.append(text+'./')
-        else:
-            dir  = os.path.dirname(path)
-            dir_list = os.listdir(dir)
-            for n in dir_list:
-                if n.startswith('.') and len(n)>1:
-                    continue
-                full_path = os.path.join(dir,n)
-                if full_path.startswith(path):
-                    if os.path.isdir(full_path):
-                        matches.append(os.path.join(os.path.dirname(text),n)+'/')
-                    elif n.endswith(extensions):
-                        matches.append(os.path.join(os.path.dirname(text),n))
-
-        try:
-            response = matches[state]
-        except IndexError:
-            response = None
-        return response
-
-if readline_available:
-    readline.set_completer(Completer(['cd','pwd',
-                                      '--steps','-s','--seed','-S','--iterations','-n','--batch_size','-b',
-                                      '--width','-W','--height','-H','--cfg_scale','-C','--grid','-g',
-                                      '--individual','-i','--init_img','-I','--strength','-f','-v','--variants']).complete)
-    readline.set_completer_delims(" ")
-    readline.parse_and_bind('tab: complete')
-
-    histfile = os.path.join(os.path.expanduser('~'),".dream_history")
-    try:
-        readline.read_history_file(histfile)
-        readline.set_history_length(1000)
-    except FileNotFoundError:
-        pass
-    atexit.register(readline.write_history_file,histfile)
-
diff --git a/ldm/dream/readline.py b/ldm/dream/readline.py
new file mode 100644
index 0000000000..f46ac6e23a
--- /dev/null
+++ b/ldm/dream/readline.py
@@ -0,0 +1,94 @@
+'''
+Readline helper functions for dream.py (linux and mac only).
+'''
+import os
+import re
+import atexit
+# ---------------readline utilities---------------------
+try:
+    import readline
+    readline_available = True
+except:
+    readline_available = False
+
+class Completer():
+    def __init__(self,options):
+        self.options = sorted(options)
+        return
+
+    def complete(self,text,state):
+        buffer = readline.get_line_buffer()
+
+        if text.startswith(('-I','--init_img')):
+            return self._path_completions(text,state,('.png'))
+
+        if buffer.strip().endswith('cd') or text.startswith(('.','/')):
+            return self._path_completions(text,state,())
+
+        response = None
+        if state == 0:
+            # This is the first time for this text, so build a match list.
+            if text:
+                self.matches = [s 
+                                for s in self.options
+                                if s and s.startswith(text)]
+            else:
+                self.matches = self.options[:]
+
+        # Return the state'th item from the match list,
+        # if we have that many.
+        try:
+            response = self.matches[state]
+        except IndexError:
+            response = None
+        return response
+
+    def _path_completions(self,text,state,extensions):
+        # get the path so far
+        if text.startswith('-I'):
+            path = text.replace('-I','',1).lstrip()
+        elif text.startswith('--init_img='):
+            path = text.replace('--init_img=','',1).lstrip()
+        else:
+            path = text
+
+        matches  = list()
+
+        path = os.path.expanduser(path)
+        if len(path)==0:
+            matches.append(text+'./')
+        else:
+            dir  = os.path.dirname(path)
+            dir_list = os.listdir(dir)
+            for n in dir_list:
+                if n.startswith('.') and len(n)>1:
+                    continue
+                full_path = os.path.join(dir,n)
+                if full_path.startswith(path):
+                    if os.path.isdir(full_path):
+                        matches.append(os.path.join(os.path.dirname(text),n)+'/')
+                    elif n.endswith(extensions):
+                        matches.append(os.path.join(os.path.dirname(text),n))
+
+        try:
+            response = matches[state]
+        except IndexError:
+            response = None
+        return response
+
+if readline_available:
+    readline.set_completer(Completer(['cd','pwd',
+                                      '--steps','-s','--seed','-S','--iterations','-n','--batch_size','-b',
+                                      '--width','-W','--height','-H','--cfg_scale','-C','--grid','-g',
+                                      '--individual','-i','--init_img','-I','--strength','-f','-v','--variants']).complete)
+    readline.set_completer_delims(" ")
+    readline.parse_and_bind('tab: complete')
+
+    histfile = os.path.join(os.path.expanduser('~'),".dream_history")
+    try:
+        readline.read_history_file(histfile)
+        readline.set_history_length(1000)
+    except FileNotFoundError:
+        pass
+    atexit.register(readline.write_history_file,histfile)
+
diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 21f973988b..80a0194957 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -27,7 +27,7 @@ from ldm.util import instantiate_from_config
 from ldm.models.diffusion.ddim     import DDIMSampler
 from ldm.models.diffusion.plms     import PLMSSampler
 from ldm.models.diffusion.ksampler import KSampler
-from ldm.dream_util                import PngWriter
+from ldm.dream.pngwriter           import PngWriter
 
 """Simplified text to image API for stable diffusion/latent diffusion
 
diff --git a/scripts/dream.py b/scripts/dream.py
index b0a31b63e0..b9090e79f4 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -6,8 +6,8 @@ import shlex
 import os
 import sys
 import copy
-from ldm.dream_util import Completer,PngWriter,PromptFormatter
-
+import ldm.dream.readline
+from   ldm.dream.pngwriter import PngWriter,PromptFormatter
 debugging = False
 
 def main():

From 5711b6d611ed7fb892ade5e45ebf17850d3abb0d Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Thu, 25 Aug 2022 22:57:30 -0400
Subject: [PATCH 43/58] Add optional GFPGAN support

---
 README.md        | 30 +++++++++++++++++++
 ldm/simplet2i.py | 31 ++++++++++++++++++--
 scripts/dream.py | 75 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 134 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 2af4a2a53a..6260032d3f 100644
--- a/README.md
+++ b/README.md
@@ -80,6 +80,36 @@ You may also pass a -v<count> option to generate count variants on the original
 passing the first generated image back into img2img the requested number of times. It generates interesting
 variants.
 
+## GFPGAN Support
+
+This script also provides the ability to invoke GFPGAN after image generation. Doing so will enhance faces
+and optionally upscale the image to a higher resolution.
+
+To use the ability, clone the [GFPGAN repository](https://github.com/TencentARC/GFPGAN) and follow their
+installation instructions. By default, we expect GFPGAN to be installed in a 'gfpgan' sibling directory.
+
+You may also want to install Real-ESRGAN, if you want to enhance non-face regions in the image by installing
+the pip Real-ESRGAN package.
+```
+pip install realesrgan
+
+```
+
+Now, you can run this script by adding the --gfpgan option. Any issues with GFPGAN will be reported on initialization.
+
+When generating prompts, add a -G or --gfpgan_strength option to control the strength of the GFPGAN enhancement.
+0.0 is no enhancement, 1.0 is maximum enhancement.
+
+So for instance, to apply the maximum strength:
+~~~~
+dream> a man wearing a pineapple hat -G 1
+~~~~
+
+That's it!
+
+There's also a bunch of options to control GFPGAN settings when starting the script for different configs that you can
+read about in the help text. This will let you control where GFPGAN is installed, if upsampling is enabled, the upsampler to use and the model path.
+
 ## Barebones Web Server
 
 As of version 1.10, this distribution comes with a bare bones web server (see screenshot). To use it,
diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 0ec3d60d98..ab40330e43 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -132,7 +132,8 @@ The vast majority of these arguments default to reasonable values.
                  strength=0.75, # default in scripts/img2img.py
                  embedding_path=None,
                  latent_diffusion_weights=False,  # just to keep track of this parameter when regenerating prompt
-                 device='cuda'
+                 device='cuda',
+                 gfpgan=None,
     ):
         self.batch_size      = batch_size
         self.iterations = iterations
@@ -154,6 +155,7 @@ The vast majority of these arguments default to reasonable values.
         self.sampler    = None
         self.latent_diffusion_weights=latent_diffusion_weights
         self.device = device
+        self.gfpgan = gfpgan
         if seed is None:
             self.seed = self._new_seed()
         else:
@@ -199,6 +201,7 @@ The vast majority of these arguments default to reasonable values.
                      # these are specific to img2img
                      init_img=None,
                      strength=None,
+                     gfpgan_strength=None,
                      variants=None,
                      **args):   # eat up additional cruft
         '''
@@ -214,6 +217,7 @@ The vast majority of these arguments default to reasonable values.
            cfg_scale                       // how strongly the prompt influences the image (7.5) (must be >1)
            init_img                        // path to an initial image - its dimensions override width and height
            strength                        // strength for noising/unnoising init_img. 0.0 preserves image exactly, 1.0 replaces it completely
+           gfpgan_strength                 // strength for GFPGAN. 0.0 preserves image exactly, 1.0 replaces it completely
            ddim_eta                        // image randomness (eta=0.0 means the same seed always produces the same image)
            variants                        // if >0, the 1st generated image will be passed back to img2img to generate the requested number of variants
            callback                        // a function or method that will be called each time an image is generated
@@ -260,7 +264,8 @@ The vast majority of these arguments default to reasonable values.
                                     batch_size=batch_size,iterations=iterations,
                                     steps=steps,seed=seed,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
                                     skip_normalize=skip_normalize,
-                                    init_img=init_img,strength=strength,variants=variants,
+                                    init_img=init_img,strength=strength,
+                                    gfpgan_strength=gfpgan_strength,variants=variants,
                                     callback=image_callback)
         else:
             results = self._txt2img(prompt,
@@ -268,6 +273,7 @@ The vast majority of these arguments default to reasonable values.
                                     batch_size=batch_size,iterations=iterations,
                                     steps=steps,seed=seed,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
                                     skip_normalize=skip_normalize,
+                                    gfpgan_strength=gfpgan_strength,
                                     width=width,height=height,
                                     callback=image_callback)
         toc  = time.time()
@@ -280,6 +286,7 @@ The vast majority of these arguments default to reasonable values.
                  batch_size,iterations,
                  steps,seed,cfg_scale,ddim_eta,
                  skip_normalize,
+                 gfpgan_strength,
                  width,height,
                  callback):    # the callback is called each time a new Image is generated
         """
@@ -335,6 +342,8 @@ The vast majority of these arguments default to reasonable values.
                         for x_sample in x_samples_ddim:
                             x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                             image = Image.fromarray(x_sample.astype(np.uint8))
+                            if gfpgan_strength > 0:
+                                image = self._run_gfpgan(image, gfpgan_strength)
                             images.append([image,seed])
                             if callback is not None:
                                 callback(image,seed)
@@ -354,6 +363,7 @@ The vast majority of these arguments default to reasonable values.
                  batch_size,iterations,
                  steps,seed,cfg_scale,ddim_eta,
                  skip_normalize,
+                 gfpgan_strength,
                  init_img,strength,variants,
                  callback):
         """
@@ -419,6 +429,8 @@ The vast majority of these arguments default to reasonable values.
                         for x_sample in x_samples:
                             x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                             image = Image.fromarray(x_sample.astype(np.uint8))
+                            if gfpgan_strength > 0:
+                                image = self._run_gfpgan(image, gfpgan_strength)
                             images.append([image,seed])
                             if callback is not None:
                                 callback(image,seed)
@@ -549,3 +561,18 @@ The vast majority of these arguments default to reasonable values.
                     weights.append(1.0)
                 remaining = 0
         return prompts, weights
+
+    def _run_gfpgan(self, image, strength):
+        if (self.gfpgan is None):
+            print(f"GFPGAN not initialized, it must be loaded via the --gfpgan argument")
+            return image
+        
+        image = image.convert("RGB")
+
+        cropped_faces, restored_faces, restored_img = self.gfpgan.enhance(np.array(image, dtype=np.uint8), has_aligned=False, only_center_face=False, paste_back=True)
+        res = Image.fromarray(restored_img)
+
+        if strength < 1.0:
+            res = Image.blend(image, res, strength)
+
+        return res
diff --git a/scripts/dream.py b/scripts/dream.py
index 24dac5b927..c49340d655 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -6,6 +6,7 @@ import shlex
 import os
 import sys
 import copy
+
 from ldm.dream_util import Completer,PngWriter,PromptFormatter
 
 debugging = False
@@ -68,6 +69,28 @@ def main():
 
     # preload the model
     t2i.load_model()
+
+    # load GFPGAN if requested
+    if opt.use_gfpgan:
+        print("\n* --gfpgan was specified, loading gfpgan...")
+        try:
+            model_path = os.path.join(opt.gfpgan_dir, opt.gfpgan_model_path)
+            if not os.path.isfile(model_path):
+                raise Exception("GFPGAN model not found at path "+model_path)
+
+            sys.path.append(os.path.abspath(opt.gfpgan_dir))
+            from gfpgan import GFPGANer
+
+            bg_upsampler = None
+            if opt.gfpgan_bg_upsampler is not None:
+                bg_upsampler = load_gfpgan_bg_upsampler(opt.gfpgan_bg_upsampler, opt.gfpgan_bg_tile)
+
+            t2i.gfpgan = GFPGANer(model_path=model_path, upscale=opt.gfpgan_upscale, arch='clean', channel_multiplier=2, bg_upsampler=bg_upsampler)
+        except Exception:
+            import traceback
+            print("Error loading GFPGAN:", file=sys.stderr)
+            print(traceback.format_exc(), file=sys.stderr)
+
     print("\n* Initialization done! Awaiting your command (-h for help, 'q' to quit, 'cd' to change output dir, 'pwd' to print output dir)...")
 
     log_path   = os.path.join(opt.outdir,'dream_log.txt')
@@ -183,6 +206,32 @@ def main_loop(t2i,outdir,parser,log,infile):
 
     print("goodbye!")
 
+def load_gfpgan_bg_upsampler(bg_upsampler, bg_tile=400):
+    import torch
+
+    if bg_upsampler == 'realesrgan':
+        if not torch.cuda.is_available():  # CPU
+            import warnings
+            warnings.warn('The unoptimized RealESRGAN is slow on CPU. We do not use it. '
+                          'If you really want to use it, please modify the corresponding codes.')
+            bg_upsampler = None
+        else:
+            from basicsr.archs.rrdbnet_arch import RRDBNet
+            from realesrgan import RealESRGANer
+            model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
+            bg_upsampler = RealESRGANer(
+                scale=2,
+                model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
+                model=model,
+                tile=bg_tile,
+                tile_pad=10,
+                pre_pad=0,
+                half=True)  # need to set False in CPU mode
+    else:
+        bg_upsampler = None
+
+    return bg_upsampler
+
 # variant generation is going to be superseded by a generalized
 # "prompt-morph" functionality
 # def generate_variants(t2i,outdir,opt,previous_gens):
@@ -261,6 +310,31 @@ def create_argv_parser():
                         type=str,
                         default="cuda",
                         help="device to run stable diffusion on. defaults to cuda `torch.cuda.current_device()` if avalible")
+    # GFPGAN related args
+    parser.add_argument('--gfpgan',
+                        dest='use_gfpgan',
+                        action='store_true',
+                        help="load gfpgan for use in the dreambot. Note: Enabling GFPGAN will require more GPU memory")
+    parser.add_argument("--gfpgan_upscale",
+                        type=int,
+                        default=2,
+                        help="The final upsampling scale of the image. Default: 2. Only used if --gfpgan is specified")
+    parser.add_argument("--gfpgan_bg_upsampler",
+                        type=str,
+                        default='realesrgan',
+                        help="Background upsampler. Default: None. Options: realesrgan, none. Only used if --gfpgan is specified")
+    parser.add_argument("--gfpgan_bg_tile",
+                        type=int,
+                        default=400,
+                        help="Tile size for background sampler, 0 for no tile during testing. Default: 400. Only used if --gfpgan is specified")
+    parser.add_argument("--gfpgan_model_path",
+                        type=str,
+                        default='experiments/pretrained_models/GFPGANv1.3.pth',
+                        help="indicates the path to the GFPGAN model, relative to --gfpgan_dir. Only used if --gfpgan is specified")
+    parser.add_argument("--gfpgan_dir",
+                        type=str,
+                        default='../gfpgan',
+                        help="indicates the directory containing the GFPGAN code. Only used if --gfpgan is specified")
     return parser
                         
     
@@ -278,6 +352,7 @@ def create_cmd_parser():
     parser.add_argument('-i','--individual',action='store_true',help="generate individual files (default)")
     parser.add_argument('-I','--init_img',type=str,help="path to input image for img2img mode (supersedes width and height)")
     parser.add_argument('-f','--strength',default=0.75,type=float,help="strength for noising/unnoising. 0.0 preserves image exactly, 1.0 replaces it completely")
+    parser.add_argument('-G','--gfpgan_strength', default=0.5, type=float, help="The strength at which to apply the GFPGAN model to the result, in order to improve faces.")
 # variants is going to be superseded by a generalized "prompt-morph" function
 #    parser.add_argument('-v','--variants',type=int,help="in img2img mode, the first generated image will get passed back to img2img to generate the requested number of variants")
     parser.add_argument('-x','--skip_normalize',action='store_true',help="skip subprompt weight normalization")

From 60ed00432837bc393f205321750788f91f3aa4b0 Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Thu, 25 Aug 2022 23:31:08 -0400
Subject: [PATCH 44/58] Update readme, fix defaults for case-sensitive fs's

---
 README.md        | 14 ++++++++++++--
 scripts/dream.py |  2 +-
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 28063203c8..f3cf91877f 100644
--- a/README.md
+++ b/README.md
@@ -86,7 +86,7 @@ This script also provides the ability to invoke GFPGAN after image generation. D
 and optionally upscale the image to a higher resolution.
 
 To use the ability, clone the [GFPGAN repository](https://github.com/TencentARC/GFPGAN) and follow their
-installation instructions. By default, we expect GFPGAN to be installed in a 'gfpgan' sibling directory.
+installation instructions. By default, we expect GFPGAN to be installed in a 'GFPGAN' sibling directory.
 
 You may also want to install Real-ESRGAN, if you want to enhance non-face regions in the image by installing
 the pip Real-ESRGAN package.
@@ -97,6 +97,15 @@ pip install realesrgan
 
 Now, you can run this script by adding the --gfpgan option. Any issues with GFPGAN will be reported on initialization.
 
+~~~~
+(ldm) ~/stable-diffusion$ python3 ./scripts/dream.py --gfpgan
+* Initializing, be patient...
+(...more initialization messages...)
+* --gfpgan was specified, loading gfpgan...
+(...even more initialization messages...)
+* Initialization done! Awaiting your command...
+~~~~
+
 When generating prompts, add a -G or --gfpgan_strength option to control the strength of the GFPGAN enhancement.
 0.0 is no enhancement, 1.0 is maximum enhancement.
 
@@ -115,7 +124,8 @@ That's it!
 There's also a bunch of options to control GFPGAN settings when starting the script for different configs that you can
 read about in the help text. This will let you control where GFPGAN is installed, if upsampling is enabled, the upsampler to use and the model path.
 
-Note that loading GFPGAN consumes additional GPU memory, additionaly, a couple of seconds will be tacked on when generating your images.
+Note that loading GFPGAN consumes additional GPU memory, but hey, 3090s with 24Gi of VRAM are cheap now *cough*.
+Additionally, a couple of seconds will be tacked on when generating your images, but hey, it's worth it.
 
 ## Barebones Web Server
 
diff --git a/scripts/dream.py b/scripts/dream.py
index 66980bf1f1..0e888bf99c 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -333,7 +333,7 @@ def create_argv_parser():
                         help="indicates the path to the GFPGAN model, relative to --gfpgan_dir. Only used if --gfpgan is specified")
     parser.add_argument("--gfpgan_dir",
                         type=str,
-                        default='../gfpgan',
+                        default='../GFPGAN',
                         help="indicates the directory containing the GFPGAN code. Only used if --gfpgan is specified")
     return parser
                         

From 3a30a8f2d246c0f7934064233de271b00adb3477 Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Thu, 25 Aug 2022 23:39:03 -0400
Subject: [PATCH 45/58] Fix not being able to disable bgupscaler; update readme

---
 README.md        | 2 +-
 scripts/dream.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index f3cf91877f..179be569b2 100644
--- a/README.md
+++ b/README.md
@@ -88,7 +88,7 @@ and optionally upscale the image to a higher resolution.
 To use the ability, clone the [GFPGAN repository](https://github.com/TencentARC/GFPGAN) and follow their
 installation instructions. By default, we expect GFPGAN to be installed in a 'GFPGAN' sibling directory.
 
-You may also want to install Real-ESRGAN, if you want to enhance non-face regions in the image by installing
+You may also want to install Real-ESRGAN, if you want to enhance non-face regions in the image, by installing
 the pip Real-ESRGAN package.
 ```
 pip install realesrgan
diff --git a/scripts/dream.py b/scripts/dream.py
index 0e888bf99c..e2825f8142 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -82,7 +82,7 @@ def main():
             from gfpgan import GFPGANer
 
             bg_upsampler = None
-            if opt.gfpgan_bg_upsampler is not None:
+            if opt.gfpgan_bg_upsampler == 'realesrgan':
                 bg_upsampler = load_gfpgan_bg_upsampler(opt.gfpgan_bg_upsampler, opt.gfpgan_bg_tile)
 
             t2i.gfpgan = GFPGANer(model_path=model_path, upscale=opt.gfpgan_upscale, arch='clean', channel_multiplier=2, bg_upsampler=bg_upsampler)

From cb86b9ae6e849cbe4609a62eaf686388eda9b2df Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Thu, 25 Aug 2022 23:48:35 -0400
Subject: [PATCH 46/58] Remove the redundancy, better logging

---
 ldm/simplet2i.py | 4 ++--
 scripts/dream.py | 4 +---
 2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index f0713081c8..1cc199c119 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -399,8 +399,8 @@ The vast majority of these arguments default to reasonable values.
             try:
                 if gfpgan_strength > 0:
                     image = self._run_gfpgan(image, gfpgan_strength)
-            except Exception:
-                print(f"Error running GFPGAN - Your image was not enhanced.")
+            except Exception as e:
+                print(f"Error running GFPGAN - Your image was not enhanced.\n{e}")
             images.append(image)
         return images
 
diff --git a/scripts/dream.py b/scripts/dream.py
index e2825f8142..4def627519 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -81,9 +81,7 @@ def main():
             sys.path.append(os.path.abspath(opt.gfpgan_dir))
             from gfpgan import GFPGANer
 
-            bg_upsampler = None
-            if opt.gfpgan_bg_upsampler == 'realesrgan':
-                bg_upsampler = load_gfpgan_bg_upsampler(opt.gfpgan_bg_upsampler, opt.gfpgan_bg_tile)
+            bg_upsampler = load_gfpgan_bg_upsampler(opt.gfpgan_bg_upsampler, opt.gfpgan_bg_tile)
 
             t2i.gfpgan = GFPGANer(model_path=model_path, upscale=opt.gfpgan_upscale, arch='clean', channel_multiplier=2, bg_upsampler=bg_upsampler)
         except Exception:

From 5f844807cb11792f68ed360338c4f28fb71919ef Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 23:50:56 -0400
Subject: [PATCH 47/58] Update README.md

Removed a bit of an uncaught merge conflict warning.
---
 README.md | 2 --
 1 file changed, 2 deletions(-)

diff --git a/README.md b/README.md
index 2af4a2a53a..cd834f9c2f 100644
--- a/README.md
+++ b/README.md
@@ -116,8 +116,6 @@ Then pass this file's name to dream.py when you invoke it:
 (ldm) ~/stable-diffusion$ python3 scripts/dream.py --from_file="path/to/prompts.txt"
 ~~~~
 
->>>>>>> big-refactoring
-
 ## Weighted Prompts
 
 You may weight different sections of the prompt to tell the sampler to attach different levels of

From 539c15966d78a7d37aea68a14c7caf91ad70dcd5 Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Thu, 25 Aug 2022 23:54:44 -0400
Subject: [PATCH 48/58] Update README.md

Put in a plug for Yansuki's morphing code.
---
 README.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/README.md b/README.md
index cd834f9c2f..17215f9ea0 100644
--- a/README.md
+++ b/README.md
@@ -198,6 +198,12 @@ repository and associated paper for details and limitations.
 
 ## Changes
 
+ * v1.11 (pending)
+   * Created a feature branch named **yunsaki-morphing-dream** which adds experimental support for
+     iteratively modifying the prompt and its parameters. Please see [Pull Request #86](https://github.com/lstein/stable-diffusion/pull/86)
+     for a synopsis of how this works. Note that when this feature is eventually added to the main branch, it may be modified
+     significantly.
+   
 * v1.10 (25 August 2022)
    * A barebones but fully functional interactive web server for online generation of txt2img and img2img.
    

From fcdd95b652b8f503528d3e3526590e3c7ab2f3dc Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Fri, 26 Aug 2022 00:39:57 -0400
Subject: [PATCH 49/58] Refactor so that behavior is consolidated at top level

---
 ldm/simplet2i.py | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 0de3a33237..6ae45b36bf 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -282,6 +282,11 @@ The vast majority of these arguments default to reasonable values.
                     seed_everything(seed)
                     iter_images = next(images_iterator)
                     for image in iter_images:
+                        try:
+                            if gfpgan_strength > 0:
+                                image = self._run_gfpgan(image, gfpgan_strength)
+                        except Exception as e:
+                            print(f"Error running GFPGAN - Your image was not enhanced.\n{e}")
                         results.append([image, seed])
                         if image_callback is not None:
                             image_callback(image,seed)
@@ -305,7 +310,6 @@ The vast majority of these arguments default to reasonable values.
                  batch_size,
                  steps,cfg_scale,ddim_eta,
                  skip_normalize,
-                 gfpgan_strength,
                  width,height):
         """
         An infinite iterator of images from the prompt.
@@ -325,7 +329,7 @@ The vast majority of these arguments default to reasonable values.
                                                 unconditional_guidance_scale=cfg_scale,
                                                 unconditional_conditioning=uc,
                                                 eta=ddim_eta)
-            yield self._samples_to_images(samples, gfpgan_strength=gfpgan_strength)
+            yield self._samples_to_images(samples)
 
     @torch.no_grad()
     def _img2img(self,
@@ -334,7 +338,6 @@ The vast majority of these arguments default to reasonable values.
                  batch_size,
                  steps,cfg_scale,ddim_eta,
                  skip_normalize,
-                 gfpgan_strength,
                  init_img,strength):
         """
         An infinite iterator of images from the prompt and the initial image
@@ -365,7 +368,7 @@ The vast majority of these arguments default to reasonable values.
             # decode it
             samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=cfg_scale,
                                         unconditional_conditioning=uc,)
-            yield self._samples_to_images(samples, gfpgan_strength)
+            yield self._samples_to_images(samples)
 
     # TODO: does this actually need to run every loop? does anything in it vary by random seed?
     def _get_uc_and_c(self, prompt, batch_size, skip_normalize):
@@ -389,18 +392,13 @@ The vast majority of these arguments default to reasonable values.
             c = self.model.get_learned_conditioning(batch_size * [prompt])
         return (uc, c)
 
-    def _samples_to_images(self, samples, gfpgan_strength=0):
+    def _samples_to_images(self, samples):
         x_samples = self.model.decode_first_stage(samples)
         x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
         images = list()
         for x_sample in x_samples:
             x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
             image = Image.fromarray(x_sample.astype(np.uint8))
-            try:
-                if gfpgan_strength > 0:
-                    image = self._run_gfpgan(image, gfpgan_strength)
-            except Exception as e:
-                print(f"Error running GFPGAN - Your image was not enhanced.\n{e}")
             images.append(image)
         return images
 

From 4f1664ec4f7b95addaf8f531ebe0c2bd68f0f516 Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Fri, 26 Aug 2022 00:41:41 -0400
Subject: [PATCH 50/58] remove params

---
 ldm/simplet2i.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 6ae45b36bf..66fdbb48b4 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -266,7 +266,6 @@ The vast majority of these arguments default to reasonable values.
                                         batch_size=batch_size,
                                         steps=steps,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
                                         skip_normalize=skip_normalize,
-                                        gfpgan_strength=gfpgan_strength,
                                         init_img=init_img,strength=strength)
             else:
                 images_iterator = self._txt2img(prompt,
@@ -274,7 +273,6 @@ The vast majority of these arguments default to reasonable values.
                                         batch_size=batch_size,
                                         steps=steps,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
                                         skip_normalize=skip_normalize,
-                                        gfpgan_strength=gfpgan_strength,
                                         width=width,height=height)
 
             with scope(self.device.type), self.model.ema_scope():

From f1ffb5b51b2d2adb48799d367e285d573b108792 Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Fri, 26 Aug 2022 00:45:19 -0400
Subject: [PATCH 51/58] Fix blend if the target image has been upscaled

---
 ldm/simplet2i.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 66fdbb48b4..1b7e34a7dc 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -529,6 +529,9 @@ The vast majority of these arguments default to reasonable values.
         res = Image.fromarray(restored_img)
 
         if strength < 1.0:
+            # Resize the image to the new image if the sizes have changed
+            if restored_img.size != image.size:
+                res = res.resize(image.size)
             res = Image.blend(image, res, strength)
 
         return res

From 407d70a98789d0f14ccfaf4aea8a0ed156af96d8 Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Fri, 26 Aug 2022 00:49:12 -0400
Subject: [PATCH 52/58] Fix backwards logic

---
 ldm/simplet2i.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 1b7e34a7dc..f1f88bba5e 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -531,7 +531,7 @@ The vast majority of these arguments default to reasonable values.
         if strength < 1.0:
             # Resize the image to the new image if the sizes have changed
             if restored_img.size != image.size:
-                res = res.resize(image.size)
+                image = image.resize(res.size)
             res = Image.blend(image, res, strength)
 
         return res

From 4f02b72c9ca464f53c0f7f23ee483f7a24b631ad Mon Sep 17 00:00:00 2001
From: Lincoln Stein <lincoln.stein@gmail.com>
Date: Fri, 26 Aug 2022 03:15:42 -0400
Subject: [PATCH 53/58] prettified all the code using "blue" at the urging of
 @tildebyte

---
 ldm/data/base.py                              |   18 +-
 ldm/data/imagenet.py                          |  322 ++--
 ldm/data/lsun.py                              |  104 +-
 ldm/data/personalized.py                      |  116 +-
 ldm/data/personalized_style.py                |  108 +-
 ldm/dream/pngwriter.py                        |   98 +-
 ldm/dream/readline.py                         |   95 +-
 ldm/lr_scheduler.py                           |   89 +-
 ldm/models/autoencoder.py                     |  425 +++--
 ldm/models/diffusion/classifier.py            |  226 ++-
 ldm/models/diffusion/ddim.py                  |  389 ++--
 ldm/models/diffusion/ddpm.py                  | 1581 ++++++++++++-----
 ldm/models/diffusion/ksampler.py              |   79 +-
 ldm/models/diffusion/plms.py                  |  373 ++--
 ldm/modules/attention.py                      |  167 +-
 ldm/modules/diffusionmodules/model.py         |  808 +++++----
 ldm/modules/diffusionmodules/openaimodel.py   |  203 ++-
 ldm/modules/diffusionmodules/util.py          |  105 +-
 ldm/modules/distributions/distributions.py    |   38 +-
 ldm/modules/ema.py                            |   34 +-
 ldm/modules/embedding_manager.py              |  191 +-
 ldm/modules/encoders/modules.py               |  386 ++--
 ldm/modules/image_degradation/__init__.py     |    8 +-
 ldm/modules/image_degradation/bsrgan.py       |  306 +++-
 ldm/modules/image_degradation/bsrgan_light.py |  257 ++-
 ldm/modules/image_degradation/utils_image.py  |  346 ++--
 ldm/modules/losses/__init__.py                |    2 +-
 ldm/modules/losses/contperceptual.py          |  146 +-
 ldm/modules/losses/vqperceptual.py            |  197 +-
 ldm/modules/x_transformer.py                  |  382 ++--
 ldm/simplet2i.py                              |  501 +++---
 ldm/util.py                                   |   56 +-
 main.py                                       |  685 ++++---
 scripts/dream.py                              |  482 +++--
 scripts/preload_models.py                     |   48 +-
 35 files changed, 6252 insertions(+), 3119 deletions(-)

diff --git a/ldm/data/base.py b/ldm/data/base.py
index b196c2f7aa..de9493fc1e 100644
--- a/ldm/data/base.py
+++ b/ldm/data/base.py
@@ -1,11 +1,17 @@
 from abc import abstractmethod
-from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset
+from torch.utils.data import (
+    Dataset,
+    ConcatDataset,
+    ChainDataset,
+    IterableDataset,
+)
 
 
 class Txt2ImgIterableBaseDataset(IterableDataset):
-    '''
+    """
     Define an interface to make the IterableDatasets for text2img data chainable
-    '''
+    """
+
     def __init__(self, num_records=0, valid_ids=None, size=256):
         super().__init__()
         self.num_records = num_records
@@ -13,11 +19,13 @@ class Txt2ImgIterableBaseDataset(IterableDataset):
         self.sample_ids = valid_ids
         self.size = size
 
-        print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.')
+        print(
+            f'{self.__class__.__name__} dataset contains {self.__len__()} examples.'
+        )
 
     def __len__(self):
         return self.num_records
 
     @abstractmethod
     def __iter__(self):
-        pass
\ No newline at end of file
+        pass
diff --git a/ldm/data/imagenet.py b/ldm/data/imagenet.py
index 1c473f9c69..d155f6d6ae 100644
--- a/ldm/data/imagenet.py
+++ b/ldm/data/imagenet.py
@@ -11,24 +11,34 @@ from tqdm import tqdm
 from torch.utils.data import Dataset, Subset
 
 import taming.data.utils as tdu
-from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve
+from taming.data.imagenet import (
+    str_to_indices,
+    give_synsets_from_indices,
+    download,
+    retrieve,
+)
 from taming.data.imagenet import ImagePaths
 
-from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light
+from ldm.modules.image_degradation import (
+    degradation_fn_bsr,
+    degradation_fn_bsr_light,
+)
 
 
-def synset2idx(path_to_yaml="data/index_synset.yaml"):
+def synset2idx(path_to_yaml='data/index_synset.yaml'):
     with open(path_to_yaml) as f:
         di2s = yaml.load(f)
-    return dict((v,k) for k,v in di2s.items())
+    return dict((v, k) for k, v in di2s.items())
 
 
 class ImageNetBase(Dataset):
     def __init__(self, config=None):
         self.config = config or OmegaConf.create()
-        if not type(self.config)==dict:
+        if not type(self.config) == dict:
             self.config = OmegaConf.to_container(self.config)
-        self.keep_orig_class_label = self.config.get("keep_orig_class_label", False)
+        self.keep_orig_class_label = self.config.get(
+            'keep_orig_class_label', False
+        )
         self.process_images = True  # if False we skip loading & processing images and self.data contains filepaths
         self._prepare()
         self._prepare_synset_to_human()
@@ -46,17 +56,23 @@ class ImageNetBase(Dataset):
         raise NotImplementedError()
 
     def _filter_relpaths(self, relpaths):
-        ignore = set([
-            "n06596364_9591.JPEG",
-        ])
-        relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore]
-        if "sub_indices" in self.config:
-            indices = str_to_indices(self.config["sub_indices"])
-            synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn)  # returns a list of strings
+        ignore = set(
+            [
+                'n06596364_9591.JPEG',
+            ]
+        )
+        relpaths = [
+            rpath for rpath in relpaths if not rpath.split('/')[-1] in ignore
+        ]
+        if 'sub_indices' in self.config:
+            indices = str_to_indices(self.config['sub_indices'])
+            synsets = give_synsets_from_indices(
+                indices, path_to_yaml=self.idx2syn
+            )  # returns a list of strings
             self.synset2idx = synset2idx(path_to_yaml=self.idx2syn)
             files = []
             for rpath in relpaths:
-                syn = rpath.split("/")[0]
+                syn = rpath.split('/')[0]
                 if syn in synsets:
                     files.append(rpath)
             return files
@@ -65,78 +81,89 @@ class ImageNetBase(Dataset):
 
     def _prepare_synset_to_human(self):
         SIZE = 2655750
-        URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1"
-        self.human_dict = os.path.join(self.root, "synset_human.txt")
-        if (not os.path.exists(self.human_dict) or
-                not os.path.getsize(self.human_dict)==SIZE):
+        URL = 'https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1'
+        self.human_dict = os.path.join(self.root, 'synset_human.txt')
+        if (
+            not os.path.exists(self.human_dict)
+            or not os.path.getsize(self.human_dict) == SIZE
+        ):
             download(URL, self.human_dict)
 
     def _prepare_idx_to_synset(self):
-        URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1"
-        self.idx2syn = os.path.join(self.root, "index_synset.yaml")
-        if (not os.path.exists(self.idx2syn)):
+        URL = 'https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1'
+        self.idx2syn = os.path.join(self.root, 'index_synset.yaml')
+        if not os.path.exists(self.idx2syn):
             download(URL, self.idx2syn)
 
     def _prepare_human_to_integer_label(self):
-        URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1"
-        self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt")
-        if (not os.path.exists(self.human2integer)):
+        URL = 'https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1'
+        self.human2integer = os.path.join(
+            self.root, 'imagenet1000_clsidx_to_labels.txt'
+        )
+        if not os.path.exists(self.human2integer):
             download(URL, self.human2integer)
-        with open(self.human2integer, "r") as f:
+        with open(self.human2integer, 'r') as f:
             lines = f.read().splitlines()
             assert len(lines) == 1000
             self.human2integer_dict = dict()
             for line in lines:
-                value, key = line.split(":")
+                value, key = line.split(':')
                 self.human2integer_dict[key] = int(value)
 
     def _load(self):
-        with open(self.txt_filelist, "r") as f:
+        with open(self.txt_filelist, 'r') as f:
             self.relpaths = f.read().splitlines()
             l1 = len(self.relpaths)
             self.relpaths = self._filter_relpaths(self.relpaths)
-            print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths)))
+            print(
+                'Removed {} files from filelist during filtering.'.format(
+                    l1 - len(self.relpaths)
+                )
+            )
 
-        self.synsets = [p.split("/")[0] for p in self.relpaths]
+        self.synsets = [p.split('/')[0] for p in self.relpaths]
         self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]
 
         unique_synsets = np.unique(self.synsets)
-        class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets))
+        class_dict = dict(
+            (synset, i) for i, synset in enumerate(unique_synsets)
+        )
         if not self.keep_orig_class_label:
             self.class_labels = [class_dict[s] for s in self.synsets]
         else:
             self.class_labels = [self.synset2idx[s] for s in self.synsets]
 
-        with open(self.human_dict, "r") as f:
+        with open(self.human_dict, 'r') as f:
             human_dict = f.read().splitlines()
             human_dict = dict(line.split(maxsplit=1) for line in human_dict)
 
         self.human_labels = [human_dict[s] for s in self.synsets]
 
         labels = {
-            "relpath": np.array(self.relpaths),
-            "synsets": np.array(self.synsets),
-            "class_label": np.array(self.class_labels),
-            "human_label": np.array(self.human_labels),
+            'relpath': np.array(self.relpaths),
+            'synsets': np.array(self.synsets),
+            'class_label': np.array(self.class_labels),
+            'human_label': np.array(self.human_labels),
         }
 
         if self.process_images:
-            self.size = retrieve(self.config, "size", default=256)
-            self.data = ImagePaths(self.abspaths,
-                                   labels=labels,
-                                   size=self.size,
-                                   random_crop=self.random_crop,
-                                   )
+            self.size = retrieve(self.config, 'size', default=256)
+            self.data = ImagePaths(
+                self.abspaths,
+                labels=labels,
+                size=self.size,
+                random_crop=self.random_crop,
+            )
         else:
             self.data = self.abspaths
 
 
 class ImageNetTrain(ImageNetBase):
-    NAME = "ILSVRC2012_train"
-    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
-    AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2"
+    NAME = 'ILSVRC2012_train'
+    URL = 'http://www.image-net.org/challenges/LSVRC/2012/'
+    AT_HASH = 'a306397ccf9c2ead27155983c254227c0fd938e2'
     FILES = [
-        "ILSVRC2012_img_train.tar",
+        'ILSVRC2012_img_train.tar',
     ]
     SIZES = [
         147897477120,
@@ -151,57 +178,64 @@ class ImageNetTrain(ImageNetBase):
         if self.data_root:
             self.root = os.path.join(self.data_root, self.NAME)
         else:
-            cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
-            self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
+            cachedir = os.environ.get(
+                'XDG_CACHE_HOME', os.path.expanduser('~/.cache')
+            )
+            self.root = os.path.join(cachedir, 'autoencoders/data', self.NAME)
 
-        self.datadir = os.path.join(self.root, "data")
-        self.txt_filelist = os.path.join(self.root, "filelist.txt")
+        self.datadir = os.path.join(self.root, 'data')
+        self.txt_filelist = os.path.join(self.root, 'filelist.txt')
         self.expected_length = 1281167
-        self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop",
-                                    default=True)
+        self.random_crop = retrieve(
+            self.config, 'ImageNetTrain/random_crop', default=True
+        )
         if not tdu.is_prepared(self.root):
             # prep
-            print("Preparing dataset {} in {}".format(self.NAME, self.root))
+            print('Preparing dataset {} in {}'.format(self.NAME, self.root))
 
             datadir = self.datadir
             if not os.path.exists(datadir):
                 path = os.path.join(self.root, self.FILES[0])
-                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
+                if (
+                    not os.path.exists(path)
+                    or not os.path.getsize(path) == self.SIZES[0]
+                ):
                     import academictorrents as at
+
                     atpath = at.get(self.AT_HASH, datastore=self.root)
                     assert atpath == path
 
-                print("Extracting {} to {}".format(path, datadir))
+                print('Extracting {} to {}'.format(path, datadir))
                 os.makedirs(datadir, exist_ok=True)
-                with tarfile.open(path, "r:") as tar:
+                with tarfile.open(path, 'r:') as tar:
                     tar.extractall(path=datadir)
 
-                print("Extracting sub-tars.")
-                subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar")))
+                print('Extracting sub-tars.')
+                subpaths = sorted(glob.glob(os.path.join(datadir, '*.tar')))
                 for subpath in tqdm(subpaths):
-                    subdir = subpath[:-len(".tar")]
+                    subdir = subpath[: -len('.tar')]
                     os.makedirs(subdir, exist_ok=True)
-                    with tarfile.open(subpath, "r:") as tar:
+                    with tarfile.open(subpath, 'r:') as tar:
                         tar.extractall(path=subdir)
 
-            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
+            filelist = glob.glob(os.path.join(datadir, '**', '*.JPEG'))
             filelist = [os.path.relpath(p, start=datadir) for p in filelist]
             filelist = sorted(filelist)
-            filelist = "\n".join(filelist)+"\n"
-            with open(self.txt_filelist, "w") as f:
+            filelist = '\n'.join(filelist) + '\n'
+            with open(self.txt_filelist, 'w') as f:
                 f.write(filelist)
 
             tdu.mark_prepared(self.root)
 
 
 class ImageNetValidation(ImageNetBase):
-    NAME = "ILSVRC2012_validation"
-    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
-    AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5"
-    VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1"
+    NAME = 'ILSVRC2012_validation'
+    URL = 'http://www.image-net.org/challenges/LSVRC/2012/'
+    AT_HASH = '5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5'
+    VS_URL = 'https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1'
     FILES = [
-        "ILSVRC2012_img_val.tar",
-        "validation_synset.txt",
+        'ILSVRC2012_img_val.tar',
+        'validation_synset.txt',
     ]
     SIZES = [
         6744924160,
@@ -217,39 +251,49 @@ class ImageNetValidation(ImageNetBase):
         if self.data_root:
             self.root = os.path.join(self.data_root, self.NAME)
         else:
-            cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
-            self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
-        self.datadir = os.path.join(self.root, "data")
-        self.txt_filelist = os.path.join(self.root, "filelist.txt")
+            cachedir = os.environ.get(
+                'XDG_CACHE_HOME', os.path.expanduser('~/.cache')
+            )
+            self.root = os.path.join(cachedir, 'autoencoders/data', self.NAME)
+        self.datadir = os.path.join(self.root, 'data')
+        self.txt_filelist = os.path.join(self.root, 'filelist.txt')
         self.expected_length = 50000
-        self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop",
-                                    default=False)
+        self.random_crop = retrieve(
+            self.config, 'ImageNetValidation/random_crop', default=False
+        )
         if not tdu.is_prepared(self.root):
             # prep
-            print("Preparing dataset {} in {}".format(self.NAME, self.root))
+            print('Preparing dataset {} in {}'.format(self.NAME, self.root))
 
             datadir = self.datadir
             if not os.path.exists(datadir):
                 path = os.path.join(self.root, self.FILES[0])
-                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
+                if (
+                    not os.path.exists(path)
+                    or not os.path.getsize(path) == self.SIZES[0]
+                ):
                     import academictorrents as at
+
                     atpath = at.get(self.AT_HASH, datastore=self.root)
                     assert atpath == path
 
-                print("Extracting {} to {}".format(path, datadir))
+                print('Extracting {} to {}'.format(path, datadir))
                 os.makedirs(datadir, exist_ok=True)
-                with tarfile.open(path, "r:") as tar:
+                with tarfile.open(path, 'r:') as tar:
                     tar.extractall(path=datadir)
 
                 vspath = os.path.join(self.root, self.FILES[1])
-                if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]:
+                if (
+                    not os.path.exists(vspath)
+                    or not os.path.getsize(vspath) == self.SIZES[1]
+                ):
                     download(self.VS_URL, vspath)
 
-                with open(vspath, "r") as f:
+                with open(vspath, 'r') as f:
                     synset_dict = f.read().splitlines()
                     synset_dict = dict(line.split() for line in synset_dict)
 
-                print("Reorganizing into synset folders")
+                print('Reorganizing into synset folders')
                 synsets = np.unique(list(synset_dict.values()))
                 for s in synsets:
                     os.makedirs(os.path.join(datadir, s), exist_ok=True)
@@ -258,21 +302,26 @@ class ImageNetValidation(ImageNetBase):
                     dst = os.path.join(datadir, v)
                     shutil.move(src, dst)
 
-            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
+            filelist = glob.glob(os.path.join(datadir, '**', '*.JPEG'))
             filelist = [os.path.relpath(p, start=datadir) for p in filelist]
             filelist = sorted(filelist)
-            filelist = "\n".join(filelist)+"\n"
-            with open(self.txt_filelist, "w") as f:
+            filelist = '\n'.join(filelist) + '\n'
+            with open(self.txt_filelist, 'w') as f:
                 f.write(filelist)
 
             tdu.mark_prepared(self.root)
 
 
-
 class ImageNetSR(Dataset):
-    def __init__(self, size=None,
-                 degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1.,
-                 random_crop=True):
+    def __init__(
+        self,
+        size=None,
+        degradation=None,
+        downscale_f=4,
+        min_crop_f=0.5,
+        max_crop_f=1.0,
+        random_crop=True,
+    ):
         """
         Imagenet Superresolution Dataloader
         Performs following ops in order:
@@ -296,67 +345,86 @@ class ImageNetSR(Dataset):
         self.LR_size = int(size / downscale_f)
         self.min_crop_f = min_crop_f
         self.max_crop_f = max_crop_f
-        assert(max_crop_f <= 1.)
+        assert max_crop_f <= 1.0
         self.center_crop = not random_crop
 
-        self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA)
+        self.image_rescaler = albumentations.SmallestMaxSize(
+            max_size=size, interpolation=cv2.INTER_AREA
+        )
 
-        self.pil_interpolation = False # gets reset later if incase interp_op is from pillow
+        self.pil_interpolation = (
+            False  # gets reset later in case interp_op is from pillow
+        )
 
-        if degradation == "bsrgan":
-            self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f)
+        if degradation == 'bsrgan':
+            self.degradation_process = partial(
+                degradation_fn_bsr, sf=downscale_f
+            )
 
-        elif degradation == "bsrgan_light":
-            self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f)
+        elif degradation == 'bsrgan_light':
+            self.degradation_process = partial(
+                degradation_fn_bsr_light, sf=downscale_f
+            )
 
         else:
             interpolation_fn = {
-            "cv_nearest": cv2.INTER_NEAREST,
-            "cv_bilinear": cv2.INTER_LINEAR,
-            "cv_bicubic": cv2.INTER_CUBIC,
-            "cv_area": cv2.INTER_AREA,
-            "cv_lanczos": cv2.INTER_LANCZOS4,
-            "pil_nearest": PIL.Image.NEAREST,
-            "pil_bilinear": PIL.Image.BILINEAR,
-            "pil_bicubic": PIL.Image.BICUBIC,
-            "pil_box": PIL.Image.BOX,
-            "pil_hamming": PIL.Image.HAMMING,
-            "pil_lanczos": PIL.Image.LANCZOS,
+                'cv_nearest': cv2.INTER_NEAREST,
+                'cv_bilinear': cv2.INTER_LINEAR,
+                'cv_bicubic': cv2.INTER_CUBIC,
+                'cv_area': cv2.INTER_AREA,
+                'cv_lanczos': cv2.INTER_LANCZOS4,
+                'pil_nearest': PIL.Image.NEAREST,
+                'pil_bilinear': PIL.Image.BILINEAR,
+                'pil_bicubic': PIL.Image.BICUBIC,
+                'pil_box': PIL.Image.BOX,
+                'pil_hamming': PIL.Image.HAMMING,
+                'pil_lanczos': PIL.Image.LANCZOS,
             }[degradation]
 
-            self.pil_interpolation = degradation.startswith("pil_")
+            self.pil_interpolation = degradation.startswith('pil_')
 
             if self.pil_interpolation:
-                self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn)
+                self.degradation_process = partial(
+                    TF.resize,
+                    size=self.LR_size,
+                    interpolation=interpolation_fn,
+                )
 
             else:
-                self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size,
-                                                                          interpolation=interpolation_fn)
+                self.degradation_process = albumentations.SmallestMaxSize(
+                    max_size=self.LR_size, interpolation=interpolation_fn
+                )
 
     def __len__(self):
         return len(self.base)
 
     def __getitem__(self, i):
         example = self.base[i]
-        image = Image.open(example["file_path_"])
+        image = Image.open(example['file_path_'])
 
-        if not image.mode == "RGB":
-            image = image.convert("RGB")
+        if not image.mode == 'RGB':
+            image = image.convert('RGB')
 
         image = np.array(image).astype(np.uint8)
 
         min_side_len = min(image.shape[:2])
-        crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None)
+        crop_side_len = min_side_len * np.random.uniform(
+            self.min_crop_f, self.max_crop_f, size=None
+        )
         crop_side_len = int(crop_side_len)
 
         if self.center_crop:
-            self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len)
+            self.cropper = albumentations.CenterCrop(
+                height=crop_side_len, width=crop_side_len
+            )
 
         else:
-            self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len)
+            self.cropper = albumentations.RandomCrop(
+                height=crop_side_len, width=crop_side_len
+            )
 
-        image = self.cropper(image=image)["image"]
-        image = self.image_rescaler(image=image)["image"]
+        image = self.cropper(image=image)['image']
+        image = self.image_rescaler(image=image)['image']
 
         if self.pil_interpolation:
             image_pil = PIL.Image.fromarray(image)
@@ -364,10 +432,10 @@ class ImageNetSR(Dataset):
             LR_image = np.array(LR_image).astype(np.uint8)
 
         else:
-            LR_image = self.degradation_process(image=image)["image"]
+            LR_image = self.degradation_process(image=image)['image']
 
-        example["image"] = (image/127.5 - 1.0).astype(np.float32)
-        example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32)
+        example['image'] = (image / 127.5 - 1.0).astype(np.float32)
+        example['LR_image'] = (LR_image / 127.5 - 1.0).astype(np.float32)
 
         return example
 
@@ -377,9 +445,11 @@ class ImageNetSRTrain(ImageNetSR):
         super().__init__(**kwargs)
 
     def get_base(self):
-        with open("data/imagenet_train_hr_indices.p", "rb") as f:
+        with open('data/imagenet_train_hr_indices.p', 'rb') as f:
             indices = pickle.load(f)
-        dset = ImageNetTrain(process_images=False,)
+        dset = ImageNetTrain(
+            process_images=False,
+        )
         return Subset(dset, indices)
 
 
@@ -388,7 +458,9 @@ class ImageNetSRValidation(ImageNetSR):
         super().__init__(**kwargs)
 
     def get_base(self):
-        with open("data/imagenet_val_hr_indices.p", "rb") as f:
+        with open('data/imagenet_val_hr_indices.p', 'rb') as f:
             indices = pickle.load(f)
-        dset = ImageNetValidation(process_images=False,)
+        dset = ImageNetValidation(
+            process_images=False,
+        )
         return Subset(dset, indices)
diff --git a/ldm/data/lsun.py b/ldm/data/lsun.py
index 6256e45715..4a7ecb147e 100644
--- a/ldm/data/lsun.py
+++ b/ldm/data/lsun.py
@@ -7,30 +7,33 @@ from torchvision import transforms
 
 
 class LSUNBase(Dataset):
-    def __init__(self,
-                 txt_file,
-                 data_root,
-                 size=None,
-                 interpolation="bicubic",
-                 flip_p=0.5
-                 ):
+    def __init__(
+        self,
+        txt_file,
+        data_root,
+        size=None,
+        interpolation='bicubic',
+        flip_p=0.5,
+    ):
         self.data_paths = txt_file
         self.data_root = data_root
-        with open(self.data_paths, "r") as f:
+        with open(self.data_paths, 'r') as f:
             self.image_paths = f.read().splitlines()
         self._length = len(self.image_paths)
         self.labels = {
-            "relative_file_path_": [l for l in self.image_paths],
-            "file_path_": [os.path.join(self.data_root, l)
-                           for l in self.image_paths],
+            'relative_file_path_': [l for l in self.image_paths],
+            'file_path_': [
+                os.path.join(self.data_root, l) for l in self.image_paths
+            ],
         }
 
         self.size = size
-        self.interpolation = {"linear": PIL.Image.LINEAR,
-                              "bilinear": PIL.Image.BILINEAR,
-                              "bicubic": PIL.Image.BICUBIC,
-                              "lanczos": PIL.Image.LANCZOS,
-                              }[interpolation]
+        self.interpolation = {
+            'linear': PIL.Image.LINEAR,
+            'bilinear': PIL.Image.BILINEAR,
+            'bicubic': PIL.Image.BICUBIC,
+            'lanczos': PIL.Image.LANCZOS,
+        }[interpolation]
         self.flip = transforms.RandomHorizontalFlip(p=flip_p)
 
     def __len__(self):
@@ -38,55 +41,86 @@ class LSUNBase(Dataset):
 
     def __getitem__(self, i):
         example = dict((k, self.labels[k][i]) for k in self.labels)
-        image = Image.open(example["file_path_"])
-        if not image.mode == "RGB":
-            image = image.convert("RGB")
+        image = Image.open(example['file_path_'])
+        if not image.mode == 'RGB':
+            image = image.convert('RGB')
 
         # default to score-sde preprocessing
         img = np.array(image).astype(np.uint8)
         crop = min(img.shape[0], img.shape[1])
-        h, w, = img.shape[0], img.shape[1]
-        img = img[(h - crop) // 2:(h + crop) // 2,
-              (w - crop) // 2:(w + crop) // 2]
+        h, w, = (
+            img.shape[0],
+            img.shape[1],
+        )
+        img = img[
+            (h - crop) // 2 : (h + crop) // 2,
+            (w - crop) // 2 : (w + crop) // 2,
+        ]
 
         image = Image.fromarray(img)
         if self.size is not None:
-            image = image.resize((self.size, self.size), resample=self.interpolation)
+            image = image.resize(
+                (self.size, self.size), resample=self.interpolation
+            )
 
         image = self.flip(image)
         image = np.array(image).astype(np.uint8)
-        example["image"] = (image / 127.5 - 1.0).astype(np.float32)
+        example['image'] = (image / 127.5 - 1.0).astype(np.float32)
         return example
 
 
 class LSUNChurchesTrain(LSUNBase):
     def __init__(self, **kwargs):
-        super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs)
+        super().__init__(
+            txt_file='data/lsun/church_outdoor_train.txt',
+            data_root='data/lsun/churches',
+            **kwargs
+        )
 
 
 class LSUNChurchesValidation(LSUNBase):
-    def __init__(self, flip_p=0., **kwargs):
-        super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches",
-                         flip_p=flip_p, **kwargs)
+    def __init__(self, flip_p=0.0, **kwargs):
+        super().__init__(
+            txt_file='data/lsun/church_outdoor_val.txt',
+            data_root='data/lsun/churches',
+            flip_p=flip_p,
+            **kwargs
+        )
 
 
 class LSUNBedroomsTrain(LSUNBase):
     def __init__(self, **kwargs):
-        super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs)
+        super().__init__(
+            txt_file='data/lsun/bedrooms_train.txt',
+            data_root='data/lsun/bedrooms',
+            **kwargs
+        )
 
 
 class LSUNBedroomsValidation(LSUNBase):
     def __init__(self, flip_p=0.0, **kwargs):
-        super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms",
-                         flip_p=flip_p, **kwargs)
+        super().__init__(
+            txt_file='data/lsun/bedrooms_val.txt',
+            data_root='data/lsun/bedrooms',
+            flip_p=flip_p,
+            **kwargs
+        )
 
 
 class LSUNCatsTrain(LSUNBase):
     def __init__(self, **kwargs):
-        super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs)
+        super().__init__(
+            txt_file='data/lsun/cat_train.txt',
+            data_root='data/lsun/cats',
+            **kwargs
+        )
 
 
 class LSUNCatsValidation(LSUNBase):
-    def __init__(self, flip_p=0., **kwargs):
-        super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats",
-                         flip_p=flip_p, **kwargs)
+    def __init__(self, flip_p=0.0, **kwargs):
+        super().__init__(
+            txt_file='data/lsun/cat_val.txt',
+            data_root='data/lsun/cats',
+            flip_p=flip_p,
+            **kwargs
+        )
diff --git a/ldm/data/personalized.py b/ldm/data/personalized.py
index c8a57d09fa..15fc8a8d2d 100644
--- a/ldm/data/personalized.py
+++ b/ldm/data/personalized.py
@@ -72,31 +72,57 @@ imagenet_dual_templates_small = [
 ]
 
 per_img_token_list = [
-    'א', 'ב', 'ג', 'ד', 'ה', 'ו', 'ז', 'ח', 'ט', 'י', 'כ', 'ל', 'מ', 'נ', 'ס', 'ע', 'פ', 'צ', 'ק', 'ר', 'ש', 'ת',
+    'א',
+    'ב',
+    'ג',
+    'ד',
+    'ה',
+    'ו',
+    'ז',
+    'ח',
+    'ט',
+    'י',
+    'כ',
+    'ל',
+    'מ',
+    'נ',
+    'ס',
+    'ע',
+    'פ',
+    'צ',
+    'ק',
+    'ר',
+    'ש',
+    'ת',
 ]
 
+
 class PersonalizedBase(Dataset):
-    def __init__(self,
-                 data_root,
-                 size=None,
-                 repeats=100,
-                 interpolation="bicubic",
-                 flip_p=0.5,
-                 set="train",
-                 placeholder_token="*",
-                 per_image_tokens=False,
-                 center_crop=False,
-                 mixing_prob=0.25,
-                 coarse_class_text=None,
-                 ):
+    def __init__(
+        self,
+        data_root,
+        size=None,
+        repeats=100,
+        interpolation='bicubic',
+        flip_p=0.5,
+        set='train',
+        placeholder_token='*',
+        per_image_tokens=False,
+        center_crop=False,
+        mixing_prob=0.25,
+        coarse_class_text=None,
+    ):
 
         self.data_root = data_root
 
-        self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
+        self.image_paths = [
+            os.path.join(self.data_root, file_path)
+            for file_path in os.listdir(self.data_root)
+        ]
 
         # self._length = len(self.image_paths)
         self.num_images = len(self.image_paths)
-        self._length = self.num_images 
+        self._length = self.num_images
 
         self.placeholder_token = placeholder_token
 
@@ -107,17 +133,20 @@ class PersonalizedBase(Dataset):
         self.coarse_class_text = coarse_class_text
 
         if per_image_tokens:
-            assert self.num_images < len(per_img_token_list), f"Can't use per-image tokens when the training set contains more than {len(per_img_token_list)} tokens. To enable larger sets, add more tokens to 'per_img_token_list'."
+            assert self.num_images < len(
+                per_img_token_list
+            ), f"Can't use per-image tokens when the training set contains more than {len(per_img_token_list)} tokens. To enable larger sets, add more tokens to 'per_img_token_list'."
 
-        if set == "train":
+        if set == 'train':
             self._length = self.num_images * repeats
 
         self.size = size
-        self.interpolation = {"linear": PIL.Image.LINEAR,
-                              "bilinear": PIL.Image.BILINEAR,
-                              "bicubic": PIL.Image.BICUBIC,
-                              "lanczos": PIL.Image.LANCZOS,
-                              }[interpolation]
+        self.interpolation = {
+            'linear': PIL.Image.LINEAR,
+            'bilinear': PIL.Image.BILINEAR,
+            'bicubic': PIL.Image.BICUBIC,
+            'lanczos': PIL.Image.LANCZOS,
+        }[interpolation]
         self.flip = transforms.RandomHorizontalFlip(p=flip_p)
 
     def __len__(self):
@@ -127,34 +156,47 @@ class PersonalizedBase(Dataset):
         example = {}
         image = Image.open(self.image_paths[i % self.num_images])
 
-        if not image.mode == "RGB":
-            image = image.convert("RGB")
+        if not image.mode == 'RGB':
+            image = image.convert('RGB')
 
         placeholder_string = self.placeholder_token
         if self.coarse_class_text:
-            placeholder_string = f"{self.coarse_class_text} {placeholder_string}"
+            placeholder_string = (
+                f'{self.coarse_class_text} {placeholder_string}'
+            )
 
         if self.per_image_tokens and np.random.uniform() < self.mixing_prob:
-            text = random.choice(imagenet_dual_templates_small).format(placeholder_string, per_img_token_list[i % self.num_images])
+            text = random.choice(imagenet_dual_templates_small).format(
+                placeholder_string, per_img_token_list[i % self.num_images]
+            )
         else:
-            text = random.choice(imagenet_templates_small).format(placeholder_string)
-            
-        example["caption"] = text
+            text = random.choice(imagenet_templates_small).format(
+                placeholder_string
+            )
+
+        example['caption'] = text
 
         # default to score-sde preprocessing
         img = np.array(image).astype(np.uint8)
-        
+
         if self.center_crop:
             crop = min(img.shape[0], img.shape[1])
-            h, w, = img.shape[0], img.shape[1]
-            img = img[(h - crop) // 2:(h + crop) // 2,
-                (w - crop) // 2:(w + crop) // 2]
+            h, w, = (
+                img.shape[0],
+                img.shape[1],
+            )
+            img = img[
+                (h - crop) // 2 : (h + crop) // 2,
+                (w - crop) // 2 : (w + crop) // 2,
+            ]
 
         image = Image.fromarray(img)
         if self.size is not None:
-            image = image.resize((self.size, self.size), resample=self.interpolation)
+            image = image.resize(
+                (self.size, self.size), resample=self.interpolation
+            )
 
         image = self.flip(image)
         image = np.array(image).astype(np.uint8)
-        example["image"] = (image / 127.5 - 1.0).astype(np.float32)
-        return example
\ No newline at end of file
+        example['image'] = (image / 127.5 - 1.0).astype(np.float32)
+        return example
diff --git a/ldm/data/personalized_style.py b/ldm/data/personalized_style.py
index b6be7b15c4..56d77d7e81 100644
--- a/ldm/data/personalized_style.py
+++ b/ldm/data/personalized_style.py
@@ -50,29 +50,55 @@ imagenet_dual_templates_small = [
 ]
 
 per_img_token_list = [
-    'א', 'ב', 'ג', 'ד', 'ה', 'ו', 'ז', 'ח', 'ט', 'י', 'כ', 'ל', 'מ', 'נ', 'ס', 'ע', 'פ', 'צ', 'ק', 'ר', 'ש', 'ת',
+    'א',
+    'ב',
+    'ג',
+    'ד',
+    'ה',
+    'ו',
+    'ז',
+    'ח',
+    'ט',
+    'י',
+    'כ',
+    'ל',
+    'מ',
+    'נ',
+    'ס',
+    'ע',
+    'פ',
+    'צ',
+    'ק',
+    'ר',
+    'ש',
+    'ת',
 ]
 
+
 class PersonalizedBase(Dataset):
-    def __init__(self,
-                 data_root,
-                 size=None,
-                 repeats=100,
-                 interpolation="bicubic",
-                 flip_p=0.5,
-                 set="train",
-                 placeholder_token="*",
-                 per_image_tokens=False,
-                 center_crop=False,
-                 ):
+    def __init__(
+        self,
+        data_root,
+        size=None,
+        repeats=100,
+        interpolation='bicubic',
+        flip_p=0.5,
+        set='train',
+        placeholder_token='*',
+        per_image_tokens=False,
+        center_crop=False,
+    ):
 
         self.data_root = data_root
 
-        self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
+        self.image_paths = [
+            os.path.join(self.data_root, file_path)
+            for file_path in os.listdir(self.data_root)
+        ]
 
         # self._length = len(self.image_paths)
         self.num_images = len(self.image_paths)
-        self._length = self.num_images 
+        self._length = self.num_images
 
         self.placeholder_token = placeholder_token
 
@@ -80,17 +106,20 @@ class PersonalizedBase(Dataset):
         self.center_crop = center_crop
 
         if per_image_tokens:
-            assert self.num_images < len(per_img_token_list), f"Can't use per-image tokens when the training set contains more than {len(per_img_token_list)} tokens. To enable larger sets, add more tokens to 'per_img_token_list'."
+            assert self.num_images < len(
+                per_img_token_list
+            ), f"Can't use per-image tokens when the training set contains more than {len(per_img_token_list)} tokens. To enable larger sets, add more tokens to 'per_img_token_list'."
 
-        if set == "train":
+        if set == 'train':
             self._length = self.num_images * repeats
 
         self.size = size
-        self.interpolation = {"linear": PIL.Image.LINEAR,
-                              "bilinear": PIL.Image.BILINEAR,
-                              "bicubic": PIL.Image.BICUBIC,
-                              "lanczos": PIL.Image.LANCZOS,
-                              }[interpolation]
+        self.interpolation = {
+            'linear': PIL.Image.LINEAR,
+            'bilinear': PIL.Image.BILINEAR,
+            'bicubic': PIL.Image.BICUBIC,
+            'lanczos': PIL.Image.LANCZOS,
+        }[interpolation]
         self.flip = transforms.RandomHorizontalFlip(p=flip_p)
 
     def __len__(self):
@@ -100,30 +129,41 @@ class PersonalizedBase(Dataset):
         example = {}
         image = Image.open(self.image_paths[i % self.num_images])
 
-        if not image.mode == "RGB":
-            image = image.convert("RGB")
+        if not image.mode == 'RGB':
+            image = image.convert('RGB')
 
         if self.per_image_tokens and np.random.uniform() < 0.25:
-            text = random.choice(imagenet_dual_templates_small).format(self.placeholder_token, per_img_token_list[i % self.num_images])
+            text = random.choice(imagenet_dual_templates_small).format(
+                self.placeholder_token, per_img_token_list[i % self.num_images]
+            )
         else:
-            text = random.choice(imagenet_templates_small).format(self.placeholder_token)
-            
-        example["caption"] = text
+            text = random.choice(imagenet_templates_small).format(
+                self.placeholder_token
+            )
+
+        example['caption'] = text
 
         # default to score-sde preprocessing
         img = np.array(image).astype(np.uint8)
-        
+
         if self.center_crop:
             crop = min(img.shape[0], img.shape[1])
-            h, w, = img.shape[0], img.shape[1]
-            img = img[(h - crop) // 2:(h + crop) // 2,
-                (w - crop) // 2:(w + crop) // 2]
+            h, w, = (
+                img.shape[0],
+                img.shape[1],
+            )
+            img = img[
+                (h - crop) // 2 : (h + crop) // 2,
+                (w - crop) // 2 : (w + crop) // 2,
+            ]
 
         image = Image.fromarray(img)
         if self.size is not None:
-            image = image.resize((self.size, self.size), resample=self.interpolation)
+            image = image.resize(
+                (self.size, self.size), resample=self.interpolation
+            )
 
         image = self.flip(image)
         image = np.array(image).astype(np.uint8)
-        example["image"] = (image / 127.5 - 1.0).astype(np.float32)
-        return example
\ No newline at end of file
+        example['image'] = (image / 127.5 - 1.0).astype(np.float32)
+        return example
diff --git a/ldm/dream/pngwriter.py b/ldm/dream/pngwriter.py
index ecbbbd4ff7..3a3f205512 100644
--- a/ldm/dream/pngwriter.py
+++ b/ldm/dream/pngwriter.py
@@ -1,4 +1,4 @@
-'''
+"""
 Two helper classes for dealing with PNG images and their path names.
 PngWriter -- Converts Images generated by T2I into PNGs, finds
              appropriate names for them, and writes prompt metadata
@@ -7,95 +7,104 @@ PngWriter -- Converts Images generated by T2I into PNGs, finds
              prompt for file/directory names.
 PromptFormatter -- Utility for converting a Namespace of prompt parameters
              back into a formatted prompt string with command-line switches.
-'''
+"""
 import os
 import re
-from math import sqrt,floor,ceil
-from PIL import Image,PngImagePlugin
+from math import sqrt, floor, ceil
+from PIL import Image, PngImagePlugin
 
 # -------------------image generation utils-----
 class PngWriter:
-
-    def __init__(self,outdir,prompt=None,batch_size=1):
-        self.outdir           = outdir
-        self.batch_size       = batch_size
-        self.prompt           = prompt
-        self.filepath         = None
-        self.files_written    = []
+    def __init__(self, outdir, prompt=None, batch_size=1):
+        self.outdir = outdir
+        self.batch_size = batch_size
+        self.prompt = prompt
+        self.filepath = None
+        self.files_written = []
         os.makedirs(outdir, exist_ok=True)
 
-    def write_image(self,image,seed):
-        self.filepath = self.unique_filename(seed,self.filepath) # will increment name in some sensible way
+    def write_image(self, image, seed):
+        self.filepath = self.unique_filename(
+            seed, self.filepath
+        )   # will increment name in some sensible way
         try:
             prompt = f'{self.prompt} -S{seed}'
-            self.save_image_and_prompt_to_png(image,prompt,self.filepath)
+            self.save_image_and_prompt_to_png(image, prompt, self.filepath)
         except IOError as e:
             print(e)
-        self.files_written.append([self.filepath,seed])
+        self.files_written.append([self.filepath, seed])
 
-    def unique_filename(self,seed,previouspath=None):
+    def unique_filename(self, seed, previouspath=None):
         revision = 1
 
         if previouspath is None:
             # sort reverse alphabetically until we find max+1
-            dirlist   = sorted(os.listdir(self.outdir),reverse=True)
+            dirlist = sorted(os.listdir(self.outdir), reverse=True)
             # find the first filename that matches our pattern or return 000000.0.png
-            filename   = next((f for f in dirlist if re.match('^(\d+)\..*\.png',f)),'0000000.0.png')
-            basecount  = int(filename.split('.',1)[0])
+            filename = next(
+                (f for f in dirlist if re.match(r'^(\d+)\..*\.png', f)),
+                '0000000.0.png',
+            )
+            basecount = int(filename.split('.', 1)[0])
             basecount += 1
             if self.batch_size > 1:
                 filename = f'{basecount:06}.{seed}.01.png'
             else:
                 filename = f'{basecount:06}.{seed}.png'
-            return os.path.join(self.outdir,filename)
+            return os.path.join(self.outdir, filename)
 
         else:
             basename = os.path.basename(previouspath)
-            x = re.match('^(\d+)\..*\.png',basename)
+            x = re.match(r'^(\d+)\..*\.png', basename)
             if not x:
-                return self.unique_filename(seed,previouspath)
+                return self.unique_filename(seed, previouspath)
 
             basecount = int(x.groups()[0])
-            series = 0 
+            series = 0
             finished = False
             while not finished:
                 series += 1
                 filename = f'{basecount:06}.{seed}.png'
-                if self.batch_size>1 or os.path.exists(os.path.join(self.outdir,filename)):
+                if self.batch_size > 1 or os.path.exists(
+                    os.path.join(self.outdir, filename)
+                ):
                     filename = f'{basecount:06}.{seed}.{series:02}.png'
-                finished = not os.path.exists(os.path.join(self.outdir,filename))
-            return os.path.join(self.outdir,filename)
+                finished = not os.path.exists(
+                    os.path.join(self.outdir, filename)
+                )
+            return os.path.join(self.outdir, filename)
 
-    def save_image_and_prompt_to_png(self,image,prompt,path):
+    def save_image_and_prompt_to_png(self, image, prompt, path):
         info = PngImagePlugin.PngInfo()
-        info.add_text("Dream",prompt)
-        image.save(path,"PNG",pnginfo=info)
+        info.add_text('Dream', prompt)
+        image.save(path, 'PNG', pnginfo=info)
 
-    def make_grid(self,image_list,rows=None,cols=None):
+    def make_grid(self, image_list, rows=None, cols=None):
         image_cnt = len(image_list)
-        if None in (rows,cols):
+        if None in (rows, cols):
             rows = floor(sqrt(image_cnt))  # try to make it square
-            cols = ceil(image_cnt/rows)
-        width  = image_list[0].width
+            cols = ceil(image_cnt / rows)
+        width = image_list[0].width
         height = image_list[0].height
 
-        grid_img = Image.new('RGB',(width*cols,height*rows))
-        for r in range(0,rows):
-            for c in range (0,cols):
-                i = r*rows + c
-                grid_img.paste(image_list[i],(c*width,r*height))
+        grid_img = Image.new('RGB', (width * cols, height * rows))
+        for r in range(0, rows):
+            for c in range(0, cols):
+                i = r * cols + c  # row-major index into image_list
+                grid_img.paste(image_list[i], (c * width, r * height))
 
         return grid_img
-    
-class PromptFormatter():
-    def __init__(self,t2i,opt):
+
+
+class PromptFormatter:
+    def __init__(self, t2i, opt):
         self.t2i = t2i
         self.opt = opt
 
     def normalize_prompt(self):
-        '''Normalize the prompt and switches'''
-        t2i      = self.t2i
-        opt      = self.opt
+        """Normalize the prompt and switches"""
+        t2i = self.t2i
+        opt = self.opt
 
         switches = list()
         switches.append(f'"{opt.prompt}"')
@@ -114,4 +123,3 @@ class PromptFormatter():
         if t2i.full_precision:
             switches.append('-F')
         return ' '.join(switches)
-
diff --git a/ldm/dream/readline.py b/ldm/dream/readline.py
index f46ac6e23a..f40fe83316 100644
--- a/ldm/dream/readline.py
+++ b/ldm/dream/readline.py
@@ -1,37 +1,40 @@
-'''
+"""
 Readline helper functions for dream.py (linux and mac only).
-'''
+"""
 import os
 import re
 import atexit
+
 # ---------------readline utilities---------------------
 try:
     import readline
+
     readline_available = True
 except:
     readline_available = False
 
-class Completer():
-    def __init__(self,options):
+
+class Completer:
+    def __init__(self, options):
         self.options = sorted(options)
         return
 
-    def complete(self,text,state):
+    def complete(self, text, state):
         buffer = readline.get_line_buffer()
 
-        if text.startswith(('-I','--init_img')):
-            return self._path_completions(text,state,('.png'))
+        if text.startswith(('-I', '--init_img')):
+            return self._path_completions(text, state, ('.png',))
 
-        if buffer.strip().endswith('cd') or text.startswith(('.','/')):
-            return self._path_completions(text,state,())
+        if buffer.strip().endswith('cd') or text.startswith(('.', '/')):
+            return self._path_completions(text, state, ())
 
         response = None
         if state == 0:
             # This is the first time for this text, so build a match list.
             if text:
-                self.matches = [s 
-                                for s in self.options
-                                if s and s.startswith(text)]
+                self.matches = [
+                    s for s in self.options if s and s.startswith(text)
+                ]
             else:
                 self.matches = self.options[:]
 
@@ -43,32 +46,34 @@ class Completer():
             response = None
         return response
 
-    def _path_completions(self,text,state,extensions):
+    def _path_completions(self, text, state, extensions):
         # get the path so far
         if text.startswith('-I'):
-            path = text.replace('-I','',1).lstrip()
+            path = text.replace('-I', '', 1).lstrip()
         elif text.startswith('--init_img='):
-            path = text.replace('--init_img=','',1).lstrip()
+            path = text.replace('--init_img=', '', 1).lstrip()
         else:
             path = text
 
-        matches  = list()
+        matches = list()
 
         path = os.path.expanduser(path)
-        if len(path)==0:
-            matches.append(text+'./')
+        if len(path) == 0:
+            matches.append(text + './')
         else:
-            dir  = os.path.dirname(path)
+            dir = os.path.dirname(path)
             dir_list = os.listdir(dir)
             for n in dir_list:
-                if n.startswith('.') and len(n)>1:
+                if n.startswith('.') and len(n) > 1:
                     continue
-                full_path = os.path.join(dir,n)
+                full_path = os.path.join(dir, n)
                 if full_path.startswith(path):
                     if os.path.isdir(full_path):
-                        matches.append(os.path.join(os.path.dirname(text),n)+'/')
+                        matches.append(
+                            os.path.join(os.path.dirname(text), n) + '/'
+                        )
                     elif n.endswith(extensions):
-                        matches.append(os.path.join(os.path.dirname(text),n))
+                        matches.append(os.path.join(os.path.dirname(text), n))
 
         try:
             response = matches[state]
@@ -76,19 +81,47 @@ class Completer():
             response = None
         return response
 
+
 if readline_available:
-    readline.set_completer(Completer(['cd','pwd',
-                                      '--steps','-s','--seed','-S','--iterations','-n','--batch_size','-b',
-                                      '--width','-W','--height','-H','--cfg_scale','-C','--grid','-g',
-                                      '--individual','-i','--init_img','-I','--strength','-f','-v','--variants']).complete)
-    readline.set_completer_delims(" ")
+    readline.set_completer(
+        Completer(
+            [
+                'cd',
+                'pwd',
+                '--steps',
+                '-s',
+                '--seed',
+                '-S',
+                '--iterations',
+                '-n',
+                '--batch_size',
+                '-b',
+                '--width',
+                '-W',
+                '--height',
+                '-H',
+                '--cfg_scale',
+                '-C',
+                '--grid',
+                '-g',
+                '--individual',
+                '-i',
+                '--init_img',
+                '-I',
+                '--strength',
+                '-f',
+                '-v',
+                '--variants',
+            ]
+        ).complete
+    )
+    readline.set_completer_delims(' ')
     readline.parse_and_bind('tab: complete')
 
-    histfile = os.path.join(os.path.expanduser('~'),".dream_history")
+    histfile = os.path.join(os.path.expanduser('~'), '.dream_history')
     try:
         readline.read_history_file(histfile)
         readline.set_history_length(1000)
     except FileNotFoundError:
         pass
-    atexit.register(readline.write_history_file,histfile)
-
+    atexit.register(readline.write_history_file, histfile)
diff --git a/ldm/lr_scheduler.py b/ldm/lr_scheduler.py
index be39da9ca6..79c1d1978e 100644
--- a/ldm/lr_scheduler.py
+++ b/ldm/lr_scheduler.py
@@ -5,32 +5,49 @@ class LambdaWarmUpCosineScheduler:
     """
     note: use with a base_lr of 1.0
     """
-    def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
+
+    def __init__(
+        self,
+        warm_up_steps,
+        lr_min,
+        lr_max,
+        lr_start,
+        max_decay_steps,
+        verbosity_interval=0,
+    ):
         self.lr_warm_up_steps = warm_up_steps
         self.lr_start = lr_start
         self.lr_min = lr_min
         self.lr_max = lr_max
         self.lr_max_decay_steps = max_decay_steps
-        self.last_lr = 0.
+        self.last_lr = 0.0
         self.verbosity_interval = verbosity_interval
 
     def schedule(self, n, **kwargs):
         if self.verbosity_interval > 0:
-            if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
+            if n % self.verbosity_interval == 0:
+                print(
+                    f'current step: {n}, recent lr-multiplier: {self.last_lr}'
+                )
         if n < self.lr_warm_up_steps:
-            lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start
+            lr = (
+                self.lr_max - self.lr_start
+            ) / self.lr_warm_up_steps * n + self.lr_start
             self.last_lr = lr
             return lr
         else:
-            t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
+            t = (n - self.lr_warm_up_steps) / (
+                self.lr_max_decay_steps - self.lr_warm_up_steps
+            )
             t = min(t, 1.0)
             lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (
-                    1 + np.cos(t * np.pi))
+                1 + np.cos(t * np.pi)
+            )
             self.last_lr = lr
             return lr
 
     def __call__(self, n, **kwargs):
-        return self.schedule(n,**kwargs)
+        return self.schedule(n, **kwargs)
 
 
 class LambdaWarmUpCosineScheduler2:
@@ -38,15 +55,30 @@ class LambdaWarmUpCosineScheduler2:
     supports repeated iterations, configurable via lists
     note: use with a base_lr of 1.0.
     """
-    def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
-        assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths)
+
+    def __init__(
+        self,
+        warm_up_steps,
+        f_min,
+        f_max,
+        f_start,
+        cycle_lengths,
+        verbosity_interval=0,
+    ):
+        assert (
+            len(warm_up_steps)
+            == len(f_min)
+            == len(f_max)
+            == len(f_start)
+            == len(cycle_lengths)
+        )
         self.lr_warm_up_steps = warm_up_steps
         self.f_start = f_start
         self.f_min = f_min
         self.f_max = f_max
         self.cycle_lengths = cycle_lengths
         self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
-        self.last_f = 0.
+        self.last_f = 0.0
         self.verbosity_interval = verbosity_interval
 
     def find_in_interval(self, n):
@@ -60,17 +92,25 @@ class LambdaWarmUpCosineScheduler2:
         cycle = self.find_in_interval(n)
         n = n - self.cum_cycles[cycle]
         if self.verbosity_interval > 0:
-            if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
-                                                       f"current cycle {cycle}")
+            if n % self.verbosity_interval == 0:
+                print(
+                    f'current step: {n}, recent lr-multiplier: {self.last_f}, '
+                    f'current cycle {cycle}'
+                )
         if n < self.lr_warm_up_steps[cycle]:
-            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
+            f = (
+                self.f_max[cycle] - self.f_start[cycle]
+            ) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
             self.last_f = f
             return f
         else:
-            t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle])
+            t = (n - self.lr_warm_up_steps[cycle]) / (
+                self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]
+            )
             t = min(t, 1.0)
-            f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (
-                    1 + np.cos(t * np.pi))
+            f = self.f_min[cycle] + 0.5 * (
+                self.f_max[cycle] - self.f_min[cycle]
+            ) * (1 + np.cos(t * np.pi))
             self.last_f = f
             return f
 
@@ -79,20 +119,25 @@ class LambdaWarmUpCosineScheduler2:
 
 
 class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
-
     def schedule(self, n, **kwargs):
         cycle = self.find_in_interval(n)
         n = n - self.cum_cycles[cycle]
         if self.verbosity_interval > 0:
-            if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
-                                                       f"current cycle {cycle}")
+            if n % self.verbosity_interval == 0:
+                print(
+                    f'current step: {n}, recent lr-multiplier: {self.last_f}, '
+                    f'current cycle {cycle}'
+                )
 
         if n < self.lr_warm_up_steps[cycle]:
-            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
+            f = (
+                self.f_max[cycle] - self.f_start[cycle]
+            ) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
             self.last_f = f
             return f
         else:
-            f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle])
+            f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (
+                self.cycle_lengths[cycle] - n
+            ) / (self.cycle_lengths[cycle])
             self.last_f = f
             return f
-
diff --git a/ldm/models/autoencoder.py b/ldm/models/autoencoder.py
index 6a9c4f4549..359f5688d1 100644
--- a/ldm/models/autoencoder.py
+++ b/ldm/models/autoencoder.py
@@ -6,29 +6,32 @@ from contextlib import contextmanager
 from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
 
 from ldm.modules.diffusionmodules.model import Encoder, Decoder
-from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
+from ldm.modules.distributions.distributions import (
+    DiagonalGaussianDistribution,
+)
 
 from ldm.util import instantiate_from_config
 
 
 class VQModel(pl.LightningModule):
-    def __init__(self,
-                 ddconfig,
-                 lossconfig,
-                 n_embed,
-                 embed_dim,
-                 ckpt_path=None,
-                 ignore_keys=[],
-                 image_key="image",
-                 colorize_nlabels=None,
-                 monitor=None,
-                 batch_resize_range=None,
-                 scheduler_config=None,
-                 lr_g_factor=1.0,
-                 remap=None,
-                 sane_index_shape=False, # tell vector quantizer to return indices as bhw
-                 use_ema=False
-                 ):
+    def __init__(
+        self,
+        ddconfig,
+        lossconfig,
+        n_embed,
+        embed_dim,
+        ckpt_path=None,
+        ignore_keys=[],
+        image_key='image',
+        colorize_nlabels=None,
+        monitor=None,
+        batch_resize_range=None,
+        scheduler_config=None,
+        lr_g_factor=1.0,
+        remap=None,
+        sane_index_shape=False,  # tell vector quantizer to return indices as bhw
+        use_ema=False,
+    ):
         super().__init__()
         self.embed_dim = embed_dim
         self.n_embed = n_embed
@@ -36,24 +39,34 @@ class VQModel(pl.LightningModule):
         self.encoder = Encoder(**ddconfig)
         self.decoder = Decoder(**ddconfig)
         self.loss = instantiate_from_config(lossconfig)
-        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
-                                        remap=remap,
-                                        sane_index_shape=sane_index_shape)
-        self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
-        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
+        self.quantize = VectorQuantizer(
+            n_embed,
+            embed_dim,
+            beta=0.25,
+            remap=remap,
+            sane_index_shape=sane_index_shape,
+        )
+        self.quant_conv = torch.nn.Conv2d(ddconfig['z_channels'], embed_dim, 1)
+        self.post_quant_conv = torch.nn.Conv2d(
+            embed_dim, ddconfig['z_channels'], 1
+        )
         if colorize_nlabels is not None:
-            assert type(colorize_nlabels)==int
-            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
+            assert type(colorize_nlabels) == int
+            self.register_buffer(
+                'colorize', torch.randn(3, colorize_nlabels, 1, 1)
+            )
         if monitor is not None:
             self.monitor = monitor
         self.batch_resize_range = batch_resize_range
         if self.batch_resize_range is not None:
-            print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")
+            print(
+                f'{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.'
+            )
 
         self.use_ema = use_ema
         if self.use_ema:
             self.model_ema = LitEma(self)
-            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+            print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.')
 
         if ckpt_path is not None:
             self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
@@ -66,28 +79,30 @@ class VQModel(pl.LightningModule):
             self.model_ema.store(self.parameters())
             self.model_ema.copy_to(self)
             if context is not None:
-                print(f"{context}: Switched to EMA weights")
+                print(f'{context}: Switched to EMA weights')
         try:
             yield None
         finally:
             if self.use_ema:
                 self.model_ema.restore(self.parameters())
                 if context is not None:
-                    print(f"{context}: Restored training weights")
+                    print(f'{context}: Restored training weights')
 
     def init_from_ckpt(self, path, ignore_keys=list()):
-        sd = torch.load(path, map_location="cpu")["state_dict"]
+        sd = torch.load(path, map_location='cpu')['state_dict']
         keys = list(sd.keys())
         for k in keys:
             for ik in ignore_keys:
                 if k.startswith(ik):
-                    print("Deleting key {} from state_dict.".format(k))
+                    print('Deleting key {} from state_dict.'.format(k))
                     del sd[k]
         missing, unexpected = self.load_state_dict(sd, strict=False)
-        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
+        print(
+            f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys'
+        )
         if len(missing) > 0:
-            print(f"Missing Keys: {missing}")
-            print(f"Unexpected Keys: {unexpected}")
+            print(f'Missing Keys: {missing}')
+            print(f'Unexpected Keys: {unexpected}')
 
     def on_train_batch_end(self, *args, **kwargs):
         if self.use_ema:
@@ -115,7 +130,7 @@ class VQModel(pl.LightningModule):
         return dec
 
     def forward(self, input, return_pred_indices=False):
-        quant, diff, (_,_,ind) = self.encode(input)
+        quant, diff, (_, _, ind) = self.encode(input)
         dec = self.decode(quant)
         if return_pred_indices:
             return dec, diff, ind
@@ -125,7 +140,11 @@ class VQModel(pl.LightningModule):
         x = batch[k]
         if len(x.shape) == 3:
             x = x[..., None]
-        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
+        x = (
+            x.permute(0, 3, 1, 2)
+            .to(memory_format=torch.contiguous_format)
+            .float()
+        )
         if self.batch_resize_range is not None:
             lower_size = self.batch_resize_range[0]
             upper_size = self.batch_resize_range[1]
@@ -133,9 +152,11 @@ class VQModel(pl.LightningModule):
                 # do the first few batches with max size to avoid later oom
                 new_resize = upper_size
             else:
-                new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
+                new_resize = np.random.choice(
+                    np.arange(lower_size, upper_size + 16, 16)
+                )
             if new_resize != x.shape[2]:
-                x = F.interpolate(x, size=new_resize, mode="bicubic")
+                x = F.interpolate(x, size=new_resize, mode='bicubic')
             x = x.detach()
         return x
 
@@ -147,81 +168,139 @@ class VQModel(pl.LightningModule):
 
         if optimizer_idx == 0:
             # autoencode
-            aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
-                                            last_layer=self.get_last_layer(), split="train",
-                                            predicted_indices=ind)
+            aeloss, log_dict_ae = self.loss(
+                qloss,
+                x,
+                xrec,
+                optimizer_idx,
+                self.global_step,
+                last_layer=self.get_last_layer(),
+                split='train',
+                predicted_indices=ind,
+            )
 
-            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
+            self.log_dict(
+                log_dict_ae,
+                prog_bar=False,
+                logger=True,
+                on_step=True,
+                on_epoch=True,
+            )
             return aeloss
 
         if optimizer_idx == 1:
             # discriminator
-            discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
-                                            last_layer=self.get_last_layer(), split="train")
-            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
+            discloss, log_dict_disc = self.loss(
+                qloss,
+                x,
+                xrec,
+                optimizer_idx,
+                self.global_step,
+                last_layer=self.get_last_layer(),
+                split='train',
+            )
+            self.log_dict(
+                log_dict_disc,
+                prog_bar=False,
+                logger=True,
+                on_step=True,
+                on_epoch=True,
+            )
             return discloss
 
     def validation_step(self, batch, batch_idx):
         log_dict = self._validation_step(batch, batch_idx)
         with self.ema_scope():
-            log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
+            log_dict_ema = self._validation_step(
+                batch, batch_idx, suffix='_ema'
+            )
         return log_dict
 
-    def _validation_step(self, batch, batch_idx, suffix=""):
+    def _validation_step(self, batch, batch_idx, suffix=''):
         x = self.get_input(batch, self.image_key)
         xrec, qloss, ind = self(x, return_pred_indices=True)
-        aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
-                                        self.global_step,
-                                        last_layer=self.get_last_layer(),
-                                        split="val"+suffix,
-                                        predicted_indices=ind
-                                        )
+        aeloss, log_dict_ae = self.loss(
+            qloss,
+            x,
+            xrec,
+            0,
+            self.global_step,
+            last_layer=self.get_last_layer(),
+            split='val' + suffix,
+            predicted_indices=ind,
+        )
 
-        discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
-                                            self.global_step,
-                                            last_layer=self.get_last_layer(),
-                                            split="val"+suffix,
-                                            predicted_indices=ind
-                                            )
-        rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
-        self.log(f"val{suffix}/rec_loss", rec_loss,
-                   prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
-        self.log(f"val{suffix}/aeloss", aeloss,
-                   prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
+        discloss, log_dict_disc = self.loss(
+            qloss,
+            x,
+            xrec,
+            1,
+            self.global_step,
+            last_layer=self.get_last_layer(),
+            split='val' + suffix,
+            predicted_indices=ind,
+        )
+        rec_loss = log_dict_ae[f'val{suffix}/rec_loss']
+        self.log(
+            f'val{suffix}/rec_loss',
+            rec_loss,
+            prog_bar=True,
+            logger=True,
+            on_step=False,
+            on_epoch=True,
+            sync_dist=True,
+        )
+        self.log(
+            f'val{suffix}/aeloss',
+            aeloss,
+            prog_bar=True,
+            logger=True,
+            on_step=False,
+            on_epoch=True,
+            sync_dist=True,
+        )
         if version.parse(pl.__version__) >= version.parse('1.4.0'):
-            del log_dict_ae[f"val{suffix}/rec_loss"]
+            del log_dict_ae[f'val{suffix}/rec_loss']
         self.log_dict(log_dict_ae)
         self.log_dict(log_dict_disc)
         return self.log_dict
 
     def configure_optimizers(self):
         lr_d = self.learning_rate
-        lr_g = self.lr_g_factor*self.learning_rate
-        print("lr_d", lr_d)
-        print("lr_g", lr_g)
-        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
-                                  list(self.decoder.parameters())+
-                                  list(self.quantize.parameters())+
-                                  list(self.quant_conv.parameters())+
-                                  list(self.post_quant_conv.parameters()),
-                                  lr=lr_g, betas=(0.5, 0.9))
-        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
-                                    lr=lr_d, betas=(0.5, 0.9))
+        lr_g = self.lr_g_factor * self.learning_rate
+        print('lr_d', lr_d)
+        print('lr_g', lr_g)
+        opt_ae = torch.optim.Adam(
+            list(self.encoder.parameters())
+            + list(self.decoder.parameters())
+            + list(self.quantize.parameters())
+            + list(self.quant_conv.parameters())
+            + list(self.post_quant_conv.parameters()),
+            lr=lr_g,
+            betas=(0.5, 0.9),
+        )
+        opt_disc = torch.optim.Adam(
+            self.loss.discriminator.parameters(), lr=lr_d, betas=(0.5, 0.9)
+        )
 
         if self.scheduler_config is not None:
             scheduler = instantiate_from_config(self.scheduler_config)
 
-            print("Setting up LambdaLR scheduler...")
+            print('Setting up LambdaLR scheduler...')
             scheduler = [
                 {
-                    'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
+                    'scheduler': LambdaLR(
+                        opt_ae, lr_lambda=scheduler.schedule
+                    ),
                     'interval': 'step',
-                    'frequency': 1
+                    'frequency': 1,
                 },
                 {
-                    'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
+                    'scheduler': LambdaLR(
+                        opt_disc, lr_lambda=scheduler.schedule
+                    ),
                     'interval': 'step',
-                    'frequency': 1
+                    'frequency': 1,
                 },
             ]
             return [opt_ae, opt_disc], scheduler
@@ -235,7 +314,7 @@ class VQModel(pl.LightningModule):
         x = self.get_input(batch, self.image_key)
         x = x.to(self.device)
         if only_inputs:
-            log["inputs"] = x
+            log['inputs'] = x
             return log
         xrec, _ = self(x)
         if x.shape[1] > 3:
@@ -243,21 +322,24 @@ class VQModel(pl.LightningModule):
             assert xrec.shape[1] > 3
             x = self.to_rgb(x)
             xrec = self.to_rgb(xrec)
-        log["inputs"] = x
-        log["reconstructions"] = xrec
+        log['inputs'] = x
+        log['reconstructions'] = xrec
         if plot_ema:
             with self.ema_scope():
                 xrec_ema, _ = self(x)
-                if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
-                log["reconstructions_ema"] = xrec_ema
+                if x.shape[1] > 3:
+                    xrec_ema = self.to_rgb(xrec_ema)
+                log['reconstructions_ema'] = xrec_ema
         return log
 
     def to_rgb(self, x):
-        assert self.image_key == "segmentation"
-        if not hasattr(self, "colorize"):
-            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
+        assert self.image_key == 'segmentation'
+        if not hasattr(self, 'colorize'):
+            self.register_buffer(
+                'colorize', torch.randn(3, x.shape[1], 1, 1).to(x)
+            )
         x = F.conv2d(x, weight=self.colorize)
-        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
+        x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0
         return x
 
 
@@ -283,43 +365,50 @@ class VQModelInterface(VQModel):
 
 
 class AutoencoderKL(pl.LightningModule):
-    def __init__(self,
-                 ddconfig,
-                 lossconfig,
-                 embed_dim,
-                 ckpt_path=None,
-                 ignore_keys=[],
-                 image_key="image",
-                 colorize_nlabels=None,
-                 monitor=None,
-                 ):
+    def __init__(
+        self,
+        ddconfig,
+        lossconfig,
+        embed_dim,
+        ckpt_path=None,
+        ignore_keys=[],
+        image_key='image',
+        colorize_nlabels=None,
+        monitor=None,
+    ):
         super().__init__()
         self.image_key = image_key
         self.encoder = Encoder(**ddconfig)
         self.decoder = Decoder(**ddconfig)
         self.loss = instantiate_from_config(lossconfig)
-        assert ddconfig["double_z"]
-        self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
-        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
+        assert ddconfig['double_z']
+        self.quant_conv = torch.nn.Conv2d(
+            2 * ddconfig['z_channels'], 2 * embed_dim, 1
+        )
+        self.post_quant_conv = torch.nn.Conv2d(
+            embed_dim, ddconfig['z_channels'], 1
+        )
         self.embed_dim = embed_dim
         if colorize_nlabels is not None:
-            assert type(colorize_nlabels)==int
-            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
+            assert type(colorize_nlabels) == int
+            self.register_buffer(
+                'colorize', torch.randn(3, colorize_nlabels, 1, 1)
+            )
         if monitor is not None:
             self.monitor = monitor
         if ckpt_path is not None:
             self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
 
     def init_from_ckpt(self, path, ignore_keys=list()):
-        sd = torch.load(path, map_location="cpu")["state_dict"]
+        sd = torch.load(path, map_location='cpu')['state_dict']
         keys = list(sd.keys())
         for k in keys:
             for ik in ignore_keys:
                 if k.startswith(ik):
-                    print("Deleting key {} from state_dict.".format(k))
+                    print('Deleting key {} from state_dict.'.format(k))
                     del sd[k]
         self.load_state_dict(sd, strict=False)
-        print(f"Restored from {path}")
+        print(f'Restored from {path}')
 
     def encode(self, x):
         h = self.encoder(x)
@@ -345,7 +434,11 @@ class AutoencoderKL(pl.LightningModule):
         x = batch[k]
         if len(x.shape) == 3:
             x = x[..., None]
-        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
+        x = (
+            x.permute(0, 3, 1, 2)
+            .to(memory_format=torch.contiguous_format)
+            .float()
+        )
         return x
 
     def training_step(self, batch, batch_idx, optimizer_idx):
@@ -354,44 +447,102 @@ class AutoencoderKL(pl.LightningModule):
 
         if optimizer_idx == 0:
             # train encoder+decoder+logvar
-            aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
-                                            last_layer=self.get_last_layer(), split="train")
-            self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
-            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
+            aeloss, log_dict_ae = self.loss(
+                inputs,
+                reconstructions,
+                posterior,
+                optimizer_idx,
+                self.global_step,
+                last_layer=self.get_last_layer(),
+                split='train',
+            )
+            self.log(
+                'aeloss',
+                aeloss,
+                prog_bar=True,
+                logger=True,
+                on_step=True,
+                on_epoch=True,
+            )
+            self.log_dict(
+                log_dict_ae,
+                prog_bar=False,
+                logger=True,
+                on_step=True,
+                on_epoch=False,
+            )
             return aeloss
 
         if optimizer_idx == 1:
             # train the discriminator
-            discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
-                                                last_layer=self.get_last_layer(), split="train")
+            discloss, log_dict_disc = self.loss(
+                inputs,
+                reconstructions,
+                posterior,
+                optimizer_idx,
+                self.global_step,
+                last_layer=self.get_last_layer(),
+                split='train',
+            )
 
-            self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
-            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
+            self.log(
+                'discloss',
+                discloss,
+                prog_bar=True,
+                logger=True,
+                on_step=True,
+                on_epoch=True,
+            )
+            self.log_dict(
+                log_dict_disc,
+                prog_bar=False,
+                logger=True,
+                on_step=True,
+                on_epoch=False,
+            )
             return discloss
 
     def validation_step(self, batch, batch_idx):
         inputs = self.get_input(batch, self.image_key)
         reconstructions, posterior = self(inputs)
-        aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
-                                        last_layer=self.get_last_layer(), split="val")
+        aeloss, log_dict_ae = self.loss(
+            inputs,
+            reconstructions,
+            posterior,
+            0,
+            self.global_step,
+            last_layer=self.get_last_layer(),
+            split='val',
+        )
 
-        discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
-                                            last_layer=self.get_last_layer(), split="val")
+        discloss, log_dict_disc = self.loss(
+            inputs,
+            reconstructions,
+            posterior,
+            1,
+            self.global_step,
+            last_layer=self.get_last_layer(),
+            split='val',
+        )
 
-        self.log("val/rec_loss", log_dict_ae["val/rec_loss"])
+        self.log('val/rec_loss', log_dict_ae['val/rec_loss'])
         self.log_dict(log_dict_ae)
         self.log_dict(log_dict_disc)
         return self.log_dict
 
     def configure_optimizers(self):
         lr = self.learning_rate
-        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
-                                  list(self.decoder.parameters())+
-                                  list(self.quant_conv.parameters())+
-                                  list(self.post_quant_conv.parameters()),
-                                  lr=lr, betas=(0.5, 0.9))
-        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
-                                    lr=lr, betas=(0.5, 0.9))
+        opt_ae = torch.optim.Adam(
+            list(self.encoder.parameters())
+            + list(self.decoder.parameters())
+            + list(self.quant_conv.parameters())
+            + list(self.post_quant_conv.parameters()),
+            lr=lr,
+            betas=(0.5, 0.9),
+        )
+        opt_disc = torch.optim.Adam(
+            self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)
+        )
         return [opt_ae, opt_disc], []
 
     def get_last_layer(self):
@@ -409,17 +560,19 @@ class AutoencoderKL(pl.LightningModule):
                 assert xrec.shape[1] > 3
                 x = self.to_rgb(x)
                 xrec = self.to_rgb(xrec)
-            log["samples"] = self.decode(torch.randn_like(posterior.sample()))
-            log["reconstructions"] = xrec
-        log["inputs"] = x
+            log['samples'] = self.decode(torch.randn_like(posterior.sample()))
+            log['reconstructions'] = xrec
+        log['inputs'] = x
         return log
 
     def to_rgb(self, x):
-        assert self.image_key == "segmentation"
-        if not hasattr(self, "colorize"):
-            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
+        assert self.image_key == 'segmentation'
+        if not hasattr(self, 'colorize'):
+            self.register_buffer(
+                'colorize', torch.randn(3, x.shape[1], 1, 1).to(x)
+            )
         x = F.conv2d(x, weight=self.colorize)
-        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
+        x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0
         return x
 
 
diff --git a/ldm/models/diffusion/classifier.py b/ldm/models/diffusion/classifier.py
index 67e98b9d8f..be0d8c1919 100644
--- a/ldm/models/diffusion/classifier.py
+++ b/ldm/models/diffusion/classifier.py
@@ -10,13 +10,13 @@ from einops import rearrange
 from glob import glob
 from natsort import natsorted
 
-from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel
+from ldm.modules.diffusionmodules.openaimodel import (
+    EncoderUNetModel,
+    UNetModel,
+)
 from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config
 
-__models__ = {
-    'class_label': EncoderUNetModel,
-    'segmentation': UNetModel
-}
+__models__ = {'class_label': EncoderUNetModel, 'segmentation': UNetModel}
 
 
 def disabled_train(self, mode=True):
@@ -26,37 +26,49 @@ def disabled_train(self, mode=True):
 
 
 class NoisyLatentImageClassifier(pl.LightningModule):
-
-    def __init__(self,
-                 diffusion_path,
-                 num_classes,
-                 ckpt_path=None,
-                 pool='attention',
-                 label_key=None,
-                 diffusion_ckpt_path=None,
-                 scheduler_config=None,
-                 weight_decay=1.e-2,
-                 log_steps=10,
-                 monitor='val/loss',
-                 *args,
-                 **kwargs):
+    def __init__(
+        self,
+        diffusion_path,
+        num_classes,
+        ckpt_path=None,
+        pool='attention',
+        label_key=None,
+        diffusion_ckpt_path=None,
+        scheduler_config=None,
+        weight_decay=1.0e-2,
+        log_steps=10,
+        monitor='val/loss',
+        *args,
+        **kwargs,
+    ):
         super().__init__(*args, **kwargs)
         self.num_classes = num_classes
         # get latest config of diffusion model
-        diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1]
+        diffusion_config = natsorted(
+            glob(os.path.join(diffusion_path, 'configs', '*-project.yaml'))
+        )[-1]
         self.diffusion_config = OmegaConf.load(diffusion_config).model
         self.diffusion_config.params.ckpt_path = diffusion_ckpt_path
         self.load_diffusion()
 
         self.monitor = monitor
-        self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1
-        self.log_time_interval = self.diffusion_model.num_timesteps // log_steps
+        self.numd = (
+            self.diffusion_model.first_stage_model.encoder.num_resolutions - 1
+        )
+        self.log_time_interval = (
+            self.diffusion_model.num_timesteps // log_steps
+        )
         self.log_steps = log_steps
 
-        self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \
+        self.label_key = (
+            label_key
+            if not hasattr(self.diffusion_model, 'cond_stage_key')
             else self.diffusion_model.cond_stage_key
+        )
 
-        assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params'
+        assert (
+            self.label_key is not None
+        ), 'label_key neither in diffusion model nor in model.params'
 
         if self.label_key not in __models__:
             raise NotImplementedError()
@@ -68,22 +80,27 @@ class NoisyLatentImageClassifier(pl.LightningModule):
         self.weight_decay = weight_decay
 
     def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
-        sd = torch.load(path, map_location="cpu")
-        if "state_dict" in list(sd.keys()):
-            sd = sd["state_dict"]
+        sd = torch.load(path, map_location='cpu')
+        if 'state_dict' in list(sd.keys()):
+            sd = sd['state_dict']
         keys = list(sd.keys())
         for k in keys:
             for ik in ignore_keys:
                 if k.startswith(ik):
-                    print("Deleting key {} from state_dict.".format(k))
+                    print('Deleting key {} from state_dict.'.format(k))
                     del sd[k]
-        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
-            sd, strict=False)
-        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
+        missing, unexpected = (
+            self.load_state_dict(sd, strict=False)
+            if not only_model
+            else self.model.load_state_dict(sd, strict=False)
+        )
+        print(
+            f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys'
+        )
         if len(missing) > 0:
-            print(f"Missing Keys: {missing}")
+            print(f'Missing Keys: {missing}')
         if len(unexpected) > 0:
-            print(f"Unexpected Keys: {unexpected}")
+            print(f'Unexpected Keys: {unexpected}')
 
     def load_diffusion(self):
         model = instantiate_from_config(self.diffusion_config)
@@ -93,17 +110,25 @@ class NoisyLatentImageClassifier(pl.LightningModule):
             param.requires_grad = False
 
     def load_classifier(self, ckpt_path, pool):
-        model_config = deepcopy(self.diffusion_config.params.unet_config.params)
-        model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels
+        model_config = deepcopy(
+            self.diffusion_config.params.unet_config.params
+        )
+        model_config.in_channels = (
+            self.diffusion_config.params.unet_config.params.out_channels
+        )
         model_config.out_channels = self.num_classes
         if self.label_key == 'class_label':
             model_config.pool = pool
 
         self.model = __models__[self.label_key](**model_config)
         if ckpt_path is not None:
-            print('#####################################################################')
+            print(
+                '#####################################################################'
+            )
             print(f'load from ckpt "{ckpt_path}"')
-            print('#####################################################################')
+            print(
+                '#####################################################################'
+            )
             self.init_from_ckpt(ckpt_path)
 
     @torch.no_grad()
@@ -111,11 +136,19 @@ class NoisyLatentImageClassifier(pl.LightningModule):
         noise = default(noise, lambda: torch.randn_like(x))
         continuous_sqrt_alpha_cumprod = None
         if self.diffusion_model.use_continuous_noise:
-            continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1)
+            continuous_sqrt_alpha_cumprod = (
+                self.diffusion_model.sample_continuous_noise_level(
+                    x.shape[0], t + 1
+                )
+            )
             # todo: make sure t+1 is correct here
 
-        return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise,
-                                             continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod)
+        return self.diffusion_model.q_sample(
+            x_start=x,
+            t=t,
+            noise=noise,
+            continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod,
+        )
 
     def forward(self, x_noisy, t, *args, **kwargs):
         return self.model(x_noisy, t)
@@ -141,17 +174,21 @@ class NoisyLatentImageClassifier(pl.LightningModule):
             targets = rearrange(targets, 'b h w c -> b c h w')
             for down in range(self.numd):
                 h, w = targets.shape[-2:]
-                targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest')
+                targets = F.interpolate(
+                    targets, size=(h // 2, w // 2), mode='nearest'
+                )
 
             # targets = rearrange(targets,'b c h w -> b h w c')
 
         return targets
 
-    def compute_top_k(self, logits, labels, k, reduction="mean"):
+    def compute_top_k(self, logits, labels, k, reduction='mean'):
         _, top_ks = torch.topk(logits, k, dim=1)
-        if reduction == "mean":
-            return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
-        elif reduction == "none":
+        if reduction == 'mean':
+            return (
+                (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
+            )
+        elif reduction == 'none':
             return (top_ks == labels[:, None]).float().sum(dim=-1)
 
     def on_train_epoch_start(self):
@@ -162,29 +199,59 @@ class NoisyLatentImageClassifier(pl.LightningModule):
     def write_logs(self, loss, logits, targets):
         log_prefix = 'train' if self.training else 'val'
         log = {}
-        log[f"{log_prefix}/loss"] = loss.mean()
-        log[f"{log_prefix}/acc@1"] = self.compute_top_k(
-            logits, targets, k=1, reduction="mean"
+        log[f'{log_prefix}/loss'] = loss.mean()
+        log[f'{log_prefix}/acc@1'] = self.compute_top_k(
+            logits, targets, k=1, reduction='mean'
         )
-        log[f"{log_prefix}/acc@5"] = self.compute_top_k(
-            logits, targets, k=5, reduction="mean"
+        log[f'{log_prefix}/acc@5'] = self.compute_top_k(
+            logits, targets, k=5, reduction='mean'
         )
 
-        self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True)
-        self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False)
-        self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True)
+        self.log_dict(
+            log,
+            prog_bar=False,
+            logger=True,
+            on_step=self.training,
+            on_epoch=True,
+        )
+        self.log(
+            'loss', log[f'{log_prefix}/loss'], prog_bar=True, logger=False
+        )
+        self.log(
+            'global_step',
+            self.global_step,
+            logger=False,
+            on_epoch=False,
+            prog_bar=True,
+        )
         lr = self.optimizers().param_groups[0]['lr']
-        self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True)
+        self.log(
+            'lr_abs',
+            lr,
+            on_step=True,
+            logger=True,
+            on_epoch=False,
+            prog_bar=True,
+        )
 
     def shared_step(self, batch, t=None):
-        x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key)
+        x, *_ = self.diffusion_model.get_input(
+            batch, k=self.diffusion_model.first_stage_key
+        )
         targets = self.get_conditioning(batch)
         if targets.dim() == 4:
             targets = targets.argmax(dim=1)
         if t is None:
-            t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long()
+            t = torch.randint(
+                0,
+                self.diffusion_model.num_timesteps,
+                (x.shape[0],),
+                device=self.device,
+            ).long()
         else:
-            t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long()
+            t = torch.full(
+                size=(x.shape[0],), fill_value=t, device=self.device
+            ).long()
         x_noisy = self.get_x_noisy(x, t)
         logits = self(x_noisy, t)
 
@@ -200,8 +267,14 @@ class NoisyLatentImageClassifier(pl.LightningModule):
         return loss
 
     def reset_noise_accs(self):
-        self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in
-                          range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)}
+        self.noisy_acc = {
+            t: {'acc@1': [], 'acc@5': []}
+            for t in range(
+                0,
+                self.diffusion_model.num_timesteps,
+                self.diffusion_model.log_every_t,
+            )
+        }
 
     def on_validation_start(self):
         self.reset_noise_accs()
@@ -212,24 +285,35 @@ class NoisyLatentImageClassifier(pl.LightningModule):
 
         for t in self.noisy_acc:
             _, logits, _, targets = self.shared_step(batch, t)
-            self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean'))
-            self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean'))
+            self.noisy_acc[t]['acc@1'].append(
+                self.compute_top_k(logits, targets, k=1, reduction='mean')
+            )
+            self.noisy_acc[t]['acc@5'].append(
+                self.compute_top_k(logits, targets, k=5, reduction='mean')
+            )
 
         return loss
 
     def configure_optimizers(self):
-        optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
+        optimizer = AdamW(
+            self.model.parameters(),
+            lr=self.learning_rate,
+            weight_decay=self.weight_decay,
+        )
 
         if self.use_scheduler:
             scheduler = instantiate_from_config(self.scheduler_config)
 
-            print("Setting up LambdaLR scheduler...")
+            print('Setting up LambdaLR scheduler...')
             scheduler = [
                 {
-                    'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule),
+                    'scheduler': LambdaLR(
+                        optimizer, lr_lambda=scheduler.schedule
+                    ),
                     'interval': 'step',
-                    'frequency': 1
-                }]
+                    'frequency': 1,
+                }
+            ]
             return [optimizer], scheduler
 
         return optimizer
@@ -243,7 +327,7 @@ class NoisyLatentImageClassifier(pl.LightningModule):
         y = self.get_conditioning(batch)
 
         if self.label_key == 'class_label':
-            y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
+            y = log_txt_as_img((x.shape[2], x.shape[3]), batch['human_label'])
             log['labels'] = y
 
         if ismap(y):
@@ -256,10 +340,14 @@ class NoisyLatentImageClassifier(pl.LightningModule):
 
                 log[f'inputs@t{current_time}'] = x_noisy
 
-                pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes)
+                pred = F.one_hot(
+                    logits.argmax(dim=1), num_classes=self.num_classes
+                )
                 pred = rearrange(pred, 'b h w c -> b c h w')
 
-                log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred)
+                log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(
+                    pred
+                )
 
         for key in log:
             log[key] = log[key][:N]
diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py
index ddf786b5a8..3d9086eb1d 100644
--- a/ldm/models/diffusion/ddim.py
+++ b/ldm/models/diffusion/ddim.py
@@ -5,12 +5,16 @@ import numpy as np
 from tqdm import tqdm
 from functools import partial
 
-from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, \
-    extract_into_tensor
+from ldm.modules.diffusionmodules.util import (
+    make_ddim_sampling_parameters,
+    make_ddim_timesteps,
+    noise_like,
+    extract_into_tensor,
+)
 
 
 class DDIMSampler(object):
-    def __init__(self, model, schedule="linear", device="cuda", **kwargs):
+    def __init__(self, model, schedule='linear', device='cuda', **kwargs):
         super().__init__()
         self.model = model
         self.ddpm_num_timesteps = model.num_timesteps
@@ -23,70 +27,122 @@ class DDIMSampler(object):
                 attr = attr.to(torch.device(self.device))
         setattr(self, name, attr)
 
-    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
-        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
-                                                  num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
+    def make_schedule(
+        self,
+        ddim_num_steps,
+        ddim_discretize='uniform',
+        ddim_eta=0.0,
+        verbose=True,
+    ):
+        self.ddim_timesteps = make_ddim_timesteps(
+            ddim_discr_method=ddim_discretize,
+            num_ddim_timesteps=ddim_num_steps,
+            num_ddpm_timesteps=self.ddpm_num_timesteps,
+            verbose=verbose,
+        )
         alphas_cumprod = self.model.alphas_cumprod
-        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
-        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
+        assert (
+            alphas_cumprod.shape[0] == self.ddpm_num_timesteps
+        ), 'alphas have to be defined for each timestep'
+        to_torch = (
+            lambda x: x.clone()
+            .detach()
+            .to(torch.float32)
+            .to(self.model.device)
+        )
 
         self.register_buffer('betas', to_torch(self.model.betas))
         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
+        self.register_buffer(
+            'alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)
+        )
 
         # calculations for diffusion q(x_t | x_{t-1}) and others
-        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
-        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
+        self.register_buffer(
+            'sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))
+        )
+        self.register_buffer(
+            'sqrt_one_minus_alphas_cumprod',
+            to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),
+        )
+        self.register_buffer(
+            'log_one_minus_alphas_cumprod',
+            to_torch(np.log(1.0 - alphas_cumprod.cpu())),
+        )
+        self.register_buffer(
+            'sqrt_recip_alphas_cumprod',
+            to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())),
+        )
+        self.register_buffer(
+            'sqrt_recipm1_alphas_cumprod',
+            to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),
+        )
 
         # ddim sampling parameters
-        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
-                                                                                   ddim_timesteps=self.ddim_timesteps,
-                                                                                   eta=ddim_eta,verbose=verbose)
+        (
+            ddim_sigmas,
+            ddim_alphas,
+            ddim_alphas_prev,
+        ) = make_ddim_sampling_parameters(
+            alphacums=alphas_cumprod.cpu(),
+            ddim_timesteps=self.ddim_timesteps,
+            eta=ddim_eta,
+            verbose=verbose,
+        )
         self.register_buffer('ddim_sigmas', ddim_sigmas)
         self.register_buffer('ddim_alphas', ddim_alphas)
         self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
-        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
+        self.register_buffer(
+            'ddim_sqrt_one_minus_alphas', np.sqrt(1.0 - ddim_alphas)
+        )
         sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
-            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
-                        1 - self.alphas_cumprod / self.alphas_cumprod_prev))
-        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
+            (1 - self.alphas_cumprod_prev)
+            / (1 - self.alphas_cumprod)
+            * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)
+        )
+        self.register_buffer(
+            'ddim_sigmas_for_original_num_steps',
+            sigmas_for_original_sampling_steps,
+        )
 
     @torch.no_grad()
-    def sample(self,
-               S,
-               batch_size,
-               shape,
-               conditioning=None,
-               callback=None,
-               normals_sequence=None,
-               img_callback=None,
-               quantize_x0=False,
-               eta=0.,
-               mask=None,
-               x0=None,
-               temperature=1.,
-               noise_dropout=0.,
-               score_corrector=None,
-               corrector_kwargs=None,
-               verbose=True,
-               x_T=None,
-               log_every_t=100,
-               unconditional_guidance_scale=1.,
-               unconditional_conditioning=None,
-               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
-               **kwargs
-               ):
+    def sample(
+        self,
+        S,
+        batch_size,
+        shape,
+        conditioning=None,
+        callback=None,
+        normals_sequence=None,
+        img_callback=None,
+        quantize_x0=False,
+        eta=0.0,
+        mask=None,
+        x0=None,
+        temperature=1.0,
+        noise_dropout=0.0,
+        score_corrector=None,
+        corrector_kwargs=None,
+        verbose=True,
+        x_T=None,
+        log_every_t=100,
+        unconditional_guidance_scale=1.0,
+        unconditional_conditioning=None,
+        # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
+        **kwargs,
+    ):
         if conditioning is not None:
             if isinstance(conditioning, dict):
                 cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                 if cbs != batch_size:
-                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
+                    print(
+                        f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'
+                    )
             else:
                 if conditioning.shape[0] != batch_size:
-                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
+                    print(
+                        f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'
+                    )
 
         self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
         # sampling
@@ -94,30 +150,47 @@ class DDIMSampler(object):
         size = (batch_size, C, H, W)
         print(f'Data shape for DDIM sampling is {size}, eta {eta}')
 
-        samples, intermediates = self.ddim_sampling(conditioning, size,
-                                                    callback=callback,
-                                                    img_callback=img_callback,
-                                                    quantize_denoised=quantize_x0,
-                                                    mask=mask, x0=x0,
-                                                    ddim_use_original_steps=False,
-                                                    noise_dropout=noise_dropout,
-                                                    temperature=temperature,
-                                                    score_corrector=score_corrector,
-                                                    corrector_kwargs=corrector_kwargs,
-                                                    x_T=x_T,
-                                                    log_every_t=log_every_t,
-                                                    unconditional_guidance_scale=unconditional_guidance_scale,
-                                                    unconditional_conditioning=unconditional_conditioning,
-                                                    )
+        samples, intermediates = self.ddim_sampling(
+            conditioning,
+            size,
+            callback=callback,
+            img_callback=img_callback,
+            quantize_denoised=quantize_x0,
+            mask=mask,
+            x0=x0,
+            ddim_use_original_steps=False,
+            noise_dropout=noise_dropout,
+            temperature=temperature,
+            score_corrector=score_corrector,
+            corrector_kwargs=corrector_kwargs,
+            x_T=x_T,
+            log_every_t=log_every_t,
+            unconditional_guidance_scale=unconditional_guidance_scale,
+            unconditional_conditioning=unconditional_conditioning,
+        )
         return samples, intermediates
 
     @torch.no_grad()
-    def ddim_sampling(self, cond, shape,
-                      x_T=None, ddim_use_original_steps=False,
-                      callback=None, timesteps=None, quantize_denoised=False,
-                      mask=None, x0=None, img_callback=None, log_every_t=100,
-                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                      unconditional_guidance_scale=1., unconditional_conditioning=None,):
+    def ddim_sampling(
+        self,
+        cond,
+        shape,
+        x_T=None,
+        ddim_use_original_steps=False,
+        callback=None,
+        timesteps=None,
+        quantize_denoised=False,
+        mask=None,
+        x0=None,
+        img_callback=None,
+        log_every_t=100,
+        temperature=1.0,
+        noise_dropout=0.0,
+        score_corrector=None,
+        corrector_kwargs=None,
+        unconditional_guidance_scale=1.0,
+        unconditional_conditioning=None,
+    ):
         device = self.model.betas.device
         b = shape[0]
         if x_T is None:
@@ -126,17 +199,38 @@ class DDIMSampler(object):
             img = x_T
 
         if timesteps is None:
-            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
+            timesteps = (
+                self.ddpm_num_timesteps
+                if ddim_use_original_steps
+                else self.ddim_timesteps
+            )
         elif timesteps is not None and not ddim_use_original_steps:
-            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
+            subset_end = (
+                int(
+                    min(timesteps / self.ddim_timesteps.shape[0], 1)
+                    * self.ddim_timesteps.shape[0]
+                )
+                - 1
+            )
             timesteps = self.ddim_timesteps[:subset_end]
 
         intermediates = {'x_inter': [img], 'pred_x0': [img]}
-        time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)
-        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
-        print(f"Running DDIM Sampling with {total_steps} timesteps")
+        time_range = (
+            reversed(range(0, timesteps))
+            if ddim_use_original_steps
+            else np.flip(timesteps)
+        )
+        total_steps = (
+            timesteps if ddim_use_original_steps else timesteps.shape[0]
+        )
+        print(f'Running DDIM Sampling with {total_steps} timesteps')
 
-        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps, dynamic_ncols=True)
+        iterator = tqdm(
+            time_range,
+            desc='DDIM Sampler',
+            total=total_steps,
+            dynamic_ncols=True,
+        )
 
         for i, step in enumerate(iterator):
             index = total_steps - i - 1
@@ -144,18 +238,30 @@ class DDIMSampler(object):
 
             if mask is not None:
                 assert x0 is not None
-                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
-                img = img_orig * mask + (1. - mask) * img
+                img_orig = self.model.q_sample(
+                    x0, ts
+                )  # TODO: deterministic forward pass?
+                img = img_orig * mask + (1.0 - mask) * img
 
-            outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
-                                      quantize_denoised=quantize_denoised, temperature=temperature,
-                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
-                                      corrector_kwargs=corrector_kwargs,
-                                      unconditional_guidance_scale=unconditional_guidance_scale,
-                                      unconditional_conditioning=unconditional_conditioning)
+            outs = self.p_sample_ddim(
+                img,
+                cond,
+                ts,
+                index=index,
+                use_original_steps=ddim_use_original_steps,
+                quantize_denoised=quantize_denoised,
+                temperature=temperature,
+                noise_dropout=noise_dropout,
+                score_corrector=score_corrector,
+                corrector_kwargs=corrector_kwargs,
+                unconditional_guidance_scale=unconditional_guidance_scale,
+                unconditional_conditioning=unconditional_conditioning,
+            )
             img, pred_x0 = outs
-            if callback: callback(i)
-            if img_callback: img_callback(pred_x0, i)
+            if callback:
+                callback(i)
+            if img_callback:
+                img_callback(pred_x0, i)
 
             if index % log_every_t == 0 or index == total_steps - 1:
                 intermediates['x_inter'].append(img)
@@ -164,42 +270,82 @@ class DDIMSampler(object):
         return img, intermediates
 
     @torch.no_grad()
-    def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
-                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                      unconditional_guidance_scale=1., unconditional_conditioning=None):
+    def p_sample_ddim(
+        self,
+        x,
+        c,
+        t,
+        index,
+        repeat_noise=False,
+        use_original_steps=False,
+        quantize_denoised=False,
+        temperature=1.0,
+        noise_dropout=0.0,
+        score_corrector=None,
+        corrector_kwargs=None,
+        unconditional_guidance_scale=1.0,
+        unconditional_conditioning=None,
+    ):
         b, *_, device = *x.shape, x.device
 
-        if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
+        if (
+            unconditional_conditioning is None
+            or unconditional_guidance_scale == 1.0
+        ):
             e_t = self.model.apply_model(x, t, c)
         else:
             x_in = torch.cat([x] * 2)
             t_in = torch.cat([t] * 2)
             c_in = torch.cat([unconditional_conditioning, c])
             e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
-            e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
+            e_t = e_t_uncond + unconditional_guidance_scale * (
+                e_t - e_t_uncond
+            )
 
         if score_corrector is not None:
-            assert self.model.parameterization == "eps"
-            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
+            assert self.model.parameterization == 'eps'
+            e_t = score_corrector.modify_score(
+                self.model, e_t, x, t, c, **corrector_kwargs
+            )
 
-        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
-        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
-        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
-        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
+        alphas = (
+            self.model.alphas_cumprod
+            if use_original_steps
+            else self.ddim_alphas
+        )
+        alphas_prev = (
+            self.model.alphas_cumprod_prev
+            if use_original_steps
+            else self.ddim_alphas_prev
+        )
+        sqrt_one_minus_alphas = (
+            self.model.sqrt_one_minus_alphas_cumprod
+            if use_original_steps
+            else self.ddim_sqrt_one_minus_alphas
+        )
+        sigmas = (
+            self.model.ddim_sigmas_for_original_num_steps
+            if use_original_steps
+            else self.ddim_sigmas
+        )
         # select parameters corresponding to the currently considered timestep
         a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
         a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
         sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
-        sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
+        sqrt_one_minus_at = torch.full(
+            (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device
+        )
 
         # current prediction for x_0
         pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
         if quantize_denoised:
             pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
         # direction pointing to x_t
-        dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
-        noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
-        if noise_dropout > 0.:
+        dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t
+        noise = (
+            sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
+        )
+        if noise_dropout > 0.0:
             noise = torch.nn.functional.dropout(noise, p=noise_dropout)
         x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
         return x_prev, pred_x0
@@ -217,26 +363,51 @@ class DDIMSampler(object):
 
         if noise is None:
             noise = torch.randn_like(x0)
-        return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
-                extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
+        return (
+            extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0
+            + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape)
+            * noise
+        )
 
     @torch.no_grad()
-    def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
-               use_original_steps=False):
+    def decode(
+        self,
+        x_latent,
+        cond,
+        t_start,
+        unconditional_guidance_scale=1.0,
+        unconditional_conditioning=None,
+        use_original_steps=False,
+    ):
 
-        timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
+        timesteps = (
+            np.arange(self.ddpm_num_timesteps)
+            if use_original_steps
+            else self.ddim_timesteps
+        )
         timesteps = timesteps[:t_start]
 
         time_range = np.flip(timesteps)
         total_steps = timesteps.shape[0]
-        print(f"Running DDIM Sampling with {total_steps} timesteps")
+        print(f'Running DDIM Sampling with {total_steps} timesteps')
 
         iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
         x_dec = x_latent
         for i, step in enumerate(iterator):
             index = total_steps - i - 1
-            ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
-            x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
-                                          unconditional_guidance_scale=unconditional_guidance_scale,
-                                          unconditional_conditioning=unconditional_conditioning)
+            ts = torch.full(
+                (x_latent.shape[0],),
+                step,
+                device=x_latent.device,
+                dtype=torch.long,
+            )
+            x_dec, _ = self.p_sample_ddim(
+                x_dec,
+                cond,
+                ts,
+                index=index,
+                use_original_steps=use_original_steps,
+                unconditional_guidance_scale=unconditional_guidance_scale,
+                unconditional_conditioning=unconditional_conditioning,
+            )
         return x_dec
diff --git a/ldm/models/diffusion/ddpm.py b/ldm/models/diffusion/ddpm.py
index d5f74a0fbe..ccfffa9b9b 100644
--- a/ldm/models/diffusion/ddpm.py
+++ b/ldm/models/diffusion/ddpm.py
@@ -21,17 +21,39 @@ from torchvision.utils import make_grid
 from pytorch_lightning.utilities.distributed import rank_zero_only
 import urllib
 
-from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
+from ldm.util import (
+    log_txt_as_img,
+    exists,
+    default,
+    ismap,
+    isimage,
+    mean_flat,
+    count_params,
+    instantiate_from_config,
+)
 from ldm.modules.ema import LitEma
-from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
-from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
-from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
+from ldm.modules.distributions.distributions import (
+    normal_kl,
+    DiagonalGaussianDistribution,
+)
+from ldm.models.autoencoder import (
+    VQModelInterface,
+    IdentityFirstStage,
+    AutoencoderKL,
+)
+from ldm.modules.diffusionmodules.util import (
+    make_beta_schedule,
+    extract_into_tensor,
+    noise_like,
+)
 from ldm.models.diffusion.ddim import DDIMSampler
 
 
-__conditioning_keys__ = {'concat': 'c_concat',
-                         'crossattn': 'c_crossattn',
-                         'adm': 'y'}
+__conditioning_keys__ = {
+    'concat': 'c_concat',
+    'crossattn': 'c_crossattn',
+    'adm': 'y',
+}
 
 
 def disabled_train(self, mode=True):
@@ -46,40 +68,46 @@ def uniform_on_device(r1, r2, shape, device):
 
 class DDPM(pl.LightningModule):
     # classic DDPM with Gaussian diffusion, in image space
-    def __init__(self,
-                 unet_config,
-                 timesteps=1000,
-                 beta_schedule="linear",
-                 loss_type="l2",
-                 ckpt_path=None,
-                 ignore_keys=[],
-                 load_only_unet=False,
-                 monitor="val/loss",
-                 use_ema=True,
-                 first_stage_key="image",
-                 image_size=256,
-                 channels=3,
-                 log_every_t=100,
-                 clip_denoised=True,
-                 linear_start=1e-4,
-                 linear_end=2e-2,
-                 cosine_s=8e-3,
-                 given_betas=None,
-                 original_elbo_weight=0.,
-                 embedding_reg_weight=0.,
-                 v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
-                 l_simple_weight=1.,
-                 conditioning_key=None,
-                 parameterization="eps",  # all assuming fixed variance schedules
-                 scheduler_config=None,
-                 use_positional_encodings=False,
-                 learn_logvar=False,
-                 logvar_init=0.,
-                 ):
+    def __init__(
+        self,
+        unet_config,
+        timesteps=1000,
+        beta_schedule='linear',
+        loss_type='l2',
+        ckpt_path=None,
+        ignore_keys=[],
+        load_only_unet=False,
+        monitor='val/loss',
+        use_ema=True,
+        first_stage_key='image',
+        image_size=256,
+        channels=3,
+        log_every_t=100,
+        clip_denoised=True,
+        linear_start=1e-4,
+        linear_end=2e-2,
+        cosine_s=8e-3,
+        given_betas=None,
+        original_elbo_weight=0.0,
+        embedding_reg_weight=0.0,
+        v_posterior=0.0,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
+        l_simple_weight=1.0,
+        conditioning_key=None,
+        parameterization='eps',  # all assuming fixed variance schedules
+        scheduler_config=None,
+        use_positional_encodings=False,
+        learn_logvar=False,
+        logvar_init=0.0,
+    ):
         super().__init__()
-        assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
+        assert parameterization in [
+            'eps',
+            'x0',
+        ], 'currently only supporting "eps" and "x0"'
         self.parameterization = parameterization
-        print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
+        print(
+            f'{self.__class__.__name__}: Running in {self.parameterization}-prediction mode'
+        )
         self.cond_stage_model = None
         self.clip_denoised = clip_denoised
         self.log_every_t = log_every_t
@@ -92,7 +120,7 @@ class DDPM(pl.LightningModule):
         self.use_ema = use_ema
         if self.use_ema:
             self.model_ema = LitEma(self.model)
-            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+            print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.')
 
         self.use_scheduler = scheduler_config is not None
         if self.use_scheduler:
@@ -106,68 +134,131 @@ class DDPM(pl.LightningModule):
         if monitor is not None:
             self.monitor = monitor
         if ckpt_path is not None:
-            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
+            self.init_from_ckpt(
+                ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet
+            )
 
-        self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
-                               linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
+        self.register_schedule(
+            given_betas=given_betas,
+            beta_schedule=beta_schedule,
+            timesteps=timesteps,
+            linear_start=linear_start,
+            linear_end=linear_end,
+            cosine_s=cosine_s,
+        )
 
         self.loss_type = loss_type
 
         self.learn_logvar = learn_logvar
-        self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
+        self.logvar = torch.full(
+            fill_value=logvar_init, size=(self.num_timesteps,)
+        )
         if self.learn_logvar:
             self.logvar = nn.Parameter(self.logvar, requires_grad=True)
 
-
-    def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
-                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+    def register_schedule(
+        self,
+        given_betas=None,
+        beta_schedule='linear',
+        timesteps=1000,
+        linear_start=1e-4,
+        linear_end=2e-2,
+        cosine_s=8e-3,
+    ):
         if exists(given_betas):
             betas = given_betas
         else:
-            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
-                                       cosine_s=cosine_s)
-        alphas = 1. - betas
+            betas = make_beta_schedule(
+                beta_schedule,
+                timesteps,
+                linear_start=linear_start,
+                linear_end=linear_end,
+                cosine_s=cosine_s,
+            )
+        alphas = 1.0 - betas
         alphas_cumprod = np.cumprod(alphas, axis=0)
-        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
+        alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
 
-        timesteps, = betas.shape
+        (timesteps,) = betas.shape
         self.num_timesteps = int(timesteps)
         self.linear_start = linear_start
         self.linear_end = linear_end
-        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
+        assert (
+            alphas_cumprod.shape[0] == self.num_timesteps
+        ), 'alphas have to be defined for each timestep'
 
         to_torch = partial(torch.tensor, dtype=torch.float32)
 
         self.register_buffer('betas', to_torch(betas))
         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
+        self.register_buffer(
+            'alphas_cumprod_prev', to_torch(alphas_cumprod_prev)
+        )
 
         # calculations for diffusion q(x_t | x_{t-1}) and others
-        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
-        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
-        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
-        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
-        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
+        self.register_buffer(
+            'sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))
+        )
+        self.register_buffer(
+            'sqrt_one_minus_alphas_cumprod',
+            to_torch(np.sqrt(1.0 - alphas_cumprod)),
+        )
+        self.register_buffer(
+            'log_one_minus_alphas_cumprod',
+            to_torch(np.log(1.0 - alphas_cumprod)),
+        )
+        self.register_buffer(
+            'sqrt_recip_alphas_cumprod',
+            to_torch(np.sqrt(1.0 / alphas_cumprod)),
+        )
+        self.register_buffer(
+            'sqrt_recipm1_alphas_cumprod',
+            to_torch(np.sqrt(1.0 / alphas_cumprod - 1)),
+        )
 
         # calculations for posterior q(x_{t-1} | x_t, x_0)
-        posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
-                    1. - alphas_cumprod) + self.v_posterior * betas
+        posterior_variance = (1 - self.v_posterior) * betas * (
+            1.0 - alphas_cumprod_prev
+        ) / (1.0 - alphas_cumprod) + self.v_posterior * betas
         # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
-        self.register_buffer('posterior_variance', to_torch(posterior_variance))
+        self.register_buffer(
+            'posterior_variance', to_torch(posterior_variance)
+        )
         # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
-        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
-        self.register_buffer('posterior_mean_coef1', to_torch(
-            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
-        self.register_buffer('posterior_mean_coef2', to_torch(
-            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
+        self.register_buffer(
+            'posterior_log_variance_clipped',
+            to_torch(np.log(np.maximum(posterior_variance, 1e-20))),
+        )
+        self.register_buffer(
+            'posterior_mean_coef1',
+            to_torch(
+                betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)
+            ),
+        )
+        self.register_buffer(
+            'posterior_mean_coef2',
+            to_torch(
+                (1.0 - alphas_cumprod_prev)
+                * np.sqrt(alphas)
+                / (1.0 - alphas_cumprod)
+            ),
+        )
 
-        if self.parameterization == "eps":
-            lvlb_weights = self.betas ** 2 / (
-                        2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
-        elif self.parameterization == "x0":
-            lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
+        if self.parameterization == 'eps':
+            lvlb_weights = self.betas**2 / (
+                2
+                * self.posterior_variance
+                * to_torch(alphas)
+                * (1 - self.alphas_cumprod)
+            )
+        elif self.parameterization == 'x0':
+            lvlb_weights = (
+                0.5
+                * np.sqrt(torch.Tensor(alphas_cumprod))
+                / (2.0 * 1 - torch.Tensor(alphas_cumprod))
+            )
         else:
-            raise NotImplementedError("mu not supported")
+            raise NotImplementedError('mu not supported')
         # TODO how to choose this term
         lvlb_weights[0] = lvlb_weights[1]
         self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
@@ -179,32 +270,37 @@ class DDPM(pl.LightningModule):
             self.model_ema.store(self.model.parameters())
             self.model_ema.copy_to(self.model)
             if context is not None:
-                print(f"{context}: Switched to EMA weights")
+                print(f'{context}: Switched to EMA weights')
         try:
             yield None
         finally:
             if self.use_ema:
                 self.model_ema.restore(self.model.parameters())
                 if context is not None:
-                    print(f"{context}: Restored training weights")
+                    print(f'{context}: Restored training weights')
 
     def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
-        sd = torch.load(path, map_location="cpu")
-        if "state_dict" in list(sd.keys()):
-            sd = sd["state_dict"]
+        sd = torch.load(path, map_location='cpu')
+        if 'state_dict' in list(sd.keys()):
+            sd = sd['state_dict']
         keys = list(sd.keys())
         for k in keys:
             for ik in ignore_keys:
                 if k.startswith(ik):
-                    print("Deleting key {} from state_dict.".format(k))
+                    print('Deleting key {} from state_dict.'.format(k))
                     del sd[k]
-        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
-            sd, strict=False)
-        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
+        missing, unexpected = (
+            self.load_state_dict(sd, strict=False)
+            if not only_model
+            else self.model.load_state_dict(sd, strict=False)
+        )
+        print(
+            f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys'
+        )
         if len(missing) > 0:
-            print(f"Missing Keys: {missing}")
+            print(f'Missing Keys: {missing}')
         if len(unexpected) > 0:
-            print(f"Unexpected Keys: {unexpected}")
+            print(f'Unexpected Keys: {unexpected}')
 
     def q_mean_variance(self, x_start, t):
         """
@@ -213,46 +309,78 @@ class DDPM(pl.LightningModule):
         :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
         :return: A tuple (mean, variance, log_variance), all of x_start's shape.
         """
-        mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
-        variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
-        log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
+        mean = (
+            extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape)
+            * x_start
+        )
+        variance = extract_into_tensor(
+            1.0 - self.alphas_cumprod, t, x_start.shape
+        )
+        log_variance = extract_into_tensor(
+            self.log_one_minus_alphas_cumprod, t, x_start.shape
+        )
         return mean, variance, log_variance
 
     def predict_start_from_noise(self, x_t, t, noise):
         return (
-                extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
-                extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
+            extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape)
+            * x_t
+            - extract_into_tensor(
+                self.sqrt_recipm1_alphas_cumprod, t, x_t.shape
+            )
+            * noise
         )
 
     def q_posterior(self, x_start, x_t, t):
         posterior_mean = (
-                extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
-                extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
+            extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape)
+            * x_start
+            + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape)
+            * x_t
+        )
+        posterior_variance = extract_into_tensor(
+            self.posterior_variance, t, x_t.shape
+        )
+        posterior_log_variance_clipped = extract_into_tensor(
+            self.posterior_log_variance_clipped, t, x_t.shape
+        )
+        return (
+            posterior_mean,
+            posterior_variance,
+            posterior_log_variance_clipped,
         )
-        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
-        posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
-        return posterior_mean, posterior_variance, posterior_log_variance_clipped
 
     def p_mean_variance(self, x, t, clip_denoised: bool):
         model_out = self.model(x, t)
-        if self.parameterization == "eps":
+        if self.parameterization == 'eps':
             x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
-        elif self.parameterization == "x0":
+        elif self.parameterization == 'x0':
             x_recon = model_out
         if clip_denoised:
-            x_recon.clamp_(-1., 1.)
+            x_recon.clamp_(-1.0, 1.0)
 
-        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
+        (
+            model_mean,
+            posterior_variance,
+            posterior_log_variance,
+        ) = self.q_posterior(x_start=x_recon, x_t=x, t=t)
         return model_mean, posterior_variance, posterior_log_variance
 
     @torch.no_grad()
     def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
         b, *_, device = *x.shape, x.device
-        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
+        model_mean, _, model_log_variance = self.p_mean_variance(
+            x=x, t=t, clip_denoised=clip_denoised
+        )
         noise = noise_like(x.shape, device, repeat_noise)
         # no noise when t == 0
-        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
-        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
+        nonzero_mask = (1 - (t == 0).float()).reshape(
+            b, *((1,) * (len(x.shape) - 1))
+        )
+        return (
+            model_mean
+            + nonzero_mask * (0.5 * model_log_variance).exp() * noise
+        )
 
     @torch.no_grad()
     def p_sample_loop(self, shape, return_intermediates=False):
@@ -260,9 +388,17 @@ class DDPM(pl.LightningModule):
         b = shape[0]
         img = torch.randn(shape, device=device)
         intermediates = [img]
-        for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps, dynamic_ncols=True):
-            img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
-                                clip_denoised=self.clip_denoised)
+        for i in tqdm(
+            reversed(range(0, self.num_timesteps)),
+            desc='Sampling t',
+            total=self.num_timesteps,
+            dynamic_ncols=True,
+        ):
+            img = self.p_sample(
+                img,
+                torch.full((b,), i, device=device, dtype=torch.long),
+                clip_denoised=self.clip_denoised,
+            )
             if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
                 intermediates.append(img)
         if return_intermediates:
@@ -273,13 +409,21 @@ class DDPM(pl.LightningModule):
     def sample(self, batch_size=16, return_intermediates=False):
         image_size = self.image_size
         channels = self.channels
-        return self.p_sample_loop((batch_size, channels, image_size, image_size),
-                                  return_intermediates=return_intermediates)
+        return self.p_sample_loop(
+            (batch_size, channels, image_size, image_size),
+            return_intermediates=return_intermediates,
+        )
 
     def q_sample(self, x_start, t, noise=None):
         noise = default(noise, lambda: torch.randn_like(x_start))
-        return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
-                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
+        return (
+            extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape)
+            * x_start
+            + extract_into_tensor(
+                self.sqrt_one_minus_alphas_cumprod, t, x_start.shape
+            )
+            * noise
+        )
 
     def get_loss(self, pred, target, mean=True):
         if self.loss_type == 'l1':
@@ -290,7 +434,9 @@ class DDPM(pl.LightningModule):
             if mean:
                 loss = torch.nn.functional.mse_loss(target, pred)
             else:
-                loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
+                loss = torch.nn.functional.mse_loss(
+                    target, pred, reduction='none'
+                )
         else:
             raise NotImplementedError("unknown loss type '{loss_type}'")
 
@@ -302,12 +448,14 @@ class DDPM(pl.LightningModule):
         model_out = self.model(x_noisy, t)
 
         loss_dict = {}
-        if self.parameterization == "eps":
+        if self.parameterization == 'eps':
             target = noise
-        elif self.parameterization == "x0":
+        elif self.parameterization == 'x0':
             target = x_start
         else:
-            raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
+            raise NotImplementedError(
+                f'Paramterization {self.parameterization} not yet supported'
+            )
 
         loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
 
@@ -328,7 +476,9 @@ class DDPM(pl.LightningModule):
     def forward(self, x, *args, **kwargs):
         # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
         # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
-        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
+        t = torch.randint(
+            0, self.num_timesteps, (x.shape[0],), device=self.device
+        ).long()
         return self.p_losses(x, t, *args, **kwargs)
 
     def get_input(self, batch, k):
@@ -347,15 +497,29 @@ class DDPM(pl.LightningModule):
     def training_step(self, batch, batch_idx):
         loss, loss_dict = self.shared_step(batch)
 
-        self.log_dict(loss_dict, prog_bar=True,
-                      logger=True, on_step=True, on_epoch=True)
+        self.log_dict(
+            loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True
+        )
 
-        self.log("global_step", self.global_step,
-                 prog_bar=True, logger=True, on_step=True, on_epoch=False)
+        self.log(
+            'global_step',
+            self.global_step,
+            prog_bar=True,
+            logger=True,
+            on_step=True,
+            on_epoch=False,
+        )
 
         if self.use_scheduler:
             lr = self.optimizers().param_groups[0]['lr']
-            self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
+            self.log(
+                'lr_abs',
+                lr,
+                prog_bar=True,
+                logger=True,
+                on_step=True,
+                on_epoch=False,
+            )
 
         return loss
 
@@ -364,9 +528,23 @@ class DDPM(pl.LightningModule):
         _, loss_dict_no_ema = self.shared_step(batch)
         with self.ema_scope():
             _, loss_dict_ema = self.shared_step(batch)
-            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
-        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
-        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
+            loss_dict_ema = {
+                key + '_ema': loss_dict_ema[key] for key in loss_dict_ema
+            }
+        self.log_dict(
+            loss_dict_no_ema,
+            prog_bar=False,
+            logger=True,
+            on_step=False,
+            on_epoch=True,
+        )
+        self.log_dict(
+            loss_dict_ema,
+            prog_bar=False,
+            logger=True,
+            on_step=False,
+            on_epoch=True,
+        )
 
     def on_train_batch_end(self, *args, **kwargs):
         if self.use_ema:
@@ -380,13 +558,15 @@ class DDPM(pl.LightningModule):
         return denoise_grid
 
     @torch.no_grad()
-    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
+    def log_images(
+        self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs
+    ):
         log = dict()
         x = self.get_input(batch, self.first_stage_key)
         N = min(x.shape[0], N)
         n_row = min(x.shape[0], n_row)
         x = x.to(self.device)[:N]
-        log["inputs"] = x
+        log['inputs'] = x
 
         # get diffusion row
         diffusion_row = list()
@@ -400,15 +580,17 @@ class DDPM(pl.LightningModule):
                 x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                 diffusion_row.append(x_noisy)
 
-        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
+        log['diffusion_row'] = self._get_rows_from_list(diffusion_row)
 
         if sample:
             # get denoise row
-            with self.ema_scope("Plotting"):
-                samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
+            with self.ema_scope('Plotting'):
+                samples, denoise_row = self.sample(
+                    batch_size=N, return_intermediates=True
+                )
 
-            log["samples"] = samples
-            log["denoise_row"] = self._get_rows_from_list(denoise_row)
+            log['samples'] = samples
+            log['denoise_row'] = self._get_rows_from_list(denoise_row)
 
         if return_keys:
             if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
@@ -428,19 +610,23 @@ class DDPM(pl.LightningModule):
 
 class LatentDiffusion(DDPM):
     """main class"""
-    def __init__(self,
-                 first_stage_config,
-                 cond_stage_config,
-                 personalization_config,
-                 num_timesteps_cond=None,
-                 cond_stage_key="image",
-                 cond_stage_trainable=False,
-                 concat_mode=True,
-                 cond_stage_forward=None,
-                 conditioning_key=None,
-                 scale_factor=1.0,
-                 scale_by_std=False,
-                 *args, **kwargs):
+
+    def __init__(
+        self,
+        first_stage_config,
+        cond_stage_config,
+        personalization_config,
+        num_timesteps_cond=None,
+        cond_stage_key='image',
+        cond_stage_trainable=False,
+        concat_mode=True,
+        cond_stage_forward=None,
+        conditioning_key=None,
+        scale_factor=1.0,
+        scale_by_std=False,
+        *args,
+        **kwargs,
+    ):
 
         self.num_timesteps_cond = default(num_timesteps_cond, 1)
         self.scale_by_std = scale_by_std
@@ -450,15 +636,17 @@ class LatentDiffusion(DDPM):
             conditioning_key = 'concat' if concat_mode else 'crossattn'
         if cond_stage_config == '__is_unconditional__':
             conditioning_key = None
-        ckpt_path = kwargs.pop("ckpt_path", None)
-        ignore_keys = kwargs.pop("ignore_keys", [])
+        ckpt_path = kwargs.pop('ckpt_path', None)
+        ignore_keys = kwargs.pop('ignore_keys', [])
         super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
         self.concat_mode = concat_mode
         self.cond_stage_trainable = cond_stage_trainable
         self.cond_stage_key = cond_stage_key
 
         try:
-            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
+            self.num_downs = (
+                len(first_stage_config.params.ddconfig.ch_mult) - 1
+            )
         except:
             self.num_downs = 0
         if not scale_by_std:
@@ -470,7 +658,7 @@ class LatentDiffusion(DDPM):
 
         self.cond_stage_forward = cond_stage_forward
         self.clip_denoised = False
-        self.bbox_tokenizer = None  
+        self.bbox_tokenizer = None
 
         self.restarted_from_ckpt = False
         if ckpt_path is not None:
@@ -485,8 +673,10 @@ class LatentDiffusion(DDPM):
         self.model.train = disabled_train
         for param in self.model.parameters():
             param.requires_grad = False
-        
-        self.embedding_manager = self.instantiate_embedding_manager(personalization_config, self.cond_stage_model)
+
+        self.embedding_manager = self.instantiate_embedding_manager(
+            personalization_config, self.cond_stage_model
+        )
 
         self.emb_ckpt_counter = 0
 
@@ -496,32 +686,61 @@ class LatentDiffusion(DDPM):
         for param in self.embedding_manager.embedding_parameters():
             param.requires_grad = True
 
-    def make_cond_schedule(self, ):
-        self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
-        ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
-        self.cond_ids[:self.num_timesteps_cond] = ids
+    def make_cond_schedule(
+        self,
+    ):
+        self.cond_ids = torch.full(
+            size=(self.num_timesteps,),
+            fill_value=self.num_timesteps - 1,
+            dtype=torch.long,
+        )
+        ids = torch.round(
+            torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)
+        ).long()
+        self.cond_ids[: self.num_timesteps_cond] = ids
 
     @rank_zero_only
     @torch.no_grad()
     def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
         # only for very first batch
-        if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
-            assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
+        if (
+            self.scale_by_std
+            and self.current_epoch == 0
+            and self.global_step == 0
+            and batch_idx == 0
+            and not self.restarted_from_ckpt
+        ):
+            assert (
+                self.scale_factor == 1.0
+            ), 'rather not use custom rescaling and std-rescaling simultaneously'
             # set rescale weight to 1./std of encodings
-            print("### USING STD-RESCALING ###")
+            print('### USING STD-RESCALING ###')
             x = super().get_input(batch, self.first_stage_key)
             x = x.to(self.device)
             encoder_posterior = self.encode_first_stage(x)
             z = self.get_first_stage_encoding(encoder_posterior).detach()
             del self.scale_factor
-            self.register_buffer('scale_factor', 1. / z.flatten().std())
-            print(f"setting self.scale_factor to {self.scale_factor}")
-            print("### USING STD-RESCALING ###")
+            self.register_buffer('scale_factor', 1.0 / z.flatten().std())
+            print(f'setting self.scale_factor to {self.scale_factor}')
+            print('### USING STD-RESCALING ###')
 
-    def register_schedule(self,
-                          given_betas=None, beta_schedule="linear", timesteps=1000,
-                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-        super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
+    def register_schedule(
+        self,
+        given_betas=None,
+        beta_schedule='linear',
+        timesteps=1000,
+        linear_start=1e-4,
+        linear_end=2e-2,
+        cosine_s=8e-3,
+    ):
+        super().register_schedule(
+            given_betas,
+            beta_schedule,
+            timesteps,
+            linear_start,
+            linear_end,
+            cosine_s,
+        )
 
         self.shorten_cond_schedule = self.num_timesteps_cond > 1
         if self.shorten_cond_schedule:
@@ -536,11 +755,13 @@ class LatentDiffusion(DDPM):
 
     def instantiate_cond_stage(self, config):
         if not self.cond_stage_trainable:
-            if config == "__is_first_stage__":
-                print("Using first stage also as cond stage.")
+            if config == '__is_first_stage__':
+                print('Using first stage also as cond stage.')
                 self.cond_stage_model = self.first_stage_model
-            elif config == "__is_unconditional__":
-                print(f"Training {self.__class__.__name__} as an unconditional model.")
+            elif config == '__is_unconditional__':
+                print(
+                    f'Training {self.__class__.__name__} as an unconditional model.'
+                )
                 self.cond_stage_model = None
                 # self.be_unconditional = True
             else:
@@ -555,23 +776,32 @@ class LatentDiffusion(DDPM):
             try:
                 model = instantiate_from_config(config)
             except urllib.error.URLError:
-                raise SystemExit("* Couldn't load a dependency. Try running scripts/preload_models.py from an internet-conected machine.")
+                raise SystemExit(
+                    "* Couldn't load a dependency. Try running scripts/preload_models.py from an internet-conected machine."
+                )
             self.cond_stage_model = model
-            
-    
+
     def instantiate_embedding_manager(self, config, embedder):
         model = instantiate_from_config(config, embedder=embedder)
 
-        if config.params.get("embedding_manager_ckpt", None): # do not load if missing OR empty string
+        if config.params.get(
+            'embedding_manager_ckpt', None
+        ):   # do not load if missing OR empty string
             model.load(config.params.embedding_manager_ckpt)
-        
+
         return model
 
-    def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
+    def _get_denoise_row_from_list(
+        self, samples, desc='', force_no_decoder_quantization=False
+    ):
         denoise_row = []
         for zd in tqdm(samples, desc=desc):
-            denoise_row.append(self.decode_first_stage(zd.to(self.device),
-                                                            force_not_quantize=force_no_decoder_quantization))
+            denoise_row.append(
+                self.decode_first_stage(
+                    zd.to(self.device),
+                    force_not_quantize=force_no_decoder_quantization,
+                )
+            )
         n_imgs_per_row = len(denoise_row)
         denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
         denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
@@ -585,13 +815,19 @@ class LatentDiffusion(DDPM):
         elif isinstance(encoder_posterior, torch.Tensor):
             z = encoder_posterior
         else:
-            raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
+            raise NotImplementedError(
+                f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented"
+            )
         return self.scale_factor * z
 
     def get_learned_conditioning(self, c):
         if self.cond_stage_forward is None:
-            if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
-                c = self.cond_stage_model.encode(c, embedding_manager=self.embedding_manager)
+            if hasattr(self.cond_stage_model, 'encode') and callable(
+                self.cond_stage_model.encode
+            ):
+                c = self.cond_stage_model.encode(
+                    c, embedding_manager=self.embedding_manager
+                )
                 if isinstance(c, DiagonalGaussianDistribution):
                     c = c.mode()
             else:
@@ -619,26 +855,37 @@ class LatentDiffusion(DDPM):
         arr = self.meshgrid(h, w) / lower_right_corner
         dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
         dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
-        edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
+        edge_dist = torch.min(
+            torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1
+        )[0]
         return edge_dist
 
     def get_weighting(self, h, w, Ly, Lx, device):
         weighting = self.delta_border(h, w)
-        weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
-                               self.split_input_params["clip_max_weight"], )
-        weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
+        weighting = torch.clip(
+            weighting,
+            self.split_input_params['clip_min_weight'],
+            self.split_input_params['clip_max_weight'],
+        )
+        weighting = (
+            weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
+        )
 
-        if self.split_input_params["tie_braker"]:
+        if self.split_input_params['tie_braker']:
             L_weighting = self.delta_border(Ly, Lx)
-            L_weighting = torch.clip(L_weighting,
-                                     self.split_input_params["clip_min_tie_weight"],
-                                     self.split_input_params["clip_max_tie_weight"])
+            L_weighting = torch.clip(
+                L_weighting,
+                self.split_input_params['clip_min_tie_weight'],
+                self.split_input_params['clip_max_tie_weight'],
+            )
 
             L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
             weighting = weighting * L_weighting
         return weighting
 
-    def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo load once not every time, shorten code
+    def get_fold_unfold(
+        self, x, kernel_size, stride, uf=1, df=1
+    ):  # todo load once not every time, shorten code
         """
         :param x: img of size (bs, c, h, w)
         :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
@@ -650,40 +897,75 @@ class LatentDiffusion(DDPM):
         Lx = (w - kernel_size[1]) // stride[1] + 1
 
         if uf == 1 and df == 1:
-            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
+            fold_params = dict(
+                kernel_size=kernel_size, dilation=1, padding=0, stride=stride
+            )
             unfold = torch.nn.Unfold(**fold_params)
 
             fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
 
-            weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
-            normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
-            weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
+            weighting = self.get_weighting(
+                kernel_size[0], kernel_size[1], Ly, Lx, x.device
+            ).to(x.dtype)
+            normalization = fold(weighting).view(
+                1, 1, h, w
+            )  # normalizes the overlap
+            weighting = weighting.view(
+                (1, 1, kernel_size[0], kernel_size[1], Ly * Lx)
+            )
 
         elif uf > 1 and df == 1:
-            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
+            fold_params = dict(
+                kernel_size=kernel_size, dilation=1, padding=0, stride=stride
+            )
             unfold = torch.nn.Unfold(**fold_params)
 
-            fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
-                                dilation=1, padding=0,
-                                stride=(stride[0] * uf, stride[1] * uf))
-            fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
+            fold_params2 = dict(
+                kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
+                dilation=1,
+                padding=0,
+                stride=(stride[0] * uf, stride[1] * uf),
+            )
+            fold = torch.nn.Fold(
+                output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2
+            )
 
-            weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
-            normalization = fold(weighting).view(1, 1, h * uf, w * uf)  # normalizes the overlap
-            weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
+            weighting = self.get_weighting(
+                kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device
+            ).to(x.dtype)
+            normalization = fold(weighting).view(
+                1, 1, h * uf, w * uf
+            )  # normalizes the overlap
+            weighting = weighting.view(
+                (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)
+            )
 
         elif df > 1 and uf == 1:
-            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
+            fold_params = dict(
+                kernel_size=kernel_size, dilation=1, padding=0, stride=stride
+            )
             unfold = torch.nn.Unfold(**fold_params)
 
-            fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
-                                dilation=1, padding=0,
-                                stride=(stride[0] // df, stride[1] // df))
-            fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
+            fold_params2 = dict(
+                kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
+                dilation=1,
+                padding=0,
+                stride=(stride[0] // df, stride[1] // df),
+            )
+            fold = torch.nn.Fold(
+                output_size=(x.shape[2] // df, x.shape[3] // df),
+                **fold_params2,
+            )
 
-            weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
-            normalization = fold(weighting).view(1, 1, h // df, w // df)  # normalizes the overlap
-            weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
+            weighting = self.get_weighting(
+                kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device
+            ).to(x.dtype)
+            normalization = fold(weighting).view(
+                1, 1, h // df, w // df
+            )  # normalizes the overlap
+            weighting = weighting.view(
+                (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)
+            )
 
         else:
             raise NotImplementedError
@@ -691,8 +973,16 @@ class LatentDiffusion(DDPM):
         return fold, unfold, normalization, weighting
 
     @torch.no_grad()
-    def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
-                  cond_key=None, return_original_cond=False, bs=None):
+    def get_input(
+        self,
+        batch,
+        k,
+        return_first_stage_outputs=False,
+        force_c_encode=False,
+        cond_key=None,
+        return_original_cond=False,
+        bs=None,
+    ):
         x = super().get_input(batch, k)
         if bs is not None:
             x = x[:bs]
@@ -743,155 +1033,211 @@ class LatentDiffusion(DDPM):
         return out
 
     @torch.no_grad()
-    def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
+    def decode_first_stage(
+        self, z, predict_cids=False, force_not_quantize=False
+    ):
         if predict_cids:
             if z.dim() == 4:
                 z = torch.argmax(z.exp(), dim=1).long()
-            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
+            z = self.first_stage_model.quantize.get_codebook_entry(
+                z, shape=None
+            )
             z = rearrange(z, 'b h w c -> b c h w').contiguous()
 
-        z = 1. / self.scale_factor * z
+        z = 1.0 / self.scale_factor * z
 
-        if hasattr(self, "split_input_params"):
-            if self.split_input_params["patch_distributed_vq"]:
-                ks = self.split_input_params["ks"]  # eg. (128, 128)
-                stride = self.split_input_params["stride"]  # eg. (64, 64)
-                uf = self.split_input_params["vqf"]
+        if hasattr(self, 'split_input_params'):
+            if self.split_input_params['patch_distributed_vq']:
+                ks = self.split_input_params['ks']  # eg. (128, 128)
+                stride = self.split_input_params['stride']  # eg. (64, 64)
+                uf = self.split_input_params['vqf']
                 bs, nc, h, w = z.shape
                 if ks[0] > h or ks[1] > w:
                     ks = (min(ks[0], h), min(ks[1], w))
-                    print("reducing Kernel")
+                    print('reducing Kernel')
 
                 if stride[0] > h or stride[1] > w:
                     stride = (min(stride[0], h), min(stride[1], w))
-                    print("reducing stride")
+                    print('reducing stride')
 
-                fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
+                fold, unfold, normalization, weighting = self.get_fold_unfold(
+                    z, ks, stride, uf=uf
+                )
 
                 z = unfold(z)  # (bn, nc * prod(**ks), L)
                 # 1. Reshape to img shape
-                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )
+                z = z.view(
+                    (z.shape[0], -1, ks[0], ks[1], z.shape[-1])
+                )  # (bn, nc, ks[0], ks[1], L )
 
                 # 2. apply model loop over last dim
                 if isinstance(self.first_stage_model, VQModelInterface):
-                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
-                                                                 force_not_quantize=predict_cids or force_not_quantize)
-                                   for i in range(z.shape[-1])]
+                    output_list = [
+                        self.first_stage_model.decode(
+                            z[:, :, :, :, i],
+                            force_not_quantize=predict_cids
+                            or force_not_quantize,
+                        )
+                        for i in range(z.shape[-1])
+                    ]
                 else:
 
-                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
-                                   for i in range(z.shape[-1])]
+                    output_list = [
+                        self.first_stage_model.decode(z[:, :, :, :, i])
+                        for i in range(z.shape[-1])
+                    ]
 
-                o = torch.stack(output_list, axis=-1)  # # (bn, nc, ks[0], ks[1], L)
+                o = torch.stack(
+                    output_list, axis=-1
+                )  # # (bn, nc, ks[0], ks[1], L)
                 o = o * weighting
                 # Reverse 1. reshape to img shape
-                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
+                o = o.view(
+                    (o.shape[0], -1, o.shape[-1])
+                )  # (bn, nc * ks[0] * ks[1], L)
                 # stitch crops together
                 decoded = fold(o)
                 decoded = decoded / normalization  # norm is shape (1, 1, h, w)
                 return decoded
             else:
                 if isinstance(self.first_stage_model, VQModelInterface):
-                    return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
+                    return self.first_stage_model.decode(
+                        z,
+                        force_not_quantize=predict_cids or force_not_quantize,
+                    )
                 else:
                     return self.first_stage_model.decode(z)
 
         else:
             if isinstance(self.first_stage_model, VQModelInterface):
-                return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
+                return self.first_stage_model.decode(
+                    z, force_not_quantize=predict_cids or force_not_quantize
+                )
             else:
                 return self.first_stage_model.decode(z)
 
     # same as above but without decorator
-    def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
+    def differentiable_decode_first_stage(
+        self, z, predict_cids=False, force_not_quantize=False
+    ):
         if predict_cids:
             if z.dim() == 4:
                 z = torch.argmax(z.exp(), dim=1).long()
-            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
+            z = self.first_stage_model.quantize.get_codebook_entry(
+                z, shape=None
+            )
             z = rearrange(z, 'b h w c -> b c h w').contiguous()
 
-        z = 1. / self.scale_factor * z
+        z = 1.0 / self.scale_factor * z
 
-        if hasattr(self, "split_input_params"):
-            if self.split_input_params["patch_distributed_vq"]:
-                ks = self.split_input_params["ks"]  # eg. (128, 128)
-                stride = self.split_input_params["stride"]  # eg. (64, 64)
-                uf = self.split_input_params["vqf"]
+        if hasattr(self, 'split_input_params'):
+            if self.split_input_params['patch_distributed_vq']:
+                ks = self.split_input_params['ks']  # eg. (128, 128)
+                stride = self.split_input_params['stride']  # eg. (64, 64)
+                uf = self.split_input_params['vqf']
                 bs, nc, h, w = z.shape
                 if ks[0] > h or ks[1] > w:
                     ks = (min(ks[0], h), min(ks[1], w))
-                    print("reducing Kernel")
+                    print('reducing Kernel')
 
                 if stride[0] > h or stride[1] > w:
                     stride = (min(stride[0], h), min(stride[1], w))
-                    print("reducing stride")
+                    print('reducing stride')
 
-                fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
+                fold, unfold, normalization, weighting = self.get_fold_unfold(
+                    z, ks, stride, uf=uf
+                )
 
                 z = unfold(z)  # (bn, nc * prod(**ks), L)
                 # 1. Reshape to img shape
-                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )
+                z = z.view(
+                    (z.shape[0], -1, ks[0], ks[1], z.shape[-1])
+                )  # (bn, nc, ks[0], ks[1], L )
 
                 # 2. apply model loop over last dim
-                if isinstance(self.first_stage_model, VQModelInterface):  
-                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
-                                                                 force_not_quantize=predict_cids or force_not_quantize)
-                                   for i in range(z.shape[-1])]
+                if isinstance(self.first_stage_model, VQModelInterface):
+                    output_list = [
+                        self.first_stage_model.decode(
+                            z[:, :, :, :, i],
+                            force_not_quantize=predict_cids
+                            or force_not_quantize,
+                        )
+                        for i in range(z.shape[-1])
+                    ]
                 else:
 
-                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
-                                   for i in range(z.shape[-1])]
+                    output_list = [
+                        self.first_stage_model.decode(z[:, :, :, :, i])
+                        for i in range(z.shape[-1])
+                    ]
 
-                o = torch.stack(output_list, axis=-1)  # # (bn, nc, ks[0], ks[1], L)
+                o = torch.stack(
+                    output_list, axis=-1
+                )  # # (bn, nc, ks[0], ks[1], L)
                 o = o * weighting
                 # Reverse 1. reshape to img shape
-                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
+                o = o.view(
+                    (o.shape[0], -1, o.shape[-1])
+                )  # (bn, nc * ks[0] * ks[1], L)
                 # stitch crops together
                 decoded = fold(o)
                 decoded = decoded / normalization  # norm is shape (1, 1, h, w)
                 return decoded
             else:
                 if isinstance(self.first_stage_model, VQModelInterface):
-                    return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
+                    return self.first_stage_model.decode(
+                        z,
+                        force_not_quantize=predict_cids or force_not_quantize,
+                    )
                 else:
                     return self.first_stage_model.decode(z)
 
         else:
             if isinstance(self.first_stage_model, VQModelInterface):
-                return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
+                return self.first_stage_model.decode(
+                    z, force_not_quantize=predict_cids or force_not_quantize
+                )
             else:
                 return self.first_stage_model.decode(z)
 
     @torch.no_grad()
     def encode_first_stage(self, x):
-        if hasattr(self, "split_input_params"):
-            if self.split_input_params["patch_distributed_vq"]:
-                ks = self.split_input_params["ks"]  # eg. (128, 128)
-                stride = self.split_input_params["stride"]  # eg. (64, 64)
-                df = self.split_input_params["vqf"]
+        if hasattr(self, 'split_input_params'):
+            if self.split_input_params['patch_distributed_vq']:
+                ks = self.split_input_params['ks']  # eg. (128, 128)
+                stride = self.split_input_params['stride']  # eg. (64, 64)
+                df = self.split_input_params['vqf']
                 self.split_input_params['original_image_size'] = x.shape[-2:]
                 bs, nc, h, w = x.shape
                 if ks[0] > h or ks[1] > w:
                     ks = (min(ks[0], h), min(ks[1], w))
-                    print("reducing Kernel")
+                    print('reducing Kernel')
 
                 if stride[0] > h or stride[1] > w:
                     stride = (min(stride[0], h), min(stride[1], w))
-                    print("reducing stride")
+                    print('reducing stride')
 
-                fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
+                fold, unfold, normalization, weighting = self.get_fold_unfold(
+                    x, ks, stride, df=df
+                )
                 z = unfold(x)  # (bn, nc * prod(**ks), L)
                 # Reshape to img shape
-                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )
+                z = z.view(
+                    (z.shape[0], -1, ks[0], ks[1], z.shape[-1])
+                )  # (bn, nc, ks[0], ks[1], L )
 
-                output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
-                               for i in range(z.shape[-1])]
+                output_list = [
+                    self.first_stage_model.encode(z[:, :, :, :, i])
+                    for i in range(z.shape[-1])
+                ]
 
                 o = torch.stack(output_list, axis=-1)
                 o = o * weighting
 
                 # Reverse reshape to img shape
-                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
+                o = o.view(
+                    (o.shape[0], -1, o.shape[-1])
+                )  # (bn, nc * ks[0] * ks[1], L)
                 # stitch crops together
                 decoded = fold(o)
                 decoded = decoded / normalization
@@ -908,18 +1254,24 @@ class LatentDiffusion(DDPM):
         return loss
 
     def forward(self, x, c, *args, **kwargs):
-        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
+        t = torch.randint(
+            0, self.num_timesteps, (x.shape[0],), device=self.device
+        ).long()
         if self.model.conditioning_key is not None:
             assert c is not None
             if self.cond_stage_trainable:
                 c = self.get_learned_conditioning(c)
             if self.shorten_cond_schedule:  # TODO: drop this option
                 tc = self.cond_ids[t].to(self.device)
-                c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
+                c = self.q_sample(
+                    x_start=c, t=tc, noise=torch.randn_like(c.float())
+                )
 
         return self.p_losses(x, c, t, *args, **kwargs)
 
-    def _rescale_annotations(self, bboxes, crop_coordinates):  # TODO: move to dataset
+    def _rescale_annotations(
+        self, bboxes, crop_coordinates
+    ):  # TODO: move to dataset
         def rescale_bbox(bbox):
             x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
             y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
@@ -937,42 +1289,65 @@ class LatentDiffusion(DDPM):
         else:
             if not isinstance(cond, list):
                 cond = [cond]
-            key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
+            key = (
+                'c_concat'
+                if self.model.conditioning_key == 'concat'
+                else 'c_crossattn'
+            )
             cond = {key: cond}
 
-        if hasattr(self, "split_input_params"):
-            assert len(cond) == 1  # todo can only deal with one conditioning atm
-            assert not return_ids  
-            ks = self.split_input_params["ks"]  # eg. (128, 128)
-            stride = self.split_input_params["stride"]  # eg. (64, 64)
+        if hasattr(self, 'split_input_params'):
+            assert (
+                len(cond) == 1
+            )  # todo can only deal with one conditioning atm
+            assert not return_ids
+            ks = self.split_input_params['ks']  # eg. (128, 128)
+            stride = self.split_input_params['stride']  # eg. (64, 64)
 
             h, w = x_noisy.shape[-2:]
 
-            fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
+            fold, unfold, normalization, weighting = self.get_fold_unfold(
+                x_noisy, ks, stride
+            )
 
             z = unfold(x_noisy)  # (bn, nc * prod(**ks), L)
             # Reshape to img shape
-            z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )
+            z = z.view(
+                (z.shape[0], -1, ks[0], ks[1], z.shape[-1])
+            )  # (bn, nc, ks[0], ks[1], L )
             z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
 
-            if self.cond_stage_key in ["image", "LR_image", "segmentation",
-                                       'bbox_img'] and self.model.conditioning_key:  # todo check for completeness
+            if (
+                self.cond_stage_key
+                in ['image', 'LR_image', 'segmentation', 'bbox_img']
+                and self.model.conditioning_key
+            ):  # todo check for completeness
                 c_key = next(iter(cond.keys()))  # get key
                 c = next(iter(cond.values()))  # get value
-                assert (len(c) == 1)  # todo extend to list with more than one elem
+                assert (
+                    len(c) == 1
+                )  # todo extend to list with more than one elem
                 c = c[0]  # get element
 
                 c = unfold(c)
-                c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1]))  # (bn, nc, ks[0], ks[1], L )
+                c = c.view(
+                    (c.shape[0], -1, ks[0], ks[1], c.shape[-1])
+                )  # (bn, nc, ks[0], ks[1], L )
 
-                cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
+                cond_list = [
+                    {c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])
+                ]
 
             elif self.cond_stage_key == 'coordinates_bbox':
-                assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size'
+                assert (
+                    'original_image_size' in self.split_input_params
+                ), 'BoudingBoxRescaling is missing original_image_size'
 
                 # assuming padding of unfold is always 0 and its dilation is always 1
                 n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
-                full_img_h, full_img_w = self.split_input_params['original_image_size']
+                full_img_h, full_img_w = self.split_input_params[
+                    'original_image_size'
+                ]
                 # as we are operating on latents, we need the factor from the original image size to the
                 # spatial latent size to properly rescale the crops for regenerating the bbox annotations
                 num_downs = self.first_stage_model.encoder.num_resolutions - 1
@@ -980,47 +1355,84 @@ class LatentDiffusion(DDPM):
 
                 # get top left postions of patches as conforming for the bbbox tokenizer, therefore we
                 # need to rescale the tl patch coordinates to be in between (0,1)
-                tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
-                                         rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
-                                        for patch_nr in range(z.shape[-1])]
+                tl_patch_coordinates = [
+                    (
+                        rescale_latent
+                        * stride[0]
+                        * (patch_nr % n_patches_per_row)
+                        / full_img_w,
+                        rescale_latent
+                        * stride[1]
+                        * (patch_nr // n_patches_per_row)
+                        / full_img_h,
+                    )
+                    for patch_nr in range(z.shape[-1])
+                ]
 
                 # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
-                patch_limits = [(x_tl, y_tl,
-                                 rescale_latent * ks[0] / full_img_w,
-                                 rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
+                patch_limits = [
+                    (
+                        x_tl,
+                        y_tl,
+                        rescale_latent * ks[0] / full_img_w,
+                        rescale_latent * ks[1] / full_img_h,
+                    )
+                    for x_tl, y_tl in tl_patch_coordinates
+                ]
                 # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
 
                 # tokenize crop coordinates for the bounding boxes of the respective patches
-                patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
-                                      for bbox in patch_limits]  # list of length l with tensors of shape (1, 2)
+                patch_limits_tknzd = [
+                    torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[
+                        None
+                    ].to(self.device)
+                    for bbox in patch_limits
+                ]  # list of length l with tensors of shape (1, 2)
                 print(patch_limits_tknzd[0].shape)
                 # cut tknzd crop position from conditioning
-                assert isinstance(cond, dict), 'cond must be dict to be fed into model'
+                assert isinstance(
+                    cond, dict
+                ), 'cond must be dict to be fed into model'
                 cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
                 print(cut_cond.shape)
 
-                adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
+                adapted_cond = torch.stack(
+                    [
+                        torch.cat([cut_cond, p], dim=1)
+                        for p in patch_limits_tknzd
+                    ]
+                )
                 adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
                 print(adapted_cond.shape)
                 adapted_cond = self.get_learned_conditioning(adapted_cond)
                 print(adapted_cond.shape)
-                adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
+                adapted_cond = rearrange(
+                    adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]
+                )
                 print(adapted_cond.shape)
 
                 cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
 
             else:
-                cond_list = [cond for i in range(z.shape[-1])]  # Todo make this more efficient
+                cond_list = [
+                    cond for i in range(z.shape[-1])
+                ]  # Todo make this more efficient
 
             # apply model by loop over crops
-            output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
-            assert not isinstance(output_list[0],
-                                  tuple)  # todo cant deal with multiple model outputs check this never happens
+            output_list = [
+                self.model(z_list[i], t, **cond_list[i])
+                for i in range(z.shape[-1])
+            ]
+            assert not isinstance(
+                output_list[0], tuple
+            )  # todo cant deal with multiple model outputs check this never happens
 
             o = torch.stack(output_list, axis=-1)
             o = o * weighting
             # Reverse reshape to img shape
-            o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
+            o = o.view(
+                (o.shape[0], -1, o.shape[-1])
+            )  # (bn, nc * ks[0] * ks[1], L)
             # stitch crops together
             x_recon = fold(o) / normalization
 
@@ -1033,8 +1445,11 @@ class LatentDiffusion(DDPM):
             return x_recon
 
     def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
-        return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
-               extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
+        return (
+            extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape)
+            * x_t
+            - pred_xstart
+        ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
 
     def _prior_bpd(self, x_start):
         """
@@ -1045,9 +1460,13 @@ class LatentDiffusion(DDPM):
         :return: a batch of [N] KL values (in bits), one per batch element.
         """
         batch_size = x_start.shape[0]
-        t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
+        t = torch.tensor(
+            [self.num_timesteps - 1] * batch_size, device=x_start.device
+        )
         qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
-        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
+        kl_prior = normal_kl(
+            mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
+        )
         return mean_flat(kl_prior) / np.log(2.0)
 
     def p_losses(self, x_start, cond, t, noise=None):
@@ -1058,14 +1477,16 @@ class LatentDiffusion(DDPM):
         loss_dict = {}
         prefix = 'train' if self.training else 'val'
 
-        if self.parameterization == "x0":
+        if self.parameterization == 'x0':
             target = x_start
-        elif self.parameterization == "eps":
+        elif self.parameterization == 'eps':
             target = noise
         else:
             raise NotImplementedError()
 
-        loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
+        loss_simple = self.get_loss(model_output, target, mean=False).mean(
+            [1, 2, 3]
+        )
         loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
 
         logvar_t = self.logvar[t].to(self.device)
@@ -1077,65 +1498,117 @@ class LatentDiffusion(DDPM):
 
         loss = self.l_simple_weight * loss.mean()
 
-        loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
+        loss_vlb = self.get_loss(model_output, target, mean=False).mean(
+            dim=(1, 2, 3)
+        )
         loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
         loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
-        loss += (self.original_elbo_weight * loss_vlb)
+        loss += self.original_elbo_weight * loss_vlb
         loss_dict.update({f'{prefix}/loss': loss})
 
         if self.embedding_reg_weight > 0:
-            loss_embedding_reg = self.embedding_manager.embedding_to_coarse_loss().mean()
+            loss_embedding_reg = (
+                self.embedding_manager.embedding_to_coarse_loss().mean()
+            )
 
             loss_dict.update({f'{prefix}/loss_emb_reg': loss_embedding_reg})
 
-            loss += (self.embedding_reg_weight * loss_embedding_reg)
+            loss += self.embedding_reg_weight * loss_embedding_reg
             loss_dict.update({f'{prefix}/loss': loss})
 
         return loss, loss_dict
 
-    def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
-                        return_x0=False, score_corrector=None, corrector_kwargs=None):
+    def p_mean_variance(
+        self,
+        x,
+        c,
+        t,
+        clip_denoised: bool,
+        return_codebook_ids=False,
+        quantize_denoised=False,
+        return_x0=False,
+        score_corrector=None,
+        corrector_kwargs=None,
+    ):
         t_in = t
-        model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
+        model_out = self.apply_model(
+            x, t_in, c, return_ids=return_codebook_ids
+        )
 
         if score_corrector is not None:
-            assert self.parameterization == "eps"
-            model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
+            assert self.parameterization == 'eps'
+            model_out = score_corrector.modify_score(
+                self, model_out, x, t, c, **corrector_kwargs
+            )
 
         if return_codebook_ids:
             model_out, logits = model_out
 
-        if self.parameterization == "eps":
+        if self.parameterization == 'eps':
             x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
-        elif self.parameterization == "x0":
+        elif self.parameterization == 'x0':
             x_recon = model_out
         else:
             raise NotImplementedError()
 
         if clip_denoised:
-            x_recon.clamp_(-1., 1.)
+            x_recon.clamp_(-1.0, 1.0)
         if quantize_denoised:
-            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
-        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
+            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(
+                x_recon
+            )
+        (
+            model_mean,
+            posterior_variance,
+            posterior_log_variance,
+        ) = self.q_posterior(x_start=x_recon, x_t=x, t=t)
         if return_codebook_ids:
-            return model_mean, posterior_variance, posterior_log_variance, logits
+            return (
+                model_mean,
+                posterior_variance,
+                posterior_log_variance,
+                logits,
+            )
         elif return_x0:
-            return model_mean, posterior_variance, posterior_log_variance, x_recon
+            return (
+                model_mean,
+                posterior_variance,
+                posterior_log_variance,
+                x_recon,
+            )
         else:
             return model_mean, posterior_variance, posterior_log_variance
 
     @torch.no_grad()
-    def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
-                 return_codebook_ids=False, quantize_denoised=False, return_x0=False,
-                 temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
+    def p_sample(
+        self,
+        x,
+        c,
+        t,
+        clip_denoised=False,
+        repeat_noise=False,
+        return_codebook_ids=False,
+        quantize_denoised=False,
+        return_x0=False,
+        temperature=1.0,
+        noise_dropout=0.0,
+        score_corrector=None,
+        corrector_kwargs=None,
+    ):
         b, *_, device = *x.shape, x.device
-        outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
-                                       return_codebook_ids=return_codebook_ids,
-                                       quantize_denoised=quantize_denoised,
-                                       return_x0=return_x0,
-                                       score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
+        outputs = self.p_mean_variance(
+            x=x,
+            c=c,
+            t=t,
+            clip_denoised=clip_denoised,
+            return_codebook_ids=return_codebook_ids,
+            quantize_denoised=quantize_denoised,
+            return_x0=return_x0,
+            score_corrector=score_corrector,
+            corrector_kwargs=corrector_kwargs,
+        )
         if return_codebook_ids:
-            raise DeprecationWarning("Support dropped.")
+            raise DeprecationWarning('Support dropped.')
             model_mean, _, model_log_variance, logits = outputs
         elif return_x0:
             model_mean, _, model_log_variance, x0 = outputs
@@ -1143,23 +1616,49 @@ class LatentDiffusion(DDPM):
             model_mean, _, model_log_variance = outputs
 
         noise = noise_like(x.shape, device, repeat_noise) * temperature
-        if noise_dropout > 0.:
+        if noise_dropout > 0.0:
             noise = torch.nn.functional.dropout(noise, p=noise_dropout)
         # no noise when t == 0
-        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
+        nonzero_mask = (1 - (t == 0).float()).reshape(
+            b, *((1,) * (len(x.shape) - 1))
+        )
 
         if return_codebook_ids:
-            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
+            return model_mean + nonzero_mask * (
+                0.5 * model_log_variance
+            ).exp() * noise, logits.argmax(dim=1)
         if return_x0:
-            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
+            return (
+                model_mean
+                + nonzero_mask * (0.5 * model_log_variance).exp() * noise,
+                x0,
+            )
         else:
-            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
+            return (
+                model_mean
+                + nonzero_mask * (0.5 * model_log_variance).exp() * noise
+            )
 
     @torch.no_grad()
-    def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
-                              img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
-                              score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
-                              log_every_t=None):
+    def progressive_denoising(
+        self,
+        cond,
+        shape,
+        verbose=True,
+        callback=None,
+        quantize_denoised=False,
+        img_callback=None,
+        mask=None,
+        x0=None,
+        temperature=1.0,
+        noise_dropout=0.0,
+        score_corrector=None,
+        corrector_kwargs=None,
+        batch_size=None,
+        x_T=None,
+        start_T=None,
+        log_every_t=None,
+    ):
         if not log_every_t:
             log_every_t = self.log_every_t
         timesteps = self.num_timesteps
@@ -1175,16 +1674,30 @@ class LatentDiffusion(DDPM):
         intermediates = []
         if cond is not None:
             if isinstance(cond, dict):
-                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                cond = {
+                    key: cond[key][:batch_size]
+                    if not isinstance(cond[key], list)
+                    else list(map(lambda x: x[:batch_size], cond[key]))
+                    for key in cond
+                }
             else:
-                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
+                cond = (
+                    [c[:batch_size] for c in cond]
+                    if isinstance(cond, list)
+                    else cond[:batch_size]
+                )
 
         if start_T is not None:
             timesteps = min(timesteps, start_T)
-        iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
-                        total=timesteps) if verbose else reversed(
-            range(0, timesteps))
+        iterator = (
+            tqdm(
+                reversed(range(0, timesteps)),
+                desc='Progressive Generation',
+                total=timesteps,
+            )
+            if verbose
+            else reversed(range(0, timesteps))
+        )
         if type(temperature) == float:
             temperature = [temperature] * timesteps
 
@@ -1193,29 +1706,52 @@ class LatentDiffusion(DDPM):
             if self.shorten_cond_schedule:
                 assert self.model.conditioning_key != 'hybrid'
                 tc = self.cond_ids[ts].to(cond.device)
-                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
+                cond = self.q_sample(
+                    x_start=cond, t=tc, noise=torch.randn_like(cond)
+                )
 
-            img, x0_partial = self.p_sample(img, cond, ts,
-                                            clip_denoised=self.clip_denoised,
-                                            quantize_denoised=quantize_denoised, return_x0=True,
-                                            temperature=temperature[i], noise_dropout=noise_dropout,
-                                            score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
+            img, x0_partial = self.p_sample(
+                img,
+                cond,
+                ts,
+                clip_denoised=self.clip_denoised,
+                quantize_denoised=quantize_denoised,
+                return_x0=True,
+                temperature=temperature[i],
+                noise_dropout=noise_dropout,
+                score_corrector=score_corrector,
+                corrector_kwargs=corrector_kwargs,
+            )
             if mask is not None:
                 assert x0 is not None
                 img_orig = self.q_sample(x0, ts)
-                img = img_orig * mask + (1. - mask) * img
+                img = img_orig * mask + (1.0 - mask) * img
 
             if i % log_every_t == 0 or i == timesteps - 1:
                 intermediates.append(x0_partial)
-            if callback: callback(i)
-            if img_callback: img_callback(img, i)
+            if callback:
+                callback(i)
+            if img_callback:
+                img_callback(img, i)
         return img, intermediates
 
     @torch.no_grad()
-    def p_sample_loop(self, cond, shape, return_intermediates=False,
-                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
-                      mask=None, x0=None, img_callback=None, start_T=None,
-                      log_every_t=None):
+    def p_sample_loop(
+        self,
+        cond,
+        shape,
+        return_intermediates=False,
+        x_T=None,
+        verbose=True,
+        callback=None,
+        timesteps=None,
+        quantize_denoised=False,
+        mask=None,
+        x0=None,
+        img_callback=None,
+        start_T=None,
+        log_every_t=None,
+    ):
 
         if not log_every_t:
             log_every_t = self.log_every_t
@@ -1232,100 +1768,170 @@ class LatentDiffusion(DDPM):
 
         if start_T is not None:
             timesteps = min(timesteps, start_T)
-        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
-            range(0, timesteps))
+        iterator = (
+            tqdm(
+                reversed(range(0, timesteps)),
+                desc='Sampling t',
+                total=timesteps,
+            )
+            if verbose
+            else reversed(range(0, timesteps))
+        )
 
         if mask is not None:
             assert x0 is not None
-            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match
+            assert (
+                x0.shape[2:3] == mask.shape[2:3]
+            )  # spatial size has to match
 
         for i in iterator:
             ts = torch.full((b,), i, device=device, dtype=torch.long)
             if self.shorten_cond_schedule:
                 assert self.model.conditioning_key != 'hybrid'
                 tc = self.cond_ids[ts].to(cond.device)
-                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
+                cond = self.q_sample(
+                    x_start=cond, t=tc, noise=torch.randn_like(cond)
+                )
 
-            img = self.p_sample(img, cond, ts,
-                                clip_denoised=self.clip_denoised,
-                                quantize_denoised=quantize_denoised)
+            img = self.p_sample(
+                img,
+                cond,
+                ts,
+                clip_denoised=self.clip_denoised,
+                quantize_denoised=quantize_denoised,
+            )
             if mask is not None:
                 img_orig = self.q_sample(x0, ts)
-                img = img_orig * mask + (1. - mask) * img
+                img = img_orig * mask + (1.0 - mask) * img
 
             if i % log_every_t == 0 or i == timesteps - 1:
                 intermediates.append(img)
-            if callback: callback(i)
-            if img_callback: img_callback(img, i)
+            if callback:
+                callback(i)
+            if img_callback:
+                img_callback(img, i)
 
         if return_intermediates:
             return img, intermediates
         return img
 
     @torch.no_grad()
-    def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
-               verbose=True, timesteps=None, quantize_denoised=False,
-               mask=None, x0=None, shape=None,**kwargs):
+    def sample(
+        self,
+        cond,
+        batch_size=16,
+        return_intermediates=False,
+        x_T=None,
+        verbose=True,
+        timesteps=None,
+        quantize_denoised=False,
+        mask=None,
+        x0=None,
+        shape=None,
+        **kwargs,
+    ):
         if shape is None:
-            shape = (batch_size, self.channels, self.image_size, self.image_size)
+            shape = (
+                batch_size,
+                self.channels,
+                self.image_size,
+                self.image_size,
+            )
         if cond is not None:
             if isinstance(cond, dict):
-                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                cond = {
+                    key: cond[key][:batch_size]
+                    if not isinstance(cond[key], list)
+                    else list(map(lambda x: x[:batch_size], cond[key]))
+                    for key in cond
+                }
             else:
-                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
-        return self.p_sample_loop(cond,
-                                  shape,
-                                  return_intermediates=return_intermediates, x_T=x_T,
-                                  verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
-                                  mask=mask, x0=x0)
+                cond = (
+                    [c[:batch_size] for c in cond]
+                    if isinstance(cond, list)
+                    else cond[:batch_size]
+                )
+        return self.p_sample_loop(
+            cond,
+            shape,
+            return_intermediates=return_intermediates,
+            x_T=x_T,
+            verbose=verbose,
+            timesteps=timesteps,
+            quantize_denoised=quantize_denoised,
+            mask=mask,
+            x0=x0,
+        )
 
     @torch.no_grad()
-    def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
+    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
 
         if ddim:
             ddim_sampler = DDIMSampler(self)
             shape = (self.channels, self.image_size, self.image_size)
-            samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size,
-                                                        shape,cond,verbose=False,**kwargs)
+            samples, intermediates = ddim_sampler.sample(
+                ddim_steps, batch_size, shape, cond, verbose=False, **kwargs
+            )
 
         else:
-            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
-                                                 return_intermediates=True,**kwargs)
+            samples, intermediates = self.sample(
+                cond=cond,
+                batch_size=batch_size,
+                return_intermediates=True,
+                **kwargs,
+            )
 
         return samples, intermediates
 
     @torch.no_grad()
-    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
-                   quantize_denoised=True, inpaint=False, plot_denoise_rows=False, plot_progressive_rows=False,
-                   plot_diffusion_rows=False, **kwargs):
+    def log_images(
+        self,
+        batch,
+        N=8,
+        n_row=4,
+        sample=True,
+        ddim_steps=200,
+        ddim_eta=1.0,
+        return_keys=None,
+        quantize_denoised=True,
+        inpaint=False,
+        plot_denoise_rows=False,
+        plot_progressive_rows=False,
+        plot_diffusion_rows=False,
+        **kwargs,
+    ):
 
         use_ddim = ddim_steps is not None
 
         log = dict()
-        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
-                                           return_first_stage_outputs=True,
-                                           force_c_encode=True,
-                                           return_original_cond=True,
-                                           bs=N)
+        z, c, x, xrec, xc = self.get_input(
+            batch,
+            self.first_stage_key,
+            return_first_stage_outputs=True,
+            force_c_encode=True,
+            return_original_cond=True,
+            bs=N,
+        )
         N = min(x.shape[0], N)
         n_row = min(x.shape[0], n_row)
-        log["inputs"] = x
-        log["reconstruction"] = xrec
+        log['inputs'] = x
+        log['reconstruction'] = xrec
         if self.model.conditioning_key is not None:
-            if hasattr(self.cond_stage_model, "decode"):
+            if hasattr(self.cond_stage_model, 'decode'):
                 xc = self.cond_stage_model.decode(c)
-                log["conditioning"] = xc
-            elif self.cond_stage_key in ["caption"]:
-                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
-                log["conditioning"] = xc
+                log['conditioning'] = xc
+            elif self.cond_stage_key in ['caption']:
+                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch['caption'])
+                log['conditioning'] = xc
             elif self.cond_stage_key == 'class_label':
-                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
+                xc = log_txt_as_img(
+                    (x.shape[2], x.shape[3]), batch['human_label']
+                )
                 log['conditioning'] = xc
             elif isimage(xc):
-                log["conditioning"] = xc
+                log['conditioning'] = xc
             if ismap(xc):
-                log["original_conditioning"] = self.to_rgb(xc)
+                log['original_conditioning'] = self.to_rgb(xc)
 
         if plot_diffusion_rows:
             # get diffusion row
@@ -1339,75 +1945,114 @@ class LatentDiffusion(DDPM):
                     z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                     diffusion_row.append(self.decode_first_stage(z_noisy))
 
-            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
+            diffusion_row = torch.stack(
+                diffusion_row
+            )  # n_log_step, n_row, C, H, W
             diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
-            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
-            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
-            log["diffusion_row"] = diffusion_grid
+            diffusion_grid = rearrange(
+                diffusion_grid, 'b n c h w -> (b n) c h w'
+            )
+            diffusion_grid = make_grid(
+                diffusion_grid, nrow=diffusion_row.shape[0]
+            )
+            log['diffusion_row'] = diffusion_grid
 
         if sample:
             # get denoise row
-            with self.ema_scope("Plotting"):
-                samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
-                                                         ddim_steps=ddim_steps,eta=ddim_eta)
+            with self.ema_scope('Plotting'):
+                samples, z_denoise_row = self.sample_log(
+                    cond=c,
+                    batch_size=N,
+                    ddim=use_ddim,
+                    ddim_steps=ddim_steps,
+                    eta=ddim_eta,
+                )
                 # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
             x_samples = self.decode_first_stage(samples)
-            log["samples"] = x_samples
+            log['samples'] = x_samples
             if plot_denoise_rows:
                 denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
-                log["denoise_row"] = denoise_grid
-            
-            uc = self.get_learned_conditioning(len(c) * [""])
-            sample_scaled, _ = self.sample_log(cond=c, 
-                                               batch_size=N, 
-                                               ddim=use_ddim, 
-                                               ddim_steps=ddim_steps,
-                                               eta=ddim_eta,                                                 
-                                               unconditional_guidance_scale=5.0,
-                                               unconditional_conditioning=uc)
-            log["samples_scaled"] = self.decode_first_stage(sample_scaled)
+                log['denoise_row'] = denoise_grid
 
-            if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
-                    self.first_stage_model, IdentityFirstStage):
+            uc = self.get_learned_conditioning(len(c) * [''])
+            sample_scaled, _ = self.sample_log(
+                cond=c,
+                batch_size=N,
+                ddim=use_ddim,
+                ddim_steps=ddim_steps,
+                eta=ddim_eta,
+                unconditional_guidance_scale=5.0,
+                unconditional_conditioning=uc,
+            )
+            log['samples_scaled'] = self.decode_first_stage(sample_scaled)
+
+            if (
+                quantize_denoised
+                and not isinstance(self.first_stage_model, AutoencoderKL)
+                and not isinstance(self.first_stage_model, IdentityFirstStage)
+            ):
                 # also display when quantizing x0 while sampling
-                with self.ema_scope("Plotting Quantized Denoised"):
-                    samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
-                                                             ddim_steps=ddim_steps,eta=ddim_eta,
-                                                             quantize_denoised=True)
+                with self.ema_scope('Plotting Quantized Denoised'):
+                    samples, z_denoise_row = self.sample_log(
+                        cond=c,
+                        batch_size=N,
+                        ddim=use_ddim,
+                        ddim_steps=ddim_steps,
+                        eta=ddim_eta,
+                        quantize_denoised=True,
+                    )
                     # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
                     #                                      quantize_denoised=True)
                 x_samples = self.decode_first_stage(samples.to(self.device))
-                log["samples_x0_quantized"] = x_samples
+                log['samples_x0_quantized'] = x_samples
 
             if inpaint:
                 # make a simple center square
                 b, h, w = z.shape[0], z.shape[2], z.shape[3]
                 mask = torch.ones(N, h, w).to(self.device)
                 # zeros will be filled in
-                mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
+                mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0
                 mask = mask[:, None, ...]
-                with self.ema_scope("Plotting Inpaint"):
+                with self.ema_scope('Plotting Inpaint'):
 
-                    samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
-                                                ddim_steps=ddim_steps, x0=z[:N], mask=mask)
+                    samples, _ = self.sample_log(
+                        cond=c,
+                        batch_size=N,
+                        ddim=use_ddim,
+                        eta=ddim_eta,
+                        ddim_steps=ddim_steps,
+                        x0=z[:N],
+                        mask=mask,
+                    )
                 x_samples = self.decode_first_stage(samples.to(self.device))
-                log["samples_inpainting"] = x_samples
-                log["mask"] = mask
+                log['samples_inpainting'] = x_samples
+                log['mask'] = mask
 
                 # outpaint
-                with self.ema_scope("Plotting Outpaint"):
-                    samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
-                                                ddim_steps=ddim_steps, x0=z[:N], mask=mask)
+                with self.ema_scope('Plotting Outpaint'):
+                    samples, _ = self.sample_log(
+                        cond=c,
+                        batch_size=N,
+                        ddim=use_ddim,
+                        eta=ddim_eta,
+                        ddim_steps=ddim_steps,
+                        x0=z[:N],
+                        mask=mask,
+                    )
                 x_samples = self.decode_first_stage(samples.to(self.device))
-                log["samples_outpainting"] = x_samples
+                log['samples_outpainting'] = x_samples
 
         if plot_progressive_rows:
-            with self.ema_scope("Plotting Progressives"):
-                img, progressives = self.progressive_denoising(c,
-                                                               shape=(self.channels, self.image_size, self.image_size),
-                                                               batch_size=N)
-            prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
-            log["progressive_row"] = prog_row
+            with self.ema_scope('Plotting Progressives'):
+                img, progressives = self.progressive_denoising(
+                    c,
+                    shape=(self.channels, self.image_size, self.image_size),
+                    batch_size=N,
+                )
+            prog_row = self._get_denoise_row_from_list(
+                progressives, desc='Progressive Generation'
+            )
+            log['progressive_row'] = prog_row
 
         if return_keys:
             if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
@@ -1425,7 +2070,9 @@ class LatentDiffusion(DDPM):
         else:
             params = list(self.model.parameters())
             if self.cond_stage_trainable:
-                print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
+                print(
+                    f'{self.__class__.__name__}: Also optimizing conditioner params!'
+                )
                 params = params + list(self.cond_stage_model.parameters())
             if self.learn_logvar:
                 print('Diffusion model optimizing logvar')
@@ -1435,34 +2082,44 @@ class LatentDiffusion(DDPM):
             assert 'target' in self.scheduler_config
             scheduler = instantiate_from_config(self.scheduler_config)
 
-            print("Setting up LambdaLR scheduler...")
+            print('Setting up LambdaLR scheduler...')
             scheduler = [
                 {
                     'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
                     'interval': 'step',
-                    'frequency': 1
-                }]
+                    'frequency': 1,
+                }
+            ]
             return [opt], scheduler
         return opt
 
     @torch.no_grad()
     def to_rgb(self, x):
         x = x.float()
-        if not hasattr(self, "colorize"):
+        if not hasattr(self, 'colorize'):
             self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
         x = nn.functional.conv2d(x, weight=self.colorize)
-        x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
+        x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0
         return x
 
     @rank_zero_only
     def on_save_checkpoint(self, checkpoint):
         checkpoint.clear()
-        
+
         if os.path.isdir(self.trainer.checkpoint_callback.dirpath):
-            self.embedding_manager.save(os.path.join(self.trainer.checkpoint_callback.dirpath, "embeddings.pt"))
+            self.embedding_manager.save(
+                os.path.join(
+                    self.trainer.checkpoint_callback.dirpath, 'embeddings.pt'
+                )
+            )
 
             if (self.global_step - self.emb_ckpt_counter) > 500:
-                self.embedding_manager.save(os.path.join(self.trainer.checkpoint_callback.dirpath, f"embeddings_gs-{self.global_step}.pt"))
+                self.embedding_manager.save(
+                    os.path.join(
+                        self.trainer.checkpoint_callback.dirpath,
+                        f'embeddings_gs-{self.global_step}.pt',
+                    )
+                )
 
                 self.emb_ckpt_counter += 500
 
@@ -1472,7 +2129,13 @@ class DiffusionWrapper(pl.LightningModule):
         super().__init__()
         self.diffusion_model = instantiate_from_config(diff_model_config)
         self.conditioning_key = conditioning_key
-        assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']
+        assert self.conditioning_key in [
+            None,
+            'concat',
+            'crossattn',
+            'hybrid',
+            'adm',
+        ]
 
     def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
         if self.conditioning_key is None:
@@ -1499,7 +2162,9 @@ class DiffusionWrapper(pl.LightningModule):
 class Layout2ImgDiffusion(LatentDiffusion):
     # TODO: move all layout-specific hacks to this class
     def __init__(self, cond_stage_key, *args, **kwargs):
-        assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
+        assert (
+            cond_stage_key == 'coordinates_bbox'
+        ), 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
         super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)
 
     def log_images(self, batch, N=8, *args, **kwargs):
@@ -1510,9 +2175,13 @@ class Layout2ImgDiffusion(LatentDiffusion):
         mapper = dset.conditional_builders[self.cond_stage_key]
 
         bbox_imgs = []
-        map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
+        map_fn = lambda catno: dset.get_textual_label(
+            dset.get_category_id(catno)
+        )
         for tknzd_bbox in batch[self.cond_stage_key][:N]:
-            bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
+            bboximg = mapper.plot(
+                tknzd_bbox.detach().cpu(), map_fn, (256, 256)
+            )
             bbox_imgs.append(bboximg)
 
         cond_img = torch.stack(bbox_imgs, dim=0)
diff --git a/ldm/models/diffusion/ksampler.py b/ldm/models/diffusion/ksampler.py
index 62912d1a07..1da81eee5a 100644
--- a/ldm/models/diffusion/ksampler.py
+++ b/ldm/models/diffusion/ksampler.py
@@ -1,8 +1,9 @@
-'''wrapper around part of Katherine Crowson's k-diffusion library, making it call compatible with other Samplers'''
+"""wrapper around part of Katherine Crowson's k-diffusion library, making it call compatible with other Samplers"""
 import k_diffusion as K
 import torch
 import torch.nn as nn
 
+
 class CFGDenoiser(nn.Module):
     def __init__(self, model):
         super().__init__()
@@ -15,8 +16,9 @@ class CFGDenoiser(nn.Module):
         uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
         return uncond + (cond - uncond) * cond_scale
 
+
 class KSampler(object):
-    def __init__(self, model, schedule="lms", device="cuda", **kwargs):
+    def __init__(self, model, schedule='lms', device='cuda', **kwargs):
         super().__init__()
         self.model = K.external.CompVisDenoiser(model)
         self.schedule = schedule
@@ -26,44 +28,57 @@ class KSampler(object):
             x_in = torch.cat([x] * 2)
             sigma_in = torch.cat([sigma] * 2)
             cond_in = torch.cat([uncond, cond])
-            uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
+            uncond, cond = self.inner_model(
+                x_in, sigma_in, cond=cond_in
+            ).chunk(2)
             return uncond + (cond - uncond) * cond_scale
 
-
     # most of these arguments are ignored and are only present for compatibility with
     # other samples
     @torch.no_grad()
-    def sample(self,
-               S,
-               batch_size,
-               shape,
-               conditioning=None,
-               callback=None,
-               normals_sequence=None,
-               img_callback=None,
-               quantize_x0=False,
-               eta=0.,
-               mask=None,
-               x0=None,
-               temperature=1.,
-               noise_dropout=0.,
-               score_corrector=None,
-               corrector_kwargs=None,
-               verbose=True,
-               x_T=None,
-               log_every_t=100,
-               unconditional_guidance_scale=1.,
-               unconditional_conditioning=None,
-               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
-               **kwargs
-               ):
+    def sample(
+        self,
+        S,
+        batch_size,
+        shape,
+        conditioning=None,
+        callback=None,
+        normals_sequence=None,
+        img_callback=None,
+        quantize_x0=False,
+        eta=0.0,
+        mask=None,
+        x0=None,
+        temperature=1.0,
+        noise_dropout=0.0,
+        score_corrector=None,
+        corrector_kwargs=None,
+        verbose=True,
+        x_T=None,
+        log_every_t=100,
+        unconditional_guidance_scale=1.0,
+        unconditional_conditioning=None,
+        # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
+        **kwargs,
+    ):
 
         sigmas = self.model.get_sigmas(S)
         if x_T:
             x = x_T
         else:
-            x = torch.randn([batch_size, *shape], device=self.device) * sigmas[0] # for GPU draw
+            x = (
+                torch.randn([batch_size, *shape], device=self.device)
+                * sigmas[0]
+            )   # for GPU draw
         model_wrap_cfg = CFGDenoiser(self.model)
-        extra_args = {'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': unconditional_guidance_scale}
-        return (K.sampling.__dict__[f'sample_{self.schedule}'](model_wrap_cfg, x, sigmas, extra_args=extra_args),
-                None)
+        extra_args = {
+            'cond': conditioning,
+            'uncond': unconditional_conditioning,
+            'cond_scale': unconditional_guidance_scale,
+        }
+        return (
+            K.sampling.__dict__[f'sample_{self.schedule}'](
+                model_wrap_cfg, x, sigmas, extra_args=extra_args
+            ),
+            None,
+        )
diff --git a/ldm/models/diffusion/plms.py b/ldm/models/diffusion/plms.py
index 5eafe1d7ce..7b9dc4706b 100644
--- a/ldm/models/diffusion/plms.py
+++ b/ldm/models/diffusion/plms.py
@@ -5,11 +5,15 @@ import numpy as np
 from tqdm import tqdm
 from functools import partial
 
-from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
+from ldm.modules.diffusionmodules.util import (
+    make_ddim_sampling_parameters,
+    make_ddim_timesteps,
+    noise_like,
+)
 
 
 class PLMSSampler(object):
-    def __init__(self, model, schedule="linear", device="cuda", **kwargs):
+    def __init__(self, model, schedule='linear', device='cuda', **kwargs):
         super().__init__()
         self.model = model
         self.ddpm_num_timesteps = model.num_timesteps
@@ -23,103 +27,172 @@ class PLMSSampler(object):
 
         setattr(self, name, attr)
 
-    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
+    def make_schedule(
+        self,
+        ddim_num_steps,
+        ddim_discretize='uniform',
+        ddim_eta=0.0,
+        verbose=True,
+    ):
         if ddim_eta != 0:
             raise ValueError('ddim_eta must be 0 for PLMS')
-        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
-                                                  num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
+        self.ddim_timesteps = make_ddim_timesteps(
+            ddim_discr_method=ddim_discretize,
+            num_ddim_timesteps=ddim_num_steps,
+            num_ddpm_timesteps=self.ddpm_num_timesteps,
+            verbose=verbose,
+        )
         alphas_cumprod = self.model.alphas_cumprod
-        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
-        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
+        assert (
+            alphas_cumprod.shape[0] == self.ddpm_num_timesteps
+        ), 'alphas have to be defined for each timestep'
+        to_torch = (
+            lambda x: x.clone()
+            .detach()
+            .to(torch.float32)
+            .to(self.model.device)
+        )
 
         self.register_buffer('betas', to_torch(self.model.betas))
         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
+        self.register_buffer(
+            'alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)
+        )
 
         # calculations for diffusion q(x_t | x_{t-1}) and others
-        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
-        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
+        self.register_buffer(
+            'sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))
+        )
+        self.register_buffer(
+            'sqrt_one_minus_alphas_cumprod',
+            to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),
+        )
+        self.register_buffer(
+            'log_one_minus_alphas_cumprod',
+            to_torch(np.log(1.0 - alphas_cumprod.cpu())),
+        )
+        self.register_buffer(
+            'sqrt_recip_alphas_cumprod',
+            to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())),
+        )
+        self.register_buffer(
+            'sqrt_recipm1_alphas_cumprod',
+            to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),
+        )
 
         # ddim sampling parameters
-        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
-                                                                                   ddim_timesteps=self.ddim_timesteps,
-                                                                                   eta=ddim_eta,verbose=verbose)
+        (
+            ddim_sigmas,
+            ddim_alphas,
+            ddim_alphas_prev,
+        ) = make_ddim_sampling_parameters(
+            alphacums=alphas_cumprod.cpu(),
+            ddim_timesteps=self.ddim_timesteps,
+            eta=ddim_eta,
+            verbose=verbose,
+        )
         self.register_buffer('ddim_sigmas', ddim_sigmas)
         self.register_buffer('ddim_alphas', ddim_alphas)
         self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
-        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
+        self.register_buffer(
+            'ddim_sqrt_one_minus_alphas', np.sqrt(1.0 - ddim_alphas)
+        )
         sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
-            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
-                        1 - self.alphas_cumprod / self.alphas_cumprod_prev))
-        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
+            (1 - self.alphas_cumprod_prev)
+            / (1 - self.alphas_cumprod)
+            * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)
+        )
+        self.register_buffer(
+            'ddim_sigmas_for_original_num_steps',
+            sigmas_for_original_sampling_steps,
+        )
 
     @torch.no_grad()
-    def sample(self,
-               S,
-               batch_size,
-               shape,
-               conditioning=None,
-               callback=None,
-               normals_sequence=None,
-               img_callback=None,
-               quantize_x0=False,
-               eta=0.,
-               mask=None,
-               x0=None,
-               temperature=1.,
-               noise_dropout=0.,
-               score_corrector=None,
-               corrector_kwargs=None,
-               verbose=True,
-               x_T=None,
-               log_every_t=100,
-               unconditional_guidance_scale=1.,
-               unconditional_conditioning=None,
-               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
-               **kwargs
-               ):
+    def sample(
+        self,
+        S,
+        batch_size,
+        shape,
+        conditioning=None,
+        callback=None,
+        normals_sequence=None,
+        img_callback=None,
+        quantize_x0=False,
+        eta=0.0,
+        mask=None,
+        x0=None,
+        temperature=1.0,
+        noise_dropout=0.0,
+        score_corrector=None,
+        corrector_kwargs=None,
+        verbose=True,
+        x_T=None,
+        log_every_t=100,
+        unconditional_guidance_scale=1.0,
+        unconditional_conditioning=None,
+        # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
+        **kwargs,
+    ):
         if conditioning is not None:
             if isinstance(conditioning, dict):
                 cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                 if cbs != batch_size:
-                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
+                    print(
+                        f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'
+                    )
             else:
                 if conditioning.shape[0] != batch_size:
-                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
+                    print(
+                        f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'
+                    )
 
         self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
         # sampling
         C, H, W = shape
         size = (batch_size, C, H, W)
-#        print(f'Data shape for PLMS sampling is {size}')
+        #        print(f'Data shape for PLMS sampling is {size}')
 
-        samples, intermediates = self.plms_sampling(conditioning, size,
-                                                    callback=callback,
-                                                    img_callback=img_callback,
-                                                    quantize_denoised=quantize_x0,
-                                                    mask=mask, x0=x0,
-                                                    ddim_use_original_steps=False,
-                                                    noise_dropout=noise_dropout,
-                                                    temperature=temperature,
-                                                    score_corrector=score_corrector,
-                                                    corrector_kwargs=corrector_kwargs,
-                                                    x_T=x_T,
-                                                    log_every_t=log_every_t,
-                                                    unconditional_guidance_scale=unconditional_guidance_scale,
-                                                    unconditional_conditioning=unconditional_conditioning,
-                                                    )
+        samples, intermediates = self.plms_sampling(
+            conditioning,
+            size,
+            callback=callback,
+            img_callback=img_callback,
+            quantize_denoised=quantize_x0,
+            mask=mask,
+            x0=x0,
+            ddim_use_original_steps=False,
+            noise_dropout=noise_dropout,
+            temperature=temperature,
+            score_corrector=score_corrector,
+            corrector_kwargs=corrector_kwargs,
+            x_T=x_T,
+            log_every_t=log_every_t,
+            unconditional_guidance_scale=unconditional_guidance_scale,
+            unconditional_conditioning=unconditional_conditioning,
+        )
         return samples, intermediates
 
     @torch.no_grad()
-    def plms_sampling(self, cond, shape,
-                      x_T=None, ddim_use_original_steps=False,
-                      callback=None, timesteps=None, quantize_denoised=False,
-                      mask=None, x0=None, img_callback=None, log_every_t=100,
-                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                      unconditional_guidance_scale=1., unconditional_conditioning=None,):
+    def plms_sampling(
+        self,
+        cond,
+        shape,
+        x_T=None,
+        ddim_use_original_steps=False,
+        callback=None,
+        timesteps=None,
+        quantize_denoised=False,
+        mask=None,
+        x0=None,
+        img_callback=None,
+        log_every_t=100,
+        temperature=1.0,
+        noise_dropout=0.0,
+        score_corrector=None,
+        corrector_kwargs=None,
+        unconditional_guidance_scale=1.0,
+        unconditional_conditioning=None,
+    ):
         device = self.model.betas.device
         b = shape[0]
         if x_T is None:
@@ -128,42 +201,81 @@ class PLMSSampler(object):
             img = x_T
 
         if timesteps is None:
-            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
+            timesteps = (
+                self.ddpm_num_timesteps
+                if ddim_use_original_steps
+                else self.ddim_timesteps
+            )
         elif timesteps is not None and not ddim_use_original_steps:
-            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
+            subset_end = (
+                int(
+                    min(timesteps / self.ddim_timesteps.shape[0], 1)
+                    * self.ddim_timesteps.shape[0]
+                )
+                - 1
+            )
             timesteps = self.ddim_timesteps[:subset_end]
 
         intermediates = {'x_inter': [img], 'pred_x0': [img]}
-        time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
-        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
-#        print(f"Running PLMS Sampling with {total_steps} timesteps")
+        time_range = (
+            list(reversed(range(0, timesteps)))
+            if ddim_use_original_steps
+            else np.flip(timesteps)
+        )
+        total_steps = (
+            timesteps if ddim_use_original_steps else timesteps.shape[0]
+        )
+        #        print(f"Running PLMS Sampling with {total_steps} timesteps")
 
-        iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps, dynamic_ncols=True)
+        iterator = tqdm(
+            time_range,
+            desc='PLMS Sampler',
+            total=total_steps,
+            dynamic_ncols=True,
+        )
         old_eps = []
 
         for i, step in enumerate(iterator):
             index = total_steps - i - 1
             ts = torch.full((b,), step, device=device, dtype=torch.long)
-            ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
+            ts_next = torch.full(
+                (b,),
+                time_range[min(i + 1, len(time_range) - 1)],
+                device=device,
+                dtype=torch.long,
+            )
 
             if mask is not None:
                 assert x0 is not None
-                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
-                img = img_orig * mask + (1. - mask) * img
+                img_orig = self.model.q_sample(
+                    x0, ts
+                )  # TODO: deterministic forward pass?
+                img = img_orig * mask + (1.0 - mask) * img
 
-            outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
-                                      quantize_denoised=quantize_denoised, temperature=temperature,
-                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
-                                      corrector_kwargs=corrector_kwargs,
-                                      unconditional_guidance_scale=unconditional_guidance_scale,
-                                      unconditional_conditioning=unconditional_conditioning,
-                                      old_eps=old_eps, t_next=ts_next)
+            outs = self.p_sample_plms(
+                img,
+                cond,
+                ts,
+                index=index,
+                use_original_steps=ddim_use_original_steps,
+                quantize_denoised=quantize_denoised,
+                temperature=temperature,
+                noise_dropout=noise_dropout,
+                score_corrector=score_corrector,
+                corrector_kwargs=corrector_kwargs,
+                unconditional_guidance_scale=unconditional_guidance_scale,
+                unconditional_conditioning=unconditional_conditioning,
+                old_eps=old_eps,
+                t_next=ts_next,
+            )
             img, pred_x0, e_t = outs
             old_eps.append(e_t)
             if len(old_eps) >= 4:
                 old_eps.pop(0)
-            if callback: callback(i)
-            if img_callback: img_callback(pred_x0, i)
+            if callback:
+                callback(i)
+            if img_callback:
+                img_callback(pred_x0, i)
 
             if index % log_every_t == 0 or index == total_steps - 1:
                 intermediates['x_inter'].append(img)
@@ -172,47 +284,95 @@ class PLMSSampler(object):
         return img, intermediates
 
     @torch.no_grad()
-    def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
-                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                      unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):
+    def p_sample_plms(
+        self,
+        x,
+        c,
+        t,
+        index,
+        repeat_noise=False,
+        use_original_steps=False,
+        quantize_denoised=False,
+        temperature=1.0,
+        noise_dropout=0.0,
+        score_corrector=None,
+        corrector_kwargs=None,
+        unconditional_guidance_scale=1.0,
+        unconditional_conditioning=None,
+        old_eps=None,
+        t_next=None,
+    ):
         b, *_, device = *x.shape, x.device
 
         def get_model_output(x, t):
-            if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
+            if (
+                unconditional_conditioning is None
+                or unconditional_guidance_scale == 1.0
+            ):
                 e_t = self.model.apply_model(x, t, c)
             else:
                 x_in = torch.cat([x] * 2)
                 t_in = torch.cat([t] * 2)
                 c_in = torch.cat([unconditional_conditioning, c])
-                e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
-                e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
+                e_t_uncond, e_t = self.model.apply_model(
+                    x_in, t_in, c_in
+                ).chunk(2)
+                e_t = e_t_uncond + unconditional_guidance_scale * (
+                    e_t - e_t_uncond
+                )
 
             if score_corrector is not None:
-                assert self.model.parameterization == "eps"
-                e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
+                assert self.model.parameterization == 'eps'
+                e_t = score_corrector.modify_score(
+                    self.model, e_t, x, t, c, **corrector_kwargs
+                )
 
             return e_t
 
-        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
-        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
-        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
-        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
+        alphas = (
+            self.model.alphas_cumprod
+            if use_original_steps
+            else self.ddim_alphas
+        )
+        alphas_prev = (
+            self.model.alphas_cumprod_prev
+            if use_original_steps
+            else self.ddim_alphas_prev
+        )
+        sqrt_one_minus_alphas = (
+            self.model.sqrt_one_minus_alphas_cumprod
+            if use_original_steps
+            else self.ddim_sqrt_one_minus_alphas
+        )
+        sigmas = (
+            self.model.ddim_sigmas_for_original_num_steps
+            if use_original_steps
+            else self.ddim_sigmas
+        )
 
         def get_x_prev_and_pred_x0(e_t, index):
             # select parameters corresponding to the currently considered timestep
             a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
-            a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
+            a_prev = torch.full(
+                (b, 1, 1, 1), alphas_prev[index], device=device
+            )
             sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
-            sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
+            sqrt_one_minus_at = torch.full(
+                (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device
+            )
 
             # current prediction for x_0
             pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
             if quantize_denoised:
                 pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
             # direction pointing to x_t
-            dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
-            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
-            if noise_dropout > 0.:
+            dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t
+            noise = (
+                sigma_t
+                * noise_like(x.shape, device, repeat_noise)
+                * temperature
+            )
+            if noise_dropout > 0.0:
                 noise = torch.nn.functional.dropout(noise, p=noise_dropout)
             x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
             return x_prev, pred_x0
@@ -231,7 +391,12 @@ class PLMSSampler(object):
             e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
         elif len(old_eps) >= 3:
             # 4nd order Pseudo Linear Multistep (Adams-Bashforth)
-            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
+            e_t_prime = (
+                55 * e_t
+                - 59 * old_eps[-1]
+                + 37 * old_eps[-2]
+                - 9 * old_eps[-3]
+            ) / 24
 
         x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
 
diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py
index f4eff39ccb..960a112001 100644
--- a/ldm/modules/attention.py
+++ b/ldm/modules/attention.py
@@ -13,7 +13,7 @@ def exists(val):
 
 
 def uniq(arr):
-    return{el: True for el in arr}.keys()
+    return {el: True for el in arr}.keys()
 
 
 def default(val, d):
@@ -45,19 +45,18 @@ class GEGLU(nn.Module):
 
 
 class FeedForward(nn.Module):
-    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
+    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0):
         super().__init__()
         inner_dim = int(dim * mult)
         dim_out = default(dim_out, dim)
-        project_in = nn.Sequential(
-            nn.Linear(dim, inner_dim),
-            nn.GELU()
-        ) if not glu else GEGLU(dim, inner_dim)
+        project_in = (
+            nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
+            if not glu
+            else GEGLU(dim, inner_dim)
+        )
 
         self.net = nn.Sequential(
-            project_in,
-            nn.Dropout(dropout),
-            nn.Linear(inner_dim, dim_out)
+            project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)
         )
 
     def forward(self, x):
@@ -74,7 +73,9 @@ def zero_module(module):
 
 
 def Normalize(in_channels):
-    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
+    return torch.nn.GroupNorm(
+        num_groups=32, num_channels=in_channels, eps=1e-6, affine=True
+    )
 
 
 class LinearAttention(nn.Module):
@@ -82,17 +83,28 @@ class LinearAttention(nn.Module):
         super().__init__()
         self.heads = heads
         hidden_dim = dim_head * heads
-        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
+        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
         self.to_out = nn.Conv2d(hidden_dim, dim, 1)
 
     def forward(self, x):
         b, c, h, w = x.shape
         qkv = self.to_qkv(x)
-        q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3)
-        k = k.softmax(dim=-1)  
+        q, k, v = rearrange(
+            qkv,
+            'b (qkv heads c) h w -> qkv b heads c (h w)',
+            heads=self.heads,
+            qkv=3,
+        )
+        k = k.softmax(dim=-1)
         context = torch.einsum('bhdn,bhen->bhde', k, v)
         out = torch.einsum('bhde,bhdn->bhen', context, q)
-        out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
+        out = rearrange(
+            out,
+            'b heads c (h w) -> b (heads c) h w',
+            heads=self.heads,
+            h=h,
+            w=w,
+        )
         return self.to_out(out)
 
 
@@ -102,26 +114,18 @@ class SpatialSelfAttention(nn.Module):
         self.in_channels = in_channels
 
         self.norm = Normalize(in_channels)
-        self.q = torch.nn.Conv2d(in_channels,
-                                 in_channels,
-                                 kernel_size=1,
-                                 stride=1,
-                                 padding=0)
-        self.k = torch.nn.Conv2d(in_channels,
-                                 in_channels,
-                                 kernel_size=1,
-                                 stride=1,
-                                 padding=0)
-        self.v = torch.nn.Conv2d(in_channels,
-                                 in_channels,
-                                 kernel_size=1,
-                                 stride=1,
-                                 padding=0)
-        self.proj_out = torch.nn.Conv2d(in_channels,
-                                        in_channels,
-                                        kernel_size=1,
-                                        stride=1,
-                                        padding=0)
+        self.q = torch.nn.Conv2d(
+            in_channels, in_channels, kernel_size=1, stride=1, padding=0
+        )
+        self.k = torch.nn.Conv2d(
+            in_channels, in_channels, kernel_size=1, stride=1, padding=0
+        )
+        self.v = torch.nn.Conv2d(
+            in_channels, in_channels, kernel_size=1, stride=1, padding=0
+        )
+        self.proj_out = torch.nn.Conv2d(
+            in_channels, in_channels, kernel_size=1, stride=1, padding=0
+        )
 
     def forward(self, x):
         h_ = x
@@ -131,12 +135,12 @@ class SpatialSelfAttention(nn.Module):
         v = self.v(h_)
 
         # compute attention
-        b,c,h,w = q.shape
+        b, c, h, w = q.shape
         q = rearrange(q, 'b c h w -> b (h w) c')
         k = rearrange(k, 'b c h w -> b c (h w)')
         w_ = torch.einsum('bij,bjk->bik', q, k)
 
-        w_ = w_ * (int(c)**(-0.5))
+        w_ = w_ * (int(c) ** (-0.5))
         w_ = torch.nn.functional.softmax(w_, dim=2)
 
         # attend to values
@@ -146,16 +150,18 @@ class SpatialSelfAttention(nn.Module):
         h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
         h_ = self.proj_out(h_)
 
-        return x+h_
+        return x + h_
 
 
 class CrossAttention(nn.Module):
-    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
+    def __init__(
+        self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0
+    ):
         super().__init__()
         inner_dim = dim_head * heads
         context_dim = default(context_dim, query_dim)
 
-        self.scale = dim_head ** -0.5
+        self.scale = dim_head**-0.5
         self.heads = heads
 
         self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
@@ -163,8 +169,7 @@ class CrossAttention(nn.Module):
         self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
 
         self.to_out = nn.Sequential(
-            nn.Linear(inner_dim, query_dim),
-            nn.Dropout(dropout)
+            nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
         )
 
     def forward(self, x, context=None, mask=None):
@@ -175,7 +180,9 @@ class CrossAttention(nn.Module):
         k = self.to_k(context)
         v = self.to_v(context)
 
-        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+        q, k, v = map(
+            lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)
+        )
 
         sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
 
@@ -194,19 +201,37 @@ class CrossAttention(nn.Module):
 
 
 class BasicTransformerBlock(nn.Module):
-    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True):
+    def __init__(
+        self,
+        dim,
+        n_heads,
+        d_head,
+        dropout=0.0,
+        context_dim=None,
+        gated_ff=True,
+        checkpoint=True,
+    ):
         super().__init__()
-        self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout)  # is a self-attention
+        self.attn1 = CrossAttention(
+            query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout
+        )  # is a self-attention
         self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
-        self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
-                                    heads=n_heads, dim_head=d_head, dropout=dropout)  # is self-attn if context is none
+        self.attn2 = CrossAttention(
+            query_dim=dim,
+            context_dim=context_dim,
+            heads=n_heads,
+            dim_head=d_head,
+            dropout=dropout,
+        )  # is self-attn if context is None
         self.norm1 = nn.LayerNorm(dim)
         self.norm2 = nn.LayerNorm(dim)
         self.norm3 = nn.LayerNorm(dim)
         self.checkpoint = checkpoint
 
     def forward(self, x, context=None):
-        return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
+        return checkpoint(
+            self._forward, (x, context), self.parameters(), self.checkpoint
+        )
 
     def _forward(self, x, context=None):
         x = self.attn1(self.norm1(x)) + x
@@ -223,29 +248,43 @@ class SpatialTransformer(nn.Module):
     Then apply standard transformer action.
     Finally, reshape to image
     """
-    def __init__(self, in_channels, n_heads, d_head,
-                 depth=1, dropout=0., context_dim=None):
+
+    def __init__(
+        self,
+        in_channels,
+        n_heads,
+        d_head,
+        depth=1,
+        dropout=0.0,
+        context_dim=None,
+    ):
         super().__init__()
         self.in_channels = in_channels
         inner_dim = n_heads * d_head
         self.norm = Normalize(in_channels)
 
-        self.proj_in = nn.Conv2d(in_channels,
-                                 inner_dim,
-                                 kernel_size=1,
-                                 stride=1,
-                                 padding=0)
-
-        self.transformer_blocks = nn.ModuleList(
-            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)
-                for d in range(depth)]
+        self.proj_in = nn.Conv2d(
+            in_channels, inner_dim, kernel_size=1, stride=1, padding=0
         )
 
-        self.proj_out = zero_module(nn.Conv2d(inner_dim,
-                                              in_channels,
-                                              kernel_size=1,
-                                              stride=1,
-                                              padding=0))
+        self.transformer_blocks = nn.ModuleList(
+            [
+                BasicTransformerBlock(
+                    inner_dim,
+                    n_heads,
+                    d_head,
+                    dropout=dropout,
+                    context_dim=context_dim,
+                )
+                for d in range(depth)
+            ]
+        )
+
+        self.proj_out = zero_module(
+            nn.Conv2d(
+                inner_dim, in_channels, kernel_size=1, stride=1, padding=0
+            )
+        )
 
     def forward(self, x, context=None):
         # note: if no context is given, cross-attention defaults to self-attention
@@ -258,4 +297,4 @@ class SpatialTransformer(nn.Module):
             x = block(x, context=context)
         x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)
         x = self.proj_out(x)
-        return x + x_in
\ No newline at end of file
+        return x + x_in
diff --git a/ldm/modules/diffusionmodules/model.py b/ldm/modules/diffusionmodules/model.py
index 533e589a20..cd79e37565 100644
--- a/ldm/modules/diffusionmodules/model.py
+++ b/ldm/modules/diffusionmodules/model.py
@@ -26,17 +26,19 @@ def get_timestep_embedding(timesteps, embedding_dim):
     emb = timesteps.float()[:, None] * emb[None, :]
     emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
     if embedding_dim % 2 == 1:  # zero pad
-        emb = torch.nn.functional.pad(emb, (0,1,0,0))
+        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
     return emb
 
 
 def nonlinearity(x):
     # swish
-    return x*torch.sigmoid(x)
+    return x * torch.sigmoid(x)
 
 
 def Normalize(in_channels, num_groups=32):
-    return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
+    return torch.nn.GroupNorm(
+        num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True
+    )
 
 
 class Upsample(nn.Module):
@@ -44,14 +46,14 @@ class Upsample(nn.Module):
         super().__init__()
         self.with_conv = with_conv
         if self.with_conv:
-            self.conv = torch.nn.Conv2d(in_channels,
-                                        in_channels,
-                                        kernel_size=3,
-                                        stride=1,
-                                        padding=1)
+            self.conv = torch.nn.Conv2d(
+                in_channels, in_channels, kernel_size=3, stride=1, padding=1
+            )
 
     def forward(self, x):
-        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
+        x = torch.nn.functional.interpolate(
+            x, scale_factor=2.0, mode='nearest'
+        )
         if self.with_conv:
             x = self.conv(x)
         return x
@@ -63,16 +65,14 @@ class Downsample(nn.Module):
         self.with_conv = with_conv
         if self.with_conv:
             # no asymmetric padding in torch conv, must do it ourselves
-            self.conv = torch.nn.Conv2d(in_channels,
-                                        in_channels,
-                                        kernel_size=3,
-                                        stride=2,
-                                        padding=0)
+            self.conv = torch.nn.Conv2d(
+                in_channels, in_channels, kernel_size=3, stride=2, padding=0
+            )
 
     def forward(self, x):
         if self.with_conv:
-            pad = (0,1,0,1)
-            x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
+            pad = (0, 1, 0, 1)
+            x = torch.nn.functional.pad(x, pad, mode='constant', value=0)
             x = self.conv(x)
         else:
             x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
@@ -80,8 +80,15 @@ class Downsample(nn.Module):
 
 
 class ResnetBlock(nn.Module):
-    def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
-                 dropout, temb_channels=512):
+    def __init__(
+        self,
+        *,
+        in_channels,
+        out_channels=None,
+        conv_shortcut=False,
+        dropout,
+        temb_channels=512,
+    ):
         super().__init__()
         self.in_channels = in_channels
         out_channels = in_channels if out_channels is None else out_channels
@@ -89,34 +96,33 @@ class ResnetBlock(nn.Module):
         self.use_conv_shortcut = conv_shortcut
 
         self.norm1 = Normalize(in_channels)
-        self.conv1 = torch.nn.Conv2d(in_channels,
-                                     out_channels,
-                                     kernel_size=3,
-                                     stride=1,
-                                     padding=1)
+        self.conv1 = torch.nn.Conv2d(
+            in_channels, out_channels, kernel_size=3, stride=1, padding=1
+        )
         if temb_channels > 0:
-            self.temb_proj = torch.nn.Linear(temb_channels,
-                                             out_channels)
+            self.temb_proj = torch.nn.Linear(temb_channels, out_channels)
         self.norm2 = Normalize(out_channels)
         self.dropout = torch.nn.Dropout(dropout)
-        self.conv2 = torch.nn.Conv2d(out_channels,
-                                     out_channels,
-                                     kernel_size=3,
-                                     stride=1,
-                                     padding=1)
+        self.conv2 = torch.nn.Conv2d(
+            out_channels, out_channels, kernel_size=3, stride=1, padding=1
+        )
         if self.in_channels != self.out_channels:
             if self.use_conv_shortcut:
-                self.conv_shortcut = torch.nn.Conv2d(in_channels,
-                                                     out_channels,
-                                                     kernel_size=3,
-                                                     stride=1,
-                                                     padding=1)
+                self.conv_shortcut = torch.nn.Conv2d(
+                    in_channels,
+                    out_channels,
+                    kernel_size=3,
+                    stride=1,
+                    padding=1,
+                )
             else:
-                self.nin_shortcut = torch.nn.Conv2d(in_channels,
-                                                    out_channels,
-                                                    kernel_size=1,
-                                                    stride=1,
-                                                    padding=0)
+                self.nin_shortcut = torch.nn.Conv2d(
+                    in_channels,
+                    out_channels,
+                    kernel_size=1,
+                    stride=1,
+                    padding=0,
+                )
 
     def forward(self, x, temb):
         h = x
@@ -125,7 +131,7 @@ class ResnetBlock(nn.Module):
         h = self.conv1(h)
 
         if temb is not None:
-            h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
+            h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
 
         h = self.norm2(h)
         h = nonlinearity(h)
@@ -138,11 +144,12 @@ class ResnetBlock(nn.Module):
             else:
                 x = self.nin_shortcut(x)
 
-        return x+h
+        return x + h
 
 
 class LinAttnBlock(LinearAttention):
     """to match AttnBlock usage"""
+
     def __init__(self, in_channels):
         super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
 
@@ -153,27 +160,18 @@ class AttnBlock(nn.Module):
         self.in_channels = in_channels
 
         self.norm = Normalize(in_channels)
-        self.q = torch.nn.Conv2d(in_channels,
-                                 in_channels,
-                                 kernel_size=1,
-                                 stride=1,
-                                 padding=0)
-        self.k = torch.nn.Conv2d(in_channels,
-                                 in_channels,
-                                 kernel_size=1,
-                                 stride=1,
-                                 padding=0)
-        self.v = torch.nn.Conv2d(in_channels,
-                                 in_channels,
-                                 kernel_size=1,
-                                 stride=1,
-                                 padding=0)
-        self.proj_out = torch.nn.Conv2d(in_channels,
-                                        in_channels,
-                                        kernel_size=1,
-                                        stride=1,
-                                        padding=0)
-
+        self.q = torch.nn.Conv2d(
+            in_channels, in_channels, kernel_size=1, stride=1, padding=0
+        )
+        self.k = torch.nn.Conv2d(
+            in_channels, in_channels, kernel_size=1, stride=1, padding=0
+        )
+        self.v = torch.nn.Conv2d(
+            in_channels, in_channels, kernel_size=1, stride=1, padding=0
+        )
+        self.proj_out = torch.nn.Conv2d(
+            in_channels, in_channels, kernel_size=1, stride=1, padding=0
+        )
 
     def forward(self, x):
         h_ = x
@@ -183,44 +181,66 @@ class AttnBlock(nn.Module):
         v = self.v(h_)
 
         # compute attention
-        b,c,h,w = q.shape
-        q = q.reshape(b,c,h*w)
-        q = q.permute(0,2,1)   # b,hw,c
-        k = k.reshape(b,c,h*w) # b,c,hw
-        w_ = torch.bmm(q,k)     # b,hw,hw    w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
-        w_ = w_ * (int(c)**(-0.5))
+        b, c, h, w = q.shape
+        q = q.reshape(b, c, h * w)
+        q = q.permute(0, 2, 1)   # b,hw,c
+        k = k.reshape(b, c, h * w)   # b,c,hw
+        w_ = torch.bmm(q, k)     # b,hw,hw    w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
+        w_ = w_ * (int(c) ** (-0.5))
         w_ = torch.nn.functional.softmax(w_, dim=2)
 
         # attend to values
-        v = v.reshape(b,c,h*w)
-        w_ = w_.permute(0,2,1)   # b,hw,hw (first hw of k, second of q)
-        h_ = torch.bmm(v,w_)     # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
-        h_ = h_.reshape(b,c,h,w)
+        v = v.reshape(b, c, h * w)
+        w_ = w_.permute(0, 2, 1)   # b,hw,hw (first hw of k, second of q)
+        h_ = torch.bmm(
+            v, w_
+        )     # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
+        h_ = h_.reshape(b, c, h, w)
 
         h_ = self.proj_out(h_)
 
-        return x+h_
+        return x + h_
 
 
-def make_attn(in_channels, attn_type="vanilla"):
-    assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown'
-    print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
-    if attn_type == "vanilla":
+def make_attn(in_channels, attn_type='vanilla'):
+    assert attn_type in [
+        'vanilla',
+        'linear',
+        'none',
+    ], f'attn_type {attn_type} unknown'
+    print(
+        f"making attention of type '{attn_type}' with {in_channels} in_channels"
+    )
+    if attn_type == 'vanilla':
         return AttnBlock(in_channels)
-    elif attn_type == "none":
+    elif attn_type == 'none':
         return nn.Identity(in_channels)
     else:
         return LinAttnBlock(in_channels)
 
 
 class Model(nn.Module):
-    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
-                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
-                 resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
+    def __init__(
+        self,
+        *,
+        ch,
+        out_ch,
+        ch_mult=(1, 2, 4, 8),
+        num_res_blocks,
+        attn_resolutions,
+        dropout=0.0,
+        resamp_with_conv=True,
+        in_channels,
+        resolution,
+        use_timestep=True,
+        use_linear_attn=False,
+        attn_type='vanilla',
+    ):
         super().__init__()
-        if use_linear_attn: attn_type = "linear"
+        if use_linear_attn:
+            attn_type = 'linear'
         self.ch = ch
-        self.temb_ch = self.ch*4
+        self.temb_ch = self.ch * 4
         self.num_resolutions = len(ch_mult)
         self.num_res_blocks = num_res_blocks
         self.resolution = resolution
@@ -230,70 +250,80 @@ class Model(nn.Module):
         if self.use_timestep:
             # timestep embedding
             self.temb = nn.Module()
-            self.temb.dense = nn.ModuleList([
-                torch.nn.Linear(self.ch,
-                                self.temb_ch),
-                torch.nn.Linear(self.temb_ch,
-                                self.temb_ch),
-            ])
+            self.temb.dense = nn.ModuleList(
+                [
+                    torch.nn.Linear(self.ch, self.temb_ch),
+                    torch.nn.Linear(self.temb_ch, self.temb_ch),
+                ]
+            )
 
         # downsampling
-        self.conv_in = torch.nn.Conv2d(in_channels,
-                                       self.ch,
-                                       kernel_size=3,
-                                       stride=1,
-                                       padding=1)
+        self.conv_in = torch.nn.Conv2d(
+            in_channels, self.ch, kernel_size=3, stride=1, padding=1
+        )
 
         curr_res = resolution
-        in_ch_mult = (1,)+tuple(ch_mult)
+        in_ch_mult = (1,) + tuple(ch_mult)
         self.down = nn.ModuleList()
         for i_level in range(self.num_resolutions):
             block = nn.ModuleList()
             attn = nn.ModuleList()
-            block_in = ch*in_ch_mult[i_level]
-            block_out = ch*ch_mult[i_level]
+            block_in = ch * in_ch_mult[i_level]
+            block_out = ch * ch_mult[i_level]
             for i_block in range(self.num_res_blocks):
-                block.append(ResnetBlock(in_channels=block_in,
-                                         out_channels=block_out,
-                                         temb_channels=self.temb_ch,
-                                         dropout=dropout))
+                block.append(
+                    ResnetBlock(
+                        in_channels=block_in,
+                        out_channels=block_out,
+                        temb_channels=self.temb_ch,
+                        dropout=dropout,
+                    )
+                )
                 block_in = block_out
                 if curr_res in attn_resolutions:
                     attn.append(make_attn(block_in, attn_type=attn_type))
             down = nn.Module()
             down.block = block
             down.attn = attn
-            if i_level != self.num_resolutions-1:
+            if i_level != self.num_resolutions - 1:
                 down.downsample = Downsample(block_in, resamp_with_conv)
                 curr_res = curr_res // 2
             self.down.append(down)
 
         # middle
         self.mid = nn.Module()
-        self.mid.block_1 = ResnetBlock(in_channels=block_in,
-                                       out_channels=block_in,
-                                       temb_channels=self.temb_ch,
-                                       dropout=dropout)
+        self.mid.block_1 = ResnetBlock(
+            in_channels=block_in,
+            out_channels=block_in,
+            temb_channels=self.temb_ch,
+            dropout=dropout,
+        )
         self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
-        self.mid.block_2 = ResnetBlock(in_channels=block_in,
-                                       out_channels=block_in,
-                                       temb_channels=self.temb_ch,
-                                       dropout=dropout)
+        self.mid.block_2 = ResnetBlock(
+            in_channels=block_in,
+            out_channels=block_in,
+            temb_channels=self.temb_ch,
+            dropout=dropout,
+        )
 
         # upsampling
         self.up = nn.ModuleList()
         for i_level in reversed(range(self.num_resolutions)):
             block = nn.ModuleList()
             attn = nn.ModuleList()
-            block_out = ch*ch_mult[i_level]
-            skip_in = ch*ch_mult[i_level]
-            for i_block in range(self.num_res_blocks+1):
+            block_out = ch * ch_mult[i_level]
+            skip_in = ch * ch_mult[i_level]
+            for i_block in range(self.num_res_blocks + 1):
                 if i_block == self.num_res_blocks:
-                    skip_in = ch*in_ch_mult[i_level]
-                block.append(ResnetBlock(in_channels=block_in+skip_in,
-                                         out_channels=block_out,
-                                         temb_channels=self.temb_ch,
-                                         dropout=dropout))
+                    skip_in = ch * in_ch_mult[i_level]
+                block.append(
+                    ResnetBlock(
+                        in_channels=block_in + skip_in,
+                        out_channels=block_out,
+                        temb_channels=self.temb_ch,
+                        dropout=dropout,
+                    )
+                )
                 block_in = block_out
                 if curr_res in attn_resolutions:
                     attn.append(make_attn(block_in, attn_type=attn_type))
@@ -303,18 +333,16 @@ class Model(nn.Module):
             if i_level != 0:
                 up.upsample = Upsample(block_in, resamp_with_conv)
                 curr_res = curr_res * 2
-            self.up.insert(0, up) # prepend to get consistent order
+            self.up.insert(0, up)   # prepend to get consistent order
 
         # end
         self.norm_out = Normalize(block_in)
-        self.conv_out = torch.nn.Conv2d(block_in,
-                                        out_ch,
-                                        kernel_size=3,
-                                        stride=1,
-                                        padding=1)
+        self.conv_out = torch.nn.Conv2d(
+            block_in, out_ch, kernel_size=3, stride=1, padding=1
+        )
 
     def forward(self, x, t=None, context=None):
-        #assert x.shape[2] == x.shape[3] == self.resolution
+        # assert x.shape[2] == x.shape[3] == self.resolution
         if context is not None:
             # assume aligned context, cat along channel axis
             x = torch.cat((x, context), dim=1)
@@ -336,7 +364,7 @@ class Model(nn.Module):
                 if len(self.down[i_level].attn) > 0:
                     h = self.down[i_level].attn[i_block](h)
                 hs.append(h)
-            if i_level != self.num_resolutions-1:
+            if i_level != self.num_resolutions - 1:
                 hs.append(self.down[i_level].downsample(hs[-1]))
 
         # middle
@@ -347,9 +375,10 @@ class Model(nn.Module):
 
         # upsampling
         for i_level in reversed(range(self.num_resolutions)):
-            for i_block in range(self.num_res_blocks+1):
+            for i_block in range(self.num_res_blocks + 1):
                 h = self.up[i_level].block[i_block](
-                    torch.cat([h, hs.pop()], dim=1), temb)
+                    torch.cat([h, hs.pop()], dim=1), temb
+                )
                 if len(self.up[i_level].attn) > 0:
                     h = self.up[i_level].attn[i_block](h)
             if i_level != 0:
@@ -366,12 +395,27 @@ class Model(nn.Module):
 
 
 class Encoder(nn.Module):
-    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
-                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
-                 resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
-                 **ignore_kwargs):
+    def __init__(
+        self,
+        *,
+        ch,
+        out_ch,
+        ch_mult=(1, 2, 4, 8),
+        num_res_blocks,
+        attn_resolutions,
+        dropout=0.0,
+        resamp_with_conv=True,
+        in_channels,
+        resolution,
+        z_channels,
+        double_z=True,
+        use_linear_attn=False,
+        attn_type='vanilla',
+        **ignore_kwargs,
+    ):
         super().__init__()
-        if use_linear_attn: attn_type = "linear"
+        if use_linear_attn:
+            attn_type = 'linear'
         self.ch = ch
         self.temb_ch = 0
         self.num_resolutions = len(ch_mult)
@@ -380,56 +424,64 @@ class Encoder(nn.Module):
         self.in_channels = in_channels
 
         # downsampling
-        self.conv_in = torch.nn.Conv2d(in_channels,
-                                       self.ch,
-                                       kernel_size=3,
-                                       stride=1,
-                                       padding=1)
+        self.conv_in = torch.nn.Conv2d(
+            in_channels, self.ch, kernel_size=3, stride=1, padding=1
+        )
 
         curr_res = resolution
-        in_ch_mult = (1,)+tuple(ch_mult)
+        in_ch_mult = (1,) + tuple(ch_mult)
         self.in_ch_mult = in_ch_mult
         self.down = nn.ModuleList()
         for i_level in range(self.num_resolutions):
             block = nn.ModuleList()
             attn = nn.ModuleList()
-            block_in = ch*in_ch_mult[i_level]
-            block_out = ch*ch_mult[i_level]
+            block_in = ch * in_ch_mult[i_level]
+            block_out = ch * ch_mult[i_level]
             for i_block in range(self.num_res_blocks):
-                block.append(ResnetBlock(in_channels=block_in,
-                                         out_channels=block_out,
-                                         temb_channels=self.temb_ch,
-                                         dropout=dropout))
+                block.append(
+                    ResnetBlock(
+                        in_channels=block_in,
+                        out_channels=block_out,
+                        temb_channels=self.temb_ch,
+                        dropout=dropout,
+                    )
+                )
                 block_in = block_out
                 if curr_res in attn_resolutions:
                     attn.append(make_attn(block_in, attn_type=attn_type))
             down = nn.Module()
             down.block = block
             down.attn = attn
-            if i_level != self.num_resolutions-1:
+            if i_level != self.num_resolutions - 1:
                 down.downsample = Downsample(block_in, resamp_with_conv)
                 curr_res = curr_res // 2
             self.down.append(down)
 
         # middle
         self.mid = nn.Module()
-        self.mid.block_1 = ResnetBlock(in_channels=block_in,
-                                       out_channels=block_in,
-                                       temb_channels=self.temb_ch,
-                                       dropout=dropout)
+        self.mid.block_1 = ResnetBlock(
+            in_channels=block_in,
+            out_channels=block_in,
+            temb_channels=self.temb_ch,
+            dropout=dropout,
+        )
         self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
-        self.mid.block_2 = ResnetBlock(in_channels=block_in,
-                                       out_channels=block_in,
-                                       temb_channels=self.temb_ch,
-                                       dropout=dropout)
+        self.mid.block_2 = ResnetBlock(
+            in_channels=block_in,
+            out_channels=block_in,
+            temb_channels=self.temb_ch,
+            dropout=dropout,
+        )
 
         # end
         self.norm_out = Normalize(block_in)
-        self.conv_out = torch.nn.Conv2d(block_in,
-                                        2*z_channels if double_z else z_channels,
-                                        kernel_size=3,
-                                        stride=1,
-                                        padding=1)
+        self.conv_out = torch.nn.Conv2d(
+            block_in,
+            2 * z_channels if double_z else z_channels,
+            kernel_size=3,
+            stride=1,
+            padding=1,
+        )
 
     def forward(self, x):
         # timestep embedding
@@ -443,7 +495,7 @@ class Encoder(nn.Module):
                 if len(self.down[i_level].attn) > 0:
                     h = self.down[i_level].attn[i_block](h)
                 hs.append(h)
-            if i_level != self.num_resolutions-1:
+            if i_level != self.num_resolutions - 1:
                 hs.append(self.down[i_level].downsample(hs[-1]))
 
         # middle
@@ -460,12 +512,28 @@ class Encoder(nn.Module):
 
 
 class Decoder(nn.Module):
-    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
-                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
-                 resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
-                 attn_type="vanilla", **ignorekwargs):
+    def __init__(
+        self,
+        *,
+        ch,
+        out_ch,
+        ch_mult=(1, 2, 4, 8),
+        num_res_blocks,
+        attn_resolutions,
+        dropout=0.0,
+        resamp_with_conv=True,
+        in_channels,
+        resolution,
+        z_channels,
+        give_pre_end=False,
+        tanh_out=False,
+        use_linear_attn=False,
+        attn_type='vanilla',
+        **ignorekwargs,
+    ):
         super().__init__()
-        if use_linear_attn: attn_type = "linear"
+        if use_linear_attn:
+            attn_type = 'linear'
         self.ch = ch
         self.temb_ch = 0
         self.num_resolutions = len(ch_mult)
@@ -476,43 +544,52 @@ class Decoder(nn.Module):
         self.tanh_out = tanh_out
 
         # compute in_ch_mult, block_in and curr_res at lowest res
-        in_ch_mult = (1,)+tuple(ch_mult)
-        block_in = ch*ch_mult[self.num_resolutions-1]
-        curr_res = resolution // 2**(self.num_resolutions-1)
-        self.z_shape = (1,z_channels,curr_res,curr_res)
-        print("Working with z of shape {} = {} dimensions.".format(
-            self.z_shape, np.prod(self.z_shape)))
+        in_ch_mult = (1,) + tuple(ch_mult)
+        block_in = ch * ch_mult[self.num_resolutions - 1]
+        curr_res = resolution // 2 ** (self.num_resolutions - 1)
+        self.z_shape = (1, z_channels, curr_res, curr_res)
+        print(
+            'Working with z of shape {} = {} dimensions.'.format(
+                self.z_shape, np.prod(self.z_shape)
+            )
+        )
 
         # z to block_in
-        self.conv_in = torch.nn.Conv2d(z_channels,
-                                       block_in,
-                                       kernel_size=3,
-                                       stride=1,
-                                       padding=1)
+        self.conv_in = torch.nn.Conv2d(
+            z_channels, block_in, kernel_size=3, stride=1, padding=1
+        )
 
         # middle
         self.mid = nn.Module()
-        self.mid.block_1 = ResnetBlock(in_channels=block_in,
-                                       out_channels=block_in,
-                                       temb_channels=self.temb_ch,
-                                       dropout=dropout)
+        self.mid.block_1 = ResnetBlock(
+            in_channels=block_in,
+            out_channels=block_in,
+            temb_channels=self.temb_ch,
+            dropout=dropout,
+        )
         self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
-        self.mid.block_2 = ResnetBlock(in_channels=block_in,
-                                       out_channels=block_in,
-                                       temb_channels=self.temb_ch,
-                                       dropout=dropout)
+        self.mid.block_2 = ResnetBlock(
+            in_channels=block_in,
+            out_channels=block_in,
+            temb_channels=self.temb_ch,
+            dropout=dropout,
+        )
 
         # upsampling
         self.up = nn.ModuleList()
         for i_level in reversed(range(self.num_resolutions)):
             block = nn.ModuleList()
             attn = nn.ModuleList()
-            block_out = ch*ch_mult[i_level]
-            for i_block in range(self.num_res_blocks+1):
-                block.append(ResnetBlock(in_channels=block_in,
-                                         out_channels=block_out,
-                                         temb_channels=self.temb_ch,
-                                         dropout=dropout))
+            block_out = ch * ch_mult[i_level]
+            for i_block in range(self.num_res_blocks + 1):
+                block.append(
+                    ResnetBlock(
+                        in_channels=block_in,
+                        out_channels=block_out,
+                        temb_channels=self.temb_ch,
+                        dropout=dropout,
+                    )
+                )
                 block_in = block_out
                 if curr_res in attn_resolutions:
                     attn.append(make_attn(block_in, attn_type=attn_type))
@@ -522,18 +599,16 @@ class Decoder(nn.Module):
             if i_level != 0:
                 up.upsample = Upsample(block_in, resamp_with_conv)
                 curr_res = curr_res * 2
-            self.up.insert(0, up) # prepend to get consistent order
+            self.up.insert(0, up)   # prepend to get consistent order
 
         # end
         self.norm_out = Normalize(block_in)
-        self.conv_out = torch.nn.Conv2d(block_in,
-                                        out_ch,
-                                        kernel_size=3,
-                                        stride=1,
-                                        padding=1)
+        self.conv_out = torch.nn.Conv2d(
+            block_in, out_ch, kernel_size=3, stride=1, padding=1
+        )
 
     def forward(self, z):
-        #assert z.shape[1:] == self.z_shape[1:]
+        # assert z.shape[1:] == self.z_shape[1:]
         self.last_z_shape = z.shape
 
         # timestep embedding
@@ -549,7 +624,7 @@ class Decoder(nn.Module):
 
         # upsampling
         for i_level in reversed(range(self.num_resolutions)):
-            for i_block in range(self.num_res_blocks+1):
+            for i_block in range(self.num_res_blocks + 1):
                 h = self.up[i_level].block[i_block](h, temb)
                 if len(self.up[i_level].attn) > 0:
                     h = self.up[i_level].attn[i_block](h)
@@ -571,29 +646,40 @@ class Decoder(nn.Module):
 class SimpleDecoder(nn.Module):
     def __init__(self, in_channels, out_channels, *args, **kwargs):
         super().__init__()
-        self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
-                                     ResnetBlock(in_channels=in_channels,
-                                                 out_channels=2 * in_channels,
-                                                 temb_channels=0, dropout=0.0),
-                                     ResnetBlock(in_channels=2 * in_channels,
-                                                out_channels=4 * in_channels,
-                                                temb_channels=0, dropout=0.0),
-                                     ResnetBlock(in_channels=4 * in_channels,
-                                                out_channels=2 * in_channels,
-                                                temb_channels=0, dropout=0.0),
-                                     nn.Conv2d(2*in_channels, in_channels, 1),
-                                     Upsample(in_channels, with_conv=True)])
+        self.model = nn.ModuleList(
+            [
+                nn.Conv2d(in_channels, in_channels, 1),
+                ResnetBlock(
+                    in_channels=in_channels,
+                    out_channels=2 * in_channels,
+                    temb_channels=0,
+                    dropout=0.0,
+                ),
+                ResnetBlock(
+                    in_channels=2 * in_channels,
+                    out_channels=4 * in_channels,
+                    temb_channels=0,
+                    dropout=0.0,
+                ),
+                ResnetBlock(
+                    in_channels=4 * in_channels,
+                    out_channels=2 * in_channels,
+                    temb_channels=0,
+                    dropout=0.0,
+                ),
+                nn.Conv2d(2 * in_channels, in_channels, 1),
+                Upsample(in_channels, with_conv=True),
+            ]
+        )
         # end
         self.norm_out = Normalize(in_channels)
-        self.conv_out = torch.nn.Conv2d(in_channels,
-                                        out_channels,
-                                        kernel_size=3,
-                                        stride=1,
-                                        padding=1)
+        self.conv_out = torch.nn.Conv2d(
+            in_channels, out_channels, kernel_size=3, stride=1, padding=1
+        )
 
     def forward(self, x):
         for i, layer in enumerate(self.model):
-            if i in [1,2,3]:
+            if i in [1, 2, 3]:
                 x = layer(x, None)
             else:
                 x = layer(x)
@@ -605,8 +691,16 @@ class SimpleDecoder(nn.Module):
 
 
 class UpsampleDecoder(nn.Module):
-    def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
-                 ch_mult=(2,2), dropout=0.0):
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        ch,
+        num_res_blocks,
+        resolution,
+        ch_mult=(2, 2),
+        dropout=0.0,
+    ):
         super().__init__()
         # upsampling
         self.temb_ch = 0
@@ -620,10 +714,14 @@ class UpsampleDecoder(nn.Module):
             res_block = []
             block_out = ch * ch_mult[i_level]
             for i_block in range(self.num_res_blocks + 1):
-                res_block.append(ResnetBlock(in_channels=block_in,
-                                         out_channels=block_out,
-                                         temb_channels=self.temb_ch,
-                                         dropout=dropout))
+                res_block.append(
+                    ResnetBlock(
+                        in_channels=block_in,
+                        out_channels=block_out,
+                        temb_channels=self.temb_ch,
+                        dropout=dropout,
+                    )
+                )
                 block_in = block_out
             self.res_blocks.append(nn.ModuleList(res_block))
             if i_level != self.num_resolutions - 1:
@@ -632,11 +730,9 @@ class UpsampleDecoder(nn.Module):
 
         # end
         self.norm_out = Normalize(block_in)
-        self.conv_out = torch.nn.Conv2d(block_in,
-                                        out_channels,
-                                        kernel_size=3,
-                                        stride=1,
-                                        padding=1)
+        self.conv_out = torch.nn.Conv2d(
+            block_in, out_channels, kernel_size=3, stride=1, padding=1
+        )
 
     def forward(self, x):
         # upsampling
@@ -653,35 +749,56 @@ class UpsampleDecoder(nn.Module):
 
 
 class LatentRescaler(nn.Module):
-    def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
+    def __init__(
+        self, factor, in_channels, mid_channels, out_channels, depth=2
+    ):
         super().__init__()
         # residual block, interpolate, residual block
         self.factor = factor
-        self.conv_in = nn.Conv2d(in_channels,
-                                 mid_channels,
-                                 kernel_size=3,
-                                 stride=1,
-                                 padding=1)
-        self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
-                                                     out_channels=mid_channels,
-                                                     temb_channels=0,
-                                                     dropout=0.0) for _ in range(depth)])
+        self.conv_in = nn.Conv2d(
+            in_channels, mid_channels, kernel_size=3, stride=1, padding=1
+        )
+        self.res_block1 = nn.ModuleList(
+            [
+                ResnetBlock(
+                    in_channels=mid_channels,
+                    out_channels=mid_channels,
+                    temb_channels=0,
+                    dropout=0.0,
+                )
+                for _ in range(depth)
+            ]
+        )
         self.attn = AttnBlock(mid_channels)
-        self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
-                                                     out_channels=mid_channels,
-                                                     temb_channels=0,
-                                                     dropout=0.0) for _ in range(depth)])
+        self.res_block2 = nn.ModuleList(
+            [
+                ResnetBlock(
+                    in_channels=mid_channels,
+                    out_channels=mid_channels,
+                    temb_channels=0,
+                    dropout=0.0,
+                )
+                for _ in range(depth)
+            ]
+        )
 
-        self.conv_out = nn.Conv2d(mid_channels,
-                                  out_channels,
-                                  kernel_size=1,
-                                  )
+        self.conv_out = nn.Conv2d(
+            mid_channels,
+            out_channels,
+            kernel_size=1,
+        )
 
     def forward(self, x):
         x = self.conv_in(x)
         for block in self.res_block1:
             x = block(x, None)
-        x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
+        x = torch.nn.functional.interpolate(
+            x,
+            size=(
+                int(round(x.shape[2] * self.factor)),
+                int(round(x.shape[3] * self.factor)),
+            ),
+        )
         x = self.attn(x)
         for block in self.res_block2:
             x = block(x, None)
@@ -690,17 +807,42 @@ class LatentRescaler(nn.Module):
 
 
 class MergedRescaleEncoder(nn.Module):
-    def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
-                 attn_resolutions, dropout=0.0, resamp_with_conv=True,
-                 ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
+    def __init__(
+        self,
+        in_channels,
+        ch,
+        resolution,
+        out_ch,
+        num_res_blocks,
+        attn_resolutions,
+        dropout=0.0,
+        resamp_with_conv=True,
+        ch_mult=(1, 2, 4, 8),
+        rescale_factor=1.0,
+        rescale_module_depth=1,
+    ):
         super().__init__()
         intermediate_chn = ch * ch_mult[-1]
-        self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
-                               z_channels=intermediate_chn, double_z=False, resolution=resolution,
-                               attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
-                               out_ch=None)
-        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
-                                       mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
+        self.encoder = Encoder(
+            in_channels=in_channels,
+            num_res_blocks=num_res_blocks,
+            ch=ch,
+            ch_mult=ch_mult,
+            z_channels=intermediate_chn,
+            double_z=False,
+            resolution=resolution,
+            attn_resolutions=attn_resolutions,
+            dropout=dropout,
+            resamp_with_conv=resamp_with_conv,
+            out_ch=None,
+        )
+        self.rescaler = LatentRescaler(
+            factor=rescale_factor,
+            in_channels=intermediate_chn,
+            mid_channels=intermediate_chn,
+            out_channels=out_ch,
+            depth=rescale_module_depth,
+        )
 
     def forward(self, x):
         x = self.encoder(x)
@@ -709,15 +851,41 @@ class MergedRescaleEncoder(nn.Module):
 
 
 class MergedRescaleDecoder(nn.Module):
-    def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
-                 dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
+    def __init__(
+        self,
+        z_channels,
+        out_ch,
+        resolution,
+        num_res_blocks,
+        attn_resolutions,
+        ch,
+        ch_mult=(1, 2, 4, 8),
+        dropout=0.0,
+        resamp_with_conv=True,
+        rescale_factor=1.0,
+        rescale_module_depth=1,
+    ):
         super().__init__()
-        tmp_chn = z_channels*ch_mult[-1]
-        self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
-                               resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
-                               ch_mult=ch_mult, resolution=resolution, ch=ch)
-        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
-                                       out_channels=tmp_chn, depth=rescale_module_depth)
+        tmp_chn = z_channels * ch_mult[-1]
+        self.decoder = Decoder(
+            out_ch=out_ch,
+            z_channels=tmp_chn,
+            attn_resolutions=attn_resolutions,
+            dropout=dropout,
+            resamp_with_conv=resamp_with_conv,
+            in_channels=None,
+            num_res_blocks=num_res_blocks,
+            ch_mult=ch_mult,
+            resolution=resolution,
+            ch=ch,
+        )
+        self.rescaler = LatentRescaler(
+            factor=rescale_factor,
+            in_channels=z_channels,
+            mid_channels=tmp_chn,
+            out_channels=tmp_chn,
+            depth=rescale_module_depth,
+        )
 
     def forward(self, x):
         x = self.rescaler(x)
@@ -726,17 +894,32 @@ class MergedRescaleDecoder(nn.Module):
 
 
 class Upsampler(nn.Module):
-    def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
+    def __init__(
+        self, in_size, out_size, in_channels, out_channels, ch_mult=2
+    ):
         super().__init__()
         assert out_size >= in_size
-        num_blocks = int(np.log2(out_size//in_size))+1
-        factor_up = 1.+ (out_size % in_size)
-        print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
-        self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
-                                       out_channels=in_channels)
-        self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
-                               attn_resolutions=[], in_channels=None, ch=in_channels,
-                               ch_mult=[ch_mult for _ in range(num_blocks)])
+        num_blocks = int(np.log2(out_size // in_size)) + 1
+        factor_up = 1.0 + (out_size % in_size)
+        print(
+            f'Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}'
+        )
+        self.rescaler = LatentRescaler(
+            factor=factor_up,
+            in_channels=in_channels,
+            mid_channels=2 * in_channels,
+            out_channels=in_channels,
+        )
+        self.decoder = Decoder(
+            out_ch=out_channels,
+            resolution=out_size,
+            z_channels=in_channels,
+            num_res_blocks=2,
+            attn_resolutions=[],
+            in_channels=None,
+            ch=in_channels,
+            ch_mult=[ch_mult for _ in range(num_blocks)],
+        )
 
     def forward(self, x):
         x = self.rescaler(x)
@@ -745,42 +928,55 @@ class Upsampler(nn.Module):
 
 
 class Resize(nn.Module):
-    def __init__(self, in_channels=None, learned=False, mode="bilinear"):
+    def __init__(self, in_channels=None, learned=False, mode='bilinear'):
         super().__init__()
         self.with_conv = learned
         self.mode = mode
         if self.with_conv:
-            print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode")
+            print(
+                f'Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode'
+            )
             raise NotImplementedError()
             assert in_channels is not None
             # no asymmetric padding in torch conv, must do it ourselves
-            self.conv = torch.nn.Conv2d(in_channels,
-                                        in_channels,
-                                        kernel_size=4,
-                                        stride=2,
-                                        padding=1)
+            self.conv = torch.nn.Conv2d(
+                in_channels, in_channels, kernel_size=4, stride=2, padding=1
+            )
 
     def forward(self, x, scale_factor=1.0):
-        if scale_factor==1.0:
+        if scale_factor == 1.0:
             return x
         else:
-            x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
+            x = torch.nn.functional.interpolate(
+                x,
+                mode=self.mode,
+                align_corners=False,
+                scale_factor=scale_factor,
+            )
         return x
 
-class FirstStagePostProcessor(nn.Module):
 
-    def __init__(self, ch_mult:list, in_channels,
-                 pretrained_model:nn.Module=None,
-                 reshape=False,
-                 n_channels=None,
-                 dropout=0.,
-                 pretrained_config=None):
+class FirstStagePostProcessor(nn.Module):
+    def __init__(
+        self,
+        ch_mult: list,
+        in_channels,
+        pretrained_model: nn.Module = None,
+        reshape=False,
+        n_channels=None,
+        dropout=0.0,
+        pretrained_config=None,
+    ):
         super().__init__()
         if pretrained_config is None:
-            assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
+            assert (
+                pretrained_model is not None
+            ), 'Either "pretrained_model" or "pretrained_config" must not be None'
             self.pretrained_model = pretrained_model
         else:
-            assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
+            assert (
+                pretrained_config is not None
+            ), 'Either "pretrained_model" or "pretrained_config" must not be None'
             self.instantiate_pretrained(pretrained_config)
 
         self.do_reshape = reshape
@@ -788,22 +984,28 @@ class FirstStagePostProcessor(nn.Module):
         if n_channels is None:
             n_channels = self.pretrained_model.encoder.ch
 
-        self.proj_norm = Normalize(in_channels,num_groups=in_channels//2)
-        self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3,
-                            stride=1,padding=1)
+        self.proj_norm = Normalize(in_channels, num_groups=in_channels // 2)
+        self.proj = nn.Conv2d(
+            in_channels, n_channels, kernel_size=3, stride=1, padding=1
+        )
 
         blocks = []
         downs = []
         ch_in = n_channels
         for m in ch_mult:
-            blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout))
+            blocks.append(
+                ResnetBlock(
+                    in_channels=ch_in,
+                    out_channels=m * n_channels,
+                    dropout=dropout,
+                )
+            )
             ch_in = m * n_channels
             downs.append(Downsample(ch_in, with_conv=False))
 
         self.model = nn.ModuleList(blocks)
         self.downsampler = nn.ModuleList(downs)
 
-
     def instantiate_pretrained(self, config):
         model = instantiate_from_config(config)
         self.pretrained_model = model.eval()
@@ -811,25 +1013,23 @@ class FirstStagePostProcessor(nn.Module):
         for param in self.pretrained_model.parameters():
             param.requires_grad = False
 
-
     @torch.no_grad()
-    def encode_with_pretrained(self,x):
+    def encode_with_pretrained(self, x):
         c = self.pretrained_model.encode(x)
         if isinstance(c, DiagonalGaussianDistribution):
             c = c.mode()
-        return  c
+        return c
 
-    def forward(self,x):
+    def forward(self, x):
         z_fs = self.encode_with_pretrained(x)
         z = self.proj_norm(z_fs)
         z = self.proj(z)
         z = nonlinearity(z)
 
-        for submodel, downmodel in zip(self.model,self.downsampler):
-            z = submodel(z,temb=None)
+        for submodel, downmodel in zip(self.model, self.downsampler):
+            z = submodel(z, temb=None)
             z = downmodel(z)
 
         if self.do_reshape:
-            z = rearrange(z,'b c h w -> b (h w) c')
+            z = rearrange(z, 'b c h w -> b (h w) c')
         return z
-
diff --git a/ldm/modules/diffusionmodules/openaimodel.py b/ldm/modules/diffusionmodules/openaimodel.py
index fcf95d1ea8..d6baa76a1c 100644
--- a/ldm/modules/diffusionmodules/openaimodel.py
+++ b/ldm/modules/diffusionmodules/openaimodel.py
@@ -24,6 +24,7 @@ from ldm.modules.attention import SpatialTransformer
 def convert_module_to_f16(x):
     pass
 
+
 def convert_module_to_f32(x):
     pass
 
@@ -42,7 +43,9 @@ class AttentionPool2d(nn.Module):
         output_dim: int = None,
     ):
         super().__init__()
-        self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
+        self.positional_embedding = nn.Parameter(
+            th.randn(embed_dim, spacial_dim**2 + 1) / embed_dim**0.5
+        )
         self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
         self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
         self.num_heads = embed_dim // num_heads_channels
@@ -97,37 +100,45 @@ class Upsample(nn.Module):
                  upsampling occurs in the inner-two dimensions.
     """
 
-    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
+    def __init__(
+        self, channels, use_conv, dims=2, out_channels=None, padding=1
+    ):
         super().__init__()
         self.channels = channels
         self.out_channels = out_channels or channels
         self.use_conv = use_conv
         self.dims = dims
         if use_conv:
-            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
+            self.conv = conv_nd(
+                dims, self.channels, self.out_channels, 3, padding=padding
+            )
 
     def forward(self, x):
         assert x.shape[1] == self.channels
         if self.dims == 3:
             x = F.interpolate(
-                x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
+                x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode='nearest'
             )
         else:
-            x = F.interpolate(x, scale_factor=2, mode="nearest")
+            x = F.interpolate(x, scale_factor=2, mode='nearest')
         if self.use_conv:
             x = self.conv(x)
         return x
 
+
 class TransposedUpsample(nn.Module):
-    'Learned 2x upsampling without padding'
+    """Learned 2x upsampling without padding"""
+
     def __init__(self, channels, out_channels=None, ks=5):
         super().__init__()
         self.channels = channels
         self.out_channels = out_channels or channels
 
-        self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
+        self.up = nn.ConvTranspose2d(
+            self.channels, self.out_channels, kernel_size=ks, stride=2
+        )
 
-    def forward(self,x):
+    def forward(self, x):
         return self.up(x)
 
 
@@ -140,7 +151,9 @@ class Downsample(nn.Module):
                  downsampling occurs in the inner-two dimensions.
     """
 
-    def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
+    def __init__(
+        self, channels, use_conv, dims=2, out_channels=None, padding=1
+    ):
         super().__init__()
         self.channels = channels
         self.out_channels = out_channels or channels
@@ -149,7 +162,12 @@ class Downsample(nn.Module):
         stride = 2 if dims != 3 else (1, 2, 2)
         if use_conv:
             self.op = conv_nd(
-                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
+                dims,
+                self.channels,
+                self.out_channels,
+                3,
+                stride=stride,
+                padding=padding,
             )
         else:
             assert self.channels == self.out_channels
@@ -219,7 +237,9 @@ class ResBlock(TimestepBlock):
             nn.SiLU(),
             linear(
                 emb_channels,
-                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
+                2 * self.out_channels
+                if use_scale_shift_norm
+                else self.out_channels,
             ),
         )
         self.out_layers = nn.Sequential(
@@ -227,7 +247,9 @@ class ResBlock(TimestepBlock):
             nn.SiLU(),
             nn.Dropout(p=dropout),
             zero_module(
-                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
+                conv_nd(
+                    dims, self.out_channels, self.out_channels, 3, padding=1
+                )
             ),
         )
 
@@ -238,7 +260,9 @@ class ResBlock(TimestepBlock):
                 dims, channels, self.out_channels, 3, padding=1
             )
         else:
-            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
+            self.skip_connection = conv_nd(
+                dims, channels, self.out_channels, 1
+            )
 
     def forward(self, x, emb):
         """
@@ -251,7 +275,6 @@ class ResBlock(TimestepBlock):
             self._forward, (x, emb), self.parameters(), self.use_checkpoint
         )
 
-
     def _forward(self, x, emb):
         if self.updown:
             in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
@@ -297,7 +320,7 @@ class AttentionBlock(nn.Module):
         else:
             assert (
                 channels % num_head_channels == 0
-            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
+            ), f'q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}'
             self.num_heads = channels // num_head_channels
         self.use_checkpoint = use_checkpoint
         self.norm = normalization(channels)
@@ -312,8 +335,10 @@ class AttentionBlock(nn.Module):
         self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
 
     def forward(self, x):
-        return checkpoint(self._forward, (x,), self.parameters(), True)   # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
-        #return pt_checkpoint(self._forward, x)  # pytorch
+        return checkpoint(
+            self._forward, (x,), self.parameters(), True
+        )   # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
+        # return pt_checkpoint(self._forward, x)  # pytorch
 
     def _forward(self, x):
         b, c, *spatial = x.shape
@@ -340,7 +365,7 @@ def count_flops_attn(model, _x, y):
     # We perform two matmuls with the same number of ops.
     # The first computes the weight matrix, the second computes
     # the combination of the value vectors.
-    matmul_ops = 2 * b * (num_spatial ** 2) * c
+    matmul_ops = 2 * b * (num_spatial**2) * c
     model.total_ops += th.DoubleTensor([matmul_ops])
 
 
@@ -362,13 +387,15 @@ class QKVAttentionLegacy(nn.Module):
         bs, width, length = qkv.shape
         assert width % (3 * self.n_heads) == 0
         ch = width // (3 * self.n_heads)
-        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
+        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(
+            ch, dim=1
+        )
         scale = 1 / math.sqrt(math.sqrt(ch))
         weight = th.einsum(
-            "bct,bcs->bts", q * scale, k * scale
+            'bct,bcs->bts', q * scale, k * scale
         )  # More stable with f16 than dividing afterwards
         weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
-        a = th.einsum("bts,bcs->bct", weight, v)
+        a = th.einsum('bts,bcs->bct', weight, v)
         return a.reshape(bs, -1, length)
 
     @staticmethod
@@ -397,12 +424,14 @@ class QKVAttention(nn.Module):
         q, k, v = qkv.chunk(3, dim=1)
         scale = 1 / math.sqrt(math.sqrt(ch))
         weight = th.einsum(
-            "bct,bcs->bts",
+            'bct,bcs->bts',
             (q * scale).view(bs * self.n_heads, ch, length),
             (k * scale).view(bs * self.n_heads, ch, length),
         )  # More stable with f16 than dividing afterwards
         weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
-        a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
+        a = th.einsum(
+            'bts,bcs->bct', weight, v.reshape(bs * self.n_heads, ch, length)
+        )
         return a.reshape(bs, -1, length)
 
     @staticmethod
@@ -461,19 +490,24 @@ class UNetModel(nn.Module):
         use_scale_shift_norm=False,
         resblock_updown=False,
         use_new_attention_order=False,
-        use_spatial_transformer=False,    # custom transformer support
-        transformer_depth=1,              # custom transformer support
-        context_dim=None,                 # custom transformer support
-        n_embed=None,                     # custom support for prediction of discrete ids into codebook of first stage vq model
+        use_spatial_transformer=False,  # custom transformer support
+        transformer_depth=1,  # custom transformer support
+        context_dim=None,  # custom transformer support
+        n_embed=None,  # custom support for prediction of discrete ids into codebook of first stage vq model
         legacy=True,
     ):
         super().__init__()
         if use_spatial_transformer:
-            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
+            assert (
+                context_dim is not None
+            ), 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
 
         if context_dim is not None:
-            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
+            assert (
+                use_spatial_transformer
+            ), 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
             from omegaconf.listconfig import ListConfig
+
             if type(context_dim) == ListConfig:
                 context_dim = list(context_dim)
 
@@ -481,10 +515,14 @@ class UNetModel(nn.Module):
             num_heads_upsample = num_heads
 
         if num_heads == -1:
-            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
+            assert (
+                num_head_channels != -1
+            ), 'Either num_heads or num_head_channels has to be set'
 
         if num_head_channels == -1:
-            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
+            assert (
+                num_heads != -1
+            ), 'Either num_heads or num_head_channels has to be set'
 
         self.image_size = image_size
         self.in_channels = in_channels
@@ -545,8 +583,12 @@ class UNetModel(nn.Module):
                         num_heads = ch // num_head_channels
                         dim_head = num_head_channels
                     if legacy:
-                        #num_heads = 1
-                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
+                        # num_heads = 1
+                        dim_head = (
+                            ch // num_heads
+                            if use_spatial_transformer
+                            else num_head_channels
+                        )
                     layers.append(
                         AttentionBlock(
                             ch,
@@ -554,8 +596,14 @@ class UNetModel(nn.Module):
                             num_heads=num_heads,
                             num_head_channels=dim_head,
                             use_new_attention_order=use_new_attention_order,
-                        ) if not use_spatial_transformer else SpatialTransformer(
-                            ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
+                        )
+                        if not use_spatial_transformer
+                        else SpatialTransformer(
+                            ch,
+                            num_heads,
+                            dim_head,
+                            depth=transformer_depth,
+                            context_dim=context_dim,
                         )
                     )
                 self.input_blocks.append(TimestepEmbedSequential(*layers))
@@ -592,8 +640,12 @@ class UNetModel(nn.Module):
             num_heads = ch // num_head_channels
             dim_head = num_head_channels
         if legacy:
-            #num_heads = 1
-            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
+            # num_heads = 1
+            dim_head = (
+                ch // num_heads
+                if use_spatial_transformer
+                else num_head_channels
+            )
         self.middle_block = TimestepEmbedSequential(
             ResBlock(
                 ch,
@@ -609,9 +661,15 @@ class UNetModel(nn.Module):
                 num_heads=num_heads,
                 num_head_channels=dim_head,
                 use_new_attention_order=use_new_attention_order,
-            ) if not use_spatial_transformer else SpatialTransformer(
-                            ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
-                        ),
+            )
+            if not use_spatial_transformer
+            else SpatialTransformer(
+                ch,
+                num_heads,
+                dim_head,
+                depth=transformer_depth,
+                context_dim=context_dim,
+            ),
             ResBlock(
                 ch,
                 time_embed_dim,
@@ -646,8 +704,12 @@ class UNetModel(nn.Module):
                         num_heads = ch // num_head_channels
                         dim_head = num_head_channels
                     if legacy:
-                        #num_heads = 1
-                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
+                        # num_heads = 1
+                        dim_head = (
+                            ch // num_heads
+                            if use_spatial_transformer
+                            else num_head_channels
+                        )
                     layers.append(
                         AttentionBlock(
                             ch,
@@ -655,8 +717,14 @@ class UNetModel(nn.Module):
                             num_heads=num_heads_upsample,
                             num_head_channels=dim_head,
                             use_new_attention_order=use_new_attention_order,
-                        ) if not use_spatial_transformer else SpatialTransformer(
-                            ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
+                        )
+                        if not use_spatial_transformer
+                        else SpatialTransformer(
+                            ch,
+                            num_heads,
+                            dim_head,
+                            depth=transformer_depth,
+                            context_dim=context_dim,
                         )
                     )
                 if level and i == num_res_blocks:
@@ -673,7 +741,9 @@ class UNetModel(nn.Module):
                             up=True,
                         )
                         if resblock_updown
-                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
+                        else Upsample(
+                            ch, conv_resample, dims=dims, out_channels=out_ch
+                        )
                     )
                     ds //= 2
                 self.output_blocks.append(TimestepEmbedSequential(*layers))
@@ -682,14 +752,16 @@ class UNetModel(nn.Module):
         self.out = nn.Sequential(
             normalization(ch),
             nn.SiLU(),
-            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
+            zero_module(
+                conv_nd(dims, model_channels, out_channels, 3, padding=1)
+            ),
         )
         if self.predict_codebook_ids:
             self.id_predictor = nn.Sequential(
-            normalization(ch),
-            conv_nd(dims, model_channels, n_embed, 1),
-            #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
-        )
+                normalization(ch),
+                conv_nd(dims, model_channels, n_embed, 1),
+                # nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
+            )
 
     def convert_to_fp16(self):
         """
@@ -707,7 +779,7 @@ class UNetModel(nn.Module):
         self.middle_block.apply(convert_module_to_f32)
         self.output_blocks.apply(convert_module_to_f32)
 
-    def forward(self, x, timesteps=None, context=None, y=None,**kwargs):
+    def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
         """
         Apply the model to an input batch.
         :param x: an [N x C x ...] Tensor of inputs.
@@ -718,9 +790,11 @@ class UNetModel(nn.Module):
         """
         assert (y is not None) == (
             self.num_classes is not None
-        ), "must specify y if and only if the model is class-conditional"
+        ), 'must specify y if and only if the model is class-conditional'
         hs = []
-        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
+        t_emb = timestep_embedding(
+            timesteps, self.model_channels, repeat_only=False
+        )
         emb = self.time_embed(t_emb)
 
         if self.num_classes is not None:
@@ -768,9 +842,9 @@ class EncoderUNetModel(nn.Module):
         use_scale_shift_norm=False,
         resblock_updown=False,
         use_new_attention_order=False,
-        pool="adaptive",
+        pool='adaptive',
         *args,
-        **kwargs
+        **kwargs,
     ):
         super().__init__()
 
@@ -888,7 +962,7 @@ class EncoderUNetModel(nn.Module):
         )
         self._feature_size += ch
         self.pool = pool
-        if pool == "adaptive":
+        if pool == 'adaptive':
             self.out = nn.Sequential(
                 normalization(ch),
                 nn.SiLU(),
@@ -896,7 +970,7 @@ class EncoderUNetModel(nn.Module):
                 zero_module(conv_nd(dims, ch, out_channels, 1)),
                 nn.Flatten(),
             )
-        elif pool == "attention":
+        elif pool == 'attention':
             assert num_head_channels != -1
             self.out = nn.Sequential(
                 normalization(ch),
@@ -905,13 +979,13 @@ class EncoderUNetModel(nn.Module):
                     (image_size // ds), ch, num_head_channels, out_channels
                 ),
             )
-        elif pool == "spatial":
+        elif pool == 'spatial':
             self.out = nn.Sequential(
                 nn.Linear(self._feature_size, 2048),
                 nn.ReLU(),
                 nn.Linear(2048, self.out_channels),
             )
-        elif pool == "spatial_v2":
+        elif pool == 'spatial_v2':
             self.out = nn.Sequential(
                 nn.Linear(self._feature_size, 2048),
                 normalization(2048),
@@ -919,7 +993,7 @@ class EncoderUNetModel(nn.Module):
                 nn.Linear(2048, self.out_channels),
             )
         else:
-            raise NotImplementedError(f"Unexpected {pool} pooling")
+            raise NotImplementedError(f'Unexpected {pool} pooling')
 
     def convert_to_fp16(self):
         """
@@ -942,20 +1016,21 @@ class EncoderUNetModel(nn.Module):
         :param timesteps: a 1-D batch of timesteps.
         :return: an [N x K] Tensor of outputs.
         """
-        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
+        emb = self.time_embed(
+            timestep_embedding(timesteps, self.model_channels)
+        )
 
         results = []
         h = x.type(self.dtype)
         for module in self.input_blocks:
             h = module(h, emb)
-            if self.pool.startswith("spatial"):
+            if self.pool.startswith('spatial'):
                 results.append(h.type(x.dtype).mean(dim=(2, 3)))
         h = self.middle_block(h, emb)
-        if self.pool.startswith("spatial"):
+        if self.pool.startswith('spatial'):
             results.append(h.type(x.dtype).mean(dim=(2, 3)))
             h = th.cat(results, axis=-1)
             return self.out(h)
         else:
             h = h.type(x.dtype)
             return self.out(h)
-
diff --git a/ldm/modules/diffusionmodules/util.py b/ldm/modules/diffusionmodules/util.py
index 6b5b9dc9e2..197b42b2bc 100644
--- a/ldm/modules/diffusionmodules/util.py
+++ b/ldm/modules/diffusionmodules/util.py
@@ -18,15 +18,24 @@ from einops import repeat
 from ldm.util import instantiate_from_config
 
 
-def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-    if schedule == "linear":
+def make_beta_schedule(
+    schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3
+):
+    if schedule == 'linear':
         betas = (
-                torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
+            torch.linspace(
+                linear_start**0.5,
+                linear_end**0.5,
+                n_timestep,
+                dtype=torch.float64,
+            )
+            ** 2
         )
 
-    elif schedule == "cosine":
+    elif schedule == 'cosine':
         timesteps = (
-                torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
+            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep
+            + cosine_s
         )
         alphas = timesteps / (1 + cosine_s) * np.pi / 2
         alphas = torch.cos(alphas).pow(2)
@@ -34,23 +43,41 @@ def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2,
         betas = 1 - alphas[1:] / alphas[:-1]
         betas = np.clip(betas, a_min=0, a_max=0.999)
 
-    elif schedule == "sqrt_linear":
-        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
-    elif schedule == "sqrt":
-        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
+    elif schedule == 'sqrt_linear':
+        betas = torch.linspace(
+            linear_start, linear_end, n_timestep, dtype=torch.float64
+        )
+    elif schedule == 'sqrt':
+        betas = (
+            torch.linspace(
+                linear_start, linear_end, n_timestep, dtype=torch.float64
+            )
+            ** 0.5
+        )
     else:
         raise ValueError(f"schedule '{schedule}' unknown.")
     return betas.numpy()
 
 
-def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
+def make_ddim_timesteps(
+    ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True
+):
     if ddim_discr_method == 'uniform':
         c = num_ddpm_timesteps // num_ddim_timesteps
         ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
     elif ddim_discr_method == 'quad':
-        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
+        ddim_timesteps = (
+            (
+                np.linspace(
+                    0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps
+                )
+            )
+            ** 2
+        ).astype(int)
     else:
-        raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
+        raise NotImplementedError(
+            f'There is no ddim discretization method called "{ddim_discr_method}"'
+        )
 
     # assert ddim_timesteps.shape[0] == num_ddim_timesteps
     # add one to get the final alpha values right (the ones from first scale to data during sampling)
@@ -60,17 +87,27 @@ def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timestep
     return steps_out
 
 
-def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
+def make_ddim_sampling_parameters(
+    alphacums, ddim_timesteps, eta, verbose=True
+):
     # select alphas for computing the variance schedule
     alphas = alphacums[ddim_timesteps]
-    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
+    alphas_prev = np.asarray(
+        [alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()
+    )
 
     # according the the formula provided in https://arxiv.org/abs/2010.02502
-    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
+    sigmas = eta * np.sqrt(
+        (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)
+    )
     if verbose:
-        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
-        print(f'For the chosen value of eta, which is {eta}, '
-              f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
+        print(
+            f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}'
+        )
+        print(
+            f'For the chosen value of eta, which is {eta}, '
+            f'this results in the following sigma_t schedule for ddim sampler {sigmas}'
+        )
     return sigmas, alphas, alphas_prev
 
 
@@ -109,7 +146,9 @@ def checkpoint(func, inputs, params, flag):
                    explicitly take as arguments.
     :param flag: if False, disable gradient checkpointing.
     """
-    if False: # disabled checkpointing to allow requires_grad = False for main model
+    if (
+        False
+    ):   # disabled checkpointing to allow requires_grad = False for main model
         args = tuple(inputs) + tuple(params)
         return CheckpointFunction.apply(func, len(inputs), *args)
     else:
@@ -129,7 +168,9 @@ class CheckpointFunction(torch.autograd.Function):
 
     @staticmethod
     def backward(ctx, *output_grads):
-        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
+        ctx.input_tensors = [
+            x.detach().requires_grad_(True) for x in ctx.input_tensors
+        ]
         with torch.enable_grad():
             # Fixes a bug where the first op in run_function modifies the
             # Tensor storage in place, which is not allowed for detach()'d
@@ -160,12 +201,16 @@ def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
     if not repeat_only:
         half = dim // 2
         freqs = torch.exp(
-            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
+            -math.log(max_period)
+            * torch.arange(start=0, end=half, dtype=torch.float32)
+            / half
         ).to(device=timesteps.device)
         args = timesteps[:, None].float() * freqs[None]
         embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
         if dim % 2:
-            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+            embedding = torch.cat(
+                [embedding, torch.zeros_like(embedding[:, :1])], dim=-1
+            )
     else:
         embedding = repeat(timesteps, 'b -> b d', d=dim)
     return embedding
@@ -215,6 +260,7 @@ class GroupNorm32(nn.GroupNorm):
     def forward(self, x):
         return super().forward(x.float()).type(x.dtype)
 
+
 def conv_nd(dims, *args, **kwargs):
     """
     Create a 1D, 2D, or 3D convolution module.
@@ -225,7 +271,7 @@ def conv_nd(dims, *args, **kwargs):
         return nn.Conv2d(*args, **kwargs)
     elif dims == 3:
         return nn.Conv3d(*args, **kwargs)
-    raise ValueError(f"unsupported dimensions: {dims}")
+    raise ValueError(f'unsupported dimensions: {dims}')
 
 
 def linear(*args, **kwargs):
@@ -245,15 +291,16 @@ def avg_pool_nd(dims, *args, **kwargs):
         return nn.AvgPool2d(*args, **kwargs)
     elif dims == 3:
         return nn.AvgPool3d(*args, **kwargs)
-    raise ValueError(f"unsupported dimensions: {dims}")
+    raise ValueError(f'unsupported dimensions: {dims}')
 
 
 class HybridConditioner(nn.Module):
-
     def __init__(self, c_concat_config, c_crossattn_config):
         super().__init__()
         self.concat_conditioner = instantiate_from_config(c_concat_config)
-        self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
+        self.crossattn_conditioner = instantiate_from_config(
+            c_crossattn_config
+        )
 
     def forward(self, c_concat, c_crossattn):
         c_concat = self.concat_conditioner(c_concat)
@@ -262,6 +309,8 @@ class HybridConditioner(nn.Module):
 
 
 def noise_like(shape, device, repeat=False):
-    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
+    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(
+        shape[0], *((1,) * (len(shape) - 1))
+    )
     noise = lambda: torch.randn(shape, device=device)
-    return repeat_noise() if repeat else noise()
\ No newline at end of file
+    return repeat_noise() if repeat else noise()
diff --git a/ldm/modules/distributions/distributions.py b/ldm/modules/distributions/distributions.py
index f2b8ef9011..67ed535791 100644
--- a/ldm/modules/distributions/distributions.py
+++ b/ldm/modules/distributions/distributions.py
@@ -30,33 +30,45 @@ class DiagonalGaussianDistribution(object):
         self.std = torch.exp(0.5 * self.logvar)
         self.var = torch.exp(self.logvar)
         if self.deterministic:
-            self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
+            self.var = self.std = torch.zeros_like(self.mean).to(
+                device=self.parameters.device
+            )
 
     def sample(self):
-        x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
+        x = self.mean + self.std * torch.randn(self.mean.shape).to(
+            device=self.parameters.device
+        )
         return x
 
     def kl(self, other=None):
         if self.deterministic:
-            return torch.Tensor([0.])
+            return torch.Tensor([0.0])
         else:
             if other is None:
-                return 0.5 * torch.sum(torch.pow(self.mean, 2)
-                                       + self.var - 1.0 - self.logvar,
-                                       dim=[1, 2, 3])
+                return 0.5 * torch.sum(
+                    torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
+                    dim=[1, 2, 3],
+                )
             else:
                 return 0.5 * torch.sum(
                     torch.pow(self.mean - other.mean, 2) / other.var
-                    + self.var / other.var - 1.0 - self.logvar + other.logvar,
-                    dim=[1, 2, 3])
+                    + self.var / other.var
+                    - 1.0
+                    - self.logvar
+                    + other.logvar,
+                    dim=[1, 2, 3],
+                )
 
-    def nll(self, sample, dims=[1,2,3]):
+    def nll(self, sample, dims=[1, 2, 3]):
         if self.deterministic:
-            return torch.Tensor([0.])
+            return torch.Tensor([0.0])
         logtwopi = np.log(2.0 * np.pi)
         return 0.5 * torch.sum(
-            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
-            dim=dims)
+            logtwopi
+            + self.logvar
+            + torch.pow(sample - self.mean, 2) / self.var,
+            dim=dims,
+        )
 
     def mode(self):
         return self.mean
@@ -74,7 +86,7 @@ def normal_kl(mean1, logvar1, mean2, logvar2):
         if isinstance(obj, torch.Tensor):
             tensor = obj
             break
-    assert tensor is not None, "at least one argument must be a Tensor"
+    assert tensor is not None, 'at least one argument must be a Tensor'
 
     # Force variances to be Tensors. Broadcasting helps convert scalars to
     # Tensors, but it does not work for torch.exp().
diff --git a/ldm/modules/ema.py b/ldm/modules/ema.py
index c8c75af435..2ceec5f0e7 100644
--- a/ldm/modules/ema.py
+++ b/ldm/modules/ema.py
@@ -10,24 +10,30 @@ class LitEma(nn.Module):
 
         self.m_name2s_name = {}
         self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
-        self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates
-                             else torch.tensor(-1,dtype=torch.int))
+        self.register_buffer(
+            'num_updates',
+            torch.tensor(0, dtype=torch.int)
+            if use_num_upates
+            else torch.tensor(-1, dtype=torch.int),
+        )
 
         for name, p in model.named_parameters():
             if p.requires_grad:
-                #remove as '.'-character is not allowed in buffers
-                s_name = name.replace('.','')
-                self.m_name2s_name.update({name:s_name})
-                self.register_buffer(s_name,p.clone().detach().data)
+                # remove as '.'-character is not allowed in buffers
+                s_name = name.replace('.', '')
+                self.m_name2s_name.update({name: s_name})
+                self.register_buffer(s_name, p.clone().detach().data)
 
         self.collected_params = []
 
-    def forward(self,model):
+    def forward(self, model):
         decay = self.decay
 
         if self.num_updates >= 0:
             self.num_updates += 1
-            decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))
+            decay = min(
+                self.decay, (1 + self.num_updates) / (10 + self.num_updates)
+            )
 
         one_minus_decay = 1.0 - decay
 
@@ -38,8 +44,12 @@ class LitEma(nn.Module):
             for key in m_param:
                 if m_param[key].requires_grad:
                     sname = self.m_name2s_name[key]
-                    shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
-                    shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
+                    shadow_params[sname] = shadow_params[sname].type_as(
+                        m_param[key]
+                    )
+                    shadow_params[sname].sub_(
+                        one_minus_decay * (shadow_params[sname] - m_param[key])
+                    )
                 else:
                     assert not key in self.m_name2s_name
 
@@ -48,7 +58,9 @@ class LitEma(nn.Module):
         shadow_params = dict(self.named_buffers())
         for key in m_param:
             if m_param[key].requires_grad:
-                m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
+                m_param[key].data.copy_(
+                    shadow_params[self.m_name2s_name[key]].data
+                )
             else:
                 assert not key in self.m_name2s_name
 
diff --git a/ldm/modules/embedding_manager.py b/ldm/modules/embedding_manager.py
index 7020a27b9a..677bc4ad3a 100644
--- a/ldm/modules/embedding_manager.py
+++ b/ldm/modules/embedding_manager.py
@@ -8,18 +8,29 @@ from ldm.data.personalized import per_img_token_list
 from transformers import CLIPTokenizer
 from functools import partial
 
-DEFAULT_PLACEHOLDER_TOKEN = ["*"]
+DEFAULT_PLACEHOLDER_TOKEN = ['*']
 
 PROGRESSIVE_SCALE = 2000
 
+
 def get_clip_token_for_string(tokenizer, string):
-    batch_encoding = tokenizer(string, truncation=True, max_length=77, return_length=True,
-                               return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
-    tokens = batch_encoding["input_ids"]
-    assert torch.count_nonzero(tokens - 49407) == 2, f"String '{string}' maps to more than a single token. Please use another string"
+    batch_encoding = tokenizer(
+        string,
+        truncation=True,
+        max_length=77,
+        return_length=True,
+        return_overflowing_tokens=False,
+        padding='max_length',
+        return_tensors='pt',
+    )
+    tokens = batch_encoding['input_ids']
+    assert (
+        torch.count_nonzero(tokens - 49407) == 2
+    ), f"String '{string}' maps to more than a single token. Please use another string"
 
     return tokens[0, 1]
 
+
 def get_bert_token_for_string(tokenizer, string):
     token = tokenizer(string)
     # assert torch.count_nonzero(token) == 3, f"String '{string}' maps to more than a single token. Please use another string"
@@ -28,42 +39,54 @@ def get_bert_token_for_string(tokenizer, string):
 
     return token
 
+
 def get_embedding_for_clip_token(embedder, token):
     return embedder(token.unsqueeze(0))[0, 0]
 
 
 class EmbeddingManager(nn.Module):
     def __init__(
-            self,
-            embedder,
-            placeholder_strings=None,
-            initializer_words=None,
-            per_image_tokens=False,
-            num_vectors_per_token=1,
-            progressive_words=False,
-            **kwargs
+        self,
+        embedder,
+        placeholder_strings=None,
+        initializer_words=None,
+        per_image_tokens=False,
+        num_vectors_per_token=1,
+        progressive_words=False,
+        **kwargs,
     ):
         super().__init__()
 
         self.string_to_token_dict = {}
-        
+
         self.string_to_param_dict = nn.ParameterDict()
 
-        self.initial_embeddings = nn.ParameterDict() # These should not be optimized
+        self.initial_embeddings = (
+            nn.ParameterDict()
+        )   # These should not be optimized
 
         self.progressive_words = progressive_words
         self.progressive_counter = 0
 
         self.max_vectors_per_token = num_vectors_per_token
 
-        if hasattr(embedder, 'tokenizer'): # using Stable Diffusion's CLIP encoder
+        if hasattr(
+            embedder, 'tokenizer'
+        ):   # using Stable Diffusion's CLIP encoder
             self.is_clip = True
-            get_token_for_string = partial(get_clip_token_for_string, embedder.tokenizer)
-            get_embedding_for_tkn = partial(get_embedding_for_clip_token, embedder.transformer.text_model.embeddings)
+            get_token_for_string = partial(
+                get_clip_token_for_string, embedder.tokenizer
+            )
+            get_embedding_for_tkn = partial(
+                get_embedding_for_clip_token,
+                embedder.transformer.text_model.embeddings,
+            )
             token_dim = 1280
-        else: # using LDM's BERT encoder
+        else:   # using LDM's BERT encoder
             self.is_clip = False
-            get_token_for_string = partial(get_bert_token_for_string, embedder.tknz_fn)
+            get_token_for_string = partial(
+                get_bert_token_for_string, embedder.tknz_fn
+            )
             get_embedding_for_tkn = embedder.transformer.token_emb
             token_dim = 1280
 
@@ -71,79 +94,140 @@ class EmbeddingManager(nn.Module):
             placeholder_strings.extend(per_img_token_list)
 
         for idx, placeholder_string in enumerate(placeholder_strings):
-            
+
             token = get_token_for_string(placeholder_string)
 
             if initializer_words and idx < len(initializer_words):
                 init_word_token = get_token_for_string(initializer_words[idx])
 
                 with torch.no_grad():
-                    init_word_embedding = get_embedding_for_tkn(init_word_token.cpu())
+                    init_word_embedding = get_embedding_for_tkn(
+                        init_word_token.cpu()
+                    )
 
-                token_params = torch.nn.Parameter(init_word_embedding.unsqueeze(0).repeat(num_vectors_per_token, 1), requires_grad=True)
-                self.initial_embeddings[placeholder_string] = torch.nn.Parameter(init_word_embedding.unsqueeze(0).repeat(num_vectors_per_token, 1), requires_grad=False)
+                token_params = torch.nn.Parameter(
+                    init_word_embedding.unsqueeze(0).repeat(
+                        num_vectors_per_token, 1
+                    ),
+                    requires_grad=True,
+                )
+                self.initial_embeddings[
+                    placeholder_string
+                ] = torch.nn.Parameter(
+                    init_word_embedding.unsqueeze(0).repeat(
+                        num_vectors_per_token, 1
+                    ),
+                    requires_grad=False,
+                )
             else:
-                token_params = torch.nn.Parameter(torch.rand(size=(num_vectors_per_token, token_dim), requires_grad=True))
-            
+                token_params = torch.nn.Parameter(
+                    torch.rand(
+                        size=(num_vectors_per_token, token_dim),
+                        requires_grad=True,
+                    )
+                )
+
             self.string_to_token_dict[placeholder_string] = token
             self.string_to_param_dict[placeholder_string] = token_params
 
     def forward(
-            self,
-            tokenized_text,
-            embedded_text,
+        self,
+        tokenized_text,
+        embedded_text,
     ):
         b, n, device = *tokenized_text.shape, tokenized_text.device
 
-        for placeholder_string, placeholder_token in self.string_to_token_dict.items():
+        for (
+            placeholder_string,
+            placeholder_token,
+        ) in self.string_to_token_dict.items():
 
-            placeholder_embedding = self.string_to_param_dict[placeholder_string].to(device)
+            placeholder_embedding = self.string_to_param_dict[
+                placeholder_string
+            ].to(device)
 
-            if self.max_vectors_per_token == 1: # If there's only one vector per token, we can do a simple replacement
-                placeholder_idx = torch.where(tokenized_text == placeholder_token.to(device))
+            if (
+                self.max_vectors_per_token == 1
+            ):   # If there's only one vector per token, we can do a simple replacement
+                placeholder_idx = torch.where(
+                    tokenized_text == placeholder_token.to(device)
+                )
                 embedded_text[placeholder_idx] = placeholder_embedding
-            else: # otherwise, need to insert and keep track of changing indices
+            else:   # otherwise, need to insert and keep track of changing indices
                 if self.progressive_words:
                     self.progressive_counter += 1
-                    max_step_tokens = 1 + self.progressive_counter // PROGRESSIVE_SCALE
+                    max_step_tokens = (
+                        1 + self.progressive_counter // PROGRESSIVE_SCALE
+                    )
                 else:
                     max_step_tokens = self.max_vectors_per_token
 
-                num_vectors_for_token = min(placeholder_embedding.shape[0], max_step_tokens)
+                num_vectors_for_token = min(
+                    placeholder_embedding.shape[0], max_step_tokens
+                )
 
-                placeholder_rows, placeholder_cols = torch.where(tokenized_text == placeholder_token.to(device))
+                placeholder_rows, placeholder_cols = torch.where(
+                    tokenized_text == placeholder_token.to(device)
+                )
 
                 if placeholder_rows.nelement() == 0:
                     continue
 
-                sorted_cols, sort_idx = torch.sort(placeholder_cols, descending=True)
+                sorted_cols, sort_idx = torch.sort(
+                    placeholder_cols, descending=True
+                )
                 sorted_rows = placeholder_rows[sort_idx]
 
                 for idx in range(len(sorted_rows)):
                     row = sorted_rows[idx]
                     col = sorted_cols[idx]
 
-                    new_token_row = torch.cat([tokenized_text[row][:col], placeholder_token.repeat(num_vectors_for_token).to(device), tokenized_text[row][col + 1:]], axis=0)[:n]
-                    new_embed_row = torch.cat([embedded_text[row][:col], placeholder_embedding[:num_vectors_for_token], embedded_text[row][col + 1:]], axis=0)[:n]
+                    new_token_row = torch.cat(
+                        [
+                            tokenized_text[row][:col],
+                            placeholder_token.repeat(num_vectors_for_token).to(
+                                device
+                            ),
+                            tokenized_text[row][col + 1 :],
+                        ],
+                        axis=0,
+                    )[:n]
+                    new_embed_row = torch.cat(
+                        [
+                            embedded_text[row][:col],
+                            placeholder_embedding[:num_vectors_for_token],
+                            embedded_text[row][col + 1 :],
+                        ],
+                        axis=0,
+                    )[:n]
 
-                    embedded_text[row]  = new_embed_row
+                    embedded_text[row] = new_embed_row
                     tokenized_text[row] = new_token_row
 
         return embedded_text
 
     def save(self, ckpt_path):
-        torch.save({"string_to_token": self.string_to_token_dict,
-                    "string_to_param": self.string_to_param_dict}, ckpt_path)
+        torch.save(
+            {
+                'string_to_token': self.string_to_token_dict,
+                'string_to_param': self.string_to_param_dict,
+            },
+            ckpt_path,
+        )
 
     def load(self, ckpt_path):
         ckpt = torch.load(ckpt_path, map_location='cpu')
 
-        self.string_to_token_dict = ckpt["string_to_token"]
-        self.string_to_param_dict = ckpt["string_to_param"]
+        self.string_to_token_dict = ckpt['string_to_token']
+        self.string_to_param_dict = ckpt['string_to_param']
 
     def get_embedding_norms_squared(self):
-        all_params = torch.cat(list(self.string_to_param_dict.values()), axis=0) # num_placeholders x embedding_dim
-        param_norm_squared = (all_params * all_params).sum(axis=-1)              # num_placeholders
+        all_params = torch.cat(
+            list(self.string_to_param_dict.values()), axis=0
+        )   # num_placeholders x embedding_dim
+        param_norm_squared = (all_params * all_params).sum(
+            axis=-1
+        )              # num_placeholders
 
         return param_norm_squared
 
@@ -151,14 +235,19 @@ class EmbeddingManager(nn.Module):
         return self.string_to_param_dict.parameters()
 
     def embedding_to_coarse_loss(self):
-        
-        loss = 0.
+
+        loss = 0.0
         num_embeddings = len(self.initial_embeddings)
 
         for key in self.initial_embeddings:
             optimized = self.string_to_param_dict[key]
             coarse = self.initial_embeddings[key].clone().to(optimized.device)
 
-            loss = loss + (optimized - coarse) @ (optimized - coarse).T / num_embeddings
+            loss = (
+                loss
+                + (optimized - coarse)
+                @ (optimized - coarse).T
+                / num_embeddings
+            )
 
-        return loss
\ No newline at end of file
+        return loss
diff --git a/ldm/modules/encoders/modules.py b/ldm/modules/encoders/modules.py
index def6d2136d..2c25948b5c 100644
--- a/ldm/modules/encoders/modules.py
+++ b/ldm/modules/encoders/modules.py
@@ -6,29 +6,39 @@ from einops import rearrange, repeat
 from transformers import CLIPTokenizer, CLIPTextModel
 import kornia
 
-from ldm.modules.x_transformer import Encoder, TransformerWrapper  # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test
+from ldm.modules.x_transformer import (
+    Encoder,
+    TransformerWrapper,
+)  # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test
 
-def _expand_mask(mask, dtype, tgt_len = None):
+
+def _expand_mask(mask, dtype, tgt_len=None):
     """
     Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
     """
     bsz, src_len = mask.size()
     tgt_len = tgt_len if tgt_len is not None else src_len
 
-    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+    expanded_mask = (
+        mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+    )
 
     inverted_mask = 1.0 - expanded_mask
 
-    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
+    return inverted_mask.masked_fill(
+        inverted_mask.to(torch.bool), torch.finfo(dtype).min
+    )
+
 
 def _build_causal_attention_mask(bsz, seq_len, dtype):
-        # lazily create causal attention mask, with full attention between the vision tokens
-        # pytorch uses additive attention mask; fill with -inf
-        mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype)
-        mask.fill_(torch.tensor(torch.finfo(dtype).min))
-        mask.triu_(1)  # zero out the lower diagonal
-        mask = mask.unsqueeze(1)  # expand mask
-        return mask
+    # lazily create causal attention mask, with full attention between the vision tokens
+    # pytorch uses additive attention mask; fill with -inf
+    mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype)
+    mask.fill_(torch.tensor(torch.finfo(dtype).min))
+    mask.triu_(1)  # zero out the lower diagonal
+    mask = mask.unsqueeze(1)  # expand mask
+    return mask
+
 
 class AbstractEncoder(nn.Module):
     def __init__(self):
@@ -38,7 +48,6 @@ class AbstractEncoder(nn.Module):
         raise NotImplementedError
 
 
-
 class ClassEmbedder(nn.Module):
     def __init__(self, embed_dim, n_classes=1000, key='class'):
         super().__init__()
@@ -56,11 +65,17 @@ class ClassEmbedder(nn.Module):
 
 class TransformerEmbedder(AbstractEncoder):
     """Some transformer encoder layers"""
-    def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
+
+    def __init__(
+        self, n_embed, n_layer, vocab_size, max_seq_len=77, device='cuda'
+    ):
         super().__init__()
         self.device = device
-        self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
-                                              attn_layers=Encoder(dim=n_embed, depth=n_layer))
+        self.transformer = TransformerWrapper(
+            num_tokens=vocab_size,
+            max_seq_len=max_seq_len,
+            attn_layers=Encoder(dim=n_embed, depth=n_layer),
+        )
 
     def forward(self, tokens):
         tokens = tokens.to(self.device)  # meh
@@ -72,27 +87,42 @@ class TransformerEmbedder(AbstractEncoder):
 
 
 class BERTTokenizer(AbstractEncoder):
-    """ Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)"""
-    def __init__(self, device="cuda", vq_interface=True, max_length=77):
+    """Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)"""
+
+    def __init__(self, device='cuda', vq_interface=True, max_length=77):
         super().__init__()
-        from transformers import BertTokenizerFast  # TODO: add to reuquirements
+        from transformers import (
+            BertTokenizerFast,
+        )  # TODO: add to requirements
+
         # Modified to allow to run on non-internet connected compute nodes.
         # Model needs to be loaded into cache from an internet-connected machine
         # by running:
         #   from transformers import BertTokenizerFast
         #   BertTokenizerFast.from_pretrained("bert-base-uncased")
         try:
-            self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased",local_files_only=True)
+            self.tokenizer = BertTokenizerFast.from_pretrained(
+                'bert-base-uncased', local_files_only=True
+            )
         except OSError:
-            raise SystemExit("* Couldn't load Bert tokenizer files. Try running scripts/preload_models.py from an internet-conected machine.")
+            raise SystemExit(
+                "* Couldn't load Bert tokenizer files. Try running scripts/preload_models.py from an internet-conected machine."
+            )
         self.device = device
         self.vq_interface = vq_interface
         self.max_length = max_length
 
     def forward(self, text):
-        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
-                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
-        tokens = batch_encoding["input_ids"].to(self.device)
+        batch_encoding = self.tokenizer(
+            text,
+            truncation=True,
+            max_length=self.max_length,
+            return_length=True,
+            return_overflowing_tokens=False,
+            padding='max_length',
+            return_tensors='pt',
+        )
+        tokens = batch_encoding['input_ids'].to(self.device)
         return tokens
 
     @torch.no_grad()
@@ -108,53 +138,84 @@ class BERTTokenizer(AbstractEncoder):
 
 class BERTEmbedder(AbstractEncoder):
     """Uses the BERT tokenizr model and add some transformer encoder layers"""
-    def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
-                 device="cuda",use_tokenizer=True, embedding_dropout=0.0):
+
+    def __init__(
+        self,
+        n_embed,
+        n_layer,
+        vocab_size=30522,
+        max_seq_len=77,
+        device='cuda',
+        use_tokenizer=True,
+        embedding_dropout=0.0,
+    ):
         super().__init__()
         self.use_tknz_fn = use_tokenizer
         if self.use_tknz_fn:
-            self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
+            self.tknz_fn = BERTTokenizer(
+                vq_interface=False, max_length=max_seq_len
+            )
         self.device = device
-        self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
-                                              attn_layers=Encoder(dim=n_embed, depth=n_layer),
-                                              emb_dropout=embedding_dropout)
+        self.transformer = TransformerWrapper(
+            num_tokens=vocab_size,
+            max_seq_len=max_seq_len,
+            attn_layers=Encoder(dim=n_embed, depth=n_layer),
+            emb_dropout=embedding_dropout,
+        )
 
     def forward(self, text, embedding_manager=None):
         if self.use_tknz_fn:
-            tokens = self.tknz_fn(text)#.to(self.device)
+            tokens = self.tknz_fn(text)  # .to(self.device)
         else:
             tokens = text
-        z = self.transformer(tokens, return_embeddings=True, embedding_manager=embedding_manager)
+        z = self.transformer(
+            tokens, return_embeddings=True, embedding_manager=embedding_manager
+        )
         return z
 
     def encode(self, text, **kwargs):
         # output of length 77
         return self(text, **kwargs)
 
+
 class SpatialRescaler(nn.Module):
-    def __init__(self,
-                 n_stages=1,
-                 method='bilinear',
-                 multiplier=0.5,
-                 in_channels=3,
-                 out_channels=None,
-                 bias=False):
+    def __init__(
+        self,
+        n_stages=1,
+        method='bilinear',
+        multiplier=0.5,
+        in_channels=3,
+        out_channels=None,
+        bias=False,
+    ):
         super().__init__()
         self.n_stages = n_stages
         assert self.n_stages >= 0
-        assert method in ['nearest','linear','bilinear','trilinear','bicubic','area']
+        assert method in [
+            'nearest',
+            'linear',
+            'bilinear',
+            'trilinear',
+            'bicubic',
+            'area',
+        ]
         self.multiplier = multiplier
-        self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
+        self.interpolator = partial(
+            torch.nn.functional.interpolate, mode=method
+        )
         self.remap_output = out_channels is not None
         if self.remap_output:
-            print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
-            self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias)
+            print(
+                f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.'
+            )
+            self.channel_mapper = nn.Conv2d(
+                in_channels, out_channels, 1, bias=bias
+            )
 
-    def forward(self,x):
+    def forward(self, x):
         for stage in range(self.n_stages):
             x = self.interpolator(x, scale_factor=self.multiplier)
 
-
         if self.remap_output:
             x = self.channel_mapper(x)
         return x
@@ -162,57 +223,83 @@ class SpatialRescaler(nn.Module):
     def encode(self, x):
         return self(x)
 
+
 class FrozenCLIPEmbedder(AbstractEncoder):
     """Uses the CLIP transformer encoder for text (from Hugging Face)"""
-    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77):
+
+    def __init__(
+        self,
+        version='openai/clip-vit-large-patch14',
+        device='cuda',
+        max_length=77,
+    ):
         super().__init__()
-        self.tokenizer = CLIPTokenizer.from_pretrained(version,local_files_only=True)
-        self.transformer = CLIPTextModel.from_pretrained(version,local_files_only=True)
+        self.tokenizer = CLIPTokenizer.from_pretrained(
+            version, local_files_only=True
+        )
+        self.transformer = CLIPTextModel.from_pretrained(
+            version, local_files_only=True
+        )
         self.device = device
         self.max_length = max_length
         self.freeze()
 
         def embedding_forward(
-                self,
-                input_ids = None,
-                position_ids = None,
-                inputs_embeds = None,
-                embedding_manager = None,
-            ) -> torch.Tensor:
+            self,
+            input_ids=None,
+            position_ids=None,
+            inputs_embeds=None,
+            embedding_manager=None,
+        ) -> torch.Tensor:
 
-                seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
+            seq_length = (
+                input_ids.shape[-1]
+                if input_ids is not None
+                else inputs_embeds.shape[-2]
+            )
 
-                if position_ids is None:
-                    position_ids = self.position_ids[:, :seq_length]
+            if position_ids is None:
+                position_ids = self.position_ids[:, :seq_length]
 
-                if inputs_embeds is None:
-                    inputs_embeds = self.token_embedding(input_ids)
+            if inputs_embeds is None:
+                inputs_embeds = self.token_embedding(input_ids)
 
-                if embedding_manager is not None:
-                    inputs_embeds = embedding_manager(input_ids, inputs_embeds)
+            if embedding_manager is not None:
+                inputs_embeds = embedding_manager(input_ids, inputs_embeds)
 
+            position_embeddings = self.position_embedding(position_ids)
+            embeddings = inputs_embeds + position_embeddings
 
-                position_embeddings = self.position_embedding(position_ids)
-                embeddings = inputs_embeds + position_embeddings
-                
-                return embeddings      
+            return embeddings
 
-        self.transformer.text_model.embeddings.forward = embedding_forward.__get__(self.transformer.text_model.embeddings)
+        self.transformer.text_model.embeddings.forward = (
+            embedding_forward.__get__(self.transformer.text_model.embeddings)
+        )
 
         def encoder_forward(
             self,
             inputs_embeds,
-            attention_mask = None,
-            causal_attention_mask = None,
-            output_attentions = None,
-            output_hidden_states = None,
-            return_dict = None,
+            attention_mask=None,
+            causal_attention_mask=None,
+            output_attentions=None,
+            output_hidden_states=None,
+            return_dict=None,
         ):
-            output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-            output_hidden_states = (
-                output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+            output_attentions = (
+                output_attentions
+                if output_attentions is not None
+                else self.config.output_attentions
+            )
+            output_hidden_states = (
+                output_hidden_states
+                if output_hidden_states is not None
+                else self.config.output_hidden_states
+            )
+            return_dict = (
+                return_dict
+                if return_dict is not None
+                else self.config.use_return_dict
             )
-            return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
             encoder_states = () if output_hidden_states else None
             all_attentions = () if output_attentions else None
@@ -239,44 +326,61 @@ class FrozenCLIPEmbedder(AbstractEncoder):
 
             return hidden_states
 
-        self.transformer.text_model.encoder.forward = encoder_forward.__get__(self.transformer.text_model.encoder)
-
+        self.transformer.text_model.encoder.forward = encoder_forward.__get__(
+            self.transformer.text_model.encoder
+        )
 
         def text_encoder_forward(
             self,
-            input_ids = None,
-            attention_mask = None,
-            position_ids = None,
-            output_attentions = None,
-            output_hidden_states = None,
-            return_dict = None,
-            embedding_manager = None,
+            input_ids=None,
+            attention_mask=None,
+            position_ids=None,
+            output_attentions=None,
+            output_hidden_states=None,
+            return_dict=None,
+            embedding_manager=None,
         ):
-            output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-            output_hidden_states = (
-                output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+            output_attentions = (
+                output_attentions
+                if output_attentions is not None
+                else self.config.output_attentions
+            )
+            output_hidden_states = (
+                output_hidden_states
+                if output_hidden_states is not None
+                else self.config.output_hidden_states
+            )
+            return_dict = (
+                return_dict
+                if return_dict is not None
+                else self.config.use_return_dict
             )
-            return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
             if input_ids is None:
-                raise ValueError("You have to specify either input_ids")
+                raise ValueError('You have to specify either input_ids')
 
             input_shape = input_ids.size()
             input_ids = input_ids.view(-1, input_shape[-1])
 
-            hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids, embedding_manager=embedding_manager)
+            hidden_states = self.embeddings(
+                input_ids=input_ids,
+                position_ids=position_ids,
+                embedding_manager=embedding_manager,
+            )
 
             bsz, seq_len = input_shape
             # CLIP's text model uses causal mask, prepare it here.
             # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
-            causal_attention_mask = _build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to(
-                hidden_states.device
-            )
+            causal_attention_mask = _build_causal_attention_mask(
+                bsz, seq_len, hidden_states.dtype
+            ).to(hidden_states.device)
 
             # expand attention_mask
             if attention_mask is not None:
                 # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
-                attention_mask = _expand_mask(attention_mask, hidden_states.dtype)
+                attention_mask = _expand_mask(
+                    attention_mask, hidden_states.dtype
+                )
 
             last_hidden_state = self.encoder(
                 inputs_embeds=hidden_states,
@@ -291,17 +395,19 @@ class FrozenCLIPEmbedder(AbstractEncoder):
 
             return last_hidden_state
 
-        self.transformer.text_model.forward = text_encoder_forward.__get__(self.transformer.text_model)
+        self.transformer.text_model.forward = text_encoder_forward.__get__(
+            self.transformer.text_model
+        )
 
         def transformer_forward(
             self,
-            input_ids = None,
-            attention_mask = None,
-            position_ids = None,
-            output_attentions = None,
-            output_hidden_states = None,
-            return_dict = None,
-            embedding_manager = None,
+            input_ids=None,
+            attention_mask=None,
+            position_ids=None,
+            output_attentions=None,
+            output_hidden_states=None,
+            return_dict=None,
+            embedding_manager=None,
         ):
             return self.text_model(
                 input_ids=input_ids,
@@ -310,11 +416,12 @@ class FrozenCLIPEmbedder(AbstractEncoder):
                 output_attentions=output_attentions,
                 output_hidden_states=output_hidden_states,
                 return_dict=return_dict,
-                embedding_manager = embedding_manager
+                embedding_manager=embedding_manager,
             )
 
-        self.transformer.forward = transformer_forward.__get__(self.transformer)
-
+        self.transformer.forward = transformer_forward.__get__(
+            self.transformer
+        )
 
     def freeze(self):
         self.transformer = self.transformer.eval()
@@ -322,9 +429,16 @@ class FrozenCLIPEmbedder(AbstractEncoder):
             param.requires_grad = False
 
     def forward(self, text, **kwargs):
-        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
-                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
-        tokens = batch_encoding["input_ids"].to(self.device)
+        batch_encoding = self.tokenizer(
+            text,
+            truncation=True,
+            max_length=self.max_length,
+            return_length=True,
+            return_overflowing_tokens=False,
+            padding='max_length',
+            return_tensors='pt',
+        )
+        tokens = batch_encoding['input_ids'].to(self.device)
         z = self.transformer(input_ids=tokens, **kwargs)
 
         return z
@@ -337,9 +451,17 @@ class FrozenCLIPTextEmbedder(nn.Module):
     """
     Uses the CLIP transformer encoder for text.
     """
-    def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True):
+
+    def __init__(
+        self,
+        version='ViT-L/14',
+        device='cuda',
+        max_length=77,
+        n_repeat=1,
+        normalize=True,
+    ):
         super().__init__()
-        self.model, _ = clip.load(version, jit=False, device="cpu")
+        self.model, _ = clip.load(version, jit=False, device='cpu')
         self.device = device
         self.max_length = max_length
         self.n_repeat = n_repeat
@@ -359,7 +481,7 @@ class FrozenCLIPTextEmbedder(nn.Module):
 
     def encode(self, text):
         z = self(text)
-        if z.ndim==2:
+        if z.ndim == 2:
             z = z[:, None, :]
         z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat)
         return z
@@ -367,29 +489,42 @@ class FrozenCLIPTextEmbedder(nn.Module):
 
 class FrozenClipImageEmbedder(nn.Module):
     """
-        Uses the CLIP image encoder.
-        """
+    Uses the CLIP image encoder.
+    """
+
     def __init__(
-            self,
-            model,
-            jit=False,
-            device='cuda' if torch.cuda.is_available() else 'cpu',
-            antialias=False,
-        ):
+        self,
+        model,
+        jit=False,
+        device='cuda' if torch.cuda.is_available() else 'cpu',
+        antialias=False,
+    ):
         super().__init__()
         self.model, _ = clip.load(name=model, device=device, jit=jit)
 
         self.antialias = antialias
 
-        self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
-        self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)
+        self.register_buffer(
+            'mean',
+            torch.Tensor([0.48145466, 0.4578275, 0.40821073]),
+            persistent=False,
+        )
+        self.register_buffer(
+            'std',
+            torch.Tensor([0.26862954, 0.26130258, 0.27577711]),
+            persistent=False,
+        )
 
     def preprocess(self, x):
         # normalize to [0,1]
-        x = kornia.geometry.resize(x, (224, 224),
-                                   interpolation='bicubic',align_corners=True,
-                                   antialias=self.antialias)
-        x = (x + 1.) / 2.
+        x = kornia.geometry.resize(
+            x,
+            (224, 224),
+            interpolation='bicubic',
+            align_corners=True,
+            antialias=self.antialias,
+        )
+        x = (x + 1.0) / 2.0
         # renormalize according to clip
         x = kornia.enhance.normalize(x, self.mean, self.std)
         return x
@@ -399,7 +534,8 @@ class FrozenClipImageEmbedder(nn.Module):
         return self.model.encode_image(self.preprocess(x))
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     from ldm.util import count_params
+
     model = FrozenCLIPEmbedder()
     count_params(model, verbose=True)
diff --git a/ldm/modules/image_degradation/__init__.py b/ldm/modules/image_degradation/__init__.py
index 7836cada81..c6b3b62ea8 100644
--- a/ldm/modules/image_degradation/__init__.py
+++ b/ldm/modules/image_degradation/__init__.py
@@ -1,2 +1,6 @@
-from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr
-from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light
+from ldm.modules.image_degradation.bsrgan import (
+    degradation_bsrgan_variant as degradation_fn_bsr,
+)
+from ldm.modules.image_degradation.bsrgan_light import (
+    degradation_bsrgan_variant as degradation_fn_bsr_light,
+)
diff --git a/ldm/modules/image_degradation/bsrgan.py b/ldm/modules/image_degradation/bsrgan.py
index 32ef561699..b51217bd48 100644
--- a/ldm/modules/image_degradation/bsrgan.py
+++ b/ldm/modules/image_degradation/bsrgan.py
@@ -27,16 +27,16 @@ import ldm.modules.image_degradation.utils_image as util
 
 
 def modcrop_np(img, sf):
-    '''
+    """
     Args:
         img: numpy image, WxH or WxHxC
         sf: scale factor
     Return:
         cropped image
-    '''
+    """
     w, h = img.shape[:2]
     im = np.copy(img)
-    return im[:w - w % sf, :h - h % sf, ...]
+    return im[: w - w % sf, : h - h % sf, ...]
 
 
 """
@@ -54,7 +54,9 @@ def analytic_kernel(k):
     # Loop over the small kernel to fill the big one
     for r in range(k_size):
         for c in range(k_size):
-            big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
+            big_k[2 * r : 2 * r + k_size, 2 * c : 2 * c + k_size] += (
+                k[r, c] * k
+            )
     # Crop the edges of the big kernel to ignore very small values and increase run time of SR
     crop = k_size // 2
     cropped_big_k = big_k[crop:-crop, crop:-crop]
@@ -63,7 +65,7 @@ def analytic_kernel(k):
 
 
 def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
-    """ generate an anisotropic Gaussian kernel
+    """generate an anisotropic Gaussian kernel
     Args:
         ksize : e.g., 15, kernel size
         theta : [0,  pi], rotation angle range
@@ -74,7 +76,12 @@ def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
         k     : kernel
     """
 
-    v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
+    v = np.dot(
+        np.array(
+            [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]
+        ),
+        np.array([1.0, 0.0]),
+    )
     V = np.array([[v[0], v[1]], [v[1], -v[0]]])
     D = np.array([[l1, 0], [0, l2]])
     Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
@@ -126,24 +133,32 @@ def shift_pixel(x, sf, upper_left=True):
 
 
 def blur(x, k):
-    '''
+    """
     x: image, NxcxHxW
     k: kernel, Nx1xhxw
-    '''
+    """
     n, c = x.shape[:2]
     p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
     x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
     k = k.repeat(1, c, 1, 1)
     k = k.view(-1, 1, k.shape[2], k.shape[3])
     x = x.view(1, -1, x.shape[2], x.shape[3])
-    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
+    x = torch.nn.functional.conv2d(
+        x, k, bias=None, stride=1, padding=0, groups=n * c
+    )
     x = x.view(n, c, x.shape[2], x.shape[3])
 
     return x
 
 
-def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
-    """"
+def gen_kernel(
+    k_size=np.array([15, 15]),
+    scale_factor=np.array([4, 4]),
+    min_var=0.6,
+    max_var=10.0,
+    noise_level=0,
+):
+    """ "
     # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
     # Kai Zhang
     # min_var = 0.175 * sf  # variance of the gaussian kernel will be sampled between min_var and max_var
@@ -157,13 +172,16 @@ def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var
 
     # Set COV matrix using Lambdas and Theta
     LAMBDA = np.diag([lambda_1, lambda_2])
-    Q = np.array([[np.cos(theta), -np.sin(theta)],
-                  [np.sin(theta), np.cos(theta)]])
+    Q = np.array(
+        [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]
+    )
     SIGMA = Q @ LAMBDA @ Q.T
     INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
 
     # Set expectation position (shifting kernel for aligned image)
-    MU = k_size // 2 - 0.5 * (scale_factor - 1)  # - 0.5 * (scale_factor - k_size % 2)
+    MU = k_size // 2 - 0.5 * (
+        scale_factor - 1
+    )  # - 0.5 * (scale_factor - k_size % 2)
     MU = MU[None, None, :, None]
 
     # Create meshgrid for Gaussian
@@ -188,7 +206,9 @@ def fspecial_gaussian(hsize, sigma):
     hsize = [hsize, hsize]
     siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
     std = sigma
-    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
+    [x, y] = np.meshgrid(
+        np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)
+    )
     arg = -(x * x + y * y) / (2 * std * std)
     h = np.exp(arg)
     h[h < scipy.finfo(float).eps * h.max()] = 0
@@ -208,10 +228,10 @@ def fspecial_laplacian(alpha):
 
 
 def fspecial(filter_type, *args, **kwargs):
-    '''
+    """
     python code from:
     https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
-    '''
+    """
     if filter_type == 'gaussian':
         return fspecial_gaussian(*args, **kwargs)
     if filter_type == 'laplacian':
@@ -226,19 +246,19 @@ def fspecial(filter_type, *args, **kwargs):
 
 
 def bicubic_degradation(x, sf=3):
-    '''
+    """
     Args:
         x: HxWxC image, [0, 1]
         sf: down-scale factor
     Return:
         bicubicly downsampled LR image
-    '''
+    """
     x = util.imresize_np(x, scale=1 / sf)
     return x
 
 
 def srmd_degradation(x, k, sf=3):
-    ''' blur + bicubic downsampling
+    """blur + bicubic downsampling
     Args:
         x: HxWxC image, [0, 1]
         k: hxw, double
@@ -253,14 +273,16 @@ def srmd_degradation(x, k, sf=3):
           pages={3262--3271},
           year={2018}
         }
-    '''
-    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
+    """
+    x = ndimage.filters.convolve(
+        x, np.expand_dims(k, axis=2), mode='wrap'
+    )  # 'nearest' | 'mirror'
     x = bicubic_degradation(x, sf=sf)
     return x
 
 
 def dpsr_degradation(x, k, sf=3):
-    ''' bicubic downsampling + blur
+    """bicubic downsampling + blur
     Args:
         x: HxWxC image, [0, 1]
         k: hxw, double
@@ -275,21 +297,21 @@ def dpsr_degradation(x, k, sf=3):
           pages={1671--1681},
           year={2019}
         }
-    '''
+    """
     x = bicubic_degradation(x, sf=sf)
     x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
     return x
 
 
 def classical_degradation(x, k, sf=3):
-    ''' blur + downsampling
+    """blur + downsampling
     Args:
         x: HxWxC image, [0, 1]/[0, 255]
         k: hxw, double
         sf: down-scale factor
     Return:
         downsampled LR image
-    '''
+    """
     x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
     # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
     st = 0
@@ -328,10 +350,19 @@ def add_blur(img, sf=4):
     if random.random() < 0.5:
         l1 = wd2 * random.random()
         l2 = wd2 * random.random()
-        k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
+        k = anisotropic_Gaussian(
+            ksize=2 * random.randint(2, 11) + 3,
+            theta=random.random() * np.pi,
+            l1=l1,
+            l2=l2,
+        )
     else:
-        k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random())
-    img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
+        k = fspecial(
+            'gaussian', 2 * random.randint(2, 11) + 3, wd * random.random()
+        )
+    img = ndimage.filters.convolve(
+        img, np.expand_dims(k, axis=2), mode='mirror'
+    )
 
     return img
 
@@ -344,7 +375,11 @@ def add_resize(img, sf=4):
         sf1 = random.uniform(0.5 / sf, 1)
     else:
         sf1 = 1.0
-    img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
+    img = cv2.resize(
+        img,
+        (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])),
+        interpolation=random.choice([1, 2, 3]),
+    )
     img = np.clip(img, 0.0, 1.0)
 
     return img
@@ -366,19 +401,26 @@ def add_resize(img, sf=4):
 #     img = np.clip(img, 0.0, 1.0)
 #     return img
 
+
 def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
     noise_level = random.randint(noise_level1, noise_level2)
     rnum = np.random.rand()
     if rnum > 0.6:  # add color Gaussian noise
-        img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
+        img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(
+            np.float32
+        )
     elif rnum < 0.4:  # add grayscale Gaussian noise
-        img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
+        img = img + np.random.normal(
+            0, noise_level / 255.0, (*img.shape[:2], 1)
+        ).astype(np.float32)
     else:  # add  noise
-        L = noise_level2 / 255.
+        L = noise_level2 / 255.0
         D = np.diag(np.random.rand(3))
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
-        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+        img = img + np.random.multivariate_normal(
+            [0, 0, 0], np.abs(L**2 * conv), img.shape[:2]
+        ).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
 
@@ -388,28 +430,37 @@ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
     img = np.clip(img, 0.0, 1.0)
     rnum = random.random()
     if rnum > 0.6:
-        img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
+        img += img * np.random.normal(
+            0, noise_level / 255.0, img.shape
+        ).astype(np.float32)
     elif rnum < 0.4:
-        img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
+        img += img * np.random.normal(
+            0, noise_level / 255.0, (*img.shape[:2], 1)
+        ).astype(np.float32)
     else:
-        L = noise_level2 / 255.
+        L = noise_level2 / 255.0
         D = np.diag(np.random.rand(3))
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
-        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+        img += img * np.random.multivariate_normal(
+            [0, 0, 0], np.abs(L**2 * conv), img.shape[:2]
+        ).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
 
 
 def add_Poisson_noise(img):
-    img = np.clip((img * 255.0).round(), 0, 255) / 255.
+    img = np.clip((img * 255.0).round(), 0, 255) / 255.0
     vals = 10 ** (2 * random.random() + 2.0)  # [2, 4]
     if random.random() < 0.5:
         img = np.random.poisson(img * vals).astype(np.float32) / vals
     else:
         img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
-        img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
-        noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
+        img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.0
+        noise_gray = (
+            np.random.poisson(img_gray * vals).astype(np.float32) / vals
+            - img_gray
+        )
         img += noise_gray[:, :, np.newaxis]
     img = np.clip(img, 0.0, 1.0)
     return img
@@ -418,7 +469,9 @@ def add_Poisson_noise(img):
 def add_JPEG_noise(img):
     quality_factor = random.randint(30, 95)
     img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
-    result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
+    result, encimg = cv2.imencode(
+        '.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]
+    )
     img = cv2.imdecode(encimg, 1)
     img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
     return img
@@ -428,10 +481,14 @@ def random_crop(lq, hq, sf=4, lq_patchsize=64):
     h, w = lq.shape[:2]
     rnd_h = random.randint(0, h - lq_patchsize)
     rnd_w = random.randint(0, w - lq_patchsize)
-    lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
+    lq = lq[rnd_h : rnd_h + lq_patchsize, rnd_w : rnd_w + lq_patchsize, :]
 
     rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
-    hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
+    hq = hq[
+        rnd_h_H : rnd_h_H + lq_patchsize * sf,
+        rnd_w_H : rnd_w_H + lq_patchsize * sf,
+        :,
+    ]
     return lq, hq
 
 
@@ -452,7 +509,7 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
     sf_ori = sf
 
     h1, w1 = img.shape[:2]
-    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
+    img = img.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...]  # mod crop
     h, w = img.shape[:2]
 
     if h < lq_patchsize * sf or w < lq_patchsize * sf:
@@ -462,8 +519,11 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
 
     if sf == 4 and random.random() < scale2_prob:  # downsample1
         if np.random.rand() < 0.5:
-            img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
-                             interpolation=random.choice([1, 2, 3]))
+            img = cv2.resize(
+                img,
+                (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
+                interpolation=random.choice([1, 2, 3]),
+            )
         else:
             img = util.imresize_np(img, 1 / 2, True)
         img = np.clip(img, 0.0, 1.0)
@@ -472,7 +532,10 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
     shuffle_order = random.sample(range(7), 7)
     idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
     if idx1 > idx2:  # keep downsample3 last
-        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
+        shuffle_order[idx1], shuffle_order[idx2] = (
+            shuffle_order[idx2],
+            shuffle_order[idx1],
+        )
 
     for i in shuffle_order:
 
@@ -487,19 +550,30 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
             # downsample2
             if random.random() < 0.75:
                 sf1 = random.uniform(1, 2 * sf)
-                img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
-                                 interpolation=random.choice([1, 2, 3]))
+                img = cv2.resize(
+                    img,
+                    (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
+                    interpolation=random.choice([1, 2, 3]),
+                )
             else:
                 k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                 k_shifted = shift_pixel(k, sf)
-                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
-                img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
+                k_shifted = (
+                    k_shifted / k_shifted.sum()
+                )  # blur with shifted kernel
+                img = ndimage.filters.convolve(
+                    img, np.expand_dims(k_shifted, axis=2), mode='mirror'
+                )
                 img = img[0::sf, 0::sf, ...]  # nearest downsampling
             img = np.clip(img, 0.0, 1.0)
 
         elif i == 3:
             # downsample3
-            img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
+            img = cv2.resize(
+                img,
+                (int(1 / sf * a), int(1 / sf * b)),
+                interpolation=random.choice([1, 2, 3]),
+            )
             img = np.clip(img, 0.0, 1.0)
 
         elif i == 4:
@@ -544,15 +618,18 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
     sf_ori = sf
 
     h1, w1 = image.shape[:2]
-    image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
+    image = image.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...]  # mod crop
     h, w = image.shape[:2]
 
     hq = image.copy()
 
     if sf == 4 and random.random() < scale2_prob:  # downsample1
         if np.random.rand() < 0.5:
-            image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
-                               interpolation=random.choice([1, 2, 3]))
+            image = cv2.resize(
+                image,
+                (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
+                interpolation=random.choice([1, 2, 3]),
+            )
         else:
             image = util.imresize_np(image, 1 / 2, True)
         image = np.clip(image, 0.0, 1.0)
@@ -561,7 +638,10 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
     shuffle_order = random.sample(range(7), 7)
     idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
     if idx1 > idx2:  # keep downsample3 last
-        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
+        shuffle_order[idx1], shuffle_order[idx2] = (
+            shuffle_order[idx2],
+            shuffle_order[idx1],
+        )
 
     for i in shuffle_order:
 
@@ -576,19 +656,33 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
             # downsample2
             if random.random() < 0.75:
                 sf1 = random.uniform(1, 2 * sf)
-                image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
-                                   interpolation=random.choice([1, 2, 3]))
+                image = cv2.resize(
+                    image,
+                    (
+                        int(1 / sf1 * image.shape[1]),
+                        int(1 / sf1 * image.shape[0]),
+                    ),
+                    interpolation=random.choice([1, 2, 3]),
+                )
             else:
                 k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                 k_shifted = shift_pixel(k, sf)
-                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
-                image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
+                k_shifted = (
+                    k_shifted / k_shifted.sum()
+                )  # blur with shifted kernel
+                image = ndimage.filters.convolve(
+                    image, np.expand_dims(k_shifted, axis=2), mode='mirror'
+                )
                 image = image[0::sf, 0::sf, ...]  # nearest downsampling
             image = np.clip(image, 0.0, 1.0)
 
         elif i == 3:
             # downsample3
-            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
+            image = cv2.resize(
+                image,
+                (int(1 / sf * a), int(1 / sf * b)),
+                interpolation=random.choice([1, 2, 3]),
+            )
             image = np.clip(image, 0.0, 1.0)
 
         elif i == 4:
@@ -609,12 +703,19 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
     # add final JPEG compression noise
     image = add_JPEG_noise(image)
     image = util.single2uint(image)
-    example = {"image":image}
+    example = {'image': image}
     return example
 
 
 # TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc...
-def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None):
+def degradation_bsrgan_plus(
+    img,
+    sf=4,
+    shuffle_prob=0.5,
+    use_sharp=True,
+    lq_patchsize=64,
+    isp_model=None,
+):
     """
     This is an extended degradation model by combining
     the degradation models of BSRGAN and Real-ESRGAN
@@ -630,7 +731,7 @@ def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patc
     """
 
     h1, w1 = img.shape[:2]
-    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
+    img = img.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...]  # mod crop
     h, w = img.shape[:2]
 
     if h < lq_patchsize * sf or w < lq_patchsize * sf:
@@ -645,8 +746,12 @@ def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patc
     else:
         shuffle_order = list(range(13))
         # local shuffle for noise, JPEG is always the last one
-        shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
-        shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))
+        shuffle_order[2:6] = random.sample(
+            shuffle_order[2:6], len(range(2, 6))
+        )
+        shuffle_order[9:13] = random.sample(
+            shuffle_order[9:13], len(range(9, 13))
+        )
 
     poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1
 
@@ -689,8 +794,11 @@ def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patc
             print('check the shuffle!')
 
     # resize to desired size
-    img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])),
-                     interpolation=random.choice([1, 2, 3]))
+    img = cv2.resize(
+        img,
+        (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])),
+        interpolation=random.choice([1, 2, 3]),
+    )
 
     # add final JPEG compression noise
     img = add_JPEG_noise(img)
@@ -702,29 +810,37 @@ def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patc
 
 
 if __name__ == '__main__':
-	print("hey")
-	img = util.imread_uint('utils/test.png', 3)
-	print(img)
-	img = util.uint2single(img)
-	print(img)
-	img = img[:448, :448]
-	h = img.shape[0] // 4
-	print("resizing to", h)
-	sf = 4
-	deg_fn = partial(degradation_bsrgan_variant, sf=sf)
-	for i in range(20):
-		print(i)
-		img_lq = deg_fn(img)
-		print(img_lq)
-		img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"]
-		print(img_lq.shape)
-		print("bicubic", img_lq_bicubic.shape)
-		print(img_hq.shape)
-		lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
-		                        interpolation=0)
-		lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
-		                        interpolation=0)
-		img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
-		util.imsave(img_concat, str(i) + '.png')
-
-
+    print('hey')
+    img = util.imread_uint('utils/test.png', 3)
+    print(img)
+    img = util.uint2single(img)
+    print(img)
+    img = img[:448, :448]
+    h = img.shape[0] // 4
+    print('resizing to', h)
+    sf = 4
+    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
+    for i in range(20):
+        print(i)
+        img_lq = deg_fn(img)
+        print(img_lq)
+        img_lq_bicubic = albumentations.SmallestMaxSize(
+            max_size=h, interpolation=cv2.INTER_CUBIC
+        )(image=img)['image']
+        print(img_lq.shape)
+        print('bicubic', img_lq_bicubic.shape)
+        print(img_hq.shape)
+        lq_nearest = cv2.resize(
+            util.single2uint(img_lq),
+            (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+            interpolation=0,
+        )
+        lq_bicubic_nearest = cv2.resize(
+            util.single2uint(img_lq_bicubic),
+            (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+            interpolation=0,
+        )
+        img_concat = np.concatenate(
+            [lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1
+        )
+        util.imsave(img_concat, str(i) + '.png')
diff --git a/ldm/modules/image_degradation/bsrgan_light.py b/ldm/modules/image_degradation/bsrgan_light.py
index 9e1f823996..3500ef7316 100644
--- a/ldm/modules/image_degradation/bsrgan_light.py
+++ b/ldm/modules/image_degradation/bsrgan_light.py
@@ -27,16 +27,16 @@ import ldm.modules.image_degradation.utils_image as util
 
 
 def modcrop_np(img, sf):
-    '''
+    """
     Args:
         img: numpy image, WxH or WxHxC
         sf: scale factor
     Return:
         cropped image
-    '''
+    """
     w, h = img.shape[:2]
     im = np.copy(img)
-    return im[:w - w % sf, :h - h % sf, ...]
+    return im[: w - w % sf, : h - h % sf, ...]
 
 
 """
@@ -54,7 +54,9 @@ def analytic_kernel(k):
     # Loop over the small kernel to fill the big one
     for r in range(k_size):
         for c in range(k_size):
-            big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
+            big_k[2 * r : 2 * r + k_size, 2 * c : 2 * c + k_size] += (
+                k[r, c] * k
+            )
     # Crop the edges of the big kernel to ignore very small values and increase run time of SR
     crop = k_size // 2
     cropped_big_k = big_k[crop:-crop, crop:-crop]
@@ -63,7 +65,7 @@ def analytic_kernel(k):
 
 
 def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
-    """ generate an anisotropic Gaussian kernel
+    """generate an anisotropic Gaussian kernel
     Args:
         ksize : e.g., 15, kernel size
         theta : [0,  pi], rotation angle range
@@ -74,7 +76,12 @@ def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
         k     : kernel
     """
 
-    v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
+    v = np.dot(
+        np.array(
+            [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]
+        ),
+        np.array([1.0, 0.0]),
+    )
     V = np.array([[v[0], v[1]], [v[1], -v[0]]])
     D = np.array([[l1, 0], [0, l2]])
     Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
@@ -126,24 +133,32 @@ def shift_pixel(x, sf, upper_left=True):
 
 
 def blur(x, k):
-    '''
+    """
     x: image, NxcxHxW
     k: kernel, Nx1xhxw
-    '''
+    """
     n, c = x.shape[:2]
     p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
     x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
     k = k.repeat(1, c, 1, 1)
     k = k.view(-1, 1, k.shape[2], k.shape[3])
     x = x.view(1, -1, x.shape[2], x.shape[3])
-    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
+    x = torch.nn.functional.conv2d(
+        x, k, bias=None, stride=1, padding=0, groups=n * c
+    )
     x = x.view(n, c, x.shape[2], x.shape[3])
 
     return x
 
 
-def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
-    """"
+def gen_kernel(
+    k_size=np.array([15, 15]),
+    scale_factor=np.array([4, 4]),
+    min_var=0.6,
+    max_var=10.0,
+    noise_level=0,
+):
+    """ "
     # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
     # Kai Zhang
     # min_var = 0.175 * sf  # variance of the gaussian kernel will be sampled between min_var and max_var
@@ -157,13 +172,16 @@ def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var
 
     # Set COV matrix using Lambdas and Theta
     LAMBDA = np.diag([lambda_1, lambda_2])
-    Q = np.array([[np.cos(theta), -np.sin(theta)],
-                  [np.sin(theta), np.cos(theta)]])
+    Q = np.array(
+        [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]
+    )
     SIGMA = Q @ LAMBDA @ Q.T
     INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
 
     # Set expectation position (shifting kernel for aligned image)
-    MU = k_size // 2 - 0.5 * (scale_factor - 1)  # - 0.5 * (scale_factor - k_size % 2)
+    MU = k_size // 2 - 0.5 * (
+        scale_factor - 1
+    )  # - 0.5 * (scale_factor - k_size % 2)
     MU = MU[None, None, :, None]
 
     # Create meshgrid for Gaussian
@@ -188,7 +206,9 @@ def fspecial_gaussian(hsize, sigma):
     hsize = [hsize, hsize]
     siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
     std = sigma
-    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
+    [x, y] = np.meshgrid(
+        np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)
+    )
     arg = -(x * x + y * y) / (2 * std * std)
     h = np.exp(arg)
     h[h < scipy.finfo(float).eps * h.max()] = 0
@@ -208,10 +228,10 @@ def fspecial_laplacian(alpha):
 
 
 def fspecial(filter_type, *args, **kwargs):
-    '''
+    """
     python code from:
     https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
-    '''
+    """
     if filter_type == 'gaussian':
         return fspecial_gaussian(*args, **kwargs)
     if filter_type == 'laplacian':
@@ -226,19 +246,19 @@ def fspecial(filter_type, *args, **kwargs):
 
 
 def bicubic_degradation(x, sf=3):
-    '''
+    """
     Args:
         x: HxWxC image, [0, 1]
         sf: down-scale factor
     Return:
         bicubicly downsampled LR image
-    '''
+    """
     x = util.imresize_np(x, scale=1 / sf)
     return x
 
 
 def srmd_degradation(x, k, sf=3):
-    ''' blur + bicubic downsampling
+    """blur + bicubic downsampling
     Args:
         x: HxWxC image, [0, 1]
         k: hxw, double
@@ -253,14 +273,16 @@ def srmd_degradation(x, k, sf=3):
           pages={3262--3271},
           year={2018}
         }
-    '''
-    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
+    """
+    x = ndimage.filters.convolve(
+        x, np.expand_dims(k, axis=2), mode='wrap'
+    )  # 'nearest' | 'mirror'
     x = bicubic_degradation(x, sf=sf)
     return x
 
 
 def dpsr_degradation(x, k, sf=3):
-    ''' bicubic downsampling + blur
+    """bicubic downsampling + blur
     Args:
         x: HxWxC image, [0, 1]
         k: hxw, double
@@ -275,21 +297,21 @@ def dpsr_degradation(x, k, sf=3):
           pages={1671--1681},
           year={2019}
         }
-    '''
+    """
     x = bicubic_degradation(x, sf=sf)
     x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
     return x
 
 
 def classical_degradation(x, k, sf=3):
-    ''' blur + downsampling
+    """blur + downsampling
     Args:
         x: HxWxC image, [0, 1]/[0, 255]
         k: hxw, double
         sf: down-scale factor
     Return:
         downsampled LR image
-    '''
+    """
     x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
     # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
     st = 0
@@ -326,16 +348,25 @@ def add_blur(img, sf=4):
     wd2 = 4.0 + sf
     wd = 2.0 + 0.2 * sf
 
-    wd2 = wd2/4
-    wd = wd/4
+    wd2 = wd2 / 4
+    wd = wd / 4
 
     if random.random() < 0.5:
         l1 = wd2 * random.random()
         l2 = wd2 * random.random()
-        k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
+        k = anisotropic_Gaussian(
+            ksize=random.randint(2, 11) + 3,
+            theta=random.random() * np.pi,
+            l1=l1,
+            l2=l2,
+        )
     else:
-        k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
-    img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
+        k = fspecial(
+            'gaussian', random.randint(2, 4) + 3, wd * random.random()
+        )
+    img = ndimage.filters.convolve(
+        img, np.expand_dims(k, axis=2), mode='mirror'
+    )
 
     return img
 
@@ -348,7 +379,11 @@ def add_resize(img, sf=4):
         sf1 = random.uniform(0.5 / sf, 1)
     else:
         sf1 = 1.0
-    img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
+    img = cv2.resize(
+        img,
+        (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])),
+        interpolation=random.choice([1, 2, 3]),
+    )
     img = np.clip(img, 0.0, 1.0)
 
     return img
@@ -370,19 +405,26 @@ def add_resize(img, sf=4):
 #     img = np.clip(img, 0.0, 1.0)
 #     return img
 
+
 def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
     noise_level = random.randint(noise_level1, noise_level2)
     rnum = np.random.rand()
     if rnum > 0.6:  # add color Gaussian noise
-        img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
+        img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(
+            np.float32
+        )
     elif rnum < 0.4:  # add grayscale Gaussian noise
-        img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
+        img = img + np.random.normal(
+            0, noise_level / 255.0, (*img.shape[:2], 1)
+        ).astype(np.float32)
     else:  # add  noise
-        L = noise_level2 / 255.
+        L = noise_level2 / 255.0
         D = np.diag(np.random.rand(3))
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
-        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+        img = img + np.random.multivariate_normal(
+            [0, 0, 0], np.abs(L**2 * conv), img.shape[:2]
+        ).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
 
@@ -392,28 +434,37 @@ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
     img = np.clip(img, 0.0, 1.0)
     rnum = random.random()
     if rnum > 0.6:
-        img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
+        img += img * np.random.normal(
+            0, noise_level / 255.0, img.shape
+        ).astype(np.float32)
     elif rnum < 0.4:
-        img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
+        img += img * np.random.normal(
+            0, noise_level / 255.0, (*img.shape[:2], 1)
+        ).astype(np.float32)
     else:
-        L = noise_level2 / 255.
+        L = noise_level2 / 255.0
         D = np.diag(np.random.rand(3))
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
-        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+        img += img * np.random.multivariate_normal(
+            [0, 0, 0], np.abs(L**2 * conv), img.shape[:2]
+        ).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
 
 
 def add_Poisson_noise(img):
-    img = np.clip((img * 255.0).round(), 0, 255) / 255.
+    img = np.clip((img * 255.0).round(), 0, 255) / 255.0
     vals = 10 ** (2 * random.random() + 2.0)  # [2, 4]
     if random.random() < 0.5:
         img = np.random.poisson(img * vals).astype(np.float32) / vals
     else:
         img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
-        img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
-        noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
+        img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.0
+        noise_gray = (
+            np.random.poisson(img_gray * vals).astype(np.float32) / vals
+            - img_gray
+        )
         img += noise_gray[:, :, np.newaxis]
     img = np.clip(img, 0.0, 1.0)
     return img
@@ -422,7 +473,9 @@ def add_Poisson_noise(img):
 def add_JPEG_noise(img):
     quality_factor = random.randint(80, 95)
     img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
-    result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
+    result, encimg = cv2.imencode(
+        '.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]
+    )
     img = cv2.imdecode(encimg, 1)
     img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
     return img
@@ -432,10 +485,14 @@ def random_crop(lq, hq, sf=4, lq_patchsize=64):
     h, w = lq.shape[:2]
     rnd_h = random.randint(0, h - lq_patchsize)
     rnd_w = random.randint(0, w - lq_patchsize)
-    lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
+    lq = lq[rnd_h : rnd_h + lq_patchsize, rnd_w : rnd_w + lq_patchsize, :]
 
     rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
-    hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
+    hq = hq[
+        rnd_h_H : rnd_h_H + lq_patchsize * sf,
+        rnd_w_H : rnd_w_H + lq_patchsize * sf,
+        :,
+    ]
     return lq, hq
 
 
@@ -456,7 +513,7 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
     sf_ori = sf
 
     h1, w1 = img.shape[:2]
-    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
+    img = img.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...]  # mod crop
     h, w = img.shape[:2]
 
     if h < lq_patchsize * sf or w < lq_patchsize * sf:
@@ -466,8 +523,11 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
 
     if sf == 4 and random.random() < scale2_prob:  # downsample1
         if np.random.rand() < 0.5:
-            img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
-                             interpolation=random.choice([1, 2, 3]))
+            img = cv2.resize(
+                img,
+                (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
+                interpolation=random.choice([1, 2, 3]),
+            )
         else:
             img = util.imresize_np(img, 1 / 2, True)
         img = np.clip(img, 0.0, 1.0)
@@ -476,7 +536,10 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
     shuffle_order = random.sample(range(7), 7)
     idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
     if idx1 > idx2:  # keep downsample3 last
-        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
+        shuffle_order[idx1], shuffle_order[idx2] = (
+            shuffle_order[idx2],
+            shuffle_order[idx1],
+        )
 
     for i in shuffle_order:
 
@@ -491,19 +554,30 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
             # downsample2
             if random.random() < 0.75:
                 sf1 = random.uniform(1, 2 * sf)
-                img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
-                                 interpolation=random.choice([1, 2, 3]))
+                img = cv2.resize(
+                    img,
+                    (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
+                    interpolation=random.choice([1, 2, 3]),
+                )
             else:
                 k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                 k_shifted = shift_pixel(k, sf)
-                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
-                img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
+                k_shifted = (
+                    k_shifted / k_shifted.sum()
+                )  # blur with shifted kernel
+                img = ndimage.filters.convolve(
+                    img, np.expand_dims(k_shifted, axis=2), mode='mirror'
+                )
                 img = img[0::sf, 0::sf, ...]  # nearest downsampling
             img = np.clip(img, 0.0, 1.0)
 
         elif i == 3:
             # downsample3
-            img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
+            img = cv2.resize(
+                img,
+                (int(1 / sf * a), int(1 / sf * b)),
+                interpolation=random.choice([1, 2, 3]),
+            )
             img = np.clip(img, 0.0, 1.0)
 
         elif i == 4:
@@ -548,15 +622,18 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
     sf_ori = sf
 
     h1, w1 = image.shape[:2]
-    image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
+    image = image.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...]  # mod crop
     h, w = image.shape[:2]
 
     hq = image.copy()
 
     if sf == 4 and random.random() < scale2_prob:  # downsample1
         if np.random.rand() < 0.5:
-            image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
-                               interpolation=random.choice([1, 2, 3]))
+            image = cv2.resize(
+                image,
+                (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
+                interpolation=random.choice([1, 2, 3]),
+            )
         else:
             image = util.imresize_np(image, 1 / 2, True)
         image = np.clip(image, 0.0, 1.0)
@@ -565,7 +642,10 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
     shuffle_order = random.sample(range(7), 7)
     idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
     if idx1 > idx2:  # keep downsample3 last
-        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
+        shuffle_order[idx1], shuffle_order[idx2] = (
+            shuffle_order[idx2],
+            shuffle_order[idx1],
+        )
 
     for i in shuffle_order:
 
@@ -583,20 +663,34 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
             # downsample2
             if random.random() < 0.8:
                 sf1 = random.uniform(1, 2 * sf)
-                image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
-                                   interpolation=random.choice([1, 2, 3]))
+                image = cv2.resize(
+                    image,
+                    (
+                        int(1 / sf1 * image.shape[1]),
+                        int(1 / sf1 * image.shape[0]),
+                    ),
+                    interpolation=random.choice([1, 2, 3]),
+                )
             else:
                 k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                 k_shifted = shift_pixel(k, sf)
-                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
-                image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
+                k_shifted = (
+                    k_shifted / k_shifted.sum()
+                )  # blur with shifted kernel
+                image = ndimage.filters.convolve(
+                    image, np.expand_dims(k_shifted, axis=2), mode='mirror'
+                )
                 image = image[0::sf, 0::sf, ...]  # nearest downsampling
 
             image = np.clip(image, 0.0, 1.0)
 
         elif i == 3:
             # downsample3
-            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
+            image = cv2.resize(
+                image,
+                (int(1 / sf * a), int(1 / sf * b)),
+                interpolation=random.choice([1, 2, 3]),
+            )
             image = np.clip(image, 0.0, 1.0)
 
         elif i == 4:
@@ -617,34 +711,41 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
     # add final JPEG compression noise
     image = add_JPEG_noise(image)
     image = util.single2uint(image)
-    example = {"image": image}
+    example = {'image': image}
     return example
 
 
-
-
 if __name__ == '__main__':
-    print("hey")
+    print('hey')
     img = util.imread_uint('utils/test.png', 3)
     img = img[:448, :448]
     h = img.shape[0] // 4
-    print("resizing to", h)
+    print('resizing to', h)
     sf = 4
     deg_fn = partial(degradation_bsrgan_variant, sf=sf)
     for i in range(20):
         print(i)
         img_hq = img
-        img_lq = deg_fn(img)["image"]
+        img_lq = deg_fn(img)['image']
         img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
         print(img_lq)
-        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
+        img_lq_bicubic = albumentations.SmallestMaxSize(
+            max_size=h, interpolation=cv2.INTER_CUBIC
+        )(image=img_hq)['image']
         print(img_lq.shape)
-        print("bicubic", img_lq_bicubic.shape)
+        print('bicubic', img_lq_bicubic.shape)
         print(img_hq.shape)
-        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
-                                interpolation=0)
-        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
-                                        (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
-                                        interpolation=0)
-        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
+        lq_nearest = cv2.resize(
+            util.single2uint(img_lq),
+            (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+            interpolation=0,
+        )
+        lq_bicubic_nearest = cv2.resize(
+            util.single2uint(img_lq_bicubic),
+            (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+            interpolation=0,
+        )
+        img_concat = np.concatenate(
+            [lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1
+        )
         util.imsave(img_concat, str(i) + '.png')
diff --git a/ldm/modules/image_degradation/utils_image.py b/ldm/modules/image_degradation/utils_image.py
index 0175f155ad..4b6e64658a 100644
--- a/ldm/modules/image_degradation/utils_image.py
+++ b/ldm/modules/image_degradation/utils_image.py
@@ -6,13 +6,14 @@ import torch
 import cv2
 from torchvision.utils import make_grid
 from datetime import datetime
-#import matplotlib.pyplot as plt   # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
+
+# import matplotlib.pyplot as plt   # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
 
 
-os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
+os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
 
 
-'''
+"""
 # --------------------------------------------
 # Kai Zhang (github: https://github.com/cszn)
 # 03/Mar/2019
@@ -20,10 +21,22 @@ os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
 # https://github.com/twhui/SRGAN-pyTorch
 # https://github.com/xinntao/BasicSR
 # --------------------------------------------
-'''
+"""
 
 
-IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif']
+IMG_EXTENSIONS = [
+    '.jpg',
+    '.JPG',
+    '.jpeg',
+    '.JPEG',
+    '.png',
+    '.PNG',
+    '.ppm',
+    '.PPM',
+    '.bmp',
+    '.BMP',
+    '.tif',
+]
 
 
 def is_image_file(filename):
@@ -49,19 +62,19 @@ def surf(Z, cmap='rainbow', figsize=None):
     ax3 = plt.axes(projection='3d')
 
     w, h = Z.shape[:2]
-    xx = np.arange(0,w,1)
-    yy = np.arange(0,h,1)
+    xx = np.arange(0, w, 1)
+    yy = np.arange(0, h, 1)
     X, Y = np.meshgrid(xx, yy)
-    ax3.plot_surface(X,Y,Z,cmap=cmap)
-    #ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap)
+    ax3.plot_surface(X, Y, Z, cmap=cmap)
+    # ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap)
     plt.show()
 
 
-'''
+"""
 # --------------------------------------------
 # get image pathes
 # --------------------------------------------
-'''
+"""
 
 
 def get_image_paths(dataroot):
@@ -83,26 +96,26 @@ def _get_paths_from_images(path):
     return images
 
 
-'''
+"""
 # --------------------------------------------
 # split large images into small images 
 # --------------------------------------------
-'''
+"""
 
 
 def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
     w, h = img.shape[:2]
     patches = []
     if w > p_max and h > p_max:
-        w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=np.int))
-        h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=np.int))
-        w1.append(w-p_size)
-        h1.append(h-p_size)
-#        print(w1)
-#        print(h1)
+        w1 = list(np.arange(0, w - p_size, p_size - p_overlap, dtype=np.int))
+        h1 = list(np.arange(0, h - p_size, p_size - p_overlap, dtype=np.int))
+        w1.append(w - p_size)
+        h1.append(h - p_size)
+        #        print(w1)
+        #        print(h1)
         for i in w1:
             for j in h1:
-                patches.append(img[i:i+p_size, j:j+p_size,:])
+                patches.append(img[i : i + p_size, j : j + p_size, :])
     else:
         patches.append(img)
 
@@ -118,11 +131,21 @@ def imssave(imgs, img_path):
     for i, img in enumerate(imgs):
         if img.ndim == 3:
             img = img[:, :, [2, 1, 0]]
-        new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png')
+        new_path = os.path.join(
+            os.path.dirname(img_path),
+            img_name + str('_s{:04d}'.format(i)) + '.png',
+        )
         cv2.imwrite(new_path, img)
 
 
-def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000):
+def split_imageset(
+    original_dataroot,
+    taget_dataroot,
+    n_channels=3,
+    p_size=800,
+    p_overlap=96,
+    p_max=1000,
+):
     """
     split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size),
     and save them into taget_dataroot; only the images with larger size than (p_max)x(p_max)
@@ -139,15 +162,18 @@ def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800,
         # img_name, ext = os.path.splitext(os.path.basename(img_path))
         img = imread_uint(img_path, n_channels=n_channels)
         patches = patches_from_image(img, p_size, p_overlap, p_max)
-        imssave(patches, os.path.join(taget_dataroot,os.path.basename(img_path)))
-        #if original_dataroot == taget_dataroot:
-        #del img_path
+        imssave(
+            patches, os.path.join(taget_dataroot, os.path.basename(img_path))
+        )
+        # if original_dataroot == taget_dataroot:
+        # del img_path
 
-'''
+
+"""
 # --------------------------------------------
 # makedir
 # --------------------------------------------
-'''
+"""
 
 
 def mkdir(path):
@@ -171,12 +197,12 @@ def mkdir_and_rename(path):
     os.makedirs(path)
 
 
-'''
+"""
 # --------------------------------------------
 # read image from path
 # opencv is fast, but read BGR numpy image
 # --------------------------------------------
-'''
+"""
 
 
 # --------------------------------------------
@@ -206,6 +232,7 @@ def imsave(img, img_path):
         img = img[:, :, [2, 1, 0]]
     cv2.imwrite(img_path, img)
 
+
 def imwrite(img, img_path):
     img = np.squeeze(img)
     if img.ndim == 3:
@@ -213,7 +240,6 @@ def imwrite(img, img_path):
     cv2.imwrite(img_path, img)
 
 
-
 # --------------------------------------------
 # get single image of size HxWxn_channles (BGR)
 # --------------------------------------------
@@ -221,7 +247,7 @@ def read_img(path):
     # read image by cv2
     # return: Numpy float32, HWC, BGR, [0,1]
     img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # cv2.IMREAD_GRAYSCALE
-    img = img.astype(np.float32) / 255.
+    img = img.astype(np.float32) / 255.0
     if img.ndim == 2:
         img = np.expand_dims(img, axis=2)
     # some images have 4 channels
@@ -230,7 +256,7 @@ def read_img(path):
     return img
 
 
-'''
+"""
 # --------------------------------------------
 # image format conversion
 # --------------------------------------------
@@ -238,7 +264,7 @@ def read_img(path):
 # numpy(single) <--->  tensor
 # numpy(unit)   <--->  tensor
 # --------------------------------------------
-'''
+"""
 
 
 # --------------------------------------------
@@ -248,22 +274,22 @@ def read_img(path):
 
 def uint2single(img):
 
-    return np.float32(img/255.)
+    return np.float32(img / 255.0)
 
 
 def single2uint(img):
 
-    return np.uint8((img.clip(0, 1)*255.).round())
+    return np.uint8((img.clip(0, 1) * 255.0).round())
 
 
 def uint162single(img):
 
-    return np.float32(img/65535.)
+    return np.float32(img / 65535.0)
 
 
 def single2uint16(img):
 
-    return np.uint16((img.clip(0, 1)*65535.).round())
+    return np.uint16((img.clip(0, 1) * 65535.0).round())
 
 
 # --------------------------------------------
@@ -275,14 +301,25 @@ def single2uint16(img):
 def uint2tensor4(img):
     if img.ndim == 2:
         img = np.expand_dims(img, axis=2)
-    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0)
+    return (
+        torch.from_numpy(np.ascontiguousarray(img))
+        .permute(2, 0, 1)
+        .float()
+        .div(255.0)
+        .unsqueeze(0)
+    )
 
 
 # convert uint to 3-dimensional torch tensor
 def uint2tensor3(img):
     if img.ndim == 2:
         img = np.expand_dims(img, axis=2)
-    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.)
+    return (
+        torch.from_numpy(np.ascontiguousarray(img))
+        .permute(2, 0, 1)
+        .float()
+        .div(255.0)
+    )
 
 
 # convert 2/3/4-dimensional torch tensor to uint
@@ -290,7 +327,7 @@ def tensor2uint(img):
     img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
     if img.ndim == 3:
         img = np.transpose(img, (1, 2, 0))
-    return np.uint8((img*255.0).round())
+    return np.uint8((img * 255.0).round())
 
 
 # --------------------------------------------
@@ -305,7 +342,12 @@ def single2tensor3(img):
 
 # convert single (HxWxC) to 4-dimensional torch tensor
 def single2tensor4(img):
-    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0)
+    return (
+        torch.from_numpy(np.ascontiguousarray(img))
+        .permute(2, 0, 1)
+        .float()
+        .unsqueeze(0)
+    )
 
 
 # convert torch tensor to single
@@ -316,6 +358,7 @@ def tensor2single(img):
 
     return img
 
+
 # convert torch tensor to single
 def tensor2single3(img):
     img = img.data.squeeze().float().cpu().numpy()
@@ -327,30 +370,48 @@ def tensor2single3(img):
 
 
 def single2tensor5(img):
-    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0)
+    return (
+        torch.from_numpy(np.ascontiguousarray(img))
+        .permute(2, 0, 1, 3)
+        .float()
+        .unsqueeze(0)
+    )
 
 
 def single32tensor5(img):
-    return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0)
+    return (
+        torch.from_numpy(np.ascontiguousarray(img))
+        .float()
+        .unsqueeze(0)
+        .unsqueeze(0)
+    )
 
 
 def single42tensor4(img):
-    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float()
+    return (
+        torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float()
+    )
 
 
 # from skimage.io import imread, imsave
 def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
-    '''
+    """
     Converts a torch Tensor into an image Numpy array of BGR channel order
     Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
     Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
-    '''
-    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)  # squeeze first, then clamp
-    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])  # to range [0,1]
+    """
+    tensor = (
+        tensor.squeeze().float().cpu().clamp_(*min_max)
+    )  # squeeze first, then clamp
+    tensor = (tensor - min_max[0]) / (
+        min_max[1] - min_max[0]
+    )  # to range [0,1]
     n_dim = tensor.dim()
     if n_dim == 4:
         n_img = len(tensor)
-        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
+        img_np = make_grid(
+            tensor, nrow=int(math.sqrt(n_img)), normalize=False
+        ).numpy()
         img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
     elif n_dim == 3:
         img_np = tensor.numpy()
@@ -359,14 +420,17 @@ def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
         img_np = tensor.numpy()
     else:
         raise TypeError(
-            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
+            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(
+                n_dim
+            )
+        )
     if out_type == np.uint8:
         img_np = (img_np * 255.0).round()
         # Important. Unlike matlab, numpy.unit8() WILL NOT round by default.
     return img_np.astype(out_type)
 
 
-'''
+"""
 # --------------------------------------------
 # Augmentation, flipe and/or rotate
 # --------------------------------------------
@@ -374,12 +438,11 @@ def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
 # (1) augmet_img: numpy image of WxHxC or WxH
 # (2) augment_img_tensor4: tensor image 1xCxWxH
 # --------------------------------------------
-'''
+"""
 
 
 def augment_img(img, mode=0):
-    '''Kai Zhang (github: https://github.com/cszn)
-    '''
+    """Kai Zhang (github: https://github.com/cszn)"""
     if mode == 0:
         return img
     elif mode == 1:
@@ -399,8 +462,7 @@ def augment_img(img, mode=0):
 
 
 def augment_img_tensor4(img, mode=0):
-    '''Kai Zhang (github: https://github.com/cszn)
-    '''
+    """Kai Zhang (github: https://github.com/cszn)"""
     if mode == 0:
         return img
     elif mode == 1:
@@ -420,8 +482,7 @@ def augment_img_tensor4(img, mode=0):
 
 
 def augment_img_tensor(img, mode=0):
-    '''Kai Zhang (github: https://github.com/cszn)
-    '''
+    """Kai Zhang (github: https://github.com/cszn)"""
     img_size = img.size()
     img_np = img.data.cpu().numpy()
     if len(img_size) == 3:
@@ -484,11 +545,11 @@ def augment_imgs(img_list, hflip=True, rot=True):
     return [_augment(img) for img in img_list]
 
 
-'''
+"""
 # --------------------------------------------
 # modcrop and shave
 # --------------------------------------------
-'''
+"""
 
 
 def modcrop(img_in, scale):
@@ -497,11 +558,11 @@ def modcrop(img_in, scale):
     if img.ndim == 2:
         H, W = img.shape
         H_r, W_r = H % scale, W % scale
-        img = img[:H - H_r, :W - W_r]
+        img = img[: H - H_r, : W - W_r]
     elif img.ndim == 3:
         H, W, C = img.shape
         H_r, W_r = H % scale, W % scale
-        img = img[:H - H_r, :W - W_r, :]
+        img = img[: H - H_r, : W - W_r, :]
     else:
         raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
     return img
@@ -511,11 +572,11 @@ def shave(img_in, border=0):
     # img_in: Numpy, HWC or HW
     img = np.copy(img_in)
     h, w = img.shape[:2]
-    img = img[border:h-border, border:w-border]
+    img = img[border : h - border, border : w - border]
     return img
 
 
-'''
+"""
 # --------------------------------------------
 # image processing process on numpy image
 # channel_convert(in_c, tar_type, img_list):
@@ -523,74 +584,92 @@ def shave(img_in, border=0):
 # bgr2ycbcr(img, only_y=True):
 # ycbcr2rgb(img):
 # --------------------------------------------
-'''
+"""
 
 
 def rgb2ycbcr(img, only_y=True):
-    '''same as matlab rgb2ycbcr
+    """same as matlab rgb2ycbcr
     only_y: only return Y channel
     Input:
         uint8, [0, 255]
         float, [0, 1]
-    '''
+    """
     in_img_type = img.dtype
     img.astype(np.float32)
     if in_img_type != np.uint8:
-        img *= 255.
+        img *= 255.0
     # convert
     if only_y:
         rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
     else:
-        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
-                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
+        rlt = np.matmul(
+            img,
+            [
+                [65.481, -37.797, 112.0],
+                [128.553, -74.203, -93.786],
+                [24.966, 112.0, -18.214],
+            ],
+        ) / 255.0 + [16, 128, 128]
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
-        rlt /= 255.
+        rlt /= 255.0
     return rlt.astype(in_img_type)
 
 
 def ycbcr2rgb(img):
-    '''same as matlab ycbcr2rgb
+    """same as matlab ycbcr2rgb
     Input:
         uint8, [0, 255]
         float, [0, 1]
-    '''
+    """
     in_img_type = img.dtype
     img.astype(np.float32)
     if in_img_type != np.uint8:
-        img *= 255.
+        img *= 255.0
     # convert
-    rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
-                          [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
+    rlt = np.matmul(
+        img,
+        [
+            [0.00456621, 0.00456621, 0.00456621],
+            [0, -0.00153632, 0.00791071],
+            [0.00625893, -0.00318811, 0],
+        ],
+    ) * 255.0 + [-222.921, 135.576, -276.836]
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
-        rlt /= 255.
+        rlt /= 255.0
     return rlt.astype(in_img_type)
 
 
 def bgr2ycbcr(img, only_y=True):
-    '''bgr version of rgb2ycbcr
+    """bgr version of rgb2ycbcr
     only_y: only return Y channel
     Input:
         uint8, [0, 255]
         float, [0, 1]
-    '''
+    """
     in_img_type = img.dtype
     img.astype(np.float32)
     if in_img_type != np.uint8:
-        img *= 255.
+        img *= 255.0
     # convert
     if only_y:
         rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
     else:
-        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
-                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
+        rlt = np.matmul(
+            img,
+            [
+                [24.966, 112.0, -18.214],
+                [128.553, -74.203, -93.786],
+                [65.481, -37.797, 112.0],
+            ],
+        ) / 255.0 + [16, 128, 128]
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
-        rlt /= 255.
+        rlt /= 255.0
     return rlt.astype(in_img_type)
 
 
@@ -608,11 +687,11 @@ def channel_convert(in_c, tar_type, img_list):
         return img_list
 
 
-'''
+"""
 # --------------------------------------------
 # metric, PSNR and SSIM
 # --------------------------------------------
-'''
+"""
 
 
 # --------------------------------------------
@@ -620,17 +699,17 @@ def channel_convert(in_c, tar_type, img_list):
 # --------------------------------------------
 def calculate_psnr(img1, img2, border=0):
     # img1 and img2 have range [0, 255]
-    #img1 = img1.squeeze()
-    #img2 = img2.squeeze()
+    # img1 = img1.squeeze()
+    # img2 = img2.squeeze()
     if not img1.shape == img2.shape:
         raise ValueError('Input images must have the same dimensions.')
     h, w = img1.shape[:2]
-    img1 = img1[border:h-border, border:w-border]
-    img2 = img2[border:h-border, border:w-border]
+    img1 = img1[border : h - border, border : w - border]
+    img2 = img2[border : h - border, border : w - border]
 
     img1 = img1.astype(np.float64)
     img2 = img2.astype(np.float64)
-    mse = np.mean((img1 - img2)**2)
+    mse = np.mean((img1 - img2) ** 2)
     if mse == 0:
         return float('inf')
     return 20 * math.log10(255.0 / math.sqrt(mse))
@@ -640,17 +719,17 @@ def calculate_psnr(img1, img2, border=0):
 # SSIM
 # --------------------------------------------
 def calculate_ssim(img1, img2, border=0):
-    '''calculate SSIM
+    """calculate SSIM
     the same outputs as MATLAB's
     img1, img2: [0, 255]
-    '''
-    #img1 = img1.squeeze()
-    #img2 = img2.squeeze()
+    """
+    # img1 = img1.squeeze()
+    # img2 = img2.squeeze()
     if not img1.shape == img2.shape:
         raise ValueError('Input images must have the same dimensions.')
     h, w = img1.shape[:2]
-    img1 = img1[border:h-border, border:w-border]
-    img2 = img2[border:h-border, border:w-border]
+    img1 = img1[border : h - border, border : w - border]
+    img2 = img2[border : h - border, border : w - border]
 
     if img1.ndim == 2:
         return ssim(img1, img2)
@@ -658,7 +737,7 @@ def calculate_ssim(img1, img2, border=0):
         if img1.shape[2] == 3:
             ssims = []
             for i in range(3):
-                ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
+                ssims.append(ssim(img1[:, :, i], img2[:, :, i]))
             return np.array(ssims).mean()
         elif img1.shape[2] == 1:
             return ssim(np.squeeze(img1), np.squeeze(img2))
@@ -667,8 +746,8 @@ def calculate_ssim(img1, img2, border=0):
 
 
 def ssim(img1, img2):
-    C1 = (0.01 * 255)**2
-    C2 = (0.03 * 255)**2
+    C1 = (0.01 * 255) ** 2
+    C2 = (0.03 * 255) ** 2
 
     img1 = img1.astype(np.float64)
     img2 = img2.astype(np.float64)
@@ -684,16 +763,17 @@ def ssim(img1, img2):
     sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
     sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
 
-    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
-                                                            (sigma1_sq + sigma2_sq + C2))
+    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
+        (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
+    )
     return ssim_map.mean()
 
 
-'''
+"""
 # --------------------------------------------
 # matlab's bicubic imresize (numpy and torch) [0, 1]
 # --------------------------------------------
-'''
+"""
 
 
 # matlab 'imresize' function, now only support 'bicubic'
@@ -701,11 +781,14 @@ def cubic(x):
     absx = torch.abs(x)
     absx2 = absx**2
     absx3 = absx**3
-    return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
-        (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
+    return (1.5 * absx3 - 2.5 * absx2 + 1) * ((absx <= 1).type_as(absx)) + (
+        -0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2
+    ) * (((absx > 1) * (absx <= 2)).type_as(absx))
 
 
-def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
+def calculate_weights_indices(
+    in_length, out_length, scale, kernel, kernel_width, antialiasing
+):
     if (scale < 1) and (antialiasing):
         # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
         kernel_width = kernel_width / scale
@@ -729,8 +812,9 @@ def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width
 
     # The indices of the input pixels involved in computing the k-th output
     # pixel are in row k of the indices matrix.
-    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
-        1, P).expand(out_length, P)
+    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(
+        0, P - 1, P
+    ).view(1, P).expand(out_length, P)
 
     # The weights used to compute the k-th output pixel are in row k of the
     # weights matrix.
@@ -771,7 +855,11 @@ def imresize(img, scale, antialiasing=True):
     if need_squeeze:
         img.unsqueeze_(0)
     in_C, in_H, in_W = img.size()
-    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
+    out_C, out_H, out_W = (
+        in_C,
+        math.ceil(in_H * scale),
+        math.ceil(in_W * scale),
+    )
     kernel_width = 4
     kernel = 'cubic'
 
@@ -782,9 +870,11 @@ def imresize(img, scale, antialiasing=True):
 
     # get weights and indices
     weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
-        in_H, out_H, scale, kernel, kernel_width, antialiasing)
+        in_H, out_H, scale, kernel, kernel_width, antialiasing
+    )
     weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
-        in_W, out_W, scale, kernel, kernel_width, antialiasing)
+        in_W, out_W, scale, kernel, kernel_width, antialiasing
+    )
     # process H dimension
     # symmetric copying
     img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
@@ -805,7 +895,11 @@ def imresize(img, scale, antialiasing=True):
     for i in range(out_H):
         idx = int(indices_H[i][0])
         for j in range(out_C):
-            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
+            out_1[j, i, :] = (
+                img_aug[j, idx : idx + kernel_width, :]
+                .transpose(0, 1)
+                .mv(weights_H[i])
+            )
 
     # process W dimension
     # symmetric copying
@@ -827,7 +921,9 @@ def imresize(img, scale, antialiasing=True):
     for i in range(out_W):
         idx = int(indices_W[i][0])
         for j in range(out_C):
-            out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
+            out_2[j, :, i] = out_1_aug[j, :, idx : idx + kernel_width].mv(
+                weights_W[i]
+            )
     if need_squeeze:
         out_2.squeeze_()
     return out_2
@@ -846,7 +942,11 @@ def imresize_np(img, scale, antialiasing=True):
         img.unsqueeze_(2)
 
     in_H, in_W, in_C = img.size()
-    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
+    out_C, out_H, out_W = (
+        in_C,
+        math.ceil(in_H * scale),
+        math.ceil(in_W * scale),
+    )
     kernel_width = 4
     kernel = 'cubic'
 
@@ -857,9 +957,11 @@ def imresize_np(img, scale, antialiasing=True):
 
     # get weights and indices
     weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
-        in_H, out_H, scale, kernel, kernel_width, antialiasing)
+        in_H, out_H, scale, kernel, kernel_width, antialiasing
+    )
     weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
-        in_W, out_W, scale, kernel, kernel_width, antialiasing)
+        in_W, out_W, scale, kernel, kernel_width, antialiasing
+    )
     # process H dimension
     # symmetric copying
     img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
@@ -880,7 +982,11 @@ def imresize_np(img, scale, antialiasing=True):
     for i in range(out_H):
         idx = int(indices_H[i][0])
         for j in range(out_C):
-            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
+            out_1[i, :, j] = (
+                img_aug[idx : idx + kernel_width, :, j]
+                .transpose(0, 1)
+                .mv(weights_H[i])
+            )
 
     # process W dimension
     # symmetric copying
@@ -902,7 +1008,9 @@ def imresize_np(img, scale, antialiasing=True):
     for i in range(out_W):
         idx = int(indices_W[i][0])
         for j in range(out_C):
-            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
+            out_2[:, i, j] = out_1_aug[:, idx : idx + kernel_width, j].mv(
+                weights_W[i]
+            )
     if need_squeeze:
         out_2.squeeze_()
 
@@ -913,4 +1021,4 @@ if __name__ == '__main__':
     print('---')
 #    img = imread_uint('test.bmp', 3)
 #    img = uint2single(img)
-#    img_bicubic = imresize_np(img, 1/4)
\ No newline at end of file
+#    img_bicubic = imresize_np(img, 1/4)
diff --git a/ldm/modules/losses/__init__.py b/ldm/modules/losses/__init__.py
index 876d7c5bd6..d86294210c 100644
--- a/ldm/modules/losses/__init__.py
+++ b/ldm/modules/losses/__init__.py
@@ -1 +1 @@
-from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator
\ No newline at end of file
+from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator
diff --git a/ldm/modules/losses/contperceptual.py b/ldm/modules/losses/contperceptual.py
index 672c1e32a1..7fa4124346 100644
--- a/ldm/modules/losses/contperceptual.py
+++ b/ldm/modules/losses/contperceptual.py
@@ -5,13 +5,24 @@ from taming.modules.losses.vqperceptual import *  # TODO: taming dependency yes/
 
 
 class LPIPSWithDiscriminator(nn.Module):
-    def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0,
-                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
-                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
-                 disc_loss="hinge"):
+    def __init__(
+        self,
+        disc_start,
+        logvar_init=0.0,
+        kl_weight=1.0,
+        pixelloss_weight=1.0,
+        disc_num_layers=3,
+        disc_in_channels=3,
+        disc_factor=1.0,
+        disc_weight=1.0,
+        perceptual_weight=1.0,
+        use_actnorm=False,
+        disc_conditional=False,
+        disc_loss='hinge',
+    ):
 
         super().__init__()
-        assert disc_loss in ["hinge", "vanilla"]
+        assert disc_loss in ['hinge', 'vanilla']
         self.kl_weight = kl_weight
         self.pixel_weight = pixelloss_weight
         self.perceptual_loss = LPIPS().eval()
@@ -19,42 +30,68 @@ class LPIPSWithDiscriminator(nn.Module):
         # output log variance
         self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
 
-        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
-                                                 n_layers=disc_num_layers,
-                                                 use_actnorm=use_actnorm
-                                                 ).apply(weights_init)
+        self.discriminator = NLayerDiscriminator(
+            input_nc=disc_in_channels,
+            n_layers=disc_num_layers,
+            use_actnorm=use_actnorm,
+        ).apply(weights_init)
         self.discriminator_iter_start = disc_start
-        self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss
+        self.disc_loss = (
+            hinge_d_loss if disc_loss == 'hinge' else vanilla_d_loss
+        )
         self.disc_factor = disc_factor
         self.discriminator_weight = disc_weight
         self.disc_conditional = disc_conditional
 
     def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
         if last_layer is not None:
-            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
-            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
+            nll_grads = torch.autograd.grad(
+                nll_loss, last_layer, retain_graph=True
+            )[0]
+            g_grads = torch.autograd.grad(
+                g_loss, last_layer, retain_graph=True
+            )[0]
         else:
-            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
-            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
+            nll_grads = torch.autograd.grad(
+                nll_loss, self.last_layer[0], retain_graph=True
+            )[0]
+            g_grads = torch.autograd.grad(
+                g_loss, self.last_layer[0], retain_graph=True
+            )[0]
 
         d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
         d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
         d_weight = d_weight * self.discriminator_weight
         return d_weight
 
-    def forward(self, inputs, reconstructions, posteriors, optimizer_idx,
-                global_step, last_layer=None, cond=None, split="train",
-                weights=None):
-        rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
+    def forward(
+        self,
+        inputs,
+        reconstructions,
+        posteriors,
+        optimizer_idx,
+        global_step,
+        last_layer=None,
+        cond=None,
+        split='train',
+        weights=None,
+    ):
+        rec_loss = torch.abs(
+            inputs.contiguous() - reconstructions.contiguous()
+        )
         if self.perceptual_weight > 0:
-            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
+            p_loss = self.perceptual_loss(
+                inputs.contiguous(), reconstructions.contiguous()
+            )
             rec_loss = rec_loss + self.perceptual_weight * p_loss
 
         nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
         weighted_nll_loss = nll_loss
         if weights is not None:
-            weighted_nll_loss = weights*nll_loss
-        weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
+            weighted_nll_loss = weights * nll_loss
+        weighted_nll_loss = (
+            torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
+        )
         nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
         kl_loss = posteriors.kl()
         kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
@@ -67,45 +104,72 @@ class LPIPSWithDiscriminator(nn.Module):
                 logits_fake = self.discriminator(reconstructions.contiguous())
             else:
                 assert self.disc_conditional
-                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
+                logits_fake = self.discriminator(
+                    torch.cat((reconstructions.contiguous(), cond), dim=1)
+                )
             g_loss = -torch.mean(logits_fake)
 
             if self.disc_factor > 0.0:
                 try:
-                    d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
+                    d_weight = self.calculate_adaptive_weight(
+                        nll_loss, g_loss, last_layer=last_layer
+                    )
                 except RuntimeError:
                     assert not self.training
                     d_weight = torch.tensor(0.0)
             else:
                 d_weight = torch.tensor(0.0)
 
-            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
-            loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss
+            disc_factor = adopt_weight(
+                self.disc_factor,
+                global_step,
+                threshold=self.discriminator_iter_start,
+            )
+            loss = (
+                weighted_nll_loss
+                + self.kl_weight * kl_loss
+                + d_weight * disc_factor * g_loss
+            )
 
-            log = {"{}/total_loss".format(split): loss.clone().detach().mean(), "{}/logvar".format(split): self.logvar.detach(),
-                   "{}/kl_loss".format(split): kl_loss.detach().mean(), "{}/nll_loss".format(split): nll_loss.detach().mean(),
-                   "{}/rec_loss".format(split): rec_loss.detach().mean(),
-                   "{}/d_weight".format(split): d_weight.detach(),
-                   "{}/disc_factor".format(split): torch.tensor(disc_factor),
-                   "{}/g_loss".format(split): g_loss.detach().mean(),
-                   }
+            log = {
+                '{}/total_loss'.format(split): loss.clone().detach().mean(),
+                '{}/logvar'.format(split): self.logvar.detach(),
+                '{}/kl_loss'.format(split): kl_loss.detach().mean(),
+                '{}/nll_loss'.format(split): nll_loss.detach().mean(),
+                '{}/rec_loss'.format(split): rec_loss.detach().mean(),
+                '{}/d_weight'.format(split): d_weight.detach(),
+                '{}/disc_factor'.format(split): torch.tensor(disc_factor),
+                '{}/g_loss'.format(split): g_loss.detach().mean(),
+            }
             return loss, log
 
         if optimizer_idx == 1:
             # second pass for discriminator update
             if cond is None:
                 logits_real = self.discriminator(inputs.contiguous().detach())
-                logits_fake = self.discriminator(reconstructions.contiguous().detach())
+                logits_fake = self.discriminator(
+                    reconstructions.contiguous().detach()
+                )
             else:
-                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
-                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
+                logits_real = self.discriminator(
+                    torch.cat((inputs.contiguous().detach(), cond), dim=1)
+                )
+                logits_fake = self.discriminator(
+                    torch.cat(
+                        (reconstructions.contiguous().detach(), cond), dim=1
+                    )
+                )
 
-            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
+            disc_factor = adopt_weight(
+                self.disc_factor,
+                global_step,
+                threshold=self.discriminator_iter_start,
+            )
             d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
 
-            log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
-                   "{}/logits_real".format(split): logits_real.detach().mean(),
-                   "{}/logits_fake".format(split): logits_fake.detach().mean()
-                   }
+            log = {
+                '{}/disc_loss'.format(split): d_loss.clone().detach().mean(),
+                '{}/logits_real'.format(split): logits_real.detach().mean(),
+                '{}/logits_fake'.format(split): logits_fake.detach().mean(),
+            }
             return d_loss, log
-
diff --git a/ldm/modules/losses/vqperceptual.py b/ldm/modules/losses/vqperceptual.py
index f69981769e..2f94bf5281 100644
--- a/ldm/modules/losses/vqperceptual.py
+++ b/ldm/modules/losses/vqperceptual.py
@@ -3,21 +3,25 @@ from torch import nn
 import torch.nn.functional as F
 from einops import repeat
 
-from taming.modules.discriminator.model import NLayerDiscriminator, weights_init
+from taming.modules.discriminator.model import (
+    NLayerDiscriminator,
+    weights_init,
+)
 from taming.modules.losses.lpips import LPIPS
 from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss
 
 
 def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights):
     assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0]
-    loss_real = torch.mean(F.relu(1. - logits_real), dim=[1,2,3])
-    loss_fake = torch.mean(F.relu(1. + logits_fake), dim=[1,2,3])
+    loss_real = torch.mean(F.relu(1.0 - logits_real), dim=[1, 2, 3])
+    loss_fake = torch.mean(F.relu(1.0 + logits_fake), dim=[1, 2, 3])
     loss_real = (weights * loss_real).sum() / weights.sum()
     loss_fake = (weights * loss_fake).sum() / weights.sum()
     d_loss = 0.5 * (loss_real + loss_fake)
     return d_loss
 
-def adopt_weight(weight, global_step, threshold=0, value=0.):
+
+def adopt_weight(weight, global_step, threshold=0, value=0.0):
     if global_step < threshold:
         weight = value
     return weight
@@ -26,57 +30,76 @@ def adopt_weight(weight, global_step, threshold=0, value=0.):
 def measure_perplexity(predicted_indices, n_embed):
     # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py
     # eval cluster perplexity. when perplexity == num_embeddings then all clusters are used exactly equally
-    encodings = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed)
+    encodings = (
+        F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed)
+    )
     avg_probs = encodings.mean(0)
     perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp()
     cluster_use = torch.sum(avg_probs > 0)
     return perplexity, cluster_use
 
+
 def l1(x, y):
-    return torch.abs(x-y)
+    return torch.abs(x - y)
 
 
 def l2(x, y):
-    return torch.pow((x-y), 2)
+    return torch.pow((x - y), 2)
 
 
 class VQLPIPSWithDiscriminator(nn.Module):
-    def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0,
-                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
-                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
-                 disc_ndf=64, disc_loss="hinge", n_classes=None, perceptual_loss="lpips",
-                 pixel_loss="l1"):
+    def __init__(
+        self,
+        disc_start,
+        codebook_weight=1.0,
+        pixelloss_weight=1.0,
+        disc_num_layers=3,
+        disc_in_channels=3,
+        disc_factor=1.0,
+        disc_weight=1.0,
+        perceptual_weight=1.0,
+        use_actnorm=False,
+        disc_conditional=False,
+        disc_ndf=64,
+        disc_loss='hinge',
+        n_classes=None,
+        perceptual_loss='lpips',
+        pixel_loss='l1',
+    ):
         super().__init__()
-        assert disc_loss in ["hinge", "vanilla"]
-        assert perceptual_loss in ["lpips", "clips", "dists"]
-        assert pixel_loss in ["l1", "l2"]
+        assert disc_loss in ['hinge', 'vanilla']
+        assert perceptual_loss in ['lpips', 'clips', 'dists']
+        assert pixel_loss in ['l1', 'l2']
         self.codebook_weight = codebook_weight
         self.pixel_weight = pixelloss_weight
-        if perceptual_loss == "lpips":
-            print(f"{self.__class__.__name__}: Running with LPIPS.")
+        if perceptual_loss == 'lpips':
+            print(f'{self.__class__.__name__}: Running with LPIPS.')
             self.perceptual_loss = LPIPS().eval()
         else:
-            raise ValueError(f"Unknown perceptual loss: >> {perceptual_loss} <<")
+            raise ValueError(
+                f'Unknown perceptual loss: >> {perceptual_loss} <<'
+            )
         self.perceptual_weight = perceptual_weight
 
-        if pixel_loss == "l1":
+        if pixel_loss == 'l1':
             self.pixel_loss = l1
         else:
             self.pixel_loss = l2
 
-        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
-                                                 n_layers=disc_num_layers,
-                                                 use_actnorm=use_actnorm,
-                                                 ndf=disc_ndf
-                                                 ).apply(weights_init)
+        self.discriminator = NLayerDiscriminator(
+            input_nc=disc_in_channels,
+            n_layers=disc_num_layers,
+            use_actnorm=use_actnorm,
+            ndf=disc_ndf,
+        ).apply(weights_init)
         self.discriminator_iter_start = disc_start
-        if disc_loss == "hinge":
+        if disc_loss == 'hinge':
             self.disc_loss = hinge_d_loss
-        elif disc_loss == "vanilla":
+        elif disc_loss == 'vanilla':
             self.disc_loss = vanilla_d_loss
         else:
             raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
-        print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.")
+        print(f'VQLPIPSWithDiscriminator running with {disc_loss} loss.')
         self.disc_factor = disc_factor
         self.discriminator_weight = disc_weight
         self.disc_conditional = disc_conditional
@@ -84,31 +107,53 @@ class VQLPIPSWithDiscriminator(nn.Module):
 
     def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
         if last_layer is not None:
-            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
-            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
+            nll_grads = torch.autograd.grad(
+                nll_loss, last_layer, retain_graph=True
+            )[0]
+            g_grads = torch.autograd.grad(
+                g_loss, last_layer, retain_graph=True
+            )[0]
         else:
-            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
-            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
+            nll_grads = torch.autograd.grad(
+                nll_loss, self.last_layer[0], retain_graph=True
+            )[0]
+            g_grads = torch.autograd.grad(
+                g_loss, self.last_layer[0], retain_graph=True
+            )[0]
 
         d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
         d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
         d_weight = d_weight * self.discriminator_weight
         return d_weight
 
-    def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx,
-                global_step, last_layer=None, cond=None, split="train", predicted_indices=None):
+    def forward(
+        self,
+        codebook_loss,
+        inputs,
+        reconstructions,
+        optimizer_idx,
+        global_step,
+        last_layer=None,
+        cond=None,
+        split='train',
+        predicted_indices=None,
+    ):
         if not exists(codebook_loss):
-            codebook_loss = torch.tensor([0.]).to(inputs.device)
-        #rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
-        rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous())
+            codebook_loss = torch.tensor([0.0]).to(inputs.device)
+        # rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
+        rec_loss = self.pixel_loss(
+            inputs.contiguous(), reconstructions.contiguous()
+        )
         if self.perceptual_weight > 0:
-            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
+            p_loss = self.perceptual_loss(
+                inputs.contiguous(), reconstructions.contiguous()
+            )
             rec_loss = rec_loss + self.perceptual_weight * p_loss
         else:
             p_loss = torch.tensor([0.0])
 
         nll_loss = rec_loss
-        #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
+        # nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
         nll_loss = torch.mean(nll_loss)
 
         # now the GAN part
@@ -119,49 +164,77 @@ class VQLPIPSWithDiscriminator(nn.Module):
                 logits_fake = self.discriminator(reconstructions.contiguous())
             else:
                 assert self.disc_conditional
-                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
+                logits_fake = self.discriminator(
+                    torch.cat((reconstructions.contiguous(), cond), dim=1)
+                )
             g_loss = -torch.mean(logits_fake)
 
             try:
-                d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
+                d_weight = self.calculate_adaptive_weight(
+                    nll_loss, g_loss, last_layer=last_layer
+                )
             except RuntimeError:
                 assert not self.training
                 d_weight = torch.tensor(0.0)
 
-            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
-            loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean()
+            disc_factor = adopt_weight(
+                self.disc_factor,
+                global_step,
+                threshold=self.discriminator_iter_start,
+            )
+            loss = (
+                nll_loss
+                + d_weight * disc_factor * g_loss
+                + self.codebook_weight * codebook_loss.mean()
+            )
 
-            log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
-                   "{}/quant_loss".format(split): codebook_loss.detach().mean(),
-                   "{}/nll_loss".format(split): nll_loss.detach().mean(),
-                   "{}/rec_loss".format(split): rec_loss.detach().mean(),
-                   "{}/p_loss".format(split): p_loss.detach().mean(),
-                   "{}/d_weight".format(split): d_weight.detach(),
-                   "{}/disc_factor".format(split): torch.tensor(disc_factor),
-                   "{}/g_loss".format(split): g_loss.detach().mean(),
-                   }
+            log = {
+                '{}/total_loss'.format(split): loss.clone().detach().mean(),
+                '{}/quant_loss'.format(split): codebook_loss.detach().mean(),
+                '{}/nll_loss'.format(split): nll_loss.detach().mean(),
+                '{}/rec_loss'.format(split): rec_loss.detach().mean(),
+                '{}/p_loss'.format(split): p_loss.detach().mean(),
+                '{}/d_weight'.format(split): d_weight.detach(),
+                '{}/disc_factor'.format(split): torch.tensor(disc_factor),
+                '{}/g_loss'.format(split): g_loss.detach().mean(),
+            }
             if predicted_indices is not None:
                 assert self.n_classes is not None
                 with torch.no_grad():
-                    perplexity, cluster_usage = measure_perplexity(predicted_indices, self.n_classes)
-                log[f"{split}/perplexity"] = perplexity
-                log[f"{split}/cluster_usage"] = cluster_usage
+                    perplexity, cluster_usage = measure_perplexity(
+                        predicted_indices, self.n_classes
+                    )
+                log[f'{split}/perplexity'] = perplexity
+                log[f'{split}/cluster_usage'] = cluster_usage
             return loss, log
 
         if optimizer_idx == 1:
             # second pass for discriminator update
             if cond is None:
                 logits_real = self.discriminator(inputs.contiguous().detach())
-                logits_fake = self.discriminator(reconstructions.contiguous().detach())
+                logits_fake = self.discriminator(
+                    reconstructions.contiguous().detach()
+                )
             else:
-                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
-                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
+                logits_real = self.discriminator(
+                    torch.cat((inputs.contiguous().detach(), cond), dim=1)
+                )
+                logits_fake = self.discriminator(
+                    torch.cat(
+                        (reconstructions.contiguous().detach(), cond), dim=1
+                    )
+                )
 
-            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
+            disc_factor = adopt_weight(
+                self.disc_factor,
+                global_step,
+                threshold=self.discriminator_iter_start,
+            )
             d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
 
-            log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
-                   "{}/logits_real".format(split): logits_real.detach().mean(),
-                   "{}/logits_fake".format(split): logits_fake.detach().mean()
-                   }
+            log = {
+                '{}/disc_loss'.format(split): d_loss.clone().detach().mean(),
+                '{}/logits_real'.format(split): logits_real.detach().mean(),
+                '{}/logits_fake'.format(split): logits_fake.detach().mean(),
+            }
             return d_loss, log
diff --git a/ldm/modules/x_transformer.py b/ldm/modules/x_transformer.py
index 1316dbd505..d6c4cc6881 100644
--- a/ldm/modules/x_transformer.py
+++ b/ldm/modules/x_transformer.py
@@ -11,15 +11,13 @@ from einops import rearrange, repeat, reduce
 
 DEFAULT_DIM_HEAD = 64
 
-Intermediates = namedtuple('Intermediates', [
-    'pre_softmax_attn',
-    'post_softmax_attn'
-])
+Intermediates = namedtuple(
+    'Intermediates', ['pre_softmax_attn', 'post_softmax_attn']
+)
 
-LayerIntermediates = namedtuple('Intermediates', [
-    'hiddens',
-    'attn_intermediates'
-])
+LayerIntermediates = namedtuple(
+    'Intermediates', ['hiddens', 'attn_intermediates']
+)
 
 
 class AbsolutePositionalEmbedding(nn.Module):
@@ -39,11 +37,16 @@ class AbsolutePositionalEmbedding(nn.Module):
 class FixedPositionalEmbedding(nn.Module):
     def __init__(self, dim):
         super().__init__()
-        inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
+        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
         self.register_buffer('inv_freq', inv_freq)
 
     def forward(self, x, seq_dim=1, offset=0):
-        t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset
+        t = (
+            torch.arange(x.shape[seq_dim], device=x.device).type_as(
+                self.inv_freq
+            )
+            + offset
+        )
         sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
         emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
         return emb[None, :, :]
@@ -51,6 +54,7 @@ class FixedPositionalEmbedding(nn.Module):
 
 # helpers
 
+
 def exists(val):
     return val is not None
 
@@ -64,18 +68,21 @@ def default(val, d):
 def always(val):
     def inner(*args, **kwargs):
         return val
+
     return inner
 
 
 def not_equals(val):
     def inner(x):
         return x != val
+
     return inner
 
 
 def equals(val):
     def inner(x):
         return x == val
+
     return inner
 
 
@@ -85,6 +92,7 @@ def max_neg_value(tensor):
 
 # keyword argument helpers
 
+
 def pick_and_pop(keys, d):
     values = list(map(lambda key: d.pop(key), keys))
     return dict(zip(keys, values))
@@ -108,8 +116,15 @@ def group_by_key_prefix(prefix, d):
 
 
 def groupby_prefix_and_trim(prefix, d):
-    kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
-    kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
+    kwargs_with_prefix, kwargs = group_dict_by_key(
+        partial(string_begins_with, prefix), d
+    )
+    kwargs_without_prefix = dict(
+        map(
+            lambda x: (x[0][len(prefix) :], x[1]),
+            tuple(kwargs_with_prefix.items()),
+        )
+    )
     return kwargs_without_prefix, kwargs
 
 
@@ -139,7 +154,7 @@ class Rezero(nn.Module):
 class ScaleNorm(nn.Module):
     def __init__(self, dim, eps=1e-5):
         super().__init__()
-        self.scale = dim ** -0.5
+        self.scale = dim**-0.5
         self.eps = eps
         self.g = nn.Parameter(torch.ones(1))
 
@@ -151,7 +166,7 @@ class ScaleNorm(nn.Module):
 class RMSNorm(nn.Module):
     def __init__(self, dim, eps=1e-8):
         super().__init__()
-        self.scale = dim ** -0.5
+        self.scale = dim**-0.5
         self.eps = eps
         self.g = nn.Parameter(torch.ones(dim))
 
@@ -173,7 +188,7 @@ class GRUGating(nn.Module):
     def forward(self, x, residual):
         gated_output = self.gru(
             rearrange(x, 'b n d -> (b n) d'),
-            rearrange(residual, 'b n d -> (b n) d')
+            rearrange(residual, 'b n d -> (b n) d'),
         )
 
         return gated_output.reshape_as(x)
@@ -181,6 +196,7 @@ class GRUGating(nn.Module):
 
 # feedforward
 
+
 class GEGLU(nn.Module):
     def __init__(self, dim_in, dim_out):
         super().__init__()
@@ -192,19 +208,18 @@ class GEGLU(nn.Module):
 
 
 class FeedForward(nn.Module):
-    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
+    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0):
         super().__init__()
         inner_dim = int(dim * mult)
         dim_out = default(dim_out, dim)
-        project_in = nn.Sequential(
-            nn.Linear(dim, inner_dim),
-            nn.GELU()
-        ) if not glu else GEGLU(dim, inner_dim)
+        project_in = (
+            nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
+            if not glu
+            else GEGLU(dim, inner_dim)
+        )
 
         self.net = nn.Sequential(
-            project_in,
-            nn.Dropout(dropout),
-            nn.Linear(inner_dim, dim_out)
+            project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)
         )
 
     def forward(self, x):
@@ -214,23 +229,25 @@ class FeedForward(nn.Module):
 # attention.
 class Attention(nn.Module):
     def __init__(
-            self,
-            dim,
-            dim_head=DEFAULT_DIM_HEAD,
-            heads=8,
-            causal=False,
-            mask=None,
-            talking_heads=False,
-            sparse_topk=None,
-            use_entmax15=False,
-            num_mem_kv=0,
-            dropout=0.,
-            on_attn=False
+        self,
+        dim,
+        dim_head=DEFAULT_DIM_HEAD,
+        heads=8,
+        causal=False,
+        mask=None,
+        talking_heads=False,
+        sparse_topk=None,
+        use_entmax15=False,
+        num_mem_kv=0,
+        dropout=0.0,
+        on_attn=False,
     ):
         super().__init__()
         if use_entmax15:
-            raise NotImplementedError("Check out entmax activation instead of softmax activation!")
-        self.scale = dim_head ** -0.5
+            raise NotImplementedError(
+                'Check out entmax activation instead of softmax activation!'
+            )
+        self.scale = dim_head**-0.5
         self.heads = heads
         self.causal = causal
         self.mask = mask
@@ -252,7 +269,7 @@ class Attention(nn.Module):
         self.sparse_topk = sparse_topk
 
         # entmax
-        #self.attn_fn = entmax15 if use_entmax15 else F.softmax
+        # self.attn_fn = entmax15 if use_entmax15 else F.softmax
         self.attn_fn = F.softmax
 
         # add memory key / values
@@ -263,20 +280,29 @@ class Attention(nn.Module):
 
         # attention on attention
         self.attn_on_attn = on_attn
-        self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim)
+        self.to_out = (
+            nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU())
+            if on_attn
+            else nn.Linear(inner_dim, dim)
+        )
 
     def forward(
-            self,
-            x,
-            context=None,
-            mask=None,
-            context_mask=None,
-            rel_pos=None,
-            sinusoidal_emb=None,
-            prev_attn=None,
-            mem=None
+        self,
+        x,
+        context=None,
+        mask=None,
+        context_mask=None,
+        rel_pos=None,
+        sinusoidal_emb=None,
+        prev_attn=None,
+        mem=None,
     ):
-        b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device
+        b, n, _, h, talking_heads, device = (
+            *x.shape,
+            self.heads,
+            self.talking_heads,
+            x.device,
+        )
         kv_input = default(context, x)
 
         q_input = x
@@ -297,23 +323,35 @@ class Attention(nn.Module):
         k = self.to_k(k_input)
         v = self.to_v(v_input)
 
-        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
+        q, k, v = map(
+            lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)
+        )
 
         input_mask = None
         if any(map(exists, (mask, context_mask))):
-            q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool())
+            q_mask = default(
+                mask, lambda: torch.ones((b, n), device=device).bool()
+            )
             k_mask = q_mask if not exists(context) else context_mask
-            k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool())
+            k_mask = default(
+                k_mask,
+                lambda: torch.ones((b, k.shape[-2]), device=device).bool(),
+            )
             q_mask = rearrange(q_mask, 'b i -> b () i ()')
             k_mask = rearrange(k_mask, 'b j -> b () () j')
             input_mask = q_mask * k_mask
 
         if self.num_mem_kv > 0:
-            mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v))
+            mem_k, mem_v = map(
+                lambda t: repeat(t, 'h n d -> b h n d', b=b),
+                (self.mem_k, self.mem_v),
+            )
             k = torch.cat((mem_k, k), dim=-2)
             v = torch.cat((mem_v, v), dim=-2)
             if exists(input_mask):
-                input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True)
+                input_mask = F.pad(
+                    input_mask, (self.num_mem_kv, 0), value=True
+                )
 
         dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
         mask_value = max_neg_value(dots)
@@ -324,7 +362,9 @@ class Attention(nn.Module):
         pre_softmax_attn = dots
 
         if talking_heads:
-            dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous()
+            dots = einsum(
+                'b h i j, h k -> b k i j', dots, self.pre_softmax_proj
+            ).contiguous()
 
         if exists(rel_pos):
             dots = rel_pos(dots)
@@ -336,7 +376,9 @@ class Attention(nn.Module):
         if self.causal:
             i, j = dots.shape[-2:]
             r = torch.arange(i, device=device)
-            mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j')
+            mask = rearrange(r, 'i -> () () i ()') < rearrange(
+                r, 'j -> () () () j'
+            )
             mask = F.pad(mask, (j - i, 0), value=False)
             dots.masked_fill_(mask, mask_value)
             del mask
@@ -354,14 +396,16 @@ class Attention(nn.Module):
         attn = self.dropout(attn)
 
         if talking_heads:
-            attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous()
+            attn = einsum(
+                'b h i j, h k -> b k i j', attn, self.post_softmax_proj
+            ).contiguous()
 
         out = einsum('b h i j, b h j d -> b h i d', attn, v)
         out = rearrange(out, 'b h n d -> b n (h d)')
 
         intermediates = Intermediates(
             pre_softmax_attn=pre_softmax_attn,
-            post_softmax_attn=post_softmax_attn
+            post_softmax_attn=post_softmax_attn,
         )
 
         return self.to_out(out), intermediates
@@ -369,28 +413,28 @@ class Attention(nn.Module):
 
 class AttentionLayers(nn.Module):
     def __init__(
-            self,
-            dim,
-            depth,
-            heads=8,
-            causal=False,
-            cross_attend=False,
-            only_cross=False,
-            use_scalenorm=False,
-            use_rmsnorm=False,
-            use_rezero=False,
-            rel_pos_num_buckets=32,
-            rel_pos_max_distance=128,
-            position_infused_attn=False,
-            custom_layers=None,
-            sandwich_coef=None,
-            par_ratio=None,
-            residual_attn=False,
-            cross_residual_attn=False,
-            macaron=False,
-            pre_norm=True,
-            gate_residual=False,
-            **kwargs
+        self,
+        dim,
+        depth,
+        heads=8,
+        causal=False,
+        cross_attend=False,
+        only_cross=False,
+        use_scalenorm=False,
+        use_rmsnorm=False,
+        use_rezero=False,
+        rel_pos_num_buckets=32,
+        rel_pos_max_distance=128,
+        position_infused_attn=False,
+        custom_layers=None,
+        sandwich_coef=None,
+        par_ratio=None,
+        residual_attn=False,
+        cross_residual_attn=False,
+        macaron=False,
+        pre_norm=True,
+        gate_residual=False,
+        **kwargs,
     ):
         super().__init__()
         ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
@@ -403,10 +447,14 @@ class AttentionLayers(nn.Module):
         self.layers = nn.ModuleList([])
 
         self.has_pos_emb = position_infused_attn
-        self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None
+        self.pia_pos_emb = (
+            FixedPositionalEmbedding(dim) if position_infused_attn else None
+        )
         self.rotary_pos_emb = always(None)
 
-        assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
+        assert (
+            rel_pos_num_buckets <= rel_pos_max_distance
+        ), 'number of relative position buckets must be less than the relative position max distance'
         self.rel_pos = None
 
         self.pre_norm = pre_norm
@@ -438,15 +486,27 @@ class AttentionLayers(nn.Module):
             assert 1 < par_ratio <= par_depth, 'par ratio out of range'
             default_block = tuple(filter(not_equals('f'), default_block))
             par_attn = par_depth // par_ratio
-            depth_cut = par_depth * 2 // 3  # 2 / 3 attention layer cutoff suggested by PAR paper
+            depth_cut = (
+                par_depth * 2 // 3
+            )  # 2 / 3 attention layer cutoff suggested by PAR paper
             par_width = (depth_cut + depth_cut // par_attn) // par_attn
-            assert len(default_block) <= par_width, 'default block is too large for par_ratio'
-            par_block = default_block + ('f',) * (par_width - len(default_block))
+            assert (
+                len(default_block) <= par_width
+            ), 'default block is too large for par_ratio'
+            par_block = default_block + ('f',) * (
+                par_width - len(default_block)
+            )
             par_head = par_block * par_attn
             layer_types = par_head + ('f',) * (par_depth - len(par_head))
         elif exists(sandwich_coef):
-            assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
-            layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
+            assert (
+                sandwich_coef > 0 and sandwich_coef <= depth
+            ), 'sandwich coefficient should be less than the depth'
+            layer_types = (
+                ('a',) * sandwich_coef
+                + default_block * (depth - sandwich_coef)
+                + ('f',) * sandwich_coef
+            )
         else:
             layer_types = default_block * depth
 
@@ -455,7 +515,9 @@ class AttentionLayers(nn.Module):
 
         for layer_type in self.layer_types:
             if layer_type == 'a':
-                layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs)
+                layer = Attention(
+                    dim, heads=heads, causal=causal, **attn_kwargs
+                )
             elif layer_type == 'c':
                 layer = Attention(dim, heads=heads, **attn_kwargs)
             elif layer_type == 'f':
@@ -472,21 +534,17 @@ class AttentionLayers(nn.Module):
             else:
                 residual_fn = Residual()
 
-            self.layers.append(nn.ModuleList([
-                norm_fn(),
-                layer,
-                residual_fn
-            ]))
+            self.layers.append(nn.ModuleList([norm_fn(), layer, residual_fn]))
 
     def forward(
-            self,
-            x,
-            context=None,
-            mask=None,
-            context_mask=None,
-            mems=None,
-            return_hiddens=False,
-            **kwargs
+        self,
+        x,
+        context=None,
+        mask=None,
+        context_mask=None,
+        mems=None,
+        return_hiddens=False,
+        **kwargs,
     ):
         hiddens = []
         intermediates = []
@@ -495,7 +553,9 @@ class AttentionLayers(nn.Module):
 
         mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
 
-        for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)):
+        for ind, (layer_type, (norm, block, residual_fn)) in enumerate(
+            zip(self.layer_types, self.layers)
+        ):
             is_last = ind == (len(self.layers) - 1)
 
             if layer_type == 'a':
@@ -508,10 +568,22 @@ class AttentionLayers(nn.Module):
                 x = norm(x)
 
             if layer_type == 'a':
-                out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos,
-                                   prev_attn=prev_attn, mem=layer_mem)
+                out, inter = block(
+                    x,
+                    mask=mask,
+                    sinusoidal_emb=self.pia_pos_emb,
+                    rel_pos=self.rel_pos,
+                    prev_attn=prev_attn,
+                    mem=layer_mem,
+                )
             elif layer_type == 'c':
-                out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn)
+                out, inter = block(
+                    x,
+                    context=context,
+                    mask=mask,
+                    context_mask=context_mask,
+                    prev_attn=prev_cross_attn,
+                )
             elif layer_type == 'f':
                 out = block(x)
 
@@ -530,8 +602,7 @@ class AttentionLayers(nn.Module):
 
         if return_hiddens:
             intermediates = LayerIntermediates(
-                hiddens=hiddens,
-                attn_intermediates=intermediates
+                hiddens=hiddens, attn_intermediates=intermediates
             )
 
             return x, intermediates
@@ -545,23 +616,24 @@ class Encoder(AttentionLayers):
         super().__init__(causal=False, **kwargs)
 
 
-
 class TransformerWrapper(nn.Module):
     def __init__(
-            self,
-            *,
-            num_tokens,
-            max_seq_len,
-            attn_layers,
-            emb_dim=None,
-            max_mem_len=0.,
-            emb_dropout=0.,
-            num_memory_tokens=None,
-            tie_embedding=False,
-            use_pos_emb=True
+        self,
+        *,
+        num_tokens,
+        max_seq_len,
+        attn_layers,
+        emb_dim=None,
+        max_mem_len=0.0,
+        emb_dropout=0.0,
+        num_memory_tokens=None,
+        tie_embedding=False,
+        use_pos_emb=True,
     ):
         super().__init__()
-        assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
+        assert isinstance(
+            attn_layers, AttentionLayers
+        ), 'attention layers must be one of Encoder or Decoder'
 
         dim = attn_layers.dim
         emb_dim = default(emb_dim, dim)
@@ -571,23 +643,34 @@ class TransformerWrapper(nn.Module):
         self.num_tokens = num_tokens
 
         self.token_emb = nn.Embedding(num_tokens, emb_dim)
-        self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if (
-                    use_pos_emb and not attn_layers.has_pos_emb) else always(0)
+        self.pos_emb = (
+            AbsolutePositionalEmbedding(emb_dim, max_seq_len)
+            if (use_pos_emb and not attn_layers.has_pos_emb)
+            else always(0)
+        )
         self.emb_dropout = nn.Dropout(emb_dropout)
 
-        self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
+        self.project_emb = (
+            nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
+        )
         self.attn_layers = attn_layers
         self.norm = nn.LayerNorm(dim)
 
         self.init_()
 
-        self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()
+        self.to_logits = (
+            nn.Linear(dim, num_tokens)
+            if not tie_embedding
+            else lambda t: t @ self.token_emb.weight.t()
+        )
 
         # memory tokens (like [cls]) from Memory Transformers paper
         num_memory_tokens = default(num_memory_tokens, 0)
         self.num_memory_tokens = num_memory_tokens
         if num_memory_tokens > 0:
-            self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
+            self.memory_tokens = nn.Parameter(
+                torch.randn(num_memory_tokens, dim)
+            )
 
             # let funnel encoder know number of memory tokens, if specified
             if hasattr(attn_layers, 'num_memory_tokens'):
@@ -597,20 +680,20 @@ class TransformerWrapper(nn.Module):
         nn.init.normal_(self.token_emb.weight, std=0.02)
 
     def forward(
-            self,
-            x,
-            return_embeddings=False,
-            mask=None,
-            return_mems=False,
-            return_attn=False,
-            mems=None,
-            embedding_manager=None,
-            **kwargs
+        self,
+        x,
+        return_embeddings=False,
+        mask=None,
+        return_mems=False,
+        return_attn=False,
+        mems=None,
+        embedding_manager=None,
+        **kwargs,
     ):
         b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens
 
         embedded_x = self.token_emb(x)
-        
+
         if embedding_manager:
             x = embedding_manager(x, embedded_x)
         else:
@@ -629,7 +712,9 @@ class TransformerWrapper(nn.Module):
             if exists(mask):
                 mask = F.pad(mask, (num_mem, 0), value=True)
 
-        x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs)
+        x, intermediates = self.attn_layers(
+            x, mask=mask, mems=mems, return_hiddens=True, **kwargs
+        )
         x = self.norm(x)
 
         mem, x = x[:, :num_mem], x[:, num_mem:]
@@ -638,13 +723,30 @@ class TransformerWrapper(nn.Module):
 
         if return_mems:
             hiddens = intermediates.hiddens
-            new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens
-            new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
+            new_mems = (
+                list(
+                    map(
+                        lambda pair: torch.cat(pair, dim=-2),
+                        zip(mems, hiddens),
+                    )
+                )
+                if exists(mems)
+                else hiddens
+            )
+            new_mems = list(
+                map(
+                    lambda t: t[..., -self.max_mem_len :, :].detach(), new_mems
+                )
+            )
             return out, new_mems
 
         if return_attn:
-            attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
+            attn_maps = list(
+                map(
+                    lambda t: t.post_softmax_attn,
+                    intermediates.attn_intermediates,
+                )
+            )
             return out, attn_maps
 
         return out
-
diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index f1f88bba5e..8aad3557af 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -24,10 +24,10 @@ import re
 import traceback
 
 from ldm.util import instantiate_from_config
-from ldm.models.diffusion.ddim     import DDIMSampler
-from ldm.models.diffusion.plms     import PLMSSampler
+from ldm.models.diffusion.ddim import DDIMSampler
+from ldm.models.diffusion.plms import PLMSSampler
 from ldm.models.diffusion.ksampler import KSampler
-from ldm.dream.pngwriter           import PngWriter
+from ldm.dream.pngwriter import PngWriter
 
 """Simplified text to image API for stable diffusion/latent diffusion
 
@@ -93,67 +93,69 @@ still work.
 
 class T2I:
     """T2I class
-    Attributes
-    ----------
-    model
-    config
-    iterations
-    batch_size
-    steps
-    seed
-    sampler_name
-    width
-    height
-    cfg_scale
-    latent_channels
-    downsampling_factor
-    precision
-    strength
-    embedding_path
+        Attributes
+        ----------
+        model
+        config
+        iterations
+        batch_size
+        steps
+        seed
+        sampler_name
+        width
+        height
+        cfg_scale
+        latent_channels
+        downsampling_factor
+        precision
+        strength
+        embedding_path
 
-The vast majority of these arguments default to reasonable values.
+    The vast majority of these arguments default to reasonable values.
 """
-    def __init__(self,
-                 batch_size=1,
-                 iterations = 1,
-                 steps=50,
-                 seed=None,
-                 cfg_scale=7.5,
-                 weights="models/ldm/stable-diffusion-v1/model.ckpt",
-                 config = "configs/stable-diffusion/v1-inference.yaml",
-                 width=512,
-                 height=512,
-                 sampler_name="klms",
-                 latent_channels=4,
-                 downsampling_factor=8,
-                 ddim_eta=0.0,  # deterministic
-                 precision='autocast',
-                 full_precision=False,
-                 strength=0.75, # default in scripts/img2img.py
-                 embedding_path=None,
-                 latent_diffusion_weights=False,  # just to keep track of this parameter when regenerating prompt
-                 device='cuda',
-                 gfpgan=None,
+
+    def __init__(
+        self,
+        batch_size=1,
+        iterations=1,
+        steps=50,
+        seed=None,
+        cfg_scale=7.5,
+        weights='models/ldm/stable-diffusion-v1/model.ckpt',
+        config='configs/stable-diffusion/v1-inference.yaml',
+        width=512,
+        height=512,
+        sampler_name='klms',
+        latent_channels=4,
+        downsampling_factor=8,
+        ddim_eta=0.0,  # deterministic
+        precision='autocast',
+        full_precision=False,
+        strength=0.75,  # default in scripts/img2img.py
+        embedding_path=None,
+        latent_diffusion_weights=False,  # just to keep track of this parameter when regenerating prompt
+        device='cuda',
+        gfpgan=None,
     ):
-        self.batch_size      = batch_size
+        self.batch_size = batch_size
         self.iterations = iterations
-        self.width      = width
-        self.height     = height
-        self.steps      = steps
-        self.cfg_scale  = cfg_scale
-        self.weights    = weights
-        self.config     = config
-        self.sampler_name  = sampler_name
-        self.latent_channels     = latent_channels
+        self.width = width
+        self.height = height
+        self.steps = steps
+        self.cfg_scale = cfg_scale
+        self.weights = weights
+        self.config = config
+        self.sampler_name = sampler_name
+        self.latent_channels = latent_channels
         self.downsampling_factor = downsampling_factor
-        self.ddim_eta            = ddim_eta
-        self.precision           = precision
-        self.full_precision      = full_precision
-        self.strength            = strength
-        self.embedding_path      = embedding_path
-        self.model      = None     # empty for now
-        self.sampler    = None
-        self.latent_diffusion_weights=latent_diffusion_weights
+        self.ddim_eta = ddim_eta
+        self.precision = precision
+        self.full_precision = full_precision
+        self.strength = strength
+        self.embedding_path = embedding_path
+        self.model = None     # empty for now
+        self.sampler = None
+        self.latent_diffusion_weights = latent_diffusion_weights
         self.device = device
         self.gfpgan = gfpgan
         if seed is None:
@@ -162,49 +164,55 @@ The vast majority of these arguments default to reasonable values.
             self.seed = seed
         transformers.logging.set_verbosity_error()
 
-    def prompt2png(self,prompt,outdir,**kwargs):
-        '''
+    def prompt2png(self, prompt, outdir, **kwargs):
+        """
         Takes a prompt and an output directory, writes out the requested number
         of PNG files, and returns an array of [[filename,seed],[filename,seed]...]
         Optional named arguments are the same as those passed to T2I and prompt2image()
-        '''
-        results      = self.prompt2image(prompt,**kwargs)
-        pngwriter    = PngWriter(outdir,prompt,kwargs.get('batch_size',self.batch_size))
+        """
+        results = self.prompt2image(prompt, **kwargs)
+        pngwriter = PngWriter(
+            outdir, prompt, kwargs.get('batch_size', self.batch_size)
+        )
         for r in results:
             metadata_str = f'prompt2png("{prompt}" {kwargs} seed={r[1]}'   # gets written into the PNG
-            pngwriter.write_image(r[0],r[1])
+            pngwriter.write_image(r[0], r[1])
         return pngwriter.files_written
 
-    def txt2img(self,prompt,**kwargs):
-        outdir = kwargs.get('outdir','outputs/img-samples')
-        return self.prompt2png(prompt,outdir,**kwargs)
+    def txt2img(self, prompt, **kwargs):
+        outdir = kwargs.get('outdir', 'outputs/img-samples')
+        return self.prompt2png(prompt, outdir, **kwargs)
 
-    def img2img(self,prompt,**kwargs):
-        outdir = kwargs.get('outdir','outputs/img-samples')
-        assert 'init_img' in kwargs,'call to img2img() must include the init_img argument'
-        return self.prompt2png(prompt,outdir,**kwargs)
+    def img2img(self, prompt, **kwargs):
+        outdir = kwargs.get('outdir', 'outputs/img-samples')
+        assert (
+            'init_img' in kwargs
+        ), 'call to img2img() must include the init_img argument'
+        return self.prompt2png(prompt, outdir, **kwargs)
 
-    def prompt2image(self,
-                     # these are common
-                     prompt,
-                     batch_size=None,
-                     iterations=None,
-                     steps=None,
-                     seed=None,
-                     cfg_scale=None,
-                     ddim_eta=None,
-                     skip_normalize=False,
-                     image_callback=None,
-                     # these are specific to txt2img
-                     width=None,
-                     height=None,
-                     # these are specific to img2img
-                     init_img=None,
-                     strength=None,
-                     gfpgan_strength=None,
-                     variants=None,
-                     **args):   # eat up additional cruft
-        '''
+    def prompt2image(
+        self,
+        # these are common
+        prompt,
+        batch_size=None,
+        iterations=None,
+        steps=None,
+        seed=None,
+        cfg_scale=None,
+        ddim_eta=None,
+        skip_normalize=False,
+        image_callback=None,
+        # these are specific to txt2img
+        width=None,
+        height=None,
+        # these are specific to img2img
+        init_img=None,
+        strength=None,
+        gfpgan_strength=None,
+        variants=None,
+        **args,
+    ):   # eat up additional cruft
+        """
         ldm.prompt2image() is the common entry point for txt2img() and img2img()
         It takes the following arguments:
            prompt                          // prompt string (no default)
@@ -232,118 +240,157 @@ The vast majority of these arguments default to reasonable values.
         The callback used by the prompt2png() can be found in ldm/dream_util.py. It contains code
         to create the requested output directory, select a unique informative name for each image, and
         write the prompt into the PNG metadata.
-        '''
-        steps      = steps      or self.steps
-        seed       = seed       or self.seed
-        width      = width      or self.width
-        height     = height     or self.height
-        cfg_scale  = cfg_scale  or self.cfg_scale
-        ddim_eta   = ddim_eta   or self.ddim_eta
+        """
+        steps = steps or self.steps
+        seed = seed or self.seed
+        width = width or self.width
+        height = height or self.height
+        cfg_scale = cfg_scale or self.cfg_scale
+        ddim_eta = ddim_eta or self.ddim_eta
         batch_size = batch_size or self.batch_size
         iterations = iterations or self.iterations
-        strength   = strength   or self.strength
+        strength = strength or self.strength
 
-        model = self.load_model()  # will instantiate the model or return it from cache
-        assert cfg_scale>1.0, "CFG_Scale (-C) must be >1.0"
-        assert 0. <= strength <= 1., 'can only work with strength in [0.0, 1.0]'
-        w = int(width/64)  * 64
-        h = int(height/64) * 64
+        model = (
+            self.load_model()
+        )  # will instantiate the model or return it from cache
+        assert cfg_scale > 1.0, 'CFG_Scale (-C) must be >1.0'
+        assert (
+            0.0 <= strength <= 1.0
+        ), 'can only work with strength in [0.0, 1.0]'
+        w = int(width / 64) * 64
+        h = int(height / 64) * 64
         if h != height or w != width:
-            print(f'Height and width must be multiples of 64. Resizing to {h}x{w}')
+            print(
+                f'Height and width must be multiples of 64. Resizing to {h}x{w}'
+            )
             height = h
-            width  = w
+            width = w
 
-        scope = autocast if self.precision=="autocast" else nullcontext
+        scope = autocast if self.precision == 'autocast' else nullcontext
 
         tic = time.time()
         results = list()
 
         try:
             if init_img:
-                assert os.path.exists(init_img),f'{init_img}: File not found'
-                images_iterator = self._img2img(prompt,
-                                        precision_scope=scope,
-                                        batch_size=batch_size,
-                                        steps=steps,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
-                                        skip_normalize=skip_normalize,
-                                        init_img=init_img,strength=strength)
+                assert os.path.exists(init_img), f'{init_img}: File not found'
+                images_iterator = self._img2img(
+                    prompt,
+                    precision_scope=scope,
+                    batch_size=batch_size,
+                    steps=steps,
+                    cfg_scale=cfg_scale,
+                    ddim_eta=ddim_eta,
+                    skip_normalize=skip_normalize,
+                    init_img=init_img,
+                    strength=strength,
+                )
             else:
-                images_iterator = self._txt2img(prompt,
-                                        precision_scope=scope,
-                                        batch_size=batch_size,
-                                        steps=steps,cfg_scale=cfg_scale,ddim_eta=ddim_eta,
-                                        skip_normalize=skip_normalize,
-                                        width=width,height=height)
+                images_iterator = self._txt2img(
+                    prompt,
+                    precision_scope=scope,
+                    batch_size=batch_size,
+                    steps=steps,
+                    cfg_scale=cfg_scale,
+                    ddim_eta=ddim_eta,
+                    skip_normalize=skip_normalize,
+                    width=width,
+                    height=height,
+                )
 
             with scope(self.device.type), self.model.ema_scope():
-                for n in trange(iterations, desc="Sampling"):
+                for n in trange(iterations, desc='Sampling'):
                     seed_everything(seed)
                     iter_images = next(images_iterator)
                     for image in iter_images:
                         try:
                             if gfpgan_strength > 0:
-                                image = self._run_gfpgan(image, gfpgan_strength)
+                                image = self._run_gfpgan(
+                                    image, gfpgan_strength
+                                )
                         except Exception as e:
-                            print(f"Error running GFPGAN - Your image was not enhanced.\n{e}")
+                            print(
+                                f'Error running GFPGAN - Your image was not enhanced.\n{e}'
+                            )
                         results.append([image, seed])
                         if image_callback is not None:
-                            image_callback(image,seed)
+                            image_callback(image, seed)
                     seed = self._new_seed()
 
         except KeyboardInterrupt:
             print('*interrupted*')
-            print('Partial results will be returned; if --grid was requested, nothing will be returned.')
+            print(
+                'Partial results will be returned; if --grid was requested, nothing will be returned.'
+            )
         except RuntimeError as e:
             print(str(e))
             print('Are you sure your system has an adequate NVIDIA GPU?')
 
-        toc  = time.time()
-        print(f'{len(results)} images generated in',"%4.2fs"% (toc-tic))
+        toc = time.time()
+        print(f'{len(results)} images generated in', '%4.2fs' % (toc - tic))
         return results
 
     @torch.no_grad()
-    def _txt2img(self,
-                 prompt,
-                 precision_scope,
-                 batch_size,
-                 steps,cfg_scale,ddim_eta,
-                 skip_normalize,
-                 width,height):
+    def _txt2img(
+        self,
+        prompt,
+        precision_scope,
+        batch_size,
+        steps,
+        cfg_scale,
+        ddim_eta,
+        skip_normalize,
+        width,
+        height,
+    ):
         """
         An infinite iterator of images from the prompt.
         """
 
-
         sampler = self.sampler
 
         while True:
             uc, c = self._get_uc_and_c(prompt, batch_size, skip_normalize)
-            shape = [self.latent_channels, height // self.downsampling_factor, width // self.downsampling_factor]
-            samples, _ = sampler.sample(S=steps,
-                                                conditioning=c,
-                                                batch_size=batch_size,
-                                                shape=shape,
-                                                verbose=False,
-                                                unconditional_guidance_scale=cfg_scale,
-                                                unconditional_conditioning=uc,
-                                                eta=ddim_eta)
+            shape = [
+                self.latent_channels,
+                height // self.downsampling_factor,
+                width // self.downsampling_factor,
+            ]
+            samples, _ = sampler.sample(
+                S=steps,
+                conditioning=c,
+                batch_size=batch_size,
+                shape=shape,
+                verbose=False,
+                unconditional_guidance_scale=cfg_scale,
+                unconditional_conditioning=uc,
+                eta=ddim_eta,
+            )
             yield self._samples_to_images(samples)
 
     @torch.no_grad()
-    def _img2img(self,
-                 prompt,
-                 precision_scope,
-                 batch_size,
-                 steps,cfg_scale,ddim_eta,
-                 skip_normalize,
-                 init_img,strength):
+    def _img2img(
+        self,
+        prompt,
+        precision_scope,
+        batch_size,
+        steps,
+        cfg_scale,
+        ddim_eta,
+        skip_normalize,
+        init_img,
+        strength,
+    ):
         """
         An infinite iterator of images from the prompt and the initial image
         """
 
         # PLMS sampler not supported yet, so ignore previous sampler
-        if self.sampler_name!='ddim':
-            print(f"sampler '{self.sampler_name}' is not yet supported. Using DDM sampler")
+        if self.sampler_name != 'ddim':
+            print(
+                f"sampler '{self.sampler_name}' is not yet supported. Using DDM sampler"
+            )
             sampler = DDIMSampler(self.model, device=self.device)
         else:
             sampler = self.sampler
@@ -351,9 +398,13 @@ The vast majority of these arguments default to reasonable values.
         init_image = self._load_img(init_img).to(self.device)
         init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
         with precision_scope(self.device.type):
-            init_latent = self.model.get_first_stage_encoding(self.model.encode_first_stage(init_image))  # move to latent space
+            init_latent = self.model.get_first_stage_encoding(
+                self.model.encode_first_stage(init_image)
+            )  # move to latent space
 
-        sampler.make_schedule(ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False)
+        sampler.make_schedule(
+            ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
+        )
 
         t_enc = int(strength * steps)
         # print(f"target t_enc is {t_enc} steps")
@@ -362,31 +413,44 @@ The vast majority of these arguments default to reasonable values.
             uc, c = self._get_uc_and_c(prompt, batch_size, skip_normalize)
 
             # encode (scaled latent)
-            z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc]*batch_size).to(self.device))
+            z_enc = sampler.stochastic_encode(
+                init_latent, torch.tensor([t_enc] * batch_size).to(self.device)
+            )
             # decode it
-            samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=cfg_scale,
-                                        unconditional_conditioning=uc,)
+            samples = sampler.decode(
+                z_enc,
+                c,
+                t_enc,
+                unconditional_guidance_scale=cfg_scale,
+                unconditional_conditioning=uc,
+            )
             yield self._samples_to_images(samples)
 
     # TODO: does this actually need to run every loop? does anything in it vary by random seed?
     def _get_uc_and_c(self, prompt, batch_size, skip_normalize):
 
-        uc = self.model.get_learned_conditioning(batch_size * [""])
+        uc = self.model.get_learned_conditioning(batch_size * [''])
 
         # weighted sub-prompts
-        subprompts,weights = T2I._split_weighted_subprompts(prompt)
+        subprompts, weights = T2I._split_weighted_subprompts(prompt)
         if len(subprompts) > 1:
             # i dont know if this is correct.. but it works
             c = torch.zeros_like(uc)
             # get total weight for normalizing
             totalWeight = sum(weights)
             # normalize each "sub prompt" and add it
-            for i in range(0,len(subprompts)):
+            for i in range(0, len(subprompts)):
                 weight = weights[i]
                 if not skip_normalize:
                     weight = weight / totalWeight
-                c = torch.add(c, self.model.get_learned_conditioning(batch_size * [subprompts[i]]), alpha=weight)
-        else: # just standard 1 prompt
+                c = torch.add(
+                    c,
+                    self.model.get_learned_conditioning(
+                        batch_size * [subprompts[i]]
+                    ),
+                    alpha=weight,
+                )
+        else:   # just standard 1 prompt
             c = self.model.get_learned_conditioning(batch_size * [prompt])
         return (uc, c)
 
@@ -395,23 +459,29 @@ The vast majority of these arguments default to reasonable values.
         x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
         images = list()
         for x_sample in x_samples:
-            x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
+            x_sample = 255.0 * rearrange(
+                x_sample.cpu().numpy(), 'c h w -> h w c'
+            )
             image = Image.fromarray(x_sample.astype(np.uint8))
             images.append(image)
         return images
 
     def _new_seed(self):
-        self.seed = random.randrange(0,np.iinfo(np.uint32).max)
+        self.seed = random.randrange(0, np.iinfo(np.uint32).max)
         return self.seed
 
     def load_model(self):
-        """ Load and initialize the model from configuration variables passed at object creation time """
+        """Load and initialize the model from configuration variables passed at object creation time"""
         if self.model is None:
             seed_everything(self.seed)
             try:
                 config = OmegaConf.load(self.config)
-                self.device = torch.device(self.device) if torch.cuda.is_available() else torch.device("cpu")
-                model = self._load_model_from_config(config,self.weights)
+                self.device = (
+                    torch.device(self.device)
+                    if torch.cuda.is_available()
+                    else torch.device('cpu')
+                )
+                model = self._load_model_from_config(config, self.weights)
                 if self.embedding_path is not None:
                     model.embedding_manager.load(self.embedding_path)
                 self.model = model.to(self.device)
@@ -421,18 +491,26 @@ The vast majority of these arguments default to reasonable values.
                 raise SystemExit
 
             msg = f'setting sampler to {self.sampler_name}'
-            if self.sampler_name=='plms':
+            if self.sampler_name == 'plms':
                 self.sampler = PLMSSampler(self.model, device=self.device)
             elif self.sampler_name == 'ddim':
                 self.sampler = DDIMSampler(self.model, device=self.device)
             elif self.sampler_name == 'k_dpm_2_a':
-                self.sampler = KSampler(self.model, 'dpm_2_ancestral', device=self.device)
+                self.sampler = KSampler(
+                    self.model, 'dpm_2_ancestral', device=self.device
+                )
             elif self.sampler_name == 'k_dpm_2':
-                self.sampler = KSampler(self.model, 'dpm_2', device=self.device)
+                self.sampler = KSampler(
+                    self.model, 'dpm_2', device=self.device
+                )
             elif self.sampler_name == 'k_euler_a':
-                self.sampler = KSampler(self.model, 'euler_ancestral', device=self.device)
+                self.sampler = KSampler(
+                    self.model, 'euler_ancestral', device=self.device
+                )
             elif self.sampler_name == 'k_euler':
-                self.sampler = KSampler(self.model, 'euler', device=self.device)
+                self.sampler = KSampler(
+                    self.model, 'euler', device=self.device
+                )
             elif self.sampler_name == 'k_heun':
                 self.sampler = KSampler(self.model, 'heun', device=self.device)
             elif self.sampler_name == 'k_lms':
@@ -446,32 +524,38 @@ The vast majority of these arguments default to reasonable values.
         return self.model
 
     def _load_model_from_config(self, config, ckpt):
-        print(f"Loading model from {ckpt}")
-        pl_sd = torch.load(ckpt, map_location="cpu")
-#        if "global_step" in pl_sd:
-#            print(f"Global Step: {pl_sd['global_step']}")
-        sd = pl_sd["state_dict"]
+        print(f'Loading model from {ckpt}')
+        pl_sd = torch.load(ckpt, map_location='cpu')
+        #        if "global_step" in pl_sd:
+        #            print(f"Global Step: {pl_sd['global_step']}")
+        sd = pl_sd['state_dict']
         model = instantiate_from_config(config.model)
         m, u = model.load_state_dict(sd, strict=False)
         model.to(self.device)
         model.eval()
         if self.full_precision:
-            print('Using slower but more accurate full-precision math (--full_precision)')
+            print(
+                'Using slower but more accurate full-precision math (--full_precision)'
+            )
         else:
-            print('Using half precision math. Call with --full_precision to use slower but more accurate full precision.')
+            print(
+                'Using half precision math. Call with --full_precision to use slower but more accurate full precision.'
+            )
             model.half()
         return model
 
-    def _load_img(self,path):
-        image = Image.open(path).convert("RGB")
+    def _load_img(self, path):
+        image = Image.open(path).convert('RGB')
         w, h = image.size
-        print(f"loaded input image of size ({w}, {h}) from {path}")
-        w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
+        print(f'loaded input image of size ({w}, {h}) from {path}')
+        w, h = map(
+            lambda x: x - x % 32, (w, h)
+        )  # resize to integer multiple of 32
         image = image.resize((w, h), resample=Image.Resampling.LANCZOS)
         image = np.array(image).astype(np.float32) / 255.0
         image = image[None].transpose(0, 3, 1, 2)
         image = torch.from_numpy(image)
-        return 2.*image - 1.
+        return 2.0 * image - 1.0
 
     def _split_weighted_subprompts(text):
         """
@@ -484,34 +568,36 @@ The vast majority of these arguments default to reasonable values.
         prompts = []
         weights = []
         while remaining > 0:
-            if ":" in text:
-                idx = text.index(":") # first occurrence from start
+            if ':' in text:
+                idx = text.index(':')   # first occurrence from start
                 # grab up to index as sub-prompt
                 prompt = text[:idx]
                 remaining -= idx
                 # remove from main text
-                text = text[idx+1:]
+                text = text[idx + 1 :]
                 # find value for weight
-                if " " in text:
-                    idx = text.index(" ") # first occurence
-                else: # no space, read to end
+                if ' ' in text:
+                    idx = text.index(' ')   # first occurrence
+                else:   # no space, read to end
                     idx = len(text)
                 if idx != 0:
                     try:
                         weight = float(text[:idx])
-                    except: # couldn't treat as float
-                        print(f"Warning: '{text[:idx]}' is not a value, are you missing a space?")
+                    except:   # couldn't treat as float
+                        print(
+                            f"Warning: '{text[:idx]}' is not a value, are you missing a space?"
+                        )
                         weight = 1.0
-                else: # no value found
+                else:   # no value found
                     weight = 1.0
                 # remove from main text
                 remaining -= idx
-                text = text[idx+1:]
+                text = text[idx + 1 :]
                 # append the sub-prompt and its weight
                 prompts.append(prompt)
                 weights.append(weight)
-            else: # no : found
-                if len(text) > 0: # there is still text though
+            else:   # no : found
+                if len(text) > 0:   # there is still text though
                     # take remainder as weight 1
                     prompts.append(text)
                     weights.append(1.0)
@@ -519,13 +605,20 @@ The vast majority of these arguments default to reasonable values.
         return prompts, weights
 
     def _run_gfpgan(self, image, strength):
-        if (self.gfpgan is None):
-            print(f"GFPGAN not initialized, it must be loaded via the --gfpgan argument")
+        if self.gfpgan is None:
+            print(
+                f'GFPGAN not initialized, it must be loaded via the --gfpgan argument'
+            )
             return image
-        
-        image = image.convert("RGB")
 
-        cropped_faces, restored_faces, restored_img = self.gfpgan.enhance(np.array(image, dtype=np.uint8), has_aligned=False, only_center_face=False, paste_back=True)
+        image = image.convert('RGB')
+
+        cropped_faces, restored_faces, restored_img = self.gfpgan.enhance(
+            np.array(image, dtype=np.uint8),
+            has_aligned=False,
+            only_center_face=False,
+            paste_back=True,
+        )
         res = Image.fromarray(restored_img)
 
         if strength < 1.0:
diff --git a/ldm/util.py b/ldm/util.py
index 3affd249de..d1379cae2b 100644
--- a/ldm/util.py
+++ b/ldm/util.py
@@ -13,22 +13,25 @@ from queue import Queue
 from inspect import isfunction
 from PIL import Image, ImageDraw, ImageFont
 
+
 def log_txt_as_img(wh, xc, size=10):
     # wh a tuple of (width, height)
     # xc a list of captions to plot
     b = len(xc)
     txts = list()
     for bi in range(b):
-        txt = Image.new("RGB", wh, color="white")
+        txt = Image.new('RGB', wh, color='white')
         draw = ImageDraw.Draw(txt)
         font = ImageFont.load_default()
         nc = int(40 * (wh[0] / 256))
-        lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))
+        lines = '\n'.join(
+            xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)
+        )
 
         try:
-            draw.text((0, 0), lines, fill="black", font=font)
+            draw.text((0, 0), lines, fill='black', font=font)
         except UnicodeEncodeError:
-            print("Cant encode string for logging. Skipping.")
+            print('Cant encode string for logging. Skipping.')
 
         txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
         txts.append(txt)
@@ -70,22 +73,26 @@ def mean_flat(tensor):
 def count_params(model, verbose=False):
     total_params = sum(p.numel() for p in model.parameters())
     if verbose:
-        print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.")
+        print(
+            f'{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.'
+        )
     return total_params
 
 
 def instantiate_from_config(config, **kwargs):
-    if not "target" in config:
+    if not 'target' in config:
         if config == '__is_first_stage__':
             return None
-        elif config == "__is_unconditional__":
+        elif config == '__is_unconditional__':
             return None
-        raise KeyError("Expected key `target` to instantiate.")
-    return get_obj_from_str(config["target"])(**config.get("params", dict()), **kwargs)
+        raise KeyError('Expected key `target` to instantiate.')
+    return get_obj_from_str(config['target'])(
+        **config.get('params', dict()), **kwargs
+    )
 
 
 def get_obj_from_str(string, reload=False):
-    module, cls = string.rsplit(".", 1)
+    module, cls = string.rsplit('.', 1)
     if reload:
         module_imp = importlib.import_module(module)
         importlib.reload(module_imp)
@@ -101,31 +108,36 @@ def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False):
     else:
         res = func(data)
     Q.put([idx, res])
-    Q.put("Done")
+    Q.put('Done')
 
 
 def parallel_data_prefetch(
-        func: callable, data, n_proc, target_data_type="ndarray", cpu_intensive=True, use_worker_id=False
+    func: callable,
+    data,
+    n_proc,
+    target_data_type='ndarray',
+    cpu_intensive=True,
+    use_worker_id=False,
 ):
     # if target_data_type not in ["ndarray", "list"]:
     #     raise ValueError(
     #         "Data, which is passed to parallel_data_prefetch has to be either of type list or ndarray."
     #     )
-    if isinstance(data, np.ndarray) and target_data_type == "list":
-        raise ValueError("list expected but function got ndarray.")
+    if isinstance(data, np.ndarray) and target_data_type == 'list':
+        raise ValueError('list expected but function got ndarray.')
     elif isinstance(data, abc.Iterable):
         if isinstance(data, dict):
             print(
                 f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.'
             )
             data = list(data.values())
-        if target_data_type == "ndarray":
+        if target_data_type == 'ndarray':
             data = np.asarray(data)
         else:
             data = list(data)
     else:
         raise TypeError(
-            f"The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}."
+            f'The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}.'
         )
 
     if cpu_intensive:
@@ -135,7 +147,7 @@ def parallel_data_prefetch(
         Q = Queue(1000)
         proc = Thread
     # spawn processes
-    if target_data_type == "ndarray":
+    if target_data_type == 'ndarray':
         arguments = [
             [func, Q, part, i, use_worker_id]
             for i, part in enumerate(np.array_split(data, n_proc))
@@ -149,7 +161,7 @@ def parallel_data_prefetch(
         arguments = [
             [func, Q, part, i, use_worker_id]
             for i, part in enumerate(
-                [data[i: i + step] for i in range(0, len(data), step)]
+                [data[i : i + step] for i in range(0, len(data), step)]
             )
         ]
     processes = []
@@ -158,7 +170,7 @@ def parallel_data_prefetch(
         processes += [p]
 
     # start processes
-    print(f"Start prefetching...")
+    print(f'Start prefetching...')
     import time
 
     start = time.time()
@@ -171,13 +183,13 @@ def parallel_data_prefetch(
         while k < n_proc:
             # get result
             res = Q.get()
-            if res == "Done":
+            if res == 'Done':
                 k += 1
             else:
                 gather_res[res[0]] = res[1]
 
     except Exception as e:
-        print("Exception: ", e)
+        print('Exception: ', e)
         for p in processes:
             p.terminate()
 
@@ -185,7 +197,7 @@ def parallel_data_prefetch(
     finally:
         for p in processes:
             p.join()
-        print(f"Prefetching complete. [{time.time() - start} sec.]")
+        print(f'Prefetching complete. [{time.time() - start} sec.]')
 
     if target_data_type == 'ndarray':
         if not isinstance(gather_res[0], np.ndarray):
diff --git a/main.py b/main.py
index 5653cf5e06..8c36c270b1 100644
--- a/main.py
+++ b/main.py
@@ -14,145 +14,171 @@ from PIL import Image
 
 from pytorch_lightning import seed_everything
 from pytorch_lightning.trainer import Trainer
-from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
+from pytorch_lightning.callbacks import (
+    ModelCheckpoint,
+    Callback,
+    LearningRateMonitor,
+)
 from pytorch_lightning.utilities.distributed import rank_zero_only
 from pytorch_lightning.utilities import rank_zero_info
 
 from ldm.data.base import Txt2ImgIterableBaseDataset
 from ldm.util import instantiate_from_config
 
+
 def load_model_from_config(config, ckpt, verbose=False):
-    print(f"Loading model from {ckpt}")
-    pl_sd = torch.load(ckpt, map_location="cpu")
-    sd = pl_sd["state_dict"]
+    print(f'Loading model from {ckpt}')
+    pl_sd = torch.load(ckpt, map_location='cpu')
+    sd = pl_sd['state_dict']
     config.model.params.ckpt_path = ckpt
     model = instantiate_from_config(config.model)
     m, u = model.load_state_dict(sd, strict=False)
     if len(m) > 0 and verbose:
-        print("missing keys:")
+        print('missing keys:')
         print(m)
     if len(u) > 0 and verbose:
-        print("unexpected keys:")
+        print('unexpected keys:')
         print(u)
 
     model.cuda()
     return model
 
+
 def get_parser(**parser_kwargs):
     def str2bool(v):
         if isinstance(v, bool):
             return v
-        if v.lower() in ("yes", "true", "t", "y", "1"):
+        if v.lower() in ('yes', 'true', 't', 'y', '1'):
             return True
-        elif v.lower() in ("no", "false", "f", "n", "0"):
+        elif v.lower() in ('no', 'false', 'f', 'n', '0'):
             return False
         else:
-            raise argparse.ArgumentTypeError("Boolean value expected.")
+            raise argparse.ArgumentTypeError('Boolean value expected.')
 
     parser = argparse.ArgumentParser(**parser_kwargs)
     parser.add_argument(
-        "-n",
-        "--name",
+        '-n',
+        '--name',
         type=str,
         const=True,
-        default="",
-        nargs="?",
-        help="postfix for logdir",
+        default='',
+        nargs='?',
+        help='postfix for logdir',
     )
     parser.add_argument(
-        "-r",
-        "--resume",
+        '-r',
+        '--resume',
         type=str,
         const=True,
-        default="",
-        nargs="?",
-        help="resume from logdir or checkpoint in logdir",
+        default='',
+        nargs='?',
+        help='resume from logdir or checkpoint in logdir',
     )
     parser.add_argument(
-        "-b",
-        "--base",
-        nargs="*",
-        metavar="base_config.yaml",
-        help="paths to base configs. Loaded from left-to-right. "
-             "Parameters can be overwritten or added with command-line options of the form `--key value`.",
+        '-b',
+        '--base',
+        nargs='*',
+        metavar='base_config.yaml',
+        help='paths to base configs. Loaded from left-to-right. '
+        'Parameters can be overwritten or added with command-line options of the form `--key value`.',
         default=list(),
     )
     parser.add_argument(
-        "-t",
-        "--train",
+        '-t',
+        '--train',
         type=str2bool,
         const=True,
         default=False,
-        nargs="?",
-        help="train",
+        nargs='?',
+        help='train',
     )
     parser.add_argument(
-        "--no-test",
+        '--no-test',
         type=str2bool,
         const=True,
         default=False,
-        nargs="?",
-        help="disable test",
+        nargs='?',
+        help='disable test',
     )
     parser.add_argument(
-        "-p",
-        "--project",
-        help="name of new or path to existing project"
+        '-p', '--project', help='name of new or path to existing project'
     )
     parser.add_argument(
-        "-d",
-        "--debug",
+        '-d',
+        '--debug',
         type=str2bool,
-        nargs="?",
+        nargs='?',
         const=True,
         default=False,
-        help="enable post-mortem debugging",
+        help='enable post-mortem debugging',
     )
     parser.add_argument(
-        "-s",
-        "--seed",
+        '-s',
+        '--seed',
         type=int,
         default=23,
-        help="seed for seed_everything",
+        help='seed for seed_everything',
     )
     parser.add_argument(
-        "-f",
-        "--postfix",
+        '-f',
+        '--postfix',
         type=str,
-        default="",
-        help="post-postfix for default name",
+        default='',
+        help='post-postfix for default name',
     )
     parser.add_argument(
-        "-l",
-        "--logdir",
+        '-l',
+        '--logdir',
         type=str,
-        default="logs",
-        help="directory for logging dat shit",
+        default='logs',
+        help='directory for logging dat shit',
     )
     parser.add_argument(
-        "--scale_lr",
+        '--scale_lr',
         type=str2bool,
-        nargs="?",
+        nargs='?',
         const=True,
         default=True,
-        help="scale base-lr by ngpu * batch_size * n_accumulate",
+        help='scale base-lr by ngpu * batch_size * n_accumulate',
     )
 
     parser.add_argument(
-        "--datadir_in_name", 
-        type=str2bool, 
-        nargs="?", 
-        const=True, 
-        default=True, 
-        help="Prepend the final directory in the data_root to the output directory name")
+        '--datadir_in_name',
+        type=str2bool,
+        nargs='?',
+        const=True,
+        default=True,
+        help='Prepend the final directory in the data_root to the output directory name',
+    )
 
-    parser.add_argument("--actual_resume", type=str, default="", help="Path to model to actually resume from")
-    parser.add_argument("--data_root", type=str, required=True, help="Path to directory with training images")
+    parser.add_argument(
+        '--actual_resume',
+        type=str,
+        default='',
+        help='Path to model to actually resume from',
+    )
+    parser.add_argument(
+        '--data_root',
+        type=str,
+        required=True,
+        help='Path to directory with training images',
+    )
 
-    parser.add_argument("--embedding_manager_ckpt", type=str, default="", help="Initialize embedding manager from a checkpoint")
-    parser.add_argument("--placeholder_tokens", type=str, nargs="+", default=["*"])
+    parser.add_argument(
+        '--embedding_manager_ckpt',
+        type=str,
+        default='',
+        help='Initialize embedding manager from a checkpoint',
+    )
+    parser.add_argument(
+        '--placeholder_tokens', type=str, nargs='+', default=['*']
+    )
 
-    parser.add_argument("--init_word", type=str, help="Word to use as source for initial token embedding.")
+    parser.add_argument(
+        '--init_word',
+        type=str,
+        help='Word to use as source for initial token embedding.',
+    )
 
     return parser
 
@@ -186,7 +212,9 @@ def worker_init_fn(_):
     if isinstance(dataset, Txt2ImgIterableBaseDataset):
         split_size = dataset.num_records // worker_info.num_workers
         # reset num_records to the true number to retain reliable length information
-        dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size]
+        dataset.sample_ids = dataset.valid_ids[
+            worker_id * split_size : (worker_id + 1) * split_size
+        ]
         current_id = np.random.choice(len(np.random.get_state()[1]), 1)
         return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
     else:
@@ -194,25 +222,41 @@ def worker_init_fn(_):
 
 
 class DataModuleFromConfig(pl.LightningDataModule):
-    def __init__(self, batch_size, train=None, validation=None, test=None, predict=None,
-                 wrap=False, num_workers=None, shuffle_test_loader=False, use_worker_init_fn=False,
-                 shuffle_val_dataloader=False):
+    def __init__(
+        self,
+        batch_size,
+        train=None,
+        validation=None,
+        test=None,
+        predict=None,
+        wrap=False,
+        num_workers=None,
+        shuffle_test_loader=False,
+        use_worker_init_fn=False,
+        shuffle_val_dataloader=False,
+    ):
         super().__init__()
         self.batch_size = batch_size
         self.dataset_configs = dict()
-        self.num_workers = num_workers if num_workers is not None else batch_size * 2
+        self.num_workers = (
+            num_workers if num_workers is not None else batch_size * 2
+        )
         self.use_worker_init_fn = use_worker_init_fn
         if train is not None:
-            self.dataset_configs["train"] = train
+            self.dataset_configs['train'] = train
             self.train_dataloader = self._train_dataloader
         if validation is not None:
-            self.dataset_configs["validation"] = validation
-            self.val_dataloader = partial(self._val_dataloader, shuffle=shuffle_val_dataloader)
+            self.dataset_configs['validation'] = validation
+            self.val_dataloader = partial(
+                self._val_dataloader, shuffle=shuffle_val_dataloader
+            )
         if test is not None:
-            self.dataset_configs["test"] = test
-            self.test_dataloader = partial(self._test_dataloader, shuffle=shuffle_test_loader)
+            self.dataset_configs['test'] = test
+            self.test_dataloader = partial(
+                self._test_dataloader, shuffle=shuffle_test_loader
+            )
         if predict is not None:
-            self.dataset_configs["predict"] = predict
+            self.dataset_configs['predict'] = predict
             self.predict_dataloader = self._predict_dataloader
         self.wrap = wrap
 
@@ -223,34 +267,48 @@ class DataModuleFromConfig(pl.LightningDataModule):
     def setup(self, stage=None):
         self.datasets = dict(
             (k, instantiate_from_config(self.dataset_configs[k]))
-            for k in self.dataset_configs)
+            for k in self.dataset_configs
+        )
         if self.wrap:
             for k in self.datasets:
                 self.datasets[k] = WrappedDataset(self.datasets[k])
 
     def _train_dataloader(self):
-        is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
+        is_iterable_dataset = isinstance(
+            self.datasets['train'], Txt2ImgIterableBaseDataset
+        )
         if is_iterable_dataset or self.use_worker_init_fn:
             init_fn = worker_init_fn
         else:
             init_fn = None
-        return DataLoader(self.datasets["train"], batch_size=self.batch_size,
-                          num_workers=self.num_workers, shuffle=False if is_iterable_dataset else True,
-                          worker_init_fn=init_fn)
+        return DataLoader(
+            self.datasets['train'],
+            batch_size=self.batch_size,
+            num_workers=self.num_workers,
+            shuffle=False if is_iterable_dataset else True,
+            worker_init_fn=init_fn,
+        )
 
     def _val_dataloader(self, shuffle=False):
-        if isinstance(self.datasets['validation'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn:
+        if (
+            isinstance(self.datasets['validation'], Txt2ImgIterableBaseDataset)
+            or self.use_worker_init_fn
+        ):
             init_fn = worker_init_fn
         else:
             init_fn = None
-        return DataLoader(self.datasets["validation"],
-                          batch_size=self.batch_size,
-                          num_workers=self.num_workers,
-                          worker_init_fn=init_fn,
-                          shuffle=shuffle)
+        return DataLoader(
+            self.datasets['validation'],
+            batch_size=self.batch_size,
+            num_workers=self.num_workers,
+            worker_init_fn=init_fn,
+            shuffle=shuffle,
+        )
 
     def _test_dataloader(self, shuffle=False):
-        is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
+        is_iterable_dataset = isinstance(
+            self.datasets['train'], Txt2ImgIterableBaseDataset
+        )
         if is_iterable_dataset or self.use_worker_init_fn:
             init_fn = worker_init_fn
         else:
@@ -259,20 +317,34 @@ class DataModuleFromConfig(pl.LightningDataModule):
         # do not shuffle dataloader for iterable dataset
         shuffle = shuffle and (not is_iterable_dataset)
 
-        return DataLoader(self.datasets["test"], batch_size=self.batch_size,
-                          num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=shuffle)
+        return DataLoader(
+            self.datasets['test'],
+            batch_size=self.batch_size,
+            num_workers=self.num_workers,
+            worker_init_fn=init_fn,
+            shuffle=shuffle,
+        )
 
     def _predict_dataloader(self, shuffle=False):
-        if isinstance(self.datasets['predict'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn:
+        if (
+            isinstance(self.datasets['predict'], Txt2ImgIterableBaseDataset)
+            or self.use_worker_init_fn
+        ):
             init_fn = worker_init_fn
         else:
             init_fn = None
-        return DataLoader(self.datasets["predict"], batch_size=self.batch_size,
-                          num_workers=self.num_workers, worker_init_fn=init_fn)
+        return DataLoader(
+            self.datasets['predict'],
+            batch_size=self.batch_size,
+            num_workers=self.num_workers,
+            worker_init_fn=init_fn,
+        )
 
 
 class SetupCallback(Callback):
-    def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
+    def __init__(
+        self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config
+    ):
         super().__init__()
         self.resume = resume
         self.now = now
@@ -284,8 +356,8 @@ class SetupCallback(Callback):
 
     def on_keyboard_interrupt(self, trainer, pl_module):
         if trainer.global_rank == 0:
-            print("Summoning checkpoint.")
-            ckpt_path = os.path.join(self.ckptdir, "last.ckpt")
+            print('Summoning checkpoint.')
+            ckpt_path = os.path.join(self.ckptdir, 'last.ckpt')
             trainer.save_checkpoint(ckpt_path)
 
     def on_pretrain_routine_start(self, trainer, pl_module):
@@ -295,24 +367,36 @@ class SetupCallback(Callback):
             os.makedirs(self.ckptdir, exist_ok=True)
             os.makedirs(self.cfgdir, exist_ok=True)
 
-            if "callbacks" in self.lightning_config:
-                if 'metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']:
-                    os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True)
-            print("Project config")
+            if 'callbacks' in self.lightning_config:
+                if (
+                    'metrics_over_trainsteps_checkpoint'
+                    in self.lightning_config['callbacks']
+                ):
+                    os.makedirs(
+                        os.path.join(self.ckptdir, 'trainstep_checkpoints'),
+                        exist_ok=True,
+                    )
+            print('Project config')
             print(OmegaConf.to_yaml(self.config))
-            OmegaConf.save(self.config,
-                           os.path.join(self.cfgdir, "{}-project.yaml".format(self.now)))
+            OmegaConf.save(
+                self.config,
+                os.path.join(self.cfgdir, '{}-project.yaml'.format(self.now)),
+            )
 
-            print("Lightning config")
+            print('Lightning config')
             print(OmegaConf.to_yaml(self.lightning_config))
-            OmegaConf.save(OmegaConf.create({"lightning": self.lightning_config}),
-                           os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now)))
+            OmegaConf.save(
+                OmegaConf.create({'lightning': self.lightning_config}),
+                os.path.join(
+                    self.cfgdir, '{}-lightning.yaml'.format(self.now)
+                ),
+            )
 
         else:
             # ModelCheckpoint callback created log directory --- remove it
             if not self.resume and os.path.exists(self.logdir):
                 dst, name = os.path.split(self.logdir)
-                dst = os.path.join(dst, "child_runs", name)
+                dst = os.path.join(dst, 'child_runs', name)
                 os.makedirs(os.path.split(dst)[0], exist_ok=True)
                 try:
                     os.rename(self.logdir, dst)
@@ -321,9 +405,18 @@ class SetupCallback(Callback):
 
 
 class ImageLogger(Callback):
-    def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True,
-                 rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False,
-                 log_images_kwargs=None):
+    def __init__(
+        self,
+        batch_frequency,
+        max_images,
+        clamp=True,
+        increase_log_steps=True,
+        rescale=True,
+        disabled=False,
+        log_on_batch_idx=False,
+        log_first_step=False,
+        log_images_kwargs=None,
+    ):
         super().__init__()
         self.rescale = rescale
         self.batch_freq = batch_frequency
@@ -331,7 +424,9 @@ class ImageLogger(Callback):
         self.logger_log_images = {
             pl.loggers.TestTubeLogger: self._testtube,
         }
-        self.log_steps = [2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)]
+        self.log_steps = [
+            2**n for n in range(int(np.log2(self.batch_freq)) + 1)
+        ]
         if not increase_log_steps:
             self.log_steps = [self.batch_freq]
         self.clamp = clamp
@@ -346,15 +441,16 @@ class ImageLogger(Callback):
             grid = torchvision.utils.make_grid(images[k])
             grid = (grid + 1.0) / 2.0  # -1,1 -> 0,1; c,h,w
 
-            tag = f"{split}/{k}"
+            tag = f'{split}/{k}'
             pl_module.logger.experiment.add_image(
-                tag, grid,
-                global_step=pl_module.global_step)
+                tag, grid, global_step=pl_module.global_step
+            )
 
     @rank_zero_only
-    def log_local(self, save_dir, split, images,
-                  global_step, current_epoch, batch_idx):
-        root = os.path.join(save_dir, "images", split)
+    def log_local(
+        self, save_dir, split, images, global_step, current_epoch, batch_idx
+    ):
+        root = os.path.join(save_dir, 'images', split)
         for k in images:
             grid = torchvision.utils.make_grid(images[k], nrow=4)
             if self.rescale:
@@ -362,21 +458,25 @@ class ImageLogger(Callback):
             grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
             grid = grid.numpy()
             grid = (grid * 255).astype(np.uint8)
-            filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
-                k,
-                global_step,
-                current_epoch,
-                batch_idx)
+            filename = '{}_gs-{:06}_e-{:06}_b-{:06}.png'.format(
+                k, global_step, current_epoch, batch_idx
+            )
             path = os.path.join(root, filename)
             os.makedirs(os.path.split(path)[0], exist_ok=True)
             Image.fromarray(grid).save(path)
 
-    def log_img(self, pl_module, batch, batch_idx, split="train"):
-        check_idx = batch_idx if self.log_on_batch_idx else pl_module.global_step
-        if (self.check_frequency(check_idx) and  # batch_idx % self.batch_freq == 0
-                hasattr(pl_module, "log_images") and
-                callable(pl_module.log_images) and
-                self.max_images > 0):
+    def log_img(self, pl_module, batch, batch_idx, split='train'):
+        check_idx = (
+            batch_idx if self.log_on_batch_idx else pl_module.global_step
+        )
+        if (
+            self.check_frequency(check_idx)
+            and hasattr(  # batch_idx % self.batch_freq == 0
+                pl_module, 'log_images'
+            )
+            and callable(pl_module.log_images)
+            and self.max_images > 0
+        ):
             logger = type(pl_module.logger)
 
             is_train = pl_module.training
@@ -384,7 +484,9 @@ class ImageLogger(Callback):
                 pl_module.eval()
 
             with torch.no_grad():
-                images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)
+                images = pl_module.log_images(
+                    batch, split=split, **self.log_images_kwargs
+                )
 
             for k in images:
                 N = min(images[k].shape[0], self.max_images)
@@ -392,20 +494,29 @@ class ImageLogger(Callback):
                 if isinstance(images[k], torch.Tensor):
                     images[k] = images[k].detach().cpu()
                     if self.clamp:
-                        images[k] = torch.clamp(images[k], -1., 1.)
+                        images[k] = torch.clamp(images[k], -1.0, 1.0)
 
-            self.log_local(pl_module.logger.save_dir, split, images,
-                           pl_module.global_step, pl_module.current_epoch, batch_idx)
+            self.log_local(
+                pl_module.logger.save_dir,
+                split,
+                images,
+                pl_module.global_step,
+                pl_module.current_epoch,
+                batch_idx,
+            )
 
-            logger_log_images = self.logger_log_images.get(logger, lambda *args, **kwargs: None)
+            logger_log_images = self.logger_log_images.get(
+                logger, lambda *args, **kwargs: None
+            )
             logger_log_images(pl_module, images, pl_module.global_step, split)
 
             if is_train:
                 pl_module.train()
 
     def check_frequency(self, check_idx):
-        if ((check_idx % self.batch_freq) == 0 or (check_idx in self.log_steps)) and (
-                check_idx > 0 or self.log_first_step):
+        if (
+            (check_idx % self.batch_freq) == 0 or (check_idx in self.log_steps)
+        ) and (check_idx > 0 or self.log_first_step):
             try:
                 self.log_steps.pop(0)
             except IndexError as e:
@@ -414,15 +525,23 @@ class ImageLogger(Callback):
             return True
         return False
 
-    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
-        if not self.disabled and (pl_module.global_step > 0 or self.log_first_step):
-            self.log_img(pl_module, batch, batch_idx, split="train")
+    def on_train_batch_end(
+        self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
+    ):
+        if not self.disabled and (
+            pl_module.global_step > 0 or self.log_first_step
+        ):
+            self.log_img(pl_module, batch, batch_idx, split='train')
 
-    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
+    def on_validation_batch_end(
+        self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
+    ):
         if not self.disabled and pl_module.global_step > 0:
-            self.log_img(pl_module, batch, batch_idx, split="val")
+            self.log_img(pl_module, batch, batch_idx, split='val')
         if hasattr(pl_module, 'calibrate_grad_norm'):
-            if (pl_module.calibrate_grad_norm and batch_idx % 25 == 0) and batch_idx > 0:
+            if (
+                pl_module.calibrate_grad_norm and batch_idx % 25 == 0
+            ) and batch_idx > 0:
                 self.log_gradients(trainer, pl_module, batch_idx=batch_idx)
 
 
@@ -436,20 +555,22 @@ class CUDACallback(Callback):
 
     def on_train_epoch_end(self, trainer, pl_module, outputs):
         torch.cuda.synchronize(trainer.root_gpu)
-        max_memory = torch.cuda.max_memory_allocated(trainer.root_gpu) / 2 ** 20
+        max_memory = (
+            torch.cuda.max_memory_allocated(trainer.root_gpu) / 2**20
+        )
         epoch_time = time.time() - self.start_time
 
         try:
             max_memory = trainer.training_type_plugin.reduce(max_memory)
             epoch_time = trainer.training_type_plugin.reduce(epoch_time)
 
-            rank_zero_info(f"Average Epoch time: {epoch_time:.2f} seconds")
-            rank_zero_info(f"Average Peak memory {max_memory:.2f}MiB")
+            rank_zero_info(f'Average Epoch time: {epoch_time:.2f} seconds')
+            rank_zero_info(f'Average Peak memory {max_memory:.2f}MiB')
         except AttributeError:
             pass
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     # custom parser to specify config files, train, test and debug mode,
     # postfix, resume.
     # `--key value` arguments are interpreted as arguments to the trainer.
@@ -491,7 +612,7 @@ if __name__ == "__main__":
     #           params:
     #               key: value
 
-    now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
+    now = datetime.datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
 
     # add cwd for convenience and to make classes in this file available when
     # running as `python main.py`
@@ -504,47 +625,49 @@ if __name__ == "__main__":
     opt, unknown = parser.parse_known_args()
     if opt.name and opt.resume:
         raise ValueError(
-            "-n/--name and -r/--resume cannot be specified both."
-            "If you want to resume training in a new log folder, "
-            "use -n/--name in combination with --resume_from_checkpoint"
+            '-n/--name and -r/--resume cannot be specified both.'
+            'If you want to resume training in a new log folder, '
+            'use -n/--name in combination with --resume_from_checkpoint'
         )
     if opt.resume:
         if not os.path.exists(opt.resume):
-            raise ValueError("Cannot find {}".format(opt.resume))
+            raise ValueError('Cannot find {}'.format(opt.resume))
         if os.path.isfile(opt.resume):
-            paths = opt.resume.split("/")
+            paths = opt.resume.split('/')
             # idx = len(paths)-paths[::-1].index("logs")+1
             # logdir = "/".join(paths[:idx])
-            logdir = "/".join(paths[:-2])
+            logdir = '/'.join(paths[:-2])
             ckpt = opt.resume
         else:
             assert os.path.isdir(opt.resume), opt.resume
-            logdir = opt.resume.rstrip("/")
-            ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
+            logdir = opt.resume.rstrip('/')
+            ckpt = os.path.join(logdir, 'checkpoints', 'last.ckpt')
 
         opt.resume_from_checkpoint = ckpt
-        base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml")))
+        base_configs = sorted(
+            glob.glob(os.path.join(logdir, 'configs/*.yaml'))
+        )
         opt.base = base_configs + opt.base
-        _tmp = logdir.split("/")
+        _tmp = logdir.split('/')
         nowname = _tmp[-1]
     else:
         if opt.name:
-            name = "_" + opt.name
+            name = '_' + opt.name
         elif opt.base:
             cfg_fname = os.path.split(opt.base[0])[-1]
             cfg_name = os.path.splitext(cfg_fname)[0]
-            name = "_" + cfg_name
+            name = '_' + cfg_name
         else:
-            name = ""
+            name = ''
 
         if opt.datadir_in_name:
             now = os.path.basename(os.path.normpath(opt.data_root)) + now
-            
+
         nowname = now + name + opt.postfix
         logdir = os.path.join(opt.logdir, nowname)
 
-    ckptdir = os.path.join(logdir, "checkpoints")
-    cfgdir = os.path.join(logdir, "configs")
+    ckptdir = os.path.join(logdir, 'checkpoints')
+    cfgdir = os.path.join(logdir, 'configs')
     seed_everything(opt.seed)
 
     try:
@@ -552,19 +675,19 @@ if __name__ == "__main__":
         configs = [OmegaConf.load(cfg) for cfg in opt.base]
         cli = OmegaConf.from_dotlist(unknown)
         config = OmegaConf.merge(*configs, cli)
-        lightning_config = config.pop("lightning", OmegaConf.create())
+        lightning_config = config.pop('lightning', OmegaConf.create())
         # merge trainer cli with config
-        trainer_config = lightning_config.get("trainer", OmegaConf.create())
+        trainer_config = lightning_config.get('trainer', OmegaConf.create())
         # default to ddp
-        trainer_config["accelerator"] = "ddp"
+        trainer_config['accelerator'] = 'ddp'
         for k in nondefault_trainer_args(opt):
             trainer_config[k] = getattr(opt, k)
-        if not "gpus" in trainer_config:
-            del trainer_config["accelerator"]
+        if not 'gpus' in trainer_config:
+            del trainer_config['accelerator']
             cpu = True
         else:
-            gpuinfo = trainer_config["gpus"]
-            print(f"Running on GPUs {gpuinfo}")
+            gpuinfo = trainer_config['gpus']
+            print(f'Running on GPUs {gpuinfo}')
             cpu = False
         trainer_opt = argparse.Namespace(**trainer_config)
         lightning_config.trainer = trainer_config
@@ -572,11 +695,17 @@ if __name__ == "__main__":
         # model
 
         # config.model.params.personalization_config.params.init_word = opt.init_word
-        config.model.params.personalization_config.params.embedding_manager_ckpt = opt.embedding_manager_ckpt
-        config.model.params.personalization_config.params.placeholder_tokens = opt.placeholder_tokens
+        config.model.params.personalization_config.params.embedding_manager_ckpt = (
+            opt.embedding_manager_ckpt
+        )
+        config.model.params.personalization_config.params.placeholder_tokens = (
+            opt.placeholder_tokens
+        )
 
         if opt.init_word:
-            config.model.params.personalization_config.params.initializer_words[0] = opt.init_word
+            config.model.params.personalization_config.params.initializer_words[
+                0
+            ] = opt.init_word
 
         if opt.actual_resume:
             model = load_model_from_config(config, opt.actual_resume)
@@ -588,123 +717,136 @@ if __name__ == "__main__":
 
         # default logger configs
         default_logger_cfgs = {
-            "wandb": {
-                "target": "pytorch_lightning.loggers.WandbLogger",
-                "params": {
-                    "name": nowname,
-                    "save_dir": logdir,
-                    "offline": opt.debug,
-                    "id": nowname,
-                }
+            'wandb': {
+                'target': 'pytorch_lightning.loggers.WandbLogger',
+                'params': {
+                    'name': nowname,
+                    'save_dir': logdir,
+                    'offline': opt.debug,
+                    'id': nowname,
+                },
             },
-            "testtube": {
-                "target": "pytorch_lightning.loggers.TestTubeLogger",
-                "params": {
-                    "name": "testtube",
-                    "save_dir": logdir,
-                }
+            'testtube': {
+                'target': 'pytorch_lightning.loggers.TestTubeLogger',
+                'params': {
+                    'name': 'testtube',
+                    'save_dir': logdir,
+                },
             },
         }
-        default_logger_cfg = default_logger_cfgs["testtube"]
-        if "logger" in lightning_config:
+        default_logger_cfg = default_logger_cfgs['testtube']
+        if 'logger' in lightning_config:
             logger_cfg = lightning_config.logger
         else:
             logger_cfg = OmegaConf.create()
         logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg)
-        trainer_kwargs["logger"] = instantiate_from_config(logger_cfg)
+        trainer_kwargs['logger'] = instantiate_from_config(logger_cfg)
 
         # modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to
         # specify which metric is used to determine best models
         default_modelckpt_cfg = {
-            "target": "pytorch_lightning.callbacks.ModelCheckpoint",
-            "params": {
-                "dirpath": ckptdir,
-                "filename": "{epoch:06}",
-                "verbose": True,
-                "save_last": True,
-            }
+            'target': 'pytorch_lightning.callbacks.ModelCheckpoint',
+            'params': {
+                'dirpath': ckptdir,
+                'filename': '{epoch:06}',
+                'verbose': True,
+                'save_last': True,
+            },
         }
-        if hasattr(model, "monitor"):
-            print(f"Monitoring {model.monitor} as checkpoint metric.")
-            default_modelckpt_cfg["params"]["monitor"] = model.monitor
-            default_modelckpt_cfg["params"]["save_top_k"] = 3
+        if hasattr(model, 'monitor'):
+            print(f'Monitoring {model.monitor} as checkpoint metric.')
+            default_modelckpt_cfg['params']['monitor'] = model.monitor
+            default_modelckpt_cfg['params']['save_top_k'] = 3
 
-        if "modelcheckpoint" in lightning_config:
+        if 'modelcheckpoint' in lightning_config:
             modelckpt_cfg = lightning_config.modelcheckpoint
         else:
-            modelckpt_cfg =  OmegaConf.create()
+            modelckpt_cfg = OmegaConf.create()
         modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg)
-        print(f"Merged modelckpt-cfg: \n{modelckpt_cfg}")
+        print(f'Merged modelckpt-cfg: \n{modelckpt_cfg}')
         if version.parse(pl.__version__) < version.parse('1.4.0'):
-            trainer_kwargs["checkpoint_callback"] = instantiate_from_config(modelckpt_cfg)
+            trainer_kwargs['checkpoint_callback'] = instantiate_from_config(
+                modelckpt_cfg
+            )
 
         # add callback which sets up log directory
         default_callbacks_cfg = {
-            "setup_callback": {
-                "target": "main.SetupCallback",
-                "params": {
-                    "resume": opt.resume,
-                    "now": now,
-                    "logdir": logdir,
-                    "ckptdir": ckptdir,
-                    "cfgdir": cfgdir,
-                    "config": config,
-                    "lightning_config": lightning_config,
-                }
+            'setup_callback': {
+                'target': 'main.SetupCallback',
+                'params': {
+                    'resume': opt.resume,
+                    'now': now,
+                    'logdir': logdir,
+                    'ckptdir': ckptdir,
+                    'cfgdir': cfgdir,
+                    'config': config,
+                    'lightning_config': lightning_config,
+                },
             },
-            "image_logger": {
-                "target": "main.ImageLogger",
-                "params": {
-                    "batch_frequency": 750,
-                    "max_images": 4,
-                    "clamp": True
-                }
+            'image_logger': {
+                'target': 'main.ImageLogger',
+                'params': {
+                    'batch_frequency': 750,
+                    'max_images': 4,
+                    'clamp': True,
+                },
             },
-            "learning_rate_logger": {
-                "target": "main.LearningRateMonitor",
-                "params": {
-                    "logging_interval": "step",
+            'learning_rate_logger': {
+                'target': 'main.LearningRateMonitor',
+                'params': {
+                    'logging_interval': 'step',
                     # "log_momentum": True
-                }
-            },
-            "cuda_callback": {
-                "target": "main.CUDACallback"
+                },
             },
+            'cuda_callback': {'target': 'main.CUDACallback'},
         }
         if version.parse(pl.__version__) >= version.parse('1.4.0'):
-            default_callbacks_cfg.update({'checkpoint_callback': modelckpt_cfg})
+            default_callbacks_cfg.update(
+                {'checkpoint_callback': modelckpt_cfg}
+            )
 
-        if "callbacks" in lightning_config:
+        if 'callbacks' in lightning_config:
             callbacks_cfg = lightning_config.callbacks
         else:
             callbacks_cfg = OmegaConf.create()
 
         if 'metrics_over_trainsteps_checkpoint' in callbacks_cfg:
             print(
-                'Caution: Saving checkpoints every n train steps without deleting. This might require some free space.')
+                'Caution: Saving checkpoints every n train steps without deleting. This might require some free space.'
+            )
             default_metrics_over_trainsteps_ckpt_dict = {
-                'metrics_over_trainsteps_checkpoint':
-                    {"target": 'pytorch_lightning.callbacks.ModelCheckpoint',
-                     'params': {
-                         "dirpath": os.path.join(ckptdir, 'trainstep_checkpoints'),
-                         "filename": "{epoch:06}-{step:09}",
-                         "verbose": True,
-                         'save_top_k': -1,
-                         'every_n_train_steps': 10000,
-                         'save_weights_only': True
-                     }
-                     }
+                'metrics_over_trainsteps_checkpoint': {
+                    'target': 'pytorch_lightning.callbacks.ModelCheckpoint',
+                    'params': {
+                        'dirpath': os.path.join(
+                            ckptdir, 'trainstep_checkpoints'
+                        ),
+                        'filename': '{epoch:06}-{step:09}',
+                        'verbose': True,
+                        'save_top_k': -1,
+                        'every_n_train_steps': 10000,
+                        'save_weights_only': True,
+                    },
+                }
             }
-            default_callbacks_cfg.update(default_metrics_over_trainsteps_ckpt_dict)
+            default_callbacks_cfg.update(
+                default_metrics_over_trainsteps_ckpt_dict
+            )
 
         callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)
-        if 'ignore_keys_callback' in callbacks_cfg and hasattr(trainer_opt, 'resume_from_checkpoint'):
-            callbacks_cfg.ignore_keys_callback.params['ckpt_path'] = trainer_opt.resume_from_checkpoint
+        if 'ignore_keys_callback' in callbacks_cfg and hasattr(
+            trainer_opt, 'resume_from_checkpoint'
+        ):
+            callbacks_cfg.ignore_keys_callback.params[
+                'ckpt_path'
+            ] = trainer_opt.resume_from_checkpoint
         elif 'ignore_keys_callback' in callbacks_cfg:
             del callbacks_cfg['ignore_keys_callback']
 
-        trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg]
-        trainer_kwargs["max_steps"] = opt.max_steps
+        trainer_kwargs['callbacks'] = [
+            instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg
+        ]
+        trainer_kwargs['max_steps'] = opt.max_steps
 
         trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs)
         trainer.logdir = logdir  ###
@@ -720,47 +862,60 @@ if __name__ == "__main__":
         # lightning still takes care of proper multiprocessing though
         data.prepare_data()
         data.setup()
-        print("#### Data #####")
+        print('#### Data #####')
         for k in data.datasets:
-            print(f"{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}")
+            print(
+                f'{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}'
+            )
 
         # configure learning rate
-        bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate
+        bs, base_lr = (
+            config.data.params.batch_size,
+            config.model.base_learning_rate,
+        )
         if not cpu:
-            ngpu = len(lightning_config.trainer.gpus.strip(",").split(','))
+            ngpu = len(lightning_config.trainer.gpus.strip(',').split(','))
         else:
             ngpu = 1
         if 'accumulate_grad_batches' in lightning_config.trainer:
-            accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches
+            accumulate_grad_batches = (
+                lightning_config.trainer.accumulate_grad_batches
+            )
         else:
             accumulate_grad_batches = 1
-        print(f"accumulate_grad_batches = {accumulate_grad_batches}")
-        lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches
+        print(f'accumulate_grad_batches = {accumulate_grad_batches}')
+        lightning_config.trainer.accumulate_grad_batches = (
+            accumulate_grad_batches
+        )
         if opt.scale_lr:
             model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr
             print(
-                "Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)".format(
-                    model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr))
+                'Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)'.format(
+                    model.learning_rate,
+                    accumulate_grad_batches,
+                    ngpu,
+                    bs,
+                    base_lr,
+                )
+            )
         else:
             model.learning_rate = base_lr
-            print("++++ NOT USING LR SCALING ++++")
-            print(f"Setting learning rate to {model.learning_rate:.2e}")
-
+            print('++++ NOT USING LR SCALING ++++')
+            print(f'Setting learning rate to {model.learning_rate:.2e}')
 
         # allow checkpointing via USR1
         def melk(*args, **kwargs):
             # run all checkpoint hooks
             if trainer.global_rank == 0:
-                print("Summoning checkpoint.")
-                ckpt_path = os.path.join(ckptdir, "last.ckpt")
+                print('Summoning checkpoint.')
+                ckpt_path = os.path.join(ckptdir, 'last.ckpt')
                 trainer.save_checkpoint(ckpt_path)
 
-
         def divein(*args, **kwargs):
             if trainer.global_rank == 0:
-                import pudb;
-                pudb.set_trace()
+                import pudb
 
+                pudb.set_trace()
 
         import signal
 
@@ -788,7 +943,7 @@ if __name__ == "__main__":
         # move newly created debug project to debug_runs
         if opt.debug and not opt.resume and trainer.global_rank == 0:
             dst, name = os.path.split(logdir)
-            dst = os.path.join(dst, "debug_runs", name)
+            dst = os.path.join(dst, 'debug_runs', name)
             os.makedirs(os.path.split(dst)[0], exist_ok=True)
             os.rename(logdir, dst)
         # if trainer.global_rank == 0:
diff --git a/scripts/dream.py b/scripts/dream.py
index 6a17656593..f6feb10adc 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -8,62 +8,66 @@ import sys
 import copy
 import warnings
 import ldm.dream.readline
-from   ldm.dream.pngwriter import PngWriter,PromptFormatter
+from ldm.dream.pngwriter import PngWriter, PromptFormatter
 
 debugging = False
 
+
 def main():
-    ''' Initialize command-line parsers and the diffusion model '''
+    """Initialize command-line parsers and the diffusion model"""
     arg_parser = create_argv_parser()
-    opt        = arg_parser.parse_args()
+    opt = arg_parser.parse_args()
     if opt.laion400m:
         # defaults suitable to the older latent diffusion weights
-        width   = 256
-        height  = 256
-        config  = "configs/latent-diffusion/txt2img-1p4B-eval.yaml"
-        weights = "models/ldm/text2img-large/model.ckpt"
+        width = 256
+        height = 256
+        config = 'configs/latent-diffusion/txt2img-1p4B-eval.yaml'
+        weights = 'models/ldm/text2img-large/model.ckpt'
     else:
         # some defaults suitable for stable diffusion weights
-        width   = 512
-        height  = 512
-        config  = "configs/stable-diffusion/v1-inference.yaml"
-        weights = "models/ldm/stable-diffusion-v1/model.ckpt"
+        width = 512
+        height = 512
+        config = 'configs/stable-diffusion/v1-inference.yaml'
+        weights = 'models/ldm/stable-diffusion-v1/model.ckpt'
 
-    print("* Initializing, be patient...\n")
+    print('* Initializing, be patient...\n')
     sys.path.append('.')
     from pytorch_lightning import logging
     from ldm.simplet2i import T2I
+
     # these two lines prevent a horrible warning message from appearing
     # when the frozen CLIP tokenizer is imported
     import transformers
+
     transformers.logging.set_verbosity_error()
-    
+
     # creating a simple text2image object with a handful of
     # defaults passed on the command line.
     # additional parameters will be added (or overriden) during
     # the user input loop
-    t2i = T2I(width=width,
-              height=height,
-              sampler_name=opt.sampler_name,
-              weights=weights,
-              full_precision=opt.full_precision,
-              config=config,
-              latent_diffusion_weights=opt.laion400m, # this is solely for recreating the prompt
-              embedding_path=opt.embedding_path,
-              device=opt.device
+    t2i = T2I(
+        width=width,
+        height=height,
+        sampler_name=opt.sampler_name,
+        weights=weights,
+        full_precision=opt.full_precision,
+        config=config,
+        latent_diffusion_weights=opt.laion400m,  # this is solely for recreating the prompt
+        embedding_path=opt.embedding_path,
+        device=opt.device,
     )
 
     # make sure the output directory exists
     if not os.path.exists(opt.outdir):
         os.makedirs(opt.outdir)
-        
+
     # gets rid of annoying messages about random seed
-    logging.getLogger("pytorch_lightning").setLevel(logging.ERROR)
+    logging.getLogger('pytorch_lightning').setLevel(logging.ERROR)
 
     infile = None
     try:
         if opt.infile is not None:
-            infile = open(opt.infile,'r')
+            infile = open(opt.infile, 'r')
     except FileNotFoundError as e:
         print(e)
         exit(-1)
@@ -73,135 +77,156 @@ def main():
 
     # load GFPGAN if requested
     if opt.use_gfpgan:
-        print("\n* --gfpgan was specified, loading gfpgan...")
+        print('\n* --gfpgan was specified, loading gfpgan...')
         with warnings.catch_warnings():
-            warnings.filterwarnings("ignore", category=DeprecationWarning) 
+            warnings.filterwarnings('ignore', category=DeprecationWarning)
 
             try:
-                model_path = os.path.join(opt.gfpgan_dir, opt.gfpgan_model_path)
+                model_path = os.path.join(
+                    opt.gfpgan_dir, opt.gfpgan_model_path
+                )
                 if not os.path.isfile(model_path):
-                    raise Exception("GFPGAN model not found at path "+model_path)
+                    raise Exception(
+                        'GFPGAN model not found at path ' + model_path
+                    )
 
                 sys.path.append(os.path.abspath(opt.gfpgan_dir))
                 from gfpgan import GFPGANer
 
-                bg_upsampler = load_gfpgan_bg_upsampler(opt.gfpgan_bg_upsampler, opt.gfpgan_bg_tile)
+                bg_upsampler = load_gfpgan_bg_upsampler(
+                    opt.gfpgan_bg_upsampler, opt.gfpgan_bg_tile
+                )
 
-                t2i.gfpgan = GFPGANer(model_path=model_path, upscale=opt.gfpgan_upscale, arch='clean', channel_multiplier=2, bg_upsampler=bg_upsampler)
+                t2i.gfpgan = GFPGANer(
+                    model_path=model_path,
+                    upscale=opt.gfpgan_upscale,
+                    arch='clean',
+                    channel_multiplier=2,
+                    bg_upsampler=bg_upsampler,
+                )
             except Exception:
                 import traceback
-                print("Error loading GFPGAN:", file=sys.stderr)
+
+                print('Error loading GFPGAN:', file=sys.stderr)
                 print(traceback.format_exc(), file=sys.stderr)
 
-    print("\n* Initialization done! Awaiting your command (-h for help, 'q' to quit, 'cd' to change output dir, 'pwd' to print output dir)...")
+    print(
+        "\n* Initialization done! Awaiting your command (-h for help, 'q' to quit, 'cd' to change output dir, 'pwd' to print output dir)..."
+    )
 
-    log_path   = os.path.join(opt.outdir,'dream_log.txt')
-    with open(log_path,'a') as log:
+    log_path = os.path.join(opt.outdir, 'dream_log.txt')
+    with open(log_path, 'a') as log:
         cmd_parser = create_cmd_parser()
-        main_loop(t2i,opt.outdir,cmd_parser,log,infile)
+        main_loop(t2i, opt.outdir, cmd_parser, log, infile)
         log.close()
     if infile:
         infile.close()
 
 
-def main_loop(t2i,outdir,parser,log,infile):
-    ''' prompt/read/execute loop '''
-    done       = False
+def main_loop(t2i, outdir, parser, log, infile):
+    """prompt/read/execute loop"""
+    done = False
     last_seeds = []
-    
+
     while not done:
         try:
-            command = infile.readline() if infile else input("dream> ") 
+            command = infile.readline() if infile else input('dream> ')
         except EOFError:
             done = True
             break
 
-        if infile and len(command)==0:
+        if infile and len(command) == 0:
             done = True
             break
 
-        if command.startswith(('#','//')):
+        if command.startswith(('#', '//')):
             continue
 
         # before splitting, escape single quotes so as not to mess
         # up the parser
-        command = command.replace("'","\\'")
+        command = command.replace("'", "\\'")
 
         try:
             elements = shlex.split(command)
         except ValueError as e:
             print(str(e))
             continue
-        
-        if len(elements)==0:
+
+        if len(elements) == 0:
             continue
 
-        if elements[0]=='q':
+        if elements[0] == 'q':
             done = True
             break
 
-        if elements[0]=='cd' and len(elements)>1:
+        if elements[0] == 'cd' and len(elements) > 1:
             if os.path.exists(elements[1]):
-                print(f"setting image output directory to {elements[1]}")
-                outdir=elements[1]
+                print(f'setting image output directory to {elements[1]}')
+                outdir = elements[1]
             else:
-                print(f"directory {elements[1]} does not exist")
+                print(f'directory {elements[1]} does not exist')
             continue
 
-        if elements[0]=='pwd':
-            print(f"current output directory is {outdir}")
+        if elements[0] == 'pwd':
+            print(f'current output directory is {outdir}')
             continue
-        
-        if elements[0].startswith('!dream'): # in case a stored prompt still contains the !dream command
+
+        if elements[0].startswith(
+            '!dream'
+        ):   # in case a stored prompt still contains the !dream command
             elements.pop(0)
-            
+
         # rearrange the arguments to mimic how it works in the Dream bot.
         switches = ['']
         switches_started = False
 
         for el in elements:
-            if el[0]=='-' and not switches_started:
+            if el[0] == '-' and not switches_started:
                 switches_started = True
             if switches_started:
                 switches.append(el)
             else:
                 switches[0] += el
                 switches[0] += ' '
-        switches[0] = switches[0][:len(switches[0])-1]
+        switches[0] = switches[0][: len(switches[0]) - 1]
 
         try:
-            opt      = parser.parse_args(switches)
+            opt = parser.parse_args(switches)
         except SystemExit:
             parser.print_help()
             continue
-        if len(opt.prompt)==0:
-            print("Try again with a prompt!")
+        if len(opt.prompt) == 0:
+            print('Try again with a prompt!')
             continue
-        if opt.seed is not None and opt.seed<0: # retrieve previous value!
+        if opt.seed is not None and opt.seed < 0:   # retrieve previous value!
             try:
                 opt.seed = last_seeds[opt.seed]
-                print(f"reusing previous seed {opt.seed}")
+                print(f'reusing previous seed {opt.seed}')
             except IndexError:
-                print(f"No previous seed at position {opt.seed} found")
+                print(f'No previous seed at position {opt.seed} found')
                 opt.seed = None
-                
-        normalized_prompt      = PromptFormatter(t2i,opt).normalize_prompt()
-        individual_images      = not opt.grid
+
+        normalized_prompt = PromptFormatter(t2i, opt).normalize_prompt()
+        individual_images = not opt.grid
 
         try:
-            file_writer        = PngWriter(outdir,normalized_prompt,opt.batch_size)
-            callback           = file_writer.write_image if individual_images else None
+            file_writer = PngWriter(outdir, normalized_prompt, opt.batch_size)
+            callback = file_writer.write_image if individual_images else None
 
-            image_list   = t2i.prompt2image(image_callback=callback,**vars(opt))
-            results      = file_writer.files_written     if individual_images else image_list
+            image_list = t2i.prompt2image(image_callback=callback, **vars(opt))
+            results = (
+                file_writer.files_written if individual_images else image_list
+            )
 
             if opt.grid and len(results) > 0:
                 grid_img = file_writer.make_grid([r[0] for r in results])
                 filename = file_writer.unique_filename(results[0][1])
-                seeds    = [a[1] for a in results]
-                results  = [[filename,seeds]]
-                metadata_prompt   = f'{normalized_prompt} -S{results[0][1]}'
-                file_writer.save_image_and_prompt_to_png(grid_img,metadata_prompt,filename)
+                seeds = [a[1] for a in results]
+                results = [[filename, seeds]]
+                metadata_prompt = f'{normalized_prompt} -S{results[0][1]}'
+                file_writer.save_image_and_prompt_to_png(
+                    grid_img, metadata_prompt, filename
+                )
 
             last_seeds = [r[1] for r in results]
 
@@ -213,10 +238,11 @@ def main_loop(t2i,outdir,parser,log,infile):
             print(e)
             continue
 
-        print("Outputs:")
-        write_log_message(t2i,normalized_prompt,results,log)
+        print('Outputs:')
+        write_log_message(t2i, normalized_prompt, results, log)
+
+    print('goodbye!')
 
-    print("goodbye!")
 
 def load_gfpgan_bg_upsampler(bg_upsampler, bg_tile=400):
     import torch
@@ -224,13 +250,24 @@ def load_gfpgan_bg_upsampler(bg_upsampler, bg_tile=400):
     if bg_upsampler == 'realesrgan':
         if not torch.cuda.is_available():  # CPU
             import warnings
-            warnings.warn('The unoptimized RealESRGAN is slow on CPU. We do not use it. '
-                          'If you really want to use it, please modify the corresponding codes.')
+
+            warnings.warn(
+                'The unoptimized RealESRGAN is slow on CPU. We do not use it. '
+                'If you really want to use it, please modify the corresponding codes.'
+            )
             bg_upsampler = None
         else:
             from basicsr.archs.rrdbnet_arch import RRDBNet
             from realesrgan import RealESRGANer
-            model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
+
+            model = RRDBNet(
+                num_in_ch=3,
+                num_out_ch=3,
+                num_feat=64,
+                num_block=23,
+                num_grow_ch=32,
+                scale=2,
+            )
             bg_upsampler = RealESRGANer(
                 scale=2,
                 model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
@@ -238,12 +275,14 @@ def load_gfpgan_bg_upsampler(bg_upsampler, bg_tile=400):
                 tile=bg_tile,
                 tile_pad=10,
                 pre_pad=0,
-                half=True)  # need to set False in CPU mode
+                half=True,
+            )  # need to set False in CPU mode
     else:
         bg_upsampler = None
 
     return bg_upsampler
 
+
 # variant generation is going to be superseded by a generalized
 # "prompt-morph" functionality
 # def generate_variants(t2i,outdir,opt,previous_gens):
@@ -268,110 +307,209 @@ def load_gfpgan_bg_upsampler(bg_upsampler, bg_tile=400):
 #                 continue
 #     print(f'{opt.variants} variants generated')
 #     return variants
-                
 
-def write_log_message(t2i,prompt,results,logfile):
-    ''' logs the name of the output image, its prompt and seed to the terminal, log file, and a Dream text chunk in the PNG metadata'''
-    last_seed  = None
-    img_num    = 1
-    seenit     = {}
+
+def write_log_message(t2i, prompt, results, logfile):
+    """logs the name of the output image, its prompt and seed to the terminal, log file, and a Dream text chunk in the PNG metadata"""
+    last_seed = None
+    img_num = 1
+    seenit = {}
 
     for r in results:
         seed = r[1]
-        log_message = (f'{r[0]}: {prompt} -S{seed}')
+        log_message = f'{r[0]}: {prompt} -S{seed}'
 
         print(log_message)
-        logfile.write(log_message+"\n")
+        logfile.write(log_message + '\n')
         logfile.flush()
 
+
 def create_argv_parser():
-    parser = argparse.ArgumentParser(description="Parse script's command line args")
-    parser.add_argument("--laion400m",
-                        "--latent_diffusion",
-                        "-l",
-                        dest='laion400m',
-                        action='store_true',
-                        help="fallback to the latent diffusion (laion400m) weights and config")
-    parser.add_argument("--from_file",
-                        dest='infile',
-                        type=str,
-                        help="if specified, load prompts from this file")
-    parser.add_argument('-n','--iterations',
-                        type=int,
-                        default=1,
-                        help="number of images to generate")
-    parser.add_argument('-F','--full_precision',
-                        dest='full_precision',
-                        action='store_true',
-                        help="use slower full precision math for calculations")
-    parser.add_argument('--sampler','-m',
-                        dest="sampler_name",
-                        choices=['ddim', 'k_dpm_2_a', 'k_dpm_2', 'k_euler_a', 'k_euler', 'k_heun', 'k_lms', 'plms'],
-                        default='k_lms',
-                        help="which sampler to use (k_lms) - can only be set on command line")
-    parser.add_argument('--outdir',
-                        '-o',
-                        type=str,
-                        default="outputs/img-samples",
-                        help="directory in which to place generated images and a log of prompts and seeds (outputs/img-samples")
-    parser.add_argument('--embedding_path',
-                        type=str,
-                        help="Path to a pre-trained embedding manager checkpoint - can only be set on command line")
-    parser.add_argument('--device',
-                        '-d',
-                        type=str,
-                        default="cuda",
-                        help="device to run stable diffusion on. defaults to cuda `torch.cuda.current_device()` if avalible")
+    parser = argparse.ArgumentParser(
+        description="Parse script's command line args"
+    )
+    parser.add_argument(
+        '--laion400m',
+        '--latent_diffusion',
+        '-l',
+        dest='laion400m',
+        action='store_true',
+        help='fallback to the latent diffusion (laion400m) weights and config',
+    )
+    parser.add_argument(
+        '--from_file',
+        dest='infile',
+        type=str,
+        help='if specified, load prompts from this file',
+    )
+    parser.add_argument(
+        '-n',
+        '--iterations',
+        type=int,
+        default=1,
+        help='number of images to generate',
+    )
+    parser.add_argument(
+        '-F',
+        '--full_precision',
+        dest='full_precision',
+        action='store_true',
+        help='use slower full precision math for calculations',
+    )
+    parser.add_argument(
+        '--sampler',
+        '-m',
+        dest='sampler_name',
+        choices=[
+            'ddim',
+            'k_dpm_2_a',
+            'k_dpm_2',
+            'k_euler_a',
+            'k_euler',
+            'k_heun',
+            'k_lms',
+            'plms',
+        ],
+        default='k_lms',
+        help='which sampler to use (k_lms) - can only be set on command line',
+    )
+    parser.add_argument(
+        '--outdir',
+        '-o',
+        type=str,
+        default='outputs/img-samples',
+        help='directory in which to place generated images and a log of prompts and seeds (outputs/img-samples)',
+    )
+    parser.add_argument(
+        '--embedding_path',
+        type=str,
+        help='Path to a pre-trained embedding manager checkpoint - can only be set on command line',
+    )
+    parser.add_argument(
+        '--device',
+        '-d',
+        type=str,
+        default='cuda',
+        help='device to run stable diffusion on. defaults to cuda `torch.cuda.current_device()` if available',
+    )
     # GFPGAN related args
-    parser.add_argument('--gfpgan',
-                        dest='use_gfpgan',
-                        action='store_true',
-                        help="load gfpgan for use in the dreambot. Note: Enabling GFPGAN will require more GPU memory")
-    parser.add_argument("--gfpgan_upscale",
-                        type=int,
-                        default=2,
-                        help="The final upsampling scale of the image. Default: 2. Only used if --gfpgan is specified")
-    parser.add_argument("--gfpgan_bg_upsampler",
-                        type=str,
-                        default='realesrgan',
-                        help="Background upsampler. Default: None. Options: realesrgan, none. Only used if --gfpgan is specified")
-    parser.add_argument("--gfpgan_bg_tile",
-                        type=int,
-                        default=400,
-                        help="Tile size for background sampler, 0 for no tile during testing. Default: 400. Only used if --gfpgan is specified")
-    parser.add_argument("--gfpgan_model_path",
-                        type=str,
-                        default='experiments/pretrained_models/GFPGANv1.3.pth',
-                        help="indicates the path to the GFPGAN model, relative to --gfpgan_dir. Only used if --gfpgan is specified")
-    parser.add_argument("--gfpgan_dir",
-                        type=str,
-                        default='../GFPGAN',
-                        help="indicates the directory containing the GFPGAN code. Only used if --gfpgan is specified")
+    parser.add_argument(
+        '--gfpgan',
+        dest='use_gfpgan',
+        action='store_true',
+        help='load gfpgan for use in the dreambot. Note: Enabling GFPGAN will require more GPU memory',
+    )
+    parser.add_argument(
+        '--gfpgan_upscale',
+        type=int,
+        default=2,
+        help='The final upsampling scale of the image. Default: 2. Only used if --gfpgan is specified',
+    )
+    parser.add_argument(
+        '--gfpgan_bg_upsampler',
+        type=str,
+        default='realesrgan',
+        help='Background upsampler. Default: None. Options: realesrgan, none. Only used if --gfpgan is specified',
+    )
+    parser.add_argument(
+        '--gfpgan_bg_tile',
+        type=int,
+        default=400,
+        help='Tile size for background sampler, 0 for no tile during testing. Default: 400. Only used if --gfpgan is specified',
+    )
+    parser.add_argument(
+        '--gfpgan_model_path',
+        type=str,
+        default='experiments/pretrained_models/GFPGANv1.3.pth',
+        help='indicates the path to the GFPGAN model, relative to --gfpgan_dir. Only used if --gfpgan is specified',
+    )
+    parser.add_argument(
+        '--gfpgan_dir',
+        type=str,
+        default='../GFPGAN',
+        help='indicates the directory containing the GFPGAN code. Only used if --gfpgan is specified',
+    )
     return parser
-                        
-    
+
+
 def create_cmd_parser():
-    parser = argparse.ArgumentParser(description='Example: dream> a fantastic alien landscape -W1024 -H960 -s100 -n12')
+    parser = argparse.ArgumentParser(
+        description='Example: dream> a fantastic alien landscape -W1024 -H960 -s100 -n12'
+    )
     parser.add_argument('prompt')
-    parser.add_argument('-s','--steps',type=int,help="number of steps")
-    parser.add_argument('-S','--seed',type=int,help="image seed; a +ve integer, or use -1 for the previous seed, -2 for the one before that, etc")
-    parser.add_argument('-n','--iterations',type=int,default=1,help="number of samplings to perform (slower, but will provide seeds for individual images)")
-    parser.add_argument('-b','--batch_size',type=int,default=1,help="number of images to produce per sampling (will not provide seeds for individual images!)")
-    parser.add_argument('-W','--width',type=int,help="image width, multiple of 64")
-    parser.add_argument('-H','--height',type=int,help="image height, multiple of 64")
-    parser.add_argument('-C','--cfg_scale',default=7.5,type=float,help="prompt configuration scale")
-    parser.add_argument('-g','--grid',action='store_true',help="generate a grid")
-    parser.add_argument('-i','--individual',action='store_true',help="generate individual files (default)")
-    parser.add_argument('-I','--init_img',type=str,help="path to input image for img2img mode (supersedes width and height)")
-    parser.add_argument('-f','--strength',default=0.75,type=float,help="strength for noising/unnoising. 0.0 preserves image exactly, 1.0 replaces it completely")
-    parser.add_argument('-G','--gfpgan_strength', default=0.5, type=float, help="The strength at which to apply the GFPGAN model to the result, in order to improve faces.")
-# variants is going to be superseded by a generalized "prompt-morph" function
-#    parser.add_argument('-v','--variants',type=int,help="in img2img mode, the first generated image will get passed back to img2img to generate the requested number of variants")
-    parser.add_argument('-x','--skip_normalize',action='store_true',help="skip subprompt weight normalization")
+    parser.add_argument('-s', '--steps', type=int, help='number of steps')
+    parser.add_argument(
+        '-S',
+        '--seed',
+        type=int,
+        help='image seed; a +ve integer, or use -1 for the previous seed, -2 for the one before that, etc',
+    )
+    parser.add_argument(
+        '-n',
+        '--iterations',
+        type=int,
+        default=1,
+        help='number of samplings to perform (slower, but will provide seeds for individual images)',
+    )
+    parser.add_argument(
+        '-b',
+        '--batch_size',
+        type=int,
+        default=1,
+        help='number of images to produce per sampling (will not provide seeds for individual images!)',
+    )
+    parser.add_argument(
+        '-W', '--width', type=int, help='image width, multiple of 64'
+    )
+    parser.add_argument(
+        '-H', '--height', type=int, help='image height, multiple of 64'
+    )
+    parser.add_argument(
+        '-C',
+        '--cfg_scale',
+        default=7.5,
+        type=float,
+        help='prompt configuration scale',
+    )
+    parser.add_argument(
+        '-g', '--grid', action='store_true', help='generate a grid'
+    )
+    parser.add_argument(
+        '-i',
+        '--individual',
+        action='store_true',
+        help='generate individual files (default)',
+    )
+    parser.add_argument(
+        '-I',
+        '--init_img',
+        type=str,
+        help='path to input image for img2img mode (supersedes width and height)',
+    )
+    parser.add_argument(
+        '-f',
+        '--strength',
+        default=0.75,
+        type=float,
+        help='strength for noising/unnoising. 0.0 preserves image exactly, 1.0 replaces it completely',
+    )
+    parser.add_argument(
+        '-G',
+        '--gfpgan_strength',
+        default=0.5,
+        type=float,
+        help='The strength at which to apply the GFPGAN model to the result, in order to improve faces.',
+    )
+    # variants is going to be superseded by a generalized "prompt-morph" function
+    #    parser.add_argument('-v','--variants',type=int,help="in img2img mode, the first generated image will get passed back to img2img to generate the requested number of variants")
+    parser.add_argument(
+        '-x',
+        '--skip_normalize',
+        action='store_true',
+        help='skip subprompt weight normalization',
+    )
     return parser
 
 
-
-if __name__ == "__main__":
+if __name__ == '__main__':
     main()
-
diff --git a/scripts/preload_models.py b/scripts/preload_models.py
index d7538c82b8..624b61e48e 100755
--- a/scripts/preload_models.py
+++ b/scripts/preload_models.py
@@ -11,26 +11,28 @@ import warnings
 transformers.logging.set_verbosity_error()
 
 # this will preload the Bert tokenizer fles
-print("preloading bert tokenizer...")
+print('preloading bert tokenizer...')
 from transformers import BertTokenizerFast
-tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
-print("...success")
+
+tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
+print('...success')
 
 # this will download requirements for Kornia
-print("preloading Kornia requirements (ignore the deprecation warnings)...")
+print('preloading Kornia requirements (ignore the deprecation warnings)...')
 with warnings.catch_warnings():
-    warnings.filterwarnings("ignore", category=DeprecationWarning) 
+    warnings.filterwarnings('ignore', category=DeprecationWarning)
     import kornia
-print("...success")
+print('...success')
 
-version='openai/clip-vit-large-patch14'
+version = 'openai/clip-vit-large-patch14'
 
 print('preloading CLIP model (Ignore the deprecation warnings)...')
 sys.stdout.flush()
 import clip
 from transformers import CLIPTokenizer, CLIPTextModel
-tokenizer  =CLIPTokenizer.from_pretrained(version)
-transformer=CLIPTextModel.from_pretrained(version)
+
+tokenizer = CLIPTokenizer.from_pretrained(version)
+transformer = CLIPTextModel.from_pretrained(version)
 print('\n\n...success')
 
 # In the event that the user has installed GFPGAN and also elected to use
@@ -38,23 +40,33 @@ print('\n\n...success')
 gfpgan = False
 try:
     from realesrgan import RealESRGANer
+
     gfpgan = True
 except ModuleNotFoundError:
     pass
 
 if gfpgan:
-    print("Loading models from RealESRGAN and facexlib")
+    print('Loading models from RealESRGAN and facexlib')
     try:
         from basicsr.archs.rrdbnet_arch import RRDBNet
         from facexlib.utils.face_restoration_helper import FaceRestoreHelper
-        RealESRGANer(scale=2,
-                     model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
-                     model=RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2))
-        FaceRestoreHelper(1,det_model='retinaface_resnet50')
-        print("...success")
+
+        RealESRGANer(
+            scale=2,
+            model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
+            model=RRDBNet(
+                num_in_ch=3,
+                num_out_ch=3,
+                num_feat=64,
+                num_block=23,
+                num_grow_ch=32,
+                scale=2,
+            ),
+        )
+        FaceRestoreHelper(1, det_model='retinaface_resnet50')
+        print('...success')
     except Exception:
         import traceback
-        print("Error loading GFPGAN:")
+
+        print('Error loading GFPGAN:')
         print(traceback.format_exc())
-
-

From eb9f0be91ada14e41df150127fdfba2b951ce893 Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Fri, 26 Aug 2022 03:53:55 -0400
Subject: [PATCH 54/58] Set default to none for gfpgan_strength

---
 scripts/dream.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/dream.py b/scripts/dream.py
index f6feb10adc..5d5e8db4a5 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -496,7 +496,7 @@ def create_cmd_parser():
     parser.add_argument(
         '-G',
         '--gfpgan_strength',
-        default=0.5,
+        default=None,
         type=float,
         help='The strength at which to apply the GFPGAN model to the result, in order to improve faces.',
     )

From 46a223f229c3458f0190813d48e1a52ed5a5ff5a Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Fri, 26 Aug 2022 04:05:09 -0400
Subject: [PATCH 55/58] Double check for null and 0, and add a comment to
 indicate intent

---
 ldm/simplet2i.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 8aad3557af..dd99f5e6b6 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -305,9 +305,12 @@ class T2I:
                     iter_images = next(images_iterator)
                     for image in iter_images:
                         try:
-                            if gfpgan_strength > 0:
-                                image = self._run_gfpgan(
-                                    image, gfpgan_strength
+                            # if gfpgan strength is not none or less than 0.0 then don't use GFPGAN
+                            # if the user specified -G x, and the --gfpgan wasn't specified, then
+                            # the net result is a message gets printed - nothing else happens
+                            if gfpgan_strength is not None and gfpgan_strength > 0.0:
+                                image = self.gfpgan.generate(
+                                    image, gfpgan_strength=gfpgan_strength
                                 )
                         except Exception as e:
                             print(

From 4e5aa7e714a0230eeefb93dd45c31935e2064587 Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Fri, 26 Aug 2022 04:07:01 -0400
Subject: [PATCH 56/58] fix comment

---
 ldm/simplet2i.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index dd99f5e6b6..5c3d08a5d1 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -305,7 +305,7 @@ class T2I:
                     iter_images = next(images_iterator)
                     for image in iter_images:
                         try:
-                            # if gfpgan strength is not none or less than 0.0 then don't use GFPGAN
+                            # if gfpgan strength is none or less than 0.0 then don't use GFPGAN
                             # if the user specified -G x, and the --gfpgan wasn't specified, then
                             # the net result is a message gets printed - nothing else happens
                             if gfpgan_strength is not None and gfpgan_strength > 0.0:

From 7bf0bc52086ba0b9259202f90081566c3e91c74c Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Fri, 26 Aug 2022 04:08:18 -0400
Subject: [PATCH 57/58] fix comment

---
 ldm/simplet2i.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 5c3d08a5d1..93a33a6a5d 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -305,9 +305,11 @@ class T2I:
                     iter_images = next(images_iterator)
                     for image in iter_images:
                         try:
-                            # if gfpgan strength is none or less than 0.0 then don't use GFPGAN
-                            # if the user specified -G x, and the --gfpgan wasn't specified, then
-                            # the net result is a message gets printed - nothing else happens
+                            # if gfpgan strength is none or less than or equal to 0.0 then
+                            # don't even attempt to use GFPGAN.
+                            # if the user specified a value of -G that satisfies the condition and
+                            # --gfpgan wasn't specified, at startup then
+                            # the net result is a message gets printed - nothing else happens.
                             if gfpgan_strength is not None and gfpgan_strength > 0.0:
                                 image = self.gfpgan.generate(
                                     image, gfpgan_strength=gfpgan_strength

From f3fad22cb6e24d8672f8548fe6601da91df9ecf6 Mon Sep 17 00:00:00 2001
From: Sean McLellan <sean@baristalabs.io>
Date: Fri, 26 Aug 2022 05:27:34 -0400
Subject: [PATCH 58/58] Fix

---
 ldm/simplet2i.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
index 93a33a6a5d..ffec5fda2b 100644
--- a/ldm/simplet2i.py
+++ b/ldm/simplet2i.py
@@ -311,8 +311,8 @@ class T2I:
                             # --gfpgan wasn't specified, at startup then
                             # the net result is a message gets printed - nothing else happens.
                             if gfpgan_strength is not None and gfpgan_strength > 0.0:
-                                image = self.gfpgan.generate(
-                                    image, gfpgan_strength=gfpgan_strength
+                                image = self._run_gfpgan(
+                                    image, gfpgan_strength
                                 )
                         except Exception as e:
                             print(