From 968ebd236b4a03c95421d47dfb673aa718028080 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Tue, 27 Jun 2023 14:35:59 +0800 Subject: [PATCH 01/30] Fix ONNX export of the latest streaming zipformer model. (#1148) --- egs/librispeech/ASR/zipformer/export-onnx-streaming.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/egs/librispeech/ASR/zipformer/export-onnx-streaming.py b/egs/librispeech/ASR/zipformer/export-onnx-streaming.py index 80dc19b37..ff3e46433 100755 --- a/egs/librispeech/ASR/zipformer/export-onnx-streaming.py +++ b/egs/librispeech/ASR/zipformer/export-onnx-streaming.py @@ -86,7 +86,7 @@ from icefall.checkpoint import ( find_checkpoints, load_checkpoint, ) -from icefall.utils import make_pad_mask, str2bool +from icefall.utils import str2bool def get_parser(): @@ -218,7 +218,7 @@ class OnnxEncoder(nn.Module): ) assert x.size(1) == self.chunk_size, (x.size(1), self.chunk_size) - src_key_padding_mask = make_pad_mask(x_lens) + src_key_padding_mask = torch.zeros(N, self.chunk_size, dtype=torch.bool) # processed_mask is used to mask out initial states processed_mask = torch.arange(left_context_len, device=x.device).expand( @@ -272,6 +272,7 @@ class OnnxEncoder(nn.Module): states = self.encoder.get_init_states(batch_size, device) embed_states = self.encoder_embed.get_init_states(batch_size, device) + states.append(embed_states) processed_lens = torch.zeros(batch_size, dtype=torch.int64, device=device) From 9c2172c1c42486c35cf98c8ee586347b57908925 Mon Sep 17 00:00:00 2001 From: Desh Raj Date: Wed, 28 Jun 2023 10:43:49 +0200 Subject: [PATCH 02/30] Zipformer for TedLium (#1125) * initial commit for zipformer tedlium * fix unk decoding * add pretrained model and logs * update for new AsrModel * add option for choosing rnnt type * add results with modified rnnt --- .../beam_search.py | 16 +- egs/tedlium3/ASR/RESULTS.md | 128 ++ egs/tedlium3/ASR/zipformer/__init__.py | 0 egs/tedlium3/ASR/zipformer/asr_datamodule.py | 1 + egs/tedlium3/ASR/zipformer/beam_search.py | 1 + egs/tedlium3/ASR/zipformer/decode.py | 833 +++++++++++ egs/tedlium3/ASR/zipformer/decoder.py | 1 + .../ASR/zipformer/encoder_interface.py | 1 + egs/tedlium3/ASR/zipformer/export.py | 1 + egs/tedlium3/ASR/zipformer/joiner.py | 1 + egs/tedlium3/ASR/zipformer/model.py | 223 +++ egs/tedlium3/ASR/zipformer/optim.py | 1 + egs/tedlium3/ASR/zipformer/pretrained.py | 1 + egs/tedlium3/ASR/zipformer/profile.py | 1 + egs/tedlium3/ASR/zipformer/scaling.py | 1 + .../ASR/zipformer/scaling_converter.py | 1 + egs/tedlium3/ASR/zipformer/subsampling.py | 1 + egs/tedlium3/ASR/zipformer/train.py | 1308 +++++++++++++++++ egs/tedlium3/ASR/zipformer/zipformer.py | 1 + 19 files changed, 2519 insertions(+), 2 deletions(-) create mode 100644 egs/tedlium3/ASR/zipformer/__init__.py create mode 120000 egs/tedlium3/ASR/zipformer/asr_datamodule.py create mode 120000 egs/tedlium3/ASR/zipformer/beam_search.py create mode 100755 egs/tedlium3/ASR/zipformer/decode.py create mode 120000 egs/tedlium3/ASR/zipformer/decoder.py create mode 120000 egs/tedlium3/ASR/zipformer/encoder_interface.py create mode 120000 egs/tedlium3/ASR/zipformer/export.py create mode 120000 egs/tedlium3/ASR/zipformer/joiner.py create mode 100644 egs/tedlium3/ASR/zipformer/model.py create mode 120000 egs/tedlium3/ASR/zipformer/optim.py create mode 120000 egs/tedlium3/ASR/zipformer/pretrained.py create mode 120000 egs/tedlium3/ASR/zipformer/profile.py create mode 120000 egs/tedlium3/ASR/zipformer/scaling.py create mode 120000 
egs/tedlium3/ASR/zipformer/scaling_converter.py
 create mode 120000 egs/tedlium3/ASR/zipformer/subsampling.py
 create mode 100755 egs/tedlium3/ASR/zipformer/train.py
 create mode 120000 egs/tedlium3/ASR/zipformer/zipformer.py

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless2/beam_search.py
index 17b63a659..fd59d4b7f 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless2/beam_search.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless2/beam_search.py
@@ -50,6 +50,7 @@ def fast_beam_search_one_best(
     ilme_scale: float = 0.0,
     blank_penalty: float = 0.0,
     return_timestamps: bool = False,
+    allow_partial: bool = False,
 ) -> Union[List[List[int]], DecodingResults]:
     """It limits the maximum number of symbols per frame to 1.
 
@@ -91,6 +92,7 @@ def fast_beam_search_one_best(
         max_contexts=max_contexts,
         temperature=temperature,
         ilme_scale=ilme_scale,
+        allow_partial=allow_partial,
         blank_penalty=blank_penalty,
     )
 
@@ -117,6 +119,7 @@ def fast_beam_search_nbest_LG(
     blank_penalty: float = 0.0,
     ilme_scale: float = 0.0,
     return_timestamps: bool = False,
+    allow_partial: bool = False,
 ) -> Union[List[List[int]], DecodingResults]:
     """It limits the maximum number of symbols per frame to 1.
 
@@ -170,6 +173,7 @@ def fast_beam_search_nbest_LG(
         max_states=max_states,
         max_contexts=max_contexts,
         temperature=temperature,
+        allow_partial=allow_partial,
         blank_penalty=blank_penalty,
         ilme_scale=ilme_scale,
     )
@@ -246,6 +250,7 @@ def fast_beam_search_nbest(
     temperature: float = 1.0,
     blank_penalty: float = 0.0,
     return_timestamps: bool = False,
+    allow_partial: bool = False,
 ) -> Union[List[List[int]], DecodingResults]:
     """It limits the maximum number of symbols per frame to 1.
 
@@ -300,6 +305,7 @@ def fast_beam_search_nbest(
         max_contexts=max_contexts,
         blank_penalty=blank_penalty,
         temperature=temperature,
+        allow_partial=allow_partial,
     )
 
     nbest = Nbest.from_lattice(
@@ -339,6 +345,7 @@ def fast_beam_search_nbest_oracle(
     temperature: float = 1.0,
     blank_penalty: float = 0.0,
     return_timestamps: bool = False,
+    allow_partial: bool = False,
 ) -> Union[List[List[int]], DecodingResults]:
     """It limits the maximum number of symbols per frame to 1.
 
@@ -396,6 +403,7 @@ def fast_beam_search_nbest_oracle(
         max_states=max_states,
         max_contexts=max_contexts,
         temperature=temperature,
+        allow_partial=allow_partial,
         blank_penalty=blank_penalty,
     )
 
@@ -440,7 +448,9 @@ def fast_beam_search(
     max_states: int,
     max_contexts: int,
     temperature: float = 1.0,
-    ilme_scale: float = 0.0,
+    subtract_ilme: bool = False,
+    ilme_scale: float = 0.1,
+    allow_partial: bool = False,
     blank_penalty: float = 0.0,
 ) -> k2.Fsa:
     """It limits the maximum number of symbols per frame to 1.
@@ -533,7 +543,9 @@ def fast_beam_search(
         decoding_streams.advance(log_probs)
 
     decoding_streams.terminate_and_flush_to_streams()
-    lattice = decoding_streams.format_output(encoder_out_lens.tolist())
+    lattice = decoding_streams.format_output(
+        encoder_out_lens.tolist(), allow_partial=allow_partial
+    )
 
     return lattice
 
diff --git a/egs/tedlium3/ASR/RESULTS.md b/egs/tedlium3/ASR/RESULTS.md
index 38eaa8f44..cda77073d 100644
--- a/egs/tedlium3/ASR/RESULTS.md
+++ b/egs/tedlium3/ASR/RESULTS.md
@@ -1,5 +1,133 @@
 ## Results
 
+### TedLium3 BPE training results (Zipformer)
+
+#### 2023-06-15
+
+Using the code from this PR https://github.com/k2-fsa/icefall/pull/1125.
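+
+(The same PR also threads an `allow_partial` argument through the
+`fast_beam_search*` helpers in `pruned_transducer_stateless2/beam_search.py`,
+shown in the diff above. It is forwarded to k2's
+`RnntDecodingStreams.format_output`; roughly, when set, a stream whose
+decoding lattice never reaches a final state still yields a partial
+hypothesis instead of an empty result. `zipformer/decode.py` below calls
+these helpers with `allow_partial=True`.)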
+
+Number of model parameters: 65549011, i.e., 65.5 M
+
+The WERs are:
+
+|                                    | dev  | test | comment                                  |
+|------------------------------------|------|------|------------------------------------------|
+| greedy search                      | 6.74 | 6.16 | --epoch 50, --avg 22, --max-duration 500 |
+| beam search (beam size 4)          | 6.56 | 5.95 | --epoch 50, --avg 22, --max-duration 500 |
+| modified beam search (beam size 4) | 6.54 | 6.00 | --epoch 50, --avg 22, --max-duration 500 |
+| fast beam search (set as default)  | 6.91 | 6.28 | --epoch 50, --avg 22, --max-duration 500 |
+
+The training command for reproducing the results is given below:
+
+```
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+
+./zipformer/train.py \
+  --use-fp16 true \
+  --world-size 4 \
+  --num-epochs 50 \
+  --start-epoch 0 \
+  --exp-dir zipformer/exp \
+  --max-duration 1000
+```
+
+The tensorboard training log can be found at
+https://tensorboard.dev/experiment/AKXbJha0S9aXyfmuvG4h5A/#scalars
+
+The decoding command is:
+```
+epoch=50
+avg=22
+
+## greedy search
+./zipformer/decode.py \
+  --epoch $epoch \
+  --avg $avg \
+  --exp-dir zipformer/exp \
+  --bpe-model ./data/lang_bpe_500/bpe.model \
+  --max-duration 500
+
+## beam search
+./zipformer/decode.py \
+  --epoch $epoch \
+  --avg $avg \
+  --exp-dir zipformer/exp \
+  --bpe-model ./data/lang_bpe_500/bpe.model \
+  --max-duration 500 \
+  --decoding-method beam_search \
+  --beam-size 4
+
+## modified beam search
+./zipformer/decode.py \
+  --epoch $epoch \
+  --avg $avg \
+  --exp-dir zipformer/exp \
+  --bpe-model ./data/lang_bpe_500/bpe.model \
+  --max-duration 500 \
+  --decoding-method modified_beam_search \
+  --beam-size 4
+
+## fast beam search
+./zipformer/decode.py \
+  --epoch $epoch \
+  --avg $avg \
+  --exp-dir ./zipformer/exp \
+  --bpe-model ./data/lang_bpe_500/bpe.model \
+  --max-duration 1500 \
+  --decoding-method fast_beam_search \
+  --beam 4 \
+  --max-contexts 4 \
+  --max-states 8
+```
+
+A pre-trained model and decoding logs can be found at
+
+#### 2023-06-26 (transducer topology)
+
+**Modified transducer**
+
+```
+./zipformer/train.py \
+  --use-fp16 true \
+  --world-size 4 \
+  --num-epochs 50 \
+  --start-epoch 0 \
+  --exp-dir zipformer/exp \
+  --max-duration 1000 \
+  --rnnt-type modified
+```
+
+|                                    | dev  | test | comment                                  |
+|------------------------------------|------|------|------------------------------------------|
+| greedy search                      | 6.32 | 5.83 | --epoch 50, --avg 22, --max-duration 500 |
+| beam search (beam size 4)          |      |      | --epoch 50, --avg 22, --max-duration 500 |
+| modified beam search (beam size 4) | 6.16 | 5.79 | --epoch 50, --avg 22, --max-duration 500 |
+| fast beam search (set as default)  | 6.30 | 5.89 | --epoch 50, --avg 22, --max-duration 500 |
+
+A pre-trained model and decoding logs can be found at .
+
+**Constrained transducer**
+
+```
+./zipformer/train.py \
+  --use-fp16 true \
+  --world-size 4 \
+  --num-epochs 50 \
+  --start-epoch 0 \
+  --exp-dir zipformer/exp \
+  --max-duration 1000 \
+  --rnnt-type constrained
+```
+
+|                                    | dev  | test | comment                                  |
+|------------------------------------|------|------|------------------------------------------|
+| greedy search                      | 6.58 | 6.20 | --epoch 50, --avg 22, --max-duration 500 |
+| beam search (beam size 4)          | 6.34 | 5.92 | --epoch 50, --avg 22, --max-duration 500 |
+| modified beam search (beam size 4) | 6.38 | 5.84 | --epoch 50, --avg 22, --max-duration 500 |
+| fast beam search (set as default)  | 6.68 | 6.29 | --epoch 50, --avg 22, --max-duration 500 |
+
+A pre-trained model and decoding logs can be found at .
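+
+Note on `--rnnt-type`: the option is simply forwarded to the `rnnt_type`
+argument of `k2.rnnt_loss_smoothed` and `k2.rnnt_loss_pruned` in
+`zipformer/model.py`. Following k2's documentation: with "regular", emitting
+a non-blank symbol does not advance the time axis; "modified" advances one
+frame on every emission; and "constrained" also advances on non-blank
+emissions, by forcing the blank transition to be taken from the next context
+on the current frame. A self-contained toy comparison of the three
+topologies (all shapes and values below are made up for illustration and are
+not part of the recipe):
+
+```python
+import k2
+import torch
+
+B, T, S, V = 2, 10, 5, 8  # batch, frames, symbols, vocab (toy sizes)
+am = torch.randn(B, T, V)      # stands in for the projected encoder output
+lm = torch.randn(B, S + 1, V)  # stands in for the projected decoder output
+symbols = torch.randint(1, V, (B, S), dtype=torch.int64)
+# Each boundary row is [0, 0, num_symbols, num_frames], as in model.py.
+boundary = torch.tensor([[0, 0, S, T]] * B, dtype=torch.int64)
+
+for rnnt_type in ["regular", "modified", "constrained"]:
+    loss = k2.rnnt_loss_smoothed(
+        lm=lm.float(),
+        am=am.float(),
+        symbols=symbols,
+        termination_symbol=0,  # blank id
+        boundary=boundary,
+        reduction="sum",
+        rnnt_type=rnnt_type,
+    )
+    print(rnnt_type, loss.item())
+```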
+ ### TedLium3 BPE training results (Conformer-CTC 2) #### [conformer_ctc2](./conformer_ctc2) diff --git a/egs/tedlium3/ASR/zipformer/__init__.py b/egs/tedlium3/ASR/zipformer/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/egs/tedlium3/ASR/zipformer/asr_datamodule.py b/egs/tedlium3/ASR/zipformer/asr_datamodule.py new file mode 120000 index 000000000..49b2ee483 --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/asr_datamodule.py @@ -0,0 +1 @@ +../transducer_stateless/asr_datamodule.py \ No newline at end of file diff --git a/egs/tedlium3/ASR/zipformer/beam_search.py b/egs/tedlium3/ASR/zipformer/beam_search.py new file mode 120000 index 000000000..e24eca39f --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/beam_search.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless2/beam_search.py \ No newline at end of file diff --git a/egs/tedlium3/ASR/zipformer/decode.py b/egs/tedlium3/ASR/zipformer/decode.py new file mode 100755 index 000000000..ea1cbba1b --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/decode.py @@ -0,0 +1,833 @@ +#!/usr/bin/env python3 +# +# Copyright 2021-2023 Xiaomi Corporation (Author: Fangjun Kuang, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Usage: +(1) greedy search +./zipformer/decode.py \ + --epoch 30 \ + --avg 9 \ + --exp-dir ./zipformer/exp \ + --max-duration 600 \ + --decoding-method greedy_search + +(2) beam search (not recommended) +./zipformer/decode.py \ + --epoch 30 \ + --avg 9 \ + --exp-dir ./zipformer/exp \ + --max-duration 600 \ + --decoding-method beam_search \ + --beam-size 4 + +(3) modified beam search +./zipformer/decode.py \ + --epoch 30 \ + --avg 9 \ + --exp-dir ./zipformer/exp \ + --max-duration 600 \ + --decoding-method modified_beam_search \ + --beam-size 4 + +(4) fast beam search (one best) +./zipformer/decode.py \ + --epoch 30 \ + --avg 9 \ + --exp-dir ./zipformer/exp \ + --max-duration 600 \ + --decoding-method fast_beam_search \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 + +(5) fast beam search (nbest) +./zipformer/decode.py \ + --epoch 30 \ + --avg 9 \ + --exp-dir ./zipformer/exp \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 + +(6) fast beam search (nbest oracle WER) +./zipformer/decode.py \ + --epoch 30 \ + --avg 9 \ + --exp-dir ./zipformer/exp \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest_oracle \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 + +(7) fast beam search (with LG) +./zipformer/decode.py \ + --epoch 30 \ + --avg 9 \ + --exp-dir ./zipformer/exp \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest_LG \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 +""" + + +import argparse +import logging +import math +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import TedLiumAsrDataModule +from beam_search import ( + beam_search, + fast_beam_search_nbest, + fast_beam_search_nbest_LG, + fast_beam_search_nbest_oracle, + fast_beam_search_one_best, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.lexicon import Lexicon +from icefall.utils import ( + AttributeDict, + make_pad_mask, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + +LOG_EPS = math.log(1e-10) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=30, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 1. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. 
If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="zipformer/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--lang-dir", + type=Path, + default="data/lang_bpe_500", + help="The lang dir containing word table and LG graph", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + - fast_beam_search + - fast_beam_search_nbest + - fast_beam_search_nbest_oracle + - fast_beam_search_nbest_LG + If you use fast_beam_search_nbest_LG, you have to specify + `--lang-dir`, which should contain `LG.pt`. + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An integer indicating how many candidates we will keep for each + frame. Used only when --decoding-method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=20.0, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. + Used only when --decoding-method is fast_beam_search, + fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle + """, + ) + + parser.add_argument( + "--ngram-lm-scale", + type=float, + default=0.01, + help=""" + Used only when --decoding_method is fast_beam_search_nbest_LG. + It specifies the scale for n-gram LM scores. + """, + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=8, + help="""Used only when --decoding-method is + fast_beam_search, fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=64, + help="""Used only when --decoding-method is + fast_beam_search, fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", + ) + + parser.add_argument( + "--num-paths", + type=int, + default=200, + help="""Number of paths for nbest decoding. + Used only when the decoding method is fast_beam_search_nbest, + fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--nbest-scale", + type=float, + default=0.5, + help="""Scale applied to lattice scores when computing nbest paths. + Used only when the decoding method is fast_beam_search_nbest, + fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""", + ) + + add_model_arguments(parser) + + return parser + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + batch: dict, + word_table: Optional[k2.SymbolTable] = None, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. 
The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if greedy_search is used, it would be "greedy_search" + If beam search with a beam size of 7 is used, it would be + "beam_7" + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + word_table: + The word symbol table. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used + only when --decoding_method is fast_beam_search, fast_beam_search_nbest, + fast_beam_search_nbest_oracle, and fast_beam_search_nbest_LG. + Returns: + Return the decoding result. See above description for the format of + the returned dict. + """ + device = next(model.parameters()).device + feature = batch["inputs"] + assert feature.ndim == 3 + + feature = feature.to(device) + # at entry, feature is (N, T, C) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + if params.causal: + # this seems to cause insertions at the end of the utterance if used with zipformer. + pad_len = 30 + feature_lens += pad_len + feature = torch.nn.functional.pad( + feature, + pad=(0, 0, 0, pad_len), + value=LOG_EPS, + ) + + x, x_lens = model.encoder_embed(feature, feature_lens) + + src_key_padding_mask = make_pad_mask(x_lens) + x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + + encoder_out, encoder_out_lens = model.encoder(x, x_lens, src_key_padding_mask) + encoder_out = encoder_out.permute(1, 0, 2) # (T, N, C) ->(N, T, C) + + hyps = [] + unk = sp.decode(sp.unk_id()).strip() + + if params.decoding_method == "fast_beam_search": + hyp_tokens = fast_beam_search_one_best( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + allow_partial=True, + ) + for hyp in sp.decode(hyp_tokens): + hyp = [w for w in hyp.split() if w != unk] + hyps.append(hyp) + elif params.decoding_method == "fast_beam_search_nbest_LG": + hyp_tokens = fast_beam_search_nbest_LG( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + nbest_scale=params.nbest_scale, + allow_partial=True, + ) + for hyp in hyp_tokens: + hyp = [word_table[i] for i in hyp if word_table[i] != unk] + hyps.append(hyp) + elif params.decoding_method == "fast_beam_search_nbest": + hyp_tokens = fast_beam_search_nbest( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + nbest_scale=params.nbest_scale, + allow_partial=True, + ) + for hyp in sp.decode(hyp_tokens): + hyp = [w for w in hyp.split() if w != unk] + hyps.append(hyp) + elif params.decoding_method == "fast_beam_search_nbest_oracle": + hyp_tokens = fast_beam_search_nbest_oracle( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + 
beam=params.beam,
+            max_contexts=params.max_contexts,
+            max_states=params.max_states,
+            num_paths=params.num_paths,
+            ref_texts=sp.encode(supervisions["text"]),
+            nbest_scale=params.nbest_scale,
+            allow_partial=True,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyp = [w for w in hyp.split() if w != unk]
+            hyps.append(hyp)
+    elif params.decoding_method == "greedy_search" and params.max_sym_per_frame == 1:
+        hyp_tokens = greedy_search_batch(
+            model=model,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyp = [w for w in hyp.split() if w != unk]
+            hyps.append(hyp)
+    elif params.decoding_method == "modified_beam_search":
+        hyp_tokens = modified_beam_search(
+            model=model,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam_size,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyp = [w for w in hyp.split() if w != unk]
+            hyps.append(hyp)
+    else:
+        batch_size = encoder_out.size(0)
+
+        for i in range(batch_size):
+            # fmt: off
+            encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]]
+            # fmt: on
+            if params.decoding_method == "greedy_search":
+                hyp = greedy_search(
+                    model=model,
+                    encoder_out=encoder_out_i,
+                    max_sym_per_frame=params.max_sym_per_frame,
+                )
+            elif params.decoding_method == "beam_search":
+                hyp = beam_search(
+                    model=model,
+                    encoder_out=encoder_out_i,
+                    beam=params.beam_size,
+                )
+            else:
+                raise ValueError(
+                    f"Unsupported decoding method: {params.decoding_method}"
+                )
+            hyp = [w for w in sp.decode(hyp).split() if w != unk]
+            hyps.append(hyp)
+
+    if params.decoding_method == "greedy_search":
+        return {"greedy_search": hyps}
+    elif "fast_beam_search" in params.decoding_method:
+        key = f"beam_{params.beam}_"
+        key += f"max_contexts_{params.max_contexts}_"
+        key += f"max_states_{params.max_states}"
+        if "nbest" in params.decoding_method:
+            key += f"_num_paths_{params.num_paths}_"
+            key += f"nbest_scale_{params.nbest_scale}"
+            if "LG" in params.decoding_method:
+                key += f"_ngram_lm_scale_{params.ngram_lm_scale}"
+
+        return {key: hyps}
+    else:
+        return {f"beam_size_{params.beam_size}": hyps}
+
+
+def decode_dataset(
+    dl: torch.utils.data.DataLoader,
+    params: AttributeDict,
+    model: nn.Module,
+    sp: spm.SentencePieceProcessor,
+    word_table: Optional[k2.SymbolTable] = None,
+    decoding_graph: Optional[k2.Fsa] = None,
+) -> Dict[str, List[Tuple[str, List[str], List[str]]]]:
+    """Decode dataset.
+
+    Args:
+      dl:
+        PyTorch's dataloader containing the dataset to decode.
+      params:
+        It is returned by :func:`get_params`.
+      model:
+        The neural model.
+      sp:
+        The BPE model.
+      word_table:
+        The word symbol table.
+      decoding_graph:
+        The decoding graph. It can be either a `k2.trivial_graph` or an HLG.
+        Used only when --decoding_method is fast_beam_search,
+        fast_beam_search_nbest, fast_beam_search_nbest_oracle,
+        and fast_beam_search_nbest_LG.
+    Returns:
+      Return a dict, whose key may be "greedy_search" if greedy search
+      is used, or it may be "beam_7" if a beam size of 7 is used.
+      Its value is a list of tuples. Each tuple contains three elements:
+      the cut id, the reference transcript, and the predicted result.
+    """
+    num_cuts = 0
+
+    try:
+        num_batches = len(dl)
+    except TypeError:
+        num_batches = "?"
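+    # Note: `len(dl)` raises TypeError when the underlying sampler does not
+    # know its length in advance (e.g., an iterable-style sampler); the
+    # progress messages below then show "?" as the total number of batches.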
+ + if params.decoding_method == "greedy_search": + log_interval = 50 + else: + log_interval = 20 + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] + + hyps_dict = decode_one_batch( + params=params, + model=model, + sp=sp, + decoding_graph=decoding_graph, + word_table=word_table, + batch=batch, + ) + + for name, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + ref_words = ref_text.split() + this_batch.append((cut_id, ref_words, hyp_words)) + + results[name].extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % log_interval == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info(f"batch {batch_str}, cuts processed until now is {num_cuts}") + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[str, List[str], List[str]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + results = sorted(results) + store_transcripts(filename=recog_path, texts=results) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. + errs_filename = ( + params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results, enable_log=True + ) + test_set_wers[key] = wer + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = ( + params.res_dir / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_info, "w") as f: + print("settings\tWER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, WER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + TedLiumAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + assert params.decoding_method in ( + "greedy_search", + "beam_search", + "fast_beam_search", + "fast_beam_search_nbest", + "fast_beam_search_nbest_LG", + "fast_beam_search_nbest_oracle", + "modified_beam_search", + ) + params.res_dir = params.exp_dir / params.decoding_method + + if params.iter > 0: + params.suffix = f"iter-{params.iter}-avg-{params.avg}" + else: + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + + if params.causal: + assert ( + "," not in params.chunk_size + ), "chunk_size should be one value in decoding." + assert ( + "," not in params.left_context_frames + ), "left_context_frames should be one value in decoding." 
+        params.suffix += f"-chunk-{params.chunk_size}"
+        params.suffix += f"-left-context-{params.left_context_frames}"
+
+    if "fast_beam_search" in params.decoding_method:
+        params.suffix += f"-beam-{params.beam}"
+        params.suffix += f"-max-contexts-{params.max_contexts}"
+        params.suffix += f"-max-states-{params.max_states}"
+        if "nbest" in params.decoding_method:
+            params.suffix += f"-nbest-scale-{params.nbest_scale}"
+            params.suffix += f"-num-paths-{params.num_paths}"
+            if "LG" in params.decoding_method:
+                params.suffix += f"-ngram-lm-scale-{params.ngram_lm_scale}"
+    elif "beam_search" in params.decoding_method:
+        params.suffix += f"-{params.decoding_method}-beam-size-{params.beam_size}"
+    else:
+        params.suffix += f"-context-{params.context_size}"
+        params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}"
+
+    if params.use_averaged_model:
+        params.suffix += "-use-averaged-model"
+
+    setup_logger(f"{params.res_dir}/log-decode-{params.suffix}")
+    logging.info("Decoding started")
+
+    device = torch.device("cpu")
+    if torch.cuda.is_available():
+        device = torch.device("cuda", 0)
+
+    logging.info(f"Device: {device}")
+
+    sp = spm.SentencePieceProcessor()
+    sp.load(params.bpe_model)
+
+    # <blk> and <unk> are defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+    params.unk_id = sp.piece_to_id("<unk>")
+    params.vocab_size = sp.get_piece_size()
+
+    logging.info(params)
+
+    logging.info("About to create model")
+    model = get_transducer_model(params)
+
+    if not params.use_averaged_model:
+        if params.iter > 0:
+            filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[
+                : params.avg
+            ]
+            if len(filenames) == 0:
+                raise ValueError(
+                    f"No checkpoints found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            elif len(filenames) < params.avg:
+                raise ValueError(
+                    f"Not enough checkpoints ({len(filenames)}) found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            logging.info(f"averaging {filenames}")
+            model.to(device)
+            model.load_state_dict(average_checkpoints(filenames, device=device))
+        elif params.avg == 1:
+            load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+        else:
+            start = params.epoch - params.avg + 1
+            filenames = []
+            for i in range(start, params.epoch + 1):
+                if i >= 1:
+                    filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+            logging.info(f"averaging {filenames}")
+            model.to(device)
+            model.load_state_dict(average_checkpoints(filenames, device=device))
+    else:
+        if params.iter > 0:
+            filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[
+                : params.avg + 1
+            ]
+            if len(filenames) == 0:
+                raise ValueError(
+                    f"No checkpoints found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            elif len(filenames) < params.avg + 1:
+                raise ValueError(
+                    f"Not enough checkpoints ({len(filenames)}) found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            filename_start = filenames[-1]
+            filename_end = filenames[0]
+            logging.info(
+                "Calculating the averaged model over iteration checkpoints"
+                f" from {filename_start} (excluded) to {filename_end}"
+            )
+            model.to(device)
+            model.load_state_dict(
+                average_checkpoints_with_averaged_model(
+                    filename_start=filename_start,
+                    filename_end=filename_end,
+                    device=device,
+                )
+            )
+        else:
+            assert params.avg > 0, params.avg
+            start = params.epoch - params.avg
+            assert start >= 1, start
+            filename_start = f"{params.exp_dir}/epoch-{start}.pt"
+            filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt"
+            logging.info(
+                f"Calculating the averaged model over epoch range from "
+                f"{start}
(excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.to(device) + model.eval() + + if "fast_beam_search" in params.decoding_method: + if params.decoding_method == "fast_beam_search_nbest_LG": + lexicon = Lexicon(params.lang_dir) + word_table = lexicon.word_table + lg_filename = params.lang_dir / "LG.pt" + logging.info(f"Loading {lg_filename}") + decoding_graph = k2.Fsa.from_dict( + torch.load(lg_filename, map_location=device) + ) + decoding_graph.scores *= params.ngram_lm_scale + else: + word_table = None + decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device) + else: + decoding_graph = None + word_table = None + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + # we need cut ids to display recognition results. + args.return_cuts = True + tedlium = TedLiumAsrDataModule(args) + + dev_cuts = tedlium.dev_cuts() + test_cuts = tedlium.test_cuts() + + dev_dl = tedlium.test_dataloaders(dev_cuts) + test_dl = tedlium.test_dataloaders(test_cuts) + + test_sets = ["dev", "test"] + test_dls = [dev_dl, test_dl] + + for name, dl in zip(test_sets, test_dls): + results_dict = decode_dataset( + dl=dl, + params=params, + model=model, + sp=sp, + word_table=word_table, + decoding_graph=decoding_graph, + ) + + save_results( + params=params, + test_set_name=name, + results_dict=results_dict, + ) + + logging.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/egs/tedlium3/ASR/zipformer/decoder.py b/egs/tedlium3/ASR/zipformer/decoder.py new file mode 120000 index 000000000..5a8018680 --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/decoder.py @@ -0,0 +1 @@ +../../../librispeech/ASR/zipformer/decoder.py \ No newline at end of file diff --git a/egs/tedlium3/ASR/zipformer/encoder_interface.py b/egs/tedlium3/ASR/zipformer/encoder_interface.py new file mode 120000 index 000000000..653c5b09a --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/encoder_interface.py @@ -0,0 +1 @@ +../../../librispeech/ASR/transducer_stateless/encoder_interface.py \ No newline at end of file diff --git a/egs/tedlium3/ASR/zipformer/export.py b/egs/tedlium3/ASR/zipformer/export.py new file mode 120000 index 000000000..dfc1bec08 --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/export.py @@ -0,0 +1 @@ +../../../librispeech/ASR/zipformer/export.py \ No newline at end of file diff --git a/egs/tedlium3/ASR/zipformer/joiner.py b/egs/tedlium3/ASR/zipformer/joiner.py new file mode 120000 index 000000000..5b8a36332 --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/joiner.py @@ -0,0 +1 @@ +../../../librispeech/ASR/zipformer/joiner.py \ No newline at end of file diff --git a/egs/tedlium3/ASR/zipformer/model.py b/egs/tedlium3/ASR/zipformer/model.py new file mode 100644 index 000000000..90ec7e7aa --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/model.py @@ -0,0 +1,223 @@ +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, Wei Kang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Tuple
+
+import k2
+import torch
+import torch.nn as nn
+from encoder_interface import EncoderInterface
+
+from icefall.utils import add_sos, make_pad_mask
+from scaling import ScaledLinear
+
+
+class Transducer(nn.Module):
+    """It implements https://arxiv.org/pdf/1211.3711.pdf
+    "Sequence Transduction with Recurrent Neural Networks"
+    """
+
+    def __init__(
+        self,
+        encoder_embed: nn.Module,
+        encoder: EncoderInterface,
+        decoder: nn.Module,
+        joiner: nn.Module,
+        encoder_dim: int,
+        decoder_dim: int,
+        joiner_dim: int,
+        vocab_size: int,
+    ):
+        """
+        Args:
+          encoder_embed:
+            It is a Convolutional 2D subsampling module. It converts
+            an input of shape (N, T, idim) to an output of shape
+            (N, T', odim), where T' = (T-3)//2-2 = (T-7)//2.
+          encoder:
+            It is the transcription network in the paper. It accepts
+            two inputs: `x` of (N, T, encoder_dim) and `x_lens` of shape (N,).
+            It returns two tensors: `logits` of shape (N, T, encoder_dim) and
+            `logit_lens` of shape (N,).
+          decoder:
+            It is the prediction network in the paper. Its input shape
+            is (N, U) and its output shape is (N, U, decoder_dim).
+            It should contain one attribute: `blank_id`.
+          joiner:
+            It has two inputs with shapes: (N, T, encoder_dim) and (N, U, decoder_dim).
+            Its output shape is (N, T, U, vocab_size). Note that its output contains
+            unnormalized probs, i.e., not processed by log-softmax.
+        """
+        super().__init__()
+        assert isinstance(encoder, EncoderInterface), type(encoder)
+        assert hasattr(decoder, "blank_id")
+
+        self.encoder_embed = encoder_embed
+        self.encoder = encoder
+        self.decoder = decoder
+        self.joiner = joiner
+
+        self.simple_am_proj = ScaledLinear(
+            encoder_dim,
+            vocab_size,
+            initial_scale=0.25,
+        )
+        self.simple_lm_proj = ScaledLinear(
+            decoder_dim,
+            vocab_size,
+            initial_scale=0.25,
+        )
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        x_lens: torch.Tensor,
+        y: k2.RaggedTensor,
+        prune_range: int = 5,
+        am_scale: float = 0.0,
+        lm_scale: float = 0.0,
+        rnnt_type: str = "regular",
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """
+        Args:
+          x:
+            A 3-D tensor of shape (N, T, C).
+          x_lens:
+            A 1-D tensor of shape (N,). It contains the number of frames in `x`
+            before padding.
+          y:
+            A ragged tensor with 2 axes [utt][label]. It contains labels of each
+            utterance.
+          prune_range:
+            The prune range for rnnt loss, it means how many symbols (context)
+            we are considering for each frame to compute the loss.
+          am_scale:
+            The scale to smooth the loss with am (output of encoder network)
+            part.
+          lm_scale:
+            The scale to smooth the loss with lm (output of predictor network)
+            part.
+          rnnt_type:
+            The type of label topology to use for the transducer loss. One of "regular",
+            "modified", or "constrained".
+        Returns:
+          Return a tuple `(simple_loss, pruned_loss)`, i.e., the simple
+          transducer loss and the pruned transducer loss.
+ + Note: + Regarding am_scale & lm_scale, it will make the loss-function one of + the form: + lm_scale * lm_probs + am_scale * am_probs + + (1-lm_scale-am_scale) * combined_probs + """ + assert x.ndim == 3, x.shape + assert x_lens.ndim == 1, x_lens.shape + assert y.num_axes == 2, y.num_axes + + assert x.size(0) == x_lens.size(0) == y.dim0 + + # logging.info(f"Memory allocated at entry: {torch.cuda.memory_allocated() // 1000000}M") + x, x_lens = self.encoder_embed(x, x_lens) + # logging.info(f"Memory allocated after encoder_embed: {torch.cuda.memory_allocated() // 1000000}M") + + src_key_padding_mask = make_pad_mask(x_lens) + x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + + encoder_out, x_lens = self.encoder(x, x_lens, src_key_padding_mask) + encoder_out = encoder_out.permute(1, 0, 2) # (T, N, C) ->(N, T, C) + + assert torch.all(x_lens > 0) + + # Now for the decoder, i.e., the prediction network + row_splits = y.shape.row_splits(1) + y_lens = row_splits[1:] - row_splits[:-1] + + blank_id = self.decoder.blank_id + sos_y = add_sos(y, sos_id=blank_id) + + # sos_y_padded: [B, S + 1], start with SOS. + sos_y_padded = sos_y.pad(mode="constant", padding_value=blank_id) + + # decoder_out: [B, S + 1, decoder_dim] + decoder_out = self.decoder(sos_y_padded) + + # Note: y does not start with SOS + # y_padded : [B, S] + y_padded = y.pad(mode="constant", padding_value=0) + + y_padded = y_padded.to(torch.int64) + boundary = torch.zeros( + (encoder_out.size(0), 4), + dtype=torch.int64, + device=encoder_out.device, + ) + boundary[:, 2] = y_lens + boundary[:, 3] = x_lens + + lm = self.simple_lm_proj(decoder_out) + am = self.simple_am_proj(encoder_out) + + # if self.training and random.random() < 0.25: + # lm = penalize_abs_values_gt(lm, 100.0, 1.0e-04) + # if self.training and random.random() < 0.25: + # am = penalize_abs_values_gt(am, 30.0, 1.0e-04) + + with torch.cuda.amp.autocast(enabled=False): + simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed( + lm=lm.float(), + am=am.float(), + symbols=y_padded, + termination_symbol=blank_id, + lm_only_scale=lm_scale, + am_only_scale=am_scale, + boundary=boundary, + reduction="sum", + return_grad=True, + rnnt_type=rnnt_type, + ) + + # ranges : [B, T, prune_range] + ranges = k2.get_rnnt_prune_ranges( + px_grad=px_grad, + py_grad=py_grad, + boundary=boundary, + s_range=prune_range, + ) + + # am_pruned : [B, T, prune_range, encoder_dim] + # lm_pruned : [B, T, prune_range, decoder_dim] + am_pruned, lm_pruned = k2.do_rnnt_pruning( + am=self.joiner.encoder_proj(encoder_out), + lm=self.joiner.decoder_proj(decoder_out), + ranges=ranges, + ) + + # logits : [B, T, prune_range, vocab_size] + + # project_input=False since we applied the decoder's input projections + # prior to do_rnnt_pruning (this is an optimization for speed). 
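+        # (With project_input=True the joiner would apply encoder_proj /
+        # decoder_proj itself; applying them once to the full [B, T, C] and
+        # [B, S+1, C] tensors above is cheaper than projecting the pruned
+        # [B, T, prune_range, C] tensors here.)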
+ logits = self.joiner(am_pruned, lm_pruned, project_input=False) + + with torch.cuda.amp.autocast(enabled=False): + pruned_loss = k2.rnnt_loss_pruned( + logits=logits.float(), + symbols=y_padded, + ranges=ranges, + termination_symbol=blank_id, + boundary=boundary, + reduction="sum", + rnnt_type=rnnt_type, + ) + + return (simple_loss, pruned_loss) diff --git a/egs/tedlium3/ASR/zipformer/optim.py b/egs/tedlium3/ASR/zipformer/optim.py new file mode 120000 index 000000000..5eaa3cffd --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/optim.py @@ -0,0 +1 @@ +../../../librispeech/ASR/zipformer/optim.py \ No newline at end of file diff --git a/egs/tedlium3/ASR/zipformer/pretrained.py b/egs/tedlium3/ASR/zipformer/pretrained.py new file mode 120000 index 000000000..0bd71dde4 --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/pretrained.py @@ -0,0 +1 @@ +../../../librispeech/ASR/zipformer/pretrained.py \ No newline at end of file diff --git a/egs/tedlium3/ASR/zipformer/profile.py b/egs/tedlium3/ASR/zipformer/profile.py new file mode 120000 index 000000000..c93adbd14 --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/profile.py @@ -0,0 +1 @@ +../../../librispeech/ASR/zipformer/profile.py \ No newline at end of file diff --git a/egs/tedlium3/ASR/zipformer/scaling.py b/egs/tedlium3/ASR/zipformer/scaling.py new file mode 120000 index 000000000..6f398f431 --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/scaling.py @@ -0,0 +1 @@ +../../../librispeech/ASR/zipformer/scaling.py \ No newline at end of file diff --git a/egs/tedlium3/ASR/zipformer/scaling_converter.py b/egs/tedlium3/ASR/zipformer/scaling_converter.py new file mode 120000 index 000000000..b0ecee05e --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/scaling_converter.py @@ -0,0 +1 @@ +../../../librispeech/ASR/zipformer/scaling_converter.py \ No newline at end of file diff --git a/egs/tedlium3/ASR/zipformer/subsampling.py b/egs/tedlium3/ASR/zipformer/subsampling.py new file mode 120000 index 000000000..01ae9002c --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/subsampling.py @@ -0,0 +1 @@ +../../../librispeech/ASR/zipformer/subsampling.py \ No newline at end of file diff --git a/egs/tedlium3/ASR/zipformer/train.py b/egs/tedlium3/ASR/zipformer/train.py new file mode 100755 index 000000000..9271c8438 --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/train.py @@ -0,0 +1,1308 @@ +#!/usr/bin/env python3 +# Copyright 2021-2023 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang, +# Mingshuang Luo, +# Zengwei Yao, +# Daniel Povey) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Usage: + +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +# For non-streaming model training: +./zipformer/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --use-fp16 1 \ + --exp-dir zipformer/exp \ + --full-libri 1 \ + --max-duration 1000 + +# For streaming model training: +./zipformer/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --use-fp16 1 \ + --exp-dir zipformer/exp \ + --causal 1 \ + --full-libri 1 \ + --max-duration 1000 + +""" + + +import argparse +import copy +import logging +import warnings +from pathlib import Path +from shutil import copyfile +from typing import Any, Dict, Optional, Tuple, Union + +import k2 +import optim +import sentencepiece as spm +import torch +import torch.multiprocessing as mp +import torch.nn as nn +from asr_datamodule import TedLiumAsrDataModule +from decoder import Decoder +from joiner import Joiner +from lhotse.cut import Cut +from lhotse.dataset.sampling.base import CutSampler +from lhotse.utils import fix_random_seed +from local.convert_transcript_words_to_bpe_ids import convert_texts_into_ids +from model import Transducer +from optim import Eden, ScaledAdam +from scaling import ScheduledFloat +from subsampling import Conv2dSubsampling +from torch import Tensor +from torch.cuda.amp import GradScaler +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.tensorboard import SummaryWriter +from zipformer import Zipformer2 + +from icefall import diagnostics +from icefall.checkpoint import load_checkpoint, remove_checkpoints +from icefall.checkpoint import save_checkpoint as save_checkpoint_impl +from icefall.checkpoint import ( + save_checkpoint_with_global_batch_idx, + update_averaged_model, +) +from icefall.dist import cleanup_dist, setup_dist +from icefall.env import get_env_info +from icefall.hooks import register_inf_check_hooks +from icefall.utils import ( + AttributeDict, + MetricsTracker, + get_parameter_groups_with_lrs, + setup_logger, + str2bool, +) + +LRSchedulerType = Union[torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler] + + +def get_adjusted_batch_count(params: AttributeDict) -> float: + # returns the number of batches we would have used so far if we had used the reference + # duration. This is for purposes of set_batch_count(). 
+ return ( + params.batch_idx_train + * (params.max_duration * params.world_size) + / params.ref_duration + ) + + +def set_batch_count(model: Union[nn.Module, DDP], batch_count: float) -> None: + if isinstance(model, DDP): + # get underlying nn.Module + model = model.module + for name, module in model.named_modules(): + if hasattr(module, "batch_count"): + module.batch_count = batch_count + if hasattr(module, "name"): + module.name = name + + +def add_model_arguments(parser: argparse.ArgumentParser): + parser.add_argument( + "--num-encoder-layers", + type=str, + default="2,2,3,4,3,2", + help="Number of zipformer encoder layers per stack, comma separated.", + ) + + parser.add_argument( + "--downsampling-factor", + type=str, + default="1,2,4,8,4,2", + help="Downsampling factor for each stack of encoder layers.", + ) + + parser.add_argument( + "--feedforward-dim", + type=str, + default="512,768,1024,1536,1024,768", + help="Feedforward dimension of the zipformer encoder layers, per stack, comma separated.", + ) + + parser.add_argument( + "--num-heads", + type=str, + default="4,4,4,8,4,4", + help="Number of attention heads in the zipformer encoder layers: a single int or comma-separated list.", + ) + + parser.add_argument( + "--encoder-dim", + type=str, + default="192,256,384,512,384,256", + help="Embedding dimension in encoder stacks: a single int or comma-separated list.", + ) + + parser.add_argument( + "--query-head-dim", + type=str, + default="32", + help="Query/key dimension per head in encoder stacks: a single int or comma-separated list.", + ) + + parser.add_argument( + "--value-head-dim", + type=str, + default="12", + help="Value dimension per head in encoder stacks: a single int or comma-separated list.", + ) + + parser.add_argument( + "--pos-head-dim", + type=str, + default="4", + help="Positional-encoding dimension per head in encoder stacks: a single int or comma-separated list.", + ) + + parser.add_argument( + "--pos-dim", + type=int, + default="48", + help="Positional-encoding embedding dimension", + ) + + parser.add_argument( + "--encoder-unmasked-dim", + type=str, + default="192,192,256,256,256,192", + help="Unmasked dimensions in the encoders, relates to augmentation during training. " + "A single int or comma-separated list. Must be <= each corresponding encoder_dim.", + ) + + parser.add_argument( + "--cnn-module-kernel", + type=str, + default="31,31,15,15,15,31", + help="Sizes of convolutional kernels in convolution modules in each encoder stack: " + "a single int or comma-separated list.", + ) + + parser.add_argument( + "--decoder-dim", + type=int, + default=512, + help="Embedding dimension in the decoder model.", + ) + + parser.add_argument( + "--joiner-dim", + type=int, + default=512, + help="""Dimension used in the joiner model. + Outputs from the encoder and decoder model are projected + to this dimension before adding. + """, + ) + + parser.add_argument( + "--causal", + type=str2bool, + default=False, + help="If True, use causal version of model.", + ) + + parser.add_argument( + "--chunk-size", + type=str, + default="16,32,64,-1", + help="Chunk sizes (at 50Hz frame rate) will be chosen randomly from this list during training. " + " Must be just -1 if --causal=False", + ) + + parser.add_argument( + "--left-context-frames", + type=str, + default="64,128,256,-1", + help="Maximum left-contexts for causal training, measured in frames which will " + "be converted to a number of chunks. 
If splitting into chunks, " + "chunk left-context frames will be chosen randomly from this list; else not relevant.", + ) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=50, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=1, + help="""Resume training from this epoch. It should be positive. + If larger than 1, it will load checkpoint from + exp-dir/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--start-batch", + type=int, + default=0, + help="""If positive, --start-epoch is ignored and + it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="zipformer/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--base-lr", type=float, default=0.04, help="The base learning rate." + ) + + parser.add_argument( + "--lr-batches", + type=float, + default=7500, + help="""Number of steps that affects how rapidly the learning rate + decreases. We suggest not to change this.""", + ) + + parser.add_argument( + "--lr-epochs", + type=float, + default=5, + help="""Number of epochs that affects how rapidly the learning rate decreases. + """, + ) + + parser.add_argument( + "--ref-duration", + type=float, + default=600, + help="Reference batch duration for purposes of adjusting batch counts for setting various " + "schedules inside the model", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " "2 means tri-gram", + ) + + parser.add_argument( + "--prune-range", + type=int, + default=5, + help="The prune range for rnnt loss, it means how many symbols(context)" + "we are using to compute the loss", + ) + + parser.add_argument( + "--rnnt-type", + type=str, + default="regular", + choices=["regular", "modified", "constrained"], + ) + + parser.add_argument( + "--lm-scale", + type=float, + default=0.25, + help="The scale to smooth the loss with lm " + "(output of prediction network) part.", + ) + + parser.add_argument( + "--am-scale", + type=float, + default=0.0, + help="The scale to smooth the loss with am (output of encoder network)" "part.", + ) + + parser.add_argument( + "--simple-loss-scale", + type=float, + default=0.5, + help="To get pruning ranges, we will calculate a simple version" + "loss(joiner is just addition), this simple loss also uses for" + "training (as a regularization item). 
We will scale the simple loss" + "with this parameter before adding to the final loss.", + ) + + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + + parser.add_argument( + "--print-diagnostics", + type=str2bool, + default=False, + help="Accumulate stats on activations, print them and exit.", + ) + + parser.add_argument( + "--inf-check", + type=str2bool, + default=False, + help="Add hooks to check for infinite module outputs and gradients.", + ) + + parser.add_argument( + "--save-every-n", + type=int, + default=4000, + help="""Save checkpoint after processing this number of batches" + periodically. We save checkpoint to exp-dir/ whenever + params.batch_idx_train % save_every_n == 0. The checkpoint filename + has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt' + Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the + end of each epoch where `xxx` is the epoch number counting from 1. + """, + ) + + parser.add_argument( + "--keep-last-k", + type=int, + default=1, + help="""Only keep this number of checkpoints on disk. + For instance, if it is 3, there are only 3 checkpoints + in the exp-dir with filenames `checkpoint-xxx.pt`. + It does not affect checkpoints with name `epoch-xxx.pt`. + """, + ) + + parser.add_argument( + "--average-period", + type=int, + default=200, + help="""Update the averaged model, namely `model_avg`, after processing + this number of batches. `model_avg` is a separate version of model, + in which each floating-point parameter is the average of all the + parameters from the start of training. Each time we take the average, + we do: `model_avg = model * (average_period / batch_idx_train) + + model_avg * ((batch_idx_train - average_period) / batch_idx_train)`. + """, + ) + + parser.add_argument( + "--use-fp16", + type=str2bool, + default=False, + help="Whether to use half precision training.", + ) + + add_model_arguments(parser) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - valid_interval: Run validation if batch_idx % valid_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - encoder_dim: Hidden dim for multi-head attention model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. 
+ + - warm_step: The warmup period that dictates the decay of the + scale on "simple" (un-pruned) loss. + """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 3000, # For the 100h subset, use 800 + # parameters for zipformer + "feature_dim": 80, + "subsampling_factor": 4, # not passed in, this is fixed. + "warm_step": 2000, + "env_info": get_env_info(), + } + ) + + return params + + +def _to_int_tuple(s: str): + return tuple(map(int, s.split(","))) + + +def get_encoder_embed(params: AttributeDict) -> nn.Module: + # encoder_embed converts the input of shape (N, T, num_features) + # to the shape (N, (T - 7) // 2, encoder_dims). + # That is, it does two things simultaneously: + # (1) subsampling: T -> (T - 7) // 2 + # (2) embedding: num_features -> encoder_dims + # In the normal configuration, we will downsample once more at the end + # by a factor of 2, and most of the encoder stacks will run at a lower + # sampling rate. + encoder_embed = Conv2dSubsampling( + in_channels=params.feature_dim, + out_channels=_to_int_tuple(params.encoder_dim)[0], + dropout=ScheduledFloat((0.0, 0.3), (20000.0, 0.1)), + ) + return encoder_embed + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + encoder = Zipformer2( + output_downsampling_factor=2, + downsampling_factor=_to_int_tuple(params.downsampling_factor), + num_encoder_layers=_to_int_tuple(params.num_encoder_layers), + encoder_dim=_to_int_tuple(params.encoder_dim), + encoder_unmasked_dim=_to_int_tuple(params.encoder_unmasked_dim), + query_head_dim=_to_int_tuple(params.query_head_dim), + pos_head_dim=_to_int_tuple(params.pos_head_dim), + value_head_dim=_to_int_tuple(params.value_head_dim), + pos_dim=params.pos_dim, + num_heads=_to_int_tuple(params.num_heads), + feedforward_dim=_to_int_tuple(params.feedforward_dim), + cnn_module_kernel=_to_int_tuple(params.cnn_module_kernel), + dropout=ScheduledFloat((0.0, 0.3), (20000.0, 0.1)), + warmup_batches=4000.0, + causal=params.causal, + chunk_size=_to_int_tuple(params.chunk_size), + left_context_frames=_to_int_tuple(params.left_context_frames), + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + decoder_dim=params.decoder_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + encoder_dim=max(_to_int_tuple(params.encoder_dim)), + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder_embed = get_encoder_embed(params) + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder_embed=encoder_embed, + encoder=encoder, + decoder=decoder, + joiner=joiner, + encoder_dim=int(max(params.encoder_dim.split(","))), + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + model_avg: nn.Module = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, +) -> Optional[Dict[str, Any]]: + """Load checkpoint from 
file. + + If params.start_batch is positive, it will load the checkpoint from + `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if + params.start_epoch is larger than 1, it will load the checkpoint from + `params.start_epoch - 1`. + + Apart from loading state dict for `model` and `optimizer` it also updates + `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer that we are using. + scheduler: + The scheduler that we are using. + Returns: + Return a dict containing previously saved training info. + """ + if params.start_batch > 0: + filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt" + elif params.start_epoch > 1: + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + else: + return None + + assert filename.is_file(), f"{filename} does not exist!" + + saved_params = load_checkpoint( + filename, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + if params.start_batch > 0: + if "cur_epoch" in saved_params: + params["start_epoch"] = saved_params["cur_epoch"] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: Union[nn.Module, DDP], + model_avg: Optional[nn.Module] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, + sampler: Optional[CutSampler] = None, + scaler: Optional[GradScaler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer used in the training. + sampler: + The sampler for the training dataset. + scaler: + The scaler used for mix precision training. + """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=sampler, + scaler=scaler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def compute_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + sp: spm.SentencePieceProcessor, + batch: dict, + is_training: bool, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute RNNT loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Zipformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. 
+ warmup: a floating point value which increases throughout training; + values >= 1.0 are fully warmed up and have all modules present. + """ + device = model.device if isinstance(model, DDP) else next(model.parameters()).device + feature = batch["inputs"] + # at entry, feature is (N, T, C) + assert feature.ndim == 3 + feature = feature.to(device) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + batch_idx_train = params.batch_idx_train + warm_step = params.warm_step + + texts = batch["supervisions"]["text"] + y = convert_texts_into_ids(texts, sp) + y = k2.RaggedTensor(y).to(device) + + with torch.set_grad_enabled(is_training): + simple_loss, pruned_loss = model( + x=feature, + x_lens=feature_lens, + y=y, + prune_range=params.prune_range, + am_scale=params.am_scale, + lm_scale=params.lm_scale, + rnnt_type=params.rnnt_type, + ) + + s = params.simple_loss_scale + # take down the scale on the simple loss from 1.0 at the start + # to params.simple_loss scale by warm_step. + simple_loss_scale = ( + s + if batch_idx_train >= warm_step + else 1.0 - (batch_idx_train / warm_step) * (1.0 - s) + ) + pruned_loss_scale = ( + 1.0 + if batch_idx_train >= warm_step + else 0.1 + 0.9 * (batch_idx_train / warm_step) + ) + + loss = simple_loss_scale * simple_loss + pruned_loss_scale * pruned_loss + + assert loss.requires_grad == is_training + + info = MetricsTracker() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + info["frames"] = (feature_lens // params.subsampling_factor).sum().item() + + # Note: We use reduction=sum while computing the loss. + info["loss"] = loss.detach().cpu().item() + info["simple_loss"] = simple_loss.detach().cpu().item() + info["pruned_loss"] = pruned_loss.detach().cpu().item() + + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + sp: spm.SentencePieceProcessor, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + model: Union[nn.Module, DDP], + optimizer: torch.optim.Optimizer, + scheduler: LRSchedulerType, + sp: spm.SentencePieceProcessor, + train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + scaler: GradScaler, + model_avg: Optional[nn.Module] = None, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, + rank: int = 0, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + scheduler: + The learning rate scheduler, we call step() every step. + train_dl: + Dataloader for the training dataset. + valid_dl: + Dataloader for the validation dataset. 
+ scaler: + The scaler used for mix precision training. + model_avg: + The stored model averaged from the start of training. + tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. + """ + model.train() + + tot_loss = MetricsTracker() + + saved_bad_model = False + + def save_bad_model(suffix: str = ""): + save_checkpoint_impl( + filename=params.exp_dir / f"bad-model{suffix}-{rank}.pt", + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=0, + ) + + for batch_idx, batch in enumerate(train_dl): + if batch_idx % 10 == 0: + set_batch_count(model, get_adjusted_batch_count(params)) + + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. + scaler.scale(loss).backward() + scheduler.step_batch(params.batch_idx_train) + + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + except: # noqa + save_bad_model() + display_and_save_batch(batch, params=params, sp=sp) + raise + + if params.print_diagnostics and batch_idx == 5: + return + + if ( + rank == 0 + and params.batch_idx_train > 0 + and params.batch_idx_train % params.average_period == 0 + ): + update_averaged_model( + params=params, + model_cur=model, + model_avg=model_avg, + ) + + if ( + params.batch_idx_train > 0 + and params.batch_idx_train % params.save_every_n == 0 + ): + save_checkpoint_with_global_batch_idx( + out_dir=params.exp_dir, + global_batch_idx=params.batch_idx_train, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + remove_checkpoints( + out_dir=params.exp_dir, + topk=params.keep_last_k, + rank=rank, + ) + + if batch_idx % 100 == 0 and params.use_fp16: + # If the grad scale was less than 1, try increasing it. The _growth_interval + # of the grad scaler is configurable, but we can't configure it to have different + # behavior depending on the current grad scale. 
+ cur_grad_scale = scaler._scale.item() + + if cur_grad_scale < 8.0 or (cur_grad_scale < 32.0 and batch_idx % 400 == 0): + scaler.update(cur_grad_scale * 2.0) + if cur_grad_scale < 0.01: + if not saved_bad_model: + save_bad_model(suffix="-first-warning") + saved_bad_model = True + logging.warning(f"Grad scale is small: {cur_grad_scale}") + if cur_grad_scale < 1.0e-05: + save_bad_model() + raise RuntimeError( + f"grad_scale is too small, exiting: {cur_grad_scale}" + ) + + if batch_idx % params.log_interval == 0: + cur_lr = max(scheduler.get_last_lr()) + cur_grad_scale = scaler._scale.item() if params.use_fp16 else 1.0 + + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}, " + + (f"grad_scale: {scaler._scale.item()}" if params.use_fp16 else "") + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary(tb_writer, "train/tot_", params.batch_idx_train) + if params.use_fp16: + tb_writer.add_scalar( + "train/grad_scale", cur_grad_scale, params.batch_idx_train + ) + + if batch_idx % params.valid_interval == 0 and not params.print_diagnostics: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + sp=sp, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + logging.info( + f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB" + ) + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + + fix_random_seed(params.seed) + if world_size > 1: + setup_dist(rank, world_size, params.master_port) + + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info("Training started") + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", rank) + logging.info(f"Device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # <blk> is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("<blk>") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + assert params.save_every_n >= params.average_period + model_avg: Optional[nn.Module] = None + if rank == 0: + # model_avg is only used with rank 0 + model_avg = copy.deepcopy(model).to(torch.float64) + + assert params.start_epoch > 0, params.start_epoch + checkpoints = load_checkpoint_if_available( + params=params, model=model, model_avg=model_avg + ) + + model.to(device) + if world_size > 1: + logging.info("Using DDP") + model = DDP(model, device_ids=[rank], find_unused_parameters=True) + + optimizer = ScaledAdam( + get_parameter_groups_with_lrs(model, lr=params.base_lr, include_names=True), + lr=params.base_lr, # should have no effect + clipping_scale=2.0, + ) + + scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs) + + if checkpoints and "optimizer" in checkpoints: + logging.info("Loading optimizer state dict") + optimizer.load_state_dict(checkpoints["optimizer"]) + + if ( + checkpoints + and "scheduler" in checkpoints + and checkpoints["scheduler"] is not None + ): + logging.info("Loading scheduler state dict") + scheduler.load_state_dict(checkpoints["scheduler"]) + + if params.print_diagnostics: + opts = diagnostics.TensorDiagnosticOptions( + 2**22 + ) # allow 4 megabytes per sub-module + diagnostic = diagnostics.attach_diagnostics(model, opts) + + if params.inf_check: + register_inf_check_hooks(model) + + tedlium = TedLiumAsrDataModule(args) + + train_cuts = tedlium.train_cuts() + train_cuts = train_cuts.filter(lambda c: 1.0 <= c.duration <= 20.0) + + if params.start_batch > 0 and checkpoints and "sampler" in checkpoints: + # We only load the sampler's state dict when it loads a checkpoint + # saved in the middle of an epoch + sampler_state_dict = checkpoints["sampler"] + else: + sampler_state_dict = None + + train_dl = tedlium.train_dataloaders( + train_cuts, sampler_state_dict=sampler_state_dict + ) + + valid_cuts = tedlium.dev_cuts() + valid_dl = tedlium.valid_dataloaders(valid_cuts) + + if not params.print_diagnostics: + scan_pessimistic_batches_for_oom( + model=model, + train_dl=train_dl, + optimizer=optimizer, + sp=sp, + params=params, + ) + + scaler = GradScaler(enabled=params.use_fp16, init_scale=1.0) + if checkpoints and "grad_scaler" in checkpoints: + logging.info("Loading grad scaler state dict") + scaler.load_state_dict(checkpoints["grad_scaler"]) + + for epoch in range(params.start_epoch, params.num_epochs + 1): + scheduler.step_epoch(epoch - 1) + fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: +
tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sp=sp, + train_dl=train_dl, + valid_dl=valid_dl, + scaler=scaler, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + if params.print_diagnostics: + diagnostic.print_diagnostics() + break + + save_checkpoint( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def display_and_save_batch( + batch: dict, + params: AttributeDict, + sp: spm.SentencePieceProcessor, +) -> None: + """Display the batch statistics and save the batch into disk. + + Args: + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + params: + Parameters for training. See :func:`get_params`. + sp: + The BPE model. + """ + from lhotse.utils import uuid4 + + filename = f"{params.exp_dir}/batch-{uuid4()}.pt" + logging.info(f"Saving batch to {filename}") + torch.save(batch, filename) + + supervisions = batch["supervisions"] + features = batch["inputs"] + + logging.info(f"features shape: {features.shape}") + + y = sp.encode(supervisions["text"], out_type=int) + num_tokens = sum(len(i) for i in y) + logging.info(f"num tokens: {num_tokens}") + + +def scan_pessimistic_batches_for_oom( + model: Union[nn.Module, DDP], + train_dl: torch.utils.data.DataLoader, + optimizer: torch.optim.Optimizer, + sp: spm.SentencePieceProcessor, + params: AttributeDict, +): + from lhotse.dataset import find_pessimistic_batches + + logging.info( + "Sanity check -- see if any of the batches in epoch 1 would cause OOM." + ) + batches, crit_values = find_pessimistic_batches(train_dl.sampler) + for criterion, cuts in batches.items(): + batch = train_dl.dataset[cuts] + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, _ = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + ) + loss.backward() + optimizer.zero_grad() + except Exception as e: + if "CUDA out of memory" in str(e): + logging.error( + "Your GPU ran out of memory with the current " + "max_duration setting. We recommend decreasing " + "max_duration and trying again.\n" + f"Failing criterion: {criterion} " + f"(={crit_values[criterion]}) ..." 
+ ) + display_and_save_batch(batch, params=params, sp=sp) + raise + logging.info( + f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB" + ) + + +def main(): + parser = get_parser() + TedLiumAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/tedlium3/ASR/zipformer/zipformer.py b/egs/tedlium3/ASR/zipformer/zipformer.py new file mode 120000 index 000000000..23011dda7 --- /dev/null +++ b/egs/tedlium3/ASR/zipformer/zipformer.py @@ -0,0 +1 @@ +../../../librispeech/ASR/zipformer/zipformer.py \ No newline at end of file From db71b0302651d0fd6d0e1c742591f35f2ab224ac Mon Sep 17 00:00:00 2001 From: Wei Kang Date: Thu, 29 Jun 2023 16:48:59 +0800 Subject: [PATCH 03/30] Support int8 quantization in decoder (#1152) --- egs/librispeech/ASR/zipformer/export-onnx-streaming.py | 2 +- egs/librispeech/ASR/zipformer/export-onnx.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/egs/librispeech/ASR/zipformer/export-onnx-streaming.py b/egs/librispeech/ASR/zipformer/export-onnx-streaming.py index ff3e46433..3eb06f68c 100755 --- a/egs/librispeech/ASR/zipformer/export-onnx-streaming.py +++ b/egs/librispeech/ASR/zipformer/export-onnx-streaming.py @@ -757,7 +757,7 @@ def main(): quantize_dynamic( model_input=decoder_filename, model_output=decoder_filename_int8, - op_types_to_quantize=["MatMul"], + op_types_to_quantize=["MatMul", "Gather"], weight_type=QuantType.QInt8, ) diff --git a/egs/librispeech/ASR/zipformer/export-onnx.py b/egs/librispeech/ASR/zipformer/export-onnx.py index 1bc10c896..724fdd2a6 100755 --- a/egs/librispeech/ASR/zipformer/export-onnx.py +++ b/egs/librispeech/ASR/zipformer/export-onnx.py @@ -602,7 +602,7 @@ def main(): quantize_dynamic( model_input=decoder_filename, model_output=decoder_filename_int8, - op_types_to_quantize=["MatMul"], + op_types_to_quantize=["MatMul", "Gather"], weight_type=QuantType.QInt8, ) From c59c89fc1323ed4d809bad6445d480437206e75a Mon Sep 17 00:00:00 2001 From: Desh Raj Date: Thu, 29 Jun 2023 13:09:01 +0200 Subject: [PATCH 04/30] Minor fix in tedlium results file (#1153) --- egs/tedlium3/ASR/RESULTS.md | 36 +++++++----------------------------- 1 file changed, 7 insertions(+), 29 deletions(-) diff --git a/egs/tedlium3/ASR/RESULTS.md b/egs/tedlium3/ASR/RESULTS.md index cda77073d..bd8a5b43f 100644 --- a/egs/tedlium3/ASR/RESULTS.md +++ b/egs/tedlium3/ASR/RESULTS.md @@ -2,7 +2,7 @@ ### TedLium3 BPE training results (Zipformer) -#### 2023-06-15 +#### 2023-06-15 (Regular transducer) Using the codes from this PR https://github.com/k2-fsa/icefall/pull/1125. @@ -82,9 +82,7 @@ avg=22 A pre-trained model and decoding logs can be found at -#### 2023-06-26 (transducer topology) - -**Modified transducer** +#### 2023-06-26 (Modified transducer) ``` ./zipformer/train.py \ @@ -97,36 +95,16 @@ A pre-trained model and decoding logs can be found at . 
### TedLium3 BPE training results (Conformer-CTC 2) From ccd8c624dd19c23b3ef576df3329092a78522e6f Mon Sep 17 00:00:00 2001 From: Zengwei Yao Date: Fri, 30 Jun 2023 12:05:37 +0800 Subject: [PATCH 05/30] support testing onnx exported model on the test sets (#1150) * support testing onnx exported model on the test sets * use token_table instead --- egs/librispeech/ASR/zipformer/onnx_decode.py | 323 ++++++++++++++++++ .../ASR/zipformer/onnx_pretrained.py | 2 +- 2 files changed, 324 insertions(+), 1 deletion(-) create mode 100755 egs/librispeech/ASR/zipformer/onnx_decode.py diff --git a/egs/librispeech/ASR/zipformer/onnx_decode.py b/egs/librispeech/ASR/zipformer/onnx_decode.py new file mode 100755 index 000000000..2aca36ca9 --- /dev/null +++ b/egs/librispeech/ASR/zipformer/onnx_decode.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python3 +# +# Copyright 2021-2023 Xiaomi Corporation (Author: Fangjun Kuang, +# Zengwei Yao, +# Xiaoyu Yang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This script loads ONNX exported models and uses them to decode the test sets. + +We use the pre-trained model from +https://huggingface.co/Zengwei/icefall-asr-librispeech-zipformer-2023-05-15 +as an example to show how to use this file. + +1. Download the pre-trained model + +cd egs/librispeech/ASR + +repo_url=https://huggingface.co/Zengwei/icefall-asr-librispeech-zipformer-2023-05-15 +GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url +repo=$(basename $repo_url) + +pushd $repo +git lfs pull --include "data/lang_bpe_500/bpe.model" +git lfs pull --include "exp/pretrained.pt" + +cd exp +ln -s pretrained.pt epoch-99.pt +popd + +2. Export the model to ONNX + +./zipformer/export-onnx.py \ + --tokens $repo/data/lang_bpe_500/tokens.txt \ + --use-averaged-model 0 \ + --epoch 99 \ + --avg 1 \ + --exp-dir $repo/exp \ + --causal False + +It will generate the following 3 files inside $repo/exp: + + - encoder-epoch-99-avg-1.onnx + - decoder-epoch-99-avg-1.onnx + - joiner-epoch-99-avg-1.onnx + +2. Run this file + +./zipformer/onnx_decode.py \ + --exp-dir $repo/exp \ + --max-duration 600 \ + --encoder-model-filename $repo/exp/encoder-epoch-99-avg-1.onnx \ + --decoder-model-filename $repo/exp/decoder-epoch-99-avg-1.onnx \ + --joiner-model-filename $repo/exp/joiner-epoch-99-avg-1.onnx \ + --tokens $repo/data/lang_bpe_500/tokens.txt \ +""" + + +import argparse +import logging +import time +from pathlib import Path +from typing import List, Tuple + +import torch +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule + +from onnx_pretrained import greedy_search, OnnxModel + +from icefall.utils import setup_logger, store_transcripts, write_error_stats +from k2 import SymbolTable + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--encoder-model-filename", + type=str, + required=True, + help="Path to the encoder onnx model. 
", + ) + + parser.add_argument( + "--decoder-model-filename", + type=str, + required=True, + help="Path to the decoder onnx model. ", + ) + + parser.add_argument( + "--joiner-model-filename", + type=str, + required=True, + help="Path to the joiner onnx model. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="zipformer/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--tokens", + type=str, + help="""Path to tokens.txt.""", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="Valid values are greedy_search and modified_beam_search", + ) + + return parser + + +def decode_one_batch( + model: OnnxModel, token_table: SymbolTable, batch: dict +) -> List[List[str]]: + """Decode one batch and return the result. + Currently it only greedy_search is supported. + + Args: + model: + The neural model. + token_table: + The token table. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + + Returns: + Return the decoded results for each utterance. + """ + feature = batch["inputs"] + assert feature.ndim == 3 + # at entry, feature is (N, T, C) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(dtype=torch.int64) + + encoder_out, encoder_out_lens = model.run_encoder(x=feature, x_lens=feature_lens) + + hyps = greedy_search( + model=model, encoder_out=encoder_out, encoder_out_lens=encoder_out_lens + ) + + def token_ids_to_words(token_ids: List[int]) -> str: + text = "" + for i in token_ids: + text += token_table[i] + return text.replace("▁", " ").strip() + + hyps = [token_ids_to_words(h).split() for h in hyps] + return hyps + + +def decode_dataset( + dl: torch.utils.data.DataLoader, + model: nn.Module, + token_table: SymbolTable, +) -> Tuple[List[Tuple[str, List[str], List[str]]], float]: + """Decode dataset. + + Args: + dl: + PyTorch's dataloader containing the dataset to decode. + model: + The neural model. + token_table: + The token table. + + Returns: + - A list of tuples. Each tuple contains three elements: + - cut_id, + - reference transcript, + - predicted result. + - The total duration (in seconds) of the dataset. + """ + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" 
+ + log_interval = 10 + total_duration = 0 + + results = [] + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] + total_duration += sum([cut.duration for cut in batch["supervisions"]["cut"]]) + + hyps = decode_one_batch(model=model, token_table=token_table, batch=batch) + + this_batch = [] + assert len(hyps) == len(texts) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + ref_words = ref_text.split() + this_batch.append((cut_id, ref_words, hyp_words)) + + results.extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % log_interval == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info(f"batch {batch_str}, cuts processed until now is {num_cuts}") + + return results, total_duration + + +def save_results( + res_dir: Path, + test_set_name: str, + results: List[Tuple[str, List[str], List[str]]], +): + recog_path = res_dir / f"recogs-{test_set_name}.txt" + results = sorted(results) + store_transcripts(filename=recog_path, texts=results) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. + errs_filename = res_dir / f"errs-{test_set_name}.txt" + with open(errs_filename, "w") as f: + wer = write_error_stats(f, f"{test_set_name}", results, enable_log=True) + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + errs_info = res_dir / f"wer-summary-{test_set_name}.txt" + with open(errs_info, "w") as f: + print("WER", file=f) + print(wer, file=f) + + s = "\nFor {}, WER is {}:\n".format(test_set_name, wer) + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + LibriSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + + assert ( + args.decoding_method == "greedy_search" + ), "Only supports greedy_search currently." + res_dir = Path(args.exp_dir) / f"onnx-{args.decoding_method}" + + setup_logger(f"{res_dir}/log-decode") + logging.info("Decoding started") + + device = torch.device("cpu") + logging.info(f"Device: {device}") + + token_table = SymbolTable.from_file(args.tokens) + + logging.info(vars(args)) + + logging.info("About to create model") + model = OnnxModel( + encoder_model_filename=args.encoder_model_filename, + decoder_model_filename=args.decoder_model_filename, + joiner_model_filename=args.joiner_model_filename, + ) + + # we need cut ids to display recognition results. 
+ args.return_cuts = True + librispeech = LibriSpeechAsrDataModule(args) + + test_clean_cuts = librispeech.test_clean_cuts() + test_other_cuts = librispeech.test_other_cuts() + + test_clean_dl = librispeech.test_dataloaders(test_clean_cuts) + test_other_dl = librispeech.test_dataloaders(test_other_cuts) + + test_sets = ["test-clean", "test-other"] + test_dl = [test_clean_dl, test_other_dl] + + for test_set, test_dl in zip(test_sets, test_dl): + start_time = time.time() + results, total_duration = decode_dataset(dl=test_dl, model=model, token_table=token_table) + end_time = time.time() + elapsed_seconds = end_time - start_time + rtf = elapsed_seconds / total_duration + + logging.info(f"Elapsed time: {elapsed_seconds:.3f} s") + logging.info(f"Wave duration: {total_duration:.3f} s") + logging.info( + f"Real time factor (RTF): {elapsed_seconds:.3f}/{total_duration:.3f} = {rtf:.3f}" + ) + + save_results(res_dir=res_dir, test_set_name=test_set, results=results) + + logging.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/zipformer/onnx_pretrained.py b/egs/librispeech/ASR/zipformer/onnx_pretrained.py index b821c4e19..e8a521460 100755 --- a/egs/librispeech/ASR/zipformer/onnx_pretrained.py +++ b/egs/librispeech/ASR/zipformer/onnx_pretrained.py @@ -56,7 +56,7 @@ It will generate the following 3 files inside $repo/exp: 3. Run this file -./pruned_transducer_stateless3/onnx_pretrained.py \ +./zipformer/onnx_pretrained.py \ --encoder-model-filename $repo/exp/encoder-epoch-99-avg-1.onnx \ --decoder-model-filename $repo/exp/decoder-epoch-99-avg-1.onnx \ --joiner-model-filename $repo/exp/joiner-epoch-99-avg-1.onnx \ From 98d89463f6840439e5c4902b98df218a45359198 Mon Sep 17 00:00:00 2001 From: MicKot Date: Fri, 30 Jun 2023 15:16:40 +0200 Subject: [PATCH 06/30] zipformer2 logaddexp onnx safe (#1157) --- egs/librispeech/ASR/zipformer/scaling.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/egs/librispeech/ASR/zipformer/scaling.py b/egs/librispeech/ASR/zipformer/scaling.py index 9f23eeead..78c4efdc1 100644 --- a/egs/librispeech/ASR/zipformer/scaling.py +++ b/egs/librispeech/ASR/zipformer/scaling.py @@ -36,7 +36,9 @@ def logaddexp(x: Tensor, y: Tensor) -> Tensor: if not torch.jit.is_tracing(): return torch.logaddexp(x, y) else: - return (x.exp() + y.exp()).log() + max_value = torch.max(x, y) + diff = torch.abs(x - y) + return max_value + torch.log1p(torch.exp(-diff)) class PiecewiseLinear(object): """ From c3e23ec8d2a3ed2547bd94dee7280bd3f193a47e Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Sun, 2 Jul 2023 10:30:09 +0800 Subject: [PATCH 07/30] Fix logaddexp for ONNX export (#1158) --- egs/librispeech/ASR/zipformer/scaling.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/egs/librispeech/ASR/zipformer/scaling.py b/egs/librispeech/ASR/zipformer/scaling.py index 78c4efdc1..885f8f143 100644 --- a/egs/librispeech/ASR/zipformer/scaling.py +++ b/egs/librispeech/ASR/zipformer/scaling.py @@ -33,12 +33,24 @@ from torch import Tensor # The following function is to solve the above error when exporting # models to ONNX via torch.jit.trace() def logaddexp(x: Tensor, y: Tensor) -> Tensor: - if not torch.jit.is_tracing(): + # Caution(fangjun): Put torch.jit.is_scripting() before + # torch.onnx.is_in_onnx_export(); + # otherwise, it will cause errors for torch.jit.script(). + # + # torch.logaddexp() works for both torch.jit.script() and + # torch.jit.trace() but it causes errors for ONNX export. 
+ # + if torch.jit.is_scripting(): + # Note: We cannot use torch.jit.is_tracing() here as it also + # matches torch.onnx.export(). return torch.logaddexp(x, y) - else: + elif torch.onnx.is_in_onnx_export(): max_value = torch.max(x, y) diff = torch.abs(x - y) return max_value + torch.log1p(torch.exp(-diff)) + else: + # for torch.jit.trace() + return torch.logaddexp(x, y) class PiecewiseLinear(object): """ From 9009d028a07b0b394b150692f973d3ca9a98cfa3 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Mon, 3 Jul 2023 23:56:51 +0800 Subject: [PATCH 08/30] Fix ONNX export for the latest non-streaming zipformer. (#1160) --- egs/librispeech/ASR/zipformer/scaling.py | 23 ++++++++++++++++--- .../ASR/zipformer/scaling_converter.py | 15 +++++++++++- 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/egs/librispeech/ASR/zipformer/scaling.py b/egs/librispeech/ASR/zipformer/scaling.py index 885f8f143..4ee7b7826 100644 --- a/egs/librispeech/ASR/zipformer/scaling.py +++ b/egs/librispeech/ASR/zipformer/scaling.py @@ -25,6 +25,11 @@ import math import torch.nn as nn from torch import Tensor +def logaddexp_onnx(x: Tensor, y: Tensor) -> Tensor: + max_value = torch.max(x, y) + diff = torch.abs(x - y) + return max_value + torch.log1p(torch.exp(-diff)) + # RuntimeError: Exporting the operator logaddexp to ONNX opset version # 14 is not supported. Please feel free to request support or submit @@ -45,9 +50,7 @@ def logaddexp(x: Tensor, y: Tensor) -> Tensor: # matches torch.onnx.export(). return torch.logaddexp(x, y) elif torch.onnx.is_in_onnx_export(): - max_value = torch.max(x, y) - diff = torch.abs(x - y) - return max_value + torch.log1p(torch.exp(-diff)) + return logaddexp_onnx(x, y) else: # for torch.jit.trace() return torch.logaddexp(x, y) @@ -1348,6 +1351,13 @@ class SwooshL(torch.nn.Module): return k2.swoosh_l(x) # return SwooshLFunction.apply(x) +class SwooshLOnnx(torch.nn.Module): + def forward(self, x: Tensor) -> Tensor: + """Return Swoosh-L activation. + """ + zero = torch.tensor(0.0, dtype=x.dtype, device=x.device) + return logaddexp_onnx(zero, x - 4.0) - 0.08 * x - 0.035 + class SwooshRFunction(torch.autograd.Function): """ @@ -1414,6 +1424,13 @@ class SwooshR(torch.nn.Module): return k2.swoosh_r(x) # return SwooshRFunction.apply(x) +class SwooshROnnx(torch.nn.Module): + def forward(self, x: Tensor) -> Tensor: + """Return Swoosh-R activation. + """ + zero = torch.tensor(0.0, dtype=x.dtype, device=x.device) + return logaddexp_onnx(zero, x - 1.) - 0.08 * x - 0.313261687 + # simple version of SwooshL that does not redefine the backprop, used in # ActivationDropoutAndLinearFunction. 
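The ONNX-friendly `logaddexp_onnx` added above relies on the identity `logaddexp(x, y) = max(x, y) + log1p(exp(-|x - y|))`, which never exponentiates a positive number and so cannot overflow. A quick standalone sanity check of that identity (a sketch for illustration, not part of this patch):

```python
import torch

def logaddexp_onnx(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Factor out the larger argument so that exp() only ever sees
    # values <= 0.
    max_value = torch.max(x, y)
    diff = torch.abs(x - y)
    return max_value + torch.log1p(torch.exp(-diff))

x = torch.tensor([0.5, -3.0, 100.0, -100.0])
y = torch.tensor([1.5, 2.0, 100.0, 100.0])

# Matches torch.logaddexp, including for large magnitudes where a naive
# (x.exp() + y.exp()).log() would overflow to inf in float32.
assert torch.allclose(logaddexp_onnx(x, y), torch.logaddexp(x, y))
```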
diff --git a/egs/librispeech/ASR/zipformer/scaling_converter.py b/egs/librispeech/ASR/zipformer/scaling_converter.py index 54a5c2a6a..76622fa12 100644 --- a/egs/librispeech/ASR/zipformer/scaling_converter.py +++ b/egs/librispeech/ASR/zipformer/scaling_converter.py @@ -26,7 +26,16 @@ from typing import List, Tuple import torch import torch.nn as nn -from scaling import Balancer, Dropout3, ScaleGrad, Whiten +from scaling import ( + Balancer, + Dropout3, + ScaleGrad, + SwooshL, + SwooshLOnnx, + SwooshR, + SwooshROnnx, + Whiten, +) from zipformer import CompactRelPositionalEncoding @@ -75,6 +84,10 @@ def convert_scaled_to_non_scaled( for name, m in model.named_modules(): if isinstance(m, (Balancer, Dropout3, ScaleGrad, Whiten)): d[name] = nn.Identity() + elif is_onnx and isinstance(m, SwooshR): + d[name] = SwooshROnnx() + elif is_onnx and isinstance(m, SwooshL): + d[name] = SwooshLOnnx() elif is_onnx and isinstance(m, CompactRelPositionalEncoding): # We want to recreate the positional encoding vector when # the input changes, so we have to use torch.jit.script() From eca020263214bffaaf6997c62b031c355101a4db Mon Sep 17 00:00:00 2001 From: "Nickolay V. Shmyrev" Date: Tue, 4 Jul 2023 05:13:25 +0300 Subject: [PATCH 09/30] Add start-batch option for RNNLM training (#1161) * Add start-batch option for RNNLM training * Also set epoch * Skip batches on load --- icefall/rnn_lm/train.py | 39 +++++++++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/icefall/rnn_lm/train.py b/icefall/rnn_lm/train.py index 0f0887859..3d206d139 100755 --- a/icefall/rnn_lm/train.py +++ b/icefall/rnn_lm/train.py @@ -99,6 +99,15 @@ def get_parser(): """, ) + parser.add_argument( + "--start-batch", + type=int, + default=0, + help="""If positive, --start-epoch is ignored and + it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt + """, + ) + parser.add_argument( "--exp-dir", type=str, @@ -242,7 +251,9 @@ def load_checkpoint_if_available( ) -> None: """Load checkpoint from file. - If params.start_epoch is positive, it will load the checkpoint from + If params.start_batch is positive, it will load the checkpoint from + `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if + params.start_epoch is larger than 1, it will load the checkpoint from `params.start_epoch - 1`. Otherwise, this function does nothing. Apart from loading state dict for `model`, `optimizer` and `scheduler`, @@ -261,10 +272,14 @@ def load_checkpoint_if_available( Returns: Return None. 
""" - if params.start_epoch <= 0: - return - filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + if params.start_batch > 0: + filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt" + elif params.start_epoch > 1: + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + else: + return None + logging.info(f"Loading checkpoint: {filename}") saved_params = load_checkpoint( filename, @@ -283,6 +298,13 @@ def load_checkpoint_if_available( for k in keys: params[k] = saved_params[k] + if params.start_batch > 0: + if "cur_epoch" in saved_params: + params["start_epoch"] = saved_params["cur_epoch"] + + if "cur_batch_idx" in saved_params: + params["cur_batch_idx"] = saved_params["cur_batch_idx"] + return saved_params @@ -438,7 +460,14 @@ def train_one_epoch( tot_loss = MetricsTracker() + cur_batch_idx = params.get("cur_batch_idx", 0) + for batch_idx, batch in enumerate(train_dl): + + if batch_idx < cur_batch_idx: + continue + cur_batch_idx = batch_idx + params.batch_idx_train += 1 x, y, sentence_lengths = batch batch_size = x.size(0) @@ -463,6 +492,7 @@ def train_one_epoch( params.batch_idx_train > 0 and params.batch_idx_train % params.save_every_n == 0 ): + params.cur_batch_idx = batch_idx save_checkpoint_with_global_batch_idx( out_dir=params.exp_dir, global_batch_idx=params.batch_idx_train, @@ -471,6 +501,7 @@ def train_one_epoch( optimizer=optimizer, rank=rank, ) + del params.cur_batch_idx if batch_idx % params.log_interval == 0: # Note: "frames" here means "num_tokens" From 856c0f2a60cf2e157cc46013665e6053117efd4f Mon Sep 17 00:00:00 2001 From: zr_jin <60612200+JinZr@users.noreply.github.com> Date: Tue, 4 Jul 2023 19:12:39 +0800 Subject: [PATCH 10/30] fixed default param for an aishell recipe (#1159) --- egs/aishell/ASR/pruned_transducer_stateless7/train.py | 2 +- egs/aishell/ASR/pruned_transducer_stateless7/train2.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/egs/aishell/ASR/pruned_transducer_stateless7/train.py b/egs/aishell/ASR/pruned_transducer_stateless7/train.py index ef536c035..cbb7db086 100755 --- a/egs/aishell/ASR/pruned_transducer_stateless7/train.py +++ b/egs/aishell/ASR/pruned_transducer_stateless7/train.py @@ -240,7 +240,7 @@ def get_parser(): parser.add_argument( "--exp-dir", type=str, - default="pruned_transducer_stateless3/exp", + default="pruned_transducer_stateless7/exp", help="""The experiment dir. It specifies the directory where all training related files, e.g., checkpoints, log, etc, are saved diff --git a/egs/aishell/ASR/pruned_transducer_stateless7/train2.py b/egs/aishell/ASR/pruned_transducer_stateless7/train2.py index fb35a6c95..c30f6f960 100755 --- a/egs/aishell/ASR/pruned_transducer_stateless7/train2.py +++ b/egs/aishell/ASR/pruned_transducer_stateless7/train2.py @@ -243,7 +243,7 @@ def get_parser(): parser.add_argument( "--exp-dir", type=str, - default="pruned_transducer_stateless3/exp", + default="pruned_transducer_stateless7/exp", help="""The experiment dir. 
It specifies the directory where all training related files, e.g., checkpoints, log, etc, are saved From a4402b88e6748d7ad8afe756f909f9da78bb1742 Mon Sep 17 00:00:00 2001 From: Desh Raj Date: Tue, 4 Jul 2023 13:25:58 +0200 Subject: [PATCH 11/30] SURT multi-talker ASR recipe (#1126) * merge upstream * add SURT model and training * add libricss decoding * add chunk width randomization * decode SURT with libricss * initial commit for zipformer_ctc * remove unwanted changes * remove changes to other recipe * fix zipformer softlink * fix for JIT export * add missing file * fix symbolic links * update results * clean commit for SURT recipe * training libricss surt model * remove unwanted files * remove unwanted changes * remove changes in librispeech * change some files to symlinks * remove unwanted changes in utils * add export script * add README * minor fix in README * add assets for README * replace some files with symlinks * remove unused decoding methods * fix symlink * address comments from @csukuangfj --- egs/libricss/SURT/README.md | 249 +++ .../SURT/dprnn_zipformer/asr_datamodule.py | 372 +++++ .../SURT/dprnn_zipformer/beam_search.py | 730 +++++++++ egs/libricss/SURT/dprnn_zipformer/decode.py | 654 ++++++++ egs/libricss/SURT/dprnn_zipformer/decoder.py | 1 + egs/libricss/SURT/dprnn_zipformer/dprnn.py | 305 ++++ .../SURT/dprnn_zipformer/encoder_interface.py | 1 + egs/libricss/SURT/dprnn_zipformer/export.py | 306 ++++ egs/libricss/SURT/dprnn_zipformer/joiner.py | 1 + egs/libricss/SURT/dprnn_zipformer/model.py | 316 ++++ egs/libricss/SURT/dprnn_zipformer/optim.py | 1 + egs/libricss/SURT/dprnn_zipformer/scaling.py | 1 + .../SURT/dprnn_zipformer/scaling_converter.py | 1 + egs/libricss/SURT/dprnn_zipformer/train.py | 1452 +++++++++++++++++ .../SURT/dprnn_zipformer/train_adapt.py | 1343 +++++++++++++++ .../SURT/dprnn_zipformer/zipformer.py | 1 + egs/libricss/SURT/heat.png | Bin 0 -> 305340 bytes egs/libricss/SURT/local/add_source_feats.py | 85 + .../SURT/local/compute_fbank_libricss.py | 105 ++ .../SURT/local/compute_fbank_librispeech.py | 111 ++ .../SURT/local/compute_fbank_lsmix.py | 188 +++ .../SURT/local/compute_fbank_musan.py | 114 ++ egs/libricss/SURT/prepare.sh | 204 +++ egs/libricss/SURT/shared | 1 + egs/libricss/SURT/surt.png | Bin 0 -> 114318 bytes icefall/utils.py | 163 +- 26 files changed, 6704 insertions(+), 1 deletion(-) create mode 100644 egs/libricss/SURT/README.md create mode 100644 egs/libricss/SURT/dprnn_zipformer/asr_datamodule.py create mode 100644 egs/libricss/SURT/dprnn_zipformer/beam_search.py create mode 100755 egs/libricss/SURT/dprnn_zipformer/decode.py create mode 120000 egs/libricss/SURT/dprnn_zipformer/decoder.py create mode 100644 egs/libricss/SURT/dprnn_zipformer/dprnn.py create mode 120000 egs/libricss/SURT/dprnn_zipformer/encoder_interface.py create mode 100755 egs/libricss/SURT/dprnn_zipformer/export.py create mode 120000 egs/libricss/SURT/dprnn_zipformer/joiner.py create mode 100644 egs/libricss/SURT/dprnn_zipformer/model.py create mode 120000 egs/libricss/SURT/dprnn_zipformer/optim.py create mode 120000 egs/libricss/SURT/dprnn_zipformer/scaling.py create mode 120000 egs/libricss/SURT/dprnn_zipformer/scaling_converter.py create mode 100755 egs/libricss/SURT/dprnn_zipformer/train.py create mode 100755 egs/libricss/SURT/dprnn_zipformer/train_adapt.py create mode 120000 egs/libricss/SURT/dprnn_zipformer/zipformer.py create mode 100644 egs/libricss/SURT/heat.png create mode 100755 egs/libricss/SURT/local/add_source_feats.py create mode 100755 
egs/libricss/SURT/local/compute_fbank_libricss.py create mode 100755 egs/libricss/SURT/local/compute_fbank_librispeech.py create mode 100755 egs/libricss/SURT/local/compute_fbank_lsmix.py create mode 100755 egs/libricss/SURT/local/compute_fbank_musan.py create mode 100755 egs/libricss/SURT/prepare.sh create mode 120000 egs/libricss/SURT/shared create mode 100644 egs/libricss/SURT/surt.png diff --git a/egs/libricss/SURT/README.md b/egs/libricss/SURT/README.md new file mode 100644 index 000000000..10a1aaad1 --- /dev/null +++ b/egs/libricss/SURT/README.md @@ -0,0 +1,249 @@ +# Introduction + +This is a multi-talker ASR recipe for the LibriCSS dataset. We train a Streaming +Unmixing and Recognition Transducer (SURT) model for the task. In this README, +we will describe the task, the model, and the training process. We will also +provide links to pre-trained models and training logs. + +## Task + +LibriCSS is a multi-talker meeting corpus formed from mixing together LibriSpeech utterances +and replaying in a real meeting room. It consists of 10 1-hour sessions of audio, each +recorded on a 7-channel microphone. The sessions are recorded at a sampling rate of 16 kHz. +For more information, refer to the paper: +Z. Chen et al., "Continuous speech separation: dataset and analysis," +ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), +Barcelona, Spain, 2020 + +In this recipe, we perform the "continuous, streaming, multi-talker ASR" task on LibriCSS. + +* By "continuous", we mean that the model should be able to transcribe unsegmented audio +without the need of an external VAD. +* By "streaming", we mean that the model has limited right context. We use a right-context +of at most 32 frames (320 ms). +* By "multi-talker", we mean that the model should be able to transcribe overlapping speech +from multiple speakers. + +For now, we do not care about speaker attribution, i.e., the transcription is speaker +agnostic. The evaluation depends on the particular model type. In this case, we use +the optimal reference combination WER (ORC-WER) metric as implemented in the +[meeteval](https://github.com/fgnt/meeteval) toolkit. + +## Model + +We use the Streaming Unmixing and Recognition Transducer (SURT) model for this task. +The model is based on the papers: + +- Lu, Liang et al. “Streaming End-to-End Multi-Talker Speech Recognition.” IEEE Signal Processing Letters 28 (2020): 803-807. +- Raj, Desh et al. “Continuous Streaming Multi-Talker ASR with Dual-Path Transducers.” ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) (2021): 7317-7321. + +The model is a combination of a speech separation model and a speech recognition model, +but trained end-to-end with a single loss function. The overall architecture is shown +in the figure below. Note that this architecture is slightly different from the one +in the above papers. A detailed description of the model can be found in the following +paper: [SURT 2.0: Advanced in transducer-based multi-talker ASR](https://arxiv.org/abs/2306.10559). + +

+![Streaming Unmixing and Recognition Transducer](surt.png)

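+The figure above shows the generic unmixing-plus-recognition layout. The
+following is a minimal sketch of that forward pass, assuming two output
+channels; the names (`TinySurt`, `mask_net`, `recognizer`) are illustrative
+and are not the classes used in this recipe:
+
+```python
+import torch
+import torch.nn as nn
+
+
+class TinySurt(nn.Module):
+    """Toy SURT: a masking network produces one soft mask per output
+    channel, and a single shared encoder transcribes each masked copy
+    of the input mixture."""
+
+    def __init__(self, feat_dim: int = 80, num_channels: int = 2):
+        super().__init__()
+        self.num_channels = num_channels
+        self.mask_net = nn.Sequential(
+            nn.Linear(feat_dim, 256),
+            nn.ReLU(),
+            nn.Linear(256, feat_dim * num_channels),
+            nn.Sigmoid(),  # soft masks in [0, 1]
+        )
+        self.recognizer = nn.LSTM(feat_dim, 256, batch_first=True)
+
+    def forward(self, x: torch.Tensor):  # x: (batch, time, feat_dim)
+        b, t, f = x.shape
+        masks = self.mask_net(x).view(b, t, self.num_channels, f)
+        # The same recognition network runs on every masked stream.
+        return [
+            self.recognizer(x * masks[:, :, c])[0]
+            for c in range(self.num_channels)
+        ]
+
+
+out1, out2 = TinySurt()(torch.randn(1, 100, 80))  # one output per channel
+```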
+ +In the [dprnn_zipformer](./dprnn_zipformer) recipe, for example, we use a DPRNN-based masking network +and a Zipfomer-based recognition network. But other combinations are possible as well. + +## Training objective + +We train the model using the pruned transducer loss, similar to other ASR recipes in +icefall. However, an important consideration is how to assign references to the output +channels (2 in this case). For this, we use the heuristic error assignment training (HEAT) +strategy, which assigns references to the first available channel based on their start +times. An illustrative example is shown in the figure below: + +

+![Illustration of HEAT-based reference assignment](heat.png)

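+In code, the heuristic can be sketched as follows (a simplified
+illustration; this function is not part of the recipe itself):
+
+```python
+def heat_assign(utterances, num_channels=2):
+    """Assign each (start, end, text) utterance to the first output
+    channel that is free at its start time (the HEAT heuristic)."""
+    channels = [[] for _ in range(num_channels)]
+    busy_until = [0.0] * num_channels
+    for start, end, text in sorted(utterances):
+        for c in range(num_channels):
+            if busy_until[c] <= start:
+                channels[c].append(text)
+                busy_until[c] = end
+                break
+    return channels
+
+
+# Overlapping utterances land on different channels; a later utterance
+# reuses the first channel once it is free again:
+print(heat_assign([(0.0, 3.0, "A"), (1.5, 4.0, "B"), (3.5, 5.0, "C")]))
+# -> [['A', 'C'], ['B']]
+```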
+ +## Description of the recipe + +### Pre-requisites + +The recipes in this directory need the following packages to be installed: + +- [meeteval](https://github.com/fgnt/meeteval) +- [einops](https://github.com/arogozhnikov/einops) + +Additionally, we initialize the "recognition" transducer with a pre-trained model, +trained on LibriSpeech. For this, please run the following from within `egs/librispeech/ASR`: + +```bash +./prepare.sh + +export CUDA_VISIBLE_DEVICES="0,1,2,3" +python pruned_transducer_stateless7_streaming/train.py \ + --use-fp16 True \ + --exp-dir pruned_transducer_stateless7_streaming/exp \ + --world-size 4 \ + --max-duration 800 \ + --num-epochs 10 \ + --keep-last-k 1 \ + --manifest-dir data/manifests \ + --enable-musan true \ + --master-port 54321 \ + --bpe-model data/lang_bpe_500/bpe.model \ + --num-encoder-layers 2,2,2,2,2 \ + --feedforward-dims 768,768,768,768,768 \ + --nhead 8,8,8,8,8 \ + --encoder-dims 256,256,256,256,256 \ + --attention-dims 192,192,192,192,192 \ + --encoder-unmasked-dims 192,192,192,192,192 \ + --zipformer-downsampling-factors 1,2,4,8,2 \ + --cnn-module-kernels 31,31,31,31,31 \ + --decoder-dim 512 \ + --joiner-dim 512 +``` + +The above is for SURT-base (~26M). For SURT-large (~38M), use `--num-encoder-layers 2,4,3,2,4`. + +Once the above model is trained for 10 epochs, copy it to `egs/libricss/SURT/exp`: + +```bash +cp -r pruned_transducer_stateless7_streaming/exp/epoch-10.pt exp/zipformer_base.pt +``` + +**NOTE:** We also provide this pre-trained checkpoint (see the section below), so you can skip +the above step if you want. + +### Training + +To train the model, run the following from within `egs/libricss/SURT`: + +```bash +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +python dprnn_zipformer/train.py \ + --use-fp16 True \ + --exp-dir dprnn_zipformer/exp/surt_base \ + --world-size 4 \ + --max-duration 500 \ + --max-duration-valid 250 \ + --max-cuts 200 \ + --num-buckets 50 \ + --num-epochs 30 \ + --enable-spec-aug True \ + --enable-musan False \ + --ctc-loss-scale 0.2 \ + --heat-loss-scale 0.2 \ + --base-lr 0.004 \ + --model-init-ckpt exp/zipformer_base.pt \ + --chunk-width-randomization True \ + --num-mask-encoder-layers 4 \ + --num-encoder-layers 2,2,2,2,2 +``` + +The above is for SURT-base (~26M). For SURT-large (~38M), use: + +```bash + --num-mask-encoder-layers 6 \ + --num-encoder-layers 2,4,3,2,4 \ + --model-init-ckpt exp/zipformer_large.pt \ +``` + +**NOTE:** You may need to decrease the `--max-duration` for SURT-large to avoid OOM. + +### Adaptation + +The training step above only trains on simulated mixtures. For best results, we also +adapt the final model on the LibriCSS dev set. 
For this, run the following from within +`egs/libricss/SURT`: + +```bash +export CUDA_VISIBLE_DEVICES="0" + +python dprnn_zipformer/train_adapt.py \ + --use-fp16 True \ + --exp-dir dprnn_zipformer/exp/surt_base_adapt \ + --world-size 1 \ + --max-duration 500 \ + --max-duration-valid 250 \ + --max-cuts 200 \ + --num-buckets 50 \ + --num-epochs 8 \ + --lr-epochs 2 \ + --enable-spec-aug True \ + --enable-musan False \ + --ctc-loss-scale 0.2 \ + --base-lr 0.0004 \ + --model-init-ckpt dprnn_zipformer/exp/surt_base/epoch-30.pt \ + --chunk-width-randomization True \ + --num-mask-encoder-layers 4 \ + --num-encoder-layers 2,2,2,2,2 +``` + +For SURT-large, use the following config: + +```bash + --num-mask-encoder-layers 6 \ + --num-encoder-layers 2,4,3,2,4 \ + --model-init-ckpt dprnn_zipformer/exp/surt_large/epoch-30.pt \ + --num-epochs 15 \ + --lr-epochs 4 \ +``` + + +### Decoding + +To decode the model, run the following from within `egs/libricss/SURT`: + +#### Greedy search + +```bash +export CUDA_VISIBLE_DEVICES="0" + +python dprnn_zipformer/decode.py \ + --epoch 8 --avg 1 --use-averaged-model False \ + --exp-dir dprnn_zipformer/exp/surt_base_adapt \ + --max-duration 250 \ + --decoding-method greedy_search +``` + +#### Beam search + +```bash +python dprnn_zipformer/decode.py \ + --epoch 8 --avg 1 --use-averaged-model False \ + --exp-dir dprnn_zipformer/exp/surt_base_adapt \ + --max-duration 250 \ + --decoding-method modified_beam_search \ + --beam-size 4 +``` + +## Results (using beam search) + +#### IHM-Mix + +| Model | # params | 0L | 0S | OV10 | OV20 | OV30 | OV40 | Avg. | +|------------|:-------:|:----:|:---:|----:|:----:|:----:|:----:|:----:| +| dprnn_zipformer (base) | 26.7 | 5.1 | 4.2 | 13.7 | 18.7 | 20.5 | 20.6 | 13.8 | +| dprnn_zipformer (large) | 37.9 | 4.6 | 3.8 | 12.7 | 14.3 | 16.7 | 21.2 | 12.2 | + +#### SDM + +| Model | # params | 0L | 0S | OV10 | OV20 | OV30 | OV40 | Avg. | +|------------|:-------:|:----:|:---:|----:|:----:|:----:|:----:|:----:| +| dprnn_zipformer (base) | 26.7 | 6.8 | 7.2 | 21.4 | 24.5 | 28.6 | 31.2 | 20.0 | +| dprnn_zipformer (large) | 37.9 | 6.4 | 6.9 | 17.9 | 19.7 | 25.2 | 25.5 | 16.9 | + +## Pre-trained models and logs + +* Pre-trained models: + +* Training logs: + - surt_base: + - surt_base_adapt: + - surt_large: + - surt_large_adapt: diff --git a/egs/libricss/SURT/dprnn_zipformer/asr_datamodule.py b/egs/libricss/SURT/dprnn_zipformer/asr_datamodule.py new file mode 100644 index 000000000..51df91598 --- /dev/null +++ b/egs/libricss/SURT/dprnn_zipformer/asr_datamodule.py @@ -0,0 +1,372 @@ +# Copyright 2021 Piotr Żelasko +# Copyright 2022 Xiaomi Corporation (Author: Mingshuang Luo) +# Copyright 2023 Johns Hopkins Univrtsity (Author: Desh Raj) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import argparse +import inspect +import logging +from functools import lru_cache +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional + +import torch +from lhotse import CutSet, Fbank, FbankConfig, load_manifest, load_manifest_lazy +from lhotse.dataset import ( # noqa F401 for PrecomputedFeatures + CutMix, + DynamicBucketingSampler, + K2SurtDataset, + PrecomputedFeatures, + SimpleCutSampler, + SpecAugment, +) +from lhotse.dataset.input_strategies import OnTheFlyFeatures +from lhotse.utils import fix_random_seed +from torch.utils.data import DataLoader + +from icefall.utils import str2bool + + +class _SeedWorkers: + def __init__(self, seed: int): + self.seed = seed + + def __call__(self, worker_id: int): + fix_random_seed(self.seed + worker_id) + + +class LibriCssAsrDataModule: + """ + DataModule for k2 ASR experiments. + It assumes there is always one train and valid dataloader, + but there can be multiple test dataloaders (e.g. LibriSpeech test-clean + and test-other). + + It contains all the common data pipeline modules used in ASR + experiments, e.g.: + - dynamic batch size, + - bucketing samplers, + - augmentation, + - on-the-fly feature extraction + + This class should be derived for specific corpora used in ASR tasks. + """ + + def __init__(self, args: argparse.Namespace): + self.args = args + + @classmethod + def add_arguments(cls, parser: argparse.ArgumentParser): + group = parser.add_argument_group( + title="ASR data related options", + description="These options are used for the preparation of " + "PyTorch DataLoaders from Lhotse CutSet's -- they control the " + "effective batch sizes, sampling strategies, applied data " + "augmentations, etc.", + ) + group.add_argument( + "--manifest-dir", + type=Path, + default=Path("data/manifests"), + help="Path to directory with train/valid/test cuts.", + ) + group.add_argument( + "--max-duration", + type=int, + default=200.0, + help="Maximum pooled recordings duration (seconds) in a " + "single batch. You can reduce it if it causes CUDA OOM.", + ) + group.add_argument( + "--max-duration-valid", + type=int, + default=200.0, + help="Maximum pooled recordings duration (seconds) in a " + "single batch. You can reduce it if it causes CUDA OOM.", + ) + group.add_argument( + "--max-cuts", + type=int, + default=100, + help="Maximum number of cuts in a single batch. You can " + "reduce it if it causes CUDA OOM.", + ) + group.add_argument( + "--bucketing-sampler", + type=str2bool, + default=True, + help="When enabled, the batches will come from buckets of " + "similar duration (saves padding frames).", + ) + group.add_argument( + "--num-buckets", + type=int, + default=30, + help="The number of buckets for the DynamicBucketingSampler" + "(you might want to increase it for larger datasets).", + ) + group.add_argument( + "--on-the-fly-feats", + type=str2bool, + default=False, + help=( + "When enabled, use on-the-fly cut mixing and feature " + "extraction. Will drop existing precomputed feature manifests " + "if available." + ), + ) + group.add_argument( + "--shuffle", + type=str2bool, + default=True, + help="When enabled (=default), the examples will be " + "shuffled for each epoch.", + ) + group.add_argument( + "--drop-last", + type=str2bool, + default=True, + help="Whether to drop last batch. 
Used by sampler.", + ) + group.add_argument( + "--return-cuts", + type=str2bool, + default=True, + help="When enabled, each batch will have the " + "field: batch['supervisions']['cut'] with the cuts that " + "were used to construct it.", + ) + + group.add_argument( + "--num-workers", + type=int, + default=2, + help="The number of training dataloader workers that " + "collect the batches.", + ) + + group.add_argument( + "--enable-spec-aug", + type=str2bool, + default=True, + help="When enabled, use SpecAugment for training dataset.", + ) + + group.add_argument( + "--spec-aug-time-warp-factor", + type=int, + default=80, + help="Used only when --enable-spec-aug is True. " + "It specifies the factor for time warping in SpecAugment. " + "Larger values mean more warping. " + "A value less than 1 means to disable time warp.", + ) + + group.add_argument( + "--enable-musan", + type=str2bool, + default=True, + help="When enabled, select noise from MUSAN and mix it" + "with training dataset. ", + ) + + def train_dataloaders( + self, + cuts_train: CutSet, + sampler_state_dict: Optional[Dict[str, Any]] = None, + return_sources: bool = True, + strict: bool = True, + ) -> DataLoader: + """ + Args: + cuts_train: + CutSet for training. + sampler_state_dict: + The state dict for the training sampler. + """ + transforms = [] + if self.args.enable_musan: + logging.info("Enable MUSAN") + logging.info("About to get Musan cuts") + cuts_musan = load_manifest(self.args.manifest_dir / "musan_cuts.jsonl.gz") + transforms.append( + CutMix(cuts=cuts_musan, prob=0.5, snr=(10, 20), preserve_id=True) + ) + else: + logging.info("Disable MUSAN") + + input_transforms = [] + if self.args.enable_spec_aug: + logging.info("Enable SpecAugment") + logging.info(f"Time warp factor: {self.args.spec_aug_time_warp_factor}") + # Set the value of num_frame_masks according to Lhotse's version. + # In different Lhotse's versions, the default of num_frame_masks is + # different. 
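+            # We therefore probe the default value in SpecAugment's
+            # signature and scale our setting accordingly, so that the
+            # augmentation strength stays the same across Lhotse versions.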
+            num_frame_masks = 10
+            num_frame_masks_parameter = inspect.signature(
+                SpecAugment.__init__
+            ).parameters["num_frame_masks"]
+            if num_frame_masks_parameter.default == 1:
+                num_frame_masks = 2
+            logging.info(f"Num frame mask: {num_frame_masks}")
+            input_transforms.append(
+                SpecAugment(
+                    time_warp_factor=self.args.spec_aug_time_warp_factor,
+                    num_frame_masks=num_frame_masks,
+                    features_mask_size=27,
+                    num_feature_masks=2,
+                    frames_mask_size=100,
+                )
+            )
+        else:
+            logging.info("Disable SpecAugment")
+
+        logging.info("About to create train dataset")
+        train = K2SurtDataset(
+            input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
+            if self.args.on_the_fly_feats
+            else PrecomputedFeatures(),
+            cut_transforms=transforms,
+            input_transforms=input_transforms,
+            return_cuts=self.args.return_cuts,
+            return_sources=return_sources,
+            strict=strict,
+        )
+
+        if self.args.bucketing_sampler:
+            logging.info("Using DynamicBucketingSampler.")
+            train_sampler = DynamicBucketingSampler(
+                cuts_train,
+                max_duration=self.args.max_duration,
+                quadratic_duration=30.0,
+                max_cuts=self.args.max_cuts,
+                shuffle=self.args.shuffle,
+                num_buckets=self.args.num_buckets,
+                drop_last=self.args.drop_last,
+            )
+        else:
+            logging.info("Using SimpleCutSampler.")
+            train_sampler = SimpleCutSampler(
+                cuts_train,
+                max_duration=self.args.max_duration,
+                max_cuts=self.args.max_cuts,
+                shuffle=self.args.shuffle,
+            )
+        logging.info("About to create train dataloader")
+
+        if sampler_state_dict is not None:
+            logging.info("Loading sampler state dict")
+            train_sampler.load_state_dict(sampler_state_dict)
+
+        # 'seed' is derived from the current random state, which will have
+        # previously been set in the main process.
+        seed = torch.randint(0, 100000, ()).item()
+        worker_init_fn = _SeedWorkers(seed)
+
+        train_dl = DataLoader(
+            train,
+            sampler=train_sampler,
+            batch_size=None,
+            num_workers=self.args.num_workers,
+            persistent_workers=False,
+            worker_init_fn=worker_init_fn,
+        )
+
+        return train_dl
+
+    def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader:
+        transforms = []
+
+        logging.info("About to create dev dataset")
+        validate = K2SurtDataset(
+            input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
+            if self.args.on_the_fly_feats
+            else PrecomputedFeatures(),
+            cut_transforms=transforms,
+            return_cuts=self.args.return_cuts,
+            return_sources=False,
+            strict=False,
+        )
+        valid_sampler = DynamicBucketingSampler(
+            cuts_valid,
+            max_duration=self.args.max_duration_valid,
+            max_cuts=self.args.max_cuts,
+            shuffle=False,
+        )
+        logging.info("About to create dev dataloader")
+        valid_dl = DataLoader(
+            validate,
+            sampler=valid_sampler,
+            batch_size=None,
+            num_workers=2,
+            persistent_workers=False,
+        )
+
+        return valid_dl
+
+    def test_dataloaders(self, cuts: CutSet) -> DataLoader:
+        logging.debug("About to create test dataset")
+        test = K2SurtDataset(
+            input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
+            if self.args.on_the_fly_feats
+            else PrecomputedFeatures(),
+            return_cuts=self.args.return_cuts,
+            return_sources=False,
+            strict=False,
+        )
+        sampler = DynamicBucketingSampler(
+            cuts,
+            max_duration=self.args.max_duration_valid,
+            max_cuts=self.args.max_cuts,
+            shuffle=False,
+        )
+        logging.debug("About to create test dataloader")
+        test_dl = DataLoader(
+            test,
+            batch_size=None,
+            sampler=sampler,
+            num_workers=self.args.num_workers,
+        )
+        return test_dl
+
+    @lru_cache()
+    def lsmix_cuts(
+        self,
+        rvb_affix: str = "clean",
+        type_affix: str = "full",
+        sources: bool = True,
+    ) -> CutSet:
+        logging.info("About to get train cuts")
+        source_affix = "_sources" if sources else ""
+        cs = load_manifest_lazy(
+            self.args.manifest_dir
+            / f"cuts_train_{rvb_affix}_{type_affix}{source_affix}.jsonl.gz"
+        )
+        cs = cs.filter(lambda c: c.duration >= 1.0 and c.duration <= 30.0)
+        return cs
+
+    @lru_cache()
+    def libricss_cuts(self, split="dev", type="sdm") -> CutSet:
+        logging.info(f"About to get LibriCSS {split} {type} cuts")
+        cs = load_manifest_lazy(
+            self.args.manifest_dir / f"cuts_{split}_libricss-{type}.jsonl.gz"
+        )
+        return cs
diff --git a/egs/libricss/SURT/dprnn_zipformer/beam_search.py b/egs/libricss/SURT/dprnn_zipformer/beam_search.py
new file mode 100644
index 000000000..c8e4643d0
--- /dev/null
+++ b/egs/libricss/SURT/dprnn_zipformer/beam_search.py
@@ -0,0 +1,730 @@
+# Copyright    2021  Xiaomi Corp.        (authors: Fangjun Kuang
+#                                                  Xiaoyu Yang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import warnings
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional, Tuple, Union
+
+import k2
+import torch
+from model import SURT
+
+from icefall import NgramLmStateCost
+from icefall.utils import DecodingResults
+
+
+def greedy_search(
+    model: SURT,
+    encoder_out: torch.Tensor,
+    max_sym_per_frame: int,
+    return_timestamps: bool = False,
+) -> Union[List[int], DecodingResults]:
+    """Greedy search for a single utterance.
+    Args:
+      model:
+        An instance of `SURT`.
+      encoder_out:
+        A tensor of shape (N, T, C) from the encoder. Support only N==1 for now.
+      max_sym_per_frame:
+        Maximum number of symbols per frame. If it is set to 0, the WER
+        would be 100%.
+      return_timestamps:
+        Whether to return timestamps.
+    Returns:
+      If return_timestamps is False, return the decoded result.
+      Else, return a DecodingResults object containing
+      decoded result and corresponding timestamps.
+    """
+    assert encoder_out.ndim == 3
+
+    # support only batch_size == 1 for now
+    assert encoder_out.size(0) == 1, encoder_out.size(0)
+
+    blank_id = model.decoder.blank_id
+    context_size = model.decoder.context_size
+    unk_id = getattr(model, "unk_id", blank_id)
+
+    device = next(model.parameters()).device
+
+    decoder_input = torch.tensor(
+        [-1] * (context_size - 1) + [blank_id], device=device, dtype=torch.int64
+    ).reshape(1, context_size)
+
+    decoder_out = model.decoder(decoder_input, need_pad=False)
+    decoder_out = model.joiner.decoder_proj(decoder_out)
+
+    encoder_out = model.joiner.encoder_proj(encoder_out)
+
+    T = encoder_out.size(1)
+    t = 0
+    hyp = [blank_id] * context_size
+
+    # timestamp[i] is the frame index after subsampling
+    # on which hyp[i] is decoded
+    timestamp = []
+
+    # Maximum symbols per utterance.
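+    # This is a safety cap so that decoding terminates even if the model
+    # keeps emitting non-blank symbols.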
+ max_sym_per_utt = 1000 + + # symbols per frame + sym_per_frame = 0 + + # symbols per utterance decoded so far + sym_per_utt = 0 + + while t < T and sym_per_utt < max_sym_per_utt: + if sym_per_frame >= max_sym_per_frame: + sym_per_frame = 0 + t += 1 + continue + + # fmt: off + current_encoder_out = encoder_out[:, t:t+1, :].unsqueeze(2) + # fmt: on + logits = model.joiner( + current_encoder_out, decoder_out.unsqueeze(1), project_input=False + ) + # logits is (1, 1, 1, vocab_size) + + y = logits.argmax().item() + if y not in (blank_id, unk_id): + hyp.append(y) + timestamp.append(t) + decoder_input = torch.tensor([hyp[-context_size:]], device=device).reshape( + 1, context_size + ) + + decoder_out = model.decoder(decoder_input, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + + sym_per_utt += 1 + sym_per_frame += 1 + else: + sym_per_frame = 0 + t += 1 + hyp = hyp[context_size:] # remove blanks + + if not return_timestamps: + return hyp + else: + return DecodingResults( + hyps=[hyp], + timestamps=[timestamp], + ) + + +def greedy_search_batch( + model: SURT, + encoder_out: torch.Tensor, + encoder_out_lens: torch.Tensor, + return_timestamps: bool = False, +) -> Union[List[List[int]], DecodingResults]: + """Greedy search in batch mode. It hardcodes --max-sym-per-frame=1. + Args: + model: + The SURT model. + encoder_out: + Output from the encoder. Its shape is (N, T, C), where N >= 1. + encoder_out_lens: + A 1-D tensor of shape (N,), containing number of valid frames in + encoder_out before padding. + return_timestamps: + Whether to return timestamps. + Returns: + If return_timestamps is False, return the decoded result. + Else, return a DecodingResults object containing + decoded result and corresponding timestamps. + """ + assert encoder_out.ndim == 3 + assert encoder_out.size(0) >= 1, encoder_out.size(0) + + packed_encoder_out = torch.nn.utils.rnn.pack_padded_sequence( + input=encoder_out, + lengths=encoder_out_lens.cpu(), + batch_first=True, + enforce_sorted=False, + ) + + device = next(model.parameters()).device + + blank_id = model.decoder.blank_id + unk_id = getattr(model, "unk_id", blank_id) + context_size = model.decoder.context_size + + batch_size_list = packed_encoder_out.batch_sizes.tolist() + N = encoder_out.size(0) + assert torch.all(encoder_out_lens > 0), encoder_out_lens + assert N == batch_size_list[0], (N, batch_size_list) + + hyps = [[-1] * (context_size - 1) + [blank_id] for _ in range(N)] + + # timestamp[n][i] is the frame index after subsampling + # on which hyp[n][i] is decoded + timestamps = [[] for _ in range(N)] + + decoder_input = torch.tensor( + hyps, + device=device, + dtype=torch.int64, + ) # (N, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + # decoder_out: (N, 1, decoder_out_dim) + + encoder_out = model.joiner.encoder_proj(packed_encoder_out.data) + + offset = 0 + for (t, batch_size) in enumerate(batch_size_list): + start = offset + end = offset + batch_size + current_encoder_out = encoder_out.data[start:end] + current_encoder_out = current_encoder_out.unsqueeze(1).unsqueeze(1) + # current_encoder_out's shape: (batch_size, 1, 1, encoder_out_dim) + offset = end + + decoder_out = decoder_out[:batch_size] + + logits = model.joiner( + current_encoder_out, decoder_out.unsqueeze(1), project_input=False + ) + # logits'shape (batch_size, 1, 1, vocab_size) + + logits = logits.squeeze(1).squeeze(1) # (batch_size, vocab_size) + assert logits.ndim == 2, logits.shape + y 
= logits.argmax(dim=1).tolist() + emitted = False + for i, v in enumerate(y): + if v not in (blank_id, unk_id): + hyps[i].append(v) + timestamps[i].append(t) + emitted = True + if emitted: + # update decoder output + decoder_input = [h[-context_size:] for h in hyps[:batch_size]] + decoder_input = torch.tensor( + decoder_input, + device=device, + dtype=torch.int64, + ) + decoder_out = model.decoder(decoder_input, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + + sorted_ans = [h[context_size:] for h in hyps] + ans = [] + ans_timestamps = [] + unsorted_indices = packed_encoder_out.unsorted_indices.tolist() + for i in range(N): + ans.append(sorted_ans[unsorted_indices[i]]) + ans_timestamps.append(timestamps[unsorted_indices[i]]) + + if not return_timestamps: + return ans + else: + return DecodingResults( + hyps=ans, + timestamps=ans_timestamps, + ) + + +def modified_beam_search( + model: SURT, + encoder_out: torch.Tensor, + encoder_out_lens: torch.Tensor, + beam: int = 4, + temperature: float = 1.0, + return_timestamps: bool = False, +) -> Union[List[List[int]], DecodingResults]: + """Beam search in batch mode with --max-sym-per-frame=1 being hardcoded. + + Args: + model: + The SURT model. + encoder_out: + Output from the encoder. Its shape is (N, T, C). + encoder_out_lens: + A 1-D tensor of shape (N,), containing number of valid frames in + encoder_out before padding. + beam: + Number of active paths during the beam search. + temperature: + Softmax temperature. + return_timestamps: + Whether to return timestamps. + Returns: + If return_timestamps is False, return the decoded result. + Else, return a DecodingResults object containing + decoded result and corresponding timestamps. + """ + assert encoder_out.ndim == 3, encoder_out.shape + assert encoder_out.size(0) >= 1, encoder_out.size(0) + + packed_encoder_out = torch.nn.utils.rnn.pack_padded_sequence( + input=encoder_out, + lengths=encoder_out_lens.cpu(), + batch_first=True, + enforce_sorted=False, + ) + + blank_id = model.decoder.blank_id + unk_id = getattr(model, "unk_id", blank_id) + context_size = model.decoder.context_size + device = next(model.parameters()).device + + batch_size_list = packed_encoder_out.batch_sizes.tolist() + N = encoder_out.size(0) + assert torch.all(encoder_out_lens > 0), encoder_out_lens + assert N == batch_size_list[0], (N, batch_size_list) + + B = [HypothesisList() for _ in range(N)] + for i in range(N): + B[i].add( + Hypothesis( + ys=[blank_id] * context_size, + log_prob=torch.zeros(1, dtype=torch.float32, device=device), + timestamp=[], + ) + ) + + encoder_out = model.joiner.encoder_proj(packed_encoder_out.data) + + offset = 0 + finalized_B = [] + for (t, batch_size) in enumerate(batch_size_list): + start = offset + end = offset + batch_size + current_encoder_out = encoder_out.data[start:end] + current_encoder_out = current_encoder_out.unsqueeze(1).unsqueeze(1) + # current_encoder_out's shape is (batch_size, 1, 1, encoder_out_dim) + offset = end + + finalized_B = B[batch_size:] + finalized_B + B = B[:batch_size] + + hyps_shape = get_hyps_shape(B).to(device) + + A = [list(b) for b in B] + B = [HypothesisList() for _ in range(batch_size)] + + ys_log_probs = torch.cat( + [hyp.log_prob.reshape(1, 1) for hyps in A for hyp in hyps] + ) # (num_hyps, 1) + + decoder_input = torch.tensor( + [hyp.ys[-context_size:] for hyps in A for hyp in hyps], + device=device, + dtype=torch.int64, + ) # (num_hyps, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False).unsqueeze(1) + 
decoder_out = model.joiner.decoder_proj(decoder_out) + # decoder_out is of shape (num_hyps, 1, 1, joiner_dim) + + # Note: For torch 1.7.1 and below, it requires a torch.int64 tensor + # as index, so we use `to(torch.int64)` below. + current_encoder_out = torch.index_select( + current_encoder_out, + dim=0, + index=hyps_shape.row_ids(1).to(torch.int64), + ) # (num_hyps, 1, 1, encoder_out_dim) + + logits = model.joiner( + current_encoder_out, + decoder_out, + project_input=False, + ) # (num_hyps, 1, 1, vocab_size) + + logits = logits.squeeze(1).squeeze(1) # (num_hyps, vocab_size) + + log_probs = (logits / temperature).log_softmax(dim=-1) # (num_hyps, vocab_size) + + log_probs.add_(ys_log_probs) + + vocab_size = log_probs.size(-1) + + log_probs = log_probs.reshape(-1) + + row_splits = hyps_shape.row_splits(1) * vocab_size + log_probs_shape = k2.ragged.create_ragged_shape2( + row_splits=row_splits, cached_tot_size=log_probs.numel() + ) + ragged_log_probs = k2.RaggedTensor(shape=log_probs_shape, value=log_probs) + + for i in range(batch_size): + topk_log_probs, topk_indexes = ragged_log_probs[i].topk(beam) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + topk_hyp_indexes = (topk_indexes // vocab_size).tolist() + topk_token_indexes = (topk_indexes % vocab_size).tolist() + + for k in range(len(topk_hyp_indexes)): + hyp_idx = topk_hyp_indexes[k] + hyp = A[i][hyp_idx] + + new_ys = hyp.ys[:] + new_token = topk_token_indexes[k] + new_timestamp = hyp.timestamp[:] + if new_token not in (blank_id, unk_id): + new_ys.append(new_token) + new_timestamp.append(t) + + new_log_prob = topk_log_probs[k] + new_hyp = Hypothesis( + ys=new_ys, log_prob=new_log_prob, timestamp=new_timestamp + ) + B[i].add(new_hyp) + + B = B + finalized_B + best_hyps = [b.get_most_probable(length_norm=True) for b in B] + + sorted_ans = [h.ys[context_size:] for h in best_hyps] + sorted_timestamps = [h.timestamp for h in best_hyps] + ans = [] + ans_timestamps = [] + unsorted_indices = packed_encoder_out.unsorted_indices.tolist() + for i in range(N): + ans.append(sorted_ans[unsorted_indices[i]]) + ans_timestamps.append(sorted_timestamps[unsorted_indices[i]]) + + if not return_timestamps: + return ans + else: + return DecodingResults( + hyps=ans, + timestamps=ans_timestamps, + ) + + +def beam_search( + model: SURT, + encoder_out: torch.Tensor, + beam: int = 4, + temperature: float = 1.0, + return_timestamps: bool = False, +) -> Union[List[int], DecodingResults]: + """ + It implements Algorithm 1 in https://arxiv.org/pdf/1211.3711.pdf + + espnet/nets/beam_search_SURT.py#L247 is used as a reference. + + Args: + model: + An instance of `SURT`. + encoder_out: + A tensor of shape (N, T, C) from the encoder. Support only N==1 for now. + beam: + Beam size. + temperature: + Softmax temperature. + return_timestamps: + Whether to return timestamps. + + Returns: + If return_timestamps is False, return the decoded result. + Else, return a DecodingResults object containing + decoded result and corresponding timestamps. 
+ """ + assert encoder_out.ndim == 3 + + # support only batch_size == 1 for now + assert encoder_out.size(0) == 1, encoder_out.size(0) + blank_id = model.decoder.blank_id + unk_id = getattr(model, "unk_id", blank_id) + context_size = model.decoder.context_size + + device = next(model.parameters()).device + + decoder_input = torch.tensor( + [blank_id] * context_size, + device=device, + dtype=torch.int64, + ).reshape(1, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + + encoder_out = model.joiner.encoder_proj(encoder_out) + + T = encoder_out.size(1) + t = 0 + + B = HypothesisList() + B.add(Hypothesis(ys=[blank_id] * context_size, log_prob=0.0, timestamp=[])) + + max_sym_per_utt = 20000 + + sym_per_utt = 0 + + decoder_cache: Dict[str, torch.Tensor] = {} + + while t < T and sym_per_utt < max_sym_per_utt: + # fmt: off + current_encoder_out = encoder_out[:, t:t+1, :].unsqueeze(2) + # fmt: on + A = B + B = HypothesisList() + + joint_cache: Dict[str, torch.Tensor] = {} + + # TODO(fangjun): Implement prefix search to update the `log_prob` + # of hypotheses in A + + while True: + y_star = A.get_most_probable() + A.remove(y_star) + + cached_key = y_star.key + + if cached_key not in decoder_cache: + decoder_input = torch.tensor( + [y_star.ys[-context_size:]], + device=device, + dtype=torch.int64, + ).reshape(1, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + decoder_cache[cached_key] = decoder_out + else: + decoder_out = decoder_cache[cached_key] + + cached_key += f"-t-{t}" + if cached_key not in joint_cache: + logits = model.joiner( + current_encoder_out, + decoder_out.unsqueeze(1), + project_input=False, + ) + + # TODO(fangjun): Scale the blank posterior + log_prob = (logits / temperature).log_softmax(dim=-1) + # log_prob is (1, 1, 1, vocab_size) + log_prob = log_prob.squeeze() + # Now log_prob is (vocab_size,) + joint_cache[cached_key] = log_prob + else: + log_prob = joint_cache[cached_key] + + # First, process the blank symbol + skip_log_prob = log_prob[blank_id] + new_y_star_log_prob = y_star.log_prob + skip_log_prob + + # ys[:] returns a copy of ys + B.add( + Hypothesis( + ys=y_star.ys[:], + log_prob=new_y_star_log_prob, + timestamp=y_star.timestamp[:], + ) + ) + + # Second, process other non-blank labels + values, indices = log_prob.topk(beam + 1) + for i, v in zip(indices.tolist(), values.tolist()): + if i in (blank_id, unk_id): + continue + new_ys = y_star.ys + [i] + new_log_prob = y_star.log_prob + v + new_timestamp = y_star.timestamp + [t] + A.add( + Hypothesis( + ys=new_ys, + log_prob=new_log_prob, + timestamp=new_timestamp, + ) + ) + + # Check whether B contains more than "beam" elements more probable + # than the most probable in A + A_most_probable = A.get_most_probable() + + kept_B = B.filter(A_most_probable.log_prob) + + if len(kept_B) >= beam: + B = kept_B.topk(beam) + break + + t += 1 + + best_hyp = B.get_most_probable(length_norm=True) + ys = best_hyp.ys[context_size:] # [context_size:] to remove blanks + + if not return_timestamps: + return ys + else: + return DecodingResults(hyps=[ys], timestamps=[best_hyp.timestamp]) + + +@dataclass +class Hypothesis: + # The predicted tokens so far. + # Newly predicted tokens are appended to `ys`. + ys: List[int] + + # The log prob of ys. + # It contains only one entry. 
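+    # It is stored as a tensor (rather than a float) so that
+    # `HypothesisList.add()` can combine duplicate hypotheses in place
+    # via `torch.logaddexp()`.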
+ log_prob: torch.Tensor + + # timestamp[i] is the frame index after subsampling + # on which ys[i] is decoded + timestamp: List[int] = field(default_factory=list) + + # the lm score for next token given the current ys + lm_score: Optional[torch.Tensor] = None + + # the RNNLM states (h and c in LSTM) + state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None + + # N-gram LM state + state_cost: Optional[NgramLmStateCost] = None + + @property + def key(self) -> str: + """Return a string representation of self.ys""" + return "_".join(map(str, self.ys)) + + +class HypothesisList(object): + def __init__(self, data: Optional[Dict[str, Hypothesis]] = None) -> None: + """ + Args: + data: + A dict of Hypotheses. Its key is its `value.key`. + """ + if data is None: + self._data = {} + else: + self._data = data + + @property + def data(self) -> Dict[str, Hypothesis]: + return self._data + + def add(self, hyp: Hypothesis) -> None: + """Add a Hypothesis to `self`. + + If `hyp` already exists in `self`, its probability is updated using + `log-sum-exp` with the existed one. + + Args: + hyp: + The hypothesis to be added. + """ + key = hyp.key + if key in self: + old_hyp = self._data[key] # shallow copy + torch.logaddexp(old_hyp.log_prob, hyp.log_prob, out=old_hyp.log_prob) + else: + self._data[key] = hyp + + def get_most_probable(self, length_norm: bool = False) -> Hypothesis: + """Get the most probable hypothesis, i.e., the one with + the largest `log_prob`. + + Args: + length_norm: + If True, the `log_prob` of a hypothesis is normalized by the + number of tokens in it. + Returns: + Return the hypothesis that has the largest `log_prob`. + """ + if length_norm: + return max(self._data.values(), key=lambda hyp: hyp.log_prob / len(hyp.ys)) + else: + return max(self._data.values(), key=lambda hyp: hyp.log_prob) + + def remove(self, hyp: Hypothesis) -> None: + """Remove a given hypothesis. + + Caution: + `self` is modified **in-place**. + + Args: + hyp: + The hypothesis to be removed from `self`. + Note: It must be contained in `self`. Otherwise, + an exception is raised. + """ + key = hyp.key + assert key in self, f"{key} does not exist" + del self._data[key] + + def filter(self, threshold: torch.Tensor) -> "HypothesisList": + """Remove all Hypotheses whose log_prob is less than threshold. + + Caution: + `self` is not modified. Instead, a new HypothesisList is returned. + + Returns: + Return a new HypothesisList containing all hypotheses from `self` + with `log_prob` being greater than the given `threshold`. + """ + ans = HypothesisList() + for _, hyp in self._data.items(): + if hyp.log_prob > threshold: + ans.add(hyp) # shallow copy + return ans + + def topk(self, k: int) -> "HypothesisList": + """Return the top-k hypothesis.""" + hyps = list(self._data.items()) + + hyps = sorted(hyps, key=lambda h: h[1].log_prob, reverse=True)[:k] + + ans = HypothesisList(dict(hyps)) + return ans + + def __contains__(self, key: str): + return key in self._data + + def __iter__(self): + return iter(self._data.values()) + + def __len__(self) -> int: + return len(self._data) + + def __str__(self) -> str: + s = [] + for key in self: + s.append(key) + return ", ".join(s) + + +def get_hyps_shape(hyps: List[HypothesisList]) -> k2.RaggedShape: + """Return a ragged shape with axes [utt][num_hyps]. + + Args: + hyps: + len(hyps) == batch_size. It contains the current hypothesis for + each utterance in the batch. + Returns: + Return a ragged shape with 2 axes [utt][num_hyps]. Note that + the shape is on CPU. 
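+
+    Example:
+      If `hyps` holds 2 utterances with 3 and 2 active hypotheses
+      respectively, the returned shape is `[ [x x x] [x x] ]`.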
+ """ + num_hyps = [len(h) for h in hyps] + + # torch.cumsum() is inclusive sum, so we put a 0 at the beginning + # to get exclusive sum later. + num_hyps.insert(0, 0) + + num_hyps = torch.tensor(num_hyps) + row_splits = torch.cumsum(num_hyps, dim=0, dtype=torch.int32) + ans = k2.ragged.create_ragged_shape2( + row_splits=row_splits, cached_tot_size=row_splits[-1].item() + ) + return ans diff --git a/egs/libricss/SURT/dprnn_zipformer/decode.py b/egs/libricss/SURT/dprnn_zipformer/decode.py new file mode 100755 index 000000000..6abbffe00 --- /dev/null +++ b/egs/libricss/SURT/dprnn_zipformer/decode.py @@ -0,0 +1,654 @@ +#!/usr/bin/env python3 +# +# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +(1) greedy search +./dprnn_zipformer/decode.py \ + --epoch 30 \ + --avg 9 \ + --use-averaged-model true \ + --exp-dir ./dprnn_zipformer/exp \ + --max-duration 600 \ + --decoding-method greedy_search + +(2) modified beam search +./dprnn_zipformer/decode.py \ + --epoch 30 \ + --avg 9 \ + --use-averaged-model true \ + --exp-dir ./dprnn_zipformer/exp \ + --max-duration 600 \ + --decoding-method modified_beam_search \ + --beam-size 4 +""" + + +import argparse +import logging +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import LibriCssAsrDataModule +from beam_search import ( + beam_search, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from lhotse.utils import EPSILON +from train import add_model_arguments, get_params, get_surt_model + +from icefall import LmScorer, NgramLm +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.lexicon import Lexicon +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + str2bool, + write_surt_error_stats, +) + +OVERLAP_RATIOS = ["0L", "0S", "OV10", "OV20", "OV30", "OV40"] + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=30, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 1. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=9, + help="Number of checkpoints to average. 
Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="dprnn_zipformer/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--lang-dir", + type=Path, + default="data/lang_bpe_500", + help="The lang dir containing word table and LG graph", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An integer indicating how many candidates we will keep for each + frame. Used only when --decoding-method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; 2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", + ) + + parser.add_argument( + "--save-masks", + type=str2bool, + default=False, + help="""If true, save masks generated by unmixing module.""", + ) + + add_model_arguments(parser) + + return parser + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + batch: dict, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if greedy_search is used, it would be "greedy_search" + If beam search with a beam size of 7 is used, it would be + "beam_7" + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + Returns: + Return the decoding result. See above description for the format of + the returned dict. + """ + device = next(model.parameters()).device + feature = batch["inputs"] + assert feature.ndim == 3 + + feature = feature.to(device) + feature_lens = batch["input_lens"].to(device) + + # Apply the mask encoder + B, T, F = feature.shape + processed = model.mask_encoder(feature) # B,T,F*num_channels + masks = processed.view(B, T, F, params.num_channels).unbind(dim=-1) + x_masked = [feature * m for m in masks] + + masks_dict = {} + if params.save_masks: + # To save the masks, we split them by batch and trim each mask to the length of + # the corresponding feature. We save them in a dict, where the key is the + # cut ID and the value is the mask. 
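+        # Each saved mask has shape (T_i, F * num_channels): the per-channel
+        # masks of the i-th cut are concatenated along the feature axis.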
+ for i in range(B): + mask = torch.cat( + [x_masked[j][i, : feature_lens[i]] for j in range(params.num_channels)], + dim=-1, + ) + mask = mask.cpu().numpy() + masks_dict[batch["cuts"][i].id] = mask + + # Recognition + # Concatenate the inputs along the batch axis + h = torch.cat(x_masked, dim=0) + h_lens = feature_lens.repeat(params.num_channels) + encoder_out, encoder_out_lens = model.encoder(x=h, x_lens=h_lens) + + if model.joint_encoder_layer is not None: + encoder_out = model.joint_encoder_layer(encoder_out) + + def _group_channels(hyps: List[str]) -> List[List[str]]: + """ + Currently we have a batch of size M*B, where M is the number of + channels and B is the batch size. We need to group the hypotheses + into B groups, each of which contains M hypotheses. + + Example: + hyps = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2'] + _group_channels(hyps) = [['a1', 'a2'], ['b1', 'b2'], ['c1', 'c2']] + """ + assert len(hyps) == B * params.num_channels + out_hyps = [] + for i in range(B): + out_hyps.append(hyps[i::B]) + return out_hyps + + hyps = [] + if params.decoding_method == "greedy_search" and params.max_sym_per_frame == 1: + hyp_tokens = greedy_search_batch( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp) + elif params.decoding_method == "modified_beam_search": + hyp_tokens = modified_beam_search( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam_size, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp) + else: + batch_size = encoder_out.size(0) + + for i in range(batch_size): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.decoding_method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.decoding_method == "beam_search": + hyp = beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + hyps.append(sp.decode(hyp)) + + if params.decoding_method == "greedy_search": + return {"greedy_search": _group_channels(hyps)}, masks_dict + else: + return {f"beam_size_{params.beam_size}": _group_channels(hyps)}, masks_dict + + +def decode_dataset( + dl: torch.utils.data.DataLoader, + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, +) -> Dict[str, List[Tuple[str, List[str], List[str]]]]: + """Decode dataset. + + Args: + dl: + PyTorch's dataloader containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" 
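+        # The sampler may not support __len__ (e.g. when the cuts are read
+        # lazily), in which case the number of batches is unknown.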
+
+    if params.decoding_method == "greedy_search":
+        log_interval = 50
+    else:
+        log_interval = 20
+
+    results = defaultdict(list)
+    masks = {}
+    for batch_idx, batch in enumerate(dl):
+        cut_ids = [cut.id for cut in batch["cuts"]]
+        cuts_batch = batch["cuts"]
+
+        hyps_dict, masks_dict = decode_one_batch(
+            params=params,
+            model=model,
+            sp=sp,
+            batch=batch,
+        )
+        masks.update(masks_dict)
+
+        for name, hyps in hyps_dict.items():
+            this_batch = []
+            for cut_id, hyp_words in zip(cut_ids, hyps):
+                # Reference is a list of supervision texts sorted by start time.
+                ref_words = [
+                    s.text.strip()
+                    for s in sorted(
+                        cuts_batch[cut_id].supervisions, key=lambda s: s.start
+                    )
+                ]
+                this_batch.append((cut_id, ref_words, hyp_words))
+
+            results[name].extend(this_batch)
+
+        num_cuts += len(cut_ids)
+
+        if batch_idx % log_interval == 0:
+            batch_str = f"{batch_idx}/{num_batches}"
+
+            logging.info(f"batch {batch_str}, cuts processed until now is {num_cuts}")
+    return results, masks
+
+
+def save_results(
+    params: AttributeDict,
+    test_set_name: str,
+    results_dict: Dict[str, List[Tuple[str, List[str], List[str]]]],
+):
+    test_set_wers = dict()
+    for key, results in results_dict.items():
+        recog_path = (
+            params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt"
+        )
+        results = sorted(results)
+        store_transcripts(filename=recog_path, texts=results)
+        logging.info(f"The transcripts are stored in {recog_path}")
+
+        # The following prints out WERs, per-word error statistics and aligned
+        # ref/hyp pairs.
+        errs_filename = (
+            params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt"
+        )
+        with open(errs_filename, "w") as f:
+            wer = write_surt_error_stats(
+                f,
+                f"{test_set_name}-{key}",
+                results,
+                enable_log=True,
+                num_channels=params.num_channels,
+            )
+            test_set_wers[key] = wer
+
+        logging.info("Wrote detailed error stats to {}".format(errs_filename))
+
+    test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
+    errs_info = (
+        params.res_dir / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt"
+    )
+    with open(errs_info, "w") as f:
+        print("settings\tWER", file=f)
+        for key, val in test_set_wers:
+            print("{}\t{}".format(key, val), file=f)
+
+    s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
+    note = "\tbest for {}".format(test_set_name)
+    for key, val in test_set_wers:
+        s += "{}\t{}{}\n".format(key, val, note)
+        note = ""
+    logging.info(s)
+
+
+def save_masks(
+    params: AttributeDict,
+    test_set_name: str,
+    masks: List[torch.Tensor],
+):
+    masks_path = params.res_dir / f"masks-{test_set_name}.txt"
+    torch.save(masks, masks_path)
+    logging.info(f"The masks are stored in {masks_path}")
+
+
+@torch.no_grad()
+def main():
+    parser = get_parser()
+    LmScorer.add_arguments(parser)
+    LibriCssAsrDataModule.add_arguments(parser)
+    args = parser.parse_args()
+    args.exp_dir = Path(args.exp_dir)
+    args.lang_dir = Path(args.lang_dir)
+
+    params = get_params()
+    params.update(vars(args))
+
+    assert params.decoding_method in (
+        "greedy_search",
+        "beam_search",
+        "modified_beam_search",
+    ), f"Decoding method {params.decoding_method} is not supported."
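+    # Results for each decoding method are written to their own sub-directory.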
+    params.res_dir = params.exp_dir / params.decoding_method
+
+    if params.iter > 0:
+        params.suffix = f"iter-{params.iter}-avg-{params.avg}"
+    else:
+        params.suffix = f"epoch-{params.epoch}-avg-{params.avg}"
+
+    if "beam_search" in params.decoding_method:
+        params.suffix += f"-{params.decoding_method}-beam-size-{params.beam_size}"
+    else:
+        params.suffix += f"-context-{params.context_size}"
+        params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}"
+
+    if params.use_averaged_model:
+        params.suffix += "-use-averaged-model"
+
+    setup_logger(f"{params.res_dir}/log-decode-{params.suffix}")
+    logging.info("Decoding started")
+
+    device = torch.device("cpu")
+    if torch.cuda.is_available():
+        device = torch.device("cuda", 0)
+
+    logging.info(f"Device: {device}")
+
+    sp = spm.SentencePieceProcessor()
+    sp.load(params.bpe_model)
+
+    # <blk> and <unk> are defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+    params.unk_id = sp.piece_to_id("<unk>")
+    params.vocab_size = sp.get_piece_size()
+
+    logging.info(params)
+
+    logging.info("About to create model")
+    model = get_surt_model(params)
+    assert model.encoder.decode_chunk_size == params.decode_chunk_len // 2, (
+        model.encoder.decode_chunk_size,
+        params.decode_chunk_len,
+    )
+
+    if not params.use_averaged_model:
+        if params.iter > 0:
+            filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[
+                : params.avg
+            ]
+            if len(filenames) == 0:
+                raise ValueError(
+                    f"No checkpoints found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            elif len(filenames) < params.avg:
+                raise ValueError(
+                    f"Not enough checkpoints ({len(filenames)}) found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            logging.info(f"averaging {filenames}")
+            model.to(device)
+            model.load_state_dict(average_checkpoints(filenames, device=device))
+        elif params.avg == 1:
+            load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+        else:
+            start = params.epoch - params.avg + 1
+            filenames = []
+            for i in range(start, params.epoch + 1):
+                if i >= 1:
+                    filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+            logging.info(f"averaging {filenames}")
+            model.to(device)
+            model.load_state_dict(average_checkpoints(filenames, device=device))
+    else:
+        if params.iter > 0:
+            filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[
+                : params.avg + 1
+            ]
+            if len(filenames) == 0:
+                raise ValueError(
+                    f"No checkpoints found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            elif len(filenames) < params.avg + 1:
+                raise ValueError(
+                    f"Not enough checkpoints ({len(filenames)}) found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            filename_start = filenames[-1]
+            filename_end = filenames[0]
+            logging.info(
+                "Calculating the averaged model over iteration checkpoints"
+                f" from {filename_start} (excluded) to {filename_end}"
+            )
+            model.to(device)
+            model.load_state_dict(
+                average_checkpoints_with_averaged_model(
+                    filename_start=filename_start,
+                    filename_end=filename_end,
+                    device=device,
+                )
+            )
+        else:
+            assert params.avg > 0, params.avg
+            start = params.epoch - params.avg
+            assert start >= 1, start
+            filename_start = f"{params.exp_dir}/epoch-{start}.pt"
+            filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt"
+            logging.info(
+                f"Calculating the averaged model over epoch range from "
+                f"{start} (excluded) to {params.epoch}"
+            )
+            model.to(device)
+            model.load_state_dict(
+                average_checkpoints_with_averaged_model(
+                    filename_start=filename_start,
+                    filename_end=filename_end,
+                    device=device,
+                )
+            )
+
+    
model.to(device) + model.eval() + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + # we need cut ids to display recognition results. + args.return_cuts = True + libricss = LibriCssAsrDataModule(args) + + dev_cuts = libricss.libricss_cuts(split="dev", type="ihm-mix").to_eager() + dev_cuts_grouped = [dev_cuts.filter(lambda x: ol in x.id) for ol in OVERLAP_RATIOS] + test_cuts = libricss.libricss_cuts(split="test", type="ihm-mix").to_eager() + test_cuts_grouped = [ + test_cuts.filter(lambda x: ol in x.id) for ol in OVERLAP_RATIOS + ] + + for dev_set, ol in zip(dev_cuts_grouped, OVERLAP_RATIOS): + dev_dl = libricss.test_dataloaders(dev_set) + results_dict, masks = decode_dataset( + dl=dev_dl, + params=params, + model=model, + sp=sp, + ) + + save_results( + params=params, + test_set_name=f"dev_{ol}", + results_dict=results_dict, + ) + + if params.save_masks: + save_masks( + params=params, + test_set_name=f"dev_{ol}", + masks=masks, + ) + + for test_set, ol in zip(test_cuts_grouped, OVERLAP_RATIOS): + test_dl = libricss.test_dataloaders(test_set) + results_dict, masks = decode_dataset( + dl=test_dl, + params=params, + model=model, + sp=sp, + ) + + save_results( + params=params, + test_set_name=f"test_{ol}", + results_dict=results_dict, + ) + + if params.save_masks: + save_masks( + params=params, + test_set_name=f"test_{ol}", + masks=masks, + ) + + logging.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/egs/libricss/SURT/dprnn_zipformer/decoder.py b/egs/libricss/SURT/dprnn_zipformer/decoder.py new file mode 120000 index 000000000..8283d8c5a --- /dev/null +++ b/egs/libricss/SURT/dprnn_zipformer/decoder.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless7/decoder.py \ No newline at end of file diff --git a/egs/libricss/SURT/dprnn_zipformer/dprnn.py b/egs/libricss/SURT/dprnn_zipformer/dprnn.py new file mode 100644 index 000000000..440dea885 --- /dev/null +++ b/egs/libricss/SURT/dprnn_zipformer/dprnn.py @@ -0,0 +1,305 @@ +import random +from typing import Optional, Tuple + +import torch +import torch.nn as nn +from einops import rearrange +from scaling import ActivationBalancer, BasicNorm, DoubleSwish, ScaledLinear, ScaledLSTM +from torch.autograd import Variable + +EPS = torch.finfo(torch.get_default_dtype()).eps + + +def _pad_segment(input, segment_size): + # Source: https://github.com/espnet/espnet/blob/master/espnet2/enh/layers/dprnn.py#L342 + # input is the features: (B, N, T) + batch_size, dim, seq_len = input.shape + segment_stride = segment_size // 2 + + rest = segment_size - (segment_stride + seq_len % segment_size) % segment_size + if rest > 0: + pad = Variable(torch.zeros(batch_size, dim, rest)).type(input.type()) + input = torch.cat([input, pad], 2) + + pad_aux = Variable(torch.zeros(batch_size, dim, segment_stride)).type(input.type()) + input = torch.cat([pad_aux, input, pad_aux], 2) + + return input, rest + + +def split_feature(input, segment_size): + # Source: https://github.com/espnet/espnet/blob/master/espnet2/enh/layers/dprnn.py#L358 + # split the feature into chunks of segment size + # input is the features: (B, N, T) + + input, rest = _pad_segment(input, segment_size) + batch_size, dim, seq_len = input.shape + segment_stride = segment_size // 2 + + segments1 = ( + input[:, :, :-segment_stride] + .contiguous() + .view(batch_size, dim, -1, segment_size) + ) + segments2 = ( + input[:, :, segment_stride:] + .contiguous() + .view(batch_size, dim, -1, segment_size) + ) + 
segments = ( + torch.cat([segments1, segments2], 3) + .view(batch_size, dim, -1, segment_size) + .transpose(2, 3) + ) + + return segments.contiguous(), rest + + +def merge_feature(input, rest): + # Source: https://github.com/espnet/espnet/blob/master/espnet2/enh/layers/dprnn.py#L385 + # merge the splitted features into full utterance + # input is the features: (B, N, L, K) + + batch_size, dim, segment_size, _ = input.shape + segment_stride = segment_size // 2 + input = ( + input.transpose(2, 3).contiguous().view(batch_size, dim, -1, segment_size * 2) + ) # B, N, K, L + + input1 = ( + input[:, :, :, :segment_size] + .contiguous() + .view(batch_size, dim, -1)[:, :, segment_stride:] + ) + input2 = ( + input[:, :, :, segment_size:] + .contiguous() + .view(batch_size, dim, -1)[:, :, :-segment_stride] + ) + + output = input1 + input2 + if rest > 0: + output = output[:, :, :-rest] + + return output.contiguous() # B, N, T + + +class RNNEncoderLayer(nn.Module): + """ + RNNEncoderLayer is made up of lstm and feedforward networks. + Args: + input_size: + The number of expected features in the input (required). + hidden_size: + The hidden dimension of rnn layer. + dropout: + The dropout value (default=0.1). + layer_dropout: + The dropout value for model-level warmup (default=0.075). + """ + + def __init__( + self, + input_size: int, + hidden_size: int, + dropout: float = 0.1, + bidirectional: bool = False, + ) -> None: + super(RNNEncoderLayer, self).__init__() + self.input_size = input_size + self.hidden_size = hidden_size + + assert hidden_size >= input_size, (hidden_size, input_size) + self.lstm = ScaledLSTM( + input_size=input_size, + hidden_size=hidden_size // 2 if bidirectional else hidden_size, + proj_size=0, + num_layers=1, + dropout=0.0, + batch_first=True, + bidirectional=bidirectional, + ) + self.norm_final = BasicNorm(input_size) + + # try to ensure the output is close to zero-mean (or at least, zero-median). # noqa + self.balancer = ActivationBalancer( + num_channels=input_size, + channel_dim=-1, + min_positive=0.45, + max_positive=0.55, + max_abs=6.0, + ) + self.dropout = nn.Dropout(dropout) + + def forward( + self, + src: torch.Tensor, + states: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + warmup: float = 1.0, + ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + """ + Pass the input through the encoder layer. + Args: + src: + The sequence to the encoder layer (required). + Its shape is (S, N, E), where S is the sequence length, + N is the batch size, and E is the feature number. + states: + A tuple of 2 tensors (optional). It is for streaming inference. + states[0] is the hidden states of all layers, + with shape of (1, N, input_size); + states[1] is the cell states of all layers, + with shape of (1, N, hidden_size). + """ + src_orig = src + + # alpha = 1.0 means fully use this encoder layer, 0.0 would mean + # completely bypass it. + alpha = warmup if self.training else 1.0 + + # lstm module + src_lstm, new_states = self.lstm(src, states) + src = self.dropout(src_lstm) + src + src = self.norm_final(self.balancer(src)) + + if alpha != 1.0: + src = alpha * src + (1 - alpha) * src_orig + + return src + + +# dual-path RNN +class DPRNN(nn.Module): + """Deep dual-path RNN. + Source: https://github.com/espnet/espnet/blob/master/espnet2/enh/layers/dprnn.py + + args: + input_size: int, dimension of the input feature. The input should have shape + (batch, seq_len, input_size). + hidden_size: int, dimension of the hidden state. + output_size: int, dimension of the output size. 
+ dropout: float, dropout ratio. Default is 0. + num_blocks: int, number of stacked RNN layers. Default is 1. + """ + + def __init__( + self, + feature_dim, + input_size, + hidden_size, + output_size, + dropout=0.1, + num_blocks=1, + segment_size=50, + chunk_width_randomization=False, + ): + super().__init__() + + self.input_size = input_size + self.output_size = output_size + self.hidden_size = hidden_size + + self.segment_size = segment_size + self.chunk_width_randomization = chunk_width_randomization + + self.input_embed = nn.Sequential( + ScaledLinear(feature_dim, input_size), + BasicNorm(input_size), + ActivationBalancer( + num_channels=input_size, + channel_dim=-1, + min_positive=0.45, + max_positive=0.55, + ), + ) + + # dual-path RNN + self.row_rnn = nn.ModuleList([]) + self.col_rnn = nn.ModuleList([]) + for _ in range(num_blocks): + # intra-RNN is non-causal + self.row_rnn.append( + RNNEncoderLayer( + input_size, hidden_size, dropout=dropout, bidirectional=True + ) + ) + self.col_rnn.append( + RNNEncoderLayer( + input_size, hidden_size, dropout=dropout, bidirectional=False + ) + ) + + # output layer + self.out_embed = nn.Sequential( + ScaledLinear(input_size, output_size), + BasicNorm(output_size), + ActivationBalancer( + num_channels=output_size, + channel_dim=-1, + min_positive=0.45, + max_positive=0.55, + ), + ) + + def forward(self, input): + # input shape: B, T, F + input = self.input_embed(input) + B, T, D = input.shape + + if self.chunk_width_randomization and self.training: + segment_size = random.randint(self.segment_size // 2, self.segment_size) + else: + segment_size = self.segment_size + input, rest = split_feature(input.transpose(1, 2), segment_size) + # input shape: batch, N, dim1, dim2 + # apply RNN on dim1 first and then dim2 + # output shape: B, output_size, dim1, dim2 + # input = input.to(device) + batch_size, _, dim1, dim2 = input.shape + output = input + for i in range(len(self.row_rnn)): + row_input = ( + output.permute(0, 3, 2, 1) + .contiguous() + .view(batch_size * dim2, dim1, -1) + ) # B*dim2, dim1, N + output = self.row_rnn[i](row_input) # B*dim2, dim1, H + output = ( + output.view(batch_size, dim2, dim1, -1).permute(0, 3, 2, 1).contiguous() + ) # B, N, dim1, dim2 + + col_input = ( + output.permute(0, 2, 3, 1) + .contiguous() + .view(batch_size * dim1, dim2, -1) + ) # B*dim1, dim2, N + output = self.col_rnn[i](col_input) # B*dim1, dim2, H + output = ( + output.view(batch_size, dim1, dim2, -1).permute(0, 3, 1, 2).contiguous() + ) # B, N, dim1, dim2 + + output = merge_feature(output, rest) + output = output.transpose(1, 2) + output = self.out_embed(output) + + # Apply ReLU to the output + output = torch.relu(output) + + return output + + +if __name__ == "__main__": + + model = DPRNN( + 80, + 256, + 256, + 160, + dropout=0.1, + num_blocks=4, + segment_size=32, + chunk_width_randomization=True, + ) + input = torch.randn(2, 1002, 80) + print(sum(p.numel() for p in model.parameters())) + print(model(input).shape) diff --git a/egs/libricss/SURT/dprnn_zipformer/encoder_interface.py b/egs/libricss/SURT/dprnn_zipformer/encoder_interface.py new file mode 120000 index 000000000..0c2673d46 --- /dev/null +++ b/egs/libricss/SURT/dprnn_zipformer/encoder_interface.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless7/encoder_interface.py \ No newline at end of file diff --git a/egs/libricss/SURT/dprnn_zipformer/export.py b/egs/libricss/SURT/dprnn_zipformer/export.py new file mode 100755 index 000000000..f51f2a7ab --- /dev/null +++ 
b/egs/libricss/SURT/dprnn_zipformer/export.py @@ -0,0 +1,306 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script converts several saved checkpoints +# to a single one using model averaging. +""" + +Usage: + +(1) Export to torchscript model using torch.jit.script() + +./dprnn_zipformer/export.py \ + --exp-dir ./dprnn_zipformer/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 30 \ + --avg 9 \ + --jit 1 + +It will generate a file `cpu_jit.pt` in the given `exp_dir`. You can later +load it by `torch.jit.load("cpu_jit.pt")`. + +Note `cpu` in the name `cpu_jit.pt` means the parameters when loaded into Python +are on CPU. You can use `to("cuda")` to move them to a CUDA device. + +Check +https://github.com/k2-fsa/sherpa +for how to use the exported models outside of icefall. + +(2) Export `model.state_dict()` + +./dprnn_zipformer/export.py \ + --exp-dir ./dprnn_zipformer/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 30 \ + --avg 9 + +It will generate a file `pretrained.pt` in the given `exp_dir`. You can later +load it by `icefall.checkpoint.load_checkpoint()`. + +To use the generated file with `dprnn_zipformer/decode.py`, +you can do: + + cd /path/to/exp_dir + ln -s pretrained.pt epoch-9999.pt + + cd /path/to/egs/librispeech/ASR + ./dprnn_zipformer/decode.py \ + --exp-dir ./dprnn_zipformer/exp \ + --epoch 9999 \ + --avg 1 \ + --max-duration 600 \ + --decoding-method greedy_search \ + --bpe-model data/lang_bpe_500/bpe.model +""" + +import argparse +import logging +from pathlib import Path + +import sentencepiece as spm +import torch +import torch.nn as nn +from scaling_converter import convert_scaled_to_non_scaled +from train import add_model_arguments, get_params, get_surt_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.utils import str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=30, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 1. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=9, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. 
Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="dprnn_zipformer/exp", + help="""It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--jit", + type=str2bool, + default=False, + help="""True to save a model after applying torch.jit.script. + It will generate a file named cpu_jit.pt + + Check ./jit_pretrained.py for how to use it. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; 2 means trigram", + ) + + add_model_arguments(parser) + + return parser + + +@torch.no_grad() +def main(): + args = get_parser().parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # <blk> is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("<blk>") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_surt_model(params) + + model.to(device) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[ + : params.avg + ] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[ + : params.avg + 1 + ] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start =
f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.to("cpu") + model.eval() + + if params.jit is True: + convert_scaled_to_non_scaled(model, inplace=True) + # We won't use the forward() method of the model in C++, so just ignore + # it here. + # Otherwise, one of its arguments is a ragged tensor and is not + # torch scriptabe. + model.__class__.forward = torch.jit.ignore(model.__class__.forward) + logging.info("Using torch.jit.script") + model = torch.jit.script(model) + filename = params.exp_dir / "cpu_jit.pt" + model.save(str(filename)) + logging.info(f"Saved to {filename}") + else: + logging.info("Not using torchscript. Export model.state_dict()") + # Save it using a format so that it can be loaded + # by :func:`load_checkpoint` + filename = params.exp_dir / "pretrained.pt" + torch.save({"model": model.state_dict()}, str(filename)) + logging.info(f"Saved to {filename}") + + +if __name__ == "__main__": + formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/libricss/SURT/dprnn_zipformer/joiner.py b/egs/libricss/SURT/dprnn_zipformer/joiner.py new file mode 120000 index 000000000..0f0c3c90a --- /dev/null +++ b/egs/libricss/SURT/dprnn_zipformer/joiner.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless7/joiner.py \ No newline at end of file diff --git a/egs/libricss/SURT/dprnn_zipformer/model.py b/egs/libricss/SURT/dprnn_zipformer/model.py new file mode 100644 index 000000000..688e1e78d --- /dev/null +++ b/egs/libricss/SURT/dprnn_zipformer/model.py @@ -0,0 +1,316 @@ +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, Wei Kang) +# Copyright 2023 Johns Hopkins University (author: Desh Raj) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Tuple + +import k2 +import torch +import torch.nn as nn +from encoder_interface import EncoderInterface + +from icefall.utils import add_sos + + +class SURT(nn.Module): + """It implements Streaming Unmixing and Recognition Transducer (SURT). + https://arxiv.org/abs/2011.13148 + """ + + def __init__( + self, + mask_encoder: nn.Module, + encoder: EncoderInterface, + joint_encoder_layer: Optional[nn.Module], + decoder: nn.Module, + joiner: nn.Module, + num_channels: int, + encoder_dim: int, + decoder_dim: int, + joiner_dim: int, + vocab_size: int, + ): + """ + Args: + mask_encoder: + It is the masking network. It generates a mask for each channel of the + encoder. These masks are applied to the input features, and then passed + to the transcription network. 
+ encoder: + It is the transcription network in the paper. It accepts + two inputs: `x` of (N, T, encoder_dim) and `x_lens` of shape (N,). + It returns two tensors: `logits` of shape (N, T, encoder_dim) and + `logit_lens` of shape (N,). + decoder: + It is the prediction network in the paper. Its input shape + is (N, U) and its output shape is (N, U, decoder_dim). + It should contain one attribute: `blank_id`. + joiner: + It has two inputs with shapes: (N, T, encoder_dim) and (N, U, decoder_dim). + Its output shape is (N, T, U, vocab_size). Note that its output contains + unnormalized probs, i.e., not processed by log-softmax. + num_channels: + It is the number of channels that the input features will be split into. + In general, it should be equal to the maximum number of simultaneously + active speakers. For most real scenarios, using 2 channels is sufficient. + """ + super().__init__() + assert isinstance(encoder, EncoderInterface), type(encoder) + assert hasattr(decoder, "blank_id") + + self.mask_encoder = mask_encoder + self.encoder = encoder + self.joint_encoder_layer = joint_encoder_layer + self.decoder = decoder + self.joiner = joiner + self.num_channels = num_channels + + self.simple_am_proj = nn.Linear( + encoder_dim, + vocab_size, + ) + self.simple_lm_proj = nn.Linear(decoder_dim, vocab_size) + + self.ctc_output = nn.Sequential( + nn.Dropout(p=0.1), + nn.Linear(encoder_dim, vocab_size), + nn.LogSoftmax(dim=-1), + ) + + def forward_helper( + self, + x: torch.Tensor, + x_lens: torch.Tensor, + y: k2.RaggedTensor, + prune_range: int = 5, + am_scale: float = 0.0, + lm_scale: float = 0.0, + reduction: str = "sum", + beam_size: int = 10, + use_double_scores: bool = False, + subsampling_factor: int = 1, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Compute transducer loss for one branch of the SURT model. + """ + encoder_out, x_lens = self.encoder(x, x_lens) + assert torch.all(x_lens > 0) + + if self.joint_encoder_layer is not None: + encoder_out = self.joint_encoder_layer(encoder_out) + + # compute ctc log-probs + ctc_output = self.ctc_output(encoder_out) + + # For the decoder, i.e., the prediction network + row_splits = y.shape.row_splits(1) + y_lens = row_splits[1:] - row_splits[:-1] + + blank_id = self.decoder.blank_id + sos_y = add_sos(y, sos_id=blank_id) + + # sos_y_padded: [B, S + 1], start with SOS.
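+ # E.g., with blank_id=0: y = [[2, 5], [7]] gives + # sos_y_padded = [[0, 2, 5], [0, 7, 0]].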
+ sos_y_padded = sos_y.pad(mode="constant", padding_value=blank_id) + + # decoder_out: [B, S + 1, decoder_dim] + decoder_out = self.decoder(sos_y_padded) + + # Note: y does not start with SOS + # y_padded : [B, S] + y_padded = y.pad(mode="constant", padding_value=0) + + y_padded = y_padded.to(torch.int64) + boundary = torch.zeros((x.size(0), 4), dtype=torch.int64, device=x.device) + boundary[:, 2] = y_lens + boundary[:, 3] = x_lens + + lm = self.simple_lm_proj(decoder_out) + am = self.simple_am_proj(encoder_out) + + with torch.cuda.amp.autocast(enabled=False): + simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed( + lm=lm.float(), + am=am.float(), + symbols=y_padded, + termination_symbol=blank_id, + lm_only_scale=lm_scale, + am_only_scale=am_scale, + boundary=boundary, + reduction=reduction, + return_grad=True, + ) + + # ranges : [B, T, prune_range] + ranges = k2.get_rnnt_prune_ranges( + px_grad=px_grad, + py_grad=py_grad, + boundary=boundary, + s_range=prune_range, + ) + + # am_pruned : [B, T, prune_range, encoder_dim] + # lm_pruned : [B, T, prune_range, decoder_dim] + am_pruned, lm_pruned = k2.do_rnnt_pruning( + am=self.joiner.encoder_proj(encoder_out), + lm=self.joiner.decoder_proj(decoder_out), + ranges=ranges, + ) + + # logits : [B, T, prune_range, vocab_size] + + # project_input=False since we applied the decoder's input projections + # prior to do_rnnt_pruning (this is an optimization for speed). + logits = self.joiner(am_pruned, lm_pruned, project_input=False) + + with torch.cuda.amp.autocast(enabled=False): + pruned_loss = k2.rnnt_loss_pruned( + logits=logits.float(), + symbols=y_padded, + ranges=ranges, + termination_symbol=blank_id, + boundary=boundary, + reduction=reduction, + ) + + # Compute ctc loss + supervision_segments = torch.stack( + ( + torch.arange(len(x_lens), device="cpu"), + torch.zeros_like(x_lens, device="cpu"), + torch.clone(x_lens).detach().cpu(), + ), + dim=1, + ).to(torch.int32) + # We need to sort supervision_segments in decreasing order of num_frames + indices = torch.argsort(supervision_segments[:, 2], descending=True) + supervision_segments = supervision_segments[indices] + + # Works with a BPE model + decoding_graph = k2.ctc_graph(y, modified=False, device=x.device) + dense_fsa_vec = k2.DenseFsaVec( + ctc_output, + supervision_segments, + allow_truncate=subsampling_factor - 1, + ) + ctc_loss = k2.ctc_loss( + decoding_graph=decoding_graph, + dense_fsa_vec=dense_fsa_vec, + output_beam=beam_size, + reduction="none", + use_double_scores=use_double_scores, + ) + + return (simple_loss, pruned_loss, ctc_loss) + + def forward( + self, + x: torch.Tensor, + x_lens: torch.Tensor, + y: k2.RaggedTensor, + prune_range: int = 5, + am_scale: float = 0.0, + lm_scale: float = 0.0, + reduction: str = "sum", + beam_size: int = 10, + use_double_scores: bool = False, + subsampling_factor: int = 1, + return_masks: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Args: + x: + A 3-D tensor of shape (N, T, C). + x_lens: + A 1-D tensor of shape (N,). It contains the number of frames in `x` + before padding. + y: + A ragged tensor of shape (N*num_channels, S). It contains the labels + of the N utterances. The labels are in the range [0, vocab_size). All + the channels are concatenated together one after another. + prune_range: + The prune range for rnnt loss; it means how many symbols (context) + we consider for each frame when computing the loss.
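+ For example, prune_range=5 restricts the pruned loss to a window of + at most 5 symbol positions per frame, chosen using the gradients of + the simple (smoothed) loss.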
+ am_scale: + The scale to smooth the loss with am (output of encoder network) + part + lm_scale: + The scale to smooth the loss with lm (output of predictor network) + part + reduction: + "sum" to sum the losses over all utterances in the batch. + "none" to return the loss in a 1-D tensor for each utterance + in the batch. + beam_size: + The beam size used in CTC decoding. + use_double_scores: + If True, use double precision for CTC decoding. + subsampling_factor: + The subsampling factor of the model. It is used to compute the + supervision segments for CTC loss. + return_masks: + If True, return the masks as well as masked features. + Returns: + Return the transducer loss. + + Note: + Regarding am_scale & lm_scale, it will make the loss function of + the form: + lm_scale * lm_probs + am_scale * am_probs + + (1-lm_scale-am_scale) * combined_probs + """ + assert x.ndim == 3, x.shape + assert x_lens.ndim == 1, x_lens.shape + assert y.num_axes == 2, y.num_axes + + assert x.size(0) == x_lens.size(0), (x.size(), x_lens.size()) + + # Apply the mask encoder + B, T, F = x.shape + processed = self.mask_encoder(x) # B,T,F*num_channels + masks = processed.view(B, T, F, self.num_channels).unbind(dim=-1) + x_masked = [x * m for m in masks] + + # Recognition + # Stack the inputs along the batch axis + h = torch.cat(x_masked, dim=0) + h_lens = torch.cat([x_lens for _ in range(self.num_channels)], dim=0) + + simple_loss, pruned_loss, ctc_loss = self.forward_helper( + h, + h_lens, + y, + prune_range, + am_scale, + lm_scale, + reduction=reduction, + beam_size=beam_size, + use_double_scores=use_double_scores, + subsampling_factor=subsampling_factor, + ) + + # Chunk the outputs into num_channels parts along the batch axis and then + # stack them along a new axis. + simple_loss = torch.stack( + torch.chunk(simple_loss, self.num_channels, dim=0), dim=0 + ) + pruned_loss = torch.stack( + torch.chunk(pruned_loss, self.num_channels, dim=0), dim=0 + ) + ctc_loss = torch.stack(torch.chunk(ctc_loss, self.num_channels, dim=0), dim=0) + + if return_masks: + return (simple_loss, pruned_loss, ctc_loss, x_masked, masks) + else: + return (simple_loss, pruned_loss, ctc_loss, x_masked) diff --git a/egs/libricss/SURT/dprnn_zipformer/optim.py b/egs/libricss/SURT/dprnn_zipformer/optim.py new file mode 120000 index 000000000..8a05abb5f --- /dev/null +++ b/egs/libricss/SURT/dprnn_zipformer/optim.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless7/optim.py \ No newline at end of file diff --git a/egs/libricss/SURT/dprnn_zipformer/scaling.py b/egs/libricss/SURT/dprnn_zipformer/scaling.py new file mode 120000 index 000000000..5f9be9fe0 --- /dev/null +++ b/egs/libricss/SURT/dprnn_zipformer/scaling.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless7/scaling.py \ No newline at end of file diff --git a/egs/libricss/SURT/dprnn_zipformer/scaling_converter.py b/egs/libricss/SURT/dprnn_zipformer/scaling_converter.py new file mode 120000 index 000000000..f9960e5c6 --- /dev/null +++ b/egs/libricss/SURT/dprnn_zipformer/scaling_converter.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless7/scaling_converter.py \ No newline at end of file diff --git a/egs/libricss/SURT/dprnn_zipformer/train.py b/egs/libricss/SURT/dprnn_zipformer/train.py new file mode 100755 index 000000000..6598f8b5d --- /dev/null +++ b/egs/libricss/SURT/dprnn_zipformer/train.py @@ -0,0 +1,1452 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp.
(authors: Fangjun Kuang, +# Wei Kang, +# Mingshuang Luo, +# Zengwei Yao) +# 2023 Johns Hopkins University (author: Desh Raj) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: + +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +cd egs/libricss/SURT +./prepare.sh + +./dprnn_zipformer/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --exp-dir dprnn_zipformer/exp \ + --max-duration 300 + +# For mixed precision training: + +./dprnn_zipformer/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --use-fp16 1 \ + --exp-dir dprnn_zipformer/exp \ + --max-duration 550 +""" + +import argparse +import copy +import logging +import warnings +from pathlib import Path +from shutil import copyfile +from typing import Any, Dict, Optional, Tuple, Union + +import k2 +import optim +import sentencepiece as spm +import torch +import torch.multiprocessing as mp +import torch.nn as nn +from asr_datamodule import LibriCssAsrDataModule +from decoder import Decoder +from dprnn import DPRNN +from einops.layers.torch import Rearrange +from graph_pit.loss.optimized import optimized_graph_pit_mse_loss as gpit_mse +from joiner import Joiner +from lhotse.cut import Cut +from lhotse.dataset.sampling.base import CutSampler +from lhotse.utils import LOG_EPSILON, fix_random_seed +from model import SURT +from optim import Eden, ScaledAdam +from scaling import ScaledLSTM +from torch import Tensor +from torch.cuda.amp import GradScaler +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.tensorboard import SummaryWriter +from zipformer import Zipformer + +from icefall import diagnostics +from icefall.checkpoint import load_checkpoint, remove_checkpoints +from icefall.checkpoint import save_checkpoint as save_checkpoint_impl +from icefall.checkpoint import ( + save_checkpoint_with_global_batch_idx, + update_averaged_model, +) +from icefall.dist import cleanup_dist, setup_dist +from icefall.env import get_env_info +from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool + +LRSchedulerType = Union[torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler] + + +def set_batch_count(model: Union[nn.Module, DDP], batch_count: float) -> None: + if isinstance(model, DDP): + # get underlying nn.Module + model = model.module + for module in model.modules(): + if hasattr(module, "batch_count"): + module.batch_count = batch_count + + +def add_model_arguments(parser: argparse.ArgumentParser): + parser.add_argument( + "--num-mask-encoder-layers", + type=int, + default=4, + help="Number of layers in the DPRNN based mask encoder.", + ) + + parser.add_argument( + "--mask-encoder-dim", + type=int, + default=256, + help="Hidden dimension of the LSTM blocks in DPRNN.", + ) + + parser.add_argument( + "--mask-encoder-segment-size", + type=int, + default=32, + help="Segment size of the SegLSTM in DPRNN.
Ideally, this should be equal to the " + "decode-chunk-length of the zipformer encoder.", + ) + + parser.add_argument( + "--chunk-width-randomization", + type=str2bool, + default=False, + help="Whether to randomize the chunk width in DPRNN.", + ) + + # Zipformer config is based on: + # https://github.com/k2-fsa/icefall/pull/745#issuecomment-1405282740 + parser.add_argument( + "--num-encoder-layers", + type=str, + default="2,2,2,2,2", + help="Number of zipformer encoder layers, comma separated.", + ) + + parser.add_argument( + "--feedforward-dims", + type=str, + default="768,768,768,768,768", + help="Feedforward dimension of the zipformer encoder layers, comma separated.", + ) + + parser.add_argument( + "--nhead", + type=str, + default="8,8,8,8,8", + help="Number of attention heads in the zipformer encoder layers.", + ) + + parser.add_argument( + "--encoder-dims", + type=str, + default="256,256,256,256,256", + help="Embedding dimension in the 2 blocks of zipformer encoder layers, comma separated", + ) + + parser.add_argument( + "--attention-dims", + type=str, + default="192,192,192,192,192", + help="""Attention dimension in the 2 blocks of zipformer encoder layers, comma separated; + not the same as embedding dimension.""", + ) + + parser.add_argument( + "--encoder-unmasked-dims", + type=str, + default="192,192,192,192,192", + help="Unmasked dimensions in the encoders, relates to augmentation during training. " + "Must be <= each of encoder_dims. Empirically, less than 256 seems to make performance " + " worse.", + ) + + parser.add_argument( + "--zipformer-downsampling-factors", + type=str, + default="1,2,4,8,2", + help="Downsampling factor for each stack of encoder layers.", + ) + + parser.add_argument( + "--cnn-module-kernels", + type=str, + default="31,31,31,31,31", + help="Sizes of kernels in convolution modules", + ) + + parser.add_argument( + "--use-joint-encoder-layer", + type=str, + default="lstm", + choices=["linear", "lstm", "none"], + help="Type of joint layer used to combine all branches.", + ) + + parser.add_argument( + "--decoder-dim", + type=int, + default=512, + help="Embedding dimension in the decoder model.", + ) + + parser.add_argument( + "--joiner-dim", + type=int, + default=512, + help="""Dimension used in the joiner model. + Outputs from the encoder and decoder model are projected + to this dimension before adding. + """, + ) + + parser.add_argument( + "--short-chunk-size", + type=int, + default=50, + help="""Chunk length of dynamic training, the chunk size would be either + max sequence length of current batch or uniformly sampled from (1, short_chunk_size).
+ """, + ) + + parser.add_argument( + "--num-left-chunks", + type=int, + default=4, + help="How many left context can be seen in chunks when calculating attention.", + ) + + parser.add_argument( + "--decode-chunk-len", + type=int, + default=32, + help="The chunk size for decoding (in frames before subsampling)", + ) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=30, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=1, + help="""Resume training from this epoch. It should be positive. + If larger than 1, it will load checkpoint from + exp-dir/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--start-batch", + type=int, + default=0, + help="""If positive, --start-epoch is ignored and + it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="conv_lstm_transducer_stateless_ctc/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--model-init-ckpt", + type=str, + default=None, + help="""The model checkpoint to initialize the model (either full or part). + If not specified, the model is randomly initialized. + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--base-lr", type=float, default=0.004, help="The base learning rate." + ) + + parser.add_argument( + "--lr-batches", + type=float, + default=5000, + help="""Number of steps that affects how rapidly the learning rate + decreases. We suggest not to change this.""", + ) + + parser.add_argument( + "--lr-epochs", + type=float, + default=6, + help="""Number of epochs that affects how rapidly the learning rate decreases. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; 2 means tri-gram", + ) + + parser.add_argument( + "--prune-range", + type=int, + default=5, + help="The prune range for rnnt loss, it means how many symbols(context)" + "we are using to compute the loss", + ) + + parser.add_argument( + "--lm-scale", + type=float, + default=0.25, + help="The scale to smooth the loss with lm " + "(output of prediction network) part.", + ) + + parser.add_argument( + "--am-scale", + type=float, + default=0.0, + help="The scale to smooth the loss with am (output of encoder network) part.", + ) + + parser.add_argument( + "--simple-loss-scale", + type=float, + default=0.5, + help="To get pruning ranges, we will calculate a simple version" + "loss(joiner is just addition), this simple loss also uses for" + "training (as a regularization item). 
We will scale the simple loss" + "with this parameter before adding to the final loss.", + ) + + parser.add_argument( + "--ctc-loss-scale", + type=float, + default=0.2, + help="Scale for CTC loss.", + ) + + parser.add_argument( + "--heat-loss-scale", + type=float, + default=0.0, + help="Scale for HEAT loss on separated sources.", + ) + + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + + parser.add_argument( + "--print-diagnostics", + type=str2bool, + default=False, + help="Accumulate stats on activations, print them and exit.", + ) + + parser.add_argument( + "--save-every-n", + type=int, + default=2000, + help="""Save checkpoint after processing this number of batches" + periodically. We save checkpoint to exp-dir/ whenever + params.batch_idx_train % save_every_n == 0. The checkpoint filename + has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt' + Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the + end of each epoch where `xxx` is the epoch number counting from 0. + """, + ) + + parser.add_argument( + "--keep-last-k", + type=int, + default=1, + help="""Only keep this number of checkpoints on disk. + For instance, if it is 3, there are only 3 checkpoints + in the exp-dir with filenames `checkpoint-xxx.pt`. + It does not affect checkpoints with name `epoch-xxx.pt`. + """, + ) + + parser.add_argument( + "--average-period", + type=int, + default=100, + help="""Update the averaged model, namely `model_avg`, after processing + this number of batches. `model_avg` is a separate version of model, + in which each floating-point parameter is the average of all the + parameters from the start of training. Each time we take the average, + we do: `model_avg = model * (average_period / batch_idx_train) + + model_avg * ((batch_idx_train - average_period) / batch_idx_train)`. + """, + ) + + parser.add_argument( + "--use-fp16", + type=str2bool, + default=False, + help="Whether to use half precision training.", + ) + + add_model_arguments(parser) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - valid_interval: Run validation if batch_idx % valid_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. 
+ + - warm_step: The number of warm-up batches used when scaling the losses + in compute_loss; it is given to the model, not the optimizer. + """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 2000, + # parameters for SURT + "num_channels": 2, + "feature_dim": 80, + "subsampling_factor": 4, # not passed in, this is fixed + # parameters for warmup + "model_warm_step": 5000, # arg given to model, not for lrate + # parameters for ctc loss + "beam_size": 10, + "use_double_scores": True, + "env_info": get_env_info(), + } + ) + + return params + + +def get_mask_encoder_model(params: AttributeDict) -> nn.Module: + mask_encoder = DPRNN( + feature_dim=params.feature_dim, + input_size=params.mask_encoder_dim, + hidden_size=params.mask_encoder_dim, + output_size=params.feature_dim * params.num_channels, + segment_size=params.mask_encoder_segment_size, + num_blocks=params.num_mask_encoder_layers, + chunk_width_randomization=params.chunk_width_randomization, + ) + return mask_encoder + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + # TODO: We can add an option to switch between Zipformer and Transformer + def to_int_tuple(s: str): + return tuple(map(int, s.split(","))) + + encoder = Zipformer( + num_features=params.feature_dim, + output_downsampling_factor=2, + zipformer_downsampling_factors=to_int_tuple( + params.zipformer_downsampling_factors + ), + encoder_dims=to_int_tuple(params.encoder_dims), + attention_dim=to_int_tuple(params.attention_dims), + encoder_unmasked_dims=to_int_tuple(params.encoder_unmasked_dims), + nhead=to_int_tuple(params.nhead), + feedforward_dim=to_int_tuple(params.feedforward_dims), + cnn_module_kernels=to_int_tuple(params.cnn_module_kernels), + num_encoder_layers=to_int_tuple(params.num_encoder_layers), + num_left_chunks=params.num_left_chunks, + short_chunk_size=params.short_chunk_size, + decode_chunk_size=params.decode_chunk_len // 2, + ) + return encoder + + +def get_joint_encoder_layer(params: AttributeDict) -> nn.Module: + class TakeFirst(nn.Module): + def forward(self, x): + return x[0] + + if params.use_joint_encoder_layer == "linear": + encoder_dim = int(params.encoder_dims.split(",")[-1]) + joint_layer = nn.Sequential( + Rearrange("(c b) t d -> b t (c d)", c=params.num_channels), + nn.Linear( + params.num_channels * encoder_dim, params.num_channels * encoder_dim + ), + nn.ReLU(), + Rearrange("b t (c d) -> (c b) t d", c=params.num_channels), + ) + elif params.use_joint_encoder_layer == "lstm": + encoder_dim = int(params.encoder_dims.split(",")[-1]) + joint_layer = nn.Sequential( + Rearrange("(c b) t d -> b t (c d)", c=params.num_channels), + ScaledLSTM( + input_size=params.num_channels * encoder_dim, + hidden_size=params.num_channels * encoder_dim, + num_layers=1, + bias=True, + batch_first=True, + dropout=0.0, + bidirectional=False, + ), + TakeFirst(), + nn.ReLU(), + Rearrange("b t (c d) -> (c b) t d", c=params.num_channels), + ) + elif params.use_joint_encoder_layer == "none": + joint_layer = None + else: + raise ValueError( + f"Unknown joint encoder layer type: {params.use_joint_encoder_layer}" + ) + return joint_layer + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + decoder_dim=params.decoder_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner(
encoder_dim=int(params.encoder_dims.split(",")[-1]), + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return joiner + + +def get_surt_model( + params: AttributeDict, +) -> nn.Module: + mask_encoder = get_mask_encoder_model(params) + encoder = get_encoder_model(params) + joint_layer = get_joint_encoder_layer(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = SURT( + mask_encoder=mask_encoder, + encoder=encoder, + joint_encoder_layer=joint_layer, + decoder=decoder, + joiner=joiner, + num_channels=params.num_channels, + encoder_dim=int(params.encoder_dims.split(",")[-1]), + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + model_avg: nn.Module = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, +) -> Optional[Dict[str, Any]]: + """Load checkpoint from file. + + If params.start_batch is positive, it will load the checkpoint from + `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if + params.start_epoch is larger than 1, it will load the checkpoint from + `params.exp_dir/epoch-{params.start_epoch - 1}.pt`. + + Apart from loading state dict for `model` and `optimizer` it also updates + `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer that we are using. + scheduler: + The scheduler that we are using. + Returns: + Return a dict containing previously saved training info. + """ + if params.start_batch > 0: + filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt" + elif params.start_epoch > 1: + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + else: + return None + + assert filename.is_file(), f"{filename} does not exist!" + + saved_params = load_checkpoint( + filename, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + if params.start_batch > 0: + if "cur_epoch" in saved_params: + params["start_epoch"] = saved_params["cur_epoch"] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: Union[nn.Module, DDP], + model_avg: Optional[nn.Module] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, + sampler: Optional[CutSampler] = None, + scaler: Optional[GradScaler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer used in the training. + sampler: + The sampler for the training dataset. + scaler: + The scaler used for mixed precision training.
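+ scheduler: + The learning rate scheduler used in the training. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0.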
+ """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=sampler, + scaler=scaler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def compute_heat_loss(x_masked, batch, num_channels=2) -> Tensor: + """ + Compute HEAT loss for separated sources using the output of mask encoder. + Args: + x_masked: + The output of mask encoder. It is a tensor of shape (B, T, C). + batch: + A batch of data. See `lhotse.dataset.K2SurtDatasetWithSources()` + for the content in it. + num_channels: + The number of output branches in the SURT model. + """ + B, T, D = x_masked[0].shape + device = x_masked[0].device + + # Create training targets for each channel. + targets = [] + for i in range(num_channels): + target = torch.ones_like(x_masked[i]) * LOG_EPSILON + targets.append(target) + + source_feats = batch["source_feats"] + source_boundaries = batch["source_boundaries"] + input_lens = batch["input_lens"].to(device) + # Assign sources to channels based on the HEAT criteria + for b in range(B): + cut_source_feats = source_feats[b] + cut_source_boundaries = source_boundaries[b] + last_seg_end = [0 for _ in range(num_channels)] + for source_feat, (start, end) in zip(cut_source_feats, cut_source_boundaries): + assigned = False + for i in range(num_channels): + if start >= last_seg_end[i]: + targets[i][b, start:end, :] += source_feat.to(device) + last_seg_end[i] = max(end, last_seg_end[i]) + assigned = True + break + if not assigned: + min_end_channel = last_seg_end.index(min(last_seg_end)) + targets[min_end_channel][b, start:end, :] += source_feat + last_seg_end[min_end_channel] = max(end, last_seg_end[min_end_channel]) + + # Get padding mask based on input lengths + pad_mask = torch.arange(T, device=device).expand(B, T) > input_lens.unsqueeze(1) + pad_mask = pad_mask.unsqueeze(-1) + + # Compute masked loss for each channel + losses = torch.zeros((num_channels, B, T, D), device=device) + for i in range(num_channels): + loss = nn.functional.mse_loss(x_masked[i], targets[i], reduction="none") + # Apply padding mask to loss + loss.masked_fill_(pad_mask, 0) + losses[i] = loss + + # loss: C x B x T x D. pad_mask: B x T x 1 + # We want to compute loss for each item in the batch. Each item has loss given + # by the sum over C, and average over T and D. For T, we need to use the padding. + loss = losses.sum(0).mean(-1).sum(-1) / batch["input_lens"].to(device) + return loss + + +def compute_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + sp: spm.SentencePieceProcessor, + batch: dict, + is_training: bool, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute RNN-T loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. 
+ """ + device = model.device if isinstance(model, DDP) else next(model.parameters()).device + feature = batch["inputs"].to(device) + feature_lens = batch["input_lens"].to(device) + + # at entry, feature is (N, T, C) + assert feature.ndim == 3 + + # The dataloader returns text as a list of cuts, each of which is a list of channel + # text. We flatten this to a list where all channels are together, i.e., it looks like + # [utt1_ch1, utt2_ch1, ..., uttN_ch1, utt1_ch2, ...., uttN,ch2]. + text = [val for tup in zip(*batch["text"]) for val in tup] + assert len(text) == len(feature) * params.num_channels + + # Convert all channel texts to token IDs and create a ragged tensor. + y = sp.encode(text, out_type=int) + y = k2.RaggedTensor(y).to(device) + + batch_idx_train = params.batch_idx_train + warm_step = params.model_warm_step + + with torch.set_grad_enabled(is_training): + (simple_loss, pruned_loss, ctc_loss, x_masked) = model( + x=feature, + x_lens=feature_lens, + y=y, + prune_range=params.prune_range, + am_scale=params.am_scale, + lm_scale=params.lm_scale, + reduction="none", + subsampling_factor=params.subsampling_factor, + ) + simple_loss_is_finite = torch.isfinite(simple_loss) + pruned_loss_is_finite = torch.isfinite(pruned_loss) + ctc_loss_is_finite = torch.isfinite(ctc_loss) + + # Compute HEAT loss + if is_training and params.heat_loss_scale > 0.0: + heat_loss = compute_heat_loss( + x_masked, batch, num_channels=params.num_channels + ) + else: + heat_loss = torch.tensor(0.0, device=device) + + heat_loss_is_finite = torch.isfinite(heat_loss) + is_finite = ( + simple_loss_is_finite + & pruned_loss_is_finite + & ctc_loss_is_finite + & heat_loss_is_finite + ) + if not torch.all(is_finite): + logging.info( + "Not all losses are finite!\n" + f"simple_losses: {simple_loss}\n" + f"pruned_losses: {pruned_loss}\n" + f"ctc_losses: {ctc_loss}\n" + f"heat_losses: {heat_loss}\n" + ) + display_and_save_batch(batch, params=params, sp=sp) + simple_loss = simple_loss[simple_loss_is_finite] + pruned_loss = pruned_loss[pruned_loss_is_finite] + ctc_loss = ctc_loss[ctc_loss_is_finite] + heat_loss = heat_loss[heat_loss_is_finite] + + # If either all simple_loss or pruned_loss is inf or nan, + # we stop the training process by raising an exception + if ( + torch.all(~simple_loss_is_finite) + or torch.all(~pruned_loss_is_finite) + or torch.all(~ctc_loss_is_finite) + or torch.all(~heat_loss_is_finite) + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) + + simple_loss_sum = simple_loss.sum() + pruned_loss_sum = pruned_loss.sum() + ctc_loss_sum = ctc_loss.sum() + heat_loss_sum = heat_loss.sum() + + s = params.simple_loss_scale + # take down the scale on the simple loss from 1.0 at the start + # to params.simple_loss scale by warm_step. 
+ simple_loss_scale = ( + s + if batch_idx_train >= warm_step + else 1.0 - (batch_idx_train / warm_step) * (1.0 - s) + ) + pruned_loss_scale = ( + 1.0 + if batch_idx_train >= warm_step + else 0.1 + 0.9 * (batch_idx_train / warm_step) + ) + loss = ( + simple_loss_scale * simple_loss_sum + + pruned_loss_scale * pruned_loss_sum + + params.ctc_loss_scale * ctc_loss_sum + + params.heat_loss_scale * heat_loss_sum + ) + + assert loss.requires_grad == is_training + + info = MetricsTracker() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + # info["frames"] is an approximate number for two reasons: + # (1) The actual subsampling factor is ((lens - 1) // 2 - 1) // 2 + # (2) If some utterances in the batch lead to inf/nan loss, they + # are filtered out. + info["frames"] = (feature_lens // params.subsampling_factor).sum().item() + + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + + # Note: We use reduction=sum while computing the loss. + info["loss"] = loss.detach().cpu().item() + info["simple_loss"] = simple_loss_sum.detach().cpu().item() + info["pruned_loss"] = pruned_loss_sum.detach().cpu().item() + if params.ctc_loss_scale > 0.0: + info["ctc_loss"] = ctc_loss_sum.detach().cpu().item() + if params.heat_loss_scale > 0.0: + info["heat_loss"] = heat_loss_sum.detach().cpu().item() + + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + sp: spm.SentencePieceProcessor, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + model: Union[nn.Module, DDP], + optimizer: torch.optim.Optimizer, + scheduler: LRSchedulerType, + sp: spm.SentencePieceProcessor, + train_dl: torch.utils.data.DataLoader, + train_dl_warmup: Optional[torch.utils.data.DataLoader], + valid_dl: torch.utils.data.DataLoader, + scaler: GradScaler, + model_avg: Optional[nn.Module] = None, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, + rank: int = 0, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + scheduler: + The learning rate scheduler; we call step() every step. + train_dl: + Dataloader for the training dataset. + train_dl_warmup: + Dataloader for the training dataset with 2 speakers. This is used during the + warmup stage. + valid_dl: + Dataloader for the validation dataset.
+ scaler: + The scaler used for mixed precision training. + model_avg: + The stored model averaged from the start of training. + tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. + """ + torch.cuda.empty_cache() + model.train() + + tot_loss = MetricsTracker() + + iter_train = iter(train_dl) + iter_train_warmup = iter(train_dl_warmup) if train_dl_warmup is not None else None + + batch_idx = 0 + + while True: + # We first sample a batch from the main dataset. This is because we want to + # make sure all epochs have the same number of batches. + try: + batch = next(iter_train) + except StopIteration: + break + + # If we are in warmup stage, get the batch from the warmup dataset. + if ( + params.batch_idx_train <= params.model_warm_step + and iter_train_warmup is not None + ): + try: + batch = next(iter_train_warmup) + except StopIteration: + iter_train_warmup = iter(train_dl_warmup) + batch = next(iter_train_warmup) + + batch_idx += 1 + + params.batch_idx_train += 1 + batch_size = batch["inputs"].shape[0] + + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. + scaler.scale(loss).backward() + set_batch_count(model, params.batch_idx_train) + scheduler.step_batch(params.batch_idx_train) + + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + except: # noqa + display_and_save_batch(batch, params=params, sp=sp) + raise + + if params.print_diagnostics and batch_idx == 5: + return + + if ( + rank == 0 + and params.batch_idx_train > 0 + and params.batch_idx_train % params.average_period == 0 + ): + update_averaged_model( + params=params, + model_cur=model, + model_avg=model_avg, + ) + + if ( + params.batch_idx_train > 0 + and params.batch_idx_train % params.save_every_n == 0 + ): + params.cur_batch_idx = batch_idx + save_checkpoint_with_global_batch_idx( + out_dir=params.exp_dir, + global_batch_idx=params.batch_idx_train, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + del params.cur_batch_idx + remove_checkpoints( + out_dir=params.exp_dir, + topk=params.keep_last_k, + rank=rank, + ) + + if batch_idx % 100 == 0 and params.use_fp16: + # If the grad scale was less than 1, try increasing it. The _growth_interval + # of the grad scaler is configurable, but we can't configure it to have different + # behavior depending on the current grad scale.
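+ # Concretely: the scale is doubled whenever it is below 1.0 (checked every + # 100 batches) or below 8.0 (checked every 400 batches); training aborts if + # it collapses below 1.0e-05.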
+ cur_grad_scale = scaler._scale.item() + if cur_grad_scale < 1.0 or (cur_grad_scale < 8.0 and batch_idx % 400 == 0): + scaler.update(cur_grad_scale * 2.0) + if cur_grad_scale < 0.01: + logging.warning(f"Grad scale is small: {cur_grad_scale}") + if cur_grad_scale < 1.0e-05: + raise RuntimeError( + f"grad_scale is too small, exiting: {cur_grad_scale}" + ) + + if batch_idx % params.log_interval == 0: + cur_lr = scheduler.get_last_lr()[0] + cur_grad_scale = scaler._scale.item() if params.use_fp16 else 1.0 + + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}, " + + (f"grad_scale: {scaler._scale.item()}" if params.use_fp16 else "") + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary(tb_writer, "train/tot_", params.batch_idx_train) + if params.use_fp16: + tb_writer.add_scalar( + "train/grad_scale", cur_grad_scale, params.batch_idx_train + ) + + if batch_idx % params.valid_interval == 0 and not params.print_diagnostics: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + sp=sp, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + logging.info( + f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB" + ) + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+ args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + + fix_random_seed(params.seed) + if world_size > 1: + setup_dist(rank, world_size, params.master_port) + + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info("Training started") + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", rank) + logging.info(f"Device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # <blk> is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("<blk>") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_surt_model(params) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + assert params.save_every_n >= params.average_period + model_avg: Optional[nn.Module] = None + if rank == 0: + # model_avg is only used with rank 0 + model_avg = copy.deepcopy(model) + + assert params.start_epoch > 0, params.start_epoch + checkpoints = load_checkpoint_if_available( + params=params, model=model, model_avg=model_avg + ) + + model.to(device) + + if checkpoints is None and params.model_init_ckpt is not None: + logging.info( + f"Initializing model with checkpoint from {params.model_init_ckpt}" + ) + init_ckpt = torch.load(params.model_init_ckpt, map_location=device) + model.load_state_dict(init_ckpt["model"], strict=False) + + if world_size > 1: + logging.info("Using DDP") + model = DDP(model, device_ids=[rank], find_unused_parameters=True) + + parameters_names = [] + parameters_names.append( + [name_param_pair[0] for name_param_pair in model.named_parameters()] + ) + optimizer = ScaledAdam( + model.parameters(), + lr=params.base_lr, + clipping_scale=2.0, + parameters_names=parameters_names, + ) + + scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs) + + if checkpoints and "optimizer" in checkpoints: + logging.info("Loading optimizer state dict") + optimizer.load_state_dict(checkpoints["optimizer"]) + + if ( + checkpoints + and "scheduler" in checkpoints + and checkpoints["scheduler"] is not None + ): + logging.info("Loading scheduler state dict") + scheduler.load_state_dict(checkpoints["scheduler"]) + + if params.print_diagnostics: + diagnostic = diagnostics.attach_diagnostics(model) + + libricss = LibriCssAsrDataModule(args) + + train_cuts = libricss.lsmix_cuts(rvb_affix="comb", type_affix="full", sources=True) + train_cuts_ov40 = libricss.lsmix_cuts( + rvb_affix="comb", type_affix="ov40", sources=True + ) + dev_cuts = libricss.libricss_cuts(split="dev", type="sdm") + + if params.start_batch > 0 and checkpoints and "sampler" in checkpoints: + # We only load the sampler's state dict when it loads a checkpoint + # saved in the middle of an epoch + sampler_state_dict = checkpoints["sampler"] + else: + sampler_state_dict = None + + train_dl = libricss.train_dataloaders( + train_cuts, + sampler_state_dict=sampler_state_dict, + ) + train_dl_ov40 = libricss.train_dataloaders(train_cuts_ov40) + valid_dl = libricss.valid_dataloaders(dev_cuts) + + scaler = GradScaler(enabled=params.use_fp16, init_scale=1.0) + if checkpoints and "grad_scaler" in checkpoints: + logging.info("Loading grad scaler state dict") + scaler.load_state_dict(checkpoints["grad_scaler"]) + + for epoch in
range(params.start_epoch, params.num_epochs + 1): + scheduler.step_epoch(epoch - 1) + fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sp=sp, + train_dl=train_dl, + train_dl_warmup=train_dl_ov40, + valid_dl=valid_dl, + scaler=scaler, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + if params.print_diagnostics: + diagnostic.print_diagnostics() + break + + save_checkpoint( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def display_and_save_batch( + batch: dict, + params: AttributeDict, + sp: spm.SentencePieceProcessor, +) -> None: + """Display the batch statistics and save the batch to disk. + + Args: + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + params: + Parameters for training. See :func:`get_params`. + sp: + The BPE model. + """ + from lhotse.utils import uuid4 + + filename = f"{params.exp_dir}/batch-{uuid4()}.pt" + logging.info(f"Saving batch to {filename}") + torch.save(batch, filename) + + features = batch["inputs"] + + logging.info(f"features shape: {features.shape}") + + y = [sp.encode(text_ch) for text_ch in batch["text"]] + num_tokens = [sum(len(yi) for yi in y_ch) for y_ch in y] + logging.info(f"num tokens: {num_tokens}") + + +def main(): + parser = get_parser() + LibriCssAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) +torch.multiprocessing.set_sharing_strategy("file_system") + +if __name__ == "__main__": + main() diff --git a/egs/libricss/SURT/dprnn_zipformer/train_adapt.py b/egs/libricss/SURT/dprnn_zipformer/train_adapt.py new file mode 100755 index 000000000..1c1b0c28c --- /dev/null +++ b/egs/libricss/SURT/dprnn_zipformer/train_adapt.py @@ -0,0 +1,1343 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang, +# Mingshuang Luo, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+
+"""
+Usage:
+
+export CUDA_VISIBLE_DEVICES=0
+
+./dprnn_zipformer/train_adapt.py \
+  --world-size 1 \
+  --num-epochs 15 \
+  --start-epoch 1 \
+  --exp-dir dprnn_zipformer/exp \
+  --max-duration 300
+
+# For mixed precision training:
+
+./dprnn_zipformer/train_adapt.py \
+  --world-size 1 \
+  --num-epochs 30 \
+  --start-epoch 1 \
+  --use-fp16 1 \
+  --exp-dir dprnn_zipformer/exp \
+  --max-duration 550
+"""
+
+import argparse
+import copy
+import logging
+import warnings
+from itertools import chain
+from pathlib import Path
+from shutil import copyfile
+from typing import Any, Dict, Optional, Tuple, Union
+
+import k2
+import optim
+import sentencepiece as spm
+import torch
+import torch.multiprocessing as mp
+import torch.nn as nn
+from asr_datamodule import LibriCssAsrDataModule
+from decoder import Decoder
+from dprnn import DPRNN
+from einops.layers.torch import Rearrange
+from joiner import Joiner
+from lhotse.cut import Cut
+from lhotse.dataset.sampling.base import CutSampler
+from lhotse.utils import LOG_EPSILON, fix_random_seed
+from model import SURT
+from optim import Eden, ScaledAdam
+from scaling import ScaledLinear, ScaledLSTM
+from torch import Tensor
+from torch.cuda.amp import GradScaler
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.utils.tensorboard import SummaryWriter
+from zipformer import Zipformer
+
+from icefall import diagnostics
+from icefall.checkpoint import load_checkpoint, remove_checkpoints
+from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
+from icefall.checkpoint import (
+    save_checkpoint_with_global_batch_idx,
+    update_averaged_model,
+)
+from icefall.dist import cleanup_dist, setup_dist
+from icefall.env import get_env_info
+from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool
+
+LRSchedulerType = Union[torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler]
+
+
+def set_batch_count(model: Union[nn.Module, DDP], batch_count: float) -> None:
+    if isinstance(model, DDP):
+        # get underlying nn.Module
+        model = model.module
+    for module in model.modules():
+        if hasattr(module, "batch_count"):
+            module.batch_count = batch_count
+
+
+def add_model_arguments(parser: argparse.ArgumentParser):
+    parser.add_argument(
+        "--num-mask-encoder-layers",
+        type=int,
+        default=4,
+        help="Number of layers in the DPRNN based mask encoder.",
+    )
+
+    parser.add_argument(
+        "--mask-encoder-dim",
+        type=int,
+        default=256,
+        help="Hidden dimension of the LSTM blocks in DPRNN.",
+    )
+
+    parser.add_argument(
+        "--mask-encoder-segment-size",
+        type=int,
+        default=32,
+        help="Segment size of the SegLSTM in DPRNN. Ideally, this should be equal to the "
+        "decode-chunk-length of the zipformer encoder.",
+    )
+
+    parser.add_argument(
+        "--chunk-width-randomization",
+        type=str2bool,
+        default=False,
+        help="Whether to randomize the chunk width in DPRNN.",
+    )
+
+    # Zipformer config is based on:
+    # https://github.com/k2-fsa/icefall/pull/745#issuecomment-1405282740
+    parser.add_argument(
+        "--num-encoder-layers",
+        type=str,
+        default="2,2,2,2,2",
+        help="Number of zipformer encoder layers, comma separated.",
+    )
+
+    parser.add_argument(
+        "--feedforward-dims",
+        type=str,
+        default="768,768,768,768,768",
+        help="Feedforward dimension of the zipformer encoder layers, comma separated.",
+    )
+
+    parser.add_argument(
+        "--nhead",
+        type=str,
+        default="8,8,8,8,8",
+        help="Number of attention heads in the zipformer encoder layers.",
+    )
+
+    parser.add_argument(
+        "--encoder-dims",
+        type=str,
+        default="256,256,256,256,256",
+        help="Embedding dimension in the 2 blocks of zipformer encoder layers, comma separated",
+    )
+
+    parser.add_argument(
+        "--attention-dims",
+        type=str,
+        default="192,192,192,192,192",
+        help="""Attention dimension in the 2 blocks of zipformer encoder layers, comma separated;
+        not the same as embedding dimension.""",
+    )
+
+    parser.add_argument(
+        "--encoder-unmasked-dims",
+        type=str,
+        default="192,192,192,192,192",
+        help="Unmasked dimensions in the encoders, related to augmentation during training. "
+        "Must be <= each of encoder_dims. Empirically, less than 256 seems to make performance "
+        "worse.",
+    )
+
+    parser.add_argument(
+        "--zipformer-downsampling-factors",
+        type=str,
+        default="1,2,4,8,2",
+        help="Downsampling factor for each stack of encoder layers.",
+    )
+
+    parser.add_argument(
+        "--cnn-module-kernels",
+        type=str,
+        default="31,31,31,31,31",
+        help="Sizes of kernels in convolution modules",
+    )
+
+    parser.add_argument(
+        "--use-joint-encoder-layer",
+        type=str,
+        default="lstm",
+        choices=["linear", "lstm", "none"],
+        help="Type of joint layer (if any) used to combine all branches.",
+    )
+
+    parser.add_argument(
+        "--decoder-dim",
+        type=int,
+        default=512,
+        help="Embedding dimension in the decoder model.",
+    )
+
+    parser.add_argument(
+        "--joiner-dim",
+        type=int,
+        default=512,
+        help="""Dimension used in the joiner model.
+        Outputs from the encoder and decoder model are projected
+        to this dimension before adding.
+        """,
+    )
+
+    parser.add_argument(
+        "--short-chunk-size",
+        type=int,
+        default=50,
+        help="""Chunk length of dynamic training, the chunk size would be either
+        max sequence length of current batch or uniformly sampled from (1, short_chunk_size).
+        """,
+    )
+
+    parser.add_argument(
+        "--num-left-chunks",
+        type=int,
+        default=4,
+        help="How much left context can be seen in chunks when calculating attention.",
+    )
+
+    parser.add_argument(
+        "--decode-chunk-len",
+        type=int,
+        default=32,
+        help="The chunk size for decoding (in frames before subsampling)",
+    )
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
+
+    parser.add_argument(
+        "--world-size",
+        type=int,
+        default=1,
+        help="Number of GPUs for DDP training.",
+    )
+
+    parser.add_argument(
+        "--master-port",
+        type=int,
+        default=12354,
+        help="Master port to use for DDP training.",
+    )
+
+    parser.add_argument(
+        "--tensorboard",
+        type=str2bool,
+        default=True,
+        help="Should various information be logged in tensorboard.",
+    )
+
+    parser.add_argument(
+        "--num-epochs",
+        type=int,
+        default=15,
+        help="Number of epochs to train.",
+    )
+
+    parser.add_argument(
+        "--start-epoch",
+        type=int,
+        default=1,
+        help="""Resume training from this epoch. It should be positive.
+        If larger than 1, it will load checkpoint from
+        exp-dir/epoch-{start_epoch-1}.pt
+        """,
+    )
+
+    parser.add_argument(
+        "--start-batch",
+        type=int,
+        default=0,
+        help="""If positive, --start-epoch is ignored and
+        it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt
+        """,
+    )
+
+    parser.add_argument(
+        "--exp-dir",
+        type=str,
+        default="dprnn_zipformer/exp",
+        help="""The experiment dir.
+        It specifies the directory where all training related
+        files, e.g., checkpoints, log, etc, are saved
+        """,
+    )
+
+    parser.add_argument(
+        "--model-init-ckpt",
+        type=str,
+        default=None,
+        help="""The model checkpoint to initialize the model (either full or part).
+        If not specified, the model is randomly initialized.
+        """,
+    )
+
+    parser.add_argument(
+        "--bpe-model",
+        type=str,
+        default="data/lang_bpe_500/bpe.model",
+        help="Path to the BPE model",
+    )
+
+    parser.add_argument(
+        "--base-lr", type=float, default=0.0004, help="The base learning rate."
+    )
+
+    parser.add_argument(
+        "--lr-batches",
+        type=float,
+        default=1000,
+        help="""Number of steps that affects how rapidly the learning rate
+        decreases. We suggest not to change this.""",
+    )
+
+    parser.add_argument(
+        "--lr-epochs",
+        type=float,
+        default=2,
+        help="""Number of epochs that affects how rapidly the learning rate decreases.
+        """,
+    )
+
+    parser.add_argument(
+        "--context-size",
+        type=int,
+        default=2,
+        help="The context size in the decoder. 1 means bigram; 2 means tri-gram",
+    )
+
+    parser.add_argument(
+        "--prune-range",
+        type=int,
+        default=5,
+        help="The prune range for rnnt loss, it means how many symbols (context) "
+        "we are using to compute the loss",
+    )
+
+    parser.add_argument(
+        "--lm-scale",
+        type=float,
+        default=0.25,
+        help="The scale to smooth the loss with lm "
+        "(output of prediction network) part.",
+    )
+
+    parser.add_argument(
+        "--am-scale",
+        type=float,
+        default=0.0,
+        help="The scale to smooth the loss with am (output of encoder network) part.",
+    )
+
+    parser.add_argument(
+        "--simple-loss-scale",
+        type=float,
+        default=0.5,
+        help="To get pruning ranges, we will calculate a simple version of the "
+        "loss (where the joiner is just an addition); this simple loss is also "
+        "used for training (as a regularization term). We will scale the simple "
+        "loss by this parameter before adding it to the final loss.",
+    )
+
+    parser.add_argument(
+        "--ctc-loss-scale",
+        type=float,
+        default=0.2,
+        help="Scale for CTC loss.",
+    )
+
+    parser.add_argument(
+        "--seed",
+        type=int,
+        default=42,
+        help="The seed for random generators intended for reproducibility",
+    )
+
+    parser.add_argument(
+        "--print-diagnostics",
+        type=str2bool,
+        default=False,
+        help="Accumulate stats on activations, print them and exit.",
+    )
+
+    parser.add_argument(
+        "--save-every-n",
+        type=int,
+        default=1000,
+        help="""Save checkpoint after processing this number of batches
+        periodically. We save checkpoint to exp-dir/ whenever
+        params.batch_idx_train % save_every_n == 0. The checkpoint filename
+        has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt'
+        Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the
+        end of each epoch where `xxx` is the epoch number counting from 0.
+        """,
+    )
+
+    parser.add_argument(
+        "--keep-last-k",
+        type=int,
+        default=5,
+        help="""Only keep this number of checkpoints on disk.
+        For instance, if it is 3, there are only 3 checkpoints
+        in the exp-dir with filenames `checkpoint-xxx.pt`.
+        It does not affect checkpoints with name `epoch-xxx.pt`.
+        """,
+    )
+
+    parser.add_argument(
+        "--average-period",
+        type=int,
+        default=100,
+        help="""Update the averaged model, namely `model_avg`, after processing
+        this number of batches. `model_avg` is a separate version of model,
+        in which each floating-point parameter is the average of all the
+        parameters from the start of training. Each time we take the average,
+        we do: `model_avg = model * (average_period / batch_idx_train) +
+        model_avg * ((batch_idx_train - average_period) / batch_idx_train)`.
+        """,
+    )
+
+    parser.add_argument(
+        "--use-fp16",
+        type=str2bool,
+        default=False,
+        help="Whether to use half precision training.",
+    )
+
+    add_model_arguments(parser)
+
+    return parser
+
+
+def get_params() -> AttributeDict:
+    """Return a dict containing training parameters.
+
+    All training related parameters that are not passed from the commandline
+    are saved in the variable `params`.
+
+    Commandline options are merged into `params` after they are parsed, so
+    you can also access them via `params`.
+
+    Explanation of options saved in `params`:
+
+    - best_train_loss: Best training loss so far. It is used to select
+                       the model that has the lowest training loss. It is
+                       updated during the training.
+
+    - best_valid_loss: Best validation loss so far. It is used to select
+                       the model that has the lowest validation loss. It is
+                       updated during the training.
+
+    - best_train_epoch: It is the epoch that has the best training loss.
+
+    - best_valid_epoch: It is the epoch that has the best validation loss.
+
+    - batch_idx_train: Used for writing statistics to tensorboard. It
+                       contains number of batches trained so far across
+                       epochs.
+
+    - log_interval: Print training loss if `batch_idx % log_interval` is 0
+
+    - reset_interval: Reset statistics if batch_idx % reset_interval is 0
+
+    - valid_interval: Run validation if batch_idx % valid_interval is 0
+
+    - feature_dim: The model input dim. It has to match the one used
+                   in computing features.
+
+    - subsampling_factor: The subsampling factor for the model.
+
+    - num_decoder_layers: Number of decoder layers of the transformer decoder.
+
+    - warm_step: The warm_step for Noam optimizer.
+ """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 10, + "reset_interval": 200, + "valid_interval": 100, + # parameters for SURT + "num_channels": 2, + "feature_dim": 80, + "subsampling_factor": 4, # not passed in, this is fixed + # parameters for Noam + "model_warm_step": 5000, # arg given to model, not for lrate + # parameters for ctc loss + "beam_size": 10, + "use_double_scores": True, + "env_info": get_env_info(), + } + ) + + return params + + +def get_mask_encoder_model(params: AttributeDict) -> nn.Module: + mask_encoder = DPRNN( + feature_dim=params.feature_dim, + input_size=params.mask_encoder_dim, + hidden_size=params.mask_encoder_dim, + output_size=params.feature_dim * params.num_channels, + segment_size=params.mask_encoder_segment_size, + num_blocks=params.num_mask_encoder_layers, + chunk_width_randomization=params.chunk_width_randomization, + ) + return mask_encoder + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + # TODO: We can add an option to switch between Zipformer and Transformer + def to_int_tuple(s: str): + return tuple(map(int, s.split(","))) + + encoder = Zipformer( + num_features=params.feature_dim, + output_downsampling_factor=2, + zipformer_downsampling_factors=to_int_tuple( + params.zipformer_downsampling_factors + ), + encoder_dims=to_int_tuple(params.encoder_dims), + attention_dim=to_int_tuple(params.attention_dims), + encoder_unmasked_dims=to_int_tuple(params.encoder_unmasked_dims), + nhead=to_int_tuple(params.nhead), + feedforward_dim=to_int_tuple(params.feedforward_dims), + cnn_module_kernels=to_int_tuple(params.cnn_module_kernels), + num_encoder_layers=to_int_tuple(params.num_encoder_layers), + num_left_chunks=params.num_left_chunks, + short_chunk_size=params.short_chunk_size, + decode_chunk_size=params.decode_chunk_len // 2, + ) + return encoder + + +def get_joint_encoder_layer(params: AttributeDict) -> nn.Module: + class TakeFirst(nn.Module): + def forward(self, x): + return x[0] + + if params.use_joint_encoder_layer == "linear": + encoder_dim = int(params.encoder_dims.split(",")[-1]) + joint_layer = nn.Sequential( + Rearrange("(c b) t d -> b t (c d)", c=params.num_channels), + nn.Linear( + params.num_channels * encoder_dim, params.num_channels * encoder_dim + ), + nn.ReLU(), + Rearrange("b t (c d) -> (c b) t d", c=params.num_channels), + ) + elif params.use_joint_encoder_layer == "lstm": + encoder_dim = int(params.encoder_dims.split(",")[-1]) + joint_layer = nn.Sequential( + Rearrange("(c b) t d -> b t (c d)", c=params.num_channels), + ScaledLSTM( + input_size=params.num_channels * encoder_dim, + hidden_size=params.num_channels * encoder_dim, + num_layers=1, + bias=True, + batch_first=True, + dropout=0.0, + bidirectional=False, + ), + TakeFirst(), + nn.ReLU(), + Rearrange("b t (c d) -> (c b) t d", c=params.num_channels), + ) + elif params.use_joint_encoder_layer == "none": + joint_layer = None + else: + raise ValueError( + f"Unknown joint encoder layer type: {params.use_joint_encoder_layer}" + ) + return joint_layer + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + decoder_dim=params.decoder_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + encoder_dim=int(params.encoder_dims.split(",")[-1]), + 
+        decoder_dim=params.decoder_dim,
+        joiner_dim=params.joiner_dim,
+        vocab_size=params.vocab_size,
+    )
+    return joiner
+
+
+def get_surt_model(
+    params: AttributeDict,
+) -> nn.Module:
+    mask_encoder = get_mask_encoder_model(params)
+    encoder = get_encoder_model(params)
+    joint_layer = get_joint_encoder_layer(params)
+    decoder = get_decoder_model(params)
+    joiner = get_joiner_model(params)
+
+    model = SURT(
+        mask_encoder=mask_encoder,
+        encoder=encoder,
+        joint_encoder_layer=joint_layer,
+        decoder=decoder,
+        joiner=joiner,
+        num_channels=params.num_channels,
+        encoder_dim=int(params.encoder_dims.split(",")[-1]),
+        decoder_dim=params.decoder_dim,
+        joiner_dim=params.joiner_dim,
+        vocab_size=params.vocab_size,
+    )
+    return model
+
+
+def load_checkpoint_if_available(
+    params: AttributeDict,
+    model: nn.Module,
+    model_avg: Optional[nn.Module] = None,
+    optimizer: Optional[torch.optim.Optimizer] = None,
+    scheduler: Optional[LRSchedulerType] = None,
+) -> Optional[Dict[str, Any]]:
+    """Load checkpoint from file.
+
+    If params.start_batch is positive, it will load the checkpoint from
+    `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if
+    params.start_epoch is larger than 1, it will load the checkpoint from
+    `params.exp_dir/epoch-{params.start_epoch - 1}.pt`.
+
+    Apart from loading state dict for `model` and `optimizer` it also updates
+    `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,
+    and `best_valid_loss` in `params`.
+
+    Args:
+      params:
+        The return value of :func:`get_params`.
+      model:
+        The training model.
+      model_avg:
+        The stored model averaged from the start of training.
+      optimizer:
+        The optimizer that we are using.
+      scheduler:
+        The scheduler that we are using.
+    Returns:
+      Return a dict containing previously saved training info.
+    """
+    if params.start_batch > 0:
+        filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt"
+    elif params.start_epoch > 1:
+        filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt"
+    else:
+        return None
+
+    assert filename.is_file(), f"{filename} does not exist!"
+
+    saved_params = load_checkpoint(
+        filename,
+        model=model,
+        model_avg=model_avg,
+        optimizer=optimizer,
+        scheduler=scheduler,
+    )
+
+    keys = [
+        "best_train_epoch",
+        "best_valid_epoch",
+        "batch_idx_train",
+        "best_train_loss",
+        "best_valid_loss",
+    ]
+    for k in keys:
+        params[k] = saved_params[k]
+
+    if params.start_batch > 0:
+        if "cur_epoch" in saved_params:
+            params["start_epoch"] = saved_params["cur_epoch"]
+
+    return saved_params
+
+
+def save_checkpoint(
+    params: AttributeDict,
+    model: Union[nn.Module, DDP],
+    model_avg: Optional[nn.Module] = None,
+    optimizer: Optional[torch.optim.Optimizer] = None,
+    scheduler: Optional[LRSchedulerType] = None,
+    sampler: Optional[CutSampler] = None,
+    scaler: Optional[GradScaler] = None,
+    rank: int = 0,
+) -> None:
+    """Save model, optimizer, scheduler and training stats to file.
+
+    Args:
+      params:
+        It is returned by :func:`get_params`.
+      model:
+        The training model.
+      model_avg:
+        The stored model averaged from the start of training.
+      optimizer:
+        The optimizer used in the training.
+      scheduler:
+        The learning rate scheduler used in the training.
+      sampler:
+        The sampler for the training dataset.
+      scaler:
+        The scaler used for mixed precision training.
+    """
+    if rank != 0:
+        return
+    filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt"
+    save_checkpoint_impl(
+        filename=filename,
+        model=model,
+        model_avg=model_avg,
+        params=params,
+        optimizer=optimizer,
+        scheduler=scheduler,
+        sampler=sampler,
+        scaler=scaler,
+        rank=rank,
+    )
+
+    if params.best_train_epoch == params.cur_epoch:
+        best_train_filename = params.exp_dir / "best-train-loss.pt"
+        copyfile(src=filename, dst=best_train_filename)
+
+    if params.best_valid_epoch == params.cur_epoch:
+        best_valid_filename = params.exp_dir / "best-valid-loss.pt"
+        copyfile(src=filename, dst=best_valid_filename)
+
+
+def compute_loss(
+    params: AttributeDict,
+    model: Union[nn.Module, DDP],
+    sp: spm.SentencePieceProcessor,
+    batch: dict,
+    is_training: bool,
+) -> Tuple[Tensor, MetricsTracker]:
+    """
+    Compute RNN-T loss given the model and its inputs.
+
+    Args:
+      params:
+        Parameters for training. See :func:`get_params`.
+      model:
+        The model for training. It is an instance of SURT in our case.
+      batch:
+        A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`
+        for the content in it.
+      is_training:
+        True for training. False for validation. When it is True, this
+        function enables autograd during computation; when it is False, it
+        disables autograd.
+    """
+    device = model.device if isinstance(model, DDP) else next(model.parameters()).device
+    feature = batch["inputs"].to(device)
+    feature_lens = batch["input_lens"].to(device)
+
+    # at entry, feature is (N, T, C)
+    assert feature.ndim == 3
+
+    # The dataloader returns text as a list of cuts, each of which is a list of channel
+    # text. We flatten this to a list where all channels are together, i.e., it looks like
+    # [utt1_ch1, utt2_ch1, ..., uttN_ch1, utt1_ch2, ..., uttN_ch2].
+    text = [val for tup in zip(*batch["text"]) for val in tup]
+    assert len(text) == len(feature) * params.num_channels
+
+    # Convert all channel texts to token IDs and create a ragged tensor.
+    y = sp.encode(text, out_type=int)
+    y = k2.RaggedTensor(y).to(device)
+
+    batch_idx_train = params.batch_idx_train
+    warm_step = params.model_warm_step
+
+    with torch.set_grad_enabled(is_training):
+        (simple_loss, pruned_loss, ctc_loss, x_masked) = model(
+            x=feature,
+            x_lens=feature_lens,
+            y=y,
+            prune_range=params.prune_range,
+            am_scale=params.am_scale,
+            lm_scale=params.lm_scale,
+            reduction="none",
+            subsampling_factor=params.subsampling_factor,
+        )
+        simple_loss_is_finite = torch.isfinite(simple_loss)
+        pruned_loss_is_finite = torch.isfinite(pruned_loss)
+        ctc_loss_is_finite = torch.isfinite(ctc_loss)
+
+        is_finite = simple_loss_is_finite & pruned_loss_is_finite & ctc_loss_is_finite
+        if not torch.all(is_finite):
+            logging.info(
+                "Not all losses are finite!\n"
+                f"simple_losses: {simple_loss}\n"
+                f"pruned_losses: {pruned_loss}\n"
+                f"ctc_losses: {ctc_loss}\n"
+            )
+            display_and_save_batch(batch, params=params, sp=sp)
+            simple_loss = simple_loss[simple_loss_is_finite]
+            pruned_loss = pruned_loss[pruned_loss_is_finite]
+            ctc_loss = ctc_loss[ctc_loss_is_finite]
+
+        # If all of the simple, pruned, or CTC losses are inf or nan,
+        # we stop the training process by raising an exception
+        if (
+            torch.all(~simple_loss_is_finite)
+            or torch.all(~pruned_loss_is_finite)
+            or torch.all(~ctc_loss_is_finite)
+        ):
+            raise ValueError(
+                "There are too many utterances in this batch "
+                "leading to inf or nan losses."
+            )
+
+        simple_loss_sum = simple_loss.sum()
+        pruned_loss_sum = pruned_loss.sum()
+        ctc_loss_sum = ctc_loss.sum()
+
+        s = params.simple_loss_scale
+        # take down the scale on the simple loss from 1.0 at the start
+        # to params.simple_loss_scale by warm_step.
+        simple_loss_scale = (
+            s
+            if batch_idx_train >= warm_step
+            else 1.0 - (batch_idx_train / warm_step) * (1.0 - s)
+        )
+        pruned_loss_scale = (
+            1.0
+            if batch_idx_train >= warm_step
+            else 0.1 + 0.9 * (batch_idx_train / warm_step)
+        )
+        loss = (
+            simple_loss_scale * simple_loss_sum
+            + pruned_loss_scale * pruned_loss_sum
+            + params.ctc_loss_scale * ctc_loss_sum
+        )
+
+    assert loss.requires_grad == is_training
+
+    info = MetricsTracker()
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        # info["frames"] is an approximate number for two reasons:
+        # (1) The actual subsampling factor is ((lens - 1) // 2 - 1) // 2
+        # (2) If some utterances in the batch lead to inf/nan loss, they
+        #     are filtered out.
+        info["frames"] = (feature_lens // params.subsampling_factor).sum().item()
+
+    # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances`  # noqa
+    info["utterances"] = feature.size(0)
+    # averaged input duration in frames over utterances
+    info["utt_duration"] = feature_lens.sum().item()
+    # averaged padding proportion over utterances
+    info["utt_pad_proportion"] = (
+        ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
+    )
+
+    # Note: We use reduction=sum while computing the loss.
+    info["loss"] = loss.detach().cpu().item()
+    info["simple_loss"] = simple_loss_sum.detach().cpu().item()
+    info["pruned_loss"] = pruned_loss_sum.detach().cpu().item()
+    if params.ctc_loss_scale > 0.0:
+        info["ctc_loss"] = ctc_loss_sum.detach().cpu().item()
+
+    return loss, info
+
+
+def compute_validation_loss(
+    params: AttributeDict,
+    model: Union[nn.Module, DDP],
+    sp: spm.SentencePieceProcessor,
+    valid_dl: torch.utils.data.DataLoader,
+    world_size: int = 1,
+) -> MetricsTracker:
+    """Run the validation process."""
+    model.eval()
+
+    tot_loss = MetricsTracker()
+
+    for batch_idx, batch in enumerate(valid_dl):
+        loss, loss_info = compute_loss(
+            params=params,
+            model=model,
+            sp=sp,
+            batch=batch,
+            is_training=False,
+        )
+        assert loss.requires_grad is False
+        tot_loss = tot_loss + loss_info
+
+    if world_size > 1:
+        tot_loss.reduce(loss.device)
+
+    loss_value = tot_loss["loss"] / tot_loss["frames"]
+    if loss_value < params.best_valid_loss:
+        params.best_valid_epoch = params.cur_epoch
+        params.best_valid_loss = loss_value
+
+    return tot_loss
+
+
+def train_one_epoch(
+    params: AttributeDict,
+    model: Union[nn.Module, DDP],
+    optimizer: torch.optim.Optimizer,
+    scheduler: LRSchedulerType,
+    sp: spm.SentencePieceProcessor,
+    train_dl: torch.utils.data.DataLoader,
+    valid_dl: torch.utils.data.DataLoader,
+    scaler: GradScaler,
+    model_avg: Optional[nn.Module] = None,
+    tb_writer: Optional[SummaryWriter] = None,
+    world_size: int = 1,
+    rank: int = 0,
+) -> None:
+    """Train the model for one epoch.
+
+    The training loss from the mean of all frames is saved in
+    `params.train_loss`. It runs the validation process every
+    `params.valid_interval` batches.
+
+    Args:
+      params:
+        It is returned by :func:`get_params`.
+      model:
+        The model for training.
+      optimizer:
+        The optimizer we are using.
+      scheduler:
+        The learning rate scheduler, we call step() every step.
+      train_dl:
+        Dataloader for the training dataset.
+      valid_dl:
+        Dataloader for the validation dataset.
+      scaler:
+        The scaler used for mixed precision training.
+      model_avg:
+        The stored model averaged from the start of training.
+      tb_writer:
+        Writer to write log messages to tensorboard.
+      world_size:
+        Number of nodes in DDP training. If it is 1, DDP is disabled.
+      rank:
+        The rank of the node in DDP training. If no DDP is used, it should
+        be set to 0.
+    """
+    torch.cuda.empty_cache()
+    model.train()
+
+    tot_loss = MetricsTracker()
+
+    cur_batch_idx = params.get("cur_batch_idx", 0)
+
+    for batch_idx, batch in enumerate(train_dl):
+        if batch_idx < cur_batch_idx:
+            continue
+        cur_batch_idx = batch_idx
+
+        params.batch_idx_train += 1
+        batch_size = batch["inputs"].shape[0]
+
+        try:
+            with torch.cuda.amp.autocast(enabled=params.use_fp16):
+                loss, loss_info = compute_loss(
+                    params=params,
+                    model=model,
+                    sp=sp,
+                    batch=batch,
+                    is_training=True,
+                )
+            # summary stats
+            tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info
+
+            # NOTE: We use reduction==sum and loss is computed over utterances
+            # in the batch and there is no normalization to it so far.
+            scaler.scale(loss).backward()
+            set_batch_count(model, params.batch_idx_train)
+            scheduler.step_batch(params.batch_idx_train)
+
+            scaler.step(optimizer)
+            scaler.update()
+            optimizer.zero_grad()
+        except:  # noqa
+            display_and_save_batch(batch, params=params, sp=sp)
+            raise
+
+        if params.print_diagnostics and batch_idx == 5:
+            return
+
+        if (
+            rank == 0
+            and params.batch_idx_train > 0
+            and params.batch_idx_train % params.average_period == 0
+        ):
+            update_averaged_model(
+                params=params,
+                model_cur=model,
+                model_avg=model_avg,
+            )
+
+        if (
+            params.batch_idx_train > 0
+            and params.batch_idx_train % params.save_every_n == 0
+        ):
+            params.cur_batch_idx = batch_idx
+            save_checkpoint_with_global_batch_idx(
+                out_dir=params.exp_dir,
+                global_batch_idx=params.batch_idx_train,
+                model=model,
+                model_avg=model_avg,
+                params=params,
+                optimizer=optimizer,
+                scheduler=scheduler,
+                sampler=train_dl.sampler,
+                scaler=scaler,
+                rank=rank,
+            )
+            del params.cur_batch_idx
+            remove_checkpoints(
+                out_dir=params.exp_dir,
+                topk=params.keep_last_k,
+                rank=rank,
+            )
+
+        if batch_idx % 100 == 0 and params.use_fp16:
+            # If the grad scale was less than 1, try increasing it. The _growth_interval
+            # of the grad scaler is configurable, but we can't configure it to have different
+            # behavior depending on the current grad scale.
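+            # Concretely: a scale below 1.0 is doubled every 100 batches, and a
+            # scale in [1.0, 8.0) is doubled every 400 batches, until training
+            # either recovers or is aborted below 1.0e-05 (see the checks below).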
+ cur_grad_scale = scaler._scale.item() + if cur_grad_scale < 1.0 or (cur_grad_scale < 8.0 and batch_idx % 400 == 0): + scaler.update(cur_grad_scale * 2.0) + if cur_grad_scale < 0.01: + logging.warning(f"Grad scale is small: {cur_grad_scale}") + if cur_grad_scale < 1.0e-05: + raise RuntimeError( + f"grad_scale is too small, exiting: {cur_grad_scale}" + ) + + if batch_idx % params.log_interval == 0: + cur_lr = scheduler.get_last_lr()[0] + cur_grad_scale = scaler._scale.item() if params.use_fp16 else 1.0 + + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}, " + + (f"grad_scale: {scaler._scale.item()}" if params.use_fp16 else "") + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary(tb_writer, "train/tot_", params.batch_idx_train) + if params.use_fp16: + tb_writer.add_scalar( + "train/grad_scale", cur_grad_scale, params.batch_idx_train + ) + + if batch_idx % params.valid_interval == 0 and not params.print_diagnostics: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + sp=sp, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + logging.info( + f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB" + ) + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+      args:
+        The return value of get_parser().parse_args()
+    """
+    params = get_params()
+    params.update(vars(args))
+
+    fix_random_seed(params.seed)
+    if world_size > 1:
+        setup_dist(rank, world_size, params.master_port)
+
+    setup_logger(f"{params.exp_dir}/log/log-train")
+    logging.info("Training started")
+
+    if args.tensorboard and rank == 0:
+        tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
+    else:
+        tb_writer = None
+
+    device = torch.device("cpu")
+    if torch.cuda.is_available():
+        device = torch.device("cuda", rank)
+    logging.info(f"Device: {device}")
+
+    sp = spm.SentencePieceProcessor()
+    sp.load(params.bpe_model)
+
+    # <blk> is defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+    params.vocab_size = sp.get_piece_size()
+
+    logging.info(params)
+
+    logging.info("About to create model")
+    model = get_surt_model(params)
+
+    num_param = sum([p.numel() for p in model.parameters()])
+    logging.info(f"Number of model parameters: {num_param}")
+
+    assert params.save_every_n >= params.average_period
+    model_avg: Optional[nn.Module] = None
+    if rank == 0:
+        # model_avg is only used with rank 0
+        model_avg = copy.deepcopy(model)
+
+    assert params.start_epoch > 0, params.start_epoch
+    checkpoints = load_checkpoint_if_available(
+        params=params, model=model, model_avg=model_avg
+    )
+
+    model.to(device)
+
+    if checkpoints is None and params.model_init_ckpt is not None:
+        logging.info(
+            f"Initializing model with checkpoint from {params.model_init_ckpt}"
+        )
+        init_ckpt = torch.load(params.model_init_ckpt, map_location=device)
+        model.load_state_dict(init_ckpt["model"], strict=True)
+
+    if world_size > 1:
+        logging.info("Using DDP")
+        model = DDP(model, device_ids=[rank], find_unused_parameters=True)
+
+    parameters_names = []
+    parameters_names.append(
+        [name_param_pair[0] for name_param_pair in model.named_parameters()]
+    )
+    optimizer = ScaledAdam(
+        model.parameters(),
+        lr=params.base_lr,
+        clipping_scale=2.0,
+        parameters_names=parameters_names,
+    )
+
+    scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs)
+
+    if checkpoints and "optimizer" in checkpoints:
+        logging.info("Loading optimizer state dict")
+        optimizer.load_state_dict(checkpoints["optimizer"])
+
+    if (
+        checkpoints
+        and "scheduler" in checkpoints
+        and checkpoints["scheduler"] is not None
+    ):
+        logging.info("Loading scheduler state dict")
+        scheduler.load_state_dict(checkpoints["scheduler"])
+
+    if params.print_diagnostics:
+        diagnostic = diagnostics.attach_diagnostics(model)
+
+    libricss = LibriCssAsrDataModule(args)
+
+    train_cuts_ihm = libricss.libricss_cuts(split="dev", type="ihm-mix")
+    train_cuts_sdm = libricss.libricss_cuts(split="dev", type="sdm")
+    train_cuts = train_cuts_ihm + train_cuts_sdm
+
+    # This will create 2 copies of the sessions with different segmentation
+    train_cuts = train_cuts.trim_to_supervision_groups(
+        max_pause=0.1
+    ) + train_cuts.trim_to_supervision_groups(max_pause=0.5)
+    dev_cuts = libricss.libricss_cuts(split="dev", type="sdm")
+
+    if params.start_batch > 0 and checkpoints and "sampler" in checkpoints:
+        # We only load the sampler's state dict when it loads a checkpoint
+        # saved in the middle of an epoch
+        sampler_state_dict = checkpoints["sampler"]
+    else:
+        sampler_state_dict = None
+
+    train_dl = libricss.train_dataloaders(
+        train_cuts,
+        sampler_state_dict=sampler_state_dict,
+        return_sources=False,
+        strict=False,
+    )
+    valid_dl = libricss.valid_dataloaders(dev_cuts)
+
+    scaler = GradScaler(enabled=params.use_fp16, init_scale=1.0)
+    if checkpoints and "grad_scaler" in checkpoints:
+        logging.info("Loading grad scaler state dict")
+        scaler.load_state_dict(checkpoints["grad_scaler"])
+
+    for epoch in range(params.start_epoch, params.num_epochs + 1):
+        scheduler.step_epoch(epoch - 1)
+        fix_random_seed(params.seed + epoch - 1)
+        train_dl.sampler.set_epoch(epoch - 1)
+
+        if tb_writer is not None:
+            tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train)
+
+        params.cur_epoch = epoch
+
+        train_one_epoch(
+            params=params,
+            model=model,
+            model_avg=model_avg,
+            optimizer=optimizer,
+            scheduler=scheduler,
+            sp=sp,
+            train_dl=train_dl,
+            valid_dl=valid_dl,
+            scaler=scaler,
+            tb_writer=tb_writer,
+            world_size=world_size,
+            rank=rank,
+        )
+
+        if params.print_diagnostics:
+            diagnostic.print_diagnostics()
+            break
+
+        save_checkpoint(
+            params=params,
+            model=model,
+            model_avg=model_avg,
+            optimizer=optimizer,
+            scheduler=scheduler,
+            sampler=train_dl.sampler,
+            scaler=scaler,
+            rank=rank,
+        )
+
+    logging.info("Done!")
+
+    if world_size > 1:
+        torch.distributed.barrier()
+        cleanup_dist()
+
+
+def display_and_save_batch(
+    batch: dict,
+    params: AttributeDict,
+    sp: spm.SentencePieceProcessor,
+) -> None:
+    """Display the batch statistics and save the batch into disk.
+
+    Args:
+      batch:
+        A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`
+        for the content in it.
+      params:
+        Parameters for training. See :func:`get_params`.
+      sp:
+        The BPE model.
+    """
+    from lhotse.utils import uuid4
+
+    filename = f"{params.exp_dir}/batch-{uuid4()}.pt"
+    logging.info(f"Saving batch to {filename}")
+    torch.save(batch, filename)
+
+    features = batch["inputs"]
+
+    logging.info(f"features shape: {features.shape}")
+
+    y = [sp.encode(text_ch) for text_ch in batch["text"]]
+    num_tokens = [sum(len(yi) for yi in y_ch) for y_ch in y]
+    logging.info(f"num tokens: {num_tokens}")
+
+
+def main():
+    parser = get_parser()
+    LibriCssAsrDataModule.add_arguments(parser)
+    args = parser.parse_args()
+    args.exp_dir = Path(args.exp_dir)
+
+    world_size = args.world_size
+    assert world_size >= 1
+    if world_size > 1:
+        mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)
+    else:
+        run(rank=0, world_size=1, args=args)
+
+
+torch.set_num_threads(1)
+torch.set_num_interop_threads(1)
+torch.multiprocessing.set_sharing_strategy("file_system")
+
+if __name__ == "__main__":
+    main()
diff --git a/egs/libricss/SURT/dprnn_zipformer/zipformer.py b/egs/libricss/SURT/dprnn_zipformer/zipformer.py
new file mode 120000
index 000000000..ec183baa7
--- /dev/null
+++ b/egs/libricss/SURT/dprnn_zipformer/zipformer.py
@@ -0,0 +1 @@
+../../../librispeech/ASR/pruned_transducer_stateless7_streaming/zipformer.py
\ No newline at end of file
diff --git a/egs/libricss/SURT/heat.png b/egs/libricss/SURT/heat.png
new file mode 100644
index 0000000000000000000000000000000000000000..ac7ecfff4937c566d0d9d396e2f6d246d833f879
GIT binary patch
literal 305340
[305340 bytes of base85-encoded binary image data omitted]
z{Iq$TG_zV(oFUEBgsqq*N`jzcl}t0#1s{qvv+m3PYk%)Pl)f_Q&ia&X#*86SuLf`S z*7DZ9|LtE*F-5!J>mK?x$O@l;%YY-3&KRxEq`Yd7GnQz-8^;D+NRBZZVbsb53bpOw z-B^Si!Py?BEe5z{7l1k{+kwqMyD0F z1JL#0oIxY6yar+u9Olj}s%FJ)g_4u%K4`KX%^W_w-SAPT#=MUtfQ8Q|kDb&-CCPGL zC;x?f#^AClpCa9(jnxEyifx3_+a1P!4Mz5l){Rwvo=&`^M}vzTVyRyke{U+xv0AXO zh<6xLD-JUuhDX+2|4oe>X+rXdwj}@(eAn2k*QXsJA;$CZxQBVA}JW zFX0L)HhX2_SVj0RYA|a7rsy+x_I-r&-M);}azKRL3KLz@9W+|F%iki_W- zN?GdeL9A@?nK4T~mxH#+tU`YGRt`k(g8A$+i@{N{S5qAq;@e6D@n(kdT@Wph*%ab0 z2ILiRd2dg_&0r>;15XQfRho&pFX`h^+M->^jfM`c>~(2hzUFP+#%;{P_)gNq7#9; z;V*8B0#J59+14;g^6Sl7HjOqolN)GH|MYn-Ix9Cty-yuyQA`=Jk#$^Q|_I|O15mcIU zwYz%X2$QU~$JAzAL1F(wYIwedz>xJSguRFPYr?C_>uPitb{{dghj$H+ zP`Ew-a(#J4=7f!bm)CMj1WJD*R~s#u^|0VBvCeCg*4f>wBgq)?MZ)M3+d~OU%O5Zt zygrVFeL~#9u?5l`G8Jzdm~oU}M+Vu63za5V#r$>zU0mxPs+q6JfW*k}rO#;L3eG8d zrXDC8tZPuXCGYjGxT1K4-+GFGwD}*M8?6?ld%X*gE@5F4sgG@?xDl1DNwjh6QQYs7 zPqrGrd+|2rdH-%7*8K4sk-ot9gzUACP%zImlOUZv5Cko$Er(*EWguHJe>L#XfFt1f zt+79`=%ZtqSr@PCg(GUR-7_lwgXA!878U2NEwwNm#dEuWMU+p~vnR6Q>;P3{kRxF* zJv6X6IO78rIC12B+P}UyBi#9)*Gv;Apey{E|D(fbXd^8r7L=s&-&&aJsk)tOS-r;Y z0dO+HMDP1nv;ME>Hq00G9&3Ox7JJ0LRfJsw`EKjeCH#u=rXIkQY z+35!h407k5%&7c!BYai#p9wl1*CEI70X!6dHrb(K9xoepT;5X{$Ofu@$!qQ6iyMJs z=~ReyUB{`ekV+*L-)F>+gX4ZqZrMwD)9Dga4LK?9bF$Xcjro5bX77D5J~AvJmA8X< z*>B5;gWN$|eoViF^UY20eevxF`BdaqD?dnp%zC)_=Nbb5Bxc}%f}zKqY=05qSK3gu zQhq?rP`&AW$NYsgk<2hW{Y*tgtfjEyL23AhL=Xk!Auw)4<$>Zb|A43if8+_Z?rOLb zJJ2;SIyMsAFMQp5R4Uuv|zd z3($H5LaRe~x(UW+=^8~oO*UX6BD~XwDF_fgJH#UaFF&AH!0IDDj5!nlHfVeXYJLur z2Bh6P17iJtWqFQIs^5|VMlAaregUFyV@a#`B|qV5$hhupo!IYvr3yFHLdbD^m`j~B zSF-)`6ATa@ofqPo=`TqN79!i;#^5-U>&D4T8Hg>F^X-BiOT8lEX=PQmLg{LT4d5)< z7Z_};9Al-rzs#&Mg7tMhaNiC%&)+#=V&H~W-OS1gRFUm6x0_wBpTIpHoI9|~|EYHT zoT3kIDE=`!uNVAowa0fmP6owsxZ}ghg)@%v>F-WyCF1#&3#NDVHcX2_G92kkgS`YX z(j!v7j~e&Z)?6`-W^ysVezl<_0#|EbP}jUFapfZv%ywL;Sr-kN%r7k55)^B>*J5s` z{R?aK^@lvza8@TZ^P98LmM$O(T`z!A;+huKKoZO32b^fq)9gwpkXe-uYlnNp#!{vP z+7vE6xQ1Pw9wpPWD^u5Lmp;1T*02X;Aq4FweE*f*`w((?+(Y*qJLEA&=C7y+z43qn zDVCI%PhVO}RU(Q|P;bZ`o~-?mx_OFPk?Q7++!-J}Xz^6TH036Gi6Rrc=VCRqGkn7o zNBxWrPH}fbs#>GA4~GWtnFbsa(m1h@uLI^mQrz%$#$0$MeXTioLYc7of&YuGtM}N6 zd>{8cSeE#oTF*eZztH(!@Dlsl5TYfYD^fT7f_p%$$@B!MZZtf5ZvqaekeT}*C;`rv zOg)srxgz#{<0jL=zLZkD=E-v=kU=r_{FS*6NXdnl21tCO-hosI7F&RsXL0$X=wvKX zN}UREd(=z`;L-(EBi$vjFDmO(Z+=0-NwsVb^HjX|0ErSo0J?P_@Zek)%m0vF`VH)B(F{*#~__^_q;;b69a@EL2CRuJxAe*hW->&6wZb97w`+nA-+KP! 
zqDpyh&6V<^?mgK%^oJ11mJmTCIKUKoy`5=>1D4O>zSsBSu_GUpJRUrvoe9LyFV|{q z_?B!H0Y$mBiHwS2?PGb!{P;(5HBXA`UqWrDKta9Oy<~QUhf;=-x@}e8OJ*{FsSF-`rp-{P=XrNt|gGX@O5a-?^kzIpy3F2tmYJFt{wK}*i>_J^X}3#VwH;stZ=*=H=s zON^=ORn-;Efc_4-&~YTx%we*yhz!`rS1>>e`~?(Qw3Z&OkNDgch=Kk4`ofDIUDf$< zhpmGRb|xSJk^BkSv^aZ!X^PZ)Ojj{Jeb2{g;*tk6qSb$W#-(^XtG%`Pig(m{Eh z(75pPCh!bWq^F=Y4vIq!#n#Z%6{Sp$09k(RV?|8*!q4&D9!IE9 zKf>X-vp^LVclhPPA}pT3_6qtk{<@SbvA zABjDC$1AYk>t;-8C&FEmbqIt+6-V1RuM##YmJ_5lf7~Ai-Zo%!GzzidgI+6_uCBWH z@3-E_GL~RyHw~b_redwDR;&S(+L0QJR ze^2r`PP+3fQ^ymb!=6K*x+b{?p|x(w@J@U zxm2dz$u?Z$%!K@j!3*;qtxLhXUOW3-hwV)ONR7w7_@jS49f87~%-=8ltlr+bcn~bG z88XchjFX)^gmv6#6nhADJH5XpO@G}B3kcB(Sn!`2V%5=`&(8%MqCDY=d!aVm%jd)R zzOayVaJ^Dz>nI<^1g3=p_8x>Qy+CrL zL0;^t;@EFK)mWQC4X8xC<_l#)>{~sg6>Y$-@uM1);&jDt6SP_Z*RfER5 z6P}9f4Pth*)N{(gAxL=r$tmd%(4Qi1Jb%IK9!ZE6dnd#WOm?-m$uyLqeFw}+$k*c3 z!1iR0ANtm`Vd2&gf?Cm&Xw^O8RKW{ke5KU*y$$+`cS5BP3)it z56beBED-W*S9$73;soU_jR9W2C}1s4Iu#1**G#Zfhc_gj6USS{(?alJX1MPGd^JY_ z#*<+U=Z6_j$LHT0?QI!2KV+%~DqT1!Qy2(tinCz-Vsc;bzlA1j?vC{QJTiRzgN9cX zB3qU*oJQ6{4x)cGN~$#l*VZY~b#eWTGF%R}6VZ;*>ON}TF~oQ8`w|6MO7NrqVj`lS zH8Awb!G$k4ymern!u3UnkVC7y-mN+H(l)J_Dn*hJvyCa}t9FX;Mq77n;RHDydiiOP#GpBwN{)Rsm zA`8BR+(#&pD2f$?Dc?f){0>#83*aDLarO%&fTw}Q^F3xu zKcOl0W;&w`Pas~DfY4PVYl~$n?k4R#*xy$da)|np8Z|z^CbBs-eu}68%s@Es7pQZ9 z3a{cfhA*loUa$i#t;1&jR4fTQ4gv5dr}XSvVrHYcy~+XSF+}hicAGmIgddKM^`|k` z_fQMgN4q7@8VF-NDluU3ezkH8c4D{`CSg?#afK4bKkl~Q&8vc#C>W!f!E7N%Vl>+l ze>}_=XffZE$XQ^V_4?Sb%DgRbR!mA~!q%C=rGs#V613@GhJMYaY7tJvR`S03p+#crNRDKmzJ}ZwXbE1|3s=Bin+fsDB(j9yj2sYW8As_vQI| z>DlMz=XhjU4WhGH4!nE(`seH0K{tr+w@ASZCBq|(dgKjg%)u$b?g*>W!Mew^g$C)I z*0k;Uz0-Ptg8>a3^=en&9{AfYRR_ORVLu+9{=<`R_%Dx_71Jj{n$q(Ouxa z3?f_3S5z)z(|;UMmw;8IMy1WbbQ2SM8}bcJ5u0kiEUp1cJ~FlEp9AlI7|ASP$=pXQ zpHVdYHyhcLm{Rb{Ss2thJ2$35p19OBb=UeigwEpbEC^*`_>3@rp6@>!C}AuJ8+VbUUYA_<0l(T>2mh)1 zg*Y_vQoSd*1t+D>GGI$z>oEi!6#*&}4U&#rZyz|9fteB5($%SO!p;Lem%z%FXO832hS1l;EjR241MLmBZ!pRwJ#=}PLOaNmG zIwmBLCy2LUv2({AMsJ(!pfaX-ir257+b0<20=2slcoHyIxW`(wLzEaAOg?V_ja)LAL+yAkPdiuT)ma*Sn>_f4Uj~G4(bmm0o@* zJSO-~e_rX5BC>av0E4SqIR)$8e>)&}B6oKJXqZ`d!8+^TkzsH!EWn4nN-zHP&kA-o zEjXO}?khhh_P%3G07LMHneR*>2|p=672Lg~{w#fBIs`Nofamyrm*4P48`ybpyp#C` z{lA}a^Xh?{uG&ebqj-D$*JuR|%e@!DH3CzH`U*KVHuszee`{FrxH5g35CK0lQA zyB%D!5RGGeq6U z{<8!A<30?V3Ahj2>${T5|L*Jnue>`u42ydCiytxALCNa|Km=zT=53xY{LIkXdO-Z( zIa01Dv;h}NNr5|hO9slfa{Y~~e>y^R_o_G;Cm`eixc_zKfT;LWLxYTtbs$~6{^j~B zKjyy1;GBLvyb-4Rzu(tB?+e(n!eCy;e~k<9S%Lf5XpugfAt)%G`kNm{unNNj7}MEGxbwyyg^**r5Q z6}AXo%`+X~qJMR^B-uo$g?5kBlv9{wF927>e`Wym5c$AJ)eBX3I#$Pq0dZ=KrW0Sp@d$rltvzSrh<;dl4+Uy#@l!?geLP||nf;W7G$-L*8j zlwj6$z+Lo$OZvp;%a(3kB~%u^R%jUE`S3n>yv_)n=(Rw&{etulX9uu@sMos{6@|c` z|1_f7bFlUPPxmVHpNIZm?1zQo)IZ1k?*>@-KMx#`)K&lKigmks!On(2pj=k}HsV6a z{~d(~@5j^SpBI+@)o<&56fXJy1`_oDi%wlQfH@)v0Q0W~gspTYrI$EOhU! 
zTuovGOcScPQg!V^f#6f_h_RdBA&2Gi=l!aImIUht&}$thw0LcZlz>`m7&=*n!i+F;|@p zi4|bd;eBe89dQ(Mj;B#m#s-c!#wLOOglfTe*4A1eW@sRf$1O)HO=l`=l zSbX(=>VpLg;{U0y6%f?^`EP=h?yq^(KmD4zW2sPat&2XSNBZdVNVVqUuNWob@~biU5ASDp{@cfej-Gl>`Nk`Hv83q zY*ViFreLF}#qrA|V7YAhW&};Dji9R;R`v?^Mu3zx=%(_1IQ7t+LsX`3)PeL~17|ik z8lK#7`wmt#mZ;zvwSmQyywNLs*kMN76#=%w;vwCWBMpzi&lqwt=+q!aSynoUP!s6b zKj&VBfM!bc=;$e?|D0-VGM->8D=it7yn^@Zb((ElhAQlsakAT=77lD*Q&5{?%8ITI zNzbd9z#r?S7YSJ8ro5cSi^aG>IHs82ru)78?>mxcI>9Pa!#WGAw$mTOn8oq>!+ES* z*s_E`8wm(*dRRa~;}XKVz=OEX0aQ_l5aOg*``1R0{yq8w{EVW5`(+Z>*o>!UP`5$% z9dd?meV=1#LJ36(IVfraTF2fK0<2CKR(RU2?;H!pw5K1x?yK{jQHxcf6gbFrYKRhW zAr`W?X7?Ae-g}K+={LseuRk@3)Ew<99>7%`Z0A13vN3}Qfe$TwCJhEDECJ@)qM&>t*#@hxdz1g0^xIo~*$hawA?i>*^t(2R z`S?hIohsDhIi=Qk&+9rvJP{1^biVXO#(s767<{N2YMkK94)VD?`C5hzI-;>+A3P*( zX{H)HvXqkYye`Z6bCQEIp#p;;d)CraTNK=%|JSPoq|R^6m4H)KJzdMWF#Vw}PWIpi zoYp;jCj(32%iXCNk7<3Jwx)HY^HTgt#Ye*sBb996Wud=gg@POG(d=Uy=hHi10BV~M z7oU~DJ3<)Q^0GYr#Bjl3|MxJygW#~z=~tvkj!^O%z>ThB2?N2v$UZA{TUVRtMlJ{^Od&og}Uze z%8x9hRG#cT%w7IQoKg<&KW`GOtKJHg<7<<#VEO*;5j5|*a2Wor`{dn;SCJry|i0ESd1(hrV8Y9S?Bzr)s!{;Dv1^`fMQ zHT_;^3J88~6sJAGBn@0~zwe?2!{8^x`}v`QDT@k4N3aDOgZ3qTQZi)0iO{8TV4`b@ z;8q)}JN8b*DdfKz7!81dixC?8Ve|0Ch4E7LG=PsmUfeSFuMcQn;4cEGEv#W{Z5eCS zWXnp;wE)Ye&|xj#RnBxdTOx%s4O-Z~z~q|YJk@r$t`mbUN&u;I9q8%z$V5;IiFh0M zzk&|Qv_8ZAae$q*D#nTx?7X)8+%>Han$LM-_IwEt<;uueX%-d|0TjO5>o)q;1>#B>X}U`pxt|rR&O9X02cQbx_&-DeJm~7 z6%#OiH$eQ$zG_{2g9AO?4V#^jE!%F~Z`BACDr$GAPxENhJtLbVW8@g=Co#N-g(aaF z2yd3shY={SIrPQ6-S-M~^Y7;y@z2I(Cbla=O)!19y$SZ8 zf8*@T_V|QgICel!CjSM-r=szM=7FPmRV1_E&`GQ?J&PGl{aJXgmOyd1U60f&XXH{Ky%NLjjx- zp#|dFH(75I?tU5aXwAx-{dobj1?3WrTZZbLy}23Cyu*hI$royI8+4-sqZhYl6F@B! z{OyI!_T_W-H!lIgfx961kLBFj_g4b~``?;SLmqFLdwzlG(>A2|dwbSyMR>wfSPM>O z%NiunnQ-3$EY@>eE_;h)?yv&B)Xdig(=t1&`7U%|v)5Bv!J7^0&cAy;NYL_3hA2${ zvzbJJBKA**`41f+=yRs!Hc#I%qOY$H1u=$LRZ|99P+aK~7D$9v4DPQcn9)vBD*zYz zGvt1&gJpp;;X`^sOAlrF*(+6`nS769Yn$xl4G4h-(D8E~FX-SVDnFL8z9{`VP~Qd>Kh8Dsi}XEo0Atki z^Ub8!nZq>fe^-N~diUHbt|q{7@l!mXO7<}~>b}&{Bk$N^N zknVtp?*n2uohPAp0Wb?wz9}0l9aY_opg-FP!s{yryZ8%smI7G&3VxrPdoKh5V&LdF zlK`J1a7$UHSQnxi{owjGaT!o=l;ZByzctB2y+Y+NCR(u#AkUmNptRFCKndb!)B9(k z|5>&#AQ}6F$5@Qem!Jvb?`HwADQkdRg5C)|R13#DBtr`um%4p~i1)HVAyyY@!dcbD z39R9Mp;y+~ZNr)ypH+FlCzy_@d*0tBRdODEhr1Sivz)v){BMTVl-nI#Sq?Ydyo)=x zIi~3c4S;e{oZux%r->2T?3^A>V0;G8Pb7370Up8cxp{SAIZyBrnf&+ft~b7C+>A+sZTFm4+He5RG>*R<~=cPp@QbqgJ*$B`8f*nZp)1oh`6-dON z={Ok*vT zpBUB&mt?>NbyAjcpf&;PzGyLVn|;5@{RAZv)-hAr$(NQD6`0HGk{-W(Iq!a-*@ zD3MYRL}gk7;Ab1qOquCrwN_|CR(yx!ZHS5-2FJ;57oWnHg9MyybCm$eE>k<;x&niR zebL0i2v2|as{?$hVUGpn$12b^^*x*((}_l z;+2fQeiV60%b}IGe)o?1f8QEm(^s)4NtbMo#09NYKRpEtdsvl5=_SokySKVRm4PX9 z@tE%^O~T?w-96~Q6T+(^K?G%DPpr`8u}g(IdzfJ>3HSC1MOyluBW!WAJidk(fAV8v zcmC6z7u-(A?PB6P7D-HY2sEbRD-&A7mVPk+V=TkDP<{hczhtLNP+cb9a|nd9b&=y= zB-dSjki9j0v9tuc`Aq_GJ2bHbs+~Dk3lF(+HklXE{2sOoZmNSq-eW zcDrXd{BRp_{|js&Q27r3=zHEdGlt(e21?!`2k@X|H@}{GOMJQ9QiSoLa&mISHw5a0 z)Az%}G6e=DuIisUK~GqW#Ix57+qvg!p9Z)8M^Y-}hXV>(39)`)R`6Y+1?2hJTQRD@U@r&F zH9P}99lQ$c-?k^hj-&-eYJ4l`afe4EBjG*xZ3Uf5!gm+Je3ENk_*Ix@tVHue?^rF0 zx!1-bN`0ixT}B?0FIs|?8h!v-cZ>&v-ea!jjw|9VS zs`XTm1gU{~soGPK3k1j)q$$lBdq@1z=$Hdl2qRK9khNhWV14ztv~NxN$&<(40!Z%Y zD%_nrm?GaRticUTGH?c~2g^_lg#}A`4GNMc%)`s4?dQAqIkT~R5 zzQolNn84s>tPgqdfyNRc>1D`0?e0E_s33pd+;);KhgYfTbZ`SfrN5oe>pWra6o=Q_ z`un6MC_QKjM0@iNtF2=80`x&9KYjKh8qgOlN^9&HVh~BMo-G)0bh5=-<|p6*F$QzCYV_0zKPr@IKGD*AX)h{P6s#R;!@-|Ek=J$2x)>I+|V|fE`D)P#N zRTmnb9iWuHZ`T5T6l9)#SIXTan3~A`XasMDYy_Sx8DONMd@9KtPY=+nj0g=C^yP2_ z_!-2&g?^~5H=8%y#iAeeBnFQ=#;r+g-{?KsJMW7P7i8CM31{O=SX7Avq4onh25s0C z&LkflH3i|m4Vwtj4SofknBiaoma0D?Yr8uh!sSP-iqxW3@_?^6rE}EyHRD6HhCY{n 
z*!Ollf*L2(%kVe;f3Wvn%g$=smgXB(fG`r5Ab|kk5l(jqgb@;6xTmkiTzj8$GAkpi zqD<6Hu9$oObIKru8KaNhTmRbeQSaWm!U&kacaT9$3^hx=g;dL}sOpKOT{z5I)fd;< zI3<}O;hJs?3@t2aqRM`>IxFHeb1Z#$vZQ1$he4ejj9r$^89hc-Ujkwu`ateSF1(fN^!;~XPX}+| zbi^_uo5}V~?x9dVOZ#fl`yG47ysUTY@bcYB6VZfNE4 ztLofCby;{!tR9B`A~Yz*cKNzNd|2L}@o?3}wP1x>Dawy_ohcb`R^y6N1K4S9|PBunjVVO(soGWYdG6Qcs0Di8H9Bx1~R$SLi4q_Nm4P>18>gu_6;ZLO93N zYWxi(ZMKg06WVHD-@EqJMJ(C`zELIVbObj|rOsd-2NzqvD*Q)YyN$`Ow9E45Y2|p) zbJl9$^WfQ>-t1wIs?<5@FS>xX%xjG_kF%MthpQgt=VSA~;6utyD~HkjLB!_oE|b;z z8a`&(7F-iuM;~jTBR{n7xAPQuBqnrPzK4E3*)% z^Lx;NpNiq4aM&*=={Xaf$6o8!5Kmh$dME-nCbI!fX zVEvL;T`D~ob9{L`F&hi5eSSTob$?>F?PC%#O2C`twzDql}~8u>8CS5|}1vK?&K>vC%^ zKivr#{r;+_eP$ew81LgK?~gML6{P7)gXfA5jSWF=^e1{StrZ|A)N zu`13HzMy&XxR*A;!-RHNVuYu~p*tM|`R%SpZ)sOqNSqAlobVCPOBDgg)y_*&-fZZ( zz~R1nL?!Z4=~K3+#aSH~9tAx4taDkIuRwr1&H=2)dR;z)1hh9GVzx^=OnuI~Q^flT zRv6_E!UA5|&Jr&F&pF2gC_p)v>~));s9pGI<0W3zKj&(XJ(XsEYOe|1AZ0*CUsq7B z`3@%EV>RRRZhX6K`xAw(B!c)&Zs&f<1%F2E=ytRrJb3Gh$BPH=c0R&>Mw%TwLED2a zsL>Z1knB!!6R}Ss8HK(b@dTH_Wo;Zmd0-Mf4#Bk4zMLq_aiF!I)(~!gl0YjuW^|@4 zTgYQ6eQ;n}hW}j8H=P@Ej?h+3UH<>kI8YpxsJH-#|Ij#`i2sd2AhtjDUt|tP?tf#b ze;)m>LI;NU=du4^L=L#&_r3g|i5y;a3Ib#^kPJ&H#7JmZDU{XKbZK24A3(pMJ$KG1$33fcGQ zmFD;;Od1FBp9coKIr-N^|2nq+Z|GG1%lY~DiTEF#oc{HR$8!2_cjwb$K@9*nIJV&;EoECsqD;UH>1i)_-q-|G)$n?;K$zN5K`WeI>h)DJz%# zUVtSsJ`i*BKacKr#=myqlE%o`QT#uDVyRVQLOhqgU^+8#yXLz3h#US}dC&dd@*e)YUyA=Idepz)1lWk?=n@hC+o$!FoBVQn*Nr#um+Wr!^M(8fSWrxD$g&|IYD8*KerAt%8%Gz*sBX? z;+)Rhe-(oNsp!wB*}AFp*TV46@yc9Wh$CMo7N`LKk^noM@TaE!+#srO{tL9cCZNF9 zY?Znk{u96+=XV|tt8Qzkhp;KN{0W?==6vgQ&ZGRL@5dN)2Ze%|IX~~>{^XpeiSWOU z>%Xs~|1XYfB6wjgGBO$hfX#HBDRdaLjby>>!8;~$(vbA-u0*BM#q*uVjIjVeGAaW{ z;KCZZg9S~W!VVeT4!2!-ugskrqBfCY%z^=GJzgap89#vbs=VwlU`W>$y-#1o#@NXr zhPi1l#CAsOez+YcQj}DJJOCAJZOWgF{>^((wmWanTTHkVIH&+MFf_%OEG;kSA+Vg0 zsy@y&G(dV!<$wh3Yn}CbUf8dY#(L8MZeLrk(RJ0fyvh__1Nb~Tm6;GmMf0#_)7uVq zY)5x2H2>~cBq#sc3-CqHDu#ZS2g5cm1Rq$iaI(oK0WS7_zuLPw0e}EkZON-A49<@D zR^+|ygaw|RN%VR~43)6{w`StFMMIZNnSIjcz5PDK14O$rzV|GBL`%42WV_}4fxONyGAaG^PZ%L${$PBhH)+BpTM+E zPcvt+dfwOdvqO`-{l=OJFo(6nKbWGV`w5~46)1`%BDwV6k0%|CfvATZ)UH<(pOc13mdnNL6frdl)2{Q0aKSg4*+*4sbz*($5++StZ^lt!QwyB;sw+s|VU{gPMMPX?Zs< zlIleV`ZQ)r55>9jr63hLI}yrfYe7u(FFbO<;?!T|<(-M=ajieeWd_ehKLA29b13S~a&@PC{Hk}hG7A-=CYjYc%I8!0Gge&Gf}^~$^}!tBHT z{mqynk04+gj>+?IEsgNZ=fzY^h$pi}mG%MxBvM@QcGuO(>=8=t-g`TZ_LL^`hg4^$SqV?j5@5+>sN@EmJHM}-tHD@#J3Bj zZ7U?T&ClD)Us5T~RT$Ncn1!cgy8^r#UI_yea_7rKZ+zbp*FYl%(rJ<+shirl*vrxG zsH=nh2DItAkAV0o)#^0O4f@velN+9DVH4(5%Op4J?l~iw9R~FfNhd=%5My;2AEZl^ukm*QL}+|*C_hNEclGulEl1sQ*{>{Q2+;=k_J#g# zh+meiqWF+ahn)H6_J>VXGQEx#swTpeX@;^|h2S)mbJDu6)}HWiecJ0L?NQ;c79GC* z?kFF>2Mwb?cRP%9;M^@LGGZ6UT#xNTM>0PqI}9FbmgvI6Tj%m4y&wj4Zs!DJ^8KS3 zsk%Wz+m)K>rI{L=hl=}@uOr;n3xTf=y@uYoc_=27X+#s+o18Un5FGB34jw)}IlmO( zB>n!0lhCC-Uk62K%-&vzlz*f5R3HZ!jyw{F(n25NpASo1FV}ui{lh&Vs-9;p9j3Oh zLV7JvN;Ov@`loSh#vZzt8J1=5IRcLT^Ap(*OMQWu4&W6{-G=WkRv-|1A)N};D>Hh; zEKcUGI{BL3dWd(_<$E~c@;rX0bX6`Zh*M~oC%@+|DYP?rU(G)%{#(6$M_VSC-(D>I zt-}kZ82zit$)-s7Q~jK`pdfyZbTMuQ&mB59DyJ{2e{ACwJQiHG^#`;j_sBlu+g_u5b1iflpWWN8yr7Pr= zd+TYF+7#{_etwL)mlz*7zXG`jvtNRO5yH*pO9Jf?J82g-Ckeo#!}82X&HRbnbbhm` z&4#BI#Ca~Hj#;=*TtNHeg(3pl2+F+Ja*S|;_Hh`oCW;D!E2)$GQk*(|NcAaYdR4*tSMqLf?Z`8mLyst%ONApOccHfL)o{GlmLGkV_7dEQcLc_}djXFHAp~!B zI!4iEjvT)~uKH>d9 zToi&lX7#A0u3GP#P4A4d! 
zQ1I9-_Lt3o(%?6=evIwprotT_(h_9jx?POG27Q(bg>+qe^ZLe$)vA zBTA;Fp86lLoNYOAz{j4h1;N5wy6h%C91S*ZlWva_LErGk;MXdgN4ac3pE6M%&c7LlPlpw1q6rg?`-daQT;Y(zfg$csp$oeTK`*37& z;Qm`%=L9WdRm5E9H5>y<5#wGEgcl{^+@aMXYuB|AbqGiC=UZ&h-SXTMXg74{=u1op zI5Vk;%(}&W&(NC67>b<_fA+J)Jn}e6clYZk+TZZ?v zAmbkbTNaM^-{WCJeSsLuszs8e2%D`z|8|t8vJaWc#@z#&*e|K|wM{L;b19{fu@_$I z<5|Ki*C{^x{EB4V@fWOtt6!Z0HlEwp`(b|iY(9ByyWcFku5Hlt(08fOnS0YB-UPoK z^yJCTrFnQDx?SFHkE{qR9MlO`8dWmZH&<@}0%h(8Ywaf&`}@I!7Lp2+gEjbtf2&k$ zu{i#sFFkVCK)ZSAAu@u9^R@HKkbvifqwxw1atE>%-5iEb8D783^08dL_U{Ocf7-T@gVbwIJEBeE)F=j(%o1GCS@F_yB$D#=P|%a1-Bqn>)r?eu}f)e{R`*2 zID4$aa3sLs^j6E+a|3JaBhat+>pK1pbKOXL2G{2?^w1l1rnB#BK00Y*15>mQ2O;@% zCd3O>`nL#WC-QKBg)Vo3&R+`n!OxgKp{a$}F1+GnEhs%rn1~!H(NC^k$Ft$+Y1J{Y z+b;5+XmL4emksTM_T%@Z#Y2_708!9yf2zRCgc?p)Zu9ZzTfKHbK{;#&0tCZ$B%TgD zL0^nb`Lqt{cX`c+n^UkpW;#}^!M?1Z4%7Rlk%#5C?yIEU zhH4qo$&_GSU_;W#sKS8m!_h^%;0;ztBwl2`-D69^n1?3eBa93Nit(S1xnZdwHGdtF zB3Hx+`IixBijLpD_xJBHoT~lOM#L{%Xr7aia7Vi+mOqOEc{r)-P(mhX?!gMYVy|HX zwWRr`70Mh7=5zf-3fUk>42`t>q!)k|JLSDAeYBGC@*J@AnJs zyfsvm1hoFv%-_-hkl-^LbXFc(ur=^EUk?-Xa$@zSCk4!aHt~#&UCDibmarI(uCAZN z6#lY4kmYz!aCt5igD$k$@gp%%{{$f%M}*e2*tb`CT<8Hgtv=i@e_!o=f-L#tuNo0B z5FMZvQtsCpiGT41EYb zs6m6)F+&1W<$jb4^3iNkWTTziuoc5Io|u9pGx*Tvn0zNy2_yNmac`74%CCa&FZK6W zF4|D@<4ip=VnOzk*td4f#qdHb!NH`PNxiM>%1Q4Rxp;>M)u!E^L2v~?{c8ES?jt6H zM1NLmhf9^nyxN6AX@Hp5fghgz{P}Tm-4M)Eyi1_2ZNt1FAkEtMK1J?pw`ZV5Oi?mV z!j!H^c{Q8$*8^cC#dSQTCEIA$rtq|{bku70-5ddc_xqXxM~G8rD={y9y*iXSH`R8Ci&vyN!ndDbqsM|#tkT9{~2E+ChA};hDV!OT|7R}wzGymR_5WwQ?e}m;& zaryk?K0nCQKA*-inLfnOiiK1U-`47{vNOg+*3qM=W+ZmNgvJib$SD9F~^l4+}_psn|xX4oYnWSBc*14TO{Mr_ ze9}Da-fzWan6Kb39mlblO>NX)z#d9OGCxn{5wII8YB`Wl8r1ADbu%APVfgutM$qt9 zo@0X4=DG{gP8r5}#CZNuk56huA_ZNjlR1UQ#vNL-BipugC%dmdQFzasb{{&(yVdOZ z@I54}2{UOAq1o{A^!WCcaE2}4{K~Mp*K^tRLhUnhl_JUk5KYM=P5?^kQQOxB8a@RE%-b$2Dg-#a!L%?W_jWn!QE`esLugwbG%r z0Y})weIDWX?an!tHS9-B+_Ul0e%CvZi+~y)Q~ z;EjG_!iu)he(ElY|B1t=9_(8YTlaowhx08v+|r|9QdQBqwOt-^*xnMhc#}OB&RemK z*HRKxzHxGbzMW^6Q?PJ=TfQNWuul%D{;_WJwoP3|dZJ*DM zdNt8Hmu!K?P(zS%lKHfy9^X>ZvM4@<@+o>0KeWQ=UIvM$|Gw0+bqe7Y%~f!3P1RD} z?}QISLfC{6f;}Ku7PhZ3cXa(#(PCgk$e**1K{6lpJ4^j#O<;MrY7ZDoz>LJ@eKtT8 zr`2JR1;NLBLIdtw^Xb|?DRI43iqWV3>3uN7N^vifB;gQ0R)z@PUT}k*cFp>~$Z~&{ zD(=xnu6~90QC!3O9=x>jabNwU0Ai|d`i@`Y!8qPtEb{_RDNBH-|AP(42H%eM%R{p` z|1IrE`+8wfH*Fxw>ITm{5Io#E|L#{E&&J`Kdo}k5mMPlhWg}T+uo|g~@-Jfc%U~lx znhfz$wdscSa#7qj>cYW(VptBIn42iuTvJ6IB*hx@el0))o__R*KE9g=-5`k<_I@V6 zs{0ovd^t)N0}jgG_jJk8jCeQ|myg&VxYL;#h0=OtGV{ElzJ^rf`Fi4vJ*WzrBT~Nt z&IKywhsyM1ICpq?DYKQm%J>LLc|ULA)DN0bhNm8N{M&SsypaBo53aa?SG{JFaX{F` z36Ikwd*hO1^Fv3Slv+OVp_m+jJ2I(Q;tEiX0`dcGIDb zz0r;NI}OG)KLXA?ry;_v^vKzsbgMDX3Dl`Na)x9WbIV^D=Um<1?t8qw{0nJWGgua~ z0}S%iSo>uwOUL^O5jaF64xc9zJk;=Lfvm{wqDt!Vb|YT;&`n`u`S*r97*=x-+q11U z=QzTU=+MZCN}oc#oa`n>M7ik!9|<^#k8b~#<-DBVXND>=n0mgr>>duM*Zkhj-_Ep1 z9_)A+`y!;Rk~@77GuY1f_hTNoRz?t85zAD#zDbAEgwQc$?!GVi5;oiyHJL}+*&Utm z=zHhkFEvYN4qTUudgaXq?W>1uZa78ozfgGOHY~-P^~A}cP^j1R+9bCRy14d1gn%d2 zJ~`p$m8ft?nER5>_ISW2$#0$?{l<4rH*m#evcLJ`b%w$3+Wnes9ZoCZ_rPAe@b2~r zG-rdzK%hP7${cW!`J}fzQTzJt0l>yO@9gr8DBKEr?jg?=kM-|?EuT7@6!v2!eeZ`~ z(+|J^!7Hb9C7*na0}CICi@#qwQn|>1(Y|dVFOm}qK+=#3_y&R^dnyB1G9}MOc9x!t zgRJcr?!Pk>Aa|CSr)`+qbnf&EIqEj?Lxvv|eZc4E6$DbWAXE#)`F39!F7ska{bmKV zApbDn(waPsx6kpLTJFfNho(=jj;TwvnQsPmuIyhwu`<$|K1q+9z zxcl(kQ8$;GkwovrRicIF0y1BE;2B^Bw=qBM35Jh~^LBs03qALY8!NtJnXx`!p-CfzV>Kla9Ek_Y!4HwbSf zJ1*XrAdX{LM{K|6SNVbcK}!|YUCEfy=KWF!D`;+pn+aa&Z;z=YDyZE?1sXS7!I3Ym zml;1g0jN~M0a3F$Z+!ibuMX>IPy6tlr+s+j;LwQM2 zuF8NC@=e@2@W8C~$mJs)e;0H{Z2^0RpP3Sl;k%13clY;$%ZJgmZddsB;<^a5&;E@a zTmFRhv0T*9M4dX8`Qx^0{OuzB(#VC>@KP-xYI?x{Z>oeC^?vh`F;Ml2h0M!#27iy{mT%be@yy6GP 
zc;DhGEyG*oO$uEFr?lVhSqcJF{rw&z+`T75)8lt4&Qvu^#xws8R-#f6WrY!OJYsGW zuIDjf-d@*rZJC&!o4|rKNbABvWHZcR>ioWK}_wCuPn&a;yWd%wj z7s={=k<{KL_iLWL>ju4c7GFM6)`<21NynP7JKpP8D)tvVAQj^;fi=mh)IcM^>J3x0 z1cXb`;KjVF@GR&$zxrj(Xq{x7@mRCUp~&XrPiWPwia+cXIb-4j3-7Zxl3%>r`M`Dc zIz8rzu4`qub7%9ww1e#)rWh+6Y!_sZa5;#$%Al{Z9;*d`SA}et`Odx_ z4LFKE`1%Q2*Ujd^3$X(h;gg4|1Dg?Ccyqz@o*TOJJw}zmg8B;*i1AUwXZr|2+H82f z@iLdYJ=;fXtLc5v)aE@LW=~g>n?JOWUhhE>g21-qB$otLceCYpy-Fj!uk?Og`D&xM z<+Sk=P_J&;!yssoaqwTY6Rbxyb$L#?o%fOcqIK(%w#eTLUjkY}Q;hbwk%43Q{!bDL zHR-ot2_5lgLIv&dbCK8}dl0lWgp<*%^7moEa4Nf1>LdAmCbrM}@d<+P{i0tb@|Jy) zJlooH^g*)KeaWigaQ6HWY8Z4{*V2%*u)FPBM^V<-X>u|u4N|%Wh7fx!dOt>fre)34 zEZUzfUnIXeATWPOsw-NDz7TsXUi5w$ChI5aUCA%OGbA7T#Xi;&?Z8AuH_@|Fh0mg$t3r;KnY=r- zxxX@(-tCFAtL*6<5I$mx=BV2zixA{8rk$a;1!-+m^WF(!%tt@zN{P7yuQ`BQ&yWIPacO|C=W~VIJyx0cwV?hIGFb{OaDB*rGH+Z&)2S9<@ z>*b@t4J#q(>6i9z=84MniLt8cmH({khWJrXpLWyqcN>mGWgs2yXXCAnBd=0}>GN;c zg*7($LwWfrWB#rQAm|xMy=Hzz(0I3Dsd+`mH(9wn+~TkA>wsx`l~tsu!CGj>Inl!zW#< zAF*DvmtF2d-9X%h7u#>g)`9`8>xp52E#ir~iUR{ZmW$LB1i!l(2?87jA|Jvj4v@$aWnhQt_z zvbX#4{db9qSNFcii~n0-b?eXCx(_08U6Ndhu6l~-Z?~*DZUTJJu=a+BL{!j$!to}b zdY1Fq3}hB!Xt@o1e}RlR8T6Zz9iEl!#d95h;_LEs+>edLl`uZh(SH^xD0|d!4bRHD zT8oMVrKO^kx23N%Z7FBzV+6$7dgQ)VY{GDl z3eH{G$r-gKL;JzPQ++$~0HAo(pGaZfAbiz=s6kx2O-vNdPFha1%m1o26TvM2j#da> zgX-0X_`BY|zlU=#o`5|!BGA;#ctGpC{USMIorFId`4X1wn)RB&YB<08{i_)_-8Rca zrjMhVdEoC{{gs# zU~~HXX-T+i1s-`MRHir4gbne%`9q3udpubg;twy+K$H8(Vgig=PzDdmO>h~doTfPQ zeg5iK#=D}Ey8YzsTng^C_)>fG>C5lgUKPTvjI-0>8*H^fABKIs1Chk=rn=ynSQA4{ zvqh{mSPNFkYI0u!RHDvRhlJ6Aw!EKAdXf~{pnz?W@!YB>Jk9MMTS?jc?6n-*56%ra z92k@%cmEA_`?@D6`0~a)fc!RJ>*4?r8USk&@k|5G(W|G31vbFSoXF4k&OIh7#rj%$ zs(R1vQA}Xyf7kr^HddnSem^iW+NUpqIZgNnDW~Vj-M>ixBVpI$UCL9Eoz*OMvkZ*< zo1&~5SdJ)B>96-^_?>L@4(W6@D(?7R>xF&qcE9*m{UavtaAw9v%c@^2b8Z%_=6vBa z1vRu%R7$3w`{4=N4_r@W_50jxZAi zi|_tNfH<#;cFW2{TC4qD78iEq?x*I`|3A0I{O_< zE)y9SJ^n>WVKC&uME;l?U!iEc)^^*`a2DiloNV1}s+KuRNWrT)pXqa~#I4f# zcA_T0?@<;}eX~Jj(PG$M^!H7_KZ;&IOt4$pZ{as;r>Quku*Yrk z`Qk{-J_WSRai5oi{l21NX`d+eQ7PpDNBRw2aKA78^7TBgX^ppa_^CXa_~9L^v-+jI z80Pr-wH|+TkOq*tefT2xv+{^CSO6|Gu}!9!It!c2q`hw&6T7fk)U3~T^&&kFxM8O~ zYwqeVMIM*L)ddc4;7as?NPK#{BN5rcSu7&1OWRf3l#3;0T=hU91;n-POlo)W9k`x} z8(Eqd&^W@}8d9|%%J0;MsH&_V8n^Se7a<7OrY460{kiIm_OCO+&1=4BtGE^-_2ePq zblS0kEAgHG%10QlGc$y$ZFsXkO3zZsLH|dSz2o&u8{H&ap65~ovv%%!zsZO|`*$Eu ze(#FSgM&ng*zx+guy`p&I_PqoniE+2O)oA?)9< zeff8n$f_Vl@}xs=ZtlPL5t#WFpHbYdlw*D&X#O|KUfa83umAy|=pTEH91Sam9pt+~ zK>Hl^|8Acx=;+7)Za==hIEzX6IZ7V9;s100-b6V|iQO!}L3!oQcn2nw;N&(<4qNqr zZxbZt5l!gPG5Pm?7EQxHpNLh3j9mK+r=F(8yv~VJ#tLx7!UNVGLZVA#nC?rL%ireG zZF8-s%`?I<2YVjbOqoXGDCzC6OZ^k;`AWVRx_sewMOu^j{4Vu^)YrZkNZI!|*jk&u zRC&H&K`L-5yO`nXR^sq@9AUwSbP_Cn44O2;nJAE|v1~ZZ?E~rg?LVw@{x%uHe(PfP zJD!Bo6?`S`0FT>~J5mOBO(}k&wkHekPa!@pj>A7+!ZXj95Q@Dr!*`1`5IS=H+yKAO zD>*0wZnG)*gB@wCBTV>+a-N)=0Fa^CJH}<9xFQv?&u`1BB!y}W^h6H;u{D#R+@`GC z2V9FVSNKu>sng-TLk*Sibk*Uux@wE>4Ly5w;`c#b2}#^g%6sk<_LJ@d45ZN4NXV6v z-^Al;elr$vrm$~RV&x;p05}FWqv;c#@j4a4AB?s_zIj~Zt%FV$8cMa5b;#i}jIGT~ zXr9O8UBya|H@~m#;O*0!z1~&^Gt0^|>P1__oIKoKq?qi(2x~R0`hq`@lls~{Yg6{m z^2U?Tc6Rn+eS06S=-#;Ms~)X)%pDK&d@Mx&ay?BMvB}mfoPUd*duLcckiq+-P{K-$ zzQ>{P(QXb_@RFir5{XbhaV_ffNZWUsb|Kujv1?!2O{xP|=H-(`fLx4l2kBrA274c_ z71}}~AZz%|q|!RExe#jmfeTTY-ZHLNZ#GwkPOma$crJ5>BBYySrqP_)SluyLQnn!N0DXm@JxOd(m59rstr{7o)sq|4h!8raYN>MDoIa#Ut2?nIM1N})a5s==dPm0to z?%Bfze?ae}c_D$mrAw?!kU)``x?u@sG+|2G!a*BY1qyeigIT(FH}T5ANiM#5KJ(7n+HYh*KBS zB!|99XU_nu^$rWo(_)MZ2p;vIl+nF388~Fd0q?2@qCRizwV;Two;Jy7%?L2L98Not z@QXklJIPII&8m$S4t(bP4%ZEuAXfL%V4;YU1ivZg68GS8nX3sySofe>-?(m&xyn2i zYriu+YQ73A?vw|>O%X<824ArJA|_#Oli!ma@(&|R?+7yYV2(~xsIQZ{_{Nd 
zo&0ld`?yiQmaiUDWguSwhsOiylOF2vt(<293#+QXhZFqq%os}^B%$I97U?)$((VB* zy|?k2AIinX>Sh(3%$Mu?g}l{`h%}GAvUZ^!0T04Ed!*C2yYa*YA&C1>dl(MbC~%Tw z^7AQpg^PS#Ex~OXXfxiUoiVs{F#o~|O6VpIg!KxpP{%?SAz*i#KY@PeqAkk{lC-{% zUhKSJmh0XrH!}L%2v8qaug=ehUtqIcjmvqEaKqF6d2DylMb;?J(dS;QOs z0G||0u870<0KPc6Dk7D3M-H?s>3y`qc(ve)IaM;o30>QLONzS|?owRJi6{ zy0D+J?|=`1!uP^uHWv2y{h~;d>QY3@$}cpqS`A#w{5(Bx8A0q zbIeRVk>$I8U9iM!3@t5%V_eAmD;Z2I2xP)YJhK_4XutaIjj>}dg;y!v4^edeI5q9k z5T~>u^Hg(075&%m*@QKe&AxE5UD&?0G4sh(x3^X7*}uacz1wW}dWP@#H+2&)eg&`!@2n%pDVy@D1Y}e5O&>b!zNVh1@5%&l5G1 zOC|V(*GhU0gyIRuQ=riCjdz=uD!af75rCu_mZdiy@;p4ePiHlNctc_Br}6sv4sEki z)s3zbmFti+F@jHp(jU2}k}mP@*33@nJsaYgbMpl`x|p8HAbYG^M9lh3iXjR*Pi7E(Kg7PPk5A{lQSHr2N7 zHx$r+i$rREX-?zw6xSJYB>dKiW3$8$9ZdvBC{AeJ$u2BbL+ z4F%qML8GMbYJ4dn8$!{30 zzWIYP4%{QA@3$;Ca0J7p_g}zYBs+7HFv|BMWMJvTrRdRDMe1xRA2^F=iK({_bmueK z2Mf#XoD!c6k?iN$(eM-b)7(`{2IJz(@hX+2kZNrH%ZE)n?2(^e{O;;-i8#=B-k zpMm|fx+s$6JE1F=AHaX$Y758_l_OHjFk~pwtulbToDS2ZeIeG zifL+`iDPJrQv@jS=ZKaDB8DSyV=wTzgv^$Lx%`?xQ8c=VQ#V|t(W69t)$~{E5hyOi zQ0uLG73cYTanGX4_i&(M-jEC(6D_xJlt1By-c4W67&M@g9^sxpD2lF5JZi7-^20J( zunq2To;jp=8Z;-g<>xj@ul9M^jq|FnoQN*mA$Y)Hr)RR6^ZAFRFklREhX5R?-W8Vj zy=G5I_qhahQ6_shloYMu(;oxtp}mvEs@BcR!vFcIzdZ$ zXL|l+bJ`Av9@Q^)!YjePmh=k@9**KQWi@U(4CMWqlVo}Ehe+}7a^a+rJ{Uejmc%8T zgdoBDq%!G5Hp*`*W}&EYeERU4f9M#-CRO;>e@~Awe`iAR&|g8N^+K!EPh<@k>&W3k z3TgPYysl^g4cno1uq2a|qoYScLB>xtnklE8W_pV2R#o`{rp=`ff~=8Mh~1UQ0DxU# zQ5Cxv7V&snu!1R!f`w$Yvr_KZ4BA8FQ@Xn9m%VAlkRHNIo|U`1e_;%elV%9V01c9vDHvDFGKHb8*R z(aSjd$Mee*&$dmSI#B3Os2Vh-{G9LB<9R*J0F!LegK_CZFB|QV9KGODK75OQoFDO$ ztOlBP{p!>B-{>hJ6EdWPPu>|C*^?=9z`M=@zW6!S%Csu!9n{P+k6DMXs3wtPbZR}+ zg|H2*eNSgC9`a3^^OGJ1P>P&U$vJKLr;ni9e19(=Mm3R=W8n;<-O5k&`g+HhlI)Mo zJkhHrLEqiiQ3;vMEf3L9>oH4&(igt=sfKFO8cjvM&;Yr&pxX`1`%_Ta*A)8qAw8|F z-UJ64sfn^!n$a4l=sb^`hLv_qdF`>jVF#o8K^tz|H)|)i<8L0tum89Q=TCiytmr9&p$mx+bGV$wQbxD!>*VLMHZc_obWYP; zq%v_zy5LZZskI+|IB7lW;QEyrx7WJ#r}qcqTC4sO-ZQY;ti5k-yt6KxL{GbuPn{{K z4q66^fI+cdX?%Q9b*4W&Zm?>=hlJ$o!2f8yR62C1{n-10&yYjx?aZ(WweO;eM_QCi z!Vo78wj}N=1Yh$yh_Sc)iV!4AFE(HokLi5r2Aw0j5z_(sdZ@F`fh<=-*o4jOD~sYi zQnqrGVbzFNJbv@L57_&pd!$ny?#WdI!Y6q}PVAD`tK}H$<%{)uGdD)vQH&VU6TdACD^>gUZ0~r68eNy4)(ZawY?O{l%IDALwcc%Q*-&o%> zO~fnY>?;xpo~z%_Tz>1C^Dph5d+>e%EOV#xq+)z=h{NsleGS2gCAP4oQd@-7621@C z0Hx^3Kh4CNaEb)+nJQ#Qf-a#?u7{}_4_aHe;T11 z=tHf#c(w_2HkX<6oAJI!Aim@g6be5m?*Tu~tdyz@rK{)l5N+Kt1WIF{qYt&4w#Ys> zQ8Ar967Uv>m|s39v5^=(B){}R#()QT*qTk1B0*gBRiHEU>WE67Ywk>sLYDLgOJ0YW zvzfFY{_Hd%^v?KXlK%kqsqrr6jqq$GM7)`jmyaI~SlMLK5+U=7y!;(}RKe=$Q*+@& zyU5LlISnz(X8YWDZJ{5WU^N_JC+4nkjTDXGS?S5gp^%0dUP<={mI3d8sSX+~vd8DV z--YiEDZweyU}iI}mX5`-a=o-x;<<47iosPe#kdYjac`Ovj+64bSr~hS)FXP@w+?N1 zc7KUon-;+SUmU=7h4Oyv#i5a{95UbHBx1ZC6D}l4&4Q@puQ8uG&tD6k- zz4y1YWzH(3F(Tr%===&3;DYFf`^`7IDz4tL8~g$*tz`RUW`0lkoMG~i`Z?nkUU-IC zvhShhKOSf01$D8a*^ti-8fxhimML^-QfS`TEI&9r4T0AE;^u7Cr0y}5$&^JAeIR=9 z8|(I`)aG3{xc`OJc)mhlDBKG|*e`RVql)7GDalEkJVHOS_pM=Jc$^@~^}Q(aS^V*M zc_~-44?sDwRpD^3SPN%KaE#w|?A@uDA1N;x46ZJznw228yyifG^)U~^3310+$cXnW zAEVz7Fpm7|$Y?upQE9>`=2b2F;wpDm&DZ#7h=^DW4dyMb=&aJ+)O_WKaSerAW*`5u zEsRF|tHlIbn?qvsE0R;Q+})lTT9*#NJ5<@3{Kfo@B8Wd zO9XSd{OH&K_k^i$2`rRPV|vAMc7_d1e*C>qEVOju_vpVjFf_0Q>b3XSuZJ+v4yjpY z#%552`uOd4RP>jUJKTFoarW9$h1*eDe;ZUpudX=NblR*RWEFV=F1iOT&=0-8Ml%LD zsXcp~w|yyI+}D5ich{ie~ zD*8~g4${ujnR)7a1UJbzu!N%tQOkJMkHL^Au^Z~Lsc zo$kaG&;t{x3yh=!Gq(=6Qw61k8*RMJ*)owPW3RMjSXDmZ9-Fp)Azw#i-hrV27*?PL0 z-y4X_*j5}AF8$QHwBERNaR%=T(RC8{yXUdGQUNuX;p~5>7y%$72LlR2zsX^{ru0wc zAZxY%0Gx5~`i%JerQLIK+lqdEpbh3JFmGNsvR96x_9zd5-57hGW-(?Ync1SZr+bF>5!9JZ(sJ!a+O`}WVb~@o4igCC$^Rg0{%FY&g|WQP$%HVs8+Rm z)CKT6`u6DKDL<>k?OLzPr9Jn^h;{ejNNo$<&`4-HqTX%+;TQa=42vnW-@E*26HFk5 
z``)%M60$_1=DQ0Wo%LH!+_rBfdt#6JBE!Bn<1-@|{qg2#so^rJp$XlPxLBRc$ngDf z6&-7CnK_{E9RRKF#xu_#HcO`{@+Iy85)nO1`7`91xz6423Ap?KufXbK@YWFO2pc?n z896^!hM1MpxDc`ae5&f(i9$0cB`j7E+fP85b5}_RGUQ*pj1w2x);9~;S8m}<+Z5Mz z{CT`5k2RS;5FVr#{^f;`bo9q01%=4GJYz7<6sGmnwmFI|_uuRSI+puH8DBfUgkWf57xTh2T|9Rx=`Tdn?mccMoT~BNW0yL^{$kgi}?)GvfV-< zu9iSAvFW6asPk5(KzsQ4wUVVi@o84KmO`S@l`-mtddpt&WfW-10kB%mdq)eMVPF9r* zcJ(Dv;%yH7s6HHhu!gNKGfH~#RizjAIW|A(>##v&Aw=yb?tfC2Cd3(@_2{Kw-n%ZQ zhT1TL#^Pa+MkRPfUrXEcu!$lX)f>APn@r6+y%Z^JM~6%C*u5;29z5Cf$Uyv*?52hs zWJ5Vw$8VTniqn2B1z%&igu-o#o=w<3*hYSUF(Hi;o2p%q2W{hrcF*M%Dw?%D1R~Oe zw-5MV9P>bIC-QyP_pmH!pTm6s;C`hCvfw4oI#Zh`Cr;G;p^dBuU`=i&fVxR|m23ho zs*q*!2fYEarF?HIh*eO|LgFUvIlGhC`j(+*A*O` z<7oR=?v>U7*|Nlj!SSC%<-k>`jH`;-g2HD8j=Tc3cyeZt9EfS2?MvlVJ!UdIzx!UL z^!oJ(uG-V{`wP9V$ku$@p9HcedvBB1qn)is@B~{-p;PAeaEn0?){Wv?NXVWpSn z?M{}=X7}0fqRAPl{vfB%t)v=h>OY$~4$EoF*MLAajr3V{w?!FHqDSUMKv^<ny=^_#S_YK6 zYY^FV-u?X)?m}9jwY@$wP7?qND2%xX`M@e^NDv%)b^72`N*W~1wl7SNzo8Br+&!R6 z84umj2B^TPQ5m%{x8ELW5teVWf>5ltV4HfnT3jHdcN_c)2Z)8Wx2WIo%GIXHy7Z%C z%RY}SF_O2}ms-P8LTJ9j_DxQK2Q2RRzY7*&;|sJ`(9Dp#Im+Yhw7t*W#(zE!fS9C0 zZa0VQ6WWmEG~f0D?y1Ygr(&p!us6`}y#{9g>^GMwnc6^44o2r#-?hR82EIqLM-@=>oBX+Iu_{+{>tG#$$+K&qX(gJ>oH>dB|QN#1l! zXSgbU`m#m{_~hQM_>@ri4Ks~Q@*z%NlR1XWo9)5?7pR*cv4Sxq2_MSs1=K0H2;Lv! zQwfs2jw%oIW?9gV6fjy-XD-J%Wp%tj;77rhCg*Jj9My%Nh4Fa3N6(RiMK%?RX?>S)v(Uzd!q|t|Y0xtW2;m+&}^UOdB6u z^Kt!L5QnI~c!4a`)@AjzFeU?wLxMe59$LXXY(IWc;-BJAJb|EeB2baO_6##)q|SbsDi2H|@*4 z;@{<9gg$Z$-AR^xLSa1w53{V+@9iF->`(ZJOZ-LazcWe8JnBT3OWBn%aaa4IPBV9#RNA zr?+Ihb3w5&#C&J_uEbqTsoncUx$pBxeR+>{ukj}Fx5K3bmF>Cnjwc{TlL~w=t;+1# zWDxC(cy*TOt=K{Nc5h-4kzK(k@CyYBP!XgbY21HZrTM({?LnPRiRwHsc#=P&W`J6? zN3r(&_RMLz>rMNlJR8t%^!jvgPO&yg^_dN?Zz;D_Xp9l$9Tub%gyfLq(G~g$9dfA8 zpOi&ppU#0EAA`_%O^Vt))IgyVLZq;Zl-yAp-y&?H=)7wjp7|!qqy1)}g>Fw)BN~ni zaBg1sP2zzDr69ZslqRS7ZoCOw@z`4Dm9j~ zG}f8|x3dZ9RLpAl{U+bsIpoP*5?Q&xzl?&nMJkW`d#GAkI2n9jr2$4Q9op za(iYPApQJ?#9D>g^jelq!}unyA`A=Mzipttm0opj;Z9%4L{rae23#B9V<8V1(M|4A z25LKjD_x*HjEG$}Uap^Ei{gjl{VCtY*If4X1Jun|(@xCDSqzt`0K1;L@s zo$M{);UJ-osUVSU-P<327(R`)FsesqJga1QYA^jZ#rgW?^ZbsYvv?PH#D4wnseZ*! zDXq&CWGFcc0cgH>pvD|b5zcF4-%L>VoE`l^z1-*7afWP>%zy;}4O(5Flkc<|;Y&5a zm#Qys{@PqM?sc>#*&oyH5}M}56IWq2onT8awC{cN==RhD{I$10vhU6eN8M-^)5WbQ zr?}C%ReL|6tw=v1bCj1i;~FbtvAo4{qXIJW0f@u@c4oe~4x-M`-6e3_t!$zg_ zsKB3@%-WIA)Mh?7CtH00l27%|?~j4^FCtk%ELpu8l^8|sf6&O@R<3lf8ua#X&%d+$ zphR~2c-&u4zkus(Ebd4NHp32-{NsH8$Wa9y9xfO`-yU;&FSEYU<*zvQ;|VSe>Y<| z9k>OpZ8o6y}p>)f{9-pryN~`WHLTkIUNI?kddK&noO2XRAQ(q2QBY5@mxO1#+CL z5XEJH4-_`T@S|BO$v&w>?bfZ|(hXobS;PYT!$@1$u*t*GhUa6@HYEFg*IuWj1a_vF z9u_!JUiAuGwZ3;83OJ6FAbvth-R06bZf9@)_!? 
z>!%r>A_#K(u39#0u;EV}oe z#*^6YoZl1FZ|^k)Slq6Kf4JpKL0jlH^yH$Vt}m~)ov+{RC; z>kUsYxnIX7IlCK(3g9)ZS#}vd=m4FE_P%l*H2=PX;OfE1dvKDMNwd%OzpNECEMzaj z7=e_5j48p@-xUjr5zb4isU|A^DJ4L@>q$`Qd$82!|6Q#BblVUy@aO1~PyD{xY?+;V zc^*2#gbv0vg0r9Y(N^1h!v ztp4anGV>O8(~LlbYQ4{dJYWBuLvL;n@xyD}?k3O=uuyLIc%r0apvv~^&#v}AJ9v>* zv9-R4kR#wuJP6_yU+)mecquN9tLs0k|NbMBG2UNpEAMrf?tfTh!C?O-1Nekg+DTPg%D$v`<@ zt-JoWzCLcS0T@>B>Q4PX?;Tqg87N1SyX3}6V~og=q*IwG%rW%K|DG52P?y8n z{2aCA^7T(R*lpT=`QcE~?BTI7{fpf_C3>tt*7U|x^n!JJjlU;nOX@0-S*Y#MvO;J@ zLOl*Y`pfX10NmCn{bQ9DbP)H62#5(pIO&fb)&k|G;;(EL{pZsEA97%STjWLQfAQ>} zX8)Gl6q!qRU4FC{Esh`T@XRezMQ08#t%)8kMF z^(kUa_@AoqpuPIfe=FBNFCA}&{KD&^`h4l|@i%h8<|;Pxgk>8Mlxf1_`Q)YedcM<8 z2*;_n58W)jdkpXdoBaMp3Bh}F?bJ$k0pQZF&s}7Vt+Q^agQ^}h-BIF8A>S1)0)!c~ z7u)*zWPp*SZr_Wo-_vctgyyzB3(devxaEW*q4F2>k9CO!43j3O>u)FpG$K+YZqN)x z!_)K>Wc17XNGy^Dw5`(X+eaM@P8b{SAO)XpV5frZ3g()RoI-=y7aCwXZwMOQAe*hY zK!j;(^<_LQciyOweht*FTtAX+S^GT^_(BL1taXmYK^m0AFby2y@b%f}u7^yX-WL7e z`}8PPXY4o|<126u%16n!wNOc^+S7$LpyuM>2ajMYkIv2_;JPQAeEi_KbWY{$4ww7q zf#&{vuO9BTYHy{+MT+q`(w^M9*woM_4KOkuBYsp3zB_4-yN)eLBuRU^+>vWtdjDwh z-}WA)x%>WFY*E8U`hoWJ*2Pz#O&Xgj?iH8sCh|1?)>64*1StR4p1*H#O-FNI*bZq= zk)gV?*XqM{-a4m$_vPaZtvx$8{yg!5}3fRz3PLL7-=pt0kZphUl0{Q#4c<{=Z)rm?fsg& z?{>H6!~b_*NBx54^zekVE;@nB`};?>4BW^Z4Jw)=?HTHkTsxw<>Wzdb+H>|`^+GvU zWZs@QYy8@)f;~`=+|duMmfu{YyQ_SG2hu$DhrR=HmBO#f^Zq z@r7qY+EXh-#x>gX32oW0y}cf*XzpX)-e0%<^#g39FBN)PucNuO{@NfiKPV>!Qn(FF z8uA>!_SBnEAs}RED7c1*9cgcG0m(v|dwst*R44xKOMT*6@#;<8`e)O>=JvPGCPC#c znR)2nl5}Eq- z1EO>nMXt`5m+{yVn;|y6MRPmZ*@*jB`o;%Idk5$4?9k?IF#p&frpVU9ytHED%O(Gr zs~05!OBW58iYv@ZRzKf;FsTFrvg+ri2oQKy@>2)oNFY9q&F|@4(s;${~QD*xmN_*8(u|a6pNtI6jXK$~ojY6b{_u8%6IUBt&Asx&O`! zxV;d60lXAEYhmai75fcJ3i14{(Yfsw%lBU^q>051`#D-g6Sv(HoeH@gbhtIQuyjCJ_;wBkEWtPpYx!AhPov1*Xg5i1n8;2!?-v}7kGR!GAN zx&K&UL97sWX`VtEGn_pCJtF8oJL~|W`!~N#gIqI^Au+blF#OY|<)*|E=`k?5|5#!{ zEV0ne;h)tImUxQQZ}4b_HePcUVE;RHLLn%cYP8+(*nwoqr(`of z_Z!>_S=nV+neUj#iq$ ztC5i&(!0f7!|$zYJv%bo!9J7ai&&!7zKGSCWAh!vYZ&$cEr&sZ3704qXv#2jvt^2?A zN~Mli!u^10PoFD$X2}+@$WZGZE|JbmANJ-xvN z_4(A0UPhtBakPS@lurIrZ(p@k@NHU~F{Iw{qsXT0-wpR|<%Dw*)e+Eyl6H;nvhUZk z6FnJM4O;LRX(w{1MSQKfOLGC0r_`UFxi<>siwdj{BY+hQnGR6f>qe@)q1jM*q`nyW zRb!_QHEUn*oOqDG7e@NVdzI(nGt`bT{N?3cPf-4j!sNb3-%a{-Qn{_>?Zw+MZ z`X}b!W~iIPqr}k}y25Y((=~Pd2gg~R^!X^RYv0EcE!CDVzlJC;!{Yn^oUsUx<9962 zg5J}GyY)Bz_5;lI|QU800Tm&Sp9BFX1%l3TZ}m;>JKM)Qy5GfmxzCSk%oD1Y|} z0ENn9vL73YZCqrc9?au?Rw@4{uEg}uCjV>iS2x;&^n4(sUCPb6OoIhfpDT-O6n&W! 
zfNRWk_hL%^5{?@$^{>B~)9>&k`j+c=?8JMZA*n%%x{uwz>s*|XwN&(HIZN$-p2`Ql zgmyoKdzI{WWX(|(MC7Wo@fh`56aKB&uKtBLgut19`2O>T!tEQopKX}c7B}*DPe61| zm9f&_K}YZ=o&*-8s~`4yD=1>l=D{ZGPM;Y3SKd8_!=5sNhO!I}D~loGZZ~H8uYIKU z?(!{M&hDdN%2!l*#Ao&`n~Wp=it6`n|VU+Wi3vP{_3Sx5GDp(@gdlJx~#^2s4p<<2Pur!R!p#mDR*Yrtv4!ZGh-W5 zzg@#WLuD7NABgm?rlN;MrH9| zS9|4o9u2&;f;_JNgkSXGEwTBV&#rtzmb_AsJCe2ZsK#!|AK`0^?i|jk$Q!TI+&qe< zxrwMX`FTU_;SJIXQ~e4d8a^;(#6T-7=Ytce?=}$iGOke)M<~^t)vwcr3M7|)cIkku zGHaOIoKoxd-J@vq^HQuXgt`PphILGe?(x3(UEQpFp7zcAzO%azeXSw&8T?r*;GaPL z-9KbN-nbFXo^yg7z=y*$830BfhB0K-3)eJA4B!X0{O||=Aeid7kwzvqE?V_>mhAVO z40w`#F!A`rfq1P5!4;=rtorg zG>zNmnW9$BNjD`)a2qXW$N+D=%Jt$x2`NvuE4(Iw`bBAUh!fVPdvDKq!v=RIQykyL9`A;7HRMB(r_e$F<1uWV@NzHYbsj$4 z9l8$NiDh5VImhPNXcF@sAby08x7(Xl14%itujsxT{++78VE9$%$2@{!X4q{zwV-L` z8ZU+72481<=33HTGYP7Cb{?)~B<$Ey`%3TW@`WlW>47E(sK5B2dlTg!b`P)ELjILH zv&=OZ_d5ZE`uKY8^J<dVvyn@jGjtiB|yK#%$VL@Rvwy=XEZ-kM^Z< zHC@YQnJ=J3xqP?3-WzH}eB#%oV^Y>!_Fk!-2~1Q8*TOAN2>dw419fNlr87aC>!=lL z@`I3tr6zcYHzfNFkWraZ{PFsBt=cF_iwz={*d<*pj+a)tji75jMSE}|V4}jBj+G}n zoI_ul_LWD82Ie_(oqsKrw5K!B^Iz`XqD~q;X6CJ!HbuDtaBsXblbL@aN6bkmAQ}>q zGJR#fTsQSW^seNl@Jw9eBdC?i`vo14ZLVft4UMxm;(_jKFFd&Yr@3!quz2xN~-z z+_Z{}c)>Sgm2tN)zXzJ|5y*b{{jx~%V{pZ_Sf;{{-SdddIL7mylG6^IJ`&$>j9j_1 z`*jqed4t2ILb-W2xyd2{fp+HG1~*}J-<$B@sFfFQL49Kq)Y)kzU+yk1rjZ7qqBODu zK{zMBK|~I`Gi$e-hk2gFl;iLUeAe(fr`3ivuOAVK%Vyw&`)f?DqKI>){Dx|r7k|a~ zIM{0f5(4xi0`c|N_3&xZLdWyBg0_6w;9Gt~HApg?$@Mam+1ioo~3^Dv8rt z*}zi|vYJXWhJxR7&~p*u6RT~g2QhRLQMK)7A9+@FOu^e(PZRW&B%u9vyx~{RYV@$o zDkEl(l0=6yR*@tCO@IR@{#*}E+TX>C+NxmEZUNwk9-)onK780)rhh~-&;R&K-bP-# zugtS|?mmAJVLKm3^LQcN+}>!_<=$LB z97px>%Wi_*ydoW)gF>S1A5Gf3LW@nv@Zz$*9&VP+?f0w(2{rBB5)ctB*&rEj9uo}+ z=(>fp13C2S@~i;IIyV3zHQGz;$Gkn zS(9(-HERU6c{k!S;^~I{&K8?VQrLyApJh915|(4L;}%pWotC9l@-5co0|GP1BwI`mj3-UaoT4->Y2rFUJqd z=B)IQtmedX=3uj(8s&VGb^{b}_VXxTg-`hAfvh6Pois=voJh(cb7K{;#=%SI@W#q zlXdVy;Yt`Vu(BI>pMD7w1Ljxpdh1SDBY`JSFRCO!%f?VOQ7KNNID-3}bw7z6;JX%{ zn;@8h53j(hUwm6N2wkqVB1A}wcSPCXH+`4(a@{toF$bH=$6d?tvkREW_kp1#;;#zFD;blL!+uC?MIJX|D%=GgJaq431Oe;P zEMElzy_gG)-^P&8@qH!?IKAD*mj#T}JL20QMUGyBn&C8X9i~w-vr;FvSWJ~oAfX(P zcQ4uNyCmTVR^2zBgkMKHz7uBpB~qYi%%c2ns9+SLPjSAVr!d`1LgnCx`Dh z+tmXF6SJ9^)An$Wf4&*zCQcT0LJwY};*otk*$=S>8;@6B4tZ@bcF7M)v!MpMAt$}p zt{zf0QNh=Z&b3;QM4s}v4`!fxr*rb&|M6&X;LML+I@Bc(~OyG>Dz} zc^xv}IgQRG)Mejj>Jc0Jzk8Y@wN z6i{eFRI5m(tAIOHxIrk0|AG56nDLV55GaK@QB{r3QOiQRBxdo7u&@}7!n`}0>%lV= zT{S*!Q2m#%8FK%MG@jJd@If5fc-uU*P}7Ri9#fgAw{z&IgAd?;cRX@paYn3ze`n;3 zuq<4d3gAdUdPdA>=w64CL%oI5Q_9WkcuFh3KMCgn>IeX~)0d@>Xs~<>MNqeDsBXUU zh*sYAyNK9Fe^n!Ix^7D0EN)`r8sg^Ir~iCG$z86}_Q2b)S#mY+Vs3xd_VTT4>xIqI zfh&xd6JFzfuqol=7s=&q1aDf1Z;iPVpKD1|v5wPb(PM$R9+Xoi*kl!Z)AfxzA59#W zK#F_;MYd}uul#5>t%hn@apyF$v!us^7Y{hymM`qCTkbq*O}T5yx`D?YC3LOd#BRs3 zRVl>D4(iAds)1_NH!I$|+y?=gK^(*4wajy8WmA}J>=MNS_);!drllVrql~Nq+P^8pl?R6pf-Rp38FZ$XS$3 zx1JHMiAx_=inr6emtUo};58|Z4^6r&qH)mC)ae+i{M5v&!81N_kX8|VHJGMk&VOY3 z7+$yagFgqGF*s;p@ncl7&%sUK)8|JFXI+`|)~h#f;uBWhVe^bO!mZ?_?^?3FD>mYW zxc6C>n<8I8RBmbGh!($SU%3*&%dhG=I91*6FISIO^>ADvJbKnYYJ1qL{j=P&*OT|X zvU)Z#{DMe`-MuasmN-t`VC$@uiuh_Ha-|>_{5uP}FtbRRrH(}iA-cgHqQxrc0lJY) zv2;QuJ$4_%RG*El1aVW}o_06pm#}-eymj*9>G(9hX@|`s54bP^fjF|!4si>x;pbng z39PMGK4#E;B`P=PG3z~37G4lh4Ffwdezk3Xk_ECr#hW9IVDaMFUABE#H<>;4hYjMT z){ySSc7GnN16*x-W&M-zl~*DWt$?p6=cnTJ7)~{N{BA%i1%9D4y7kaX3BvpYl_33g z;%js5Cl|nHa8wFcY!{2)=%?;oa~zKu{CPb0kIkB#6Mm}?+l#I2$=pDGV7^}Z zq&?b?lpJD|#ZcXHzP!Uj7Bi1{lSgQ=Q1^q#9P;8ye0NUdH)I))enJ>*>~Xp9piXsE zFc(dUf|qZDO8%}oLhUo~eMz+RblGECNYd=>=gX<2tDV57kcFA@q<1!7;P9ke$%Tfi zaWEtcH5E+z1A8uok{u~cTwa9vMlsXa`?Nx4ZQqbj`xf+ud9ir6POFfBv#MNX(@N-O 
zJs0UN6RxX&I6{$|1ItH%YUuYic!85?&!fCC7cO&8R%c>Vr;{e`l=2wH8oXsS`Qp2- z#oXU7+DBX1cP5Nk*PwP^8xM`c`* zv3^sZs=eKUCx6`uD}w(q-is2{v)y6?8{1hF&Qx;Z$T4o56J;tW5=yCo&|5~D#%QGQc^EXe)_Md{Nj87mxK_(~_G;z-I zns)om3o@Uc)+Qd_pKEbcU*s+L=gp2S7Ah3R8nSG>0;%b$A@+ouZ~Vml&fcluxZZ0# zA7M%|;}SbV-xN2C_EXud8LAtZ?VGw}V`X&H;`hp0ZK0mB^C80cAm`HK}*x7|24&yw(Tb-Lrz_L@!+O%n2Xo*;4X2Sl^-5f`!-G)W<=W#1RbV(^}Jl zE;dHsskHapiTu^7Nqo^LCz~CThvCO_?0B@VtF4Q_hj+3+K>3d=WyEcsrPc>ci6Qol*#(hg}9`*sk#KsG3_C(J-4K7Sx_)=6b~xF2Pw2A9ySf-8uIQDU`7 zi2-5XEP>MU0ds;p`9!~i3PWH+E;Qka84?OaNk$f(A zV`7jkykdn%@MO;PvV9S|!@unkm?N{UJiU`8WW|>9Y16SFrVP}ErTj`-THP_*9)>=K zdf6f9``7D@Pj1ZQ&CO8;EUyhJ}EJDJwO7DYNoT#dZd%QsM8S}-mW1|E(SFt z+R7TB)+^~#vuSSgEq~Ju^?`vP+APoW=hJ2u^#K#6?ReMELiBBu&q&px+vFhNN z10;?!^NAMjQQg$`FQY}i2x3#8OTI&~cB`Yl*QN0kjEKjt72K_ZWW8#rT?J=oD6Cha zX${E*<4V;-L?0rG)h=YgY7{ThCVds?@9qQ&S!>~H#=Qh@f5C*gF{0sln?gWOpe1!; zj$lfoEe#C&v=m8-m=ra&j8p30)`Bi~=Ak0Wmz3BYnI+UqY9Pb)h8(+x zAuscO&71v9Im=CXOgp&XY@(YW^w|G4Z1}s<+ z`Y=A#d!eY4?>0aI5k}33%<^>ud0Z*XQ-=Xo7Vb%&^QO$n*4mZlu>#ALdIRD`tx|0t z)}W3%Yy{?r3@dqh#-nFWB!6XS*qwHY7c<$~|AfxDvR+NaO2FAdS~i}ij2OmP!laF_ zHq@_U>D?#c%3%G+rTY6gYwRmTUK+{LDmdAJ3?{>E=0e<3`|;*{B0gOCn5W2=8Rd55 zCFwifabr3>P&Iv6-pJ%UP#PLW8@!`?fZ^g>Mz$2E7U(O`4HlcH!W$)N0EodA+Dh%y zhV-iLpQ+8^;DI@xyyX$Um!5_?3m<5$@dqc#1-JQ7k@cq|gJ&OX#@;9g>GKNUH7ICO zsclf|j>mc6(*bde?XAhN5q%kU*D*tSE*1)JYTX4)?cu%W4?&FozHQ{pKY!Rjh9tGq zUcDaSXJrN~t)VNuuEQx)I)ei_be>RL(P2{G3t<12WO2>vNZs#7vtDWP?_S9Cr& zvBvX4FqMah|E}L;j*I|ZKC}G!JmAqP&p(lFbr1BJ3|-)Twf%>R(Osj`&P;!Bblyj- zhh!^N)biqc^viSq1pMcR=phj_Al%y5{?f^yd;X!QQ0@WvoeP44iyZ&W!GYKUh{U_z z9iH+(T$*zyNK5^&RrkvCZ+!Kp69nlVxj)Bi|3>1`!9O^tV^yPrKLnoa7!EK8{M}d1 z1Q?7sEP?=#AV8!NYN>}Kfph($l}IS=wf=C?AJ{l0ZYZf?At##qQZ&mIi)=4(0Y8YQ z#1!vy55-go+BUyWEAmao$pQ+<>RmrwN>#1M)E9dWd`F=3{47oep@aonx?s}w`eMg! zvzQB#Nm_j4lXH*d2OroRo2hJ{$3dBc7d(!03fl1#kBv+@rAy3yI*l77lP)4iB-X!z z=a0zIhS@NS>#EjIj5{o>GU2Ho7f^G$dOZ5SjaLup0d0rPR&5k z5S`+g6$5ujG)Sy&YG7sp9u#L<}M zZ?a^UhID6aR^vuGLhFc1*_Vh$ATbe#9=Y(%R1fQR0Lj8rGO$&DKq71_okVTTMFgP@ zyR-TH42CunF10ho76dql-CVB&BKSZ%4176kz9hu73Q#2441Ke*D|wdlyj-*k;`eo} zSEZV4`^*xzpJ||_5?mquhP_c|Y4@oLb}!L+h8-TC%0`;sTZJufF4@m)-aHkR^ojY0S&2lfagT?4@?372@DQ?w z;>_|~f)cK{=i9kqQ!?+M*e3HO<6C=A?ndM1T-cAO-;>!;KsGO!4#*jRC??cqc`l>FAuP~BxKTEPJ<=L2kD5) zQd@-(R<^f4XDeBJ8SsY+On~$;%Js?@wQ#34+2vNi5DWnEpr|A1i9m8EnY3U*FeqJ$ zPFXyUd6gwx$1bxS+2zHte>3^KVnlw0aK;o1#mZEqV}+6FGL*w^)|LD=Id3(rgf!|}))bUk-(+J7Zq9Q}-uYHPAA zZw^qILre6DqEFSA`;@0~r!>N~gDS{l8L!=)$IIlHhM81?Aof{4p~U>0CE^Rug)vxi z$F9v9gxQoqUuDRzV1{E@lY9jKXlXliEz%u3j48iamC7M21hE|<`QCz~aVojJD))qW z%)r?>3<~<7u%I_AqyBVz-WnT9oMnw_{&_4Y(w*n!RTt0aY)(bC4$(rc)pV~6XT@W9 zfB8sTHr*Qv4npHe0v_Ves2#bpm6GEFLgVb4^bZo=^E+pzn)A!&?R>b^pQFZ<=c0HD zUTsU}Illw=LQ&aFllPmrBL!Mw(r3xNj;yd;fvOS{(2?$Nlyhzc)~extZ|W z#5dpL4r{GevcIV9yo&-<@(*`mK08a_G2yl5Y#I0*3#D}+i%hTmLq5>5%-QRS9FjJd z@28U%KA*@Ci6zHKo&( zg!j8Un-7k?U#InLgO9Ja5D`rVP&zZ-Al@dyzMqkX(5pN+-L}M1r?{Eg2&Qak-BsBb!-QX=63={jz2e z*wzQUedypW7-Z{EaHhZSs4IDgq&2m$Qm3HSkWMi?u84I>v&vjtBT!k!3@)A{eD zl2~;4c@1~bdZq6-|D0IpN2$442&vT^h8G}_cIExg)??n75v(M-iqIKA~ z{As-r2(NJk-L5q*0}cu_e>ysk;PfUjU!$|{JS4i!uP03Y6j*Q3&@$+CW|xU-Q3VAh z@Gm^oqibX`xGX6WwizlUBu^4l$MkDZ?e;Wv<%!c0uRqTx(n_Lo1Vr1)7RVk6v$Vmh z_@K*4k*9;bhpZaknZK0BH-ER7{>pf*(E7O-zu!K{9B*Y1@skj3HmwoRI8}3WNv6PZ z0UwA)abI_lAxfwpP5 zAj;Y{k05g@RXJA8hUXzuwk|Vuv&c#NPP~#|t%Pw|*nKmpC!`tjkf$%t`bgL5%XY=D zJCgUFaBeyP#DRQ>z>uzx%^|mwLrur_W4&PA-h3Ag85Zy8K*OEsE>BCLPaVa?7a&Jy zf?4hR25dcQyvI5^SYh$;>11xmAn}hacz~N{)KhrwM%UEhN90j@F|~(TTbZOsCzMSl z?YARH;NA$&+vE{}>k|tHUJ`tM@k8O9`aBa_gQ~lsV;7!6+vzU8jpzHEj;Gyw*^0OE zrAZ{BAKu7Rbm%Y^N{F<1-?4U!X>gcVOC58k6B4ik+UIEkU9bOYw_v6GqAqDUB@E7K 
z$!MUy$$aV2&%6$3iV4t>M=G2q>M5zF^rI?csZ*2DY()05=X_rdV~f?`IlKg>BhDjd&|K(R^pTPa9Q~O6mxVK8oRy)zhp1ag6>E1-z!*2Q^ej8)<$IJwc0oKzN#97Jjm+)* zDr>*0OYCK*yYHV2hN^-(fO(^b^TG>d^33??fS9T<%7+WI6HE~W0R8mXNZq%s@n{Yr-;5N|x6UFlI za)yr~MC_iNw5)JV``3YKv$~ev8;mm8N7W<6LajM{4nEtDYYVbS!14l#7eUV3rQGv@ za+ipb*$6q#Qr*Jh!iy$^hp~9^AVi+o*@h}17&h5aRPP8VJuLp(onN;9vQu!*YMZ%Y z{h-~i%PN$wBMzmA@hZM(zv&e)!-gZmBev+OV@BziqW+011dI7BOs!Gr<`T)_j2NU?~4jht~V5TXcM#b;W z!QR0R`Gcx)=xj`5_BdR|r}000y^^?-?#o+@8 z^6Jpp>%0VO_dN~yl5LoN_Rh-tdbAq%7Q?aUxC4PvBc^fu7?P5iWTS3vI4KH-$_QRt z>={a-+n+@vd-+~Zc3uCf&Lc|h>}dU>^p8HiB(Forp&XjIrODC|L86oi;P8| zHTA5;n|ix2PwT+(!yBJ`&TBN`DF^)h>CkeG0Q? zSRPO7?t6ahx*EyuID;UMr~xLKlmi4AGVL6D^gJr2k}F#zTXZzjZ+A121ZZhc4rP!% zfI)HguCCuR;yR7!it@R|z{v+_m9hL1;u9t z#u?-Dc$*+}%c7YJ0a~5O3B`6h`DmyBu^nz6xZFtx?AU}R{3UNrgDW9HBRn8w0vT)| zTTYlPINkptm-;ULCc>Wbs5}muMT7}&gpH?hGt*Q z#BH7iIngN_nVF4kRjVM76z=AexVh=mtC;QD(RP440phobc=g@yXjcm}v)r5XaEF(u z(F!1G@zz6r%IQ(#N)n$qtK*k?zF1nRAM&C9CIY~A0wiV*3%Ev`^tdiSyKJVfU%%J& z^%U6?c(imc%5-=2v(px+4m5a_kzNsdvNxy2a;G1_aGFwvn$abU`U$(g!jK1EQdx%^ zF?jGMDwn7h!;)DAQ8cUhh~vpaq7C(cKBE z>!{Pp;3kC?j~AI#5F27&Gj-PsIlIGf z7^!_|Cy=X-N{+@!b|?E!?{fO;EZkGOVO!)ZeGOC*q@Tn}QG&%$paG~YquSdWbJ>1o zwMdU|Cj)7pZQAhx&b2vyo#i87yU>LV1`MLTqd9oJrO{|e5^=L_4$gaN$tO6Ly_dr) z&%xHmOd2-JX-{mI9EX%-j_LI+jJgYeN?li$w@ujzkHo?$V7uknM^RiNi=jp|d(dB? zFe3%VFDnHNhXK9@qwdGuK~jTi5-fz@Y`AvO48sIhH)3AL(xorQ_5@!z3)QLJ(>{A8 zep+aDQi$((-UWl9nE~v0Ka&JJSzt^Sl^>R)-E{5fcN!kV!+i_=ZYCKtKDM&-WPMsC zPcscNp(2~SId~{b2M2y7-Qb02db1wpZbOGdZGjy8@LlDIFFrbmRtjuC_=_=7x~DIX zXt@Tkt#m(?7oP3}ayIgx7GTio~ZQ35Nx>scr347 z(#0Cs02t$jjWSj&7#K(zS3c#^wgbZ*f-!IoAKaF|-vh?1zt2k05pcarJ=Fndsl5G{ z`3B=iw+4u8?g2oo{hqj7LWq^d0*-zx&`}+Om-%3{mJfo|;3|up*c^%b1+2(@xGRCk zv2PCH{dj*X2q_Lt2u;{^?1UJ2c;zHtp;xEkoIE9RsD1%uVo z4*D5=5b_W(6!Xoom9`E3GDeh%f4@1Ptq;D94V}_iPacAX+ks>s75aRJiNu z#oOX5%jW&KCSHY`3J02WPeQlaMe&7F^H{kJJR#~~XdpkusFtK&1~Gad%;cLxc1lZT zW3n@Pt3Ue^mIf%^r;MvqMlxn2o{9BN#AcGlc2HiU?=n=Bz9lO;9D>=&>Z!RO8(T;v z^a^`j>1fOg*yMBd`V{6z>oRYq&y3Y-CEtqYvb)`mY0Zc0E=f#_Owed23)cvU(UH&8 zMbw^(WP_#_V9iHySP!;9SI1D8M#^FYF>GG*CGoy1$W^i0jkm2i+Aoc@Y7gunT-4h8 ztX-@Xei{8>72WLZr0B_u-@h&OYl;9$=w_&xVmfU)TtoNLeo$vZcpg9{S?od|M8DXC zvW!i1Uq|hu?~Us9U4dR66zxPE*AI+haI-OHV4^2GA5p%$M`xsvV7tp_0R7(IfNl9y zkL6SR+B)iq7yTTK2UDkN^DN6=F`U;JkGQ8vwE>yt+=GAb=X(@&*m{@Kt{fZd<3Fi|w`Yck0hv>g{3ZsB;@Sh#kmVKQlu&fymGRq`lw2;on^a zjL}yEy1QP#<6*o8BCP|6rhhaHz!JYE<~Muk;BKb_^q$Q8yt-G;pbuWlt5K>ddv@N) z5)w^QgBy4xJ!Nu9x7fvVD-B23BXmJyeD-rsq_bY9wcTq$w6Zro`-9YhtRz-r)N?dC zdCsWa&PTAW=;LLZFl~56?#RI$$|G=E3@V-T-fe*99gI6`r&ZrQ0)fWg_X;T{+i>5G z>!*^==-v73TIzNh)`_+&=T+_Np|shr*$j=4H$HgYq`ugoOd+7{gOP}K3{iZ+_<=BW zBEY|i{hX_bJ1Ps0h^X zT5ysh-(${dNKI5fh^KVV(dkgYVtmQG0n}=o?{!RyK1f-zI4!5r%(U!1(K)`dc`N=% z=>2BjG zhvfGgpBS2HxW?U=!|puRU)PBIh`Jzp;eGXe!8(b~b-$j$by=lb&0l+PPl*0q$u(eb z(Y{@WlOONbBZg1N=#A`cr!eGSoVTFC_%us*aFyUvUDT7y74xG@>DQ<0hPX+OuQ{f~ z?ukow2+ZA9`^X~`FCr$=!s1A z$Vj1GVzIWu*Y{pMdY5bi;<9~!Kou2#qG2Bo)ikZGbFT&S4wpl7q(N{Q;@5KQ z>GXc5ba(1K5+EP#m76-(U#A2$U10=DL784pX}X78lj{QF(+O4f`B{N|%>S_=Cj+25 z`qKK5EW{2Ugr&_bQokd?E#%@GXnKLW zx5;j=5-3wY7UquRw){%=%bd^Axcr={j7de0L`oRqE1$^tXq1Iy7SZCLn4u&d(F?Rs zC}5p>%>a55{>Zzt&8=EcC82?lRX=~_D9>s$fTCg5477~>{{<9gI6fSJlo!Z*kbkA$ zX#7Kdf^Omd?|=IPJakqBw?wYmIB9Vjv>PUP&lec;e=}w@%l*c&U*GhD8BK{WPPl_# zF{35=HwpC^a`ewpqj5vz;NJ%_8sAL5{1?b*v{#p90=-R|G|6jS|IV?PDt-g>c8_Jf z_eG8aG=A**wt6OLlC6GO_B6}$zc_>})vd&5qNMD}6Oze8$s$*@^uY1-f z7hr#r?|ijEjbm*!lyMT{06_vztFpgjGkKj}{QUlXk>A(+-}}gEiv50Qe|q5G2VNk5 zB65ty62A}q`m~w9>%;z*(!ct!zXjo654$SR#Q;YZ-Nv8C_{WFM(0>AY|A&6w(9VCc zx&N@ElQLemZS}&vg8sMbvIraMm;L?cI}e+;eY-XMZaOCSklKA$mS-$seAAqI@Du(QzZ)yGC0q_6IQ{w*0uKYRHCV9}K8~^M3 
zzc1;p>1B5HZxrHeoa#UO%PPfWR#}OVY5$j3>+3H5+V%hC zn$e5z>;M0{GYI*X{lnt$AAjb5jl2QA&Hv!dic2R~|F6yZzhSfTq(hLb z=FeuOe|GQB-ufT5SsCG5xc_N3D^etX*sQR&i+|&k0)Y(RtqFCmZU&%E!& zZz8G@Cbma^1EGTgx?vz{?S#>cS7nvmm$c? zA7$uEfbf5!JO5>|evP(%|Cxj>zeftcM|%sA`e%_La;N0^{~|-1m4^dM9fj+ESjhnT zSC*44t01h-kbGpQLxP0IS03^Ayy4&1v6245JTZU8rTK}0e|2eo5|Muwe}?(?v3?bZ zEm0o~1vU!hXrOPBvM`83S)1(z{}?Nf7#~ph`==8YVk7=$?hMU;YvLb6^w*??A%nPN zo&Ryf1y@5Z=ofLZHp{~vlJQR{4}ViXW$_!5h=2O?Ur;6s?ze9t`p1!vZ};jK`8e*E zFfjjg((yNqnHK`}+qd`kAL)yKJnQl;pTCI1AERaWuO(T$f4U^g*Vg_UC0RI*{X<2Y zrkLMI7)iF`FQLv4S@N$sg!wyXSxA?_UznMHxv3~XeN{8b+Wg05jIV&^uSYZdPub;1 zlhC~Mdrs|Fx%dthewB;AudDjUjP1XCkN+JKA#sb}C|>&0pRdjLpUB5QqWAH~NvsCNHZ^c3QS>LYK|MXPr-`#Iz#6Ok>f3(f7&dJ}~Y5j4I{w&xb z%J&xu=sJCigrO&ejO(0V z)P8*0VExJ*u{BX%^pKD767678QX$TBQ%$eOzi4sF^hbykiZ04ANT; z{F+k@ZUxJEmx~Skkgu-t%0TKHxy#cT(6FqJ5y0?XB?sQcFbW7gQ?$Nn;QWk)JbD2| zWqlc$W%dAbm1a!)3r*|oGg%xrWqxe@c3)N53zMDN$Ow=$%{)snXJ%S{7+CMb*iw{> ziy>!&WSNs$K>T%Brxc4|J+djiS_3g6ygj|b3B7K|W;S@DZuO#RZ@{SD+ArekXBTFhj=S*6j+%e(BV#c# zeXT6#EUv&R1-#dVjH&MBT4*fQvCp`?<}MBX`(<`JV|P>Tg9vP-O@ZZR6IL|%%}6Fm z)`lUOH@%@&f#Y%RP~vCI08dHO839 zFPOhrF4v#q_@P;1oa6W7ip($7Uw)UPub1{9JtGjkqVmjFC+ECaCTy(ux5#JNQcMYK z?ehupmFSq!wO_nsv$>u_FSD5Aidw|kYqvbT7E!I-mS<&It)l0~HM;6Fuc@7)?9|ms z_CWTX$(+l1maMkj|}1Voz;Bn+iR@>}zUh*WJR}NzPQaEC#mggo)yglj*Oy z#ucv#DqWg5(HW!xs&acXmKvX4rn1UgYLm{BWEwy7u)v$6z?Q-l(H;pmSzF#nM}IhF zt1A-+_wn~hfR!64!)11CJR>;rVVd6C!PO3$EL491hqC4F6q9eqA{NaDzTTn z!s&p-{XKp3SlU~)*hRzobojY1n=tV`>$5jKKeOu4BbknAqt($=cGvEEVj{B%qBPDK zyx#;t35R9*L8cVfax`4+FNSd7#MtDR-Jd=AXUg?_!|CSLuu*sGpNYxF#z52Ct~&4i zGvPFWWpIIYx!dFQ=f3uL9gk$`*>P}yi?Ku89 zUAEcWpAT?pV%c$DdxDgWa~}LN*={(z%_cH^`B)BatKf9xzBFY|j)7UQ`*6NsD)Y2` zY08qzTpdg(KU37s7OdIa-o6_C&s3v_(?$O$PvQ|Am`e_P4#>ktrWH`=-i{uK`TQk? zWg=r{8t`Io$4jm~z{U9`)9WoZ*YdOww|Ve0o3=XvJzYM5!bAU=MOy*a2*^90_U8OQ z?bVCyD^Hz{tM^$L#;|z48mCuKtJCJyu}mB_+RO2w9-0;aUfK^v`lZ>PRbg>dJ+M@; zyXVCB^O2d(mAfuFg?j;^+pIAq|HY9G%rIApEU1$1xj$L@v>dL&j(faa7u<$#*3-_} z_r6e<^;7M=sZZ77>Q3|d^(VaoATjYI9^V96W4-=iAm3&*k+^m{xbvT+P$b91tJmE= zs1T4spc{RpFr5B0kDNeWm%w3SjNPB4kS0>tg3*2c*x1a8DmOUy8E+0MEga8+t|bi+jll?zQL`ew`|n;Rdlj!m72kvk_L3 zc?P~sf%(txmFo!dlyX1i%W7XXBb*PdF(6HMe}j~9WmvkP-KSLc9`lgMeLC!Uqqhz0p=W1CHS_#w_$h4{@W`b_5m$%a_$?y z=U?yp_t)j5>&!Es$Gz*5kzZLDl4?DLt$mpN^2FS)aumfEK0Sr>EWLPaJf0)2&(~gB zfA)=j047ZVI=139O$7!iFlN5*498EUgZ*4pc_Afy6%@L+{UWw6qbqd~EYMKGU2-#R2tYnHw#U-xxEV7$F*R(094Lz0yEo z64cQR(4F`={%%%n!J1LViM!?d!boqA53dKVsh_Wry9p^H`K&opPLwb21WJ);JnV<< z-H3L|OLd}PHF-Q}e^+|@NDDnymjOBnj9a) zA^;a%_wLf8uxjY!ytcuE{La;9`@0_6`0}-_1J42d_`u!PehkbKF*d{Q43;8ncJN1^ zdjk9`Z|T)y>jSz2Mrg?(f>1$s^XLq}~-dh9n-}2qK z^sO-fK6fdyuKQ*VAQfPuP0pnJ<=QQ*0KDsD=QJBUiKgP34cqfp@u~}!-*3I&Q3yh! 
zOGSHFhJpFxsZ4}3BimiK3bA+igMsYn?A?yqqgxfK@T0Fk&|eCmlI-34#~Zq?UtdTh zw>56$%PyUa6TA>;`fT|p1cS)FHmgdIfl3)W3lN-79#fsH%!9)YW*K$nD-N1PL$Xg^ z=+)2pPx!v&{W>1ckAz8c_hu2F*)~kFj`+zfUYiA>u(0>TvV`iD{blr{=fM%Or`7Xf zt$vt}Bn$9t%$4P0(Rpt2^mgE>_QcW#= z2VfOm{^7QN!mB{2e^fJkl~yUz2!_E@Z|H?i9Go>ZkS9ePGNiT5JYJKIJ9sl({X2f$@p>Gy$(4O1rBNr zT6|wn0tVf*yHQdL1JKfC6aoUVBwMC2jbMU5xL`(!#0=xmHtQA-^BP?n!2$11Fhv2g=kjx&RSErB zmC?>4a|Y`QPViC_^0*q3u%LG@iH%=;81X()d8x3hclg($O0yzAT=UM&fG$$rPV2cJ z%11JmcQFJ&2+`ox?xmbkU3n7(M0v-gE(1XI7})rW4`>fg5V^kRR*nV^>Qi-QU+ z?9PvGu{%57VXZ7djwXV_;2Q6>v$CIPGjST;$>;O4EhzRT0u^K9OCgC~y5#GAsw z+g{heCKtE}Jc31<;*eHxptU|oc{*Rt+Wdjy~H*2C8eN>j`4JqQ>+@uPJDb==XrS(hDK_=yR8*#$90>uD<8 znve@-xnIwOmP@;{4QS&A-iz&3@uS~X8mMI-&&m>4-J9o%B!pj~(H=}2 z&ff}acHg2zqnU3v4miiHiTWz;6_xh@gtKd77>HL;-6L>^U+|Mit4E*#*OU6DVkMZ< zavU!A>2V=Ybe|l6OL8&!rDg(FAGcNtUV5ly`8p@(^q|y8Lq6=Hj4!o*0>!p+V|Bh2 z!fUDu7V-`LcERnTG=2~_E2WhE3I@WHz$rBw94`UM*WIq=-rdS^|CNo{->l_{JJFZ2 z%0X_eYW*kc0aWFiw53Cb#}l_zH?+hO0*{{Ze$_%0RkdAfKIfeiMz-|`M?B`n*-ySWc`AmAGMku)SK3*gSYgcu|X*O^4~P`PO`ZK zcV%^AuW}b0$Vv(|{>RnFtA5vFo!`#ebO~OwXb^RGWq{DplDX+RTZQG-r;WNSE08b( zUogb@^=4!~57g&fR`mJpI7-V$dlu(p>=WBNAAx#@-MP;K67A5z!?%$`kL8Tr&Yj>6 z3j~`ob&l))?2ppZz8}FZj+@Zx_g#2!)Xa-_rvNDR#oMVVpKdNWXh2$U;Hg9fsdE$G zmF*^z_jA1&+F|I$fJ2*(2A}mB4FOPN^=J|oYX;mKLY8-aB#MYDm#=(ME$=P9y4IZi zv_YgNyMr{p&lWyS&QSCGJ)sSI2`FE_1DK=V9?9`ET(X;PeA$~$dY|6c^;-5%l@Yc) z^&r|*8n6`nmg^-(@D8lHpa}AytRt(op0+Odi}yh;EQ`Kx%o`XOwi{lvZ_it2F?d^t z-6Q+%?g%a7JCYxq;KUz`_7pUkcu^`Pe?ca_s|(y`g!}Me@8N0!9Pl@u%F0?S=X%VX zy#=KA4-Q|Jy^JC}J`zE+Phcaq9!k<*`vJZ(&t_aUypdr$?mK;dA>Q0ptCTS-!KThp zjhCB*&Ss9fGHy)2YJgPToP`QTNVB}Oh<10wW>2|SK*a@nB*GP&v7!f+$Pgq7;VBm6!PN1k@9!0*B` zUKDd!3T*i*#U~*1OZGh&Y$!?B`g*o~6>v+FbN2%qs~|5WAVEiobb{etF5oe10?qgq zv(tN*i;s0F!k?2lHZhl%Yr87>`0fQt$AHTFL0su`00ysoVmHPKzg@Q3Enb0DXJXly z1!lfc9(Yb~O}19tm@k96<tniF-TZX*&DQJSY`03 zZ7-{34~;G;oggU~+sGni;bV zB&l@kDuDZI(4g71%%%Y+&Uj`nM)w*l-9i|zl#2qu$Wsekdr?jfo!Hr<&z>h#S@7|Z z=XzkU;={9XTwiQc(asP?13X*{TFj7TgWvQXt4p*lXYg+>E+_$|*JTs)S?OjINV>ZOMsSo6;4{?5IxfW*R^j{a-=@gYwr62yBeXp59`nWfs(^{BL?$K}2RsvOEP zPWNyzI5N>NHMEg-nqhzO3;;{q`9--d7$x@O18eF1E$x!cUI;RWcP;f?olN0rePP~# zQGj_5fqP6TtF^fMXasue4N3`Y=h=7kd!qxgvn4m=9^s3;!2)-D?V9O$e7xW_-oVcW zf-L>Sw+VQP2up*^wL(Jt58?tc-6f8X*7Jt?%dG=pa|r52A{MzzYNw^lylR?j%4&2Y zl@Ch&T={I+_K7Tk`W%#dpfdtjd3-gR-JArpE)Ue}w5zEdz%#jezvsl{h+GH%3Q3GG zj@&g69)*hXj0ikXb$yji)Jr%#rC1O5hu~&62jSR?4>%SneHj~zJ#kr(W+|@KM~hTAiVKgphPXNQX+LwtMo1o1JKluMlB03$fj2?jF7IhojBFUR zT%viLkayeG1VNdd_?MUFylBC@m-IIS zqFh+rKI?uS@Aa}$6*(0*r*e53gZ<9(T12;L4(qz+j=fr@n~LZSJa*Toixo&LH!OQg z1<&8?kB=Q<=Jxgulh-D;)u7dgW0>UQFw{oaU;Tv%_(VKrnRCW|Oo7L%tBsokEV-yU z)Lmd^g3TZUo@YK?xWe2F`|FSd&mBuGUHQ<1B~f?ka2ABoeQ03;Yl97;m8VCl0QPJ9 zraF-gR{LcSOvNiDW*iK(#=XGq;u^s0e*~)o>iWFntocv~N03Vl%RGuR(cP@4QDh=& z^p&QKjqt7$brKjGPyw{rNV%?cmM9vm<-vUt6R2Iyc|4Q6F5;!kKMj6n+arBgTbq%K zCo@>Dm?&j{nVQbv3+P36SE}9A(4iDr1nEBIgWhO8zIe_fcx}8e=k1+3>@LsY^myO& zk&;>Rd~WR9{II}*Ufmj9x;jg#N!Ab-(2f_8t?q(QfaT%g4=LD(*hkvl&BiI;Uc8x) zNY{8N=(I_qpXRMNlB$GPCC}TH!6tFLc7;mr|AXL7bpJoj@lWU!ydE;Pv{r;)FgBr#ncDSqmAegi2iMRzY`0?8) z9b;{!k0B01U*U$>W36Yv!V5CU<{pU=tEvV--b6upi#?5G@o1TKxPBP-C3|Rg3*<|U zHrR^Q_ShLaxZ3p0+6UncG9k?7PEi!`HlQe`2J(W13KJHXTt~rF&gFl z^F^8WEa1lu6h?%oiCL+1*i^#VjNuhjpeq=tO`Rk~PC5b}JI;^pIeRmEeW6sKySg-f zwKpFD31UEIt(?>0^1ALqk9l~D*u@zWW!s9(E=~wRg3O6L;c=7Qf`l!sXK}Icpbmwb zFc(#Jo!mB6AXF6au)Ai(aNjhja$5AgPTwf zt5~FK@b&`yb-(B{&WRFdF>9x;!TXAD~CAc*> zc7!LOW&PR{oS03jkbKD~oy-OouZ7~8={TB?$EQzBGk7M5n;1y3#rrmQBpZ}+N-t)4 z6_C~V%_IBiX)L@J?C78n%*k8u*BVZym?-+RDT&(19jUBut!D{$Tf34wI9i8<<3OBg z*+^589p{@Nu1!9N?iO^zbEugevwl$*kjrRwKiZ8v-`4a#-PrE2dXRWyx!=9-#*wDi 
zoqlAnS-KZ(e@+>vkX=3Z!sexG^Pbty;?j_NCWJoTQAyyzjjmvBD7t4t?x&F8qHSMi z5G2b`s_Wgjk7)X?%d>1uNaTw8t}91NWNX*Tz3U3FjfmxV>Zo?&ig3Qy=tgaxTtW2L zX#<#U7ja0~U}0W9USF)ta2Je9uq8DqN)7vQz|uCvn6SN8P<}KJBbn$)e&%45n=O5 zbR%)@97JGI8Ro@P<6*SUj&G9wEFWT9k)3hZNph+{!IJVPS-=lIadMOwe9=OPc!ra; z7Eti1YC95J?J(3bjoRPkDJLESxv-Rt4?^7D)yE_=0i9$VSC%gz>jj1JJt#|kJ!^7=Uxr`S+q*Dah>GK+=b$@!m8 zkW6GcJu^Kxn9DlZe>*iGyJaRtY`->5hTXih(9@z2nD60XSp3kAa_32VkoOV+^e_^& zJ#GAU{^ue^fp(daGd!I+eXMyG8bl6(UaAD~X&ghU(K87H4>AsGGo>6U9P?C-!P=#Fd!a&4*9;@Sj6cA(aZ~{#_vO$Sy~Pv z)rV=Mi9P`0v{AA=*Pp%@%?)D;q|mnvc_MBkI&JRH&Bu)>IV0qh3}h$?B4vNQW|2bu zHe&!Z+L2dqhn-C5`~?<{jlTWbfujF3ii{WCev`e0Y?AKBU-8oOgE$KN-n${g(snw- zgqo)Vrm@eieodBPNuS;<_xYav1jyys;9uVMI({o{=sT$sM07!X%%vPD%LW)yPjGz=@pi4-dA0v9|BS>dvR(EccK35{1_ zLnLaP7eY?(_<=)Sl*In-a`{A(z!-P6j*47k zN9!A!-j%iYC1AR|EaTP1RiEwVSkUUvqa_aT6=OQV+-8O|g~v>Y z#=Rj{)tmb>hrBrPpc7=$fisS}zbxTs1AQEmBLhd8zCWZ;s}_ly-?J1aBVUbzs+$~KGEIY&oWWXWLtlT@s|N-E`bV`(La(}_ ztrD%}9Bq@+C?_c6AX)y?ogXkGR#2JM@H@!r(tI%DrE=Nlx`!=7Xg|L= zN>Zz#Wc;(2Swz(+yM6JPM5<*ZmwK6@uBP#D5?q1qO?ra%vm$J0zZa=)QZ+s zR3ef=ql=ZM3P3`-P3pjK7VL1v&sTsdzd<2bR*ZqE*P8<`o!ztTt)`iY?`MHGWN|m} z-^GhxFf0Zp152apTkwQ;PZgRO*PY<$L#_E1{m+QGlh_);(zNS0m{FzuC1oL@Al&7y z_@=bMd580NV*Z>!E=_%8#V+5-So1Jy%#ks}5&jOr+;K?q*CR4lz1Kz)FV>#~w-y=K zaqnzCwZ!zW%jf5dt~l)5C?8J2`p;b@#=y49JLRa_yRw9DbFc#aKKsK`#dkMW|Z*o8~}T$slfgX8hHT zO#5pVy!AVimG`=_M(?U_gMUut-kMFIS0i%+Dmz?&7pe-1P@CHj245oa6yHqdD(iH- zYK*G%+pS`b6M-sX>Dp|}2X(2+Fbb|k!^51N&!zaTe3x3hcCdetVQe1&6A(1@3#CGw z;=WC%1ddT)sjBg3c3G6bmGV-M+_iS(*lzZ$80twhnGag<&SX zL|Pg|6M6fypUZK1Ju2;+zxg)nml?W10>i$*H)N73B*}_b0Csb*ARE^_Axd@n~ zhHw`?H;fggOMHJ^w3W9JE@;rjkOa#VSoOK2GoN)Z#1ZxY<2MwwBEtKC_L|5ropsz` zj3n5d3ma>KBhb~NeT_Q$;RS6urdmtEL!Yz)oB6mz+k+EQ>KH>`(lCdDMp^6jF>8r4FJ{_aO07%FD;QU$l-1|c3k8C=K`0Dp~Y8-@KVFi;aEl;?o9a75v z(E`Yk{E!?_mV_919xN>NvVMv@2y~WWrOi@7yhhHIx*=KGKf31MnRIQdCx z41Rn{b;83}g2~~q^34D8Qc?OQnQZ%0_Z^`{;$Ouqg!z&}uJ(fiFF@w*#8dlLZWH?}-(4ybYtt`= zuIAVduyTF;cMs-z!HFE0%<)_8!Bdi zhQ|lB8?st6nFw?dOnlY{_F^#u%4v-dP0NIxjg^itB$V|gn=d6N`aul6ZvAv}%0es| zd$fj@Kb!PpAeKx(QcD9v4tc&$&#{!_ozSw5--1c78?19$-`3;%m!)}Px&Ac%soJ^R zF|B57c)nANoP`zApljBySI9fx@0@1wHE4;%QnQ37Tx^!%IDNvWuh|IYDlXH!0G;Nq z8c5yyP`@+Sy!)l(nKWLHO?n_%t-7)34wh7ix-y;Tf@7-1>G^!b?NQwElfM^ZI#5|P z6`N0~_~ZqpgSX-858qbsq9v%zUtr_*x^g$~O8uTt00ui0_3(#wuq|rHJ!0Se z5O5-Td8Y^-x_Qd^f({O&86GJPj8Ew-=EEVAK9TXf+V{uDT}mOZ!T!6ih&bFWkB8B; zEO!n61fw!X5kqguU~kkCCEhFE%P~n#(6Q)RJB#z>P&&cEkRLxJG`uj2m5|#va|Xds zvkO2xO4p*_g5@*l`IKa0dd1^AAb~Qipv1{(7|Eh`+5$;~x1zgfpW%|}GXrkTi3uS# zwdn(XV}tgTmrWWmZnvb5J{oghw3N?vvfXK%422_?@5KnBQC>(~O5YB#{xppPV z1)77(iL{{G70c-w1|IA3MpU2Kdj0(t@SUXgDwR zh<8JmyP_Oc#{6Rs*ONF7TGZ2l?)B$VA=1ELgq5I>krECo{zozL^_d@)6Xo*A$HUP0 zz)2}->4`|P=`v@aMYw2&L@iawt;ukij<#c7U0IIDyN;%Xh{D(L1>_`BQT44;9Fw`heTH~VMEq8s`4n@a zMXu6uW!LW4`#+tb4P#=udau6=3%?n0m4T9~j-qv(B6hGS;5`h*pr8fwVhm|u#K)4D zx2^@M3^iR(F_gTPg9Lb`CeKuQ5w;4~s}o(n8X~SePb5MZQ#61%4VnBn_Py9Q4rHFc zF|%rIhJ-Jmke%yIL4iZ`@m1s#C;lWPfw7d3rRa7fxpZ8UdOJgt zoC(myz*9DoFuR~osshV+k;UT)Pi;kwE?4P@8@75gG);?*IZ`^Cm8oeeOo&FO7T!Hw zW|PA@_NBT*{AdVh#Nm4&XJnZOs!_tEo{cAr#c(ulu*Y?$fyE<>F2yzS49MEB}}VAOkx1sUD|f2T;SN9r?P6lZ`n_)x+*4ylO98PaK3fqeTz1%~MsR=2tI zNo1YT`{49IqDuzy2bvXy`@pV}-fzuyCgKi^BsEz-m&1GQEuKOO#JBm;gdfOF(n97^ z`yMMS8!(ionEURbDuQ$Atw9Ue_*|(Iv^;INsRIR}px@fbuFtt;Y9nlbF*Jw6hr>UQ z{D~)lorg)z78-X!*v~T_zaF4V0P_? 
z1`wz%cpo^PxHG@knN9teU$W)m#qKSMFw(P3Z7*L>OobweWeP0jYT~qo63GNn%nBJI zRV8adIZT=N@jd@Py52e}s<3MxhGFQM0R*HOKtfXK?k+(}8bm=tIwXf2LOPTN2?;?O zq(f3dy1P51>vwqa{nq2R*7@VO22b2)-#e}yR~SN+ERje6@x$Xi1gL0U0HGe!L3YYm5&oU($7&JR*aLxKUg8}(+$5NlAHgy3iF-s^c zsuXBdz-`_{(`t_ye(55#OY%I`cw~Lofm!P7TbAH<&LwLSPdi(e;e+forgB52@aCvhx+H>zqQtA$Yc&n;**gXF zzyzKLw#85VcNwdfVHDjua#`Vb{_#X-T>Te&qd-C+#SifY=nmAC!*e)~=8t04^*+@K zX};qbsmraO$v)K)gD(ocoomr-`{u|-mh{7I8dFvHw;B&jX1xZ(W_982R>fHPv(ar_ zHi@ThF+_s(s1XIPCFL0^(Dqx~m$=ekNND~1gG0QcaHYX(V2_PXRFS}Tn}pu{qaXHq z3$r0U{AX86=)7amdlxM$84B!RJUH}=%>emE0Iu!jRPXHgxOGYEjPL!kiC4NV56l~P zHIZPqwk6c5|Ad zh?xX_XCRuzYp40sR<{x}DU9SgJvf+>$_{MB+up*+p4S;;g`m%u0;)c zxXz=bF8i7fz&mHAv7zZx=&=Ng%8$;)KOGrtJOse;E5c0Di0HD|26@pnOLKKOT3g&| z-27hFLw0TIKW-SSaHuSoL!ShhC7xyv${-@+PVND4Hn?>x+t8Z9H_ z8%i`dIKg>h8icIzEC@sAm=Pz0WKp{nQ(sA3&UXO-_?ma*1vyznaGfOMgd*ob~Qlllpa%fy8|alvb!p zMrT+a+{z}2+YlF3Xl& zwg*3Uo$lduq*qSe1S$e2Qnm)K33`()t$#}f`h~5Q#bD(SL5leA?rc>nE8 z=i4s-T|C4XOE(x&>pJHKNbW*E*Qy+*S&U)SisLTkh zzLZuWvNya{84KmZGd~0n2|tR_sF9!w@Z}i`^Cs}etdq8qoJ1cqX65mgbt_I!PfuF^ z-Gx6KixKT5c@f@&rO!o3j`?A$`kxn|8IH3sz_4&oYYApJR&seaB`hPN9p}4i{ewUf zYmU+gbkMUS-{}Ql_8xmbIvi^4%gl@(Rr0-u7)Ei7Jb5DHABaY<5E#HK0W^xax)n!1 z_p8Go)7)n?6v-bkpHc!a9v=e=m~ecFvZjGdX) zZ`Ay>z3|#q3=Xsx81xV+vsA^mKf`%jwG+QYN^!$Y8@!|UOKj?X1HjS-In3#yM*WIa z>YpI3rRN{KYM4DT?zM=IN1RjE^L~~NC*JC%CJ^s9;S*e+N^TKgSp!%`F(SVMcxG4-d2Yr6BxV>=#u*X>Ws<==(n z-qTwIisWI}5BSwMp6UX<2coPivH6dG!!p5y(ooSv^cS5oG>%A69&Z zc=d`u$D6pI-@!*K4_!|t6NW;b!mPgam7Trx>g7N~dHt7f8X${@nNj5?tR=BeYKLMb zs(eb^nbnZ*Cy*B22j6x`Y7O%Q8iDU;1FU~yb_Xd{G}SY7{br~sGNes<5VjEFZ5Ak(X{M`J97AW_##P$}C?Tt}x)C~@_rEYoeEvkw^X)%RM z7ASo6_RS5iQC+dBUhwwp7tuc|8=qK49CFQakCr+G!@d0iG){ZVDxBW+VQK1x&Y)93 zlj(UR>MxhkH&f10HV7c~x6YC483Ar=zr4B*d`~V-I*>D|7IGy!jl+ub)V`HCjf?Mq z_?dJS(#)~A4N+913AIwO@<`HSGlw2OQY*#!b8deg@R&o~U|rtnYa$6vHMKlJ*ZLsx z+sdg-Ar+J9Z%@F(=KJ+z!c;+7u1$W-?oAihilGZJf_6i!m)-*WJFN(Q@qzaWxG?~r z!g;IpT@}|f+}z_gN70KNzrnx~?DB=B?;ekFVw@Bv(LHM4Bh%sdEJnfYbuT?kh(Br0 zRG&1V#6-0q{OBzRjhQkeyNbW;(KE4c)IP?fApAjdoeqqwQk)! zor4_s*cw~ps=YsWUEG^NP*{f@`)w-ThaWb42&2#M2Ai@xBfzNhptn4gcfUx~1C>Uy z7Q4N<*5?e#RME$|L@rK1@hNaXd5af zNiVOG=w=4UfuRASNuHL2eN(t0PaSsahIr5Ezwb}d?7zD@dS23%;rUj5(7q|OU1sZ~ z%M>$MU#}z0pcRu}+k|F8$>VW5TkV-~-Fc`}BGKpPj&m2H;Tl$y>(kfSv-tiVtjm5G zajSL)O{qT^d*8y$F0iSZt0OV*d>qBFf#G5N0T^_b!c=dUa?*`0jHAY?=VgjlH984S zHQ(e83w58Y60$DKu2+A%QuEIE;mSqYuCOI|%Ta3a;@a6zggZ>c=aUf4vw1adNRHFg zZi^TG-G190n&kOoDqFvmeXyHwx3}@S+~Ikk@IhFMf`)K0PCjSc5vP4AN)c{^^Wx4{ z5y8!H%YoUM56WO$31+nQ*PEQcI948eDY{vPnU34HZl9Bmi_@EY1?+2(9c=X;1yG;z z|C$!b1e)A@c7+^eFlyH83pN7%U)5Ul5i1ze`IZ7BC>b6`(8a>%FX3I7bO+khEL1(~S~Vd`Q_fJfv}L-U7*LzpBO zWe3EFXge=6F2UCiwV?FzYA_ z#!pgfWneQEK(d|n=vLeQdo|mT)qY54T$a@n{yOa=7w3j}I!W2cGtdm9>j)fTdTKKH z#HkjdzkBaD={Ai=kmPXf(}n)%NTJ;f;bxMtOM!#ir??t@a40}QcjtRH0B#7|-oIb? 
z>=}oIX}Z8?fi4mKlah!}9qb^ZUsdw;J&8({Z&B0xs`2TjS4#S877o@`kA6O?va9t& zVEOxDYspW>cw^EA(!0I68$3{c^0|JkL}C9}WH63#X&&IcxJ-TS#Bp++e!%fc7=D^q zinfT=7Tj=x3zqdg0Ln+amiP3ty$#useLQL@bmviNN{H>2B8np8G+tlgOiJW1g}p9; z>fkW%8~!<^`ayL_NkU;0wd{Q6K_4XfX%Kry)#Q&95B$PDFYH-9O14Ofp;{ou1^b=# zw1B?tRoDkr4p2!9vcT?~9pUL&!;{87l6;h8{%?Uy166f4+?gCjMT^9+?jpOcQ2EMY zr9JNFtjtO+3S#QU=xhezT@3xf#{scO4Qa#x@q8vyzXMZe**sjp9bA)%8!@!NgDn5< z5ws6Av4#tMqkts8VD4O15u%T}9| zx;tlTRaf2*3>?F7KLph0B;_5>V*)oO2FlZZ{#Z{4Cr2Y>Z+W38nbWPk>mxtL*^>7m&# zpPgTN+WNUAqyXEy1(gwm-V7G}bS9;2ak0G)B^u*g^en2a;`mzd?TQXw)BhakcKuW@3mM;fnfecI@_efCEjo zXTJuG>1le|byjbq%NE*dM&BB;QUAW(9+%1LptXN)xS>$`m#q&r zldy|muYEk^LuJM`@r!UDUAB>AmdhfGqd*CSa$5eyfs9iTED&qm#AAZ6Ry0A9>lDEx z)p~`G{(10C%ughr#-b%c++KMX?HjD`R9MDJp5#mooX!K+mY34oQk4f?(>e+1Qmlh> z=bR4hwd6zV)2e7y_4?L=St9rm7tl<-7LpB6GCw81&{n(49|eSl?G%R%hmp7Au>D_RXsF9BlCpGT_^LvP2@ zLBG2Ee&cL1l&6xfOr|&pD!UkCG@l9A;ygDEFZBs?{o0u?F;N!W#YH}1PxnR=`$rQ! zuDVP=yYuk;2pT# zkvg`LeIGVp;md+TMU>^&a3r9F>GDEmgY!8iE>JplWSF-D+7_5 zON3h7jF+Ja^JoybPex+EatI|1YmZIwI6ZWACG?LkEj+>>HUS7R{-|}X{sZd&&z1;k zX*}09-&Dtf4qG&l{m20R54UM`MkJU?TE0uqJVPP#H>Q_^hl;({@o)x+f&)%SECw?E zgZ9U+I<2q{kN+hoLK8XQsxOHW$(qST3t3JZC10@xo|}kt8EA~t%bde!q9B;Q?$-q6 zugw||f%h>rVsv}?&{{EEq2%SbM2jE3F&SO@Lx%3D1cG8(ehQu+`i+8?+}FV;d4JM*jM?mhS59L#k zf-np)>BT`0$Iu1vHF zfJ}fPXHr+o-i_CL*}~K#&@cIqYy9&{4;T9bUd9dP^ByGJX>IGkQ|tYm&o~*DLDR}6 z?S9ybegqH&H&tIg5YxIXuuV1ZmIE~2&F5W_lzrzt`Pzu@?-yXj`Z{m_jFrq;ZO+tm z8bFX1)VxKX$*Ee+oN;$YX|9Sisfij;^TJ=&i{_~9?~H7HxxNN1oDXttG&Scw6{~?r zG<~V-`^0tk*!$Aw!%66Ry;u;&Z*{XY8@h+L%Fc!bc|P#gPorQ#%WWSyHdu+9Tn> z2Ua?E7$bQqnkVwKAnMnQ$21v|U@Rz8MbRQRD~U=RV&xJ0J#SdUA(p;tcH!c|lJ|L&WAuSg6g0)v*AFEAPe z-rz11gh}sHS-+-321M0iT;R<=zC1jSKGUk@y^rR{EUEsyU9`b8$%l`V)nlTg5iG8W z1qDm``1nNc16Dg;k7^g=FRvEu7cLIWy*F|vdXq&_Yw207iDv7_nL3A28+d&GU=(pIZV`SumBwn;D z-TQ(u08ltRncckqTs3)`PktpRFyUyJucxrM9sAIikkFmd^Q^aO;gj;>&`by%8xHmd zZquUm*PDogVlbs0r4V|4i>a}af!&9CUwnXFAl)oUk61k&M@?YWxskt1q{ig0Gsd@> z^WZqFaWe4k3f?IISLYaOc!{=fW>XVd$BGw1qfc4I_IG-?>q<0j(g0#Q+0VpjnROsu3uDm!#2b*+vA8Mhu-z<~faGbaiRq>mCBtsNsC2x#( zV`D8@Qm4KJ%7ecAU#EzRC38#5IBS`2m@f#iRHJ!f@TLW!vd>q z0ZM?k*)rI2_L&7O&3SrEPe)h?^L$kN@vl0GrwF;$NY^*M4VVbbAlfP4sxP>zxbMg@i3G#%to>P*@gT zfAw;$w6Ve_oUSa3PU;e%g$shXu%V7lrY<{Z6*?T2A$Sr`*@IohBZhS~BK>9|j*d17 zX(vAw?SzAoi&V&U8$H{%hvu6e_o#I51ZlfsT(t$MQwJAlzx4V1=KC@BDCz54o|j@p z2lXWD(OD|8G-6_6n|G1QB=;_T>oSLxvy*K{ZUPTbzFp0-LTl1R1Z+`Y}2Le{Y-$9kkZD^NSBa+V6 zK~>9)G~|+9QGA?lBC^Iybtl8S-(EUWmNsY*wuCl&P$__Xz|t5r13=f99={{_Vx63F zrkY{Hh;gSzpAUP&V5R+VX|`Y>m*lf}u_U<2R-ZJ<2y5lfNY>s0{z0n1kj}R-FW9(z zsvkee>gqTqI)(nzjlM(;?jEDKMp$4oX>88e)F__cI$CU{ghCpmE@E}QBT z9DRY>A0TxyGRL%^+#$75ed(0-5!2CRx^)Qq?WN- zO=)5M{LL?M1%;lrsfxEzPSgaqHGi1YI^O;D6VOK^A=i*g4udgNeF0Tu^2Yru45Elw zTrEr7C>W^Fa_fb+RDucuhs&JZW`4*KdPQ-gVO~)CHiS~&cI!NqMmQ_89`ptCK_Lz2 z>FD}3viAthbaSPlvuSJZcW!(0jb!7c6ZRM1Ygs2jVCakPgF2_i$!4)~Qi*kAkdtQq zmAbAa(23pvlr&Q&B$P!UTohdMuKJHAWt5mvoG<;^8xJsM zApKBNW6;SUpZ#&q6GbJsj`1nWZzWdp=rtc^Ypwm}Wp}!BeTO#ai+%+33Hy(3^(|6e zc^U%vcc2@M6Q7kJXK(g#(@sf%z-z=SFM*0T62?D9jSvzt$8s{;61)u@u4=Cg@2D>w zi3XE8h}ldO?G4y(dg&D7Gcnzuxbc*M+795v934m(ieZR=k8s3@YQR{ol5@1CAwpdp zX#tg_g;dN)K*T-6iy&Pl97%z3iGBZG8eC$2?#n9K@OEvZ{k#wM3Fc~VW1amasq&Q! 
z3=9nR9iV6q=e&BqrLcoV4*NfD%zs@Pcr=)TFE9ril3MWs(UOqF1iwBQY@IOPsTwr3 zXio%JpPa0DWTP5RsB2k_p;fFcl>b(m^<}_q*`GVxe*s16jvRJR-*_U3CSa6qs;ItI zyP{la{Wf1JdLxk_+V|eOk=I4Cc*AizrV=MV?Ty*->x}XVRoAj03?ty%QdL1(%kC00 zzk%OVLW5Vl7ISbijauXrz zl@fe^^rCZha}W&U#msaz10F`2~ZaJgbIo5WDSTXcsNMdX7nJFUM6p2Bm-(HD1H9%fs7 zJ8wwf(ZD3vWKSm>eP7Juuwj|ADZcl3C#!Lb*GCCerCU$6TTTXqh9_c55Co8`d;H^j;)?P^RBj* z*5Z+VnJG=~h*IL>1HN8x5=KF@5` zo(nApy!fJARWw%4iZI_F%F0t5guHVy%U*eONA0x;Oi!W$CNMTnD5eh*6UjF@=u0eO zV6WDIn}Pgj$L4PqYU#$c>KVs`J@*{`!0+U|zldR#*60n#VLDUGng~cJ=R#DlU&qMt z30qY)#RkD!L$?FFLdtCdQrD8_fGy2RzH?did16)`Ny)Z5nPonhF4*c9!5&k)#+?6Y zb8hD4j>-UChIP4!!T6LP@|m|$AjbQqt=16g4>Vf?Uzj7^*Ym|rfhMbad{Gaz{ajVW z3(INqwB08Vdg()V6BkST1rEC)1{O-8q&Qp!9}=RLcXL1Rreo-aUduJj3`?a=B8>P#gGzA3GCbU0y z9&y2IwU8P=ObOZLzK>CS@q%jA!!ySt=Iu|$wAagwVn+p(+kpEAOYt~RGLYzy-`}`T zNFZZjft6-lS;~8s3*ByhVN2NVpwubSAIuB0<_`iWn)QvdWa-a$*$r74{9~_86+(^b z?YyT27=1j`}%miE^|wUS9YfvK%kjDxd}Gi#NcDc zucrs>js_{b#CG-lA*}%^;_xI&B|2-_&rbKZL(Nx(b`8rPIK4;-04 z27dvwK;6yB(w-=}km@(M!Us)79T-btW2GDsCj$XoZ&)j(LKeLbB{s=ci2sCBANQNJFa0 z5u&B5%P4HW-gdcc5^A5<8QbU3Do>mt`wpmCl?C;W{54d{c>=@_8cs&T-*k>|bo@Cy z&o(!Iw@csOnVC`we5K@9o${;kszV2ckG^Ond)pp&QlH)Cs%`mo83UT9`i&6QU57_8 zbhsEGFj>oWwV3cnkJq$6-B51_V`hlFeD!fDKP^CAf}Y`}0nEm5z@@TT+O6>3oS9IK zSz*!2#F z1z^NmFfLisWv8oJXifL4z1b;Z+MA0m(dNlG!h8&aK_Xa(l2Bu0+;tR2Q3up~RUYN| z^O7nb-FY|P{Gr*Bq{th$#O&;M4CQhCb*GOUyrKCS)ILKA5V`U6S^TF@;R#P&s_y0h z5&QGXykK7c;KV7*ySe%dJASN5#ekbtvg+c+6Boir&wq_ZQGQ%z8TsQ9qyisjE> z66jHgbtl_`n5`+?OI%lu68EHXWOo!z(S8v=mWLatvY13Y6;tY4MK)bnnyKJBzC3I22?f2-@TRcw*m3pOXO!DMjsPv$Q&wWk0i}CyzS$vrB^D^HKZfqrc>vd% zWQv{)Y9(7r*4~u$xlWUOFUgO##35cgeh~qDb%Sj)Jka7`kLfNOW3GY2*6OtJ9bf zP=`>SJpi`WfQK3O;Jf86fza|V+93Tb-bcOddjAlgFB4rF-_s-{oiWP^50m_K37wn@X>9f*O7 z$|?KN|KP1y(O$Y@3eEUi4GLOG3FJSIo-TT3vP+OKPrbxxYpF;vjY)=C&8eK|%J&ds>1Gux5QpB{8cSjdYD5 z7>%Wvi%phr^bwNq6lk^yjB8jRP>$UwJvR6-!>h9Q;akZWzPrNhtO0J4!Lnu~l7jmk z^&W(*Vcxc8tsc4-02WFjSOPFMB1F5wt7L@Agqu9CWZu zOlOC@96@XL9Ln(%CQtBU3~=v1oK?OQ1Yg!v(pj0%-$vx@VQ6*GN~G#ZlRhdEQhE`C z56s7hQbOC169&I?G#!5BVS;hxci(DUd7X}luEE+QK?RC541m)GOguKw(gLEj!0NYA zbL16I#-t)iz83qBl~4}-cvPGiboOz8+r~k%OBHwKnjRY}>8h>a_3ahr!NNVV%FDui zNm@y^k2g0rG;_#77`XK}Q-XhkZ^Qw^389sF;y;IwZ-K&!bz=+wv#O9r3AF4BE`bT- zo;=Za~56yz>OP<`))gkW&952K4Gr z+2|xnIGBuqw81)w&Ft`lt!kV7={w2{AC>}<3tisjpy(%$p8bTjPI+*gqdAE65Vb}` zD0Plz2YG)}^fT%t4kx7<)`-8(iX(1y5Q|=*6ndqY1G5s+{x*E{lSJ_DBx7>e#d+@? 
zb4*_gJnskfOe99~k9$E?%&e=4_(+Un0owCgnQ&ow)44sM?`e%6?0G;DT z1*}W5{|C@U4ulrjy+;IqaaWIDTmRKvv69VfXt+W0)3zG7b>-{B;0^?4r)kG(WPVrI zAhwM58)D8U1qfQh6vb&IgfPF?hkNGxJ`tehH)mz({W<<^i4s@}E-f0)mjx>f{wP97 zuU-`vi0AC8-z|k~24S@686=*Z?77l^WrR1ihg5xX6U<-9rC&)pWe`)1*6HNf;j&lZ z{dw_B1`(ZskEqQv7DEwo+t#hq(Aj70!I|V6yTJj>S56#`%U(ARQ?}phZY=<;~H6qR2_m?+Llo?*jFU;tnlG6G=Tq-zilhb-z*hs^jy#SCe=Y5;x zaG<L8Y>hB{4#fW+1|9d5`$ocI?4AWiqefzhYDlK|#bMK+(Gj<<(UkB%yoyE91Y zg&n=fwZH~21HnA zEG%Sw#FUu%CXWQusQ1<0{9NH2l%4o=re>zT#Nym12WH!yeC|#!&ni@Dn}mfK`-IF= zjgi)$Zrff%zV@ed`H9g!l;Dt^eVog!1ik#th{#7oEiGbGan|^C$-MVO04laHim-Ga$EvS7C?bCD zM`o7jcrC*`Gueew=ofhg;FH=VSN2R+b|Tp8qgAKPzslW1QVMnJA=^CqDHmV#aB(Hd zWp^gOy?l4>V)==~v=tgah^wv+NojO%0LVM?x$IIyPX?Y3m87x3Vm9UhlDnS`9-ld+ zAxjb97z?@j6EEM9CzRO4o4U0_0d#V(QW7Yaa;oJh+M<&aTLRTe)3xIl!KS9?@`oNb z?kHlHp{3)3%?%W~KSl%IW}Idgvwk?o-Vlz|gd^BLoW@py01$|YnASfMSk%a{8avy9 zxod*gXxyu7dWxg5apw8fG1SakOwthTNQk%rd&g;8Id`R%x#{*x2UboKQnDb7XD6@b zqC9Q;c49&6144iY%UAsp5>qNUP6Pi#8~yBohQm}j72_n3a2VZ4^A9&Ntv^305D9`7 zswQN|oNNcFEIrSsSws`O`#{IZt-Is6khRyuGZFotxKJ1v7_i?Cri%<5R4}lV7fb)$ zWIujbU7SxHtfZhIi~_En@`{EWX(_$W!f(ejtz7L+zC@PCuYF*>oVJh#++e=aP%P-{ znq~y)6J<^sxO-yu#zp3PW6V?h}NHwCeh^y2-%^9RAjoPhcShJ zopk0(Oe4mrZ~uC#!pXld?0N4aoeTTr&^L`Kydy6VG?63#4%$1{x=|5txp`=cneW6>TjzLkWHT5tGir8CrwOd0t)uj2fwyh;8g(Y6O z)#rbKEX69l#`6mM%pbhECXu^jIX|nBja~P~f}Kg)&#?&@#Q$Z()8WQs?GxPPvb0uZTMY19uBKsJ9b~v27oYi|GHU6va_(+YWrcFmL1?VU1ayhe9~i0FxjyCECXw3 zJgH0gDV%ROQxjP3`nKyoTn;NA$0n=#@U-7Al(c>g&*CHaxT!`M4v`qI&ne~KpwL8} zu}G@fjUZt}VkBjIs|v8gw4o^2(h#2wtA^mUlyxOeyRyY~@Os)?+%{&4w$wT)$t z89kvxkH=ue^hC81WqaWCDJaM?{EyYSok|_RTwW>_$)l1Yl_Bc7kVZtkG;L5nuDbW# zB7B`>Fti$9<;6i^5l#d-BeUi%&5yYGHcIt;Nb8+2L@vo`kY_aN`lLnG3Ysxg$h@Aq zK@Sgx4SkXXJ8h~b_(&Aw<0YX41p%`{~^-da1^##SA8DW{ZW9WNq}WC zRRM6TvARp{301#;xg#*gz*%JkMt2L2+8GV9;Opl^(4dI2AFl#zm{h8GoKDB?Bm>oVlQj^BQ*Q+mS&vM{SPX4lBdeWv-k45v zQ={P)F8qXj7r873ST8x|KlB+7tpi=YsyM_j=^4gxI=C|9!*ZL?K0cgt#LkpENRQjP zS)p10u)F3ftYlr0F14yR%v{c06@Zsn5X^O}do4v^iapBya;3>ey3|VY3~?3!mC1nt z9eiSNbtJYLhp zL{Mo1OqEfQlBk>>U~%~vZcj4=m)OVtZF3jz<9pv&;=i7on_RxnhXZ}7+iPpOBCDSn zt;jGIgkkJcAO(Jt;Z2+}f(tx&W$$E?4K3j8RsZ7d`m79iLjaN2%wN+6@q*AASI6A25tj51r!z-bi= zKN&n{=K2_~lH$y&n`yoz8MUzQsJDU3CQT~vOcw#^@9to}GQK}v1`_5Qdh&S?+GNo& z9XSg8)vh{p0T-Rp@uTH%Bb(W;sEtXR0)zshz^lwuTV0wwD0H;PZNeagG37SPd| z5rYG`i?lXs6a)MM1IPnX^HC+tfy4eA)xO$%B2gN(YeYgJ@)uX`H7?-Il)ou_?wCnX z8#}ji1oROS$dlo*_|Sql?NZ@@xAAVoPJZz?H>tt>=;xuto;re_r#(~R^D<(wwJqq5 z_q-Z{%oB+rJE;|mplb_dOa|yIy%i53N>Ft8MtZm^0$2|G%geILd3P6`l0PQ<2Kc-- z@@%vZ9^u=j1;`p6<9L zJTaeOPe7ycJhQyGvIa=Bz#-NEYwpTNR{-=PqC3q{YA-7NaCkOzhlkT9oBHBYGOfVL zp!TykKw>`&lu~H56Lt>{;$P8xfn+V!o#6acX)r&y3q+s5uC#U_puT)a$|#|P)t&{AqVm(Lu)< z$0ah-H$)(K_#^QCTfFA2xBWn0Gi$-U>q*3 zA2E@XI}{kzHouSVuBp5(vhb!X_K5_HKs8Cmc8h_8%WeRMHx$TEjhGbgoXidiDu1SO zn&sK_6k=h%6m0b3bxLMI#a zy7(40fu`cu@u?J<#$wnLhB~-q4K+cSiC8!Fwp&4K*)oFj9$hGPys2|*j=vULpA3jC zsDpy-88qGnkElN9-0_Zc`*$sXMHqbi8{1g*pLR}e`TN(iOU3s)=76qvrY(QLI}G#* z7XNfx$c1E?>R{+I!Z#nX?jlde>5=(>EmTYOf3@U)&dZa5H`WumwGwd$f`$)QFE>^m z!7AUOrh4Q79T6j)uBeuZK)Y=^v0c+t$gBZ&k5@_?HV#=g5G)m}TYG!Pvbq+O z;pAX?apwtO6NO)Db#<(WI_fgNL-@DosUttd>wBC z^kWHkHwgUV?w53_3hwP)t$P!gf(?ZsOymNeE(OBS@!z?x;NsuizK?P|mY0p;dAcRI zI{WNSH@x_#_cb@5xK!1DN~p&?u)gPCQA67G2_ z3VTPoL=>xmwq$8cY5ixxYs=cyA73@{iT~T|Fm5<;&*@e)e||`={7P1f4E`TSH1h~E z6>sqiscJo>r8fD>F_nV4K;B!Rgjp%=-o?4`Yw2LjwBz%zeH+Zt><3sxxKaW5217X) zagN89K^Ur#vCBO$Zr8`>FAI^FRhXqWN4nbP+;r#|yV}Yv{(+Pxvh`qQN0IV$ii(QF1L-0&9(P%2pHtr>K6i!F{l#9v)q#i{qY?0U7f&TYxXJ+2Aih{? 
zK$Ug7*nrSB6E;vW!0A<6z`ClZ5iVW{BztpQw_g`r1np|?PmOeJ<&(3}Z0#B)UUc?x zcyhfrvRA#ll4Z*dmIz=adZcB(q3Wp;iV&X;uO7vO`sFIQ0K_dL!1S`c_g_C0uuFg9 z?g_cMk(Qmb?*j4?-P05T!!OS>@hDjt5@RMrFFOK&LW8fTiLA8#PPX0eF$aLm<&vD( zaX}@;J`ok1h(4&B{JpzoFE9p*$&M<8cM6YM>_$E5o%F0a8V;vJ8)hECREGu&3-{04 zy_#D}XrNBLtFtsBh+*dqHM|x`vvU-_Z>C&dY^^@-U4grPCrXbJ_20-%lJW=8v+Hr4 zyvW+GU*&OaA8H`)^#IIU^KI(N|E{4W4(J@Ymaik!2U~m|TWi)V_-dO+_Wp~ATy6eB znXryM1ufYaGOfKIaBtUMJ&rNJ+(y59PT=XA4J2No)itQ)Um-!`Y2 z_I_+*hRs8=MsgHfS+MioAnso+#(Y+yp`~qWY=%$ebBu=6qFG*_N!?$VyWd|9Mz;V$ zHtP~^OZNprJM$)W<$n|BmaL$%FlcI~PaN8+c_85qDN2eP^}_1{#ypjjyh*hTQIXE2 z3PDV65#+o#mRADumFT|#5*vBs{VHd2i%xc#|a&G}@N{-`~&r zgAb#47wR(W1{A0Oj6KxgVP=6;ey60!L6Y_CV5pJodSAjRnjMMJ}V zHpWT$Hc-CxN-BgKsH%tryk$vg%(BFmv!C7)v()>w?IBbm%41Ogca>6t_B_K$1b3eO zMjVf}M)u0(hREc137sva99yB=(0(-oXM#N^*veWNeRLt1fB-h@#_?<3dpBIgX6t+} zpvj%9VNjIZu@>?51h@oxO;{#3>)HVF)Zx?9skDfD}B>o z78vi~2iUNI&l{%94+cB_Q*fXl^c%#q8-ndNX90A6CcMUk=Bb?*(pY#EJZ5FpUvIs+ zRPKlLw|y-^>!37g1ySxyOvn_ciC0u;-X>dn0r0|e3%A02f5-#&SoOu&0BXjprPboL zu;qS#`$Hhwy5}+Ok$_acb?vK%yYxz~}Wy~`?2;fiiUkf+!{p4_@RYir%P++n*j*6R_9=k>;Azg=1a~JD<`H~}zJ$cH0U;VNW zUS_UR`Hn$iKZmtGhmW8LvNWP5;=Y6$Mdr!9^Hd|`C-ecA`PG^w)iPZrSZ!BzZLSBJ zmefiJUN*xm(!QJ_>lUuGvt62XQi-6nX8Ik;PupEKZ1hkzQ!4-SGFCJUsgsR6)<^N4 zfkBGpOknut#G3%n2>@Vjp7$R8?}Frz!LKM#q@!u)paOtN80kii z+T(CQh>Akg2A!d(i09F#(m`5U!a$62(CgHi`DE#|Aw8X_0Rk;C~naxxX5ul>@5f|4%p(`xd1t zw}%?UEsB-?@0#HMxI4fJS%i44c%xIBJ@Pf$jyeiAg>6`Hyc4h;vXHe=Qf#<5vd(egj4ceCbqln&@JXP&vLSvWJ< z7&wgj_#BwXxRR845<K z*Le}IKrRdng6ZyU;Ub>gRZg#V|Ja7xt4swL1O6GE>?f8C0k#^=_v>FsV4rGt*cH4X zJ~-u64;}-FaRi&jeD$}Wf0|x&9(D<{vRvOg4}4zRB&d3uDb{Isj9B5%5}upc@G^?{Sjp`v+P%S8q=KaUy*CdOv8Y zK1z4?t9h08btZ=q&OyasuuUJS8=6^>oQBr|9%=g2ZOWH zsJ}q=x^Ik3GCnh#`!OUjzTOXyQ{x4~@g~F;PatJac`Jgp>(8dVA-G7u(;%b@;AFqXU@bn{zH z?CF66Wq(C9)YDBsebFs3zcW+qH^4g@*4WHog`aQb`|o%Pa}MyMG+B*&(tP^kv*|aY z2i+B>2C4twJbpA6ICT`fghr%BppN-#Vdvct<|rfCp&HSLua;84Yf``iW(*}dneSEQ zy(DNQvq$<@03d9cfkt8+C>>`t*lgunu;j$KM5ae?G-R!`!v&0q*{&|#vK6oG@b`BK zkq1p92^bQoP~P|BFSOt2&xWu7U#g=Q3yR@&{Vme>wl84WjMDD6QpKAV1;)pp1jod$ z&j3S98-s+I!-uRQsw`teL;W=wk5m+)6;tiT(V!-bY&%oEh#U;KiXjz1yT#!^Io+#nYo;Sn$oW_0Sf`s>^BD%f`CEY z5EoDq64GFpG{nI_$J}1gG%E-hMH%s2G6%LC&>mvz5OBk$efGZL|JC&sU{P+*|8&Pv z(k%^wqJ(rwNr*_Ns36@)EUlm@9nzpE-6;)HD&4(wm!wPW|Lpx7TBtJTf)Yb&qN`Gcat(&SG z)z%H%A7q@|+*=f_p7&`?BNBJR|MNy2boXU4B%Ih55P`G3Y{#F6wbZ#r_(oO?W=#P%LX=a>L*e z?H6{Yca-`T?d>!~48u$=?5zm^s0lI#xfr+oEinxfPh2CT$*K?XXmY_o|CzG?#p&@5{A?ilm$5JHDeF9ii6Z zKo*|Aj02v;U~KbfVHG4(QV|0j1;TNvXX5D;gCBnre=8;B8IH3M=y?8}-Tu=DgY62- zFGyt0qcITrZdU%&$C{aB-)Odk4Qq$qbw^Ng;5fbN3S5v z(;H4d6nJKFkifsKIsws#q+RT2Q`I)W$>eW5*Xo@AoT)eqVgcr#d5!#_Lm+dsT0dq3 zQs+ht!giB-;12F)kWk)$#&03v1nxb;imGT&6@mX>k$l6w<~wj8`=1qfvj4Fm1GZS^ zu29LVR7Tofm%Z8I>2yDm(TN&T8i=tUqaVE;3Mq2H#Yc+fN<}#zV3jtj4n6q$jP|&Ps<;$e9^rA#&>R*PERUCW z1m-YSH_4cgaH$Sz>j1Fz7~!~vw%MU8pggg zTU9?REyi$f*Kb}O>IY%R0CY71DznTI0TDw@3tVt@YU_CE3sO09@*iRVIVOG7vp9d( z@>da=5-Sjotbs?R`D!xA`WhtmgR~6t%m-NL`@|R1$>iz8u0HPi%T?Vn}L*FN1nT?IhCT}(JwKdU_M&wAFOv1Y(?>hmE{%;3z$_cn5 zL*!*Qa32XF-6n5e(b`}T(Tfz275Z5n_`8FmrY;?Lpp^pKuLcRY&I^N_@K;n1?~xxR z`9$7=lDEiZ;K+19ic5gO(SECc9Dt$3#D?2X!pxSlkfx|RyNCBX$n;Vws9fc(vcAkc1$(nB1bI3FA!Z(( zH4u`Yx+68o60r1rF)@kRz@%VI1Zh#|dd2Z^vu+`Gz1QhD`~}Ieo( z-@6w@(}~10RA2*vYdds?{Ii6A9;nrul^Zl+0-O1ZIMiAfqW(3^T^i8G%qZ}e`9S6< zg%|)@BoY(jAqd9XKFus30Jqi5zDq%S2Mr9W50u>7_F6EMTL2t)LAKF-Hz zxQZXej%jGKaxqO-S37=h$LbXck`2C@V&I;L5slRIOQnxgsgVz_leLIK|HC%FV`nxAqtjlnjzx`DLlS)VfpZ`jTj%Vb|#MtHLx!`Q(q7tdS*Z+)GePZ6ji23avf;F4zipa>0WeVY13+= zNIH3Sk|Qk>hP=x?Y(3*T+1vwAPZoxZq$tGnWd0{S0s~}!0`dcq#A_u3feXlZUfUCp 
z=If*fPIAu&VpI}VZEX~gy6E~=4#3T0d=*ya_@q~RyO1aNWuSu7#!P}NzmNjl5Sp`T z2?`HC@k%I(Kv4~v&bRnELe_6%%2$ZeWrKNBT#5cWcZN(-84)6736h3)p zYqEOCjE3zXrf2Y*Pj-7isw}@M{1jQ~*&|_%CgGHF5;w>$h*jC{}SJ5Rqrdui(fqC4&C@RWRWB z(t9jnrE>8TzZGa{UiyNChtFp)>vHQC%cQZ8a)ZtuCHGHM~(j)`i> zVTUk;9Ed>DYm|UWDh;H7Ow{P;f09JH3Y0+qf5OmU)IC<<*i-(MEevL#J3uw{D@2C3 zMvIA42Xu%0!Qtb)39)+L9%~*{ff+siM-5z!G)C(0BKV5cpZXgx5e?9;neDHF>rC9x zXTZxKM4V?+=Ig9S`l*)k4u8}DLgv^(jdkr)n{?Qfd1Wn7pMB?-%P=8l*72H}lZ#7V z#QS*5Y3E)@eAN?T%JA6#+>0clo( z!*z?6O+uLXbDZm$mXCk2Q^^KRK|vu80TAoohZq3oM~A-lg6>3r1|{2Mfh_!eq7Wj@ z$w6`79?aMbhsXqpFl#@UXadF4!`?>}$TIt7$L@bfHi1Nc*rs}hC;!(wNRzw;N=wB` z5_;-N%KTSY%t{*V1*+}DyZLZ2=b+x`1Z;4oGuwXzAU!&ElOb4JM9II0&%mcaJeA1^ zX%zny%E(isnY<8vNDSC@16e`;F9fPQ(hgX00u<(-FAP)zgtDnl9u2e=I@HKo0WsvUuuvbx3>Zgh7CgJQO2U!yCTeRfE;xR!bU=se6PJiSX!-#0&ZbtodoA)k#>qoVbemAkxsV2W`>~}ev(k$&4;|X(Zol(q)4@Gr{JE=i}eY~L5S&hv2@^q6`uPh6g z#*Z{1vElR*Z*P2n^dvonVBuehwOH)nra(!!%|Hym#e9xykuUk1#L+(E5QSrh-=CzH zl4L~3TFP}%wO(TQi3Q*_&<-ET=P}ohKdruRDndj%N?_4Z5n6M&bY5KV4B2hszsT3Q zkT_kVOYtyKC19K-*lqH|SxuWFHLmjtn)Eu`mizHz(YZj+3B%KqCS0oV6U|VK6WrIW z3I35Z-G{}+>L3apR~xW=(8p8ax;{Ty$>f%oUzh{Od66lHzvqV-yOFc~q$k)IRCMUa z*oN=ZEDFy zFqou3-BI!ITt||Fu+{3%c9?CabhbOc^xPTEERfL;3s8_}Rk3Y~(@h$j9S4>9h9_Z_ zsyk|zior<;okG|1C62-e3L(Y?V-*FWi5h1HL=2ue2kL3lq>A)$@3|c&v^p@3Rk=svtg|FD2Do7!BRK8Axy?`CBxg} zEJYo@Q>Wr{`V=#szqom0L=n_XU_(!r%of_>ttG#Gy_@3atLheE3HC>q4_YI=nC9vE zfk>G(Mc!zk+-xMXm892A1NAflht6G(L*0;^ptF&cND zPF>;SuZMW~N}5q|n$|$#V7|16#$8hlVtD+uoL9G4P5s`yVpBEK`D)pKrO_GAnQ{X& z$wwc>@zIer-Q^OQp5)o(+EW+a$gPt>$=g^x2PjiJIL;Gb}!XH4&fj*STVT2ybB{ucw)KKT_Q%JG>%5 zW$1fZtn<_$`DXxuOB)BtYIOeI{`}cWqJ8CqtBz-3rd!QoyW=uL&Ccx6*$}0Ln&tI#ZLReDmVhR!G-t!B&?&@ZreL`Q;TWdw{ymJKUq>< z?k6;m)bZPxZH`|0vJm3>o4O8M3GK>hEc0?>Jlv+;vM2eTb&H}`)G~$?vqn`}S;}>1 zi$4#3RP3=Zpm+iwA-mqA$QY;S&CNCoUrBEKaXh6$=jS6(cS_@2wBCfcKmpe-ZQo13 z&)o=r(q}N=hsz=FanVKBmpVmHX&;I=-bddcPB~inC>YC#x1T+r%4eyy=&Q)vZGvh`P*s z2CpiO>#*pD9(NE_G_KH4c^{kE6`RQHdYr-#3RCG9ywDXERj=E>V!g*ACy~i_l2X|E zMeqKyFuz+iWIW2c2itq72E*i8Ci$50iCn*O(^(OJn$NyJ-e8?-wLNM7=6mrA`!H5XL7pQWI$Gz>Q7^)LH>pDZlczj4f8#j% zYAR(DPivoV6}^TOw0ZZY$IJ)=;sRFnR`zQEI##V4{uR=hXBM>Lmp?9*j0>PJfRFm7 zT5K}#GCL(GhE2Zabi?i$e7OrPDIHPIclo1J9fz$M+xrsY9cM}|IQ$vKAH>Wp3JWxm&wsq22 zL#LEHLM!y6VRo@{Ow;jrqJ*1UQ``#TQ+I)xHIoJvD)=mW+M~U4_!sQu$zK{p2v2tv!^Lq z!seJR*S*%!!jZfp@=_T@F~U0ka%VzNQIzxMgQyI{Z1v+EXWV+pBW+sqKye1|%;OKWJ;xy$vH8ZGJ$p&TOvPC{9w2cVV~20{aSol}Ak7a~vrI z+#MX#>8Io1UZ?9tBJ&txJ+1E)LPo^Ji10~=edhwgxf@!YI^kg?y`tGD#f_8itGp9T zYB3!e9#ne|>f@L0D_224Ab;u;2Z|G1twv4ziO_&@;SN9WkDAM7g$vWThaO1tgcN?!K zX`TDQs{%QXBvhyR$ka2fC7Q@1)~ArGh0kj!l>}z=r9l135-{+lQzIfNEMrTjM;CKA z(rvghX!|>54;35;=-!6Vi}}12FpsGfRyI`-C(|pcch2_$V%v4LfJWMPYujD^twbG9 zxL8r5^K1Abquur90)1nFhw?4}U;7xeB-lntH0-xXl6G>6tL0Y`MV$A%`tW+05m7>~ zHd%Qv+*^8XxRLHL`$S46dfLy#ccM{f`EtJnd%5z2vDzZA;3D^a^(af!E$7p9_{J;; zG%EI1HLMXm9jcSL0es%BxLylorXyU8S7`D|{g=8;fmvF6+mp{{PGAw}8dAn$k-nRw zEmM^|ZJ8N|iIyb?V@*1{Zl{Zk_nv)W+jCzJghoibNcEW2_kLY-)%9}I^g*&Z$-MN) z3&Ome0KfCk#M<^vzH8;qk2M=<^Yc`W9(VHlBKUs(G;98zhNziKiLk|*1i+IToz+-{orSf zo6JYc=NG3F>Yx#WP-iD2;|H|%haUV2XyrV*$9Iy27@Ah99ZquRs>)ftCdy_-R^E!g zru)PYtvG9(a&k42r?V!^@En4l-pQ~T;D@L)?vD-?z9r`gXBvse;SElXl?%bjovSr; zWs(h!)AhMpd0`N=kTOSg*+1);2KC5>- z$<9vOc$ItKkSnt#l!UD^oMxWx>2zz1Qtm_2L`w$MzZxzIrF$@ZrR3A1Uq1|yk`BfR zczgmqG4K0KoS%Tt-$*r+=6UDB_;@9G{I*-(Y28`9CnxAEgbO+DOL{OBTrsvDfQ=G? zI3aq_Ql=^HYE}3x00#oR#QP3MUMI#C0CE@UUuEKi`PIbFxPgO+aA8=p)RtN(Jl_Sr zd)6b|C3?E{5xbiq7>Qw~#QIN{?r#ipdQdSlgiA9G7erN(5qompoj&>#OcWK3-#cn! 
z$)jT~x&;pgO5bm%I3iuGHZu3{1x*=MjuLO+kXdt7h)`)iKm}^?=4ADW9}eS7hdJ5$ zjXXQ+?8?|vv_zMcEnTrp#9_92&2oIzkD>JAg;{TpT;NR=iCwEaNz8wCu3Upd1Pb?*rut9YMrc*u@9?{kKE-^*_v#=-#9xFh&1RCX zHFDEuyyVH(9%gX+(M96|-qcHJ46N4c+RC78X@N_5fJyefrYwesHC1&Ex3-jt&v&M9 zy6YnneVZTj-Ro@ zI{D<+PTR;tehjLL9QKM;hPVS)Z(!wPax05S{j=*rYHu@U9Xv1LR?yFc<2N3^I<3xf zjMOH|d?j)#mVn+M`S!ah7$SV82>~q(%{+izz7QHSP5-)9-sk|L&=;gkDF0zuyX=u! z$&+)JLm^a;jPvsY!-viOMp8G+Eq8Ur)@F`ZGRHrZu?Bj3)ay0Dd=A{ft%AmR0jm6b zZ4xK0IWus?rfGF2;yfZXf0lktY`emTfF?F9!SvdFdki8naAl^HS118_DISTs=wo!9 z!rSk9x3kD`s&&|I1(w76`OG-^=&6Kqp3V6O*w2jSv9mpYp;sk_>OkGMGjpfVB5>9S31N_IUhaV4fKtgkU-F)?**vRh> zG|2h@LbbQs+2{eyMho+&DZR46A&j#@XFKHwRZXYX#758N<1@@(K|jUZH+J_n^|rHl z&`H0BFP>vqO;=z%RXzJO98@M?aHhuM4-_$TTm&zrn3lG}I;2Y8G?SC>q@F~3wVE$W z?iAHcE;gKeeIPB80#fRd4=I~GT|fmjZ!Yx`onpA2)o6kJi908=tXYHn4|y@{%#HRT z-Rx9Nb&Aa1C;;59FxJv~j(=^uKr1nrJBFiLdm*m5g7Le6Gd@XMT$>ZAV>R1CNQ{Pk zW-zW+D=rCIAO8XwNoFwjcN1{oVf*5MHKt-_Fpje%iIi2+mBGyEbq#P=EBwIw;dn{y zFtU^T^HJRmpt-yn74)(6SWzTuqI~= zmDS#MV&UnG(-G*7QALWls4e5k-!8US)7X^5`}XnDBRrDH>1V9BrS_H9VhNn3rWS zm13@)()-5;lxLfTCTkNz?spw2aZ$oFSt)?!gW_|@JC3OaYus>P88{pfHNB5RvNY{| zO!eaoMm#s7OFUZZQbQdIP~$|dJ1K9)0zpRUv8vzQ#Ig^g@Mh6;9C8p&QNm@Gox6?0 zAs_mrp^a4@5)y5hChBi!KT)L4d;q!>rCMXs0rnL)jG|kn+X0rhKc2361KM%0h|D?nif_B`_GI%mWD{kZ{hi05oS%0l*1^(BHaTw35lhm zd-SO%mQhEGNv6`1NWzx{IwD}?H)k}&2PlN6_vHFFzr`PQT<=`U9h;HGm=}DkqG|*^ z5~@e|ind)d4h*6d*0BI*m%B;BS`91LqJnXd?r-^%?asH?`2c#iLBM^hW75o4e(tXLUMVqVrykD-`f{ADe7^ z?joNP(dW_^_i#8T{;eR#vjne#aGU^(SL`+EIO&gOIo_@3yGUA^1fNVfoa}GD2z`>p z$MwZko|Es^xqGLoL40vjQJZBteFx%lH*K`J&ZWtzH2h_COzn}6Id^79@t~v@%mcL3eD8a@{k7c;#PUkZcy(t z`9tP#N?yQ=M1|&R7mm@Rs?_MoyU8zlZ{sN{^YgugPikI8{1EZg9b9PV-w{A8((=Yl5eV!GkWvp3EPI^RIDaDu=q{alpM8v zkeQ~@ZgSraOiO1hRnt)-1iLY-L%f9YmlXV#a8Q%~yJw1;t5;e)MpBA&mh>kwDh*5K z!13;|TTOMLW^)I9-WJU$QvuD!Rd$;ZqlI?)&K~`PxIa{y4+F5>7qg}8!tnWd+xq+( zK^e6h%0eEK`}z*)%iS#H;L5%gbK|N;9q>yZOPleS;)dJP?s5{X(Bh-#}TE0t;e7_;U*?y1O zspG~KaP$S$&Y_-?DF9Wi&f!NI4K)g~rTZG_2O2Zcqu{m(3ZR zp5~jFAUg0Lx%^mB>FZN+;i`jP7!yxv${THZc|({fRMT+E{G)&M*X;C5&5w7#;L*zC ztO~m2-QPbzx3pP{V3Kw4S@4YNAeApc<3IMopiRi!fH$LuEIp?bx5&#he@89Y@^$co z;u3&(fIuMr60x5Mj~N;CePhkD1h`KJLH3#!Y6I7$OmIrd%|!7}z1|WA<{=@Bl7kkI zmRjM;DtA>+ILUxO`zs15NP{H1`k~PYqmRHS?{`Rq&7}1~A`Z*ir;B2Zd`TUUx}tpk z9C3?rbDzBS?(-{z*X#FHVtkGs)aehJ@6Hmz$C?@iVNb``CM@yU1Y=ILy#+r`PzK0T z^e>YXP-AxZ9mPGmi({md2Rem4K3PskW`C=X*LXR3KEohS)!^H()SdKL(gD84nv3vF z3^@!$qi(VY$s+tdZCP1iKg;WF+50;5s>hBImqDG-QWXXX%fq`}Z*W+yKC0H_!hlGl zp#J{P8b4|v&IMbKC+P#G>7e~f@EWuY4)sohs;;{Yo=*Bfv$yAR@7sUrJKB2d$l1gE>Ih@rBIESLZ(1pE7Q@>cy#cywE`MYu<0OaQodsQ=}6~B zVgq&GR+uyk{!Ph)*L3=|!`jz3Ii#+a(-26my1p#{<-dZT11@tF5&-7FAr0=G1IF?I z?es-W9z3q1J!&J1MkK?YQ4@8i^K%H>3Lr)!Wsch^u^}U-ehnuntD~bTUgqYScLH%< zir0D-(aHobZHrllqoCn1`Tzcp6ND)gf-O%JRe^Tf_)-etX;fuzCumo5IA~j*RwD_@ zde6s-B+O^CM%O1QW8+u8Fe0ACzouE%r@aJH^=Pr<%KDlkxVa7H-FM(#D4BD)u2J5I z@cuaMz3WIVdgIV*-|uYd9{da@VU7Yl^WGeFBH>qta5Xl5v6ILegI3wma*%O60qK`* z=ZVMJ*@UBghho)+OYqf*Z#KoLhw4E^0sh|?(Z@8$s$Y$C;jy^7ed=+>_u8thB^to4 zF}U{Ws^P0J=*bC8x4LdI9DSu;2b|B0;1hjikC(b+*y%EcmNo|=IuoT9fv@Ss#_!rm z*~G85yo+BSN_C!bllxG9f2puq-Rn%hX-K%OQnF9N!)O?`VF3*?V3g+9caC32cgx8Y zy_W7b%kI!Lvo*65rHFS`SXgcL*?ZB4>6$XZ-FKs+NfQ77B6`}W+7pMqC?~gZrVusg z4Xt4VH2-Mq9rUOf9WEL2a>gyE;z>W zm>F4f&ZT+pcW+%WaDBbz*5HuNp$%M^WRLiOPcANO62_1EnL=V?3E~XxMH{qVAa1=Q z{>a}bVrXDcyFBBO4DE=R#g4zmhck8<3EQmkJpcZFzehN`Td3S}^{r;lLOW}>Tb9Bu z2hH7n-*Plk8m&PgE9lkL%bcM`Nt+M^jYn~w!?0N8_-AB!^b7bQFXn`nmorgzP zZz}x?4R{+(y|>q5I4r-mK8LeQoAY}`7&kR4tEV_7MvN7o7{O?S25q!e-QAO>JxtR+ zd2C2zW!nrbJAYeF*cJM=qSwwKuUVJuFuQu8zL|Y;{O+Brs85Y*m7R_7SfQK7)lkTR z<`YGXXu0O)=!Wtq%A1{x$)_99_?{;-9Mr3+9NF$$krq6S?@17M)p6?RdM_LtviS 
zOVf@)1bkfA)7|LBIXlM+qR4DUpIKV>KW5y&!W%iAY=2{_!U1+ojlbclF(Kh#)#ET% zhCSYy7?;I23te521zh(ZwaF2)g& z1)R}WkC%diaMmmU$_mx|F>b-SJmpaLXj-H%*=7Efeev0IZW@ddhP8&vQz}qH+y9Lu zSWGu(*Rp$j5gKXj_uiDTLL<@nRLv2}S!4d03ce5~^l4wEZweN5g(uX^k4$V5>gG*| z*_~R~cLu4vm|afx`H&-H9z@uVFQ%;}{eV zop2t&p7n+bxNRVO7+SH!R*$zA)i^s$-hF>rvD<1#X=j}N>|G1*#k{GjXs{w9{W@7n zse=U{mzIwEc(5W1*PAzFJq1(-TMh#!6~$OsH+h#3B@)pG>q51`vN8dd>nMo*v8M3Y zvNDntne!6!#`3p%Q`YGlyxarige)Mde5ZS?!tI(3{8d`7u`#qtf1}xCt6c4{qPVhN zRpSJ*IXPWb*wd}`kir`xBURofLedLb^0Jh76JVlZXz6CSAbckbwOCT?G(z|<43nYj zA8vSU8wd)@Y;vS`&6=LGaE7{c5Qwh!2={m#>yxyutJzwUDtgt@LAS0yeYXE4BN?%_ zcCWnca~ic^J!+_C=f!*zf={2)gleb4$L|FOHxJK;q9XhIZf-|6y(w6_lG8G2Tnv7< zf&Vj!Oue9i`2xY$`Dm49M2~l2eebKijeLuxRFB3OOnjo#3PncgA|rjyp^u_HUW*>n zrLcMu*!_W!7x}vBcC%~5I$N8)APfQVL|l|YALe|cT64xSxIAA)+jT}ib%3tr6Z=(y z|EetrkmPMxNott&m%kbZ_{%Hip)TMc0dxK)nJ(P*d3(31AV@OQxZyNd`vE=vY5#+> z-1>UyqQ*&TM=K~gq6M26Q#@zPEZ_wMvWXM(zV6WV!4TQdkadaCHzFh78@)Dj@6<)j(c7^`1w|~yhBz55NOzi+F z>f6WPXu1g?5)|7OgcLK-lfBR)H3=P0c=i5Pd0L`iAYaSg7mmUwRn*2WnDss#^vfutp^=X4!^yrNP zCvQWBG~VStB{gXGHc{l#$B-+5IP7yL6m%UK^R^TyQIBo1ih7-wBY?1-DN`W2rU!w% z>Q5xY(=EI)MOT~~_4)B*X0bNU<+vMOpRtr(ur3*5CmY_17xJ7nBCM^geaf&gNTRce zbGiO)%5e_0{F!9=Q77mFOz>>S9ko{aPVQEp4JfwnAEaXw^B(TsRBotKSM}R7%htbq z*cwbA_#Eox^#jpiXXW_C?{er`5VqJ-r&RBaq2~15cP(+JULm~t3u1Bsqa5)}vf`6b zN|N8LG{hq^1)s0&qu18U7#ddVpIU5<6YOr`eix_g-0()-)-cw2S5+lvbS(-L^nY6$ zSikY8E3uB_5SlNFj)oIV>geIF(ph9YsBg+oy_&{Dz1X2FzsVx=N}3#Oop;R-@Bgu@ zlq3QM2eqoJtl|61xnm**!P3zjlLu{2h&9b3K=b>r~u>fzD&14Q1 zeARYL^ZHNfFzujKR#ui-kK{dl@Zdr67Pd#*#a%kcA2tZ0Bn@5%cbrQb7idZ?{}0q! zWrk15f4&Jst9jiOYl#S0DziVxWG0ORk*9*p++`(^Wvb9(h*=(DyBbX6Ue|+QB|G6EAfHvqi#ztlKkuV7n%Ur^-S5B z?vI(2RDkR$PuOYG{G9u5GZ}ysXUA2|TYr27@HG$z>d;O7FKNVz2a^Phz$&i%-=Tmk zoCC=excaZpLqdzC?d%E*3@WV+9y}OfKZ~OOE@th z(cYu~F$Goya?j0N-25fne~}7!G;wipTDrQ`xwW-kNf-U$BIzNYng0}dTppmu%#}T{n&q&u2KA7#|Sh zy9oM@oB`ufet8vBiht{U;QxM}<$zxVz! 5) + .compute_and_store_features_batch( + extractor=extractor, + storage_path=output_dir / "musan_feats", + manifest_path=musan_cuts_path, + batch_duration=500, + num_workers=4, + storage_type=LilcomChunkyWriter, + ) + ) + + +if __name__ == "__main__": + formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + + logging.basicConfig(format=formatter, level=logging.INFO) + compute_fbank_musan() diff --git a/egs/libricss/SURT/prepare.sh b/egs/libricss/SURT/prepare.sh new file mode 100755 index 000000000..028240e44 --- /dev/null +++ b/egs/libricss/SURT/prepare.sh @@ -0,0 +1,204 @@ +#!/usr/bin/env bash + +set -eou pipefail + +stage=-1 +stop_stage=100 + +# We assume dl_dir (download dir) contains the following +# directories and files. If not, they will be downloaded +# by this script automatically. +# +# - $dl_dir/librispeech +# You can find audio and transcripts for LibriSpeech in this path. +# +# - $dl_dir/libricss +# You can find audio and transcripts for LibriCSS in this path. +# +# - $dl_dir/musan +# This directory contains the following directories downloaded from +# http://www.openslr.org/17/ +# +# - music +# - noise +# - speech +# +# - $dl_dir/rirs_noises +# This directory contains the RIRS_NOISES corpus downloaded from https://openslr.org/28/. +# +dl_dir=$PWD/download + +. shared/parse_options.sh || exit 1 + +# All files generated by this script are saved in "data". +# You can safely remove "data" and rerun this script to regenerate it. 
+mkdir -p data +vocab_size=500 + +log() { + # This function is from espnet + local fname=${BASH_SOURCE[1]##*/} + echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*" +} + +log "dl_dir: $dl_dir" + +if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then + log "Stage 0: Download data" + + # If you have pre-downloaded it to /path/to/librispeech, + # you can create a symlink + # + # ln -sfv /path/to/librispeech $dl_dir/librispeech + # + if [ ! -d $dl_dir/librispeech ]; then + lhotse download librispeech $dl_dir/librispeech + fi + + # If you have pre-downloaded it to /path/to/libricss, + # you can create a symlink + # + # ln -sfv /path/to/libricss $dl_dir/libricss + # + if [ ! -d $dl_dir/libricss ]; then + lhotse download libricss $dl_dir/libricss + fi + + # If you have pre-downloaded it to /path/to/musan, + # you can create a symlink + # + # ln -sfv /path/to/musan $dl_dir/ + # + if [ ! -d $dl_dir/musan ]; then + lhotse download musan $dl_dir + fi + + # If you have pre-downloaded it to /path/to/rirs_noises, + # you can create a symlink + # + # ln -sfv /path/to/rirs_noises $dl_dir/ + # + if [ ! -d $dl_dir/rirs_noises ]; then + lhotse download rirs_noises $dl_dir + fi +fi + +if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then + log "Stage 1: Prepare LibriSpeech manifests" + # We assume that you have downloaded the LibriSpeech corpus + # to $dl_dir/librispeech. We perform text normalization for the transcripts. + # NOTE: Alignments are required for this recipe. + mkdir -p data/manifests + lhotse prepare librispeech -p train-clean-100 -p train-clean-360 -p train-other-500 -p dev-clean \ + -j 4 --alignments-dir $dl_dir/libri_alignments/LibriSpeech $dl_dir/librispeech data/manifests/ +fi + +if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then + log "Stage 2: Prepare LibriCSS manifests" + # We assume that you have downloaded the LibriCSS corpus + # to $dl_dir/libricss. We perform text normalization for the transcripts. + mkdir -p data/manifests + for mic in sdm ihm-mix; do + lhotse prepare libricss --type $mic --segmented $dl_dir/libricss data/manifests/ + done +fi + +if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then + log "Stage 3: Prepare musan manifest and RIRs" + # We assume that you have downloaded the musan corpus + # to $dl_dir/musan + mkdir -p data/manifests + lhotse prepare musan $dl_dir/musan data/manifests + + # We assume that you have downloaded the RIRS_NOISES corpus + # to $dl_dir/rirs_noises + lhotse prepare rir-noise -p real_rir -p iso_noise $dl_dir/rirs_noises data/manifests +fi + +if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then + log "Stage 4: Extract features for LibriSpeech, trim to alignments, and shuffle the cuts" + python local/compute_fbank_librispeech.py + lhotse combine data/manifests/librispeech_cuts_train* - |\ + lhotse cut trim-to-alignments --type word --max-pause 0.2 - - |\ + shuf | gzip -c > data/manifests/librispeech_cuts_train_trimmed.jsonl.gz +fi + +if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then + log "Stage 5: Create simulated mixtures from LibriSpeech (train and dev). This may take a while." + # We create a high overlap set which will be used during the model warmup phase, and a + # full training set that will be used for the subsequent training. 
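+  # LibriCSS session names encode the overlap condition (0L/0S: no overlap;
+  # OV10..OV40: 10%-40% overlap). Below we drop 0L and OV10 when fitting the
+  # statistics for the full set, and fit the warmup set to OV40 only, so the
+  # warmup mixtures are dominated by highly overlapped speech.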
+
+  gunzip -c data/manifests/libricss-sdm_supervisions_all.jsonl.gz |\
+    grep -v "0L" | grep -v "OV10" |\
+    gzip -c > data/manifests/libricss-sdm_supervisions_all_v1.jsonl.gz
+
+  gunzip -c data/manifests/libricss-sdm_supervisions_all.jsonl.gz |\
+    grep "OV40" |\
+    gzip -c > data/manifests/libricss-sdm_supervisions_ov40.jsonl.gz
+
+  # Warmup mixtures (100k) based on high overlap (OV40)
+  log "Generating 100k anechoic train mixtures for warmup"
+  lhotse workflows simulate-meetings \
+    --method conversational \
+    --fit-to-supervisions data/manifests/libricss-sdm_supervisions_ov40.jsonl.gz \
+    --num-meetings 100000 \
+    --num-speakers-per-meeting 2,3 \
+    --max-duration-per-speaker 15.0 \
+    --max-utterances-per-speaker 3 \
+    --seed 1234 \
+    --num-jobs 4 \
+    data/manifests/librispeech_cuts_train_trimmed.jsonl.gz \
+    data/manifests/lsmix_cuts_train_clean_ov40.jsonl.gz
+
+  # Full training set (2,3 speakers) anechoic
+  log "Generating anechoic train set (full)"
+  lhotse workflows simulate-meetings \
+    --method conversational \
+    --fit-to-supervisions data/manifests/libricss-sdm_supervisions_all_v1.jsonl.gz \
+    --num-repeats 1 \
+    --num-speakers-per-meeting 2,3 \
+    --max-duration-per-speaker 15.0 \
+    --max-utterances-per-speaker 3 \
+    --seed 1234 \
+    --num-jobs 4 \
+    data/manifests/librispeech_cuts_train_trimmed.jsonl.gz \
+    data/manifests/lsmix_cuts_train_clean_full.jsonl.gz
+fi
+
+if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
+  log "Stage 6: Compute fbank features for musan"
+  mkdir -p data/fbank
+  python local/compute_fbank_musan.py
+fi
+
+if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then
+  log "Stage 7: Compute fbank features for simulated Libri-mix"
+  mkdir -p data/fbank
+  python local/compute_fbank_lsmix.py
+fi
+
+if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then
+  log "Stage 8: Add source feats to mixtures (useful for auxiliary tasks)"
+  python local/add_source_feats.py
+
+  log "Combining lsmix-clean and lsmix-rvb"
+  for type in full ov40; do
+    cat <(gunzip -c data/manifests/cuts_train_clean_${type}_sources.jsonl.gz) \
+      <(gunzip -c data/manifests/cuts_train_rvb_${type}_sources.jsonl.gz) |\
+      shuf | gzip -c > data/manifests/cuts_train_comb_${type}_sources.jsonl.gz
+  done
+fi
+
+if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then
+  log "Stage 9: Compute fbank features for LibriCSS"
+  mkdir -p data/fbank
+  python local/compute_fbank_libricss.py
+fi
+
+if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then
+  log "Stage 10: Download LibriSpeech BPE model from HuggingFace."
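+  # NOTE: this BPE model has a vocabulary of 500 tokens, matching the
+  # vocab_size=500 set at the top of this script.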
+  mkdir -p data/lang_bpe_500
+  pushd data/lang_bpe_500
+  wget https://huggingface.co/Zengwei/icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/resolve/main/data/lang_bpe_500/bpe.model
+  popd
+fi
diff --git a/egs/libricss/SURT/shared b/egs/libricss/SURT/shared
new file mode 120000
index 000000000..4cbd91a7e
--- /dev/null
+++ b/egs/libricss/SURT/shared
@@ -0,0 +1 @@
+../../../icefall/shared
\ No newline at end of file
diff --git a/egs/libricss/SURT/surt.png b/egs/libricss/SURT/surt.png
new file mode 100644
index 0000000000000000000000000000000000000000..fcc8119d4b4720e2620fd789773a1003d5aa3871
Binary files /dev/null and b/egs/libricss/SURT/surt.png differ
ze;-UVzOWES=uf2${FWFhP(F%l{(h&-Kdgh0tT#RpxoF#hYPxr11Oab3QZJe{*wD~dEmkR5oCS`r?OLp$4AaQz zKq&9W4Dic2QY^qH0Z>?^DSwQkL+(-Hv%b}_Zt&o~72=6OY5f!Q>9dOZMaKt|DB?0{ zvMI*HS{d}-DT(UL7eE`eIx^Gl6d*nx667cHt&ehFsJ=pK0rF)Zc0>iV15q&rH8=lY zH$>II=C}>5f4JGp)8e%4ZruZ>{8>xgm*^8O;4Fk=lv^%r`AbfN6!j~=e5Rd&7B-^W zSxeP@YvByyN6)`s+PvtE;th5weYmwHet78LZL$6%TPM7!ny+eZIbp#$IX;s!VhHc+ z^PvcYO0eI_5?D9%;AjBDCnwLpTz$rtE`pvPOwL~_FcP3kX2*;8$tI3ja}b6$y5fag zyWxjU`vo8T48tp-0-T#Nq?e+!)1X-;iKUDYFJE1#c@uy~N*e4zSEGx_#&z2gUfzxhGHqGy$2*41)UHKpfx+=?>x z1BFa_q23ATEhNuk7M**K`orhCBC~+HmhNbr9YLEv(Hzpc7U?rD@xootsGpV&^dCvM$BlSLMV6M3D4Y4%}4Upn@#YWm# z2*womyqSq)yRJj#*FlCjblcdRs1SM>>bMO5(;2&jl+c2BX$V*&t;nydGydw{Y@tm; zW}wr9oZ20!w-JgBp9(Xvm6Qre0X|_=5q2ba(!Nl~);~?QE&?Z3ej#h!uUE-a&qo^7 z=G076Jx=IrjApAG-h*Z?cD8P1@In_K%-%F{Uh2+zL<#0??JU^merBAlcVbYRYbLVR zugYf<(*zTFZY_}SLyNkw_{V46at59qBg2_GBX z_BK-r%qJ`+d2XGe8n|^-ji%~_PlsDd?lTT=IG4yOo%Yn%umq}zxsvpYOo~3iv#2`5q?CvP z;208Z}vk zh8mP%XYSM0g!ul@@nWxTH=w8KRjJla9>-+Ud{Xi_7rVqBlyzb$Ty%B%U6}i>9_6vo zs7SYE@lUIe{>(dUqYP=1=9TkjSnooNq5vC){ZQfz>BAUErkKn`(>W$!Z@Y0U1f=jb zvS+|=`k66I7iGS zdX|y$SHqtuU;vj+rIk-IX;g1{uyMTENjYjVyxxdkGP^#tjo?PiSENN-d9HR$MJ!zc z9R|LWj`tX|4-dB(<|}P~x^C%G$0HGo4MM%ul{O3EZ-noj^&^BI8`IJ}Z0b++06iND z*fB@I9$`%t-y>IdpC3oy{Gk3!5+ssd+u;9PSREY{3SwZ=(#qOu zwN;LYg6AbJ2W7{G@FgaD;=;fo;A2_QaFjY$&hG$QxHMtA9gH>V7F%xhGhIK6*XKe~ zR6K1^wy(3_5dQm^a1+rw#_khW&ym2HiEHFXRV|ODSfX75&!k^&(RVs%g1q_o>v`zM zH_bs%!KHDd9!&@@;tQrYE%I-X^~Dqe`KDke7t|4?|_YM_WNF4W}e| z>Z}^NPD|q?V&aBrY>Z?LL1;xa%nX@R-L@}$rRxHGsp^O_PbYJcER>8R3KX`i)QO<1 z;L%rX_0OVy--dI2{DmIoFM@OXw?p65>(JC{$@3k0Xh#Wd!81q`ALqeLqj_S9uWvb> zblK0@iD#je-T#_ABLyf64yZ@4mdXm4{U4j9srHMk|_Cj0L1Pi}dv7zCf+3C`El7D3ro`amO;RaI-l&!kgnLl5IGva_Um_L8^ zhvB?s(yVfVmFW)uW6SkkavtUWPA2mwJy*7Z`4B->!w@# zq$J8!>sD0lC3tXaQ&YmD4gDV&p+%OtCT~KohL!&Faj6M1g9G@xd?EYwW$KcF#%7u6 zJ>YmQ>y9=~iVc*MKY8B=5={gvD!*3EU+2CBm3@S~-tDOyM%BFB*4EKYmsWsUlE4ro zU@&e=R^f9HFCTRSXo$QTqC=V;1APH*n@Qz$z-()OcN8%!#DI3Ot2UZ&-rz$1Wv)7N zzRb488U=xH{&siuwbtkH1Fv2gTzI^=XhlgPGAKasyTev-%y0mPhj@S7r{&S{agOQ6 zYiyLC&tF)I zZEeLS?DlUJwWQ+a<^}@)Ghq7V3GHxJZZcVTkx4r=Z(R5ml8ki}XtS1}iqK7}@oUz1 ze!;KJY*V>B|1jHx_OZtftp$EORZ_wGj*!kYSFJfeSLid+Bs^DkkS~avRH#9-fl62i z$As(bwB+y0j+NN+bH|2w7hA5s;?B;_XnGI$%psV62Sjlc6mQ++fE|h)`f*P)K)Jr+ zaYpr$<|NGb29w*J!lKOSw|$x~)dwv&+hj2kukbP>L<0oU?32~%6;MSFJ^u0hF$Nb| zrPnLaTBFblSTYN!rS6wdU#kAv=2_>xiI&Z*cVQAs)O$(AM50P%hO1?YS7+warRb)e znd29)=Wef9k+^IGdN?CWb3Y}*f6_&LVh;=R>OTWyg+ ztNRtK!NVpvEG(!yN?+vLN25VM$Xq8zk(WYR<3MVON)DBsk%P1I%a@{b3Obtr%ZHs{;f zZ@g&iIM9I&Onl#X%xyV#vFo0Y|9SRaGhNz>odVqETWsDn^qF?D=Jy#0Br}2=Tjq=j zzD6=V#6@r>TRbk&Quy%mT~M!2>qKu65v_}hX)z5>49}T$uwb<)cq^Uj5Kij<4>gPnEap-z=rQySTW)GuNs5&3rC6~};!eZH zcX*dmVy*Oyk^(aOiJsvReTDxBS*G?*;QG%bU(K`b-2VJvmiI5i3vobYd?P5~S>NH^ zENQLbRbhX-x?XL*#(18iqp2%dbve?yj8}R&pafpgcq zYVlHHpDaCHdfy^_EAN%rN6UJY+*hh4Na&&G%+W$nYJzbsSaLEhw4;qOJmfnp;P3C< z%j5vepT1PjwkHpKfXKwqoVA{dY;ABU>mbUJikkH8_qseZ;&BQ^!uj!oGhH>WjmRF= z7<)cFAlf+(+6sG_(nT>ji@b(ODoh49xNq-E*Zw?~%_;rNGQ+oYd$x-IJ8Mv7SV;+8 zrI`}3^FEIPquXb^QyL8^8ZX_PRNzu zq@crZ6nKtwH~bIo45cwpQ9KLu=WZ|}F0RS1NBAeoy*weYi&l5U}KFa&3E>Z@7@ z`A_VpLkXnVhl;NWRc*10*<)}fz4u#M@2;^$?~oAQxYG~}jjvl?-X3_#eXELVxYW=T z=Gk#wU*|zsm&R}VaA_!Bfep=KDPSykPWgoz1C+5hlblfFZh%cZtrhV%*q5rT;73{x zQF?mtAyPilgynZ%swTNCERX@@fZOu!v&4S;Z>S_kT{>zv&bfJ3j<8pDI(VyuIqVsP zw?uLUr=yeRFM!KaGBKsr!s;5fdT}468fg2PeIAiM%FT2~on$FAdmeVI_D-tn+lyuY z>FA(dWez;~S;}m}Iu^)(a~?1Wv0d!=`EamrMd@NgcZ^^LqgJnPr(%UkzP@_3tJ+T# zB%|P=Y}|6=4Z>l6#i+~=ZgZ0@qhH;_{GqgwUo}i3cs{&rF_AB7mRm}bLa8}yF`kP= zNf|1rU4aaQ?aGVyAJW0RG|0@0ztZx;OKuyB51}J1g}<#C!R@~Az#sDMCd@izoeMcV 
zc%V;mvVHjX;6_PF$ycnm6%}Q22>O_JFz})LZ&Sko?UvuajcdZgU)zh9RX9i=pFV!# z5W?xx0w_{<#7Gr^=hOE!V5H9|G`)-bcOPD#_Mzc*$0HHFW&9+Z=w~L9O1*#2J1OTr z<*ADOajt{@FPKnhmPUYKy7cWJ5r*xwd;N2}eq_jk|K} zQ*HES-frkaq&`r61N5LkewRa3MEWbhmHyYQSH!H-?<|g>hLsg07An|IBz+;Lr4G4i z{QSw50_g#Np|)@WzYY)ROUIbh@=_WaJxk3vSmT}8Bh!|5*r3^SkUo97U%{y&u zY@B79H8VXu{lfUcUqL~}8}{>8n?}<;C|bby+63;?QxgyDv&%@O1C@f2TEvvlL`u zBvoLxKmv2(xMJWKVDt*uxf{@X0Rrvlim^ho7;-r+kt9T;XY?uE>Arjf8y3A~V>|EO zD6`@mospUNUe`utTFfeJtK5yo# zt=HCRKP5Z^BuPJDh?pS_ea}6+7U=~;J}KxOkD!+Bk5}u!#$g5uow1+CZWc;epKL4R z>^Mb4qwOv}G*tEED1Ae?X3wx?AKaH)V#pk%qdg8qpil}5rp7$#H%Kn0DGJaOWW52Q z>k9e2Ae;B_-I2~%<7Sefkod5n)ni>tjQAd$Z$PHpl|sq}yx&CmTX0*Sm}pU_A*xas z)yZHH19q+Jpw;i%OSY;a$^|8;aWJB=qL}wTuc)F4(nUi%%y*R9aS)6d+XSeF44`>R zosFaQLATx!?W02UXqzvUIAf>+j0f>2<>8MOsOb?KBdn%qWX4g;ov6{36f=qn_WV_u ziQJO2?#h`KW`^9Evwh$n%6=`-H39Qox$>WUmMUUdIXQr;kqz(jy!lG4lbY(q&c>#! zGBz5ctgIrO{WUA6@12B&MZT`G7(4H*QA%&9yYfeYTtA8AwW{&| zvH%%sBQm_{+lMhI|7x{T8NE{K?U87iV3}Gq>?9wnz`yD_b5l5h6ADhEckkDrAhz_T z#VmZ)ahmIxMO(+w7}a=o!c;lFd}DdT37Y55J5rwwdOPbub=TxfzO!mKiu`T)>(7y? zqbqXL)_ma_+~{@6^6&~GDjHoInd={G z@pt}9G^=Rz?*ViEnxe}F9QTnD<%`*2e#hNt8l%{M)zQS6s6V97)DOh7?`|sw zlstAH(b~pC03D)5>jvQyfYkCdw~n*hO}XhhJL_q^!kucIzJo z*GLnVwhU1Mb{Sf+%WT$$+252e(X*n_DTSt<6jU^OiN8p>?boFfI4uc!U7gpuFlpp} z@&)?BiD3A|Oaw4+LwQ;!9j!<}$%~cNv+jQd@}{5JmELP6GLoA6QEA3vVpn5Bseymy zUl^NaI%-9l8u*Ve$7IKbLvSa~pgXOPitJhE&n4y}QBDdeVB`0W#Gx4nh$w}OLYC)K zQ{6q&JjV?~p;}+W0#aw93!p zz+mN_KM>tv)%uU>Y>W;+AQmVP<-!pE@4Re5_uD9!8aPG2rX+jZ1!z{N!Jo|!_E*;MqJ>Ac$nn(p5m!^=!;c~v|L&infUN1iw zhfB8xW~EF27&XJse5x}t3sm*|JfZ$^5?F^ql$hbDyD0BzO^`*c_SOOi{3kFTY4xu! z&VRuDuOwU_)@d4D2{7wONdNZdP+WmdpG)QF!2KfK2r(4NB-{FXG?owcL~jOCyCJ1% zqi-Q_zkj&WvY!GUZ6G4WI*4Wc;+H%;({dK8Mmhptuw79bLwq3!V@mz|18j$a@FcDX0DQTp}DZ6rP!B z01Q-q1O`9KyWal08A|03brC=bMRyl!cHVmTpgRJtuCAUvN$Pc@w1n?+?vcIawA;+? 
zQ?u+2#q0qQK;Gu$s+XuAGu|NYJgn8-F0V?w{%%Z20OEXYMMS{yRnnx3P>^Y(SL0*b zV8K83nbZ^d&(FZ454v>7*kwdNF+=O4$O|9n?9WU5*JIU-_Nx%h%^OhCJ7e2OY{i`hQ6!v69}r9Z1$E;wnJK z;QoJH{bf{DUAR6D6VeTv1`$-;NJvOGxDf=TySt@PQc6%#x?vL{-Hmj2gLHRG_kZ%7 zbAIm_?+5whV6f(z^Pbmz#l3D(Q3Ox0c5EKPh_b>TJ}s;cW~K(-UsEd=P#_4O#*2i> zX}!g?oGeam<9}Z6jv1s}4pBriWM&J>+IjwJ{r|P9FYbo%DWE7Y-dc~?iRwgy2u;z5 zpPU3qDmHzo97!E6-y!n;7Kd7umM|=T@gc{3-iXNhJ&`XnoSiXB;N4S0hXb+u z55^e2d&?S^u~zp;wiUZ4g+C}zyc*qm@(OQSKg~G}+Sz?EW0Hs5opav4y-^#f{q^g2 zTmEO;?`P;*y)jUn>W9u3n!Q0>P>1gI)4`De!QPf0-USkC4#h~q0bv6{gIwt@$gbfm-o(>wTq5^GVeG~7_KvV z_F0_|<4_-8I*lFnTvikq zy~-OK?IP51%;0g%py5RHmf@J6OGOR{1obS6;Ctrpo?PDSx|~f_c0PiyyPFHY`uh4g zo93Qip9~~juZKFTjwOS_ANcq-bN1%kTqKl4Mz+>KKsqJ3tS-(s|Aa^8&O?s_}(>I`DG+DvG@pk=}8UJz9Sbtc@Nd7 zLw_d}9AFi9k5X|Us`i$NOvJ7EyKT)e?xh>{-qj8CYN5=NXluNj^GWm@jN;72mh-b4 zgZWYFH7d=QI<7AG+Vc)khjk(&qr8;*|ArDYSh=Xw5qy|AXvDI->0Io1;%JTB{M?1O zYut`WxZeweJ5rrW%YAyv&qd`;n^y4cZ|&Pp3TS(K)&`5`t9Cm_D50F-5w}RcRnD7% zJLj52aqbxkEJ)=&W8w#7OMjRp3d~%=KURFN$zkdpCWu=f(MCgC)LDax<>cg`nibFr zi!syy9Lj8-@0_#YWKhB~06N}&+3L2tSD$zig#GOu&l><_ij^M5uaSRGelL_BZc)oO6Swa^^`nrPv9SpGg+vz36H&D^ z8agmCphPH&C$75iJ*l|gkX7WE7~Fg$q>|O-G)?<#AdplnbEA@TCJHmi4R;GYY9n;` zdtt>&xGer#YZePU+Tm<*8NDAEQu4)4?jO-O;zD1lN?EtKW>`^UjUDlzz$6wgCSB4D z2AW0=o-Q6RR5x*7E2``G0j_R7GO5dd0`>8DVC%>-z`KWquybuCS*^U9vJqh0+uKKL z>y8a$(jTM(ek(-SozJ|C|Ksb5idCRMgWJD|5LVm1s<&gAoOWhCwp^aU5@4Gq1dX|5 ztHQ5RgPZ3{hYh+CMi*eA!0Z$)wQ2j^Hg=bPkwOgg9V_eL3i4TNmbKH?*rGpe_(}(qOt)kR~TnbLM5<=;%D0uaRj| zrqm5Qxn8I}es+6=vuYH6V82EGTW46X*rmsXN+ts?Ik+zyS-?4G)?SuP%T%_8Z4SJA zxaj2ALFSX?RO+0J+lsO!-+eQZr=7s33>`QL9d0PbcP0{*DaXvvWvB%tJ`pAV(Uw1@ z-O*UaJJgM9N5zD^c9~$4(;e|%5cA7~OFo5amRddUv7}d4#}_Yu?DogWiY2CHWYnYw zykUa{69RZvmx#X`RP)eaw`A|O#J&|NTa!E&D_rqC!z#(LwCG3oUewe1x7!CqbN4T;}kK* zLKc`)B4{+?&$?|oPVmpwWtg3?nWAr|;RLRcgz9VbKA6sF?772RiHPKzB#4I9pCQQ+ ztQfWiRBTk3b>}ba81?i-U6YEyy>V|%eF79%Vt#6DEGpUB_5IzAcN(zpVSK|PMRj+# zm{$39Kr4*|yVRin&l909=L|6@|s#fxbca^sVuMc5O*#NO^D zH~5L?sZ*x|UbF`pW~ebF-$l0PmJ#MX^dVlsQjQxqn3#xbnnZ*3Cg#@Uy;T24d3XGW1PdGpFsc0a_&W=~d zLPv}%%<6G8U4RYat>%_0oBWa%kwwNqs(7-<^|A6oHyeC5Xb3b88G@Q>L8#*h!BD87 zDyGsrs1a{UJ{Q~l%j*oKY&+c^OIIz_RE$+s?i>{%sH2e89DKp_L(q{_Tj$^R9T7=y z6c`Q#imq1UO*hgcrHCXKn-JGf{i}<76YtPaW|73tBJ1Trd?;;YssGo`kxBBu;z(mi z;hT&i&P3M%ErLZCbj7Tx%`r4@A{{Q-k|!TbXa6gqLpOpm$lH`4`Fxsp-^WqkJbFGr6d*d4TQScP4Re|pr!cU@1^WJC% zR^V%uYB<{(K?Wm(PzMJGBiEcUG$^2!m!9r~#pu%g+x$zhS{y9d$|j}o90UEalAf{6 zRF}cH2m91p-4OuAx2-9qFzg4VXbexjdQhrh(h@xV>}2n$0GI6HSG*aCzUzW9e zHg@+EuAuz!=Iu)TqW($m*Y47j&!5HYAL!P~S>#XG3|@@0q+j(1BIa9h<{*6wz9Cd! 
z-QPv*NJ;275`&|M8x>?8X%X06Z@uLCEb!zb74~4+zS@2w&ch9%o`gASxdeXY*%qBT zs}1LE`8RvCHs#a$p~4p$U6(LSXkvrmAJpiTDN-cZ{5`jSpQX!Phr!V)%d;;%EC~%y zy>3i|9e!E?4x52 zrFP$`;mm)t-O!?i@$~cW`e`k2p@3p=nkv^KSU#fPC|uB=L^lKqmzs&`Tg;#rdL((o z(I@z?NhFocBbf@Klt}#|dK*r965hMK3847eyzJIJQA*X2fC6oe=U4G~cwgX&U#0DF zgr|INA*!9tYT-QBotA*Qap9UfbJS$x6@9AuW~?RtLA4%{u){ zdSqXwkUPiiJx+%@lNj8L-}qrL`pDnSlHGe>&HM z=E*>@De(?V6O;=~y+Ba(YLj2|t} z=6rv+)aZT3Zo4m?EzM%iaLKF8Xf9*S|3$@Sx-WW}n@*{oaD0t6O?f%ag%w1>Y2oJH zI!+$faa~7hU()uvxYq1Ab7w1P~~l`!ZpBYU5QpKxRw z%w=F*kXyalFEGQcrRqNB46bhZ+rm-9rtMaPhDmy|-k5|0VNR+ipbw1)QG8d$uVg~Y zH%xpYUsS5cNi6BcePH_ zBfTni<>g;cLkvRbJi!%MZRsKmgh4VjnZYW9Zt)x;yDYqbXIc8P!ZGT27#96MUE|$Q z0AdcKCeR$Q#n9F~Hk zK&E^XT<2L6FB+;f?h}ee3g>0ZHN%v5+O_1V$T`Qcpjl-|si`E!BtBctjz~?#UHr<4 ziF?4;Lk%8btD3rU!7M~bfze8S;OlJ7FILF^Q~x`9Cu2WONm6|O@dNoYb?Nb`v;EFE zvBq;rxgp89#?58hvey{scr+aAv7=o56KB>SLdoj5A;Z4Ewq|T+*ZqVFl*jb|JQ`e( z*nfUtuAneFSMGn>>q95p#N9G=7*lf8ZCEZcK&gEeFE5$_Z(SNi zq~QZ+^TJ@so*ff3PqBP(SvGLUf&SD&N~$(8iZFSfnktGaIfdvY&c^_J6xctJ2zgd3 z)p=|A-H>5Pa>k#Y84!p`T9NETR61U|$5tE7b6+%p-#VB7>WriNCx%_2+e((ymG|Cb zy8K2+&&v^EWRy8>5tBzN8rgCl>lEutBHetUXA>wv!<^V-ZAQloYZzms;Q?+woJ-TI zr1%sbM8lk+IL4vuFF%m_AjeILJ*}Q=hZVRjt0w3F{r=-_sh+0Z8#Hp4WTbdgVxQWQ zfAsNAad_F;=)lo$mi#Dh!zMpaH(9_s&4GK(3zmCra8uNlz^+L`YGw%cPA!4(R%JU4rbc*40pNm5|1|*mRabg7G@kJ)% z=LoH#_QL_I)&Zm&w=srWI(1?kFxA_>#T-;kd;N@=jZ=4 z47&5JkxWD*a)Xg7vB>hzm+-B*n4{8Ks+6d;aWj)bYDj}8l>!SJ?cmtLq|ivY(^c>X zr|-_7;Wbn6*BGKu%g^l9pD=U>JSiJa?!%u_gy5pB3y&%GQ+@+KC`aSlPR7o}z+;K% z>Bz*d&D*{nJZ|FN5qSW22BZg*#nKvGH^=8DG#-DEnMHcFHlm9&#Snc}y7}a9<2{1K zYWGB_eB%f5Pv30)16t%7=uM?;!@W0VX>}y$QlkQW)754Tv{O2gvYX>{w0>Hfk*S%_ z;e|+5$#2i)LS;tA4EyBPedk=5_ zmO3_b?pX19xt8jE@~J!5eEQ}oPyg}@)lP=nnm{qPWEWv;DaGd zckazJs}=G%ISR<;24bQS2QdmnP!Mw>KHZ;{@D>lmWmxd_kca#`TqXEG$^H_&KPSqd%^eS%=Mb}_o-_|g@^;~3)t*LbmG%Ll~YSpQ1vikeDq(wjq+$Qq&rILe7B5YOr7(h;nTsB`NavdL zlU@^+J&lmq!tH^4R?VBsIA+|JeUO1L2C_=LCo0GG$AIzn9z$G|J-i%Xmah_H$UL`@ zH2R;mW(w(1yhLHi`z8Csm+p=+k6u3d=TC%=AY#9*Ept0(=a((Hl-!+ZxEi7ay-xcx z#2=G+LoFFjTVOCR_0i-p`_rE$ zoCf$?_{29%V&8nVVh8s1-Wb27C4E08N?_M0fp4zvUeHY&^OeO7Lg47kKX%TqkXQnB zNU4Ut+oCq_r4x1YCha7UTR)Jok$df2r|j__F_IBM4C+5U6*6$e{9=N7JI=-jY>e9S z<5>@Cd7(-G7mLQ_ynandS+eDao9+89N787hEj~EYMZSG1E!tHYQR2~hl=ZE94#yoK zRCrg$)DKMOpxNtL!L0V0{z%5D|2hK0T7wiYhVZ;;CHbtpb+})TD{vx2pIC{!f0ltr z0tqD_qoksCw2(wYVH5pX*_S2NhY~Iwk;W5|YbELCE>1d;EK1NxFDCCVT3GtlY7#q) zinkTu<$3t}2$^Czrii(^n zz8r{NGY-u9`?*o-2Mv~?j@i^7xsxm6`Ztrfj~6&WYpjwQ%a-zP*1BVNEiey=*sxea zAU||)c7$y)o9SNaMwD_I1-X)0g=`=MWoZ&0I|Jt!P|yqJjZpW^&BEH?6UY)C;)bu& zD+x@$w(-bVPdj=MD;H}isBi$zN2N~`Eh-mr(q(8IV9$*`AFO)~S=TtessadabB8V)pge4s#B^HpF4448yu z%y3zwx^I{wy<`(KPm4=BlYz!Ne-QH>%kTPZX*BzB&^6VutE>Jpw%LFBT^;xWC%-!Q${-8QtnE)S%ni2-0CoM6VF#?J(o<&v`vYf-b60!gIk8POztCs$kmyt(y#)Y5ZcGZofW-o>pvQD5-Ups(`Pa(kWiLv zQ~3(ZA(AbAR~|&zq1-pC7a~_~{Ro#gTTjgwvtI5cS4NKYov+@&$Do~+>^t4ENBwdF zO~$0P@U64e?+!f}_NhQ>Vh}QUof0>{{2LQg1vHousjS~dCFM4bs;m)&Vg3igsi=nb zXVA79ev--dhJj;J?Cc_C*V6Z#qh0oAxs`lH+TKL-O?BxJ-!=0k@$qE$L2N_j&jm^p z*r+q?Ww{Cv;Z5@EEX+2tz$2X&$0nAguCFyJazHWN?MQ`o``2zTW?d+HNiH^(zm=NN zv!o#t-}SYI4#>3PGiin0BI|VFzDT1vtjC7^xr5}!p}=CSCV9(_m$sP=L;tus6l;Gl z6cg1dkv(J55sH6fYh{(hqM9ZMJPA(n{zYcOgA_v=*cc|<;9Twu_PquqN6i6{W-mXP zq5C_Tkg{?y0UFq&uw2?#>oaChne=)Ejrik5mrjv&Cj0m&hvfWKE7olRJSR-+9m4YA zA>K>r*e9=Ey;8VJhWu^xF!jddhqqQI`zv}EDvI(@HIl}VCHKF!zJZ4J|Q zXeFV*7KDTp(hcO^8;Hvt2C7KQ$vU9s^RrGqnV6+d*C~B72g)_azI7i`m53}LquLKUWRuI(& zR&1F){K^^zsk_0@0=XNJTJd10AE~vZEIh zE6Bs+PFJV#?vm7ceK~~^2N;5^j~x-;G<}^vas5MLH4m=A8|+$79LqCH*}4bEHOSV~ zJ!SX$62e8Aps%9d_cnPm7$n1ZWgyMZyHyI{_ 
zk(4tR7}1g7_fwDtEua+|)th!PF|+R`Xv??1t^*&9@J>h^SBFA_5GgVxznJA=Km@w>U#X%g?L!0=BC5QZpym&oUwx6&tv4iUs`T6jj?{1FEZUDUW;>@Fb?h6NB8l++n&ceX5`A@Lx45#2d8 zUec|8fPk`aMP9i`n^(Qm5Jpn?PC`OV zU7ZXhgoW*Z|NcOr?u+x@UxieXa?wOzI?Ny^8xBq%y4*N1Yip+4<1QL}Nc+1Y^Qj_g zkOCp5qK>ImGoaKC$?#1Lx19@qfde-6EO7MU2WM!9g;9{7IE79CV}o2K-E#TIP$?G< zUX+%Rvw`>N{(1_$6K{=5hs?M6hib#M2MdS5aBfd3poB{$v47mER3B zsK8}=yZsxd{xLkqeznu?-wO&cthmd|FP9+4y9AOj@8BkVv35(%h!gvyn1I$Atz;Mg zu4cysL3gg(e^|$aem{s_IV?%`pyX`mPfC9-@bejy=u1@fdtIqNl9MZ+uB@SPR`Cry zL8VKB5hY&g!m~*-te`z>YsEhf&|UJd2u;Y3!f3ZaaPOy9%JUJ5BL>TB)9Dzonf##> z-%MBj8Y?fC#~A?;0r%+3J#@xoKVbi1)aK`aKi?r4FA;)Dh1Pl*Wh$WR{zvlkZ3B0)*FWV!tlM}Ytpyk8?;3)`(p9S z=_8T9D`N$*s@xnSZ`Q^Xs5G%d7-OzV3ax%zeBwtvrXgx>X?e=$WNR_a-{L);9wx7* z{#q5syNoynrSPpjI;zSqc=xljk>1$fpdKS2%n#q*vMb5GqNgV$jnB$@!9qDxV$h}! zd|1QPwt74sm#otm0BQ#;G`Ks~Bh8ZYIYE2oPM6!z4!!`8^}}i)p3SIPv@5)c^o?#a z7;&cF4`8!*S@`Q5PoWOdzT90R4TYn;I(`IY-@6r2;NJYKY1dcafF zT1#sOf!#4&I(B1 z)dzA};+>Cj8IM$=?&o?{ixhCrb=8qwT-sqeW(@n2_$xxJLusA;E3vV$xIZv{Wa+$r z^}eL~6&vwDl|axJOy|4VsN)BLTw(0AqXvxu(8`o+vhb;oJ=S%E8kc>FXZb#Pt%n2 zTi!H@BYxf{39vOtmR{8 zyHh``$Vn=6Uu#z6prVWQ{qpuksBynwkx3QMKo~oo;1h-L zuEuaQIj!Mc*`_0Vyz~FTd?U`OezUi`3FD!$I1T{TF=`Ydhcku}B^ zX)g-W@TQ~MtK39u>6;SY^x`0fig6d-rz9W*jFE%~=pr{o)^Do31ZXQS)(^MS;VnQ; z1XKV17cGn$f=_-(nsRuxkcpuid`#hkA*ty#CMO)bE;f34!_gjNCz9nhvs~tXOVTCW z1>Mj6!9qqEWxfxmXK2*G_wO1`LESB3Xcsw&x@lDm0D_V3F65VD-VGfc(3Hr;1v-OwA^!{yXJJTqO{V|Qf zc$yNzDn*35dTlQ+QqbPsUSz{9;}gb-Oj;5Ea2Y1t2FEj73LGU4JNHyV5yX3L$A$1X z!@{Yf*boWoT}+XK8n@=KV?U*2I}U-qVBZ1wuK-qpOnA$H9{Gim!FBr;NV=78=~lU4 zd@2H-ubEoMR~5EHH4A0yzysKRjfRCpQKS(L!>8U{meA63D+(p8=&fk-J{bTB88IY%1(ZhOyB2bV-ZnWT05Sjn_uu^!&^nz1|g%aD^yc(q333MZft|?n_Qi9J*E- z#406V9Q<<36x0cg%o7;v$=w$+s!Pz6>jRXOeIZDfN=4C|U~mnf!HSBC@F^*iOrrT+ z_PmQ54VhqF;WT()UU1Tq3@a)YWmfgOAB1)f_ns@CIyLdqrDsAZxu5JB(nCY6{1MB* zgTW=;qd;8D+#dxd)Yw?evgq|McvLHh0hqMcjl=|=Fel+SaxqPi&E)9%88{u%vKsA+ z#W$L!ch-JPtNteZ{vKEN%2}&DctGhR>u!yjf^Mm;EM;{aUNq#k@inqDt;M7p0lF&zODwqQDpo_ z=$pW@^(C%d&wlc{AptV1N2-U_u~qTXeLzS=z8%;IOH2eWP5vPOO}Cj^xaxuG)S5&2 zD8qU#16R2~m4rk_2R10D!9#eBeNaGKZ!_<8nX87+t6Jetqg|7BWa>TXNM7l2M-OqUcM8>n$Jq zZLX;wHR!DAdA1Z}>NWZlF?)FfzvlBY<{3`kiTcf)4O~mwmVybFG=+id#A6Yl7_O{< z7@se3|2ukR{6;vZJQz8{IoF`BAq2IYt6@tPkV?Jb>;;xFeK56XjjZ*>WSwM=aAPo~ zCeb&4e-SVX=(~9wiw;6zm=hSi|GH%$i5t8^jiEAY0?Z(?7(2G0+gshh+x#N^$y-=G zl?%0-92KSk-oehF1-96cQ8o^^CsAAZQ5pREVnVfT`cYIhjqE0k)jth3!dERL3~~9) zfBzQFon9cOvBwOSu=hR0lMmYhuUc-kB_kRvho0s1{i>xiZO)=y^H=6ld3oded`kvl zCQv3S!-xLzYn;Kis;`8_{k+4dSg#D6Y{~fd1DV{0(Sq=!^+k5H)SMqY-|1HzFQwtj zH@>jASt^PeuJ${^Dx^4j-GZ0tCR+R^fM_~)KM9|;?Txs*oIkLlt>U7*-wQB&iDkcN zWVG_^*_+N@f)B9_y+4xqpCY9Q)4Xdm zV)|7nU1gTP0uhQE+}NQN3mF zK_OOdMTXf9YU9#uuI=?_CC6xh$ag!9gu@5Se%+=goy?Q^obR$e&lVdDmOSe|j8)s{ z{?_r!o6cY4gg(>+$r4HAnDC|EeWm>_ZLy-s(CjpSws{&UlI+YgKs{%C@R0ASZd~)3 zS1bfJ8}{B`iJ775n9U{8u4`MStcIJwO67QeP5hCjRwa)M%5_;4$O9qGkw2wxpSh=6+p7bceqE@7H}k1eRG?1I#H`eX+BdY z1Uc=;ThEX7VLy;5nGh&qIa;h&(l;~f3(d_?bHwTUb!GKR^mnR^w+fRv+o^0mP0yyD z+1(T5uLq(+Cpz;-{F;2UCH5}%zPoX@ZNQSr^V84UqAS-#->UDl4`7XTIF;`&jPgLS zy&F7V5*W{7nC!KqP38oo_067|>?5^Zh)Nxd`BtPnbz9>Ju-052EwkD#2#jKO5?Bty zeYVQ@cz7O-0QwL2cf;ATlyqPQ;$qvs^*-mx4jCn&*w&>gOmXbXiir|r5(q&t8%fwk z7g}g#PYogX-MI3QtJj#QGL#tZLsSFozNL%z=uKYS*vSsOugSe)`G6W1PH4iyx90FM z8Y!rEqx_XElz=V<9Uc9_v!&N>=&8hC5~mZ7&0>Tj5PD6r>f}gwE;ru=pe@89cf8Y!sSmkCfaB zSt2r})`sQYV0fgvYo{pcIZP^j-H*p8M<(4rU2cY)T@|O<+Sc}CmNK@vS%k;ywnks2LdfCs z=ih8WfXK^OL;F2`CLfjDRDJi9YUt75^gT4#mrg>Q3IJw+m$g!g%)T%e3ztxP#k1iG zwRhP|2Tu=OE=rR!Anww?@zz|=4`OcGa;24D)5?)-Z#+?ZFlMyXXqfxD(Ox=7sWO?- z%?j(+@A7D%EgDa3U9^y z(XxvboH@|5Uf-0{6${Pi*D|R1!~4~i)a+L#9^{VmD~AkOZ~I}wf9QKS!egWBl~^3t 
z6E^d2P8>Vv+zZ(;b%czs1iV()4I+cC(wN`t>#NbLqN46gOtGi8(kPWz;TY_v*1nUW zgp^Jy+;UZj5{S`<#I$#^wZ^~etNJ+hJ?;hAGP;qzbRWBb2NaW0io^#`T*I%{SVtES zs7>n~Zh*)N4*;=bFbGw&+;l*a@$xX@dBTN-*H}Dtp+Q@~8h%~Gqh~OMKu!&EJOkrJofib*iV~&CsPvV7uVE~-P{mdlx<{ADVmFU z{F(Q=!Al_|j55R|WloiHbedx%8MrF!c=WEf)t&j(wmCOxwS<`6c|D-uqNWX-TPz$& zvDP@+s$X7Mk_oAm3!&Df9^Kj5+4oXtDeY|xrh)|J|4Cn4d7uAn(H;~o#r*7BFrt^` zU00x6;_C%QApECKkJ$f_OC76oADzrIex_PcdCZ5TJxMo5# zOYlV1!!fdAG@`vf^@LM!k84CAKRm$kP?N{6M1{lb{! z7-aLrWMr_Z;3P2Lye#;41;939e)%F)XV1-!RoWv3$YxVSYD#l;92J}#gxyyU80#OR zHzvAcWfTyQZ^f}`t9H@D)Mei_*WX9At` zclG}G9j%qZc>dVtsxIukivH}sqtk2d%MHhkf$T8D7C5?@wZea|vG&6_v(f*&#)Ek} zPbX7gA`#o#9YSlVC5r38zG8ic4tb1^&P*KkSs?3_I>qe9wbdvE7F&B`s`N;cHo(z` z)EMW2yh)y@eK!0UMT2hbkK?b^2U8R7|FT|vH&M0y4q~jG?X%gZ{{V{ zS0XDdZQtMBy@a}&SiIz^RxBpBejXSLv!({nX(FeU#7hhLD+OoRp)c3xnHo%Ud;dv^ z$00f>LmE6#!wCa|$5+|Ypg8_&2#Mq@RA&17mX?Ky8;1*XIdDs{OM?-l(mbp(sh@4C(z$bapp|y!>iA+vd8>+&9sbTSqW+f> zs`M6QPFS8#&;R9k!fwz4pS3ova;F&-IS}Z#PanN3AG8A&XGy86}W*uZmXxi~} zY>H_PX%$a6`nQo@^A?jKG@>*woHeUzGOU~NJgsBkR42y%{`5W&7qsL2-PYMOL z_7cq0jbbu2TnD&AzQN5Ir=rnbolMrxxQVk7wjax{x3Vb~jk$P6gM|Id~6X>vm1gQD+h{qGCL?Ck6f z3G&Zxnr3T(Y@j&$YI503u#F5h%U!&E{hBXNAayLPL8Url5z^Q};G6aG#EjMSJXWuN zq3)3qUd#fj)DVScQb*LZ58WJUuxY_JhRg&quPry{l}Xi{wfW>=JxBty(8GttIcF)97?IS{<_BmB@9kC|_^UIo_t{!$^ru&ec zeb7#{^?O4k1JhF3z#4V`;l||)JKz!>Jq(R|-`o~V?k7)B24AU91xx4YwS}z%rrQkU zyKL4?f3ZV+d!@ILRK8bxnso-;D)7`x5*Ye&a5{oMX8WvWs{v8-EEk2Rg z+G26BH;ueMQ`x6T%KS6zsURj62@?W#a_AykEP+;q=m>~brGu{7P>Cb|1SB&b96+#K zLKrZ{TW;rF8^kXJ>yp2S9}2g(HkQ9%PEwV@mVOC)4Q;I&Q&QZ^I9x1B!UYJ~EN%&m z5$dVQfP*laFuI^79)y?L~bHs4wkEWF8kn=UZ0frZ@VNR@|`+8zTSwFWfsAh6RmH4u-@uS62&FXYGX zXmtH+5%Tx4N8sweV0c|g{#=sUA>8*Bkw?s8xVF6i>YWh|5h_9pB@z!4nfHy;UzfpA zsj5*Kbr4$#oit*BhS@2V||*dK{Ss5{!nSA z?^!i|p0}3-MLPAU>FMe6CHkT{3Yn<(qUDv9Drd>Ef;^x=S%HMQYL(*t`_VWAzRukzkkHgv>GPoifUNx5;^C zv5&=NecUDeE-~r9MJ0|x3hGAg=+GLj`e;hweqW*iv_PJ%1?RIXTG30NrCiw*p%;_# z2LgkU<*B8fUSm7&Mf&I}#KeN>tj$0GO}}+7{oRxDB!gHECbCyQLIeGaEP;h+J)rT+ z2H&M8Bx>9GC`Xs|(=~p}#NQH+S0Q^t>Zkv7I@WwM(q5efE)zAr+J_fWJaJk>(lYY+ zwYahC@~b=7=5dd2REiQ{T8KYoAn*OlS17Jn4Nn@c;$Q}wZfF;+p9vG(dWk+cV^rj`)@7=1?01+ z-;YpuM`&n}{+DD*cP>x>zk2;BPZ|Zr(+|QBIET;NSAZ zVlWN70m=6#qHXbzlbPvOa44GwuQ5>XlgW>wZHxw1Y*<~6utT}7MDy4#okSKolRJno z|CPs-61P1x;Q0_Y((4ger%UUkX9!V%?;;T)+w7h%vsdib*P!N2(y^?T89@LoGAUel z$aHGSAF^vZbfqHS@Tr4pfgo{S3mwXS?kR!(ILc_-k#ekxUaaGBt6luJ)s#njAGz0i zl;800BS3v^y?KWV{UG(h!2`@HagmS+z|n-b#^l*`P7bOnToTs8zYAZ(75uD#ot5_B zXv3rWE#+gs*6*4$Qfl@aHRKMNg}rHuxLi@*4@xAvsn~P6b=LMRML?c{|OrOBE zMmN;jDN_Z#5GiB#{X5Dh?I*&u=g~oryxe%g9EX3k|T0h}%1 zVIdXa!b3)tipj4&JWwYI*M9K^?+{nikv$#T-SD*8qT~}plqETH^hR-tB$AcU&%Lk3 z^FD9`N1osp`(*Dy472p^V*dz>YObSQj76aiovNwyo3XNzGzhxby#xc>FfE0tb{vI` zL<#hOnQogh4g9Ffh*Zr+6oQTprW$YO4dY%jzSRR8sSsS`@ZVEWyPs40hQOCQYO}lD zVxw-N``jj>M>FHKCqr8l(wdri;%;tr9Ab3QrPY;kr%XEc8Kb3?7!XfZ)|Pi_{wvGB}5NJQ{JvA zDk`R%7tK&~S&rvFc}UjEwGY9O4@F~C#_pk@=9$p*p{9=PTfj&B24s2g8Lk&f?44S;qY#J?J|DB3eao2Ah zo=)D4$A>1qHGImHR39-;8uZ3rV-TOdbY1k;)y}~MvWK^dlCDLK+ClRAF^pOMa80Z| zFNq37YaUNxKCXlJjhU5%n#>lY_dHoOD+YJcco!nehK(7!KdF%*5DZHVQfMT_xMuf0 zHh%^j%Fk74;Hhpr$ zm63PGyn)A%LB)Skl;A_x%-3OyzEQDql;3gd|WK_C|Y+2ToU zB|<7Hoh6mZp2lJZSxy#yby=>nad+Bor;Ri6eN1`~reI94AwowFB(_XqquVJv6AeFB zX7W8&2?tBYVXs`&@j}FWB-oBFCLbTeW8Vc*(O8y+eTkCj?8|JPh^5nf9wLxai}uN* zqgN(alv?0K8d*hGnK|KZm@DcH|r-@ty^*A^F|IZCKyr+(;;b}(# z^YXIwEq)&YjQo-j6V0y-K($r@Y3e`QN7DlzoHDXLa(UA=auV>rvjSsdHuESR|H^1> z+6vqW@AXHevro?`I58c_=|;srSf)6tX!hZg9_DRAIp~PbYDr)P6F;lY@Mnd;!8&za z5ddvftTdAm!P)Ga;iz_q%cT0pUy3;h99Zij9Sr9ivbalN+$~3e6^q~hDl*&K>FIGZ z?U&}J6d}Nk!wW7rCKn}sym!e@dedzbaMZ&1V3t!M^{!XvGXb@Hg9|6_ 
ztUzjMU@?0DbD+DsyQH#O>y5VRtW9IDlB@o@~?ba4k~b$}IoRL2~358@z{dOg_tZ)L=hW~h#rKk(iaJC7WvRy;Nw5GtTNz)C*%|tcH_AH! ze&yw&pHipMgZyI^_F%E2`{^5Qyrz!~Q(`p=byDJ6IAczJVyAikBY7@kGg>9{Xqkuj z&=q9aQ2S2`TB8!~FHKs%a$pWR3RQoO05pyvYeO9P_X!o3E3v6LP;PGSNABDM?Sx?nq15&u5nWPIEqWDo;b?b7?AKSd43I4uQ_IyUfQ z^VlsRRO9VT72|;qSrayNS%e7`!VJF?g`MO^%IT!w0wmH-N3Iyj1Gq zwbFJ7WFD($?HRPpC7ABs!8rJFDL)hwY7Wgj!jj5Do|Iw@i)L&Q0;}tR&kduKrKN`) ziaPacLLTJEkWxI}*n2Lp3s1QPoG}yM_M93Q07$G|V}}UD+0zBHxw*Newm`gh^v~O% z_Uckvz`pYHXY-3`2L}fvcX#*lx4-+q=7-WuOVR_+t?D8wzcpf{#y=s~Jp9 za_WFVxmSm=f@U(QVcG*N1=CiEnvnImU)hYi(|z^ayN@;{ex9{!@HNvpVi*7amx%Ye zV9_})C@i!|j2zhn^Bb2njY8;j`ivd^ufl|JS~mX`UK%eF2bQpX#&=!6(-Aqqph8`p zQn$ag-{;N#Ue|$MkzWk|y?Zr!3;BJqonh)AW8K`C)4mwnr4jSb?@dotsWT-hA?b!- zQvVY_RGd-wS3qc=0p3EVo$(*14aPl$WUTt$8jA`HP(r=N=x9iim73VRQ0L3W;(2%v z|1m|#6Rg0all-k5%uCzHiIz(8?act>WF5R%?E_g?%YC;luBQSGKKsiPekHvOJw2}b z)xo>n(%g>Y}c}#w{dB;~Ii=aEAm9&=3Ly2*KSQ zg1ZDy0|7#C?IyUpThQQ6aCZ;xb9vr*znPk>-T6^tpN=)jtf(%)G zhDyIodgJwaOr9c{=FJrRlqVj&)3=SRkfyo{sQk6m0=`TBKpy~{NPLpc< zuGd70{+!YRU0e1N@96*%bu4JmB(;DUESPX})2q@D zF$$KJ+1~{5s6TY1f@_;nY3e;9#Z@(jgIHloX_(eEb_>AMfAs?J zdM6c@*rITNAm2-}LC46T2Q>dEf*k+SJ!ZJJC`bp*f0|$yf`5$;`ruta z`T|rb=F@%hagDEz0`#CfhEyDD3j|ci+-C(L?hy0=z`b|ee0*%pt*XY5E$V6)zmkzV zgjr!G1dsmr!?CaN!3($UsYa`J$~$A(D~Ah>^WI#&F2EpHv@MPv#1q%=eHE<%A!66j zC>3>@MQj7ojxVR13Sm)Ep#a{BvWtr1D#r!{LIA;j{`Vt*L#zPg31C1F_7BkB6=(i` zQUN8|IUqpDT5;#&?RicCS?b?kuX)%7>*}P1>KmHBKAVe3+|KP(AXo!$urHs|avkcn zch+f$37Y|e;C0=GRkZ9t6*9=!b3JpmslJ=|VT-4I{kr)>MXg?-VrF)_cx@|E+Yw0I zqFn7=u2++RaP{|4BylH@yWP5_)X9wmt?Gfwt-i%qxE;vgAFaQ1dP+4PASSLZu1gx8 zju6MU(wh799(FM(5-cnxO6b?JkAe$aCazzd$tD93Ek3sr?Ez?p-4SFa{6HrN-N-Aq z!+A|JCUn@3A3xqNZ}M4BMGsg3xhB4-Lb@w+DR(_4B+!>0(zG#t`5Yu<)S9Um=KDQUB1IKbtoz%dtoPX(j{p4$Y zDZ*WXH0H)R}NPI0{zD=B0H$P z_8R&O-0Xf5fJ$jrT!)N;VmO>G0x<3xDT-D?0H!ew9`x+q-s+2Ec>+w2kI#Hv4~BRi zjSw{V$+bB-nDtm~+UF}0sbkKUOrnf$nId>ora)y;(`qk32Ct5zK>lwt zhO$B8Zr*m>pIWQP#x2_1J;D@tL}!^N{n>F70YmQc@OHw7y8)_SVGD}e9cd?}=72P% z_2C8wXy}o6qSpZnzdVg{L~zUYTrDPG|9-37ZAZ@#_K=t# zrF->i>dwNR5F{`#METmDoxwr%aqjbKerBMkmR&B0mv%los z1*Q$SQNuqMsulNMYF3)H4~y}ve7-8NyP{%C!~0uTSJ$@DpI|tY`i8RiU0cu;Ibh@P ziyg4-hzoSYj5ixj#{~>LNva^fuMxq7INy|(Fk8#?`TipTke^5Z4J9dKu;n4i)OnHb z8sl{C&^4iMqYE*G(5(p*$#kuUaMQ?bMe1nTPqwL|8S}L6yL!tgpfoD%(yG!DbVo2p;GHRLdV z%Ii937xTNcR{0bF&d2?e{$LH0!zrowM$a*`5POf471l<>3_#IPHS{j*-wI005wb>& z4cWJq4uC@Z858qv^?CXK@Z-8wbbvAxat(_Y*hX||?QEk!w}}OSNjjfiEHt4%Yr_PA z?yjV^$3ke)i=z<;Zkj^CR$gkEfV!tn9O%(0Gwmndo{clnU~-5Zsg}<>?wP2u)n26z zf&RaV@hWI1B~ zIS}R6QLlu&{r{4QAG7ma7j&u#{*d2*F2RT&5a;7TfC|=v=wX#V%Eg= zXZLb5+?1eABv1?EY7gaX$c(v%&!f$`J)Vo_D|$A*T^|Qp}g6nk7#yY z?ZoPh!=Y)MPz92fR%$Sq>jvmpMS3wTBXB6q{BUBQ4`koPZ)NzY-!{tL?S8EFI6X5= zTED78ermfSNg#<=dIMP%Q&+BaT?l9W^OT5YtIJ+YbodWvd*~KlpCy&cm|=IbN=#QI zDt_7IJklWn{q4KeqX7;5YcYO%Tck*-xur%3V*iuKEsAfLO3!-ri#JT2B;!HPx8m9F zI@wt?eHLt_mp>&TL7e-K2Wc3II;soZOA9s?3@HjnBJ_Bea6($__m!EM|8`_fRN6wq z(7;eMR4^(SP5HCZ>kMp-a+JQ@F)tkWFT63Yiq|RojB_npY0c(u-qiCr9Sx)&NGKqHIl^uW;gE18 zDV&$+);WpoM&U1gCGd@(HJ=yH51oM&m+g4!@njlJ17qYlWz3vEpQlcN^-Y$Ftu< zCuV114Gj&ttT58K?J+qw#hdjlV)Mgxu91qcq|H?|da!9aJ{DBOn;v{!N=TTsomWeB zIfmQKm zU==8=rjc2ds9T5`YBz=mvCe-!EaRm5r`tB!e4uXnn{VgI*J`#az4k=|>iymY&iCBG z>|X>x21C;vLS%y)p;KZa{No~ahKz3+PTRjRxhxOKVo^Bk~so+9z z?h>rEs${V%ds6OIKZKKGi}Bt#W1or~Mcdw)ozEc#!p*f^^_n-_q??CWAf6!VI{i}A z361;qtr|SdW7w`V_pBP(FqiDZqP-lXj{;7PhlKxLvQ3fo2cNPBD+v}9DTwYPA2hnu zJDbUd4w{sp=lPl_ytk7Bv4P)D^LW*xcgbcZdwbPG#>nLbH+^A@QEoyAD9KI59C>8M=v~lu=Ym3B=HOx!13>%90ib z8^RK(oQ5#2S~U)3Sya&|*S;}|2qmZg)jQvKs}odmF|!Hh*0&`d|HD=cUQBXiU_EgX`m)G2Q&uV~L|MT!#k zHCq=RhD?A)N33;mcV0ShfL{VEYmM#Pl=ZdKW|BD~CkGuI;jVASV+`-YXN%HEWH<;S 
z^c*_(z3QM@Z8{y=JHQKcdOXV&Q)td%nSU8dxjHukNJ!4HH7|UAV?*~jj9-^4xo6dN z{szsPShJ@6#%GZ~kidZoEPNSs9zT_#zPns3o}06p)6$+hyxvl&O}=b(n(X4q@cC!R zs3#lO_;@$MK9hCzX30D)J6wOsZDE#=Cp3!mdMrrRi}Uo*?mUa00iG9{6GZo@Y8r%i z74^n-Xk+Q-!YM~hRaMn(si7hAz}8k02qa#9a_Z{pZoHACkB%Lg?PrU<^-7S2gV3e4 zht*ich@-h$#&f_a17Bw7{R{Qe|29-_7JsH z!W31^Ka{|we@vHm1y1WO!X%{y4S6N7MWtD}v1!m~TkNrIV_S`zbG(Sr$W#)c2lgOp zc^+qM;iD6sc6lOnJi(#-;fJEyRrDF8Dlwd8=|a&!>@wY)y4u!Xby;ub3S_*FhBZoe zG+NueL{P+cSN4g}Ps}*&iSVCJ4mXlED{SMB!jBa-gC)Y4^MSMkV8}YM45dTRkuzC1 zopvXDyWFS?TmK~RH{qO7(aQ!ev_VfZJKxoBOgTkgEV!h?>1s~9IDGM^b!CjLe^sP# z^^@VO(I>gTcUCpVJ6If?s%?!7XWy08>VMRC8suwPYut#}{xb9?0gMAL>c@#ZYS00buj0&F7a5)|tJE?KXekxF+zd5-qZSHH@ zTuQkXO5!ib86g|~M};Uk!k=ZY6z_D@*ZM{b1uMsE{l-~C8HM!>`FGQfFe(TaN7NM} zSmM)e)}Dbb@_W(l8mk#o4|NH-O+*^eKv6rML{?fMr~ZA{dxi!Fb48aesF~&@lZ23z zd(PnJx6V)}pZ7b@-6~awsrC7-Uu~u~Yx|AT(IV4De?h*G)SQyS$`~MZ7K@R4>Od;5#z9#>%AABTN;%6u9$<87-A z_Sq6zY7ZvStDF1DMUiYv)`{&JZH1nZGh)b^epR1VJisgHcBnIun#BD<7E7xtELqTD zmHl!K=n(^ovV|0QWWbDJC{=txY?M;xo~47!n5;c{2oI^?5mD$T2;5?RnLPFmEvqNO z>!3){dp9aAZ9(<%rpM=Y9AR-NH{3iNu|yQHPg9j`Mi0+3M~BlCr2nrL;OQQh(uOpg zK7=04Hn?^r7E?^m$VPu6;eSqXtu}`()MW*VO=`J#6Zf)acCQ5}fsw7sdlO3T2 zs@hkII_YlfSZ7&A1SZiq>armNucw^0O@RRFjSXd{i+=I76C3&rRm66dmy`D8oWoWJpMU^k77q#+wQS}odx`mK`IdVah4 zu=YT*Kh^aW^>nSI8~53rC0i_FL&*?7shE({<>ol#FtC(Wi2<$AHG) z41%f%=nslDh(0{*Qh4L1%9ml0%U_4XT%$hU$tFMOV!s4ay)_gT(XI6m-|L5nbfQ$srUFjoYGJ_4RE>7%`Jxb zJqnWpQ z3-q5l;n@?wEMKoesh40j?HX1FM3TfbZ$n;O0~i=?So}>;R1Ch2vvofl9KmPOebkT= zmmFT-$_fOw_dsBa0Rr1iqVbmRz4p?f>Q%wWg6^E4#pL&Rg{(&9$y@D)oQ53Rksw*8 zn@K*qVe`n_E{Ks=4bG+|6OGGFrJ{&*Y1HQvd{{+xMJ?NRd8OUV1y?9UkHb1U^XzUf z#&SO(Kpmmfi9u3qzc2AFsFCC$uM7u`p+%;acbXvd+ya+3=a3bI=%D1#HT0|7 z>~8`Ih|-|GCu#FQ-g1`-uavqG-gDlR;a_-1LXqN)xuY>EgTv#H-fys|%_eK{S@rpjXD$SPbJPLgn zqQ^*g50|Y+#jDuRJOAH9JGmK)6oEZgZbxH^$jotQI z+B=!O4Cm-%H3!Ga&j7^C?w-d*wU=QNZNnrNq(=XG8AB{A<9^@VV3Vg&_-Z-ubs;XX z7R#vBA2wI9fV$O_8-Yc4lvqZtYulX<6F{{?sO!_k&=~X{lMdYV2iBX83lt!{K;U|e zyfGCi%+I$Zd+R%!Dq3AFB;>ZgU28ZKa4+=sZDR0SRSVq-&M&>IU$eUBLO}2q)PH|G zyD2|@bV^P@5MkpW=St$O!DQ1xfyOMLyZbb z{)m)WikD^?UyH`_%c?jlc*-0ujsH#PVJ}lm`M4Vm-9}v+aVAC08%#W#9V{}>Iqn~X zER|G`GCWhnGh!`Zeu`GvbZz2y@}Eao@TmM&PXzCJJIarccsLEmP@ho>b57O{4yKel zpL`ndSuqtpIVN;pK6>%jC&!@EpLKvgpALBk6KITPUgsTsyFIC{uTZa2tW!?_^nQM< zS0Alr(T2NA#st$%0QN88Kky@bOy=nxWG*?nIg+`I(LTWB8rtvOTe?YC;Btze~-_2p7yr=FWxq=Q9L75s||1dyDwRR&U;-S+5b7Q@@(t|W&x9-zi(HHG%6gC;}Y7|~( z_@RbWRWs3>$gp=Su8Ai|b$TRO3x_Z%7$I+4&IW^V=Ly?=VunMa*hx0V{>yn4+#Yvid~PRGIH?;K+_q}sLn2v- z50)k;Rp)XGhKFLj2IV(|hqKpTfpuL+Jt_~I);4pC1RHi53~->nQ&Q5L@=TP;dme1Y zTg)Go*<-0rt(os--WaP@P0BS@lXkTj_Svi8pcdS^&h<({B~v^N!F@&CEP7OT6Y^kvIpgN% zDUk`xiD{_J%4%tW!*fbXGQ1M0e$5oQ_@3Y1J|tFuO2#&1gOQpuO?z!P>6)9XE4aC- z>)J|6sw%XSQ&$|RSru5{oR=0-RaRyhtLzO?rT9|&%c+~IPv)q~{l zly^uh3o*)&pJEe`jycehMuONp%TiiX-MAvibB04&cvL(@VwFC7IF;asi?^V9LJ@K$ zEvUH}gTlzy35|A*6zaZlbc%C|Xg}JcoQjw@Em# znIY9KhzLe^B;JOe!0wS{x6#KC7^;}6B;xSzr&?=&!|dFx zv-4rzAOok1jF*=JV6@bN{5hJPSQ+wRnrfOY^89y04jNg(ZfB~05F$}g3C%@OLK#h= zCWILoAMbCM>cz-R=<0>oh_T?KOwVj?sjLh0zTsFU=V5@f$%JANTh0do}?tF8* zblnpAc-nSr0d<}hxx9}dmW~p>%GeQio2+U!V+AH#7jG2o+1GS1nygzCeRwXq-&!S$`hL!@~aq`mxSvZA*%QjVi;qLbB# zzv_}0JyL5h(4PeitsmlcVIdlr<;eD?HXhBU!+ZTz0u$=WX{|-nJhO6GFm+&CZ3~Db zs?S|syHRUl`RuF~0?=K?s|;vwFJ_DLh_j z5-}LVrOSsZA}1nOqKjKZNxuHT5BmgJrL`$UWCmJUk=Lz@9Elx}B<+g*TNaDbYH%M_ zCS+9b*e;gIIF8m|XkB!=46U`95gsk>uvYMC2bg&Qs;56oVR3 z9V&9g889jc>UTdeE)~U|OYXJfOgSd3VK0W69o-W<&A5a5SsX8Z@N9ceYfiip*@@s8 zPIBq077&S(>i~E;)hMx>qn91%{Cre`du4<5Rd8!5T4rR%L>Kl0Q^N27OlZ#c1!8FC z^k&N!a4_T*X8k2Ma%>Pn$Bg|iUj+oa`?Yp4sW!NNSWRn+9lhJ2B}~AU& zN$#F^Nr`#oC56}It)j@DT|>&r(%w`_()FKVJ?HWRUw+%qxNH*T1};5+lz$s%B(VGX 
zYyvgCH(Q^psWO;hP^nt$QOAQN?FPES zCRcZ#Q*uwesZTFA7S7Yq+yi^r2{KJ z6git5cA22`xsHJyp7+Iu<=4iy^nR+)ekIh1+zV7(NVCXljpGV%^mNR)uh%#!Jd6LV z4`)l$BF|u&R9=*o)}m#ZfdaX^nEo}EvpqXG&86`o4;>N(*FLV)*xc{m&%;!t^zYig za(wQRAmp^(!OkEY(wWv=_t7hBSo@_zp&EhE`LolIhgC;5Y*R=l2JfwA>|>l*X8|Ye z=@w=?A6gg!%uGyu4c?R#0qpmH{8hsS>;K76%(^>ROg4E)8a$@d`s@X@A^)`forF)e zEkVOb?zrMIS8-Z5EX<}79nlZi{=P5SGXt((Xr&{Q9_seA={YNg8Rw%Pqk+s0GtP*e z@^5$D{-xOY7?(puMmlDqsXO&9D$8b(=+e z2%dkhUbXn4aq%t)A-@#5v&r?osAyylxallR8fbG+?*cT(8!UFyy#}op7S-maW@l^6 z24NzkOuf_+{4rrJr$n%TP9}tV*Ojc><2Af7fc7I3-pQ+3xUh zm1pwfP@LgixaIYy>B%f5fYI9|nnK`(p;Fmcz*B|PA=O93(Sr{HKKOBn2;TAMfr-i7 zdE^a+SYx{NAtdm;=IUxYmDAZ#bPLJx<@0-rlo~UXCf~_1tc8UIANOc3?*S?5oct@i zZ43KSOTF43Nkqy^BUSkhBL-T^-4#llT7r+&JB4cO8R;c~5m>y2W7JL^FzzI&YCS}f1ku&bStF&Q zEiaNrFxip-zZn*ivyxwg`LYimxbL_XSF{O5dW9tJWoO zi$PeB8Lg$G-_3~4Z+H?1X_!v_S@(?JdmEl=06-WiQ^jQU4jl!}xS3X;-Q<&7ma@oL zHC0rX5-(Rrfd|bO+4{MIF=EA1@TuCV=Q}z7RB4Sph7uJ$yXxo5?@ej8z+j6a&6aHg zejWX#I@hz#{8B03SY*EU)0Osmo}=%2DE-EGo-zZ2ui+Bkcg93oTkd(a=*YhAEO&fQ zwKiur79lK}<>0Jlp_?&^%C|Rve=3PDMZ4+}45To(1TgF>C8YNdpv89S@_-A3xceSe zREf{OjtvdKAH4u%}>F-<~RVJJbLzk1I`Az>d^zp#QqSSwdNK zkJV9cgBgtfdCmV_zf4406(?bp1QwKHnw6$C-{YS8-7Wrgr}VtRSTJHM!zag5TW&6= z>hUkXVuf5%5qASPc(7kT~r$(6mSn@i&55Sh%Y>?0*l!d^qUi4djlu$PD)`jPN zaz;~!B%>O9gP!lD$MGx~m09~<5Yy}u!05DLU6x_xAs@@^_;AI$2cy&zO3Z0a@%1f4 z2V!f{^qkLZm~>yc|3&wI%$1#`upB!)Ig2O{(SWEWfi$0$ePD2^+|sKV#h@q-eSW@} zn+R2wgQ5c5Ck#rP5+}nrogoDBUjI?Z(Y|Azd#^hx^j&^t@VA+0<6cdp!-jgP$y|h( zn_0QRj;JQqFGAQqO><3q@*0UZI8YNV&%24--CF@W#hxcn8!!g7S?u*8B)<`0qv7-Y z5G|tpcZWsm{sR~WLl+o~{EaQuby#X!U*)hCk{s-$Ps;6m7S`Z%kv8*bzs00+DSj}; zTF=AbqmSEC%OD1&^Hsdfx!ieu5TdvDR6J+CIP}gS7$0VbdhEgRz=fhaHW|@+s!0!q z)_`%A@O-uo!q(5~X%YuhXZNOrL0+DZIamKAL1$m7&0SJ{4>vY})GjRYT6{6v+=_#Z z{f5xG(4@=%{1gMZw_XMym0xx2_KR9EEZu&0p(+mDkDNc(Gq6^Jli&EIE19wz?$dR1 ze!_ps2O*dy75^{^C^iW}c5fa!*G}%W?R)=b&`Kypzujcac-+2zf)tK7n2_#xu}Q76 z3{d~+nk&2Q|Kb@EHlHGzUi=xG;+9&GUSDF4{#ij`um(%zQo&l54G#JH10YKxRDM=( zubwwH+KKr9JPuhUk(9?Ur>!gmTara%!I2pD-e5K_*~m;72few;O5qD@7`G;v4Bm@Z z#)pCh#nK|Fx-0i-e?5{)n)w)5QI|WTpyWL}T*=OT^oZZ)RXJ~t@Bh=XT0L(JNivGnx9L@@2RQ>OfKO3xk-sCPs#SfEr(XwRU z2=CGJ0i$53^e;6B;rjZCf zK_=>V99GV9zYM%WQcf=~Z8_uC*^nuq0-fVlkj`g=&1z}y&DPkH{hgW7RMA$iErvUc z#P9A}X?bEW>+0%G&ehsj?E+!emi(=W&)&sU2z?Kvuj`x@FOTjsHVY}*pH*U)S|AI; zRmOsjei<0~P_IqS=dmb+{*~fr+V!cbJl;@CrQjSI;V9A`{j`X-8IAHBrs zt3R;VAS#oDNcdr5NYj0K<$MRsAUusl$5JncogMu1b#Tu0&Y2~g9D!_ zR()RMt?vhc00v626<~XjzcbD4J#eLNy+@&~P&jC<9$~pyH>BZso0OMjC+{<-!bxYv{H786@PuWw7+V!!phi>9pOY@d$$IWKpQVJUx0wpgM z#dPU>}5Hl5Sby2xytJJ zY2Jh;-72F+e&2R8K%CQpwp=)69o~yOog=uw(~p=@JvH-r=vwt z1&$rw*GyE44MUxwy7J!IUq!HwZ|?7}HqV#jtK*{Ho4j3{?#?FBfr-t>akIzqDYsOr zUF~J*nJb^y*#sjTVlRE|)@D_+1Z;IG6Tb2pO>~k_VBjt?7~6Ql^9rp_teqCESWUKv zrdlnJ|0T~h*dEvxqiAW@eCJgYvGF?#vQ!+WibVXGQrHWrt+u17a5ASTE>l(krihQU zTG}co9DMI?m*i*Hy>jg1ZY(G$n5!h+6+!Y{tKN#LVM{>g#nl7OuoNSnEYX}%J{*uo zC=*^7=wSUVvGX1TU*IaBOsl%6yVO+IJX>sW87J+ZHQ%~=%|l&!sFK!D$_AdnDFoct zg$fG_T)&ex_RJ3rU9|WTLyPCM$fsDf-M?_;0+C?Nh@QYfc!Lp~^rnxIlxV!*y@WEa z{-3AwmI#Hc!VSQ;&Ao3a93&@@4xMN3*!*9ml6DpSmQty@`xUqMc8?qjim&(qo@@blQh`d_-!T{g zq|_0qWj&{^l#HzgQ$jV>yhS<>H!r)P#ZPS3{De2P+EL)CA9O0y zxuo&gV1f|zOuZK82Se#+R+O9;GhyYi7G0^#JFGC9e(LG_ex11nAMHm~Ig(>eHX=of z)rq7R6f9}L9g)0Yzh7T$dP=G{Eodbe$KAgpTTx zzJMD)mEI{(N+-XWjfwA z$wETY>&eH*H^(TyaB2DAHM3VrIltKA1X`H4+M^Jt0>te6|D^K}+toIIgbyb_l?Qbz z%%phn7wHJwFV)mcefGCO&!T?o7-4@Qsl8v0Vmc=gvq0lDa8d1JeN*l1QX@dRN3UfS znZbX0!l^k4tK;pPI%lPl)c)UB16V*JECdVUz(}49qrnlR4H0%RL|B>vdZn> zrTUmcZ7zwv++WTCT4tz|el?rm+iwu7f-V*mx}FYj83OS`8BS86TZg7kiqX-{O_vUV zBF%RKW1rzHY7KDbl|k|84(Z60ab63JT_x67pl38;!0ROMXP1& 
z+2p)cERdgd>h74F^bxX!hm}W4E2iGSg!IH}1yw?gNkt5Ynf7+~=3s&wU+q<26GPeY zUXhCMn|Dnkcl4Dbf)JGLZxVn?G$c!FCK#kU>fYA2u2PEhzN;Fg?H5j!yJ$y&g4`?e zudFa3$)^QhNB79dXr`#_wP#OKOHb;PuU`rmkhF8xOcn=01fw%m)~lr?PRcV7QnkAm?+e|Qhbru#{nUH!3KMXct3 zZ51>WHmVhCIAfM(tBx$MWT~;Ii)}$#_#I&J9G>Umfa3J`L^Dlse!fEe9S87HF(c1h z9grW*mey?fv>IuPN2WAQuiabix&4(;mVA6Dl!?sfrXg(nDKDX15XE<|)vdS?8oyP} z62}ZCa-surCrNGK_N8R0BYJqq>3ObOO4jIT4kEtuQ5dELI;ib$7b6mpO4u}su%L}H zw3j&V4+P%XHPNbsGOr+K{FMa9kXFGc9B;no+#NJKiWI|K<4A+f}DS`x|;JVigfBIs^kSe#c( z=K`nP1-z8CH+l8?R=CCnxB(pkHSIMjz2`SK&-1~T&M1ZUlx^#@n*Gsm0nIhd5Tm5&bVj~}&v2u8!W%;LNigzA8 z-Xc#$Iq`M(XMI_Zryced63nho*DSDrf_vz~5p+Wz_`~U3gOmDzz;a3)+t*xD;(Py{ z%2pBpiA?*u-&w1(i}+7-#!d%`+y_dUlk4%r^Rk+o&35)UoQ69Dp=^_YN%O&cdDgez zKr<(X@WACXtDny>0Me8j~YRAGIOqQ66lsX?iCN|jjd0K4z@ZbTe)p`MA;oOpPFL}RtjkiP& zH$00UB$QXo0Yxxx1V9>ej9Pcu4;$FwnGEAwn!_d%S-|zR8}^o-XPhmZtsP0qvGG#h zST^2%HDnBM@P>3IO02A3fN)3F_w5Q7zv&5w2c}w+y$Z`67b?0W+elpnD)5C2&{a}x z-hfQEV;J%KDk<&_d5)SN9)lpQ3ros5Zab?(niCL+Z82E>ao5iEBdDw&-wQ-t@LxUW zK-YGpsparbhK>;RxLve&kG4_>P-XDaAbsKR!|2XG^|;9X5rhy{CffKHJ0y6Ts;REy z6?^~eh4&i0iHH+MWxdBj8LqV&=f^cW)4nqli12S>2U0)tJL++oQ$n#%zsG;qS2i}j zz0E9W5VDx!aazm9*`i(?9aWYIhwm2b+I6!Qlol@udj8YPU#DeDNyaGEZ4O$(&li+s zq$bdvBqzr>Z@>Vu7K57$+qqBipIpw*DZ1t7_ZV7Figf{5dVFz8B9R-3dU=^d3{_89 zks7O~X$_Xm;MIqG$McfE5!9Hx zngHOXdmT;?9Zvx3MSx~D&d~c)Q`m>Qli$5t^Iv}9u9Nx*Od_5Lm@x4qf@nav`>j|I z9(I+$;kn@t)Qsc~HEz-oN}W$vo|n6fx(wCh!bq#bVBU#J625DKQEt6kW$=NK@&u>B zWFD4RA_&oJTLk=itNtm4#$OyYfuDJ<&VB@Qj4l+pDcYE`@ZeV4%nDi@>{8=Eu(h+g z6@7AA;E(JltZs)E#k$wsO`6@wecy9`0$NNjJB>!i{mn@0+@yzRRH7FfUnN6(Hh%(v zX{FWqsV>!cimlCR%*qg>tE3qa>CyKjq57=~p7d)ZGYj%=j*p*Xlz!&zd@_H!-=BgZ z3qU_>;uBR%K(V1zR0lo@qXd|$jr)AIPl`qz^=ZHRW&BqpK$E_m{lemy-*`h@tlPdt z)`nBTGYT%dkK@tl5$3El8w6cx?UFJBN9yXW3pJ=f`lK?x8yD_YenBnZany4&Idy?f zTg{VPzR+^No7|(AG*lPe2o@UQWBVEMjDoM#w=Ei1UbD5>bR1~6Exh0+mbHt>@(!kR zHTe^e@O*E7-vs&RXPsOuuOascvPoD&^7uS(zpR)8>VW@q&KB)Q`eGgzR(#?sfW{+< z%MFxf_YQfz4Pq=PQ3W2a{1f>H{DtuVPkfS`1_Q$c(OlcCco={T@$v~=veoX7FQ3UN za%jukc-8$;%VIi9JNb{6+2WCk>ZGVc>AeDF_rx5dINu|H!!2-G4&QG(&T3DUxEp`y zpBf|_pomcAwfMU|X_w(zRWnhlRdI}G%=NQm80-)dmGV#Gu{&q+?+;~6pg65P*ROVd zXV3lfrXj>7i|K$YybZwjD~gxz#1fQ#ib>W0VgT~*Rt#)kA3!a(UBa8E+xnoEbY8Ox z$cNd8{MM&%`Xm=l0z?h&#nBb`KxbeP*=UoUDszibt8aBm50HnF>2b%K;^?JMF3v0( zFxN}qFD=bC*?Te?sa3#p2oQ!-3Eo z`l(Utl?3o}c}uh!!t7AIuW9%*lR|!=B8)zV|zGxx*KuvAJUUD&uYm zzi3@)k3)Fv9REjL_mI`|(IdxKJocMVlI>gU3joIsfFIaCZPfoFq;jzd{m67>3HM1@ z)M+O|$}sl-XM@gQLW6%v?`0ln^S`jMwFUsvmdX;vjAW|&U5c9Ll)~OVHMgIdEUg!2 z!vNh^Lw88LkqPSD6S8Bwm-9|&u0}xha%X&|w!~NNvHM-h?c~(#AGr#bW*bp}^t{Ud zsRYI~eu7LtvuI**!Y9%6ikCnJbVX7?BJwSn-9_!c^8&O=5#-Usdr`-lkU@`B{S^QQ zEnDx(`cZS(kt(o+#Si%@0QESKz}_!^3#FzWS*W!?MZ%z3OyMd!Hu=5q@=yX_=Jf2< zi>VjZX&rH(bTDWRr-LNoIRS9u%RWp5{THeLx;RYyEgUk<&P}_)Rj;SByhjfvA}vcM z`AQ4$-)Qik`c$vLe7uuV#8KmW}qbv?O_B%HKz9n0k-2wwbGqcBu4KNGH50iCl^$&8aLcRUqy++1%wQJ*`R)WY_NDX zpXTzyXK&(15P3fUNvMDq`(U3*h(hRzo}K)^M;H~kix?I$8Eq?KatV(%0t#~aqW9jM$c1N!m;=wY|{k_xoflnC9-(MNB)Bvhpb zGg$p!?S1z@)&KwZ>l_EiUWX(r8OIhO*)p<12xV^>B`X=7QCE(Vf9F3jRRG%`JFy=PZIY}SKmsInjL`E~a~9axDYrQA ze$TEUn}6P~mu;dJ?=I)K^(*hWeV!9*~-EmsdR(e*F6E^+wWA+0m+v)2YXmjo31QY(QLcz6in3*%Z_zz7j zH~ciOp$87l0Y8Q6-4LN0tzs<`_tQS|q4~tocbt@crrtZ~h~0RZG907YIMs$9(M0W1 zU`k*j6+)-+8%_9tNL3)mr&ADqs(+0M4=?>OT#q>(1%CHS*cj!Ai8YG`>0&Y)%b}}6 z%ftCtqIw2vLPcM;=YTS7ZH>Ba?%Q#p&Uqk+-f1)&hXqXF$JYhu$#H0g@t$=pjzWO@ zu@XfIN1E~qVb6_9RcB$3^42OaHL6bnL4zE$X}|_iXlSRO-Z1++6Mq5Q>wR}uE1n0p zb-n6!*0k1(FHG5@Y#V;re|MNe;A?P&80v6R^jHWX*7&{2BH*L9l$6Kgb3c!7@zTMk zr5Yw*Ll76VY6cBL?8s>e=sHY-@2xF?>_*Ugl?(sFanb9cD@k-GuzZ1jGPoYgMeA;N zDGH6Qv?B8#&WusNMvvSZH$0}0oePwN=N!&r?!nj>I)xWVrmdlud<+plP5RSzMgE@6 
z8n4PCuNkkTeWIwP#xS4^7{!~&A8A-5KfMp`doU!+%XY(HF7;i^-cy(6Kh~rJMr5-e zJduo1ue~oz^ZylEv=Y`g{~IcnvgQ-8BKv_O97krr{sMQtTKqDbjfnfKTK^-P7#_|r zMOuv49Rke!N)IinWLeE~GHUqFl-A$fRL}l}sAF*a%s(Jj&EHT81k~Fv+V3)RCtBzL zZc0Z)UfGir9fs)jXlzQfd52;b>?7y1q08E;cP(C!uA(slkOIv#DfCwR>K(?Y1Q~~g z7ylUfXiUX|_GpP()LF);nHrX%6NhuhGBQNq48Ocowl#Di3!uFkvqLWrAFs^+dh<4Gqw2SJxPP%elm2^C;^c_)vpv*(zb>f&kJ?2G zC2}n<|Itg7ng`9kThvz+sqjWnms5%fxaN9w?V9;N>sCP|5LttgbNbCh8OC-Hr=@=X zI#fpjiN>r8mYH|d>IkxqI~=E&riTik?Lw&w6dF5y4Q0$n(nWkok{7Uys0B>V9}*AG zGDZ{`et8fj=IR1JOQteVS{SMp?QkD$T4eCVu;n~vt>{8LJ}4&-aG3Z z{5#qOMr@K{2WA5%^2$A2)dE;2n z(3`&t$4~{^vx0K8-k=eMCb+2}dnrmQ=p5G)6^7@MeNvB7#knMVvUcj#9(f@~s?5 zp>6CS)tf?NR^^Yu+p1pVphCIYj;M>h@ir2_@mUgB?p5dqwP46ao6Q zjB$M7qoivjS?CK3rAK?ykI>n83Zm4XN%Lw)woQ_U-VBG1kv2+#$ESWEK-*Yp!V}oG zJmxdx4BJLx1pCjWklEV3AGuFdRC9&htHm_I3Ysshk0b+?b~GzW#j0yIhg!x>ftV3O zkyh{vg{e4xTS|w-)~xr*@J0QOmuv!MF)ijRe>m~v2@>=ETzo`SGxP2fSm*5N+Z$>} zwsGKSjwatzP4|u@N1K~o3x`gZHabblBjr*+Qa2d1CIGGIN~NNre=me|L8JQGDZWIX zo4393(&WVUcD!Nik)l(oa)0;f@^F0ifgN1A>F_AlKU#J!L&P(~r2aH?v7$C`nucQn>b z9(q|^4rPuDz_)QyJGB}N%4zdRy$Ty)#wdGbjfy4P0h(YD`4<(7DQIr)e=D40-JskREOk~#(4ZjW#y#Rni zOvzWcE9d@*KraKYgGv?)L{j?N9TubaVFrjr_3>z}JIw7TIe%s!p0A1=BeF^^+0f*X zu!_TxjYCUgB?vQWTghtgXu@A<(=Kz-OtV05IHGyX(7S*FIX4ihEgo-Za%n%Zx&ERh zlf2(T9dT|QnV3eow~oV1HHN5yhu%v9IvjsB?kZf88YxpV4CO=SuBI=;d4AZHAqE&q z>kpGjS@jn#836d}Ry$-yT*w6$?4|1$?=>{@MHk=x$7{4wB<)(THx+a~?=?S$ z=B`#;SX4R^I&a=-Hq!Fg_cXViuK~}{pX(jF&Wb63DUjQ`u?t9hWS#kJ7g_Y`q}5liZ;ECj3K)i|*-m7=}8;ZuK|jS=I(^@bZ~_DIIK@Hn9PXDRV@H}t1l0A1yaw8nG5S}v@6 zv+kVw%KWUY&w8=(Smo1D8Xe+1)flFw?u83FxqZwm>rjoM_!(T46@bGORvt39; zzH`B4*UsvjarS4v7$}^(G0dXTPMwEI7iU-z^D9_)tj##UL6uvKpx1`{ImJYEYatA& zu7?lhBtHn6_DShP>M#TpEv3uT<6|NA9}j=shP2FI?9_(kRN9cSW$SZK%$K-Es?;+? zwAnwR^ZYs8@9bpcwSR__p`0R&A)?POoF!^$FHDa9T^|U*moDN%o!18~>8w25k&&ub z+<53kv6h*g?>4g)u2kK#zJtPhLJLqI;)VporqjeWhkLyt(qEN0`DzVT#?{oDp}e6G z0H6G|9<4k<5wr7DS;162JAndmkgnZbXs#BIc&1~W#|D_D2pYy0bIvDSR6WhkVS}?e zlRK_+V|)dnq=g6-XZSqgl%iD4O?B?u@NX7!5Ysdn6m!FbCuXf2+Kt znc&{U95kE$#@TGGNysTY`P12G=4ZJ{YVn^+=XWlLx?7y0G~ga};a_p=K0&oN`D?GW zy(q3&X;L2WsMMuQ2_vnj$)Q+ z`2wRba0+Rn8>V2iABeu#5$=O8{N^VnxpPg9qKRyFimnP9g6O9WSL=FQ| zDgtT2QxcqzEu^cTdfD#d*}yalM|kEOo2!s8(b2Z__fgZ63%aP{z#>f z`uLT4K^*#wrWFwo*~0grZAOgDcl{O!VvE;SS2a0+sNt*UFi&9nwD{S~3ntTIsM^2- zb3qZC8Zn~&ho)PHY+bWD^5_v5WvthnC%8Bz2>#2P=SH0XS>7U3Ri+iqw3hdR@7SyF z-8$y>#p1*#KO--oDZ6!RjzT%1bdWQBoq6Y>HDX3Dn-fn3ZBh^P+Q82W6#Z~vjDq)- zA6+8nMC{u$&o|1_WjB~0MBv|6bX&b@MAWl`k#*!EfPj}(KYT%*dmgdBx$y#GrL)Bl zrhg-*L$ukeLrZzRM}}O$=HxQmFwokVO0Pa{a%WZtKwnE2%>zd4B*^W&&d4YOTZ0zC z{)F}i8auB*D~MGEj1BB;`|Uen!w;EYlUK*QG(I(imZ}p>L=R6s6he%2m8RsRvr5oMrW(3UxS5m`+dXrET98**8Y-IoJ72@AB^v_Rkz-J*5pC4*~m=RHTOm1>FrnPKdd%$Sbnt7TGq=+F?{UbXCjt=z@ z6A8;#)PEPZsQJ(H@raLf({%kkTjZ;kA=w@JdM3OUJpwjXi5;E^dAlHx%NiMv#c00H)`uFsYL~e{Ieu@z)YZMX( zJ~<>cEL^E89D7h9X}Kc*fsrgw8A+yX5zL8Zp{`5c&qJYZR+=a0uR#R3?LN=rR!D}A zqU;~7_UUkjaW z^VTjeRP{9IrTjQ`Uw3;mD#goeg>1EQ>suhC8TjYN`rWa$&nm{Ijr(t?+7@e_YzPEa z_7yQ`5Vor7`1+5ffv+`zx|*PshH@+WH{A5oD$MD*}!Tml^HP@0&(ga$E$vd zMc~h}Nx_k@ej@F^K(%pfVv&Z0{cQksL$#anMAy9qcrc zI|l_5K0F)SioUilM{WEBoXRc4Mc8W4%BTIQH75u(5pHoe-VV`@j|lo|aO-+npKBxx zsPJjyyhl+g77VupFhS25)}nv0XED%{l=t8MBs1mvR>9(7k&yULw+HEJrzF-Kylx8O z%OzGP3qs1qNg!E_e+3uq<{no$CmSkevqI?$9htNIH2-gok-aytC5r6f+p#Bh9y|@= zB*)~u4}$H#+l?ubOV!7Q(+Mc$F5SHAvbjLJ902`(6O^cYIW4%WKZfQ^UdGJN)B_gT zHXHcB)4WV2BAUkPoU{#llRf)2HX=(ddCKK-T8y7#A%wKd{W>Pa{?w7}ToxhtqVZ%szW7!( z;b(B=%em~rq@vaFzO@M!%2KZ3Z4$jaqAScjDe;a08))8l-n{X~3SoM9KQ!Brpk}lm z$!i46X72bU8x7zUQ95WIoHq3MY~f9YsQy)vz@_{;iaF=^qN-s$lm=88RN{zLtCi+! 
zkik{{;^4i@5Z6y7I$@L%a(2EVJwYnn5Vc88NwU*?0(xG4NYlxFPXTV7_bmEAp|)76 zJ;grot9c?=%Bxp#H5cRB?y*p+Rfp9?AM_#L_Zz^1-xu(Rdb&}#bLBd^6BJihnRLc9 z$L2x>!+5UPuGRMqeNc^#ykozhCH*^l%S;g{_#L={-q#gGOPh$xlUqTzPBB(+lIC-d zq*vGh#*Bm?=7Gz?v$G zJ<(=kE?cEMJ7E{jV|`v&^osj9A0W7?1f4l}N%i*goiFvItK{7k(5+pMC+@J|yqwGB zWQSqlZ-gbTr#9*{7Jr72Hq?AO!YN~3smgWg8oAC!qf|@5&c0MQOBR=((E6G5A8rSV zvcjX+etAGIua0h8lvL6vY28|Fbs@9LdAOHUKSanGexck4@4&nWCVNk_IQuBBZH{}Y z%BK8a4LM_FDUv$iaE z^`t+>KJG3##o6-PAGpEwQ!3uOpUGLQ3_DQm3cB*}1|F+R|3ExqT5yr5} zXLtb@A?sL*?Jkv{<3X)`A#|+U8BdzO%uOX^s~IValVL-0bfS40-mE>qgskgUy!rTx z_`TLMtrVGne77qPP4H&&SDh)SnfenkZBlSOG8kI8ZcGf|C{^$*QLNX$*=7Z;Vla23 z4>GP###l^+IKNxo$)pbnDcL&J^(u^~$DmY}l_ZXwr#7YI9xUp#Z)&y%qr>rcj9sXP ziqo7AJXK%&dy%c$m2g^hZ$Fg+PN;vsZt#wBiPi78-Ks5W*dTlwlU5*B zOOC>y;}xwCD)sHsseZlp2|XVjr9uZIFJ^!%k1}?gSMuw9!R7Jb+;=0|`HkDZ7wVs; zZUr{WmsMJnvqcc`0fLua9c^!1CM-pIg3Q~w7xYtW$x6jN7!mTa%L#wjpC0_XdBNf9 z>p4ymyT&rNrl%DTzOQ>y+30bv$-eNs@^*l27{vsQ&4G63{D45pY6o|T=;lXn!S89Llipg&x-)= zr)YN`TEK(CjGF;Cg6*LIosVX^Ud!BhX|ULa)KKttaz4gFmi7C=*7XU`zOwe%cBDq> zABRQG&>K4gf$cloF@|3%vrFzp#)6B>GqK3b$7wT^dS1%Mr-x|6^alf5M5N=zIi(L1 zQm6UpP0$S=d+)nde!1bgL#fe6e=S;OQwp!q&FZrs??9Qw5c8}RKcqk^%V&&tAHjW% z<#37oVa|SPYS|U%c&tmuj00C)r{>$>vilQs`v5f}>ki0z{)NSD#8wp@BDN;04=K>5A!Kp+jpnSl zh|_F+MPejD)<0+nO1OiF05w}p&dcbWy!ZCqxu2??WJbk?fkZ66&z-8H-tX1zd?PbR zp>ZawW>#vfp!Kthxf#F}$drdEn`b&5%H;C|;`GtiAeB*x&|~Qasx%fBg;FPg)2$z^HN&RhUTYuxcdRE!y!Y;0`ngt+GCTC%u#X`X_*KHm{9(XuoRNY#BZ<8P@5WN}35asG`z!dcO z4+JVQT;e@w*^S9BM(7;5CPAVN>}FN(i?My;oBG;Z01&0Bn^3(e(r-~re|RzsJGLqO z{mGp5s@{I;#iZZNq}^jC^T?YAUAa)i(9E7`^gt5CO>tJ(ZgHyPqL6vv+>!?uGBW;fyAcljBko$MKbpE_Z&k%6x%GO)~6?0zset18l8T-Q zcVM7L!IHhr7Omg@u8VTPeprHK=3XZ%YSJU8HvD?S&W$Z{-_NtJ_>0#bSdbS3M`MUX zREEr(p;>6Ig6JHNpcF)%{(kD-H8$LX%kq7gt|hU2(4+R&wbw~C{7MjsyJQ`&@m{4N zeQH*Lq53G4kK>wam$NF~DOY=U{-c}NU_jbB1?0FQfT}-Ep!JzVqEocdLaSW&(+H8^ z1fQ1A=fD||!l=`;d@WConICwE^W>Kj94mTVGAY=XDVh4p{MBh4{GRJQ2y~kql&*Q{ z2eqFA_^wXVt4nG#vFxosFkX~4g)nHT+0zYZvK~(Gx(0bKIeQ zqe{egCT!r~l4WMuULZT}VS;9~g*h60>chR6Sxp8}LHClq+B+KN&bndT7Q8wbLNoQz z=eVG%%JN_qJoYlB-IeduuZ__;tR1NK`PZ;Vi_qd%;E)<@g3|qdrwdf|y|%UNTS~kl zTl3;XT{RiBm@-E;dvUB)23)Uz3sv>6 zT=`1D9YmFXIK^Jj$Jh@yplUM7@W;7&dU*U0wV@{^oxWtGDRtCa^U!?O(d(3Yi=|t= zVP!yuNFQd*C-%ieThoKSo6O(pZS-&vA?f6{2JZL+sM^K6?FVOT-6O49=FrHe2itbu-#a3@Esl<*HA8|Q? 
z4kcT)R}M3kRh2%@l&NKFka<)nx$PLK#ADHm>tv($!(NGNtCzoVqrwA!9J`9nNnR&^ zu9`W|+fm+4h{*xJKrrf?OBb_3afm_ZpLj}Qk?)9NbO}VO*zXi|=^P)isra;vtK)+6 zJMTC^-|Yp*4-yqo?lq3G8T#m%NRg_%`c4GSao{cRqh5|#)iY!jA!&(HhIt%_q?Mn% zqaSnUgisZtnM;!N`By<3M?W)LD=&}Z=poTl%_^h*hL-^)>`z9a26*2W2G9_0&BFF- zQr%@`;AFg(6gf57sj`P#=dZw0>(oj$7cH)2f8fi&|MZ6xRx79+_C$Yuw6#?Jx%BO- zf=Hk)Tfnzyn8c4bK~>QxbLH1%xn|k3Pe7tM-*)U_)LCl?Qm)$4Oz{Z4_T^Nt`u`l&D9=Hg?#dTnxjnOPd3PF=h ztNbL}Y?xTN*_Y2-b3SZ+UjcbnA`LIjFo`){2-pBr1jc-0rw8+D4WsWrI3Nx2&yHulmW4(4Oz==iU>Y#~r%*cU5KdO6OeonAFiZ6U?)e zW1Ws6AQ;(|!X%b<={4NGBA(iIrk8rj^BARilMLtxT7lj^KW$jz-NIeaXW3rIxY=9q zX@9jFvZ8UlL;wl-389fWAVzek4bI0(p@R%&q zl!_!m$ONA<&{w=zVH!|ZJ|;k<>8*NE5v`(t9LR|Iz3s0_-{?|3jT5Xr(cUfn+{Kcl zWzg8!$IjMZulkWaa&WM!E`Njivi{nA=V@w*Yn0&(U2Arqix%&c5-zUkt?fK++;Isy z!{L%`(8&JP6S2Hi7a#iCJ8)L;v{cM#VQSJ-#!G9Vc&Y|ZC5Q55t>FA8YkX*y)C92; z7c;g;jmZrpPg9PrOFK8+iq#Qw+{teC|t!57txA4J~W1ExH7n&$QP4sTNWT1 z)wicGww)T=SibX=pie0r>b;}K;k@gpxG8LfD?e{1WGvtGm_XAdvaTzmii;$0a9&=u z=($I;C&xVO^JzuSlj}pu>4u6YW#p^jXP`2YRgJ|eUxM9_Q=#@wTz=`Zx5QUj{p7eb zrGM+}Tpc<~47a#JDwggRNIR=@UxLS^;vb%R9>Th3TQ*nx)hmgoxrpt}1xy?B+wCn< ziLq#7bzL-*toAlg3Ghi!wJqF=`Re1JeoQKYoVzPSz6brC=k3rYwaMFU^%6y08_wc7 zY-LqVw+m&~)}W%dTaf_m4u#D42c4NYz3J9DVY=R1XWQ2XuK^5?BGY~jX^2^gw35NP*eCe_17H82}ff^ewjz@97a!Xb}nEIka0bNWdPY6|qqQ zAj3BE1OoxBC4N+LfavS|`c?Gv-z1zjF%j)0mGk@WS1`yhVrw*N(ZAFL;!s!-Vxleb z-*4~|leD~q%-zFE0dXk6Zvrk+mTw{M@IAy=A;C}7@Rv;gnG8@iO<-65U(6d&+zbPw zQl`4n97do=`}?2&RssrW(!)3qtHSP!rUr&GCy7wrzu z?mx#?&sGCz=;FA@5s9Nb^#AzJ|B-tC?|~1eQb-S0E996X{SWK?zrOYVm;Qg}TM float: + """Write statistics based on predicted results and reference transcripts for SURT + multi-talker ASR systems. The difference between this and the `write_error_stats` + is that this function finds the optimal speaker-agnostic WER using the ``meeteval`` + toolkit. + + Args: + f: File to write the statistics to. + test_set_name: Name of the test set. + results: List of tuples containing the utterance ID and the predicted + transcript. + enable_log: Whether to enable logging. + num_channels: Number of output channels/branches. Defaults to 2. + Returns: + Return None. 
+ """ + from meeteval.wer import wer + + subs: Dict[Tuple[str, str], int] = defaultdict(int) + ins: Dict[str, int] = defaultdict(int) + dels: Dict[str, int] = defaultdict(int) + ref_lens: List[int] = [] + + print( + "Search below for sections starting with PER-UTT DETAILS:, " + "SUBSTITUTIONS:, DELETIONS:, INSERTIONS:, PER-WORD STATS:", + file=f, + ) + + print("", file=f) + print("PER-UTT DETAILS: corr or (ref->hyp) ", file=f) + + # `words` stores counts per word, as follows: + # corr, ref_sub, hyp_sub, ins, dels + words: Dict[str, List[int]] = defaultdict(lambda: [0, 0, 0, 0, 0]) + num_corr = 0 + ERR = "*" + for cut_id, ref, hyp in results: + # First compute the optimal assignment of references to output channels + orc_wer = wer.orc_word_error_rate(ref, hyp) + assignment = orc_wer.assignment + refs = [[] for _ in range(num_channels)] + # Assign references to channels + for i, ref_text in zip(assignment, ref): + refs[i] += ref_text.split() + hyps = [hyp_text.split() for hyp_text in hyp] + # Now compute the WER for each channel + for ref_c, hyp_c in zip(refs, hyps): + ref_lens.append(len(ref_c)) + ali = kaldialign.align(ref_c, hyp_c, ERR) + for ref_word, hyp_word in ali: + if ref_word == ERR: + ins[hyp_word] += 1 + words[hyp_word][3] += 1 + elif hyp_word == ERR: + dels[ref_word] += 1 + words[ref_word][4] += 1 + elif hyp_word != ref_word: + subs[(ref_word, hyp_word)] += 1 + words[ref_word][1] += 1 + words[hyp_word][2] += 1 + else: + words[ref_word][0] += 1 + num_corr += 1 + combine_successive_errors = True + if combine_successive_errors: + ali = [[[x], [y]] for x, y in ali] + for i in range(len(ali) - 1): + if ali[i][0] != ali[i][1] and ali[i + 1][0] != ali[i + 1][1]: + ali[i + 1][0] = ali[i][0] + ali[i + 1][0] + ali[i + 1][1] = ali[i][1] + ali[i + 1][1] + ali[i] = [[], []] + ali = [ + [ + list(filter(lambda a: a != ERR, x)), + list(filter(lambda a: a != ERR, y)), + ] + for x, y in ali + ] + ali = list(filter(lambda x: x != [[], []], ali)) + ali = [ + [ + ERR if x == [] else " ".join(x), + ERR if y == [] else " ".join(y), + ] + for x, y in ali + ] + + print( + f"{cut_id}:\t" + + " ".join( + ( + ref_word + if ref_word == hyp_word + else f"({ref_word}->{hyp_word})" + for ref_word, hyp_word in ali + ) + ), + file=f, + ) + ref_len = sum(ref_lens) + sub_errs = sum(subs.values()) + ins_errs = sum(ins.values()) + del_errs = sum(dels.values()) + tot_errs = sub_errs + ins_errs + del_errs + tot_err_rate = "%.2f" % (100.0 * tot_errs / ref_len) + + if enable_log: + logging.info( + f"[{test_set_name}] %WER {tot_errs / ref_len:.2%} " + f"[{tot_errs} / {ref_len}, {ins_errs} ins, " + f"{del_errs} del, {sub_errs} sub ]" + ) + + print(f"%WER = {tot_err_rate}", file=f) + print( + f"Errors: {ins_errs} insertions, {del_errs} deletions, " + f"{sub_errs} substitutions, over {ref_len} reference " + f"words ({num_corr} correct)", + file=f, + ) + + print("", file=f) + print("SUBSTITUTIONS: count ref -> hyp", file=f) + + for count, (ref, hyp) in sorted([(v, k) for k, v in subs.items()], reverse=True): + print(f"{count} {ref} -> {hyp}", file=f) + + print("", file=f) + print("DELETIONS: count ref", file=f) + for count, ref in sorted([(v, k) for k, v in dels.items()], reverse=True): + print(f"{count} {ref}", file=f) + + print("", file=f) + print("INSERTIONS: count hyp", file=f) + for count, hyp in sorted([(v, k) for k, v in ins.items()], reverse=True): + print(f"{count} {hyp}", file=f) + + print("", file=f) + print("PER-WORD STATS: word corr tot_errs count_in_ref count_in_hyp", file=f) + for _, word, counts in sorted( + 
[(sum(v[1:]), k, v) for k, v in words.items()], reverse=True + ): + (corr, ref_sub, hyp_sub, ins, dels) = counts + tot_errs = ref_sub + hyp_sub + ins + dels + ref_count = corr + ref_sub + dels + hyp_count = corr + hyp_sub + ins + + print(f"{word} {corr} {tot_errs} {ref_count} {hyp_count}", file=f) + + print(f"%WER = {tot_err_rate}", file=f) + return float(tot_err_rate) class MetricsTracker(collections.defaultdict): From b8a17944e4a1f7a8b04830281affb0b97f26a100 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Wed, 5 Jul 2023 10:23:35 +0800 Subject: [PATCH 12/30] Fix zipformer CI test (#1164) --- .../ASR/pruned_transducer_stateless7_streaming/export.py | 4 ++++ .../pruned_transducer_stateless7_streaming/jit_pretrained.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless7_streaming/export.py b/egs/librispeech/ASR/pruned_transducer_stateless7_streaming/export.py index 5735ee692..c191b5bcc 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless7_streaming/export.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless7_streaming/export.py @@ -856,6 +856,10 @@ def main(): # Otherwise, one of its arguments is a ragged tensor and is not # torch scriptabe. model.__class__.forward = torch.jit.ignore(model.__class__.forward) + model.encoder.__class__.non_streaming_forward = model.encoder.__class__.forward + model.encoder.__class__.non_streaming_forward = torch.jit.export( + model.encoder.__class__.non_streaming_forward + ) model.encoder.__class__.forward = model.encoder.__class__.streaming_forward logging.info("Using torch.jit.script") model = torch.jit.script(model) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless7_streaming/jit_pretrained.py b/egs/librispeech/ASR/pruned_transducer_stateless7_streaming/jit_pretrained.py index 4fd5e1820..c8301b2da 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless7_streaming/jit_pretrained.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless7_streaming/jit_pretrained.py @@ -252,7 +252,7 @@ def main(): feature_lengths = torch.tensor(feature_lengths, device=device) - encoder_out, encoder_out_lens = model.encoder( + encoder_out, encoder_out_lens = model.encoder.non_streaming_forward( x=features, x_lens=feature_lengths, ) From 130ad0319d93657690687f1e292cc7658ff7e779 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Wed, 5 Jul 2023 10:38:29 +0800 Subject: [PATCH 13/30] Fix CI test for zipformer CTC (#1165) --- egs/librispeech/ASR/zipformer/jit_pretrained_ctc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/egs/librispeech/ASR/zipformer/jit_pretrained_ctc.py b/egs/librispeech/ASR/zipformer/jit_pretrained_ctc.py index 14faeedd1..904d8cd76 100755 --- a/egs/librispeech/ASR/zipformer/jit_pretrained_ctc.py +++ b/egs/librispeech/ASR/zipformer/jit_pretrained_ctc.py @@ -264,7 +264,7 @@ def main(): params.update(vars(args)) token_table = k2.SymbolTable.from_file(params.tokens) - params.vocab_size = num_tokens(token_table) + params.vocab_size = num_tokens(token_table) + 1 logging.info(f"{params}") From 6fd674312c1d87bd9fc888d623cb3e347ac019ff Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Wed, 5 Jul 2023 10:52:34 +0800 Subject: [PATCH 14/30] Fix failed CI tests (#1166) --- .github/workflows/run-aishell-2022-06-20.yml | 4 ++-- .github/workflows/run-gigaspeech-2022-05-13.yml | 4 ++-- .github/workflows/run-librispeech-2022-03-12.yml | 4 ++-- .github/workflows/run-librispeech-2022-04-29.yml | 6 +++--- .github/workflows/run-librispeech-2022-05-13.yml | 4 ++-- 
.github/workflows/run-librispeech-2022-11-11-stateless7.yml | 2 +- .github/workflows/run-librispeech-2022-11-14-stateless8.yml | 2 +- .../workflows/run-librispeech-2022-12-01-stateless7-ctc.yml | 2 +- .../workflows/run-librispeech-2022-12-08-zipformer-mmi.yml | 2 +- .../run-librispeech-2022-12-15-stateless7-ctc-bs.yml | 2 +- .../run-librispeech-2022-12-29-stateless7-streaming.yml | 2 +- .../workflows/run-librispeech-conformer-ctc3-2022-11-28.yml | 2 +- ...un-librispeech-lstm-transducer-stateless2-2022-09-03.yml | 4 ++-- ...-librispeech-pruned-transducer-stateless3-2022-05-13.yml | 4 ++-- ...brispeech-streaming-transducer-stateless2-2022-06-26.yml | 4 ++-- .../run-librispeech-streaming-zipformer-2023-05-18.yml | 2 +- .../run-librispeech-transducer-stateless2-2022-04-19.yml | 4 ++-- .github/workflows/run-librispeech-zipformer-2023-05-18.yml | 2 +- .../workflows/run-librispeech-zipformer-ctc-2023-06-14.yml | 2 +- .github/workflows/run-pretrained-conformer-ctc.yml | 2 +- ...run-pretrained-transducer-stateless-librispeech-100h.yml | 4 ++-- ...ined-transducer-stateless-librispeech-multi-datasets.yml | 4 ++-- ...n-pretrained-transducer-stateless-modified-2-aishell.yml | 2 +- ...run-pretrained-transducer-stateless-modified-aishell.yml | 2 +- .github/workflows/run-pretrained-transducer-stateless.yml | 4 ++-- .github/workflows/run-pretrained-transducer.yml | 2 +- .../run-wenetspeech-pruned-transducer-stateless2.yml | 2 +- .github/workflows/run-yesno-recipe.yml | 2 +- 28 files changed, 41 insertions(+), 41 deletions(-) diff --git a/.github/workflows/run-aishell-2022-06-20.yml b/.github/workflows/run-aishell-2022-06-20.yml index c46cea0f6..d14196f38 100644 --- a/.github/workflows/run-aishell-2022-06-20.yml +++ b/.github/workflows/run-aishell-2022-06-20.yml @@ -44,7 +44,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false @@ -119,5 +119,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: aishell-torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-pruned_transducer_stateless3-2022-06-20 + name: aishell-torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless3-2022-06-20 path: egs/aishell/ASR/pruned_transducer_stateless3/exp/ diff --git a/.github/workflows/run-gigaspeech-2022-05-13.yml b/.github/workflows/run-gigaspeech-2022-05-13.yml index f8ee25cc4..0e47f7538 100644 --- a/.github/workflows/run-gigaspeech-2022-05-13.yml +++ b/.github/workflows/run-gigaspeech-2022-05-13.yml @@ -43,7 +43,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false @@ -122,5 +122,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-gigaspeech-pruned_transducer_stateless2-2022-05-12 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-gigaspeech-pruned_transducer_stateless2-2022-05-12 path: egs/gigaspeech/ASR/pruned_transducer_stateless2/exp/ diff --git a/.github/workflows/run-librispeech-2022-03-12.yml b/.github/workflows/run-librispeech-2022-03-12.yml index d42202b79..3edbe43ec 100644 --- a/.github/workflows/run-librispeech-2022-03-12.yml +++ 
b/.github/workflows/run-librispeech-2022-03-12.yml @@ -43,7 +43,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false @@ -155,5 +155,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-pruned_transducer_stateless-2022-03-12 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless-2022-03-12 path: egs/librispeech/ASR/pruned_transducer_stateless/exp/ diff --git a/.github/workflows/run-librispeech-2022-04-29.yml b/.github/workflows/run-librispeech-2022-04-29.yml index f42c8f27a..bb44a073b 100644 --- a/.github/workflows/run-librispeech-2022-04-29.yml +++ b/.github/workflows/run-librispeech-2022-04-29.yml @@ -43,7 +43,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false @@ -174,12 +174,12 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-pruned_transducer_stateless2-2022-04-29 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless2-2022-04-29 path: egs/librispeech/ASR/pruned_transducer_stateless2/exp/ - name: Upload decoding results for pruned_transducer_stateless3 uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-pruned_transducer_stateless3-2022-04-29 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless3-2022-04-29 path: egs/librispeech/ASR/pruned_transducer_stateless3/exp/ diff --git a/.github/workflows/run-librispeech-2022-05-13.yml b/.github/workflows/run-librispeech-2022-05-13.yml index 1fbd96157..e7b53b21c 100644 --- a/.github/workflows/run-librispeech-2022-05-13.yml +++ b/.github/workflows/run-librispeech-2022-05-13.yml @@ -43,7 +43,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false @@ -155,5 +155,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-pruned_transducer_stateless5-2022-05-13 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless5-2022-05-13 path: egs/librispeech/ASR/pruned_transducer_stateless5/exp/ diff --git a/.github/workflows/run-librispeech-2022-11-11-stateless7.yml b/.github/workflows/run-librispeech-2022-11-11-stateless7.yml index 596596bd9..7e378c9a1 100644 --- a/.github/workflows/run-librispeech-2022-11-11-stateless7.yml +++ b/.github/workflows/run-librispeech-2022-11-11-stateless7.yml @@ -155,5 +155,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-pruned_transducer_stateless7-2022-11-11 + name: torch-${{ matrix.torch }}-python-${{ 
matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless7-2022-11-11 path: egs/librispeech/ASR/pruned_transducer_stateless7/exp/ diff --git a/.github/workflows/run-librispeech-2022-11-14-stateless8.yml b/.github/workflows/run-librispeech-2022-11-14-stateless8.yml index dca7d6d25..a2c1a0ad6 100644 --- a/.github/workflows/run-librispeech-2022-11-14-stateless8.yml +++ b/.github/workflows/run-librispeech-2022-11-14-stateless8.yml @@ -155,5 +155,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-pruned_transducer_stateless8-2022-11-14 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless8-2022-11-14 path: egs/librispeech/ASR/pruned_transducer_stateless8/exp/ diff --git a/.github/workflows/run-librispeech-2022-12-01-stateless7-ctc.yml b/.github/workflows/run-librispeech-2022-12-01-stateless7-ctc.yml index cd41e988e..500ab1736 100644 --- a/.github/workflows/run-librispeech-2022-12-01-stateless7-ctc.yml +++ b/.github/workflows/run-librispeech-2022-12-01-stateless7-ctc.yml @@ -159,5 +159,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-pruned_transducer_stateless7-ctc-2022-12-01 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless7-ctc-2022-12-01 path: egs/librispeech/ASR/pruned_transducer_stateless7_ctc/exp/ diff --git a/.github/workflows/run-librispeech-2022-12-08-zipformer-mmi.yml b/.github/workflows/run-librispeech-2022-12-08-zipformer-mmi.yml index 91242c401..1a7f9f594 100644 --- a/.github/workflows/run-librispeech-2022-12-08-zipformer-mmi.yml +++ b/.github/workflows/run-librispeech-2022-12-08-zipformer-mmi.yml @@ -163,5 +163,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-zipformer_mmi-2022-12-08 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-zipformer_mmi-2022-12-08 path: egs/librispeech/ASR/zipformer_mmi/exp/ diff --git a/.github/workflows/run-librispeech-2022-12-15-stateless7-ctc-bs.yml b/.github/workflows/run-librispeech-2022-12-15-stateless7-ctc-bs.yml index e0130a636..40a742988 100644 --- a/.github/workflows/run-librispeech-2022-12-15-stateless7-ctc-bs.yml +++ b/.github/workflows/run-librispeech-2022-12-15-stateless7-ctc-bs.yml @@ -159,5 +159,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-pruned_transducer_stateless7-ctc-bs-2022-12-15 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless7-ctc-bs-2022-12-15 path: egs/librispeech/ASR/pruned_transducer_stateless7_ctc_bs/exp/ diff --git a/.github/workflows/run-librispeech-2022-12-29-stateless7-streaming.yml b/.github/workflows/run-librispeech-2022-12-29-stateless7-streaming.yml index 8490a62fc..68014e20c 100644 --- a/.github/workflows/run-librispeech-2022-12-29-stateless7-streaming.yml +++ 
b/.github/workflows/run-librispeech-2022-12-29-stateless7-streaming.yml @@ -168,5 +168,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-pruned_transducer_stateless7-streaming-2022-12-29 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless7-streaming-2022-12-29 path: egs/librispeech/ASR/pruned_transducer_stateless7_streaming/exp/ diff --git a/.github/workflows/run-librispeech-conformer-ctc3-2022-11-28.yml b/.github/workflows/run-librispeech-conformer-ctc3-2022-11-28.yml index 40a37da57..905515dc4 100644 --- a/.github/workflows/run-librispeech-conformer-ctc3-2022-11-28.yml +++ b/.github/workflows/run-librispeech-conformer-ctc3-2022-11-28.yml @@ -151,5 +151,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-conformer_ctc3-2022-11-28 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-conformer_ctc3-2022-11-28 path: egs/librispeech/ASR/conformer_ctc3/exp/ diff --git a/.github/workflows/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml b/.github/workflows/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml index aba29d066..501fae38c 100644 --- a/.github/workflows/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml +++ b/.github/workflows/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml @@ -26,7 +26,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.8] fail-fast: false @@ -159,5 +159,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'shallow-fusion' || github.event.label.name == 'LODR' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-lstm_transducer_stateless2-2022-09-03 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-lstm_transducer_stateless2-2022-09-03 path: egs/librispeech/ASR/lstm_transducer_stateless2/exp/ diff --git a/.github/workflows/run-librispeech-pruned-transducer-stateless3-2022-05-13.yml b/.github/workflows/run-librispeech-pruned-transducer-stateless3-2022-05-13.yml index fd497601d..bf73d4f18 100644 --- a/.github/workflows/run-librispeech-pruned-transducer-stateless3-2022-05-13.yml +++ b/.github/workflows/run-librispeech-pruned-transducer-stateless3-2022-05-13.yml @@ -43,7 +43,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false @@ -153,5 +153,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-pruned_transducer_stateless3-2022-04-29 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless3-2022-04-29 path: egs/librispeech/ASR/pruned_transducer_stateless3/exp/ diff --git a/.github/workflows/run-librispeech-streaming-transducer-stateless2-2022-06-26.yml b/.github/workflows/run-librispeech-streaming-transducer-stateless2-2022-06-26.yml index 57fe5b999..6ea308468 100644 --- 
a/.github/workflows/run-librispeech-streaming-transducer-stateless2-2022-06-26.yml +++ b/.github/workflows/run-librispeech-streaming-transducer-stateless2-2022-06-26.yml @@ -43,7 +43,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false @@ -155,5 +155,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-pruned_transducer_stateless2-2022-06-26 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless2-2022-06-26 path: egs/librispeech/ASR/pruned_transducer_stateless2/exp/ diff --git a/.github/workflows/run-librispeech-streaming-zipformer-2023-05-18.yml b/.github/workflows/run-librispeech-streaming-zipformer-2023-05-18.yml index ed934d56d..5145fb43c 100644 --- a/.github/workflows/run-librispeech-streaming-zipformer-2023-05-18.yml +++ b/.github/workflows/run-librispeech-streaming-zipformer-2023-05-18.yml @@ -170,5 +170,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-zipformer-2022-11-11 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-zipformer-2022-11-11 path: egs/librispeech/ASR/zipformer/exp/ diff --git a/.github/workflows/run-librispeech-transducer-stateless2-2022-04-19.yml b/.github/workflows/run-librispeech-transducer-stateless2-2022-04-19.yml index 515122a66..9fe2f0389 100644 --- a/.github/workflows/run-librispeech-transducer-stateless2-2022-04-19.yml +++ b/.github/workflows/run-librispeech-transducer-stateless2-2022-04-19.yml @@ -43,7 +43,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false @@ -155,5 +155,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-transducer_stateless2-2022-04-19 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-transducer_stateless2-2022-04-19 path: egs/librispeech/ASR/transducer_stateless2/exp/ diff --git a/.github/workflows/run-librispeech-zipformer-2023-05-18.yml b/.github/workflows/run-librispeech-zipformer-2023-05-18.yml index 7ecf0d2a0..e9d235ad1 100644 --- a/.github/workflows/run-librispeech-zipformer-2023-05-18.yml +++ b/.github/workflows/run-librispeech-zipformer-2023-05-18.yml @@ -155,5 +155,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-zipformer-2022-11-11 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-zipformer-2022-11-11 path: egs/librispeech/ASR/zipformer/exp/ diff --git a/.github/workflows/run-librispeech-zipformer-ctc-2023-06-14.yml b/.github/workflows/run-librispeech-zipformer-ctc-2023-06-14.yml index 569ce48fc..48f0b1532 100644 --- a/.github/workflows/run-librispeech-zipformer-ctc-2023-06-14.yml +++ b/.github/workflows/run-librispeech-zipformer-ctc-2023-06-14.yml @@ -151,5 +151,5 @@ jobs: uses: actions/upload-artifact@v2 if: 
github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-zipformer-2022-11-11 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-zipformer-2022-11-11 path: egs/librispeech/ASR/zipformer/exp/ diff --git a/.github/workflows/run-pretrained-conformer-ctc.yml b/.github/workflows/run-pretrained-conformer-ctc.yml index 8aaea35f6..bcd326b9d 100644 --- a/.github/workflows/run-pretrained-conformer-ctc.yml +++ b/.github/workflows/run-pretrained-conformer-ctc.yml @@ -33,7 +33,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false diff --git a/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml b/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml index 03a1df48e..1e5b25f5c 100644 --- a/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml +++ b/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml @@ -42,7 +42,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false @@ -154,5 +154,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-transducer_stateless_multi_datasets-100h-2022-02-21 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-transducer_stateless_multi_datasets-100h-2022-02-21 path: egs/librispeech/ASR/transducer_stateless_multi_datasets/exp/ diff --git a/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml b/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml index 8da4ff56a..9063c0ed6 100644 --- a/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml +++ b/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml @@ -42,7 +42,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false @@ -154,5 +154,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-transducer_stateless_multi_datasets-100h-2022-03-01 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-transducer_stateless_multi_datasets-100h-2022-03-01 path: egs/librispeech/ASR/transducer_stateless_multi_datasets/exp/ diff --git a/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml b/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml index 0b3e70d77..2d24528d3 100644 --- a/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml +++ b/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml @@ -33,7 +33,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false diff --git a/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml b/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml index a6a59d339..761b26131 100644 
--- a/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml +++ b/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml @@ -33,7 +33,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false diff --git a/.github/workflows/run-pretrained-transducer-stateless.yml b/.github/workflows/run-pretrained-transducer-stateless.yml index 98d84bf96..e46b9a849 100644 --- a/.github/workflows/run-pretrained-transducer-stateless.yml +++ b/.github/workflows/run-pretrained-transducer-stateless.yml @@ -42,7 +42,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false @@ -154,5 +154,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-transducer_stateless-2022-02-07 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-transducer_stateless-2022-02-07 path: egs/librispeech/ASR/transducer_stateless/exp/ diff --git a/.github/workflows/run-pretrained-transducer.yml b/.github/workflows/run-pretrained-transducer.yml index 8c1a652e0..190e446bc 100644 --- a/.github/workflows/run-pretrained-transducer.yml +++ b/.github/workflows/run-pretrained-transducer.yml @@ -33,7 +33,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.7, 3.8, 3.9] fail-fast: false diff --git a/.github/workflows/run-wenetspeech-pruned-transducer-stateless2.yml b/.github/workflows/run-wenetspeech-pruned-transducer-stateless2.yml index 6c70c646b..319a5558a 100644 --- a/.github/workflows/run-wenetspeech-pruned-transducer-stateless2.yml +++ b/.github/workflows/run-wenetspeech-pruned-transducer-stateless2.yml @@ -33,7 +33,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-latest] python-version: [3.8] fail-fast: false diff --git a/.github/workflows/run-yesno-recipe.yml b/.github/workflows/run-yesno-recipe.yml index f997e634a..8a2c94829 100644 --- a/.github/workflows/run-yesno-recipe.yml +++ b/.github/workflows/run-yesno-recipe.yml @@ -33,7 +33,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - # os: [ubuntu-18.04, macos-10.15] + # os: [ubuntu-latest, macos-10.15] # TODO: enable macOS for CPU testing os: [ubuntu-latest] python-version: [3.8] From 11523c5b894f42ded965dcb974fef9a8a8122518 Mon Sep 17 00:00:00 2001 From: marcoyang1998 <45973641+marcoyang1998@users.noreply.github.com> Date: Thu, 6 Jul 2023 19:11:01 +0800 Subject: [PATCH 15/30] Shallow fusion & LODR documentation (#1142) * add shallow fusion documentation * add documentation for LODR * upload docs for LM rescoring --- docs/source/conf.py | 1 + .../decoding-with-langugage-models/LODR.rst | 184 +++++++++++++ .../decoding-with-langugage-models/index.rst | 12 + .../rescoring.rst | 252 ++++++++++++++++++ .../shallow-fusion.rst | 176 ++++++++++++ docs/source/index.rst | 5 + .../librispeech/distillation.rst | 8 +- .../pruned_transducer_stateless.rst | 18 +- .../recipes/Streaming-ASR/introduction.rst | 4 +- .../pruned_transducer_stateless.rst | 10 +- .../librispeech/zipformer_transducer.rst | 4 +- 11 files changed, 652 insertions(+), 22 deletions(-) create mode 100644 docs/source/decoding-with-langugage-models/LODR.rst create mode 100644 
docs/source/decoding-with-langugage-models/index.rst
 create mode 100644 docs/source/decoding-with-langugage-models/rescoring.rst
 create mode 100644 docs/source/decoding-with-langugage-models/shallow-fusion.rst
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 6901dec02..0ff3f801c 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -86,6 +86,7 @@ rst_epilog = """
 .. _git-lfs: https://git-lfs.com/
 .. _ncnn: https://github.com/tencent/ncnn
 .. _LibriSpeech: https://www.openslr.org/12
+.. _Gigaspeech: https://github.com/SpeechColab/GigaSpeech
 .. _musan: http://www.openslr.org/17/
 .. _ONNX: https://github.com/onnx/onnx
 .. _onnxruntime: https://github.com/microsoft/onnxruntime
diff --git a/docs/source/decoding-with-langugage-models/LODR.rst b/docs/source/decoding-with-langugage-models/LODR.rst
new file mode 100644
index 000000000..7ffa0c128
--- /dev/null
+++ b/docs/source/decoding-with-langugage-models/LODR.rst
@@ -0,0 +1,184 @@
+.. _LODR:
+
+LODR for RNN Transducer
+=======================
+
+
+As a type of E2E model, neural transducers are usually considered to have an internal
+language model, which learns language-level information from the training corpus.
+In real-life scenarios, there is often a mismatch between the training corpus and the target corpus.
+This mismatch can be a problem when decoding neural transducer models with an external language model,
+as the internal LM can act "against" the external LM. In this tutorial, we show how to use
+`Low-order Density Ratio `_ to alleviate this effect and further improve the performance
+of language model integration.
+
+.. note::
+
+   This tutorial is based on the recipe
+   `pruned_transducer_stateless7_streaming `_,
+   which is a streaming transducer model trained on `LibriSpeech`_.
+   However, you can easily apply LODR to other recipes.
+   If you encounter any problems, please open an issue here `icefall `__.
+
+
+.. note::
+
+   For simplicity, the training and testing corpus in this tutorial are the same (`LibriSpeech`_). However,
+   you can change the testing set to any other domain (e.g. `GigaSpeech`_) and prepare the language models
+   using that corpus.
+
+First, let's have a look at some background information. As the predecessor of LODR, Density Ratio (DR) was first proposed `here `_
+to address the language information mismatch between the training
+corpus (source domain) and the testing corpus (target domain). Assuming that the source domain and the test domain
+are acoustically similar, DR derives the following formula for decoding with Bayes' theorem:
+
+.. math::
+
+    \text{score}\left(y_u|\mathit{x},y\right) =
+    \log p\left(y_u|\mathit{x},y_{1:u-1}\right) +
+    \lambda_1 \log p_{\text{Target LM}}\left(y_u|\mathit{x},y_{1:u-1}\right) -
+    \lambda_2 \log p_{\text{Source LM}}\left(y_u|\mathit{x},y_{1:u-1}\right)
+
+
+where :math:`\lambda_1` and :math:`\lambda_2` are the weights of the LM scores for the target domain and the source domain respectively.
+Here, the source domain LM is trained on the training corpus. The only difference in the above formula compared to
+shallow fusion is the subtraction of the source domain LM.
+
+Some works treat the predictor and the joiner of the neural transducer as its internal LM. However, this LM is
+considered to be weak and can only capture low-level language information. Therefore, `LODR `__ proposed to use
+a low-order n-gram LM as an approximation of the ILM of the neural transducer. This leads to the following formula
+during decoding for transducer models:
+
+..
math::
+
+    \text{score}\left(y_u|\mathit{x},y\right) =
+    \log p_{rnnt}\left(y_u|\mathit{x},y_{1:u-1}\right) +
+    \lambda_1 \log p_{\text{Target LM}}\left(y_u|\mathit{x},y_{1:u-1}\right) -
+    \lambda_2 \log p_{\text{bi-gram}}\left(y_u|\mathit{x},y_{1:u-1}\right)
+
+In LODR, an additional bi-gram LM estimated on the source domain (e.g. the training corpus) is required. Compared to DR,
+the only difference lies in the choice of the source domain LM. According to the original `paper `_,
+LODR achieves similar performance to DR in both intra-domain and cross-domain settings.
+As a bi-gram is much faster to evaluate, LODR is usually much faster than DR.
+
+Now, we will show you how to use LODR in ``icefall``.
+For illustration purposes, we will use a pre-trained ASR model from this `link `_.
+If you want to train your model from scratch, please have a look at :ref:`non_streaming_librispeech_pruned_transducer_stateless`.
+The testing scenario here is intra-domain (we decode the model trained on `LibriSpeech`_ on `LibriSpeech`_ testing sets).
+
+As the initial step, let's download the pre-trained model.
+
+.. code-block:: bash
+
+   $ GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/Zengwei/icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29
+   $ pushd icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/exp
+   $ git lfs pull --include "pretrained.pt"
+   $ ln -s pretrained.pt epoch-99.pt # create a symbolic link so that the checkpoint can be loaded
+
+To test the model, let's have a look at the decoding results **without** using LM. This can be done via the following command:
+
+.. code-block:: bash
+
+   $ exp_dir=./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/exp/
+   $ ./pruned_transducer_stateless7_streaming/decode.py \
+       --epoch 99 \
+       --avg 1 \
+       --use-averaged-model False \
+       --exp-dir $exp_dir \
+       --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
+       --max-duration 600 \
+       --decode-chunk-len 32 \
+       --decoding-method modified_beam_search
+
+The following WERs are achieved on test-clean and test-other:
+
+.. code-block:: text
+
+   $ For test-clean, WER of different settings are:
+   $ beam_size_4 3.11 best for test-clean
+   $ For test-other, WER of different settings are:
+   $ beam_size_4 7.93 best for test-other
+
+Then, we download the external language model and the bi-gram LM that are necessary for LODR.
+Note that the bi-gram is estimated on the text of the LibriSpeech 960-hour training set.
+
+.. code-block:: bash
+
+   $ # download the external LM
+   $ GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/ezerhouni/icefall-librispeech-rnn-lm
+   $ # create a symbolic link so that the checkpoint can be loaded
+   $ pushd icefall-librispeech-rnn-lm/exp
+   $ git lfs pull --include "pretrained.pt"
+   $ ln -s pretrained.pt epoch-99.pt
+   $ popd
+   $
+   $ # download the bi-gram
+   $ git lfs install
+   $ git clone https://huggingface.co/marcoyang/librispeech_bigram
+   $ pushd data/lang_bpe_500
+   $ ln -s ../../librispeech_bigram/2gram.fst.txt .
+   $ popd
+
+Then, we perform LODR decoding by setting ``--decoding-method`` to ``modified_beam_search_lm_LODR``.
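+Before running the command, here is a minimal sketch of the score combination that
+``modified_beam_search_lm_LODR`` performs at each decoding step. This only illustrates
+the formula above with scalar log-probabilities; the function name and arguments are our
+own, and the actual implementation in ``icefall`` operates on batched tensors inside
+modified beam search:
+
+.. code-block:: python
+
+   import math
+
+   def lodr_score(
+       logp_rnnt: float,       # log p_rnnt(y_u | x, y_{1:u-1})
+       logp_target_lm: float,  # log p_{Target LM}(y_u | y_{1:u-1})
+       logp_bigram: float,     # log p_{bi-gram}(y_u | y_{u-1})
+       lm_scale: float = 0.42,         # corresponds to --lm-scale
+       ngram_lm_scale: float = -0.24,  # corresponds to --ngram-lm-scale (negative!)
+   ) -> float:
+       # The bi-gram (source domain) term is subtracted, hence ngram_lm_scale < 0.
+       return logp_rnnt + lm_scale * logp_target_lm + ngram_lm_scale * logp_bigram
+
+   # Rank two hypothetical candidate tokens for the next position:
+   candidates = {
+       "a": lodr_score(math.log(0.6), math.log(0.3), math.log(0.5)),
+       "b": lodr_score(math.log(0.4), math.log(0.5), math.log(0.1)),
+   }
+   print(max(candidates, key=candidates.get))  # "b" wins: its low bi-gram score is subtracted
+
+The full decoding command is:
+
+..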
code-block:: bash
+
+   $ exp_dir=./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/exp
+   $ lm_dir=./icefall-librispeech-rnn-lm/exp
+   $ lm_scale=0.42
+   $ LODR_scale=-0.24
+   $ ./pruned_transducer_stateless7_streaming/decode.py \
+       --epoch 99 \
+       --avg 1 \
+       --use-averaged-model False \
+       --beam-size 4 \
+       --exp-dir $exp_dir \
+       --max-duration 600 \
+       --decode-chunk-len 32 \
+       --decoding-method modified_beam_search_lm_LODR \
+       --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
+       --use-shallow-fusion 1 \
+       --lm-type rnn \
+       --lm-exp-dir $lm_dir \
+       --lm-epoch 99 \
+       --lm-scale $lm_scale \
+       --lm-avg 1 \
+       --rnn-lm-embedding-dim 2048 \
+       --rnn-lm-hidden-dim 2048 \
+       --rnn-lm-num-layers 3 \
+       --lm-vocab-size 500 \
+       --tokens-ngram 2 \
+       --ngram-lm-scale $LODR_scale
+
+There are two extra arguments that need to be given when doing LODR. ``--tokens-ngram`` specifies the order of the n-gram LM. As we
+are using a bi-gram, we set it to 2. ``--ngram-lm-scale`` is the scale of the bi-gram; it should be a negative number
+as we are subtracting the bi-gram's score during decoding.
+
+The decoding results obtained with the above command are shown below:
+
+.. code-block:: text
+
+   $ For test-clean, WER of different settings are:
+   $ beam_size_4 2.61 best for test-clean
+   $ For test-other, WER of different settings are:
+   $ beam_size_4 6.74 best for test-other
+
+Recall that the lowest WER we obtained in :ref:`shallow_fusion` with a beam size of 4 is ``2.77/7.08``; LODR
+indeed **further improves** the WER. We can do even better if we increase ``--beam-size``:
+
+.. list-table:: WER of LODR with different beam sizes
+   :widths: 25 25 50
+   :header-rows: 1
+
+   * - Beam size
+     - test-clean
+     - test-other
+   * - 4
+     - 2.61
+     - 6.74
+   * - 8
+     - 2.45
+     - 6.38
+   * - 12
+     - 2.4
+     - 6.23
\ No newline at end of file
diff --git a/docs/source/decoding-with-langugage-models/index.rst b/docs/source/decoding-with-langugage-models/index.rst
new file mode 100644
index 000000000..577ebbdfb
--- /dev/null
+++ b/docs/source/decoding-with-langugage-models/index.rst
@@ -0,0 +1,12 @@
+Decoding with language models
+=============================
+
+This section describes how to use external language models
+during decoding to improve the WER of transducer models.
+
+.. toctree::
+   :maxdepth: 2
+
+   shallow-fusion
+   LODR
+   rescoring
diff --git a/docs/source/decoding-with-langugage-models/rescoring.rst b/docs/source/decoding-with-langugage-models/rescoring.rst
new file mode 100644
index 000000000..d71acc1e5
--- /dev/null
+++ b/docs/source/decoding-with-langugage-models/rescoring.rst
@@ -0,0 +1,252 @@
+.. _rescoring:
+
+LM rescoring for Transducer
+=================================
+
+LM rescoring is a commonly used approach to incorporate external LM information. Unlike shallow-fusion-based
+methods (see :ref:`shallow_fusion`, :ref:`LODR`), rescoring is usually performed to re-rank the n-best hypotheses after beam search.
+Rescoring is usually more efficient than shallow fusion since less computation is performed on the external LM.
+In this tutorial, we will show you how to use an external LM to rescore the n-best hypotheses decoded from neural transducer models in
+`icefall `__.
+
+.. note::
+
+   This tutorial is based on the recipe
+   `pruned_transducer_stateless7_streaming `_,
+   which is a streaming transducer model trained on `LibriSpeech`_.
+   However, you can easily apply LM rescoring to other recipes.
+   If you encounter any problems, please open an issue `here `_.
+
+.. note::
+
+   For simplicity, the training and testing corpus in this tutorial are the same (`LibriSpeech`_). However, you can change the testing set
+   to any other domain (e.g. `GigaSpeech`_) and use an external LM trained on that domain.
+
+.. HINT::
+
+   We recommend using a GPU for decoding.
+
+For illustration purposes, we will use a pre-trained ASR model from this `link `__.
+If you want to train your model from scratch, please have a look at :ref:`non_streaming_librispeech_pruned_transducer_stateless`.
+
+As the initial step, let's download the pre-trained model.
+
+.. code-block:: bash
+
+   $ GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/Zengwei/icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29
+   $ pushd icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/exp
+   $ git lfs pull --include "pretrained.pt"
+   $ ln -s pretrained.pt epoch-99.pt # create a symbolic link so that the checkpoint can be loaded
+
+As usual, we first test the model's performance without an external LM. This can be done via the following command:
+
+.. code-block:: bash
+
+   $ exp_dir=./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/exp/
+   $ ./pruned_transducer_stateless7_streaming/decode.py \
+       --epoch 99 \
+       --avg 1 \
+       --use-averaged-model False \
+       --exp-dir $exp_dir \
+       --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
+       --max-duration 600 \
+       --decode-chunk-len 32 \
+       --decoding-method modified_beam_search
+
+The following WERs are achieved on test-clean and test-other:
+
+.. code-block:: text
+
+   $ For test-clean, WER of different settings are:
+   $ beam_size_4 3.11 best for test-clean
+   $ For test-other, WER of different settings are:
+   $ beam_size_4 7.93 best for test-other
+
+Now, we will try to improve the above WER numbers via external LM rescoring. We will download
+a pre-trained LM from this `link `__.
+
+.. note::
+
+   This is an RNN LM trained on the LibriSpeech text corpus, so it might not be ideal for other corpora.
+   You may also train an RNN LM from scratch. Please refer to this `script `__
+   for training an RNN LM and this `script `__ to train a transformer LM.
+
+.. code-block:: bash
+
+   $ # download the external LM
+   $ GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/ezerhouni/icefall-librispeech-rnn-lm
+   $ # create a symbolic link so that the checkpoint can be loaded
+   $ pushd icefall-librispeech-rnn-lm/exp
+   $ git lfs pull --include "pretrained.pt"
+   $ ln -s pretrained.pt epoch-99.pt
+   $ popd
+
+
+With the RNN LM available, we can rescore the n-best hypotheses generated from `modified_beam_search`. Here,
+`n` should be the number of beams, i.e. ``--beam-size``. The command for LM rescoring is
+as follows. Note that the ``--decoding-method`` is set to `modified_beam_search_lm_rescore` and ``--use-shallow-fusion``
+is set to `False`.
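+Conceptually, rescoring does something like the following. This is a minimal sketch with
+made-up scores and a stand-in LM scoring function (the names and the scalar interface are
+our own; the actual implementation batches this computation over all hypotheses):
+
+.. code-block:: python
+
+   from typing import Callable, List, Tuple
+
+   def rescore_nbest(
+       nbest: List[Tuple[str, float]],      # (hypothesis, transducer log-prob)
+       lm_logprob: Callable[[str], float],  # external LM log-prob of a full hypothesis
+       lm_scale: float = 0.43,              # corresponds to --lm-scale
+   ) -> str:
+       # Re-rank the n-best list by the interpolated score and keep the best one.
+       return max(nbest, key=lambda h: h[1] + lm_scale * lm_logprob(h[0]))[0]
+
+   # Toy usage with a made-up "LM" that penalizes each word equally:
+   hyps = [("HELLO WORLD", -3.2), ("HELLO WORLDS", -3.0)]
+   print(rescore_nbest(hyps, lambda s: -0.5 * len(s.split())))
+
+The actual command is:
+
+..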
code-block:: bash
+
+   $ exp_dir=./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/exp
+   $ lm_dir=./icefall-librispeech-rnn-lm/exp
+   $ lm_scale=0.43
+   $ ./pruned_transducer_stateless7_streaming/decode.py \
+       --epoch 99 \
+       --avg 1 \
+       --use-averaged-model False \
+       --beam-size 4 \
+       --exp-dir $exp_dir \
+       --max-duration 600 \
+       --decode-chunk-len 32 \
+       --decoding-method modified_beam_search_lm_rescore \
+       --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
+       --use-shallow-fusion 0 \
+       --lm-type rnn \
+       --lm-exp-dir $lm_dir \
+       --lm-epoch 99 \
+       --lm-scale $lm_scale \
+       --lm-avg 1 \
+       --rnn-lm-embedding-dim 2048 \
+       --rnn-lm-hidden-dim 2048 \
+       --rnn-lm-num-layers 3 \
+       --lm-vocab-size 500
+
+.. code-block:: text
+
+   $ For test-clean, WER of different settings are:
+   $ beam_size_4 2.93 best for test-clean
+   $ For test-other, WER of different settings are:
+   $ beam_size_4 7.6 best for test-other
+
+Great! We made some improvements! Increasing the size of the n-best hypotheses will further boost the performance;
+see the following table:
+
+.. list-table:: WERs of LM rescoring with different beam sizes
+   :widths: 25 25 25
+   :header-rows: 1
+
+   * - Beam size
+     - test-clean
+     - test-other
+   * - 4
+     - 2.93
+     - 7.6
+   * - 8
+     - 2.67
+     - 7.11
+   * - 12
+     - 2.59
+     - 6.86
+
+In fact, we can also apply LODR (see :ref:`LODR`) when doing LM rescoring. To do so, we need to
+download the bi-gram required by LODR:
+
+.. code-block:: bash
+
+   $ # download the bi-gram
+   $ git lfs install
+   $ git clone https://huggingface.co/marcoyang/librispeech_bigram
+   $ pushd data/lang_bpe_500
+   $ ln -s ../../librispeech_bigram/2gram.arpa .
+   $ popd
+
+Then we can perform LM rescoring + LODR by changing the decoding method to `modified_beam_search_lm_rescore_LODR`.
+
+.. note::
+
+   This decoding method requires `kenlm `_ as a dependency. You can install it
+   via this command: `pip install https://github.com/kpu/kenlm/archive/master.zip`.
+
+.. code-block:: bash
+
+   $ exp_dir=./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/exp
+   $ lm_dir=./icefall-librispeech-rnn-lm/exp
+   $ lm_scale=0.43
+   $ ./pruned_transducer_stateless7_streaming/decode.py \
+       --epoch 99 \
+       --avg 1 \
+       --use-averaged-model False \
+       --beam-size 4 \
+       --exp-dir $exp_dir \
+       --max-duration 600 \
+       --decode-chunk-len 32 \
+       --decoding-method modified_beam_search_lm_rescore_LODR \
+       --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
+       --use-shallow-fusion 0 \
+       --lm-type rnn \
+       --lm-exp-dir $lm_dir \
+       --lm-epoch 99 \
+       --lm-scale $lm_scale \
+       --lm-avg 1 \
+       --rnn-lm-embedding-dim 2048 \
+       --rnn-lm-hidden-dim 2048 \
+       --rnn-lm-num-layers 3 \
+       --lm-vocab-size 500
+
+You should see the following WERs after executing the commands above:
+
+.. code-block:: text
+
+   $ For test-clean, WER of different settings are:
+   $ beam_size_4 2.9 best for test-clean
+   $ For test-other, WER of different settings are:
+   $ beam_size_4 7.57 best for test-other
+
+It's slightly better than LM rescoring alone. If we further increase the beam size, we will see
+further improvements from LM rescoring + LODR:
+
+..
list-table:: WERs of LM rescoring + LODR with different beam sizes
+   :widths: 25 25 25
+   :header-rows: 1
+
+   * - Beam size
+     - test-clean
+     - test-other
+   * - 4
+     - 2.9
+     - 7.57
+   * - 8
+     - 2.63
+     - 7.04
+   * - 12
+     - 2.52
+     - 6.73
+
+As mentioned earlier, LM rescoring is usually faster than shallow-fusion-based methods.
+Here, we benchmark their WERs and decoding speed:
+
+.. list-table:: LM-rescoring-based methods vs shallow-fusion-based methods (each field shows the WER on test-clean, the WER on test-other, and the decoding time on test-clean)
+   :widths: 25 25 25 25
+   :header-rows: 1
+
+   * - Decoding method
+     - beam=4
+     - beam=8
+     - beam=12
+   * - `modified_beam_search`
+     - 3.11/7.93; 132s
+     - 3.1/7.95; 177s
+     - 3.1/7.96; 210s
+   * - `modified_beam_search_lm_shallow_fusion`
+     - 2.77/7.08; 262s
+     - 2.62/6.65; 352s
+     - 2.58/6.65; 488s
+   * - LODR
+     - 2.61/6.74; 400s
+     - 2.45/6.38; 610s
+     - 2.4/6.23; 870s
+   * - `modified_beam_search_lm_rescore`
+     - 2.93/7.6; 156s
+     - 2.67/7.11; 203s
+     - 2.59/6.86; 255s
+   * - `modified_beam_search_lm_rescore_LODR`
+     - 2.9/7.57; 160s
+     - 2.63/7.04; 203s
+     - 2.52/6.73; 263s
+
+.. note::
+
+   Decoding is performed with a single 32G V100; we set ``--max-duration`` to 600.
+   The decoding time here is only for reference and may vary.
\ No newline at end of file
diff --git a/docs/source/decoding-with-langugage-models/shallow-fusion.rst b/docs/source/decoding-with-langugage-models/shallow-fusion.rst
new file mode 100644
index 000000000..0d2837372
--- /dev/null
+++ b/docs/source/decoding-with-langugage-models/shallow-fusion.rst
@@ -0,0 +1,176 @@
+.. _shallow_fusion:
+
+Shallow fusion for Transducer
+=================================
+
+External language models (LMs) are commonly used to improve WERs for E2E ASR models.
+This tutorial shows you how to perform ``shallow fusion`` with an external LM
+to improve the word error rate of a transducer model.
+
+.. note::
+
+   This tutorial is based on the recipe
+   `pruned_transducer_stateless7_streaming `_,
+   which is a streaming transducer model trained on `LibriSpeech`_.
+   However, you can easily apply shallow fusion to other recipes.
+   If you encounter any problems, please open an issue here `icefall `_.
+
+.. note::
+
+   For simplicity, the training and testing corpus in this tutorial are the same (`LibriSpeech`_). However, you can change the testing set
+   to any other domain (e.g. `GigaSpeech`_) and use an external LM trained on that domain.
+
+.. HINT::
+
+   We recommend using a GPU for decoding.
+
+For illustration purposes, we will use a pre-trained ASR model from this `link `__.
+If you want to train your model from scratch, please have a look at :ref:`non_streaming_librispeech_pruned_transducer_stateless`.
+
+As the initial step, let's download the pre-trained model.
+
+.. code-block:: bash
+
+   $ GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/Zengwei/icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29
+   $ pushd icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/exp
+   $ git lfs pull --include "pretrained.pt"
+   $ ln -s pretrained.pt epoch-99.pt # create a symbolic link so that the checkpoint can be loaded
+
+To test the model, let's have a look at the decoding results without using LM. This can be done via the following command:
+
+..
code-block:: bash
+
+   $ exp_dir=./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/exp/
+   $ ./pruned_transducer_stateless7_streaming/decode.py \
+       --epoch 99 \
+       --avg 1 \
+       --use-averaged-model False \
+       --exp-dir $exp_dir \
+       --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
+       --max-duration 600 \
+       --decode-chunk-len 32 \
+       --decoding-method modified_beam_search
+
+The following WERs are achieved on test-clean and test-other:
+
+.. code-block:: text
+
+   $ For test-clean, WER of different settings are:
+   $ beam_size_4 3.11 best for test-clean
+   $ For test-other, WER of different settings are:
+   $ beam_size_4 7.93 best for test-other
+
+These are already good numbers! But we can further improve them by using shallow fusion with an external LM.
+Training a language model usually takes a long time, so we can download a pre-trained LM from this `link `__.
+
+.. code-block:: bash
+
+   $ # download the external LM
+   $ GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/ezerhouni/icefall-librispeech-rnn-lm
+   $ # create a symbolic link so that the checkpoint can be loaded
+   $ pushd icefall-librispeech-rnn-lm/exp
+   $ git lfs pull --include "pretrained.pt"
+   $ ln -s pretrained.pt epoch-99.pt
+   $ popd
+
+.. note::
+
+   This is an RNN LM trained on the LibriSpeech text corpus, so it might not be ideal for other corpora.
+   You may also train an RNN LM from scratch. Please refer to this `script `__
+   for training an RNN LM and this `script `__ to train a transformer LM.
+
+To use shallow fusion for decoding, we can execute the following command:
+
+.. code-block:: bash
+
+   $ exp_dir=./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/exp
+   $ lm_dir=./icefall-librispeech-rnn-lm/exp
+   $ lm_scale=0.29
+   $ ./pruned_transducer_stateless7_streaming/decode.py \
+       --epoch 99 \
+       --avg 1 \
+       --use-averaged-model False \
+       --beam-size 4 \
+       --exp-dir $exp_dir \
+       --max-duration 600 \
+       --decode-chunk-len 32 \
+       --decoding-method modified_beam_search_lm_shallow_fusion \
+       --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
+       --use-shallow-fusion 1 \
+       --lm-type rnn \
+       --lm-exp-dir $lm_dir \
+       --lm-epoch 99 \
+       --lm-scale $lm_scale \
+       --lm-avg 1 \
+       --rnn-lm-embedding-dim 2048 \
+       --rnn-lm-hidden-dim 2048 \
+       --rnn-lm-num-layers 3 \
+       --lm-vocab-size 500
+
+Note that we set ``--decoding-method modified_beam_search_lm_shallow_fusion`` and ``--use-shallow-fusion True``
+to use shallow fusion. ``--lm-type`` specifies the type of neural LM we are going to use; you can choose
+between ``rnn`` and ``transformer``. The following three arguments are associated with the rnn:
+
+- ``--rnn-lm-embedding-dim``
+  The embedding dimension of the RNN LM
+
+- ``--rnn-lm-hidden-dim``
+  The hidden dimension of the RNN LM
+
+- ``--rnn-lm-num-layers``
+  The number of RNN layers in the RNN LM.
+
+
+The decoding results obtained with the above command are shown below.
+
+.. code-block:: text
+
+   $ For test-clean, WER of different settings are:
+   $ beam_size_4 2.77 best for test-clean
+   $ For test-other, WER of different settings are:
+   $ beam_size_4 7.08 best for test-other
+
+The improvement from shallow fusion is very obvious! The relative WER reduction on test-other is around 10.5%.
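+Conceptually, shallow fusion interpolates the transducer's per-token log-probabilities with
+the external LM's per-token log-probabilities at every decoding step, before the beam is
+pruned. The following is a minimal sketch of one such step; the shapes and variable names
+are illustrative only and do not mirror the actual ``icefall`` implementation:
+
+.. code-block:: python
+
+   import torch
+
+   vocab_size = 500  # matches --lm-vocab-size
+   lm_scale = 0.29   # corresponds to --lm-scale
+   beam_size = 4     # corresponds to --beam-size
+
+   # Stand-ins for the joiner's output and the neural LM's output at one step:
+   asr_logprobs = torch.randn(vocab_size).log_softmax(dim=0)
+   lm_logprobs = torch.randn(vocab_size).log_softmax(dim=0)
+
+   # Interpolate the two scores, then keep the top `beam_size` candidates.
+   fused = asr_logprobs + lm_scale * lm_logprobs
+   topk_scores, topk_tokens = fused.topk(beam_size)
+   print(topk_tokens.tolist())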
+A few parameters can be tuned to further boost the performance of shallow fusion:
+
+- ``--lm-scale``
+
+  Controls the scale of the LM. If too small, the external language model may not be fully utilized; if too large,
+  the LM score may dominate during decoding, leading to a bad WER. A typical value is around 0.3.
+
+- ``--beam-size``
+
+  The number of active paths in the search beam. It controls the trade-off between decoding efficiency and accuracy.
+
+Here, we also show how ``--beam-size`` affects the WER and decoding time:
+
+.. list-table:: WERs and decoding time (on test-clean) of shallow fusion with different beam sizes
+   :widths: 25 25 25 25
+   :header-rows: 1
+
+   * - Beam size
+     - test-clean
+     - test-other
+     - Decoding time on test-clean (s)
+   * - 4
+     - 2.77
+     - 7.08
+     - 262
+   * - 8
+     - 2.62
+     - 6.65
+     - 352
+   * - 12
+     - 2.58
+     - 6.65
+     - 488
+
+As we see, a larger beam size during shallow fusion improves the WER, but also slows down decoding.
+
+
+
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 8d76eb68b..a7d365a15 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -34,3 +34,8 @@ speech recognition recipes using `k2 `_.
   contributing/index
   huggingface/index
+
+.. toctree::
+   :maxdepth: 2
+
+   decoding-with-langugage-models/index
\ No newline at end of file
diff --git a/docs/source/recipes/Non-streaming-ASR/librispeech/distillation.rst b/docs/source/recipes/Non-streaming-ASR/librispeech/distillation.rst
index ea9f350cd..2e8d0893a 100644
--- a/docs/source/recipes/Non-streaming-ASR/librispeech/distillation.rst
+++ b/docs/source/recipes/Non-streaming-ASR/librispeech/distillation.rst
@@ -1,7 +1,7 @@
 Distillation with HuBERT
 ========================
 
-This tutorial shows you how to perform knowledge distillation in `icefall`_
+This tutorial shows you how to perform knowledge distillation in `icefall `_
 with the `LibriSpeech`_ dataset. The distillation method
 used here is called "Multi Vector Quantization Knowledge Distillation" (MVQ-KD).
 Please have a look at our paper `Predicting Multi-Codebook Vector Quantization Indexes for Knowledge Distillation `_
@@ -13,7 +13,7 @@ for more details about MVQ-KD.
    `pruned_transducer_stateless4 `_. Currently, we only implement
    MVQ-KD in this recipe. However, MVQ-KD is theoretically applicable to all recipes
    with only minor changes needed. Feel free to try out MVQ-KD in different recipes. If you
-   encounter any problems, please open an issue here `icefall `_.
+   encounter any problems, please open an issue here `icefall `__.
@@ -217,7 +217,7 @@ the following command.
        --exp-dir $exp_dir \
        --enable-distillation True
 
-You should get similar results as `here `_.
+You should get similar results as `here `__.
 
 That's all! Feel free to experiment with your own setups and report your results.
-If you encounter any problems during training, please open up an issue `here `_.
+If you encounter any problems during training, please open up an issue `here `__.
diff --git a/docs/source/recipes/Non-streaming-ASR/librispeech/pruned_transducer_stateless.rst b/docs/source/recipes/Non-streaming-ASR/librispeech/pruned_transducer_stateless.rst
index 42fd3df77..1bc1dd984 100644
--- a/docs/source/recipes/Non-streaming-ASR/librispeech/pruned_transducer_stateless.rst
+++ b/docs/source/recipes/Non-streaming-ASR/librispeech/pruned_transducer_stateless.rst
@@ -8,10 +8,10 @@ with the `LibriSpeech `_ dataset.
 
..
Note:: - The tutorial is suitable for `pruned_transducer_stateless `_, - `pruned_transducer_stateless2 `_, - `pruned_transducer_stateless4 `_, - `pruned_transducer_stateless5 `_, + The tutorial is suitable for `pruned_transducer_stateless `__, + `pruned_transducer_stateless2 `__, + `pruned_transducer_stateless4 `__, + `pruned_transducer_stateless5 `__, We will take pruned_transducer_stateless4 as an example in this tutorial. .. HINT:: @@ -237,7 +237,7 @@ them, please modify ``./pruned_transducer_stateless4/train.py`` directly. .. NOTE:: - The options for `pruned_transducer_stateless5 `_ are a little different from + The options for `pruned_transducer_stateless5 `__ are a little different from other recipes. It allows you to configure ``--num-encoder-layers``, ``--dim-feedforward``, ``--nhead``, ``--encoder-dim``, ``--decoder-dim``, ``--joiner-dim`` from commandline, so that you can train models with different size with pruned_transducer_stateless5. @@ -529,13 +529,13 @@ Download pretrained models If you don't want to train from scratch, you can download the pretrained models by visiting the following links: - - `pruned_transducer_stateless `_ + - `pruned_transducer_stateless `__ - - `pruned_transducer_stateless2 `_ + - `pruned_transducer_stateless2 `__ - - `pruned_transducer_stateless4 `_ + - `pruned_transducer_stateless4 `__ - - `pruned_transducer_stateless5 `_ + - `pruned_transducer_stateless5 `__ See ``_ for the details of the above pretrained models diff --git a/docs/source/recipes/Streaming-ASR/introduction.rst b/docs/source/recipes/Streaming-ASR/introduction.rst index e1382e77d..ac77a51d1 100644 --- a/docs/source/recipes/Streaming-ASR/introduction.rst +++ b/docs/source/recipes/Streaming-ASR/introduction.rst @@ -45,9 +45,9 @@ the input features. We have three variants of Emformer models in ``icefall``. - - ``pruned_stateless_emformer_rnnt2`` using Emformer from torchaudio, see `LibriSpeech recipe `_. + - ``pruned_stateless_emformer_rnnt2`` using Emformer from torchaudio, see `LibriSpeech recipe `__. - ``conv_emformer_transducer_stateless`` using ConvEmformer implemented by ourself. Different from the Emformer in torchaudio, ConvEmformer has a convolution in each layer and uses the mechanisms in our reworked conformer model. - See `LibriSpeech recipe `_. + See `LibriSpeech recipe `__. - ``conv_emformer_transducer_stateless2`` using ConvEmformer implemented by ourself. The only difference from the above one is that it uses a simplified memory bank. See `LibriSpeech recipe `_. diff --git a/docs/source/recipes/Streaming-ASR/librispeech/pruned_transducer_stateless.rst b/docs/source/recipes/Streaming-ASR/librispeech/pruned_transducer_stateless.rst index de7102ba8..2ca70bcf3 100644 --- a/docs/source/recipes/Streaming-ASR/librispeech/pruned_transducer_stateless.rst +++ b/docs/source/recipes/Streaming-ASR/librispeech/pruned_transducer_stateless.rst @@ -6,10 +6,10 @@ with the `LibriSpeech `_ dataset. .. Note:: - The tutorial is suitable for `pruned_transducer_stateless `_, - `pruned_transducer_stateless2 `_, - `pruned_transducer_stateless4 `_, - `pruned_transducer_stateless5 `_, + The tutorial is suitable for `pruned_transducer_stateless `__, + `pruned_transducer_stateless2 `__, + `pruned_transducer_stateless4 `__, + `pruned_transducer_stateless5 `__, We will take pruned_transducer_stateless4 as an example in this tutorial. .. HINT:: @@ -264,7 +264,7 @@ them, please modify ``./pruned_transducer_stateless4/train.py`` directly. .. 
NOTE:: - The options for `pruned_transducer_stateless5 `_ are a little different from + The options for `pruned_transducer_stateless5 `__ are a little different from other recipes. It allows you to configure ``--num-encoder-layers``, ``--dim-feedforward``, ``--nhead``, ``--encoder-dim``, ``--decoder-dim``, ``--joiner-dim`` from commandline, so that you can train models with different size with pruned_transducer_stateless5. diff --git a/docs/source/recipes/Streaming-ASR/librispeech/zipformer_transducer.rst b/docs/source/recipes/Streaming-ASR/librispeech/zipformer_transducer.rst index f0e8961d7..8b75473c6 100644 --- a/docs/source/recipes/Streaming-ASR/librispeech/zipformer_transducer.rst +++ b/docs/source/recipes/Streaming-ASR/librispeech/zipformer_transducer.rst @@ -6,7 +6,7 @@ with the `LibriSpeech `_ dataset. .. Note:: - The tutorial is suitable for `pruned_transducer_stateless7_streaming `_, + The tutorial is suitable for `pruned_transducer_stateless7_streaming `__, .. HINT:: @@ -642,7 +642,7 @@ Download pretrained models If you don't want to train from scratch, you can download the pretrained models by visiting the following links: - - `pruned_transducer_stateless7_streaming `_ + - `pruned_transducer_stateless7_streaming `__ See ``_ for the details of the above pretrained models From ffe816e2a8314318a4ef6d5eaba34b62b842ba3f Mon Sep 17 00:00:00 2001 From: Yifan Yang <64255737+yfyeung@users.noreply.github.com> Date: Thu, 6 Jul 2023 23:12:41 +0800 Subject: [PATCH 16/30] Fix blank skip ci test (#1167) * Fix for ci * Fix frame_reducer --- ...ned-transducer-stateless7-ctc-bs-2023-01-29.sh} | 2 +- ...n-librispeech-2023-01-29-stateless7-ctc-bs.yml} | 8 ++++---- .../frame_reducer.py | 14 +++++++------- 3 files changed, 12 insertions(+), 12 deletions(-) rename .github/scripts/{run-librispeech-pruned-transducer-stateless7-ctc-bs-2022-12-15.sh => run-librispeech-pruned-transducer-stateless7-ctc-bs-2023-01-29.sh} (100%) rename .github/workflows/{run-librispeech-2022-12-15-stateless7-ctc-bs.yml => run-librispeech-2023-01-29-stateless7-ctc-bs.yml} (97%) diff --git a/.github/scripts/run-librispeech-pruned-transducer-stateless7-ctc-bs-2022-12-15.sh b/.github/scripts/run-librispeech-pruned-transducer-stateless7-ctc-bs-2023-01-29.sh similarity index 100% rename from .github/scripts/run-librispeech-pruned-transducer-stateless7-ctc-bs-2022-12-15.sh rename to .github/scripts/run-librispeech-pruned-transducer-stateless7-ctc-bs-2023-01-29.sh index 761eb72e2..7d2853c17 100755 --- a/.github/scripts/run-librispeech-pruned-transducer-stateless7-ctc-bs-2022-12-15.sh +++ b/.github/scripts/run-librispeech-pruned-transducer-stateless7-ctc-bs-2023-01-29.sh @@ -21,9 +21,9 @@ tree $repo/ ls -lh $repo/test_wavs/*.wav pushd $repo/exp -git lfs pull --include "data/lang_bpe_500/HLG.pt" git lfs pull --include "data/lang_bpe_500/L.pt" git lfs pull --include "data/lang_bpe_500/LG.pt" +git lfs pull --include "data/lang_bpe_500/HLG.pt" git lfs pull --include "data/lang_bpe_500/Linv.pt" git lfs pull --include "data/lang_bpe_500/bpe.model" git lfs pull --include "exp/cpu_jit.pt" diff --git a/.github/workflows/run-librispeech-2022-12-15-stateless7-ctc-bs.yml b/.github/workflows/run-librispeech-2023-01-29-stateless7-ctc-bs.yml similarity index 97% rename from .github/workflows/run-librispeech-2022-12-15-stateless7-ctc-bs.yml rename to .github/workflows/run-librispeech-2023-01-29-stateless7-ctc-bs.yml index 40a742988..821abc25d 100644 --- a/.github/workflows/run-librispeech-2022-12-15-stateless7-ctc-bs.yml +++ 
b/.github/workflows/run-librispeech-2023-01-29-stateless7-ctc-bs.yml @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -name: run-librispeech-2022-12-15-stateless7-ctc-bs +name: run-librispeech-2023-01-29-stateless7-ctc-bs # zipformer on: @@ -34,7 +34,7 @@ on: - cron: "50 15 * * *" jobs: - run_librispeech_2022_12_15_zipformer_ctc_bs: + run_librispeech_2023_01_29_zipformer_ctc_bs: if: github.event.label.name == 'run-decode' || github.event.label.name == 'blank-skip' || github.event_name == 'push' || github.event_name == 'schedule' runs-on: ${{ matrix.os }} strategy: @@ -124,7 +124,7 @@ jobs: export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH - .github/scripts/run-librispeech-pruned-transducer-stateless7-ctc-bs-2022-12-15.sh + .github/scripts/run-librispeech-pruned-transducer-stateless7-ctc-bs-2023-01-29.sh - name: Display decoding results for librispeech pruned_transducer_stateless7_ctc_bs if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' @@ -159,5 +159,5 @@ jobs: uses: actions/upload-artifact@v2 if: github.event_name == 'schedule' || github.event.label.name == 'run-decode' with: - name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless7-ctc-bs-2022-12-15 + name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless7-ctc-bs-2023-01-29 path: egs/librispeech/ASR/pruned_transducer_stateless7_ctc_bs/exp/ diff --git a/egs/librispeech/ASR/pruned_transducer_stateless7_ctc_bs/frame_reducer.py b/egs/librispeech/ASR/pruned_transducer_stateless7_ctc_bs/frame_reducer.py index 0841f7cf1..c44cb1eaf 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless7_ctc_bs/frame_reducer.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless7_ctc_bs/frame_reducer.py @@ -81,20 +81,20 @@ class FrameReducer(nn.Module): fake_limit_indexes = torch.topk( ctc_output[:, :, blank_id], max_limit_len ).indices - T = ( + T_arange = ( torch.arange(max_limit_len) .expand_as( fake_limit_indexes, ) .to(device=x.device) ) - T = torch.remainder(T, limit_lens.unsqueeze(1)) - limit_indexes = torch.gather(fake_limit_indexes, 1, T) + T_arange = torch.remainder(T_arange, limit_lens.unsqueeze(1)) + limit_indexes = torch.gather(fake_limit_indexes, 1, T_arange) limit_mask = torch.full_like( non_blank_mask, - False, + 0, device=x.device, - ).scatter_(1, limit_indexes, True) + ).scatter_(1, limit_indexes, 1) non_blank_mask = non_blank_mask | ~limit_mask @@ -108,9 +108,9 @@ class FrameReducer(nn.Module): ) - out_lens ) - max_pad_len = pad_lens_list.max() + max_pad_len = int(pad_lens_list.max()) - out = F.pad(x, (0, 0, 0, max_pad_len)) + out = F.pad(x, [0, 0, 0, max_pad_len]) valid_pad_mask = ~make_pad_mask(pad_lens_list) total_valid_mask = torch.concat([non_blank_mask, valid_pad_mask], dim=1) From 41b16d783878fe3de304bb70285d97581e629eb5 Mon Sep 17 00:00:00 2001 From: Desh Raj Date: Sat, 8 Jul 2023 17:01:51 +0200 Subject: [PATCH 17/30] SURT recipe for AMI and ICSI (#1133) * merge upstream * add SURT model and training * add libricss decoding * add chunk width randomization * decode SURT with libricss * initial commit for zipformer_ctc * remove unwanted changes * remove changes to other recipe * fix zipformer softlink * fix for JIT export * add missing file * fix symbolic links * update results * clean commit for SURT recipe * training libricss surt model * remove 
unwanted files * remove unwanted changes * remove changes in librispeech * change some files to symlinks * remove unwanted changes in utils * add export script * add README * minor fix in README * add assets for README * replace some files with symlinks * remove unused decoding methods * initial commit for SURT AMI recipe * fix symlink * add train + decode scripts * add missing symlink * change files to symlink * change file type --- egs/ami/SURT/README.md | 156 ++ .../SURT/dprnn_zipformer/asr_datamodule.py | 399 +++++ egs/ami/SURT/dprnn_zipformer/beam_search.py | 1 + egs/ami/SURT/dprnn_zipformer/decode.py | 622 ++++++++ egs/ami/SURT/dprnn_zipformer/decoder.py | 1 + egs/ami/SURT/dprnn_zipformer/dprnn.py | 1 + .../SURT/dprnn_zipformer/encoder_interface.py | 1 + egs/ami/SURT/dprnn_zipformer/export.py | 1 + egs/ami/SURT/dprnn_zipformer/joiner.py | 1 + egs/ami/SURT/dprnn_zipformer/model.py | 1 + egs/ami/SURT/dprnn_zipformer/optim.py | 1 + egs/ami/SURT/dprnn_zipformer/scaling.py | 1 + .../SURT/dprnn_zipformer/scaling_converter.py | 1 + egs/ami/SURT/dprnn_zipformer/test_model.py | 1 + egs/ami/SURT/dprnn_zipformer/train.py | 1420 +++++++++++++++++ egs/ami/SURT/dprnn_zipformer/train_adapt.py | 1411 ++++++++++++++++ egs/ami/SURT/dprnn_zipformer/zipformer.py | 1 + egs/ami/SURT/local/add_source_feats.py | 78 + egs/ami/SURT/local/compute_fbank_aimix.py | 185 +++ egs/ami/SURT/local/compute_fbank_ami.py | 94 ++ egs/ami/SURT/local/compute_fbank_icsi.py | 95 ++ egs/ami/SURT/local/compute_fbank_ihm.py | 101 ++ egs/ami/SURT/local/prepare_ami_train_cuts.py | 146 ++ egs/ami/SURT/local/prepare_icsi_train_cuts.py | 67 + egs/ami/SURT/local/prepare_lang_bpe.py | 1 + egs/ami/SURT/local/train_bpe_model.py | 1 + egs/ami/SURT/prepare.sh | 195 +++ egs/ami/SURT/shared | 1 + 28 files changed, 4984 insertions(+) create mode 100644 egs/ami/SURT/README.md create mode 100644 egs/ami/SURT/dprnn_zipformer/asr_datamodule.py create mode 120000 egs/ami/SURT/dprnn_zipformer/beam_search.py create mode 100755 egs/ami/SURT/dprnn_zipformer/decode.py create mode 120000 egs/ami/SURT/dprnn_zipformer/decoder.py create mode 120000 egs/ami/SURT/dprnn_zipformer/dprnn.py create mode 120000 egs/ami/SURT/dprnn_zipformer/encoder_interface.py create mode 120000 egs/ami/SURT/dprnn_zipformer/export.py create mode 120000 egs/ami/SURT/dprnn_zipformer/joiner.py create mode 120000 egs/ami/SURT/dprnn_zipformer/model.py create mode 120000 egs/ami/SURT/dprnn_zipformer/optim.py create mode 120000 egs/ami/SURT/dprnn_zipformer/scaling.py create mode 120000 egs/ami/SURT/dprnn_zipformer/scaling_converter.py create mode 120000 egs/ami/SURT/dprnn_zipformer/test_model.py create mode 100755 egs/ami/SURT/dprnn_zipformer/train.py create mode 100755 egs/ami/SURT/dprnn_zipformer/train_adapt.py create mode 120000 egs/ami/SURT/dprnn_zipformer/zipformer.py create mode 100755 egs/ami/SURT/local/add_source_feats.py create mode 100755 egs/ami/SURT/local/compute_fbank_aimix.py create mode 100755 egs/ami/SURT/local/compute_fbank_ami.py create mode 100755 egs/ami/SURT/local/compute_fbank_icsi.py create mode 100755 egs/ami/SURT/local/compute_fbank_ihm.py create mode 100755 egs/ami/SURT/local/prepare_ami_train_cuts.py create mode 100755 egs/ami/SURT/local/prepare_icsi_train_cuts.py create mode 120000 egs/ami/SURT/local/prepare_lang_bpe.py create mode 120000 egs/ami/SURT/local/train_bpe_model.py create mode 100755 egs/ami/SURT/prepare.sh create mode 120000 egs/ami/SURT/shared diff --git a/egs/ami/SURT/README.md b/egs/ami/SURT/README.md new file mode 100644 index 
000000000..74a8ba014
--- /dev/null
+++ b/egs/ami/SURT/README.md
@@ -0,0 +1,156 @@
+# Introduction
+
+This is a multi-talker ASR recipe for the AMI and ICSI datasets. We train a Streaming
+Unmixing and Recognition Transducer (SURT) model for the task.
+
+Please refer to the `egs/libricss/SURT` recipe README for details about the task and the
+model.
+
+## Description of the recipe
+
+### Pre-requisites
+
+The recipes in this directory need the following packages to be installed:
+
+- [meeteval](https://github.com/fgnt/meeteval)
+- [einops](https://github.com/arogozhnikov/einops)
+
+Additionally, we initialize the model with the pre-trained model from the LibriCSS recipe.
+Please download this checkpoint (see below) or train the LibriCSS recipe first.
+
+### Training
+
+To train the model, run the following from within `egs/ami/SURT`:
+
+```bash
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+
+python dprnn_zipformer/train.py \
+  --use-fp16 True \
+  --exp-dir dprnn_zipformer/exp/surt_base \
+  --world-size 4 \
+  --max-duration 500 \
+  --max-duration-valid 250 \
+  --max-cuts 200 \
+  --num-buckets 50 \
+  --num-epochs 30 \
+  --enable-spec-aug True \
+  --enable-musan False \
+  --ctc-loss-scale 0.2 \
+  --heat-loss-scale 0.2 \
+  --base-lr 0.004 \
+  --model-init-ckpt exp/libricss_base.pt \
+  --chunk-width-randomization True \
+  --num-mask-encoder-layers 4 \
+  --num-encoder-layers 2,2,2,2,2
+```
+
+The above is for SURT-base (~26M). For SURT-large (~38M), use:
+
+```bash
+  --model-init-ckpt exp/libricss_large.pt \
+  --num-mask-encoder-layers 6 \
+  --num-encoder-layers 2,4,3,2,4 \
+```
+
+**NOTE:** You may need to decrease the `--max-duration` for SURT-large to avoid OOM.
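As an aside, comma-separated flags such as `--num-encoder-layers 2,4,3,2,4` above are parsed into one integer per Zipformer stack. A minimal sketch of that parsing, mirroring the `to_int_tuple` helper defined in `dprnn_zipformer/train.py` below:

```python
def to_int_tuple(s: str):
    """Parse a comma-separated flag like "2,4,3,2,4" into (2, 4, 3, 2, 4)."""
    return tuple(map(int, s.split(",")))


# One entry per encoder stack of the Zipformer.
assert to_int_tuple("2,4,3,2,4") == (2, 4, 3, 2, 4)
```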
+
+### Adaptation
+
+The training step above only trains on simulated mixtures. For best results, we also
+adapt the final model on the AMI+ICSI train set. For this, run the following from
+within `egs/ami/SURT`:
+
+```bash
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+
+python dprnn_zipformer/train_adapt.py \
+  --use-fp16 True \
+  --exp-dir dprnn_zipformer/exp/surt_base_adapt \
+  --world-size 4 \
+  --max-duration 500 \
+  --max-duration-valid 250 \
+  --max-cuts 200 \
+  --num-buckets 50 \
+  --num-epochs 8 \
+  --lr-epochs 2 \
+  --enable-spec-aug True \
+  --enable-musan False \
+  --ctc-loss-scale 0.2 \
+  --base-lr 0.0004 \
+  --model-init-ckpt dprnn_zipformer/exp/surt_base/epoch-30.pt \
+  --chunk-width-randomization True \
+  --num-mask-encoder-layers 4 \
+  --num-encoder-layers 2,2,2,2,2
+```
+
+For SURT-large, use the following config:
+
+```bash
+  --num-mask-encoder-layers 6 \
+  --num-encoder-layers 2,4,3,2,4 \
+  --model-init-ckpt dprnn_zipformer/exp/surt_large/epoch-30.pt \
+  --num-epochs 15 \
+  --lr-epochs 4 \
+```
+
+### Decoding
+
+To decode the model, run the following from within `egs/ami/SURT`:
+
+#### Greedy search
+
+```bash
+export CUDA_VISIBLE_DEVICES="0"
+
+python dprnn_zipformer/decode.py \
+  --epoch 20 --avg 1 --use-averaged-model False \
+  --exp-dir dprnn_zipformer/exp/surt_base_adapt \
+  --max-duration 250 \
+  --decoding-method greedy_search
+```
+
+#### Modified beam search
+
+```bash
+python dprnn_zipformer/decode.py \
+  --epoch 20 --avg 1 --use-averaged-model False \
+  --exp-dir dprnn_zipformer/exp/surt_base_adapt \
+  --max-duration 250 \
+  --decoding-method modified_beam_search \
+  --beam-size 4
+```
+
+## Results (using beam search)
+
+**AMI**
+
+| Model      | IHM-Mix  | SDM      | MDM      |
+|------------|:--------:|:--------:|:--------:|
+| SURT-base  | 39.8     | 65.4     | 46.6     |
+| + adapt    | 37.4     | 46.9     | 43.7     |
+| SURT-large | 36.8     | 62.5     | 44.4     |
+| + adapt    | **35.1** | **44.6** | **41.4** |
+
+**ICSI**
+
+| Model      | IHM-Mix  | SDM      |
+|------------|:--------:|:--------:|
+| SURT-base  | 28.3     | 60.0     |
+| + adapt    | 26.3     | 33.9     |
+| SURT-large | 27.8     | 59.7     |
+| + adapt    | **24.4** | **32.3** |
+
+## Pre-trained models and logs
+
+* LibriCSS pre-trained model (for initialization): [base](https://huggingface.co/desh2608/icefall-surt-libricss-dprnn-zipformer/tree/main/exp/surt_base) [large](https://huggingface.co/desh2608/icefall-surt-libricss-dprnn-zipformer/tree/main/exp/surt_large)
+
+* Pre-trained models:
+
+* Training logs:
+  - surt_base:
+  - surt_base_adapt:
+  - surt_large:
+  - surt_large_adapt:
diff --git a/egs/ami/SURT/dprnn_zipformer/asr_datamodule.py b/egs/ami/SURT/dprnn_zipformer/asr_datamodule.py
new file mode 100644
index 000000000..ec8106bc3
--- /dev/null
+++ b/egs/ami/SURT/dprnn_zipformer/asr_datamodule.py
@@ -0,0 +1,399 @@
+# Copyright 2021 Piotr Żelasko
+# Copyright 2022 Xiaomi Corporation (Author: Mingshuang Luo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
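Before the datamodule code, a note on the tables above: these are multi-talker WERs, where hypothesis channels must first be matched against reference speakers (the recipe relies on `meeteval` and `write_surt_error_stats` for the real computation). A toy, self-contained sketch of the idea behind permutation-matched WER, not the actual metric code used by the recipe:

```python
from itertools import permutations
from typing import List


def edit_distance(ref: List[str], hyp: List[str]) -> int:
    """Word-level Levenshtein distance with a rolling 1-D DP table."""
    dp = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, 1):
        prev, dp[0] = dp[0], i
        for j, h in enumerate(hyp, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (r != h))
    return dp[-1]


def permutation_wer(refs: List[List[str]], hyps: List[List[str]]) -> float:
    """Best WER over all assignments of hypothesis channels to references."""
    n = max(len(refs), len(hyps))
    refs = refs + [[]] * (n - len(refs))  # pad with empty streams
    hyps = hyps + [[]] * (n - len(hyps))
    errors = min(
        sum(edit_distance(r, h) for r, h in zip(refs, perm))
        for perm in permutations(hyps)
    )
    return errors / max(sum(len(r) for r in refs), 1)


# Channel order does not matter; only the best matching counts.
assert permutation_wer(
    [["hello", "world"], ["good", "morning"]],
    [["good", "morning"], ["hello", "word"]],
) == 0.25  # one substitution out of four reference words
```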
+ + +import argparse +import inspect +import logging +from functools import lru_cache +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional + +import torch +from lhotse import CutSet, Fbank, FbankConfig, load_manifest, load_manifest_lazy +from lhotse.dataset import ( # noqa F401 for PrecomputedFeatures + CutMix, + DynamicBucketingSampler, + K2SurtDataset, + PrecomputedFeatures, + SimpleCutSampler, + SpecAugment, +) +from lhotse.dataset.input_strategies import OnTheFlyFeatures +from lhotse.utils import fix_random_seed +from torch.utils.data import DataLoader + +from icefall.utils import str2bool + + +class _SeedWorkers: + def __init__(self, seed: int): + self.seed = seed + + def __call__(self, worker_id: int): + fix_random_seed(self.seed + worker_id) + + +class AmiAsrDataModule: + """ + DataModule for k2 SURT experiments. + It assumes there is always one train and valid dataloader, + but there can be multiple test dataloaders (e.g. LibriSpeech test-clean + and test-other). + + It contains all the common data pipeline modules used in ASR + experiments, e.g.: + - dynamic batch size, + - bucketing samplers, + - augmentation, + - on-the-fly feature extraction + + This class should be derived for specific corpora used in ASR tasks. + """ + + def __init__(self, args: argparse.Namespace): + self.args = args + + @classmethod + def add_arguments(cls, parser: argparse.ArgumentParser): + group = parser.add_argument_group( + title="ASR data related options", + description="These options are used for the preparation of " + "PyTorch DataLoaders from Lhotse CutSet's -- they control the " + "effective batch sizes, sampling strategies, applied data " + "augmentations, etc.", + ) + group.add_argument( + "--manifest-dir", + type=Path, + default=Path("data/manifests"), + help="Path to directory with train/valid/test cuts.", + ) + group.add_argument( + "--max-duration", + type=int, + default=200.0, + help="Maximum pooled recordings duration (seconds) in a " + "single batch. You can reduce it if it causes CUDA OOM.", + ) + group.add_argument( + "--max-duration-valid", + type=int, + default=200.0, + help="Maximum pooled recordings duration (seconds) in a " + "single batch. You can reduce it if it causes CUDA OOM.", + ) + group.add_argument( + "--max-cuts", + type=int, + default=100, + help="Maximum number of cuts in a single batch. You can " + "reduce it if it causes CUDA OOM.", + ) + group.add_argument( + "--bucketing-sampler", + type=str2bool, + default=True, + help="When enabled, the batches will come from buckets of " + "similar duration (saves padding frames).", + ) + group.add_argument( + "--num-buckets", + type=int, + default=30, + help="The number of buckets for the DynamicBucketingSampler" + "(you might want to increase it for larger datasets).", + ) + group.add_argument( + "--on-the-fly-feats", + type=str2bool, + default=False, + help=( + "When enabled, use on-the-fly cut mixing and feature " + "extraction. Will drop existing precomputed feature manifests " + "if available." + ), + ) + group.add_argument( + "--shuffle", + type=str2bool, + default=True, + help="When enabled (=default), the examples will be " + "shuffled for each epoch.", + ) + group.add_argument( + "--drop-last", + type=str2bool, + default=True, + help="Whether to drop last batch. 
Used by sampler.", + ) + group.add_argument( + "--return-cuts", + type=str2bool, + default=True, + help="When enabled, each batch will have the " + "field: batch['supervisions']['cut'] with the cuts that " + "were used to construct it.", + ) + + group.add_argument( + "--num-workers", + type=int, + default=2, + help="The number of training dataloader workers that " + "collect the batches.", + ) + + group.add_argument( + "--enable-spec-aug", + type=str2bool, + default=True, + help="When enabled, use SpecAugment for training dataset.", + ) + + group.add_argument( + "--spec-aug-time-warp-factor", + type=int, + default=80, + help="Used only when --enable-spec-aug is True. " + "It specifies the factor for time warping in SpecAugment. " + "Larger values mean more warping. " + "A value less than 1 means to disable time warp.", + ) + + group.add_argument( + "--enable-musan", + type=str2bool, + default=True, + help="When enabled, select noise from MUSAN and mix it" + "with training dataset. ", + ) + + def train_dataloaders( + self, + cuts_train: CutSet, + sampler_state_dict: Optional[Dict[str, Any]] = None, + sources: bool = False, + ) -> DataLoader: + """ + Args: + cuts_train: + CutSet for training. + sampler_state_dict: + The state dict for the training sampler. + """ + transforms = [] + if self.args.enable_musan: + logging.info("Enable MUSAN") + logging.info("About to get Musan cuts") + cuts_musan = load_manifest(self.args.manifest_dir / "musan_cuts.jsonl.gz") + transforms.append( + CutMix(cuts=cuts_musan, prob=0.5, snr=(10, 20), preserve_id=True) + ) + else: + logging.info("Disable MUSAN") + + input_transforms = [] + if self.args.enable_spec_aug: + logging.info("Enable SpecAugment") + logging.info(f"Time warp factor: {self.args.spec_aug_time_warp_factor}") + # Set the value of num_frame_masks according to Lhotse's version. + # In different Lhotse's versions, the default of num_frame_masks is + # different. 
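As an aside, the version check that follows is an instance of a general pattern: rather than hard-coding a library default that changed between releases, read it off the installed signature. A standalone sketch with a hypothetical `augment` function standing in for `SpecAugment.__init__`:

```python
import inspect


def augment(x, num_masks=1):  # hypothetical stand-in for a third-party API
    return x


# Read the installed default instead of assuming a particular version.
default = inspect.signature(augment).parameters["num_masks"].default
num_masks = 2 if default == 1 else 10
assert num_masks == 2  # our stand-in defaults to 1
```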
+            num_frame_masks = 10
+            num_frame_masks_parameter = inspect.signature(
+                SpecAugment.__init__
+            ).parameters["num_frame_masks"]
+            if num_frame_masks_parameter.default == 1:
+                num_frame_masks = 2
+            logging.info(f"Num frame mask: {num_frame_masks}")
+            input_transforms.append(
+                SpecAugment(
+                    time_warp_factor=self.args.spec_aug_time_warp_factor,
+                    num_frame_masks=num_frame_masks,
+                    features_mask_size=27,
+                    num_feature_masks=2,
+                    frames_mask_size=100,
+                )
+            )
+        else:
+            logging.info("Disable SpecAugment")
+
+        logging.info("About to create train dataset")
+        train = K2SurtDataset(
+            input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
+            if self.args.on_the_fly_feats
+            else PrecomputedFeatures(),
+            cut_transforms=transforms,
+            input_transforms=input_transforms,
+            return_cuts=self.args.return_cuts,
+            return_sources=sources,
+            strict=False,
+        )
+
+        if self.args.bucketing_sampler:
+            logging.info("Using DynamicBucketingSampler.")
+            train_sampler = DynamicBucketingSampler(
+                cuts_train,
+                max_duration=self.args.max_duration,
+                quadratic_duration=30.0,
+                max_cuts=self.args.max_cuts,
+                shuffle=self.args.shuffle,
+                num_buckets=self.args.num_buckets,
+                drop_last=self.args.drop_last,
+            )
+        else:
+            logging.info("Using SimpleCutSampler.")
+            train_sampler = SimpleCutSampler(
+                cuts_train,
+                max_duration=self.args.max_duration,
+                max_cuts=self.args.max_cuts,
+                shuffle=self.args.shuffle,
+            )
+        logging.info("About to create train dataloader")
+
+        if sampler_state_dict is not None:
+            logging.info("Loading sampler state dict")
+            train_sampler.load_state_dict(sampler_state_dict)
+
+        # 'seed' is derived from the current random state, which will have
+        # previously been set in the main process.
+        seed = torch.randint(0, 100000, ()).item()
+        worker_init_fn = _SeedWorkers(seed)
+
+        train_dl = DataLoader(
+            train,
+            sampler=train_sampler,
+            batch_size=None,
+            num_workers=self.args.num_workers,
+            persistent_workers=False,
+            worker_init_fn=worker_init_fn,
+        )
+
+        return train_dl
+
+    def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader:
+        transforms = []
+
+        logging.info("About to create dev dataset")
+        validate = K2SurtDataset(
+            input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
+            if self.args.on_the_fly_feats
+            else PrecomputedFeatures(),
+            cut_transforms=transforms,
+            return_cuts=self.args.return_cuts,
+            return_sources=False,
+            strict=False,
+        )
+        valid_sampler = DynamicBucketingSampler(
+            cuts_valid,
+            max_duration=self.args.max_duration_valid,
+            quadratic_duration=30.0,
+            max_cuts=self.args.max_cuts,
+            shuffle=False,
+        )
+        logging.info("About to create dev dataloader")
+
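A side note on the wiring used throughout this module: Lhotse samplers yield whole batches of cuts, which is why every `DataLoader` here is built with `batch_size=None`. A condensed sketch of the same wiring outside the class; the manifest path is hypothetical, the sampler options mirror the argparse defaults above, and `K2SurtDataset` is assumed to accept its own defaults for the remaining arguments:

```python
from lhotse import load_manifest_lazy
from lhotse.dataset import DynamicBucketingSampler, K2SurtDataset
from torch.utils.data import DataLoader

cuts = load_manifest_lazy("data/manifests/cuts_train_ami_icsi.jsonl.gz")
sampler = DynamicBucketingSampler(
    cuts,
    max_duration=200.0,  # seconds of pooled audio per batch
    max_cuts=100,
    num_buckets=30,
    shuffle=True,
    drop_last=True,
)
dataset = K2SurtDataset(return_cuts=True, strict=False)
# batch_size=None: the sampler already emits whole batches of cuts.
dl = DataLoader(dataset, sampler=sampler, batch_size=None, num_workers=2)
```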
+        # 'seed' is derived from the current random state, which will have
+        # previously been set in the main process.
+        seed = torch.randint(0, 100000, ()).item()
+        worker_init_fn = _SeedWorkers(seed)
+
+        valid_dl = DataLoader(
+            validate,
+            sampler=valid_sampler,
+            batch_size=None,
+            num_workers=self.args.num_workers,
+            persistent_workers=False,
+            worker_init_fn=worker_init_fn,
+        )
+
+        return valid_dl
+
+    def test_dataloaders(self, cuts: CutSet) -> DataLoader:
+        logging.debug("About to create test dataset")
+        test = K2SurtDataset(
+            input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
+            if self.args.on_the_fly_feats
+            else PrecomputedFeatures(),
+            return_cuts=self.args.return_cuts,
+            return_sources=False,
+            strict=False,
+        )
+        sampler = DynamicBucketingSampler(
+            cuts,
+            max_duration=self.args.max_duration_valid,
+            max_cuts=self.args.max_cuts,
+            shuffle=False,
+        )
+
+        # 'seed' is derived from the current random state, which will have
+        # previously been set in the main process.
+        seed = torch.randint(0, 100000, ()).item()
+        worker_init_fn = _SeedWorkers(seed)
+
+        logging.debug("About to create test dataloader")
+        test_dl = DataLoader(
+            test,
+            batch_size=None,
+            sampler=sampler,
+            num_workers=self.args.num_workers,
+            persistent_workers=False,
+            worker_init_fn=worker_init_fn,
+        )
+        return test_dl
+
+    @lru_cache()
+    def aimix_train_cuts(
+        self,
+        rvb_affix: str = "clean",
+        sources: bool = True,
+    ) -> CutSet:
+        logging.info("About to get train cuts")
+        source_affix = "_sources" if sources else ""
+        cs = load_manifest_lazy(
+            self.args.manifest_dir / f"cuts_train_{rvb_affix}{source_affix}.jsonl.gz"
+        )
+        cs = cs.filter(lambda c: c.duration >= 1.0 and c.duration <= 30.0)
+        return cs
+
+    @lru_cache()
+    def train_cuts(
+        self,
+    ) -> CutSet:
+        logging.info("About to get train cuts")
+        return load_manifest_lazy(
+            self.args.manifest_dir / "cuts_train_ami_icsi.jsonl.gz"
+        )
+
+    @lru_cache()
+    def ami_cuts(self, split: str = "dev", type: str = "sdm") -> CutSet:
+        logging.info(f"About to get AMI {split} {type} cuts")
+        return load_manifest_lazy(
+            self.args.manifest_dir / f"cuts_ami-{type}_{split}.jsonl.gz"
+        )
+
+    @lru_cache()
+    def icsi_cuts(self, split: str = "dev", type: str = "sdm") -> CutSet:
+        logging.info(f"About to get ICSI {split} {type} cuts")
+        return load_manifest_lazy(
+            self.args.manifest_dir / f"cuts_icsi-{type}_{split}.jsonl.gz"
+        )
diff --git a/egs/ami/SURT/dprnn_zipformer/beam_search.py b/egs/ami/SURT/dprnn_zipformer/beam_search.py
new file mode 120000
index 000000000..581b29833
--- /dev/null
+++ b/egs/ami/SURT/dprnn_zipformer/beam_search.py
@@ -0,0 +1 @@
+../../../libricss/SURT/dprnn_zipformer/beam_search.py
\ No newline at end of file
diff --git a/egs/ami/SURT/dprnn_zipformer/decode.py b/egs/ami/SURT/dprnn_zipformer/decode.py
new file mode 100755
index 000000000..d1a1eddc9
--- /dev/null
+++ b/egs/ami/SURT/dprnn_zipformer/decode.py
@@ -0,0 +1,622 @@
+#!/usr/bin/env python3
+#
+# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang,
+#                                                 Zengwei Yao)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +(1) greedy search +./dprnn_zipformer/decode.py \ + --epoch 20 \ + --avg 1 \ + --use-averaged-model false \ + --exp-dir ./dprnn_zipformer/exp_adapt \ + --max-duration 600 \ + --decoding-method greedy_search + +(2) beam search (not recommended) +./dprnn_zipformer/decode.py \ + --epoch 20 \ + --avg 1 \ + --use-averaged-model false \ + --exp-dir ./dprnn_zipformer/exp_adapt \ + --max-duration 600 \ + --decoding-method beam_search \ + --beam-size 4 + +(3) modified beam search +./dprnn_zipformer/decode.py \ + --epoch 20 \ + --avg 1 \ + --use-averaged-model false \ + --exp-dir ./dprnn_zipformer/exp_adapt \ + --max-duration 600 \ + --decoding-method modified_beam_search \ + --beam-size 4 +""" + + +import argparse +import logging +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import AmiAsrDataModule +from beam_search import ( + beam_search, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from lhotse.utils import EPSILON +from train import add_model_arguments, get_params, get_surt_model + +from icefall import LmScorer, NgramLm +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.lexicon import Lexicon +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + str2bool, + write_surt_error_stats, +) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=20, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 1. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=1, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="dprnn_zipformer/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An integer indicating how many candidates we will keep for each + frame. 
Used only when --decoding-method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; 2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", + ) + + add_model_arguments(parser) + + return parser + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + batch: dict, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if greedy_search is used, it would be "greedy_search" + If beam search with a beam size of 7 is used, it would be + "beam_7" + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + Returns: + Return the decoding result. See above description for the format of + the returned dict. + """ + device = next(model.parameters()).device + feature = batch["inputs"] + assert feature.ndim == 3 + + feature = feature.to(device) + feature_lens = batch["input_lens"].to(device) + + # Apply the mask encoder + B, T, F = feature.shape + processed = model.mask_encoder(feature) # B,T,F*num_channels + masks = processed.view(B, T, F, params.num_channels).unbind(dim=-1) + x_masked = [feature * m for m in masks] + + # Recognition + # Stack the inputs along the batch axis + h = torch.cat(x_masked, dim=0) + h_lens = torch.cat([feature_lens for _ in range(params.num_channels)], dim=0) + encoder_out, encoder_out_lens = model.encoder(x=h, x_lens=h_lens) + + if model.joint_encoder_layer is not None: + encoder_out = model.joint_encoder_layer(encoder_out) + + def _group_channels(hyps: List[str]) -> List[List[str]]: + """ + Currently we have a batch of size M*B, where M is the number of + channels and B is the batch size. We need to group the hypotheses + into B groups, each of which contains M hypotheses. 
+
+        Example:
+            hyps = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2']
+            _group_channels(hyps) = [['a1', 'a2'], ['b1', 'b2'], ['c1', 'c2']]
+        """
+        assert len(hyps) == B * params.num_channels
+        out_hyps = []
+        for i in range(B):
+            out_hyps.append(hyps[i::B])
+        return out_hyps
+
+    hyps = []
+    if params.decoding_method == "greedy_search" and params.max_sym_per_frame == 1:
+        hyp_tokens = greedy_search_batch(
+            model=model,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyps.append(hyp)
+    elif params.decoding_method == "modified_beam_search":
+        hyp_tokens = modified_beam_search(
+            model=model,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam_size,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyps.append(hyp)
+    else:
+        batch_size = encoder_out.size(0)
+
+        for i in range(batch_size):
+            # fmt: off
+            encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]]
+            # fmt: on
+            if params.decoding_method == "greedy_search":
+                hyp = greedy_search(
+                    model=model,
+                    encoder_out=encoder_out_i,
+                    max_sym_per_frame=params.max_sym_per_frame,
+                )
+            elif params.decoding_method == "beam_search":
+                hyp = beam_search(
+                    model=model,
+                    encoder_out=encoder_out_i,
+                    beam=params.beam_size,
+                )
+            else:
+                raise ValueError(
+                    f"Unsupported decoding method: {params.decoding_method}"
+                )
+            hyps.append(sp.decode(hyp))
+
+    if params.decoding_method == "greedy_search":
+        return {"greedy_search": _group_channels(hyps)}
+    elif "fast_beam_search" in params.decoding_method:
+        key = f"beam_{params.beam}_"
+        key += f"max_contexts_{params.max_contexts}_"
+        key += f"max_states_{params.max_states}"
+        if "nbest" in params.decoding_method:
+            key += f"_num_paths_{params.num_paths}_"
+            key += f"nbest_scale_{params.nbest_scale}"
+            if "LG" in params.decoding_method:
+                key += f"_ngram_lm_scale_{params.ngram_lm_scale}"
+
+        return {key: _group_channels(hyps)}
+    else:
+        return {f"beam_size_{params.beam_size}": _group_channels(hyps)}
+
+
+def decode_dataset(
+    dl: torch.utils.data.DataLoader,
+    params: AttributeDict,
+    model: nn.Module,
+    sp: spm.SentencePieceProcessor,
+) -> Dict[str, List[Tuple[str, List[str], List[str]]]]:
+    """Decode dataset.
+
+    Args:
+      dl:
+        PyTorch's dataloader containing the dataset to decode.
+      params:
+        It is returned by :func:`get_params`.
+      model:
+        The neural model.
+      sp:
+        The BPE model.
+    Returns:
+      Return a dict, whose key may be "greedy_search" if greedy search
+      is used, or it may be "beam_7" if beam size of 7 is used.
+      Its value is a list of tuples. Each tuple contains three elements:
+      the cut ID, the reference transcript, and the predicted result.
+    """
+    num_cuts = 0
+
+    try:
+        num_batches = len(dl)
+    except TypeError:
+        num_batches = "?"
+
+    if params.decoding_method == "greedy_search":
+        log_interval = 50
+    else:
+        log_interval = 20
+
+    results = defaultdict(list)
+    for batch_idx, batch in enumerate(dl):
+        cut_ids = [cut.id for cut in batch["cuts"]]
+        cuts_batch = batch["cuts"]
+
+        hyps_dict = decode_one_batch(
+            params=params,
+            model=model,
+            sp=sp,
+            batch=batch,
+        )
+
+        for name, hyps in hyps_dict.items():
+            this_batch = []
+            for cut_id, hyp_words in zip(cut_ids, hyps):
+                # Reference is a list of supervision texts sorted by start time.
+                ref_words = [
+                    s.text.strip()
+                    for s in sorted(
+                        cuts_batch[cut_id].supervisions, key=lambda s: s.start
+                    )
+                ]
+                this_batch.append((cut_id, ref_words, hyp_words))
+
+            results[name].extend(this_batch)
+
+        num_cuts += len(cut_ids)
+
+        if batch_idx % log_interval == 0:
+            batch_str = f"{batch_idx}/{num_batches}"
+
+            logging.info(f"batch {batch_str}, cuts processed until now is {num_cuts}")
+    return results
+
+
+def save_results(
+    params: AttributeDict,
+    test_set_name: str,
+    results_dict: Dict[str, List[Tuple[str, List[str], List[str]]]],
+):
+    test_set_wers = dict()
+    for key, results in results_dict.items():
+        recog_path = (
+            params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt"
+        )
+        results = sorted(results)
+        store_transcripts(filename=recog_path, texts=results)
+        logging.info(f"The transcripts are stored in {recog_path}")
+
+        # The following prints out WERs, per-word error statistics and aligned
+        # ref/hyp pairs.
+        errs_filename = (
+            params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt"
+        )
+        with open(errs_filename, "w") as f:
+            wer = write_surt_error_stats(
+                f,
+                f"{test_set_name}-{key}",
+                results,
+                enable_log=True,
+                num_channels=params.num_channels,
+            )
+            test_set_wers[key] = wer
+
+        logging.info("Wrote detailed error stats to {}".format(errs_filename))
+
+    test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
+    errs_info = (
+        params.res_dir / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt"
+    )
+    with open(errs_info, "w") as f:
+        print("settings\tWER", file=f)
+        for key, val in test_set_wers:
+            print("{}\t{}".format(key, val), file=f)
+
+    s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
+    note = "\tbest for {}".format(test_set_name)
+    for key, val in test_set_wers:
+        s += "{}\t{}{}\n".format(key, val, note)
+        note = ""
+    logging.info(s)
+
+
+@torch.no_grad()
+def main():
+    parser = get_parser()
+    LmScorer.add_arguments(parser)
+    AmiAsrDataModule.add_arguments(parser)
+    args = parser.parse_args()
+    args.exp_dir = Path(args.exp_dir)
+
+    params = get_params()
+    params.update(vars(args))
+
+    assert params.decoding_method in (
+        "greedy_search",
+        "beam_search",
+        "modified_beam_search",
+    ), f"Decoding method {params.decoding_method} is not supported."
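Stepping back to `decode_one_batch` above: the model decodes all channels in a single pass by stacking masked copies of the input along the batch axis, which is why `_group_channels` can recover per-utterance hypotheses with a stride-`B` slice. A small shape walkthrough with toy sizes:

```python
import torch

B, T, F, C = 2, 100, 80, 2  # toy batch, frames, feature dim, channels
feature = torch.randn(B, T, F)
masks = [torch.rand(B, T, F) for _ in range(C)]

# One masked copy of the input per output channel...
x_masked = [feature * m for m in masks]
# ...stacked along the batch axis: channel c of utterance b is row c * B + b.
h = torch.cat(x_masked, dim=0)
assert h.shape == (C * B, T, F)

# Hence hyps[i::B] collects all channels of utterance i, in channel order.
hyps = [f"utt{b}_ch{c}" for c in range(C) for b in range(B)]
assert [hyps[i::B] for i in range(B)] == [
    ["utt0_ch0", "utt0_ch1"],
    ["utt1_ch0", "utt1_ch1"],
]
```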
+
+    params.res_dir = params.exp_dir / params.decoding_method
+
+    if params.iter > 0:
+        params.suffix = f"iter-{params.iter}-avg-{params.avg}"
+    else:
+        params.suffix = f"epoch-{params.epoch}-avg-{params.avg}"
+
+    if "beam_search" in params.decoding_method:
+        params.suffix += f"-{params.decoding_method}-beam-size-{params.beam_size}"
+    else:
+        params.suffix += f"-context-{params.context_size}"
+        params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}"
+
+    if params.use_averaged_model:
+        params.suffix += "-use-averaged-model"
+
+    setup_logger(f"{params.res_dir}/log-decode-{params.suffix}")
+    logging.info("Decoding started")
+
+    device = torch.device("cpu")
+    if torch.cuda.is_available():
+        device = torch.device("cuda", 0)
+
+    logging.info(f"Device: {device}")
+
+    sp = spm.SentencePieceProcessor()
+    sp.load(params.bpe_model)
+
+    # <blk> and <unk> are defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+    params.unk_id = sp.piece_to_id("<unk>")
+    params.vocab_size = sp.get_piece_size()
+
+    logging.info(params)
+
+    logging.info("About to create model")
+    model = get_surt_model(params)
+    assert model.encoder.decode_chunk_size == params.decode_chunk_len // 2, (
+        model.encoder.decode_chunk_size,
+        params.decode_chunk_len,
+    )
+
+    if not params.use_averaged_model:
+        if params.iter > 0:
+            filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[
+                : params.avg
+            ]
+            if len(filenames) == 0:
+                raise ValueError(
+                    f"No checkpoints found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            elif len(filenames) < params.avg:
+                raise ValueError(
+                    f"Not enough checkpoints ({len(filenames)}) found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            logging.info(f"averaging {filenames}")
+            model.to(device)
+            model.load_state_dict(average_checkpoints(filenames, device=device))
+        elif params.avg == 1:
+            load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+        else:
+            start = params.epoch - params.avg + 1
+            filenames = []
+            for i in range(start, params.epoch + 1):
+                if i >= 1:
+                    filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+            logging.info(f"averaging {filenames}")
+            model.to(device)
+            model.load_state_dict(average_checkpoints(filenames, device=device))
+    else:
+        if params.iter > 0:
+            filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[
+                : params.avg + 1
+            ]
+            if len(filenames) == 0:
+                raise ValueError(
+                    f"No checkpoints found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            elif len(filenames) < params.avg + 1:
+                raise ValueError(
+                    f"Not enough checkpoints ({len(filenames)}) found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            filename_start = filenames[-1]
+            filename_end = filenames[0]
+            logging.info(
+                "Calculating the averaged model over iteration checkpoints"
+                f" from {filename_start} (excluded) to {filename_end}"
+            )
+            model.to(device)
+            model.load_state_dict(
+                average_checkpoints_with_averaged_model(
+                    filename_start=filename_start,
+                    filename_end=filename_end,
+                    device=device,
+                )
+            )
+        else:
+            assert params.avg > 0, params.avg
+            start = params.epoch - params.avg
+            assert start >= 1, start
+            filename_start = f"{params.exp_dir}/epoch-{start}.pt"
+            filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt"
+            logging.info(
+                f"Calculating the averaged model over epoch range from "
+                f"{start} (excluded) to {params.epoch}"
+            )
+            model.to(device)
+            model.load_state_dict(
+                average_checkpoints_with_averaged_model(
+                    filename_start=filename_start,
+                    filename_end=filename_end,
+                    device=device,
+                )
+            )
+
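To make the two averaging modes above concrete, here is which checkpoint files each one actually reads, assuming hypothetical values `--epoch 30 --avg 5`:

```python
epoch, avg = 30, 5  # hypothetical --epoch / --avg values

# use_averaged_model=False: `avg` checkpoints are loaded and averaged directly.
plain = [f"epoch-{i}.pt" for i in range(epoch - avg + 1, epoch + 1)]
assert plain == [f"epoch-{i}.pt" for i in (26, 27, 28, 29, 30)]

# use_averaged_model=True: only the two endpoint files are read; the running
# averages stored inside them represent the range (epoch - avg, epoch].
endpoints = (f"epoch-{epoch - avg}.pt", f"epoch-{epoch}.pt")
assert endpoints == ("epoch-25.pt", "epoch-30.pt")
```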
model.to(device) + model.eval() + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + # we need cut ids to display recognition results. + args.return_cuts = True + ami = AmiAsrDataModule(args) + + # NOTE(@desh2608): we filter segments longer than 120s to avoid OOM errors in decoding. + # However, 99.9% of the segments are shorter than 120s, so this should not + # substantially affect the results. In future, we will implement an overlapped + # inference method to avoid OOM errors. + + test_sets = {} + for split in ["dev", "test"]: + for type in ["ihm-mix", "sdm", "mdm8-bf"]: + test_sets[f"ami-{split}_{type}"] = ( + ami.ami_cuts(split=split, type=type) + .trim_to_supervision_groups(max_pause=0.0) + .filter(lambda c: 0.1 < c.duration < 120.0) + .to_eager() + ) + + for split in ["dev", "test"]: + for type in ["ihm-mix", "sdm"]: + test_sets[f"icsi-{split}_{type}"] = ( + ami.icsi_cuts(split=split, type=type) + .trim_to_supervision_groups(max_pause=0.0) + .filter(lambda c: 0.1 < c.duration < 120.0) + .to_eager() + ) + + for test_set, test_cuts in test_sets.items(): + test_dl = ami.test_dataloaders(test_cuts) + results_dict = decode_dataset( + dl=test_dl, + params=params, + model=model, + sp=sp, + ) + + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/egs/ami/SURT/dprnn_zipformer/decoder.py b/egs/ami/SURT/dprnn_zipformer/decoder.py new file mode 120000 index 000000000..c34865c25 --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/decoder.py @@ -0,0 +1 @@ +../../../libricss/SURT/dprnn_zipformer/decoder.py \ No newline at end of file diff --git a/egs/ami/SURT/dprnn_zipformer/dprnn.py b/egs/ami/SURT/dprnn_zipformer/dprnn.py new file mode 120000 index 000000000..8918beb32 --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/dprnn.py @@ -0,0 +1 @@ +../../../libricss/SURT/dprnn_zipformer/dprnn.py \ No newline at end of file diff --git a/egs/ami/SURT/dprnn_zipformer/encoder_interface.py b/egs/ami/SURT/dprnn_zipformer/encoder_interface.py new file mode 120000 index 000000000..0ba945d0f --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/encoder_interface.py @@ -0,0 +1 @@ +../../../libricss/SURT/dprnn_zipformer/encoder_interface.py \ No newline at end of file diff --git a/egs/ami/SURT/dprnn_zipformer/export.py b/egs/ami/SURT/dprnn_zipformer/export.py new file mode 120000 index 000000000..3deae4471 --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/export.py @@ -0,0 +1 @@ +../../../libricss/SURT/dprnn_zipformer/export.py \ No newline at end of file diff --git a/egs/ami/SURT/dprnn_zipformer/joiner.py b/egs/ami/SURT/dprnn_zipformer/joiner.py new file mode 120000 index 000000000..79fbe8769 --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/joiner.py @@ -0,0 +1 @@ +../../../libricss/SURT/dprnn_zipformer/joiner.py \ No newline at end of file diff --git a/egs/ami/SURT/dprnn_zipformer/model.py b/egs/ami/SURT/dprnn_zipformer/model.py new file mode 120000 index 000000000..ae8c65c99 --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/model.py @@ -0,0 +1 @@ +../../../libricss/SURT/dprnn_zipformer/model.py \ No newline at end of file diff --git a/egs/ami/SURT/dprnn_zipformer/optim.py b/egs/ami/SURT/dprnn_zipformer/optim.py new file mode 120000 index 000000000..366d0f7a2 --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/optim.py @@ -0,0 +1 @@ +../../../libricss/SURT/dprnn_zipformer/optim.py \ No newline at end of file diff --git 
a/egs/ami/SURT/dprnn_zipformer/scaling.py b/egs/ami/SURT/dprnn_zipformer/scaling.py new file mode 120000 index 000000000..f11d49d77 --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/scaling.py @@ -0,0 +1 @@ +../../../libricss/SURT/dprnn_zipformer/scaling.py \ No newline at end of file diff --git a/egs/ami/SURT/dprnn_zipformer/scaling_converter.py b/egs/ami/SURT/dprnn_zipformer/scaling_converter.py new file mode 120000 index 000000000..1533cbe0e --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/scaling_converter.py @@ -0,0 +1 @@ +../../../libricss/SURT/dprnn_zipformer/scaling_converter.py \ No newline at end of file diff --git a/egs/ami/SURT/dprnn_zipformer/test_model.py b/egs/ami/SURT/dprnn_zipformer/test_model.py new file mode 120000 index 000000000..1259849e0 --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/test_model.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless7_streaming/test_model.py \ No newline at end of file diff --git a/egs/ami/SURT/dprnn_zipformer/train.py b/egs/ami/SURT/dprnn_zipformer/train.py new file mode 100755 index 000000000..cd5fafc34 --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/train.py @@ -0,0 +1,1420 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang, +# Mingshuang Luo,) +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
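One piece of context for the usage string below: `--world-size` follows the usual icefall pattern of one training process per GPU, launched with `torch.multiprocessing.spawn`. A condensed, self-contained sketch of that launch logic; in the real script, `run` builds the SURT model, wraps it in DDP, and enters the training loop:

```python
import argparse

import torch.multiprocessing as mp


def run(rank: int, world_size: int, args: argparse.Namespace) -> None:
    # Placeholder for: setup_dist(rank, world_size), model building, DDP, training.
    print(f"rank {rank} of {world_size} started")


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--world-size", type=int, default=1)
    args = parser.parse_args()
    if args.world_size > 1:
        # mp.spawn calls run(rank, world_size, args) once per process.
        mp.spawn(run, args=(args.world_size, args), nprocs=args.world_size, join=True)
    else:
        run(rank=0, world_size=1, args=args)


if __name__ == "__main__":
    main()
```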
+""" +Usage: + +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +cd egs/ami/SURT/ +./prepare.sh + +./dprnn_zipformer/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --use-fp16 1 \ + --exp-dir dprnn_zipformer/exp \ + --max-duration 650 +""" + +import argparse +import copy +import logging +import warnings +from itertools import chain +from pathlib import Path +from shutil import copyfile +from typing import Any, Dict, Optional, Tuple, Union + +import k2 +import optim +import sentencepiece as spm +import torch +import torch.multiprocessing as mp +import torch.nn as nn +from asr_datamodule import AmiAsrDataModule +from decoder import Decoder +from dprnn import DPRNN +from einops.layers.torch import Rearrange +from joiner import Joiner +from lhotse.cut import Cut +from lhotse.dataset.sampling.base import CutSampler +from lhotse.utils import LOG_EPSILON, fix_random_seed +from model import SURT +from optim import Eden, ScaledAdam +from scaling import ScaledLinear, ScaledLSTM +from torch import Tensor +from torch.cuda.amp import GradScaler +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.tensorboard import SummaryWriter +from zipformer import Zipformer + +from icefall import diagnostics +from icefall.checkpoint import load_checkpoint, remove_checkpoints +from icefall.checkpoint import save_checkpoint as save_checkpoint_impl +from icefall.checkpoint import ( + save_checkpoint_with_global_batch_idx, + update_averaged_model, +) +from icefall.dist import cleanup_dist, setup_dist +from icefall.env import get_env_info +from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool + +LRSchedulerType = Union[torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler] + + +def set_batch_count(model: Union[nn.Module, DDP], batch_count: float) -> None: + if isinstance(model, DDP): + # get underlying nn.Module + model = model.module + for module in model.modules(): + if hasattr(module, "batch_count"): + module.batch_count = batch_count + + +def add_model_arguments(parser: argparse.ArgumentParser): + parser.add_argument( + "--num-mask-encoder-layers", + type=int, + default=4, + help="Number of layers in the DPRNN based mask encoder.", + ) + + parser.add_argument( + "--mask-encoder-dim", + type=int, + default=256, + help="Hidden dimension of the LSTM blocks in DPRNN.", + ) + + parser.add_argument( + "--mask-encoder-segment-size", + type=int, + default=32, + help="Segment size of the SegLSTM in DPRNN. 
Ideally, this should be equal to the "
+        "decode-chunk-length of the zipformer encoder.",
+    )
+
+    parser.add_argument(
+        "--chunk-width-randomization",
+        type=str2bool,
+        default=False,
+        help="Whether to randomize the chunk width in DPRNN.",
+    )
+
+    # Zipformer config is based on:
+    # https://github.com/k2-fsa/icefall/pull/745#issuecomment-1405282740
+    parser.add_argument(
+        "--num-encoder-layers",
+        type=str,
+        default="2,2,2,2,2",
+        help="Number of zipformer encoder layers, comma separated.",
+    )
+
+    parser.add_argument(
+        "--feedforward-dims",
+        type=str,
+        default="768,768,768,768,768",
+        help="Feedforward dimension of the zipformer encoder layers, comma separated.",
+    )
+
+    parser.add_argument(
+        "--nhead",
+        type=str,
+        default="8,8,8,8,8",
+        help="Number of attention heads in the zipformer encoder layers.",
+    )
+
+    parser.add_argument(
+        "--encoder-dims",
+        type=str,
+        default="256,256,256,256,256",
+        help="Embedding dimension in the 2 blocks of zipformer encoder layers, comma separated",
+    )
+
+    parser.add_argument(
+        "--attention-dims",
+        type=str,
+        default="192,192,192,192,192",
+        help="""Attention dimension in the 2 blocks of zipformer encoder layers, comma separated;
+        not the same as embedding dimension.""",
+    )
+
+    parser.add_argument(
+        "--encoder-unmasked-dims",
+        type=str,
+        default="192,192,192,192,192",
+        help="Unmasked dimensions in the encoders, relates to augmentation during training. "
+        "Must be <= each of encoder_dims. Empirically, less than 256 seems to make performance "
+        "worse.",
+    )
+
+    parser.add_argument(
+        "--zipformer-downsampling-factors",
+        type=str,
+        default="1,2,4,8,2",
+        help="Downsampling factor for each stack of encoder layers.",
+    )
+
+    parser.add_argument(
+        "--cnn-module-kernels",
+        type=str,
+        default="31,31,31,31,31",
+        help="Sizes of kernels in convolution modules.",
+    )
+
+    parser.add_argument(
+        "--use-joint-encoder-layer",
+        type=str,
+        default="lstm",
+        choices=["linear", "lstm", "none"],
+        help="Type of joint layer used to combine all branches ('none' disables it).",
+    )
+
+    parser.add_argument(
+        "--decoder-dim",
+        type=int,
+        default=512,
+        help="Embedding dimension in the decoder model.",
+    )
+
+    parser.add_argument(
+        "--joiner-dim",
+        type=int,
+        default=512,
+        help="""Dimension used in the joiner model.
+        Outputs from the encoder and decoder model are projected
+        to this dimension before adding.
+        """,
+    )
+
+    parser.add_argument(
+        "--short-chunk-size",
+        type=int,
+        default=50,
+        help="""Chunk length of dynamic training, the chunk size would be either
+        max sequence length of current batch or uniformly sampled from (1, short_chunk_size).
+ """, + ) + + parser.add_argument( + "--num-left-chunks", + type=int, + default=4, + help="How many left context can be seen in chunks when calculating attention.", + ) + + parser.add_argument( + "--decode-chunk-len", + type=int, + default=32, + help="The chunk size for decoding (in frames before subsampling)", + ) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=30, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=1, + help="""Resume training from this epoch. It should be positive. + If larger than 1, it will load checkpoint from + exp-dir/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--start-batch", + type=int, + default=0, + help="""If positive, --start-epoch is ignored and + it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="conv_lstm_transducer_stateless_ctc/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--model-init-ckpt", + type=str, + default=None, + help="""The model checkpoint to initialize the model (either full or part). + If not specified, the model is randomly initialized. + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--base-lr", type=float, default=0.004, help="The base learning rate." + ) + + parser.add_argument( + "--lr-batches", + type=float, + default=5000, + help="""Number of steps that affects how rapidly the learning rate + decreases. We suggest not to change this.""", + ) + + parser.add_argument( + "--lr-epochs", + type=float, + default=5, + help="""Number of epochs that affects how rapidly the learning rate decreases. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; 2 means tri-gram", + ) + + parser.add_argument( + "--prune-range", + type=int, + default=5, + help="The prune range for rnnt loss, it means how many symbols(context)" + "we are using to compute the loss", + ) + + parser.add_argument( + "--lm-scale", + type=float, + default=0.25, + help="The scale to smooth the loss with lm " + "(output of prediction network) part.", + ) + + parser.add_argument( + "--am-scale", + type=float, + default=0.0, + help="The scale to smooth the loss with am (output of encoder network) part.", + ) + + parser.add_argument( + "--simple-loss-scale", + type=float, + default=0.5, + help="To get pruning ranges, we will calculate a simple version" + "loss(joiner is just addition), this simple loss also uses for" + "training (as a regularization item). 
We will scale the simple loss" + "with this parameter before adding to the final loss.", + ) + + parser.add_argument( + "--ctc-loss-scale", + type=float, + default=0.2, + help="Scale for CTC loss.", + ) + + parser.add_argument( + "--heat-loss-scale", + type=float, + default=0.2, + help="Scale for HEAT loss on separated sources.", + ) + + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + + parser.add_argument( + "--print-diagnostics", + type=str2bool, + default=False, + help="Accumulate stats on activations, print them and exit.", + ) + + parser.add_argument( + "--save-every-n", + type=int, + default=2000, + help="""Save checkpoint after processing this number of batches" + periodically. We save checkpoint to exp-dir/ whenever + params.batch_idx_train % save_every_n == 0. The checkpoint filename + has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt' + Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the + end of each epoch where `xxx` is the epoch number counting from 0. + """, + ) + + parser.add_argument( + "--keep-last-k", + type=int, + default=1, + help="""Only keep this number of checkpoints on disk. + For instance, if it is 3, there are only 3 checkpoints + in the exp-dir with filenames `checkpoint-xxx.pt`. + It does not affect checkpoints with name `epoch-xxx.pt`. + """, + ) + + parser.add_argument( + "--average-period", + type=int, + default=100, + help="""Update the averaged model, namely `model_avg`, after processing + this number of batches. `model_avg` is a separate version of model, + in which each floating-point parameter is the average of all the + parameters from the start of training. Each time we take the average, + we do: `model_avg = model * (average_period / batch_idx_train) + + model_avg * ((batch_idx_train - average_period) / batch_idx_train)`. + """, + ) + + parser.add_argument( + "--use-fp16", + type=str2bool, + default=False, + help="Whether to use half precision training.", + ) + + add_model_arguments(parser) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - valid_interval: Run validation if batch_idx % valid_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. 
+ + - warm_step: The warm_step for Noam optimizer. + """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 2000, + # parameters for SURT + "num_channels": 2, + "feature_dim": 80, + "subsampling_factor": 4, # not passed in, this is fixed + # parameters for Noam + "model_warm_step": 5000, # arg given to model, not for lrate + # parameters for ctc loss + "beam_size": 10, + "use_double_scores": True, + "env_info": get_env_info(), + } + ) + + return params + + +def get_mask_encoder_model(params: AttributeDict) -> nn.Module: + mask_encoder = DPRNN( + feature_dim=params.feature_dim, + input_size=params.mask_encoder_dim, + hidden_size=params.mask_encoder_dim, + output_size=params.feature_dim * params.num_channels, + segment_size=params.mask_encoder_segment_size, + num_blocks=params.num_mask_encoder_layers, + chunk_width_randomization=params.chunk_width_randomization, + ) + return mask_encoder + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + # TODO: We can add an option to switch between Zipformer and Transformer + def to_int_tuple(s: str): + return tuple(map(int, s.split(","))) + + encoder = Zipformer( + num_features=params.feature_dim, + output_downsampling_factor=2, + zipformer_downsampling_factors=to_int_tuple( + params.zipformer_downsampling_factors + ), + encoder_dims=to_int_tuple(params.encoder_dims), + attention_dim=to_int_tuple(params.attention_dims), + encoder_unmasked_dims=to_int_tuple(params.encoder_unmasked_dims), + nhead=to_int_tuple(params.nhead), + feedforward_dim=to_int_tuple(params.feedforward_dims), + cnn_module_kernels=to_int_tuple(params.cnn_module_kernels), + num_encoder_layers=to_int_tuple(params.num_encoder_layers), + num_left_chunks=params.num_left_chunks, + short_chunk_size=params.short_chunk_size, + decode_chunk_size=params.decode_chunk_len // 2, + ) + return encoder + + +def get_joint_encoder_layer(params: AttributeDict) -> nn.Module: + class TakeFirst(nn.Module): + def forward(self, x): + return x[0] + + if params.use_joint_encoder_layer == "linear": + encoder_dim = int(params.encoder_dims.split(",")[-1]) + joint_layer = nn.Sequential( + Rearrange("(c b) t d -> b t (c d)", c=params.num_channels), + nn.Linear( + params.num_channels * encoder_dim, params.num_channels * encoder_dim + ), + nn.ReLU(), + Rearrange("b t (c d) -> (c b) t d", c=params.num_channels), + ) + elif params.use_joint_encoder_layer == "lstm": + encoder_dim = int(params.encoder_dims.split(",")[-1]) + joint_layer = nn.Sequential( + Rearrange("(c b) t d -> b t (c d)", c=params.num_channels), + ScaledLSTM( + input_size=params.num_channels * encoder_dim, + hidden_size=params.num_channels * encoder_dim, + num_layers=1, + bias=True, + batch_first=True, + dropout=0.0, + bidirectional=False, + ), + TakeFirst(), + nn.ReLU(), + Rearrange("b t (c d) -> (c b) t d", c=params.num_channels), + ) + elif params.use_joint_encoder_layer == "none": + joint_layer = None + else: + raise ValueError( + f"Unknown joint encoder layer type: {params.use_joint_encoder_layer}" + ) + return joint_layer + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + decoder_dim=params.decoder_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + 
encoder_dim=int(params.encoder_dims.split(",")[-1]), + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return joiner + + +def get_surt_model( + params: AttributeDict, +) -> nn.Module: + mask_encoder = get_mask_encoder_model(params) + encoder = get_encoder_model(params) + joint_layer = get_joint_encoder_layer(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = SURT( + mask_encoder=mask_encoder, + encoder=encoder, + joint_encoder_layer=joint_layer, + decoder=decoder, + joiner=joiner, + num_channels=params.num_channels, + encoder_dim=int(params.encoder_dims.split(",")[-1]), + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + model_avg: nn.Module = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, +) -> Optional[Dict[str, Any]]: + """Load checkpoint from file. + + If params.start_batch is positive, it will load the checkpoint from + `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if + params.start_epoch is larger than 1, it will load the checkpoint from + `params.start_epoch - 1`. + + Apart from loading state dict for `model` and `optimizer` it also updates + `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer that we are using. + scheduler: + The scheduler that we are using. + Returns: + Return a dict containing previously saved training info. + """ + if params.start_batch > 0: + filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt" + elif params.start_epoch > 1: + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + else: + return None + + assert filename.is_file(), f"{filename} does not exist!" + + saved_params = load_checkpoint( + filename, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + if params.start_batch > 0: + if "cur_epoch" in saved_params: + params["start_epoch"] = saved_params["cur_epoch"] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: Union[nn.Module, DDP], + model_avg: Optional[nn.Module] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, + sampler: Optional[CutSampler] = None, + scaler: Optional[GradScaler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer used in the training. + sampler: + The sampler for the training dataset. + scaler: + The scaler used for mix precision training. 
+ """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=sampler, + scaler=scaler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def compute_heat_loss(x_masked, batch, num_channels=2) -> Tensor: + """ + Compute HEAT loss for separated sources using the output of mask encoder. + Args: + x_masked: + The output of mask encoder. It is a tensor of shape (B, T, C). + batch: + A batch of data. See `lhotse.dataset.K2SurtDatasetWithSources()` + for the content in it. + num_channels: + The number of output branches in the SURT model. + """ + B, T, D = x_masked[0].shape + device = x_masked[0].device + + # Create training targets for each channel. + targets = [] + for i in range(num_channels): + target = torch.ones_like(x_masked[i]) * LOG_EPSILON + targets.append(target) + + source_feats = batch["source_feats"] + source_boundaries = batch["source_boundaries"] + input_lens = batch["input_lens"].to(device) + # Assign sources to channels based on the HEAT criteria + for b in range(B): + cut_source_feats = source_feats[b] + cut_source_boundaries = source_boundaries[b] + last_seg_end = [0 for _ in range(num_channels)] + for source_feat, (start, end) in zip(cut_source_feats, cut_source_boundaries): + assigned = False + end = min(end, T) + source_feat = source_feat[: end - start, :] + for i in range(num_channels): + if start >= last_seg_end[i]: + targets[i][b, start:end, :] += source_feat.to(device) + last_seg_end[i] = max(end, last_seg_end[i]) + assigned = True + break + if not assigned: + min_end_channel = last_seg_end.index(min(last_seg_end)) + targets[min_end_channel][b, start:end, :] += source_feat.to(device) + last_seg_end[min_end_channel] = max(end, last_seg_end[min_end_channel]) + + # Get padding mask based on input lengths + pad_mask = torch.arange(T, device=device).expand(B, T) > input_lens.unsqueeze(1) + pad_mask = pad_mask.unsqueeze(-1) + + # Compute masked loss for each channel + losses = torch.zeros((num_channels, B, T, D), device=device) + for i in range(num_channels): + loss = nn.functional.mse_loss(x_masked[i], targets[i], reduction="none") + # Apply padding mask to loss + loss.masked_fill_(pad_mask, 0) + losses[i] = loss + + # loss: C x B x T x D. pad_mask: B x T x 1 + # We want to compute loss for each item in the batch. Each item has loss given + # by the sum over C, and average over T and D. For T, we need to use the padding. + loss = losses.sum(0).mean(-1).sum(-1) / batch["input_lens"].to(device) + return loss + + +def compute_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + sp: spm.SentencePieceProcessor, + batch: dict, + is_training: bool, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute RNN-T loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. 
When it is True, this
+ function enables autograd during computation; when it is False, it
+ disables autograd.
+ """
+ device = model.device if isinstance(model, DDP) else next(model.parameters()).device
+ feature = batch["inputs"].to(device)
+ feature_lens = batch["input_lens"].to(device)
+
+ # at entry, feature is (N, T, C)
+ assert feature.ndim == 3
+
+ # The dataloader returns text as a list of cuts, each of which is a list of channel
+ # text. We flatten this to a list where all channels are together, i.e., it looks like
+ # [utt1_ch1, utt2_ch1, ..., uttN_ch1, utt1_ch2, ...., uttN_ch2].
+ text = [val for tup in zip(*batch["text"]) for val in tup]
+ assert len(text) == len(feature) * params.num_channels
+
+ # Convert all channel texts to token IDs and create a ragged tensor.
+ y = sp.encode(text, out_type=int)
+ y = k2.RaggedTensor(y).to(device)
+
+ batch_idx_train = params.batch_idx_train
+ warm_step = params.model_warm_step
+
+ with torch.set_grad_enabled(is_training):
+ (simple_loss, pruned_loss, ctc_loss, x_masked) = model(
+ x=feature,
+ x_lens=feature_lens,
+ y=y,
+ prune_range=params.prune_range,
+ am_scale=params.am_scale,
+ lm_scale=params.lm_scale,
+ reduction="none",
+ subsampling_factor=params.subsampling_factor,
+ )
+ simple_loss_is_finite = torch.isfinite(simple_loss)
+ pruned_loss_is_finite = torch.isfinite(pruned_loss)
+ ctc_loss_is_finite = torch.isfinite(ctc_loss)
+
+ # Compute HEAT loss
+ if is_training and params.heat_loss_scale > 0.0:
+ heat_loss = compute_heat_loss(
+ x_masked, batch, num_channels=params.num_channels
+ )
+ else:
+ heat_loss = torch.tensor(0.0, device=device)
+
+ heat_loss_is_finite = torch.isfinite(heat_loss)
+ is_finite = (
+ simple_loss_is_finite
+ & pruned_loss_is_finite
+ & ctc_loss_is_finite
+ & heat_loss_is_finite
+ )
+ if not torch.all(is_finite):
+ logging.info(
+ "Not all losses are finite!\n"
+ f"simple_losses: {simple_loss}\n"
+ f"pruned_losses: {pruned_loss}\n"
+ f"ctc_losses: {ctc_loss}\n"
+ f"heat_losses: {heat_loss}\n"
+ )
+ display_and_save_batch(batch, params=params, sp=sp)
+ simple_loss = simple_loss[simple_loss_is_finite]
+ pruned_loss = pruned_loss[pruned_loss_is_finite]
+ ctc_loss = ctc_loss[ctc_loss_is_finite]
+ heat_loss = heat_loss[heat_loss_is_finite]
+
+ # If any of the losses is inf or nan for every utterance in the
+ # batch, we stop the training process by raising an exception
+ if (
+ torch.all(~simple_loss_is_finite)
+ or torch.all(~pruned_loss_is_finite)
+ or torch.all(~ctc_loss_is_finite)
+ or torch.all(~heat_loss_is_finite)
+ ):
+ raise ValueError(
+ "There are too many utterances in this batch "
+ "leading to inf or nan losses."
+ )
+
+ simple_loss_sum = simple_loss.sum()
+ pruned_loss_sum = pruned_loss.sum()
+ ctc_loss_sum = ctc_loss.sum()
+ heat_loss_sum = heat_loss.sum()
+
+ s = params.simple_loss_scale
+ # take down the scale on the simple loss from 1.0 at the start
+ # to params.simple_loss_scale by warm_step.
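+ # For example (editor's sketch, using the defaults warm_step = 5000
+ # and s = 0.5): the simple-loss scale starts at 1.0, is
+ # 1.0 - 0.5 * (1.0 - 0.5) = 0.75 at batch 2500, and stays at s = 0.5
+ # from batch 5000 onwards, while the pruned-loss scale ramps from
+ # 0.1 up to 1.0 over the same window.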
+ simple_loss_scale = (
+ s
+ if batch_idx_train >= warm_step
+ else 1.0 - (batch_idx_train / warm_step) * (1.0 - s)
+ )
+ pruned_loss_scale = (
+ 1.0
+ if batch_idx_train >= warm_step
+ else 0.1 + 0.9 * (batch_idx_train / warm_step)
+ )
+ loss = (
+ simple_loss_scale * simple_loss_sum
+ + pruned_loss_scale * pruned_loss_sum
+ + params.ctc_loss_scale * ctc_loss_sum
+ + params.heat_loss_scale * heat_loss_sum
+ )
+
+ assert loss.requires_grad == is_training
+
+ info = MetricsTracker()
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ # info["frames"] is an approximate number for two reasons:
+ # (1) The actual subsampling factor is ((lens - 1) // 2 - 1) // 2
+ # (2) If some utterances in the batch lead to inf/nan loss, they
+ # are filtered out.
+ info["frames"] = (feature_lens // params.subsampling_factor).sum().item()
+
+ # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa
+ info["utterances"] = feature.size(0)
+ # averaged input duration in frames over utterances
+ info["utt_duration"] = feature_lens.sum().item()
+ # averaged padding proportion over utterances
+ info["utt_pad_proportion"] = (
+ ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
+ )
+
+ # Note: We use reduction=sum while computing the loss.
+ info["loss"] = loss.detach().cpu().item()
+ info["simple_loss"] = simple_loss_sum.detach().cpu().item()
+ info["pruned_loss"] = pruned_loss_sum.detach().cpu().item()
+ if params.ctc_loss_scale > 0.0:
+ info["ctc_loss"] = ctc_loss_sum.detach().cpu().item()
+ if params.heat_loss_scale > 0.0:
+ info["heat_loss"] = heat_loss_sum.detach().cpu().item()
+
+ return loss, info
+
+
+def compute_validation_loss(
+ params: AttributeDict,
+ model: Union[nn.Module, DDP],
+ sp: spm.SentencePieceProcessor,
+ valid_dl: torch.utils.data.DataLoader,
+ world_size: int = 1,
+) -> MetricsTracker:
+ """Run the validation process."""
+ model.eval()
+
+ tot_loss = MetricsTracker()
+
+ for batch_idx, batch in enumerate(valid_dl):
+ loss, loss_info = compute_loss(
+ params=params,
+ model=model,
+ sp=sp,
+ batch=batch,
+ is_training=False,
+ )
+ assert loss.requires_grad is False
+ tot_loss = tot_loss + loss_info
+
+ if world_size > 1:
+ tot_loss.reduce(loss.device)
+
+ loss_value = tot_loss["loss"] / tot_loss["frames"]
+ if loss_value < params.best_valid_loss:
+ params.best_valid_epoch = params.cur_epoch
+ params.best_valid_loss = loss_value
+
+ return tot_loss
+
+
+def train_one_epoch(
+ params: AttributeDict,
+ model: Union[nn.Module, DDP],
+ optimizer: torch.optim.Optimizer,
+ scheduler: LRSchedulerType,
+ sp: spm.SentencePieceProcessor,
+ train_dl: torch.utils.data.DataLoader,
+ valid_dl: torch.utils.data.DataLoader,
+ scaler: GradScaler,
+ model_avg: Optional[nn.Module] = None,
+ tb_writer: Optional[SummaryWriter] = None,
+ world_size: int = 1,
+ rank: int = 0,
+) -> None:
+ """Train the model for one epoch.
+
+ The training loss from the mean of all frames is saved in
+ `params.train_loss`. It runs the validation process every
+ `params.valid_interval` batches.
+
+ Args:
+ params:
+ It is returned by :func:`get_params`.
+ model:
+ The model for training.
+ optimizer:
+ The optimizer we are using.
+ scheduler:
+ The learning rate scheduler; we call step() after every batch.
+ train_dl:
+ Dataloader for the training dataset.
+ valid_dl:
+ Dataloader for the validation dataset.
+ scaler:
+ The scaler used for mixed precision training.
+ model_avg:
+ The stored model averaged from the start of training.
+ tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. + """ + torch.cuda.empty_cache() + model.train() + + tot_loss = MetricsTracker() + + cur_batch_idx = params.get("cur_batch_idx", 0) + + for batch_idx, batch in enumerate(train_dl): + if batch_idx < cur_batch_idx: + continue + cur_batch_idx = batch_idx + + params.batch_idx_train += 1 + batch_size = batch["inputs"].shape[0] + + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. + scaler.scale(loss).backward() + set_batch_count(model, params.batch_idx_train) + scheduler.step_batch(params.batch_idx_train) + + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + except: # noqa + display_and_save_batch(batch, params=params, sp=sp) + raise + + if params.print_diagnostics and batch_idx == 5: + return + + if ( + rank == 0 + and params.batch_idx_train > 0 + and params.batch_idx_train % params.average_period == 0 + ): + update_averaged_model( + params=params, + model_cur=model, + model_avg=model_avg, + ) + + if ( + params.batch_idx_train > 0 + and params.batch_idx_train % params.save_every_n == 0 + ): + params.cur_batch_idx = batch_idx + save_checkpoint_with_global_batch_idx( + out_dir=params.exp_dir, + global_batch_idx=params.batch_idx_train, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + del params.cur_batch_idx + remove_checkpoints( + out_dir=params.exp_dir, + topk=params.keep_last_k, + rank=rank, + ) + + if batch_idx % 100 == 0 and params.use_fp16: + # If the grad scale was less than 1, try increasing it. The _growth_interval + # of the grad scaler is configurable, but we can't configure it to have different + # behavior depending on the current grad scale. 
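+ # Concretely (editor's note): the scale is inspected every 100
+ # batches; if overflows have pushed it below 1.0, or it is still
+ # below 8.0 at a 400-batch boundary, it is doubled by hand, since a
+ # persistently small scale quantizes fp16 gradients too coarsely.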
+ cur_grad_scale = scaler._scale.item() + if cur_grad_scale < 1.0 or (cur_grad_scale < 8.0 and batch_idx % 400 == 0): + scaler.update(cur_grad_scale * 2.0) + if cur_grad_scale < 0.01: + logging.warning(f"Grad scale is small: {cur_grad_scale}") + if cur_grad_scale < 1.0e-05: + raise RuntimeError( + f"grad_scale is too small, exiting: {cur_grad_scale}" + ) + + if batch_idx % params.log_interval == 0: + cur_lr = scheduler.get_last_lr()[0] + cur_grad_scale = scaler._scale.item() if params.use_fp16 else 1.0 + + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}, " + + (f"grad_scale: {scaler._scale.item()}" if params.use_fp16 else "") + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary(tb_writer, "train/tot_", params.batch_idx_train) + if params.use_fp16: + tb_writer.add_scalar( + "train/grad_scale", cur_grad_scale, params.batch_idx_train + ) + + if batch_idx % params.valid_interval == 0 and not params.print_diagnostics: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + sp=sp, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + logging.info( + f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB" + ) + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+ args:
+ The return value of get_parser().parse_args()
+ """
+ params = get_params()
+ params.update(vars(args))
+
+ fix_random_seed(params.seed)
+ if world_size > 1:
+ setup_dist(rank, world_size, params.master_port)
+
+ setup_logger(f"{params.exp_dir}/log/log-train")
+ logging.info("Training started")
+
+ if args.tensorboard and rank == 0:
+ tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
+ else:
+ tb_writer = None
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", rank)
+ logging.info(f"Device: {device}")
+
+ sp = spm.SentencePieceProcessor()
+ sp.load(params.bpe_model)
+
+ # <blk> is defined in local/train_bpe_model.py
+ params.blank_id = sp.piece_to_id("<blk>")
+ params.vocab_size = sp.get_piece_size()
+
+ logging.info(params)
+
+ logging.info("About to create model")
+ model = get_surt_model(params)
+
+ num_param = sum([p.numel() for p in model.parameters()])
+ logging.info(f"Number of model parameters: {num_param}")
+
+ assert params.save_every_n >= params.average_period
+ model_avg: Optional[nn.Module] = None
+ if rank == 0:
+ # model_avg is only used with rank 0
+ model_avg = copy.deepcopy(model)
+
+ assert params.start_epoch > 0, params.start_epoch
+ checkpoints = load_checkpoint_if_available(
+ params=params, model=model, model_avg=model_avg
+ )
+
+ model.to(device)
+
+ if checkpoints is None and params.model_init_ckpt is not None:
+ logging.info(
+ f"Initializing model with checkpoint from {params.model_init_ckpt}"
+ )
+ init_ckpt = torch.load(params.model_init_ckpt, map_location=device)
+ model.load_state_dict(init_ckpt["model"], strict=False)
+
+ if world_size > 1:
+ logging.info("Using DDP")
+ model = DDP(model, device_ids=[rank], find_unused_parameters=True)
+
+ parameters_names = []
+ parameters_names.append(
+ [name_param_pair[0] for name_param_pair in model.named_parameters()]
+ )
+ optimizer = ScaledAdam(
+ model.parameters(),
+ lr=params.base_lr,
+ clipping_scale=2.0,
+ parameters_names=parameters_names,
+ )
+
+ scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs)
+
+ if checkpoints and "optimizer" in checkpoints:
+ logging.info("Loading optimizer state dict")
+ optimizer.load_state_dict(checkpoints["optimizer"])
+
+ if (
+ checkpoints
+ and "scheduler" in checkpoints
+ and checkpoints["scheduler"] is not None
+ ):
+ logging.info("Loading scheduler state dict")
+ scheduler.load_state_dict(checkpoints["scheduler"])
+
+ if params.print_diagnostics:
+ diagnostic = diagnostics.attach_diagnostics(model)
+
+ ami = AmiAsrDataModule(args)
+
+ train_cuts = ami.aimix_train_cuts(rvb_affix="comb", sources=True)
+ dev_cuts = ami.ami_cuts(split="dev", type="ihm-mix")
+ dev_cuts = dev_cuts.trim_to_supervision_groups(max_pause=0.0).filter(
+ lambda c: 0.2 <= c.duration <= 60.0
+ )
+
+ if params.start_batch > 0 and checkpoints and "sampler" in checkpoints:
+ # We only load the sampler's state dict when it loads a checkpoint
+ # saved in the middle of an epoch
+ sampler_state_dict = checkpoints["sampler"]
+ else:
+ sampler_state_dict = None
+
+ train_dl = ami.train_dataloaders(
+ train_cuts,
+ sampler_state_dict=sampler_state_dict,
+ sources=True,
+ )
+ valid_dl = ami.valid_dataloaders(dev_cuts)
+
+ scaler = GradScaler(enabled=params.use_fp16, init_scale=1.0)
+ if checkpoints and "grad_scaler" in checkpoints:
+ logging.info("Loading grad scaler state dict")
+ scaler.load_state_dict(checkpoints["grad_scaler"])
+
+ for epoch in range(params.start_epoch, params.num_epochs + 1):
+ scheduler.step_epoch(epoch - 1)
+
fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sp=sp, + train_dl=train_dl, + valid_dl=valid_dl, + scaler=scaler, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + if params.print_diagnostics: + diagnostic.print_diagnostics() + break + + save_checkpoint( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def display_and_save_batch( + batch: dict, + params: AttributeDict, + sp: spm.SentencePieceProcessor, +) -> None: + """Display the batch statistics and save the batch into disk. + + Args: + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + params: + Parameters for training. See :func:`get_params`. + sp: + The BPE model. + """ + from lhotse.utils import uuid4 + + filename = f"{params.exp_dir}/batch-{uuid4()}.pt" + logging.info(f"Saving batch to {filename}") + torch.save(batch, filename) + + features = batch["inputs"] + + logging.info(f"features shape: {features.shape}") + + y = [sp.encode(text_ch) for text_ch in batch["text"]] + num_tokens = [sum(len(yi) for yi in y_ch) for y_ch in y] + logging.info(f"num tokens: {num_tokens}") + + +def main(): + parser = get_parser() + AmiAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) +torch.multiprocessing.set_sharing_strategy("file_system") + +if __name__ == "__main__": + main() diff --git a/egs/ami/SURT/dprnn_zipformer/train_adapt.py b/egs/ami/SURT/dprnn_zipformer/train_adapt.py new file mode 100755 index 000000000..9f3b4425f --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/train_adapt.py @@ -0,0 +1,1411 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang, +# Mingshuang Luo,) +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: + +# ./dprnn_zipformer/train.py should be run before this script. 
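+# The adaptation run below is initialized from the checkpoint produced by
+# that pre-training run (passed via --model-init-ckpt).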
+
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+
+./dprnn_zipformer/train_adapt.py \
+ --world-size 4 \
+ --num-epochs 30 \
+ --start-epoch 1 \
+ --use-fp16 1 \
+ --exp-dir dprnn_zipformer/exp_adapt \
+ --model-init-ckpt dprnn_zipformer/exp/epoch-30.pt \
+ --max-duration 550
+"""
+
+import argparse
+import copy
+import logging
+import warnings
+from itertools import chain
+from pathlib import Path
+from shutil import copyfile
+from typing import Any, Dict, Optional, Tuple, Union
+
+import k2
+import optim
+import sentencepiece as spm
+import torch
+import torch.multiprocessing as mp
+import torch.nn as nn
+from asr_datamodule import AmiAsrDataModule
+from decoder import Decoder
+from dprnn import DPRNN
+from einops.layers.torch import Rearrange
+from joiner import Joiner
+from lhotse.cut import Cut
+from lhotse.dataset.sampling.base import CutSampler
+from lhotse.utils import LOG_EPSILON, fix_random_seed
+from model import SURT
+from optim import Eden, ScaledAdam
+from scaling import ScaledLinear, ScaledLSTM
+from torch import Tensor
+from torch.cuda.amp import GradScaler
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.utils.tensorboard import SummaryWriter
+from zipformer import Zipformer
+
+from icefall import diagnostics
+from icefall.checkpoint import load_checkpoint, remove_checkpoints
+from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
+from icefall.checkpoint import (
+ save_checkpoint_with_global_batch_idx,
+ update_averaged_model,
+)
+from icefall.dist import cleanup_dist, setup_dist
+from icefall.env import get_env_info
+from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool
+
+LRSchedulerType = Union[torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler]
+
+
+def set_batch_count(model: Union[nn.Module, DDP], batch_count: float) -> None:
+ if isinstance(model, DDP):
+ # get underlying nn.Module
+ model = model.module
+ for module in model.modules():
+ if hasattr(module, "batch_count"):
+ module.batch_count = batch_count
+
+
+def add_model_arguments(parser: argparse.ArgumentParser):
+ parser.add_argument(
+ "--num-mask-encoder-layers",
+ type=int,
+ default=4,
+ help="Number of layers in the DPRNN based mask encoder.",
+ )
+
+ parser.add_argument(
+ "--mask-encoder-dim",
+ type=int,
+ default=256,
+ help="Hidden dimension of the LSTM blocks in DPRNN.",
+ )
+
+ parser.add_argument(
+ "--mask-encoder-segment-size",
+ type=int,
+ default=32,
+ help="Segment size of the SegLSTM in DPRNN.
Ideally, this should be equal to the " + "decode-chunk-length of the zipformer encoder.", + ) + + parser.add_argument( + "--chunk-width-randomization", + type=bool, + default=False, + help="Whether to randomize the chunk width in DPRNN.", + ) + + # Zipformer config is based on: + # https://github.com/k2-fsa/icefall/pull/745#issuecomment-1405282740 + parser.add_argument( + "--num-encoder-layers", + type=str, + default="2,2,2,2,2", + help="Number of zipformer encoder layers, comma separated.", + ) + + parser.add_argument( + "--feedforward-dims", + type=str, + default="768,768,768,768,768", + help="Feedforward dimension of the zipformer encoder layers, comma separated.", + ) + + parser.add_argument( + "--nhead", + type=str, + default="8,8,8,8,8", + help="Number of attention heads in the zipformer encoder layers.", + ) + + parser.add_argument( + "--encoder-dims", + type=str, + default="256,256,256,256,256", + help="Embedding dimension in the 2 blocks of zipformer encoder layers, comma separated", + ) + + parser.add_argument( + "--attention-dims", + type=str, + default="192,192,192,192,192", + help="""Attention dimension in the 2 blocks of zipformer encoder layers, comma separated; + not the same as embedding dimension.""", + ) + + parser.add_argument( + "--encoder-unmasked-dims", + type=str, + default="192,192,192,192,192", + help="Unmasked dimensions in the encoders, relates to augmentation during training. " + "Must be <= each of encoder_dims. Empirically, less than 256 seems to make performance " + " worse.", + ) + + parser.add_argument( + "--zipformer-downsampling-factors", + type=str, + default="1,2,4,8,2", + help="Downsampling factor for each stack of encoder layers.", + ) + + parser.add_argument( + "--cnn-module-kernels", + type=str, + default="31,31,31,31,31", + help="Sizes of kernels in convolution modules", + ) + + parser.add_argument( + "--use-joint-encoder-layer", + type=str, + default="linear", + choices=["linear", "lstm", "none"], + help="Whether to use a joint layer to combine all branches.", + ) + + parser.add_argument( + "--decoder-dim", + type=int, + default=512, + help="Embedding dimension in the decoder model.", + ) + + parser.add_argument( + "--joiner-dim", + type=int, + default=512, + help="""Dimension used in the joiner model. + Outputs from the encoder and decoder model are projected + to this dimension before adding. + """, + ) + + parser.add_argument( + "--short-chunk-size", + type=int, + default=50, + help="""Chunk length of dynamic training, the chunk size would be either + max sequence length of current batch or uniformly sampled from (1, short_chunk_size). 
+ """, + ) + + parser.add_argument( + "--num-left-chunks", + type=int, + default=4, + help="How many left context can be seen in chunks when calculating attention.", + ) + + parser.add_argument( + "--decode-chunk-len", + type=int, + default=32, + help="The chunk size for decoding (in frames before subsampling)", + ) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=20, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=1, + help="""Resume training from this epoch. It should be positive. + If larger than 1, it will load checkpoint from + exp-dir/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--start-batch", + type=int, + default=0, + help="""If positive, --start-epoch is ignored and + it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="conv_lstm_transducer_stateless_ctc/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--model-init-ckpt", + type=str, + default=None, + help="""The model checkpoint to initialize the model (either full or part). + If not specified, the model is randomly initialized. + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--base-lr", type=float, default=0.0001, help="The base learning rate." + ) + + parser.add_argument( + "--lr-batches", + type=float, + default=5000, + help="""Number of steps that affects how rapidly the learning rate + decreases. We suggest not to change this.""", + ) + + parser.add_argument( + "--lr-epochs", + type=float, + default=2, + help="""Number of epochs that affects how rapidly the learning rate decreases. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; 2 means tri-gram", + ) + + parser.add_argument( + "--prune-range", + type=int, + default=5, + help="The prune range for rnnt loss, it means how many symbols(context)" + "we are using to compute the loss", + ) + + parser.add_argument( + "--lm-scale", + type=float, + default=0.25, + help="The scale to smooth the loss with lm " + "(output of prediction network) part.", + ) + + parser.add_argument( + "--am-scale", + type=float, + default=0.0, + help="The scale to smooth the loss with am (output of encoder network) part.", + ) + + parser.add_argument( + "--simple-loss-scale", + type=float, + default=0.5, + help="To get pruning ranges, we will calculate a simple version" + "loss(joiner is just addition), this simple loss also uses for" + "training (as a regularization item). 
We will scale the simple loss"
+ "with this parameter before adding to the final loss.",
+ )
+
+ parser.add_argument(
+ "--ctc-loss-scale",
+ type=float,
+ default=0.2,
+ help="Scale for CTC loss.",
+ )
+
+ parser.add_argument(
+ "--seed",
+ type=int,
+ default=42,
+ help="The seed for random generators intended for reproducibility.",
+ )
+
+ parser.add_argument(
+ "--print-diagnostics",
+ type=str2bool,
+ default=False,
+ help="Accumulate stats on activations, print them and exit.",
+ )
+
+ parser.add_argument(
+ "--save-every-n",
+ type=int,
+ default=2000,
+ help="""Save checkpoint after processing this number of batches
+ periodically. We save checkpoint to exp-dir/ whenever
+ params.batch_idx_train % save_every_n == 0. The checkpoint filename
+ has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt'
+ Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the
+ end of each epoch where `xxx` is the epoch number counting from 1.
+ """,
+ )
+
+ parser.add_argument(
+ "--keep-last-k",
+ type=int,
+ default=1,
+ help="""Only keep this number of checkpoints on disk.
+ For instance, if it is 3, there are only 3 checkpoints
+ in the exp-dir with filenames `checkpoint-xxx.pt`.
+ It does not affect checkpoints with name `epoch-xxx.pt`.
+ """,
+ )
+
+ parser.add_argument(
+ "--average-period",
+ type=int,
+ default=100,
+ help="""Update the averaged model, namely `model_avg`, after processing
+ this number of batches. `model_avg` is a separate version of model,
+ in which each floating-point parameter is the average of all the
+ parameters from the start of training. Each time we take the average,
+ we do: `model_avg = model * (average_period / batch_idx_train) +
+ model_avg * ((batch_idx_train - average_period) / batch_idx_train)`.
+ """,
+ )
+
+ parser.add_argument(
+ "--use-fp16",
+ type=str2bool,
+ default=False,
+ help="Whether to use half precision training.",
+ )
+
+ add_model_arguments(parser)
+
+ return parser
+
+
+def get_params() -> AttributeDict:
+ """Return a dict containing training parameters.
+
+ All training related parameters that are not passed from the commandline
+ are saved in the variable `params`.
+
+ Commandline options are merged into `params` after they are parsed, so
+ you can also access them via `params`.
+
+ Explanation of options saved in `params`:
+
+ - best_train_loss: Best training loss so far. It is used to select
+ the model that has the lowest training loss. It is
+ updated during the training.
+
+ - best_valid_loss: Best validation loss so far. It is used to select
+ the model that has the lowest validation loss. It is
+ updated during the training.
+
+ - best_train_epoch: It is the epoch that has the best training loss.
+
+ - best_valid_epoch: It is the epoch that has the best validation loss.
+
+ - batch_idx_train: Used for writing statistics to tensorboard. It
+ contains the number of batches trained so far across
+ epochs.
+
+ - log_interval: Print training loss if `batch_idx % log_interval` is 0
+
+ - reset_interval: Reset statistics if batch_idx % reset_interval is 0
+
+ - valid_interval: Run validation if batch_idx % valid_interval is 0
+
+ - feature_dim: The model input dim. It has to match the one used
+ in computing features.
+
+ - subsampling_factor: The subsampling factor for the model.
+
+ - num_decoder_layers: Number of decoder layers of a transformer decoder
+ (unused in this recipe, which has a stateless decoder).
+
+ - warm_step: The warm-up period; this recipe stores it as
+ `model_warm_step`, which schedules the loss scales in
+ :func:`compute_loss` (there is no Noam optimizer here).
+ """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 2000, + # parameters for SURT + "num_channels": 2, + "feature_dim": 80, + "subsampling_factor": 4, # not passed in, this is fixed + # parameters for Noam + "model_warm_step": 5000, # arg given to model, not for lrate + # parameters for ctc loss + "beam_size": 10, + "use_double_scores": True, + "env_info": get_env_info(), + } + ) + + return params + + +def get_mask_encoder_model(params: AttributeDict) -> nn.Module: + mask_encoder = DPRNN( + feature_dim=params.feature_dim, + input_size=params.mask_encoder_dim, + hidden_size=params.mask_encoder_dim, + output_size=params.feature_dim * params.num_channels, + segment_size=params.mask_encoder_segment_size, + num_blocks=params.num_mask_encoder_layers, + chunk_width_randomization=params.chunk_width_randomization, + ) + return mask_encoder + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + # TODO: We can add an option to switch between Zipformer and Transformer + def to_int_tuple(s: str): + return tuple(map(int, s.split(","))) + + encoder = Zipformer( + num_features=params.feature_dim, + output_downsampling_factor=2, + zipformer_downsampling_factors=to_int_tuple( + params.zipformer_downsampling_factors + ), + encoder_dims=to_int_tuple(params.encoder_dims), + attention_dim=to_int_tuple(params.attention_dims), + encoder_unmasked_dims=to_int_tuple(params.encoder_unmasked_dims), + nhead=to_int_tuple(params.nhead), + feedforward_dim=to_int_tuple(params.feedforward_dims), + cnn_module_kernels=to_int_tuple(params.cnn_module_kernels), + num_encoder_layers=to_int_tuple(params.num_encoder_layers), + num_left_chunks=params.num_left_chunks, + short_chunk_size=params.short_chunk_size, + decode_chunk_size=params.decode_chunk_len // 2, + ) + return encoder + + +def get_joint_encoder_layer(params: AttributeDict) -> nn.Module: + class TakeFirst(nn.Module): + def forward(self, x): + return x[0] + + if params.use_joint_encoder_layer == "linear": + encoder_dim = int(params.encoder_dims.split(",")[-1]) + joint_layer = nn.Sequential( + Rearrange("(c b) t d -> b t (c d)", c=params.num_channels), + nn.Linear( + params.num_channels * encoder_dim, params.num_channels * encoder_dim + ), + nn.ReLU(), + Rearrange("b t (c d) -> (c b) t d", c=params.num_channels), + ) + elif params.use_joint_encoder_layer == "lstm": + encoder_dim = int(params.encoder_dims.split(",")[-1]) + joint_layer = nn.Sequential( + Rearrange("(c b) t d -> b t (c d)", c=params.num_channels), + ScaledLSTM( + input_size=params.num_channels * encoder_dim, + hidden_size=params.num_channels * encoder_dim, + num_layers=1, + bias=True, + batch_first=True, + dropout=0.0, + bidirectional=False, + ), + TakeFirst(), + nn.ReLU(), + Rearrange("b t (c d) -> (c b) t d", c=params.num_channels), + ) + elif params.use_joint_encoder_layer == "none": + joint_layer = None + else: + raise ValueError( + f"Unknown joint encoder layer type: {params.use_joint_encoder_layer}" + ) + return joint_layer + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + decoder_dim=params.decoder_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + encoder_dim=int(params.encoder_dims.split(",")[-1]), + 
decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return joiner + + +def get_surt_model( + params: AttributeDict, +) -> nn.Module: + mask_encoder = get_mask_encoder_model(params) + encoder = get_encoder_model(params) + joint_layer = get_joint_encoder_layer(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = SURT( + mask_encoder=mask_encoder, + encoder=encoder, + joint_encoder_layer=joint_layer, + decoder=decoder, + joiner=joiner, + num_channels=params.num_channels, + encoder_dim=int(params.encoder_dims.split(",")[-1]), + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + model_avg: nn.Module = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, +) -> Optional[Dict[str, Any]]: + """Load checkpoint from file. + + If params.start_batch is positive, it will load the checkpoint from + `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if + params.start_epoch is larger than 1, it will load the checkpoint from + `params.start_epoch - 1`. + + Apart from loading state dict for `model` and `optimizer` it also updates + `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer that we are using. + scheduler: + The scheduler that we are using. + Returns: + Return a dict containing previously saved training info. + """ + if params.start_batch > 0: + filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt" + elif params.start_epoch > 1: + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + else: + return None + + assert filename.is_file(), f"{filename} does not exist!" + + saved_params = load_checkpoint( + filename, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + if params.start_batch > 0: + if "cur_epoch" in saved_params: + params["start_epoch"] = saved_params["cur_epoch"] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: Union[nn.Module, DDP], + model_avg: Optional[nn.Module] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, + sampler: Optional[CutSampler] = None, + scaler: Optional[GradScaler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer used in the training. + sampler: + The sampler for the training dataset. + scaler: + The scaler used for mix precision training. 
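+ scheduler:
+ The learning rate scheduler used in the training.
+ rank:
+ The rank of the node in DDP training. Only the node with rank 0
+ writes checkpoints to disk.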
+ """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=sampler, + scaler=scaler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def compute_heat_loss(x_masked, batch, num_channels=2) -> Tensor: + """ + Compute HEAT loss for separated sources using the output of mask encoder. + Args: + x_masked: + The output of mask encoder. It is a tensor of shape (B, T, C). + batch: + A batch of data. See `lhotse.dataset.K2SurtDatasetWithSources()` + for the content in it. + num_channels: + The number of output branches in the SURT model. + """ + B, T, D = x_masked[0].shape + device = x_masked[0].device + + # Create training targets for each channel. + targets = [] + for i in range(num_channels): + target = torch.ones_like(x_masked[i]) * LOG_EPSILON + targets.append(target) + + source_feats = batch["source_feats"] + source_boundaries = batch["source_boundaries"] + input_lens = batch["input_lens"].to(device) + # Assign sources to channels based on the HEAT criteria + for b in range(B): + cut_source_feats = source_feats[b] + cut_source_boundaries = source_boundaries[b] + last_seg_end = [0 for _ in range(num_channels)] + for source_feat, (start, end) in zip(cut_source_feats, cut_source_boundaries): + assigned = False + for i in range(num_channels): + if start >= last_seg_end[i]: + targets[i][b, start:end, :] += source_feat.to(device) + last_seg_end[i] = max(end, last_seg_end[i]) + assigned = True + break + if not assigned: + min_end_channel = last_seg_end.index(min(last_seg_end)) + targets[min_end_channel][b, start:end, :] += source_feat + last_seg_end[min_end_channel] = max(end, last_seg_end[min_end_channel]) + + # Get padding mask based on input lengths + pad_mask = torch.arange(T, device=device).expand(B, T) > input_lens.unsqueeze(1) + pad_mask = pad_mask.unsqueeze(-1) + + # Compute masked loss for each channel + losses = torch.zeros((num_channels, B, T, D), device=device) + for i in range(num_channels): + loss = nn.functional.mse_loss(x_masked[i], targets[i], reduction="none") + # Apply padding mask to loss + loss.masked_fill_(pad_mask, 0) + losses[i] = loss + + # loss: C x B x T x D. pad_mask: B x T x 1 + # We want to compute loss for each item in the batch. Each item has loss given + # by the sum over C, and average over T and D. For T, we need to use the padding. + loss = losses.sum(0).mean(-1).sum(-1) / batch["input_lens"].to(device) + return loss + + +def compute_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + sp: spm.SentencePieceProcessor, + batch: dict, + is_training: bool, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute RNN-T loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. 
+ """ + device = model.device if isinstance(model, DDP) else next(model.parameters()).device + feature = batch["inputs"].to(device) + feature_lens = batch["input_lens"].to(device) + + # at entry, feature is (N, T, C) + assert feature.ndim == 3 + + # The dataloader returns text as a list of cuts, each of which is a list of channel + # text. We flatten this to a list where all channels are together, i.e., it looks like + # [utt1_ch1, utt2_ch1, ..., uttN_ch1, utt1_ch2, ...., uttN,ch2]. + text = [val for tup in zip(*batch["text"]) for val in tup] + assert len(text) == len(feature) * params.num_channels + + # Convert all channel texts to token IDs and create a ragged tensor. + y = sp.encode(text, out_type=int) + y = k2.RaggedTensor(y).to(device) + + batch_idx_train = params.batch_idx_train + warm_step = params.model_warm_step + + with torch.set_grad_enabled(is_training): + (simple_loss, pruned_loss, ctc_loss, x_masked) = model( + x=feature, + x_lens=feature_lens, + y=y, + prune_range=params.prune_range, + am_scale=params.am_scale, + lm_scale=params.lm_scale, + reduction="none", + subsampling_factor=params.subsampling_factor, + ) + simple_loss_is_finite = torch.isfinite(simple_loss) + pruned_loss_is_finite = torch.isfinite(pruned_loss) + ctc_loss_is_finite = torch.isfinite(ctc_loss) + + # Compute HEAT loss + if is_training and params.heat_loss_scale > 0.0: + heat_loss = compute_heat_loss( + x_masked, batch, num_channels=params.num_channels + ) + else: + heat_loss = torch.tensor(0.0, device=device) + + heat_loss_is_finite = torch.isfinite(heat_loss) + is_finite = ( + simple_loss_is_finite + & pruned_loss_is_finite + & ctc_loss_is_finite + & heat_loss_is_finite + ) + if not torch.all(is_finite): + # logging.info( + # "Not all losses are finite!\n" + # f"simple_losses: {simple_loss}\n" + # f"pruned_losses: {pruned_loss}\n" + # f"ctc_losses: {ctc_loss}\n" + # f"heat_losses: {heat_loss}\n" + # ) + # display_and_save_batch(batch, params=params, sp=sp) + simple_loss = simple_loss[simple_loss_is_finite] + pruned_loss = pruned_loss[pruned_loss_is_finite] + ctc_loss = ctc_loss[ctc_loss_is_finite] + heat_loss = heat_loss[heat_loss_is_finite] + + # If either all simple_loss or pruned_loss is inf or nan, + # we stop the training process by raising an exception + if ( + torch.all(~simple_loss_is_finite) + or torch.all(~pruned_loss_is_finite) + or torch.all(~ctc_loss_is_finite) + or torch.all(~heat_loss_is_finite) + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) + + simple_loss_sum = simple_loss.sum() + pruned_loss_sum = pruned_loss.sum() + ctc_loss_sum = ctc_loss.sum() + heat_loss_sum = heat_loss.sum() + + s = params.simple_loss_scale + # take down the scale on the simple loss from 1.0 at the start + # to params.simple_loss scale by warm_step. 
+ simple_loss_scale = (
+ s
+ if batch_idx_train >= warm_step
+ else 1.0 - (batch_idx_train / warm_step) * (1.0 - s)
+ )
+ pruned_loss_scale = (
+ 1.0
+ if batch_idx_train >= warm_step
+ else 0.1 + 0.9 * (batch_idx_train / warm_step)
+ )
+ loss = (
+ simple_loss_scale * simple_loss_sum
+ + pruned_loss_scale * pruned_loss_sum
+ + params.ctc_loss_scale * ctc_loss_sum
+ + params.heat_loss_scale * heat_loss_sum
+ )
+
+ assert loss.requires_grad == is_training
+
+ info = MetricsTracker()
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ # info["frames"] is an approximate number for two reasons:
+ # (1) The actual subsampling factor is ((lens - 1) // 2 - 1) // 2
+ # (2) If some utterances in the batch lead to inf/nan loss, they
+ # are filtered out.
+ info["frames"] = (feature_lens // params.subsampling_factor).sum().item()
+
+ # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa
+ info["utterances"] = feature.size(0)
+ # averaged input duration in frames over utterances
+ info["utt_duration"] = feature_lens.sum().item()
+ # averaged padding proportion over utterances
+ info["utt_pad_proportion"] = (
+ ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
+ )
+
+ # Note: We use reduction=sum while computing the loss.
+ info["loss"] = loss.detach().cpu().item()
+ info["simple_loss"] = simple_loss_sum.detach().cpu().item()
+ info["pruned_loss"] = pruned_loss_sum.detach().cpu().item()
+ if params.ctc_loss_scale > 0.0:
+ info["ctc_loss"] = ctc_loss_sum.detach().cpu().item()
+ if params.heat_loss_scale > 0.0:
+ info["heat_loss"] = heat_loss_sum.detach().cpu().item()
+
+ return loss, info
+
+
+def compute_validation_loss(
+ params: AttributeDict,
+ model: Union[nn.Module, DDP],
+ sp: spm.SentencePieceProcessor,
+ valid_dl: torch.utils.data.DataLoader,
+ world_size: int = 1,
+) -> MetricsTracker:
+ """Run the validation process."""
+ model.eval()
+
+ tot_loss = MetricsTracker()
+
+ for batch_idx, batch in enumerate(valid_dl):
+ loss, loss_info = compute_loss(
+ params=params,
+ model=model,
+ sp=sp,
+ batch=batch,
+ is_training=False,
+ )
+ assert loss.requires_grad is False
+ tot_loss = tot_loss + loss_info
+
+ if world_size > 1:
+ tot_loss.reduce(loss.device)
+
+ loss_value = tot_loss["loss"] / tot_loss["frames"]
+ if loss_value < params.best_valid_loss:
+ params.best_valid_epoch = params.cur_epoch
+ params.best_valid_loss = loss_value
+
+ return tot_loss
+
+
+def train_one_epoch(
+ params: AttributeDict,
+ model: Union[nn.Module, DDP],
+ optimizer: torch.optim.Optimizer,
+ scheduler: LRSchedulerType,
+ sp: spm.SentencePieceProcessor,
+ train_dl: torch.utils.data.DataLoader,
+ valid_dl: torch.utils.data.DataLoader,
+ scaler: GradScaler,
+ model_avg: Optional[nn.Module] = None,
+ tb_writer: Optional[SummaryWriter] = None,
+ world_size: int = 1,
+ rank: int = 0,
+) -> None:
+ """Train the model for one epoch.
+
+ The training loss from the mean of all frames is saved in
+ `params.train_loss`. It runs the validation process every
+ `params.valid_interval` batches.
+
+ Args:
+ params:
+ It is returned by :func:`get_params`.
+ model:
+ The model for training.
+ optimizer:
+ The optimizer we are using.
+ scheduler:
+ The learning rate scheduler; we call step() after every batch.
+ train_dl:
+ Dataloader for the training dataset.
+ valid_dl:
+ Dataloader for the validation dataset.
+ scaler:
+ The scaler used for mixed precision training.
+ model_avg:
+ The stored model averaged from the start of training.
+ tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. + """ + torch.cuda.empty_cache() + model.train() + + tot_loss = MetricsTracker() + + cur_batch_idx = params.get("cur_batch_idx", 0) + + for batch_idx, batch in enumerate(train_dl): + if batch_idx < cur_batch_idx: + continue + cur_batch_idx = batch_idx + + params.batch_idx_train += 1 + batch_size = batch["inputs"].shape[0] + + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. + scaler.scale(loss).backward() + set_batch_count(model, params.batch_idx_train) + scheduler.step_batch(params.batch_idx_train) + + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + except: # noqa + display_and_save_batch(batch, params=params, sp=sp) + raise + + if params.print_diagnostics and batch_idx == 5: + return + + if ( + rank == 0 + and params.batch_idx_train > 0 + and params.batch_idx_train % params.average_period == 0 + ): + update_averaged_model( + params=params, + model_cur=model, + model_avg=model_avg, + ) + + if ( + params.batch_idx_train > 0 + and params.batch_idx_train % params.save_every_n == 0 + ): + params.cur_batch_idx = batch_idx + save_checkpoint_with_global_batch_idx( + out_dir=params.exp_dir, + global_batch_idx=params.batch_idx_train, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + del params.cur_batch_idx + remove_checkpoints( + out_dir=params.exp_dir, + topk=params.keep_last_k, + rank=rank, + ) + + if batch_idx % 100 == 0 and params.use_fp16: + # If the grad scale was less than 1, try increasing it. The _growth_interval + # of the grad scaler is configurable, but we can't configure it to have different + # behavior depending on the current grad scale. 
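+ # NOTE (editor): `scaler._scale` is a private attribute of
+ # torch.cuda.amp.GradScaler; `scaler.get_scale()` is the public
+ # accessor and returns a Python float.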
+ cur_grad_scale = scaler._scale.item() + if cur_grad_scale < 1.0 or (cur_grad_scale < 8.0 and batch_idx % 400 == 0): + scaler.update(cur_grad_scale * 2.0) + if cur_grad_scale < 0.01: + logging.warning(f"Grad scale is small: {cur_grad_scale}") + if cur_grad_scale < 1.0e-05: + raise RuntimeError( + f"grad_scale is too small, exiting: {cur_grad_scale}" + ) + + if batch_idx % params.log_interval == 0: + cur_lr = scheduler.get_last_lr()[0] + cur_grad_scale = scaler._scale.item() if params.use_fp16 else 1.0 + + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}, " + + (f"grad_scale: {scaler._scale.item()}" if params.use_fp16 else "") + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary(tb_writer, "train/tot_", params.batch_idx_train) + if params.use_fp16: + tb_writer.add_scalar( + "train/grad_scale", cur_grad_scale, params.batch_idx_train + ) + + if batch_idx % params.valid_interval == 0 and not params.print_diagnostics: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + sp=sp, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + logging.info( + f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB" + ) + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+ args:
+ The return value of get_parser().parse_args()
+ """
+ params = get_params()
+ params.update(vars(args))
+
+ fix_random_seed(params.seed)
+ if world_size > 1:
+ setup_dist(rank, world_size, params.master_port)
+
+ setup_logger(f"{params.exp_dir}/log/log-train")
+ logging.info("Training started")
+
+ if args.tensorboard and rank == 0:
+ tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
+ else:
+ tb_writer = None
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", rank)
+ logging.info(f"Device: {device}")
+
+ sp = spm.SentencePieceProcessor()
+ sp.load(params.bpe_model)
+
+ # <blk> is defined in local/train_bpe_model.py
+ params.blank_id = sp.piece_to_id("<blk>")
+ params.vocab_size = sp.get_piece_size()
+
+ logging.info(params)
+
+ logging.info("About to create model")
+ model = get_surt_model(params)
+
+ num_param = sum([p.numel() for p in model.parameters()])
+ logging.info(f"Number of model parameters: {num_param}")
+
+ assert params.save_every_n >= params.average_period
+ model_avg: Optional[nn.Module] = None
+ if rank == 0:
+ # model_avg is only used with rank 0
+ model_avg = copy.deepcopy(model)
+
+ assert params.start_epoch > 0, params.start_epoch
+ checkpoints = load_checkpoint_if_available(
+ params=params, model=model, model_avg=model_avg
+ )
+
+ model.to(device)
+
+ if checkpoints is None and params.model_init_ckpt is not None:
+ logging.info(
+ f"Initializing model with checkpoint from {params.model_init_ckpt}"
+ )
+ init_ckpt = torch.load(params.model_init_ckpt, map_location=device)
+ model.load_state_dict(init_ckpt["model"], strict=False)
+
+ if world_size > 1:
+ logging.info("Using DDP")
+ model = DDP(model, device_ids=[rank], find_unused_parameters=True)
+
+ parameters_names = []
+ parameters_names.append(
+ [name_param_pair[0] for name_param_pair in model.named_parameters()]
+ )
+ optimizer = ScaledAdam(
+ model.parameters(),
+ lr=params.base_lr,
+ clipping_scale=2.0,
+ parameters_names=parameters_names,
+ )
+
+ scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs)
+
+ if checkpoints and "optimizer" in checkpoints:
+ logging.info("Loading optimizer state dict")
+ optimizer.load_state_dict(checkpoints["optimizer"])
+
+ if (
+ checkpoints
+ and "scheduler" in checkpoints
+ and checkpoints["scheduler"] is not None
+ ):
+ logging.info("Loading scheduler state dict")
+ scheduler.load_state_dict(checkpoints["scheduler"])
+
+ if params.print_diagnostics:
+ diagnostic = diagnostics.attach_diagnostics(model)
+
+ ami = AmiAsrDataModule(args)
+
+ train_cuts = ami.train_cuts()
+ train_cuts = train_cuts.filter(lambda c: 0.5 <= c.duration <= 35.0)
+ dev_cuts = ami.ami_cuts(split="dev", type="ihm-mix")
+ dev_cuts = dev_cuts.trim_to_supervision_groups(max_pause=0.0).filter(
+ lambda c: 0.2 <= c.duration <= 60.0
+ )
+
+ if params.start_batch > 0 and checkpoints and "sampler" in checkpoints:
+ # We only load the sampler's state dict when it loads a checkpoint
+ # saved in the middle of an epoch
+ sampler_state_dict = checkpoints["sampler"]
+ else:
+ sampler_state_dict = None
+
+ train_dl = ami.train_dataloaders(
+ train_cuts,
+ sampler_state_dict=sampler_state_dict,
+ )
+ valid_dl = ami.valid_dataloaders(dev_cuts)
+
+ scaler = GradScaler(enabled=params.use_fp16, init_scale=1.0)
+ if checkpoints and "grad_scaler" in checkpoints:
+ logging.info("Loading grad scaler state dict")
+ scaler.load_state_dict(checkpoints["grad_scaler"])
+
+ for epoch in range(params.start_epoch, params.num_epochs + 1):
+ scheduler.step_epoch(epoch
- 1) + fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sp=sp, + train_dl=train_dl, + valid_dl=valid_dl, + scaler=scaler, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + if params.print_diagnostics: + diagnostic.print_diagnostics() + break + + save_checkpoint( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def display_and_save_batch( + batch: dict, + params: AttributeDict, + sp: spm.SentencePieceProcessor, +) -> None: + """Display the batch statistics and save the batch into disk. + + Args: + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + params: + Parameters for training. See :func:`get_params`. + sp: + The BPE model. + """ + from lhotse.utils import uuid4 + + filename = f"{params.exp_dir}/batch-{uuid4()}.pt" + logging.info(f"Saving batch to {filename}") + torch.save(batch, filename) + + features = batch["inputs"] + + logging.info(f"features shape: {features.shape}") + + y = [sp.encode(text_ch) for text_ch in batch["text"]] + num_tokens = [sum(len(yi) for yi in y_ch) for y_ch in y] + logging.info(f"num tokens: {num_tokens}") + + +def main(): + parser = get_parser() + AmiAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) +torch.multiprocessing.set_sharing_strategy("file_system") + +if __name__ == "__main__": + main() diff --git a/egs/ami/SURT/dprnn_zipformer/zipformer.py b/egs/ami/SURT/dprnn_zipformer/zipformer.py new file mode 120000 index 000000000..59b772024 --- /dev/null +++ b/egs/ami/SURT/dprnn_zipformer/zipformer.py @@ -0,0 +1 @@ +../../../libricss/SURT/dprnn_zipformer/zipformer.py \ No newline at end of file diff --git a/egs/ami/SURT/local/add_source_feats.py b/egs/ami/SURT/local/add_source_feats.py new file mode 100755 index 000000000..0917b88a6 --- /dev/null +++ b/egs/ami/SURT/local/add_source_feats.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 +# Copyright 2022 Johns Hopkins University (authors: Desh Raj) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +This file adds source features as temporal arrays to the mixture manifests. +It looks for manifests in the directory data/manifests. 
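+The updated cut manifests (cuts_train_clean_sources.jsonl.gz and
+cuts_train_reverb_sources.jsonl.gz) are written to data/manifests, and the
+source features themselves are stored under data/fbank.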
+""" +import logging +from pathlib import Path + +import numpy as np +from lhotse import CutSet, LilcomChunkyWriter, load_manifest, load_manifest_lazy +from tqdm import tqdm + + +def add_source_feats(): + src_dir = Path("data/manifests") + output_dir = Path("data/fbank") + + logging.info("Reading mixed cuts") + mixed_cuts_clean = load_manifest_lazy(src_dir / "cuts_train_clean.jsonl.gz") + mixed_cuts_reverb = load_manifest_lazy(src_dir / "cuts_train_reverb.jsonl.gz") + + logging.info("Reading source cuts") + source_cuts = load_manifest(src_dir / "ihm_cuts_train_trimmed.jsonl.gz") + + logging.info("Adding source features to the mixed cuts") + pbar = tqdm(total=len(mixed_cuts_clean), desc="Adding source features") + with CutSet.open_writer( + src_dir / "cuts_train_clean_sources.jsonl.gz" + ) as cut_writer_clean, CutSet.open_writer( + src_dir / "cuts_train_reverb_sources.jsonl.gz" + ) as cut_writer_reverb, LilcomChunkyWriter( + output_dir / "feats_train_clean_sources" + ) as source_feat_writer: + for cut_clean, cut_reverb in zip(mixed_cuts_clean, mixed_cuts_reverb): + assert cut_reverb.id == cut_clean.id + "_rvb" + source_feats = [] + source_feat_offsets = [] + cur_offset = 0 + for sup in sorted( + cut_clean.supervisions, key=lambda s: (s.start, s.speaker) + ): + source_cut = source_cuts[sup.id] + source_feats.append(source_cut.load_features()) + source_feat_offsets.append(cur_offset) + cur_offset += source_cut.num_frames + cut_clean.source_feats = source_feat_writer.store_array( + cut_clean.id, np.concatenate(source_feats, axis=0) + ) + cut_clean.source_feat_offsets = source_feat_offsets + cut_writer_clean.write(cut_clean) + # Also write the reverb cut + cut_reverb.source_feats = cut_clean.source_feats + cut_reverb.source_feat_offsets = cut_clean.source_feat_offsets + cut_writer_reverb.write(cut_reverb) + pbar.update(1) + + +if __name__ == "__main__": + formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + logging.basicConfig(format=formatter, level=logging.INFO) + add_source_feats() diff --git a/egs/ami/SURT/local/compute_fbank_aimix.py b/egs/ami/SURT/local/compute_fbank_aimix.py new file mode 100755 index 000000000..91b3a060b --- /dev/null +++ b/egs/ami/SURT/local/compute_fbank_aimix.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python3 +# Copyright 2022 Johns Hopkins University (authors: Desh Raj) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +This file computes fbank features of the synthetically mixed AMI and ICSI +train set. +It looks for manifests in the directory data/manifests. + +The generated fbank features are saved in data/fbank. 
+""" +import logging +import random +import warnings +from pathlib import Path + +import torch +import torch.multiprocessing +import torchaudio +from lhotse import ( + AudioSource, + LilcomChunkyWriter, + Recording, + load_manifest, + load_manifest_lazy, +) +from lhotse.audio import set_ffmpeg_torchaudio_info_enabled +from lhotse.cut import MixedCut, MixTrack, MultiCut +from lhotse.features.kaldifeat import ( + KaldifeatFbank, + KaldifeatFbankConfig, + KaldifeatFrameOptions, + KaldifeatMelOptions, +) +from lhotse.utils import fix_random_seed, uuid4 +from tqdm import tqdm + +# Torch's multithreaded behavior needs to be disabled or +# it wastes a lot of CPU and slow things down. +# Do this outside of main() in case it needs to take effect +# even when we are not invoking the main (e.g. when spawning subprocesses). +torch.set_num_threads(1) +torch.set_num_interop_threads(1) +torch.multiprocessing.set_sharing_strategy("file_system") +torchaudio.set_audio_backend("soundfile") +set_ffmpeg_torchaudio_info_enabled(False) + + +def compute_fbank_aimix(): + src_dir = Path("data/manifests") + output_dir = Path("data/fbank") + + sampling_rate = 16000 + num_mel_bins = 80 + + extractor = KaldifeatFbank( + KaldifeatFbankConfig( + frame_opts=KaldifeatFrameOptions(sampling_rate=sampling_rate), + mel_opts=KaldifeatMelOptions(num_bins=num_mel_bins), + device="cuda", + ) + ) + + logging.info("Reading manifests") + train_cuts = load_manifest_lazy(src_dir / "ai-mix_cuts_clean_full.jsonl.gz") + + # only uses RIRs and noises from REVERB challenge + real_rirs = load_manifest(src_dir / "real-rir_recordings_all.jsonl.gz").filter( + lambda r: "RVB2014" in r.id + ) + noises = load_manifest(src_dir / "iso-noise_recordings_all.jsonl.gz").filter( + lambda r: "RVB2014" in r.id + ) + + # Apply perturbation to the training cuts + logging.info("Applying perturbation to the training cuts") + train_cuts_rvb = train_cuts.map( + lambda c: augment( + c, perturb_snr=True, rirs=real_rirs, noises=noises, perturb_loudness=True + ) + ) + + logging.info("Extracting fbank features for training cuts") + _ = train_cuts.compute_and_store_features_batch( + extractor=extractor, + storage_path=output_dir / "ai-mix_feats_clean", + manifest_path=src_dir / "cuts_train_clean.jsonl.gz", + batch_duration=5000, + num_workers=4, + storage_type=LilcomChunkyWriter, + overwrite=True, + ) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + _ = train_cuts_rvb.compute_and_store_features_batch( + extractor=extractor, + storage_path=output_dir / "ai-mix_feats_reverb", + manifest_path=src_dir / "cuts_train_reverb.jsonl.gz", + batch_duration=5000, + num_workers=4, + storage_type=LilcomChunkyWriter, + overwrite=True, + ) + + +def augment(cut, perturb_snr=False, rirs=None, noises=None, perturb_loudness=False): + """ + Given a mixed cut, this function optionally applies the following augmentations: + - Perturbing the SNRs of the tracks (in range [-5, 5] dB) + - Reverberation using a randomly selected RIR + - Adding noise + - Perturbing the loudness (in range [-20, -25] dB) + """ + out_cut = cut.drop_features() + + # Perturb the SNRs (optional) + if perturb_snr: + snrs = [random.uniform(-5, 5) for _ in range(len(cut.tracks))] + for i, (track, snr) in enumerate(zip(out_cut.tracks, snrs)): + if i == 0: + # Skip the first track since it is the reference + continue + track.snr = snr + + # Reverberate the cut (optional) + if rirs is not None: + # Select an RIR at random + rir = random.choice(rirs) + # Select a channel at random + rir_channel = 
random.choice(list(range(rir.num_channels))) + # Reverberate the cut + out_cut = out_cut.reverb_rir(rir_recording=rir, rir_channels=[rir_channel]) + + # Add noise (optional) + if noises is not None: + # Select a noise recording at random + noise = random.choice(noises).to_cut() + if isinstance(noise, MultiCut): + noise = noise.to_mono()[0] + # Select an SNR at random + snr = random.uniform(10, 30) + # Repeat the noise to match the duration of the cut + noise = repeat_cut(noise, out_cut.duration) + out_cut = MixedCut( + id=out_cut.id, + tracks=[ + MixTrack(cut=out_cut, type="MixedCut"), + MixTrack(cut=noise, type="DataCut", snr=snr), + ], + ) + + # Perturb the loudness (optional) + if perturb_loudness: + target_loudness = random.uniform(-20, -25) + out_cut = out_cut.normalize_loudness(target_loudness, mix_first=True) + return out_cut + + +def repeat_cut(cut, duration): + while cut.duration < duration: + cut = cut.mix(cut, offset_other_by=cut.duration) + return cut.truncate(duration=duration) + + +if __name__ == "__main__": + formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + logging.basicConfig(format=formatter, level=logging.INFO) + + fix_random_seed(42) + compute_fbank_aimix() diff --git a/egs/ami/SURT/local/compute_fbank_ami.py b/egs/ami/SURT/local/compute_fbank_ami.py new file mode 100755 index 000000000..351b41765 --- /dev/null +++ b/egs/ami/SURT/local/compute_fbank_ami.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 +# Copyright 2022 Johns Hopkins University (authors: Desh Raj) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +This file computes fbank features of the AMI dataset. +We compute features for full recordings (i.e., without trimming to supervisions). +This way we can create arbitrary segmentations later. + +The generated fbank features are saved in data/fbank. +""" +import logging +import math +from pathlib import Path + +import torch +import torch.multiprocessing +from lhotse import CutSet, LilcomChunkyWriter +from lhotse.features.kaldifeat import ( + KaldifeatFbank, + KaldifeatFbankConfig, + KaldifeatFrameOptions, + KaldifeatMelOptions, +) +from lhotse.recipes.utils import read_manifests_if_cached + +# Torch's multithreaded behavior needs to be disabled or +# it wastes a lot of CPU and slow things down. +# Do this outside of main() in case it needs to take effect +# even when we are not invoking the main (e.g. when spawning subprocesses). 
+torch.set_num_threads(1) +torch.set_num_interop_threads(1) +torch.multiprocessing.set_sharing_strategy("file_system") + + +def compute_fbank_ami(): + src_dir = Path("data/manifests") + output_dir = Path("data/fbank") + + sampling_rate = 16000 + num_mel_bins = 80 + + extractor = KaldifeatFbank( + KaldifeatFbankConfig( + frame_opts=KaldifeatFrameOptions(sampling_rate=sampling_rate), + mel_opts=KaldifeatMelOptions(num_bins=num_mel_bins), + device="cuda", + ) + ) + + logging.info("Reading manifests") + manifests = {} + for part in ["ihm-mix", "sdm", "mdm8-bf"]: + manifests[part] = read_manifests_if_cached( + dataset_parts=["train", "dev", "test"], + output_dir=src_dir, + prefix=f"ami-{part}", + suffix="jsonl.gz", + ) + + for part in ["ihm-mix", "sdm", "mdm8-bf"]: + for split in ["train", "dev", "test"]: + logging.info(f"Processing {part} {split}") + cuts = CutSet.from_manifests( + **manifests[part][split] + ).compute_and_store_features_batch( + extractor=extractor, + storage_path=output_dir / f"ami-{part}_{split}_feats", + manifest_path=src_dir / f"cuts_ami-{part}_{split}.jsonl.gz", + batch_duration=5000, + num_workers=4, + storage_type=LilcomChunkyWriter, + ) + + +if __name__ == "__main__": + formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + logging.basicConfig(format=formatter, level=logging.INFO) + + compute_fbank_ami() diff --git a/egs/ami/SURT/local/compute_fbank_icsi.py b/egs/ami/SURT/local/compute_fbank_icsi.py new file mode 100755 index 000000000..4e2ff3f3b --- /dev/null +++ b/egs/ami/SURT/local/compute_fbank_icsi.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 +# Copyright 2022 Johns Hopkins University (authors: Desh Raj) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +This file computes fbank features of the ICSI dataset. +We compute features for full recordings (i.e., without trimming to supervisions). +This way we can create arbitrary segmentations later. + +The generated fbank features are saved in data/fbank. +""" +import logging +import math +from pathlib import Path + +import torch +import torch.multiprocessing +from lhotse import CutSet, LilcomChunkyWriter +from lhotse.features.kaldifeat import ( + KaldifeatFbank, + KaldifeatFbankConfig, + KaldifeatFrameOptions, + KaldifeatMelOptions, +) +from lhotse.recipes.utils import read_manifests_if_cached + +# Torch's multithreaded behavior needs to be disabled or +# it wastes a lot of CPU and slow things down. +# Do this outside of main() in case it needs to take effect +# even when we are not invoking the main (e.g. when spawning subprocesses). 
+torch.set_num_threads(1) +torch.set_num_interop_threads(1) +torch.multiprocessing.set_sharing_strategy("file_system") + + +def compute_fbank_icsi(): + src_dir = Path("data/manifests") + output_dir = Path("data/fbank") + + sampling_rate = 16000 + num_mel_bins = 80 + + extractor = KaldifeatFbank( + KaldifeatFbankConfig( + frame_opts=KaldifeatFrameOptions(sampling_rate=sampling_rate), + mel_opts=KaldifeatMelOptions(num_bins=num_mel_bins), + device="cuda", + ) + ) + + logging.info("Reading manifests") + manifests = {} + for part in ["ihm-mix", "sdm"]: + manifests[part] = read_manifests_if_cached( + dataset_parts=["train"], + output_dir=src_dir, + prefix=f"icsi-{part}", + suffix="jsonl.gz", + ) + + for part in ["ihm-mix", "sdm"]: + for split in ["train"]: + logging.info(f"Processing {part} {split}") + cuts = CutSet.from_manifests( + **manifests[part][split] + ).compute_and_store_features_batch( + extractor=extractor, + storage_path=output_dir / f"icsi-{part}_{split}_feats", + manifest_path=src_dir / f"cuts_icsi-{part}_{split}.jsonl.gz", + batch_duration=5000, + num_workers=4, + storage_type=LilcomChunkyWriter, + overwrite=True, + ) + + +if __name__ == "__main__": + formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + logging.basicConfig(format=formatter, level=logging.INFO) + + compute_fbank_icsi() diff --git a/egs/ami/SURT/local/compute_fbank_ihm.py b/egs/ami/SURT/local/compute_fbank_ihm.py new file mode 100755 index 000000000..56f54aa21 --- /dev/null +++ b/egs/ami/SURT/local/compute_fbank_ihm.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +# Copyright 2022 Johns Hopkins University (authors: Desh Raj) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +This file computes fbank features of the trimmed sub-segments which will be +used for simulating the training mixtures. + +The generated fbank features are saved in data/fbank. +""" +import logging +import math +from pathlib import Path + +import torch +import torch.multiprocessing +import torchaudio +from lhotse import CutSet, LilcomChunkyWriter, load_manifest +from lhotse.audio import set_ffmpeg_torchaudio_info_enabled +from lhotse.features.kaldifeat import ( + KaldifeatFbank, + KaldifeatFbankConfig, + KaldifeatFrameOptions, + KaldifeatMelOptions, +) +from lhotse.recipes.utils import read_manifests_if_cached +from tqdm import tqdm + +# Torch's multithreaded behavior needs to be disabled or +# it wastes a lot of CPU and slow things down. +# Do this outside of main() in case it needs to take effect +# even when we are not invoking the main (e.g. when spawning subprocesses). 
+torch.set_num_threads(1) +torch.set_num_interop_threads(1) +torch.multiprocessing.set_sharing_strategy("file_system") +torchaudio.set_audio_backend("soundfile") +set_ffmpeg_torchaudio_info_enabled(False) + + +def compute_fbank_ihm(): + src_dir = Path("data/manifests") + output_dir = Path("data/fbank") + + sampling_rate = 16000 + num_mel_bins = 80 + + extractor = KaldifeatFbank( + KaldifeatFbankConfig( + frame_opts=KaldifeatFrameOptions(sampling_rate=sampling_rate), + mel_opts=KaldifeatMelOptions(num_bins=num_mel_bins), + device="cuda", + ) + ) + + logging.info("Reading manifests") + manifests = {} + for data in ["ami", "icsi"]: + manifests[data] = read_manifests_if_cached( + dataset_parts=["train"], + output_dir=src_dir, + types=["recordings", "supervisions"], + prefix=f"{data}-ihm", + suffix="jsonl.gz", + ) + + logging.info("Computing features") + for data in ["ami", "icsi"]: + cs = CutSet.from_manifests(**manifests[data]["train"]) + cs = cs.trim_to_supervisions(keep_overlapping=False) + cs = cs.normalize_loudness(target=-23.0, affix_id=False) + cs = cs + cs.perturb_speed(0.9) + cs.perturb_speed(1.1) + _ = cs.compute_and_store_features_batch( + extractor=extractor, + storage_path=output_dir / f"{data}-ihm_train_feats", + manifest_path=src_dir / f"{data}-ihm_cuts_train.jsonl.gz", + batch_duration=5000, + num_workers=4, + storage_type=LilcomChunkyWriter, + overwrite=True, + ) + + +if __name__ == "__main__": + formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + logging.basicConfig(format=formatter, level=logging.INFO) + + compute_fbank_ihm() diff --git a/egs/ami/SURT/local/prepare_ami_train_cuts.py b/egs/ami/SURT/local/prepare_ami_train_cuts.py new file mode 100755 index 000000000..72fced70d --- /dev/null +++ b/egs/ami/SURT/local/prepare_ami_train_cuts.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python3 +# Copyright 2022 Johns Hopkins University (authors: Desh Raj) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +This file creates AMI train segments. +""" +import logging +import math +from pathlib import Path + +import torch +import torch.multiprocessing +from lhotse import LilcomChunkyWriter, load_manifest_lazy +from lhotse.cut import Cut, CutSet +from lhotse.utils import EPSILON, add_durations +from tqdm import tqdm + + +def cut_into_windows(cuts: CutSet, duration: float): + """ + This function takes a CutSet and cuts each cut into windows of roughly + `duration` seconds. By roughly, we mean that we try to adjust for the last supervision + that exceeds the duration, or is shorter than the duration. 
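+    For example (illustrative numbers): with duration=30.0, a window may be
+    extended past the 30 s mark so that a supervision with more than half of
+    its duration inside the window is kept whole, while supervisions that are
+    mostly outside the window start the next window instead.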
+ """ + res = [] + with tqdm() as pbar: + for cut in cuts: + pbar.update(1) + sups = cut.index_supervisions()[cut.id] + sr = cut.sampling_rate + start = 0.0 + end = duration + num_tries = 0 + while start < cut.duration and num_tries < 2: + # Find the supervision that are cut by the window endpoint + hitlist = [iv for iv in sups.at(end) if iv.begin < end] + # If there are no supervisions, we are done + if not hitlist: + res.append( + cut.truncate( + offset=start, + duration=add_durations(end, -start, sampling_rate=sr), + keep_excessive_supervisions=False, + ) + ) + # Update the start and end for the next window + start = end + end = add_durations(end, duration, sampling_rate=sr) + else: + # find ratio of durations cut by the window endpoint + ratios = [ + add_durations(end, -iv.end, sampling_rate=sr) / iv.length() + for iv in hitlist + ] + # we retain the supervisions that have >50% of their duration + # in the window, and discard the others + retained = [] + discarded = [] + for iv, ratio in zip(hitlist, ratios): + if ratio > 0.5: + retained.append(iv) + else: + discarded.append(iv) + cur_end = max(iv.end for iv in retained) if retained else end + res.append( + cut.truncate( + offset=start, + duration=add_durations(cur_end, -start, sampling_rate=sr), + keep_excessive_supervisions=False, + ) + ) + # For the next window, we start at the earliest discarded supervision + next_start = min(iv.begin for iv in discarded) if discarded else end + next_end = add_durations(next_start, duration, sampling_rate=sr) + # It may happen that next_start is the same as start, in which case + # we will advance the window anyway + if next_start == start: + logging.warning( + f"Next start is the same as start: {next_start} == {start} for cut {cut.id}" + ) + start = end + EPSILON + end = add_durations(start, duration, sampling_rate=sr) + num_tries += 1 + else: + start = next_start + end = next_end + return CutSet.from_cuts(res) + + +def prepare_train_cuts(): + src_dir = Path("data/manifests") + + logging.info("Loading the manifests") + train_cuts_ihm = load_manifest_lazy( + src_dir / "cuts_ami-ihm-mix_train.jsonl.gz" + ).map(lambda c: c.with_id(f"{c.id}_ihm-mix")) + train_cuts_sdm = load_manifest_lazy(src_dir / "cuts_ami-sdm_train.jsonl.gz").map( + lambda c: c.with_id(f"{c.id}_sdm") + ) + train_cuts_mdm = load_manifest_lazy( + src_dir / "cuts_ami-mdm8-bf_train.jsonl.gz" + ).map(lambda c: c.with_id(f"{c.id}_mdm8-bf")) + + # Combine all cuts into one CutSet + train_cuts = train_cuts_ihm + train_cuts_sdm + train_cuts_mdm + + train_cuts_1 = train_cuts.trim_to_supervision_groups(max_pause=0.5) + train_cuts_2 = train_cuts.trim_to_supervision_groups(max_pause=0.0) + + # Combine the two segmentations + train_all = train_cuts_1 + train_cuts_2 + + # At this point, some of the cuts may be very long. We will cut them into windows of + # roughly 30 seconds. 
+ logging.info("Cutting the segments into windows of 30 seconds") + train_all_30 = cut_into_windows(train_all, duration=30.0) + logging.info(f"Number of cuts after cutting into windows: {len(train_all_30)}") + + # Show statistics + train_all.describe(full=True) + + # Save the cuts + logging.info("Saving the cuts") + train_all.to_file(src_dir / "cuts_train_ami.jsonl.gz") + + +if __name__ == "__main__": + formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + logging.basicConfig(format=formatter, level=logging.INFO) + + prepare_train_cuts() diff --git a/egs/ami/SURT/local/prepare_icsi_train_cuts.py b/egs/ami/SURT/local/prepare_icsi_train_cuts.py new file mode 100755 index 000000000..818e26bfb --- /dev/null +++ b/egs/ami/SURT/local/prepare_icsi_train_cuts.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +# Copyright 2022 Johns Hopkins University (authors: Desh Raj) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +This file creates ICSI train segments. +""" +import logging +from pathlib import Path + +from lhotse import load_manifest_lazy +from prepare_ami_train_cuts import cut_into_windows + + +def prepare_train_cuts(): + src_dir = Path("data/manifests") + + logging.info("Loading the manifests") + train_cuts_ihm = load_manifest_lazy( + src_dir / "cuts_icsi-ihm-mix_train.jsonl.gz" + ).map(lambda c: c.with_id(f"{c.id}_ihm-mix")) + train_cuts_sdm = load_manifest_lazy(src_dir / "cuts_icsi-sdm_train.jsonl.gz").map( + lambda c: c.with_id(f"{c.id}_sdm") + ) + + # Combine all cuts into one CutSet + train_cuts = train_cuts_ihm + train_cuts_sdm + + train_cuts_1 = train_cuts.trim_to_supervision_groups(max_pause=0.5) + train_cuts_2 = train_cuts.trim_to_supervision_groups(max_pause=0.0) + + # Combine the two segmentations + train_all = train_cuts_1 + train_cuts_2 + + # At this point, some of the cuts may be very long. We will cut them into windows of + # roughly 30 seconds. 
+ logging.info("Cutting the segments into windows of 30 seconds") + train_all_30 = cut_into_windows(train_all, duration=30.0) + logging.info(f"Number of cuts after cutting into windows: {len(train_all_30)}") + + # Show statistics + train_all.describe(full=True) + + # Save the cuts + logging.info("Saving the cuts") + train_all.to_file(src_dir / "cuts_train_icsi.jsonl.gz") + + +if __name__ == "__main__": + formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + logging.basicConfig(format=formatter, level=logging.INFO) + + prepare_train_cuts() diff --git a/egs/ami/SURT/local/prepare_lang_bpe.py b/egs/ami/SURT/local/prepare_lang_bpe.py new file mode 120000 index 000000000..36b40e7fc --- /dev/null +++ b/egs/ami/SURT/local/prepare_lang_bpe.py @@ -0,0 +1 @@ +../../../librispeech/ASR/local/prepare_lang_bpe.py \ No newline at end of file diff --git a/egs/ami/SURT/local/train_bpe_model.py b/egs/ami/SURT/local/train_bpe_model.py new file mode 120000 index 000000000..6fad36421 --- /dev/null +++ b/egs/ami/SURT/local/train_bpe_model.py @@ -0,0 +1 @@ +../../../librispeech/ASR/local/train_bpe_model.py \ No newline at end of file diff --git a/egs/ami/SURT/prepare.sh b/egs/ami/SURT/prepare.sh new file mode 100755 index 000000000..ea4e5baf2 --- /dev/null +++ b/egs/ami/SURT/prepare.sh @@ -0,0 +1,195 @@ +#!/usr/bin/env bash + +set -eou pipefail + +stage=-1 +stop_stage=100 + +# We assume dl_dir (download dir) contains the following +# directories and files. If not, they will be downloaded +# by this script automatically. +# +# - $dl_dir/ami +# You can find audio and transcripts for AMI in this path. +# +# - $dl_dir/icsi +# You can find audio and transcripts for ICSI in this path. +# +# - $dl_dir/rirs_noises +# This directory contains the RIRS_NOISES corpus downloaded from https://openslr.org/28/. +# +dl_dir=$PWD/download + +. shared/parse_options.sh || exit 1 + +# All files generated by this script are saved in "data". +# You can safely remove "data" and rerun this script to regenerate it. +mkdir -p data +vocab_size=500 + +log() { + # This function is from espnet + local fname=${BASH_SOURCE[1]##*/} + echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*" +} + +log "dl_dir: $dl_dir" + +if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then + log "Stage 0: Download data" + + # If you have pre-downloaded it to /path/to/amicorpus, + # you can create a symlink + # + # ln -sfv /path/to/amicorpus $dl_dir/amicorpus + # + if [ ! -d $dl_dir/amicorpus ]; then + for mic in ihm ihm-mix sdm mdm8-bf; do + lhotse download ami --mic $mic $dl_dir/amicorpus + done + fi + + # If you have pre-downloaded it to /path/to/icsi, + # you can create a symlink + # + # ln -sfv /path/to/icsi $dl_dir/icsi + # + if [ ! -d $dl_dir/icsi ]; then + lhotse download icsi $dl_dir/icsi + fi + + # If you have pre-downloaded it to /path/to/rirs_noises, + # you can create a symlink + # + # ln -sfv /path/to/rirs_noises $dl_dir/ + # + if [ ! -d $dl_dir/rirs_noises ]; then + lhotse download rirs_noises $dl_dir + fi +fi + +if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then + log "Stage 1: Prepare AMI manifests" + # We assume that you have downloaded the AMI corpus + # to $dl_dir/amicorpus. We perform text normalization for the transcripts. 
+  mkdir -p data/manifests
+  for mic in ihm ihm-mix sdm mdm8-bf; do
+    log "Preparing AMI manifest for $mic"
+    lhotse prepare ami --mic $mic --max-words-per-segment 30 --merge-consecutive $dl_dir/amicorpus data/manifests/
+  done
+fi
+
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
+  log "Stage 2: Prepare ICSI manifests"
+  # We assume that you have downloaded the ICSI corpus
+  # to $dl_dir/icsi. We perform text normalization for the transcripts.
+  mkdir -p data/manifests
+  log "Preparing ICSI manifest"
+  for mic in ihm ihm-mix sdm; do
+    lhotse prepare icsi --mic $mic $dl_dir/icsi data/manifests/
+  done
+fi
+
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
+  log "Stage 3: Prepare RIRs"
+  # We assume that you have downloaded the RIRS_NOISES corpus
+  # to $dl_dir/rirs_noises
+  lhotse prepare rir-noise -p real_rir -p iso_noise $dl_dir/rirs_noises data/manifests
+fi
+
+if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
+  log "Stage 4: Extract features for AMI and ICSI recordings"
+  python local/compute_fbank_ami.py
+  python local/compute_fbank_icsi.py
+fi
+
+if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
+  log "Stage 5: Create sources for simulating mixtures"
+  # In the following script, we speed-perturb the IHM recordings and extract features.
+  python local/compute_fbank_ihm.py
+  lhotse combine data/manifests/ami-ihm_cuts_train.jsonl.gz \
+    data/manifests/icsi-ihm_cuts_train.jsonl.gz - |\
+    lhotse cut trim-to-alignments --type word --max-pause 0.5 - - |\
+    lhotse filter 'duration<=12.0' - - |\
+    shuf | gzip -c > data/manifests/ihm_cuts_train_trimmed.jsonl.gz
+fi
+
+if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
+  log "Stage 6: Create training mixtures"
+  lhotse workflows simulate-meetings \
+    --method conversational \
+    --same-spk-pause 0.5 \
+    --diff-spk-pause 0.5 \
+    --diff-spk-overlap 1.0 \
+    --prob-diff-spk-overlap 0.8 \
+    --num-meetings 200000 \
+    --num-speakers-per-meeting 2,3 \
+    --max-duration-per-speaker 15.0 \
+    --max-utterances-per-speaker 3 \
+    --seed 1234 \
+    --num-jobs 2 \
+    data/manifests/ihm_cuts_train_trimmed.jsonl.gz \
+    data/manifests/ai-mix_cuts_clean.jsonl.gz
+
+  python local/compute_fbank_aimix.py
+
+  # Add source features to the manifest (will be used for masking loss)
+  # This may take ~2 hours.
+  python local/add_source_feats.py
+
+  # Combine clean and reverb
+  cat <(gunzip -c data/manifests/cuts_train_clean_sources.jsonl.gz) \
+    <(gunzip -c data/manifests/cuts_train_reverb_sources.jsonl.gz) |\
+    shuf | gzip -c > data/manifests/cuts_train_comb_sources.jsonl.gz
+fi
+
+if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then
+  log "Stage 7: Create training mixtures from real sessions"
+  python local/prepare_ami_train_cuts.py
+  python local/prepare_icsi_train_cuts.py
+
+  # Combine AMI and ICSI
+  cat <(gunzip -c data/manifests/cuts_train_ami.jsonl.gz) \
+    <(gunzip -c data/manifests/cuts_train_icsi.jsonl.gz) |\
+    shuf | gzip -c > data/manifests/cuts_train_ami_icsi.jsonl.gz
+fi
+
+if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then
+  log "Stage 8: Dump transcripts for BPE model training (using AMI and ICSI)."
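+  # jq pulls out the "text" field of each supervision and sed removes the
+  # double quotes, leaving one transcript per line.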
+  mkdir -p data/lm
+  cat <(gunzip -c data/manifests/ami-sdm_supervisions_train.jsonl.gz | jq '.text' | sed 's:"::g') \
+    <(gunzip -c data/manifests/icsi-sdm_supervisions_train.jsonl.gz | jq '.text' | sed 's:"::g') \
+    > data/lm/transcript_words.txt
+fi

+if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then
+  log "Stage 9: Prepare BPE based lang (combining AMI and ICSI)"
+
+  lang_dir=data/lang_bpe_${vocab_size}
+  mkdir -p $lang_dir
+
+  # Add special words to words.txt
+  echo "<eps> 0" > $lang_dir/words.txt
+  echo "!SIL 1" >> $lang_dir/words.txt
+  echo "<unk> 2" >> $lang_dir/words.txt
+
+  # Add regular words to words.txt
+  cat data/lm/transcript_words.txt | grep -o -E '\w+' | sort -u | awk '{print $0,NR+2}' >> $lang_dir/words.txt
+
+  # Add remaining special word symbols expected by LM scripts.
+  num_words=$(cat $lang_dir/words.txt | wc -l)
+  echo "<s> ${num_words}" >> $lang_dir/words.txt
+  num_words=$(cat $lang_dir/words.txt | wc -l)
+  echo "</s> ${num_words}" >> $lang_dir/words.txt
+  num_words=$(cat $lang_dir/words.txt | wc -l)
+  echo "#0 ${num_words}" >> $lang_dir/words.txt
+
+  ./local/train_bpe_model.py \
+    --lang-dir $lang_dir \
+    --vocab-size $vocab_size \
+    --transcript data/lm/transcript_words.txt
+
+  if [ ! -f $lang_dir/L_disambig.pt ]; then
+    ./local/prepare_lang_bpe.py --lang-dir $lang_dir
+  fi
+fi
diff --git a/egs/ami/SURT/shared b/egs/ami/SURT/shared
new file mode 120000
index 000000000..4cbd91a7e
--- /dev/null
+++ b/egs/ami/SURT/shared
@@ -0,0 +1 @@
+../../../icefall/shared
\ No newline at end of file

From 5ed6fc0e6d9afeebaf86ec83c16d9ff2c8d6a0ba Mon Sep 17 00:00:00 2001
From: marcoyang1998 <45973641+marcoyang1998@users.noreply.github.com>
Date: Wed, 12 Jul 2023 15:37:14 +0800
Subject: [PATCH 18/30] add sym link (#1170)

---
 egs/wenetspeech/ASR/local/prepare_char_lm_training_data.py | 1 +
 egs/wenetspeech/ASR/local/sort_lm_training_data.py         | 1 +
 2 files changed, 2 insertions(+)
 create mode 120000 egs/wenetspeech/ASR/local/prepare_char_lm_training_data.py
 create mode 120000 egs/wenetspeech/ASR/local/sort_lm_training_data.py

diff --git a/egs/wenetspeech/ASR/local/prepare_char_lm_training_data.py b/egs/wenetspeech/ASR/local/prepare_char_lm_training_data.py
new file mode 120000
index 000000000..2374cafdd
--- /dev/null
+++ b/egs/wenetspeech/ASR/local/prepare_char_lm_training_data.py
@@ -0,0 +1 @@
+../../../aishell/ASR/local/prepare_char_lm_training_data.py
\ No newline at end of file
diff --git a/egs/wenetspeech/ASR/local/sort_lm_training_data.py b/egs/wenetspeech/ASR/local/sort_lm_training_data.py
new file mode 120000
index 000000000..efef2c445
--- /dev/null
+++ b/egs/wenetspeech/ASR/local/sort_lm_training_data.py
@@ -0,0 +1 @@
+../../../aishell/ASR/local/sort_lm_training_data.py
\ No newline at end of file

From 4ab7d610081c0c3b38dd851298cb45381e6ac591 Mon Sep 17 00:00:00 2001
From: zr_jin <60612200+JinZr@users.noreply.github.com>
Date: Sat, 15 Jul 2023 12:39:32 +0800
Subject: [PATCH 19/30] removed `batch_name` to fix a KeyError with "uttid" (#1172)

---
 egs/librispeech/ASR/conformer_ctc2/train.py | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/egs/librispeech/ASR/conformer_ctc2/train.py b/egs/librispeech/ASR/conformer_ctc2/train.py
index 3366af13e..c4a13b101 100755
--- a/egs/librispeech/ASR/conformer_ctc2/train.py
+++ b/egs/librispeech/ASR/conformer_ctc2/train.py
@@ -675,7 +675,6 @@ def train_one_epoch(
     for batch_idx, batch in enumerate(train_dl):
         params.batch_idx_train += 1
         batch_size = len(batch["supervisions"]["text"])
-        batch_name = batch["supervisions"]["uttid"]

         with
torch.cuda.amp.autocast(enabled=params.use_fp16): loss, loss_info = compute_loss( @@ -698,10 +697,7 @@ def train_one_epoch( scaler.scale(loss).backward() except RuntimeError as e: if "CUDA out of memory" in str(e): - logging.error( - f"failing batch size:{batch_size} " - f"failing batch names {batch_name}" - ) + logging.error(f"failing batch size:{batch_size} ") raise scheduler.step_batch(params.batch_idx_train) @@ -756,10 +752,7 @@ def train_one_epoch( if loss_info["ctc_loss"] == float("inf") or loss_info["att_loss"] == float( "inf" ): - logging.error( - "Your loss contains inf, something goes wrong" - f"failing batch names {batch_name}" - ) + logging.error("Your loss contains inf, something goes wrong") if tb_writer is not None: tb_writer.add_scalar( "train/learning_rate", cur_lr, params.batch_idx_train From 1dbbd7759ef707eca36bb899bcea8e32afc52282 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Tue, 25 Jul 2023 14:46:18 +0800 Subject: [PATCH 20/30] Add tests for subsample.py and fix typos (#1180) --- .github/workflows/test.yml | 57 ++----- .../pruned_transducer_stateless2/conformer.py | 2 + .../pruned_transducer_stateless3/test_onnx.py | 6 +- .../pruned_transducer_stateless7/test_onnx.py | 3 +- egs/librispeech/ASR/zipformer/.gitignore | 1 + egs/librispeech/ASR/zipformer/model.py | 2 +- egs/librispeech/ASR/zipformer/scaling.py | 14 +- egs/librispeech/ASR/zipformer/subsampling.py | 23 +-- egs/librispeech/ASR/zipformer/test_scaling.py | 82 ++++++++++ .../ASR/zipformer/test_subsampling.py | 152 ++++++++++++++++++ egs/librispeech/ASR/zipformer/zipformer.py | 4 +- 11 files changed, 276 insertions(+), 70 deletions(-) create mode 100644 egs/librispeech/ASR/zipformer/.gitignore create mode 100755 egs/librispeech/ASR/zipformer/test_scaling.py create mode 100755 egs/librispeech/ASR/zipformer/test_subsampling.py diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e04fb5655..363556bb7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -35,9 +35,9 @@ jobs: matrix: os: [ubuntu-latest] python-version: ["3.8"] - torch: ["1.10.0"] - torchaudio: ["0.10.0"] - k2-version: ["1.23.2.dev20221201"] + torch: ["1.13.0"] + torchaudio: ["0.13.0"] + k2-version: ["1.24.3.dev20230719"] fail-fast: false @@ -66,14 +66,14 @@ jobs: pip install torch==${{ matrix.torch }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html pip install torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ + pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.github.io/k2/cpu.html pip install git+https://github.com/lhotse-speech/lhotse # icefall requirements pip uninstall -y protobuf pip install --no-binary protobuf protobuf==3.20.* pip install kaldifst - pip install onnxruntime + pip install onnxruntime matplotlib pip install -r requirements.txt - name: Install graphviz @@ -83,13 +83,6 @@ jobs: python3 -m pip install -qq graphviz sudo apt-get -qq install graphviz - - name: Install graphviz - if: startsWith(matrix.os, 'macos') - shell: bash - run: | - python3 -m pip install -qq graphviz - brew install -q graphviz - - name: Run tests if: startsWith(matrix.os, 'ubuntu') run: | @@ -129,40 +122,10 @@ jobs: cd ../transducer_lstm pytest -v -s - - name: Run tests - if: startsWith(matrix.os, 'macos') - run: | - ls -lh - export PYTHONPATH=$PWD:$PWD/lhotse:$PYTHONPATH - lib_path=$(python -c "from 
distutils.sysconfig import get_python_lib; print(get_python_lib())") - echo "lib_path: $lib_path" - export DYLD_LIBRARY_PATH=$lib_path:$DYLD_LIBRARY_PATH - pytest -v -s ./test - - # run tests for conformer ctc - cd egs/librispeech/ASR/conformer_ctc + cd ../zipformer pytest -v -s - cd ../pruned_transducer_stateless - pytest -v -s - - cd ../pruned_transducer_stateless2 - pytest -v -s - - cd ../pruned_transducer_stateless3 - pytest -v -s - - cd ../pruned_transducer_stateless4 - pytest -v -s - - cd ../transducer_stateless - pytest -v -s - - # cd ../transducer - # pytest -v -s - - cd ../transducer_stateless2 - pytest -v -s - - cd ../transducer_lstm - pytest -v -s + - uses: actions/upload-artifact@v2 + with: + path: egs/librispeech/ASR/zipformer/swoosh.pdf + name: swoosh.pdf diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py b/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py index 9bac46004..bcd419fb7 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py @@ -849,6 +849,8 @@ class RelPositionalEncoding(torch.nn.Module): torch.Tensor: Encoded tensor (batch, 2*time-1, `*`). """ + if isinstance(left_context, torch.Tensor): + left_context = left_context.item() self.extend_pe(x, left_context) x_size_1 = x.size(1) + left_context pos_emb = self.pe[ diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/test_onnx.py b/egs/librispeech/ASR/pruned_transducer_stateless3/test_onnx.py index 598fcf344..810da8da6 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/test_onnx.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/test_onnx.py @@ -113,7 +113,7 @@ def test_rel_pos(): torch.onnx.export( encoder_pos, - x, + (x, torch.zeros(1, dtype=torch.int64)), filename, verbose=False, opset_version=opset_version, @@ -139,7 +139,9 @@ def test_rel_pos(): assert input_nodes[0].name == "x" assert input_nodes[0].shape == ["N", "T", num_features] - inputs = {input_nodes[0].name: x.numpy()} + inputs = { + input_nodes[0].name: x.numpy(), + } onnx_y, onnx_pos_emb = session.run(["y", "pos_emb"], inputs) onnx_y = torch.from_numpy(onnx_y) onnx_pos_emb = torch.from_numpy(onnx_pos_emb) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless7/test_onnx.py b/egs/librispeech/ASR/pruned_transducer_stateless7/test_onnx.py index 2440d267c..1e9b67226 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless7/test_onnx.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless7/test_onnx.py @@ -265,7 +265,7 @@ def test_zipformer_encoder(): torch.onnx.export( encoder, - (x), + (x, torch.ones(1, dtype=torch.float32)), filename, verbose=False, opset_version=opset_version, @@ -289,6 +289,7 @@ def test_zipformer_encoder(): input_nodes = session.get_inputs() inputs = { input_nodes[0].name: x.numpy(), + input_nodes[1].name: torch.ones(1, dtype=torch.float32).numpy(), } onnx_y = session.run(["y"], inputs)[0] onnx_y = torch.from_numpy(onnx_y) diff --git a/egs/librispeech/ASR/zipformer/.gitignore b/egs/librispeech/ASR/zipformer/.gitignore new file mode 100644 index 000000000..e47ac1582 --- /dev/null +++ b/egs/librispeech/ASR/zipformer/.gitignore @@ -0,0 +1 @@ +swoosh.pdf diff --git a/egs/librispeech/ASR/zipformer/model.py b/egs/librispeech/ASR/zipformer/model.py index b541ee697..f2f86af47 100644 --- a/egs/librispeech/ASR/zipformer/model.py +++ b/egs/librispeech/ASR/zipformer/model.py @@ -320,7 +320,7 @@ class AsrModel(nn.Module): assert x_lens.ndim == 1, x_lens.shape assert y.num_axes == 2, 
y.num_axes - assert x.size(0) == x_lens.size(0) == y.dim0 + assert x.size(0) == x_lens.size(0) == y.dim0, (x.shape, x_lens.shape, y.dim0) # Compute encoder outputs encoder_out, encoder_out_lens = self.forward_encoder(x, x_lens) diff --git a/egs/librispeech/ASR/zipformer/scaling.py b/egs/librispeech/ASR/zipformer/scaling.py index 4ee7b7826..7c98ef045 100644 --- a/egs/librispeech/ASR/zipformer/scaling.py +++ b/egs/librispeech/ASR/zipformer/scaling.py @@ -125,7 +125,7 @@ class PiecewiseLinear(object): p: 'PiecewiseLinear', include_crossings: bool = False): """ - Returns (self_mod, p_mod) which are equivalent piecewise lienar + Returns (self_mod, p_mod) which are equivalent piecewise linear functions to self and p, but with the same x values. p: the other piecewise linear function @@ -166,7 +166,7 @@ class ScheduledFloat(torch.nn.Module): in, float(parent_module.whatever), and use it as something like a dropout prob. It is a floating point value whose value changes depending on the batch count of the - training loop. It is a piecewise linear function where you specifiy the (x,y) pairs + training loop. It is a piecewise linear function where you specify the (x,y) pairs in sorted order on x; x corresponds to the batch index. For batch-index values before the first x or after the last x, we just use the first or last y value. @@ -343,7 +343,7 @@ class MaxEigLimiterFunction(torch.autograd.Function): class BiasNormFunction(torch.autograd.Function): # This computes: # scales = (torch.mean((x - bias) ** 2, keepdim=True)) ** -0.5 * log_scale.exp() - # return (x - bias) * scales + # return x * scales # (after unsqueezing the bias), but it does it in a memory-efficient way so that # it can just store the returned value (chances are, this will also be needed for # some other reason, related to the next operation, so we can save memory). @@ -400,8 +400,8 @@ class BiasNorm(torch.nn.Module): Args: num_channels: the number of channels, e.g. 512. channel_dim: the axis/dimension corresponding to the channel, - interprted as an offset from the input's ndim if negative. - shis is NOT the num_channels; it should typically be one of + interpreted as an offset from the input's ndim if negative. + This is NOT the num_channels; it should typically be one of {-2, -1, 0, 1, 2, 3}. log_scale: the initial log-scale that we multiply the output by; this is learnable. @@ -1286,7 +1286,7 @@ class Dropout3(nn.Module): class SwooshLFunction(torch.autograd.Function): """ - swoosh(x) = log(1 + exp(x-4)) - 0.08*x - 0.035 + swoosh_l(x) = log(1 + exp(x-4)) - 0.08*x - 0.035 """ @staticmethod @@ -1361,7 +1361,7 @@ class SwooshLOnnx(torch.nn.Module): class SwooshRFunction(torch.autograd.Function): """ - swoosh(x) = log(1 + exp(x-1)) - 0.08*x - 0.313261687 + swoosh_r(x) = log(1 + exp(x-1)) - 0.08*x - 0.313261687 derivatives are between -0.08 and 0.92. 
""" diff --git a/egs/librispeech/ASR/zipformer/subsampling.py b/egs/librispeech/ASR/zipformer/subsampling.py index d6bf57db4..6532ddccb 100644 --- a/egs/librispeech/ASR/zipformer/subsampling.py +++ b/egs/librispeech/ASR/zipformer/subsampling.py @@ -138,9 +138,11 @@ class ConvNeXt(nn.Module): x = bypass + x x = self.out_balancer(x) - x = x.transpose(1, 3) # (N, W, H, C); need channel dim to be last - x = self.out_whiten(x) - x = x.transpose(1, 3) # (N, C, H, W) + + if x.requires_grad: + x = x.transpose(1, 3) # (N, W, H, C); need channel dim to be last + x = self.out_whiten(x) + x = x.transpose(1, 3) # (N, C, H, W) return x @@ -266,6 +268,7 @@ class Conv2dSubsampling(nn.Module): # just one convnext layer self.convnext = ConvNeXt(layer3_channels, kernel_size=(7, 7)) + # (in_channels-3)//4 self.out_width = (((in_channels - 1) // 2) - 1) // 2 self.layer3_channels = layer3_channels @@ -299,7 +302,7 @@ class Conv2dSubsampling(nn.Module): A tensor of shape (batch_size,) containing the number of frames in Returns: - - a tensor of shape (N, ((T-1)//2 - 1)//2, odim) + - a tensor of shape (N, (T-7)//2, odim) - output lengths, of shape (batch_size,) """ # On entry, x is (N, T, idim) @@ -310,14 +313,14 @@ class Conv2dSubsampling(nn.Module): x = self.conv(x) x = self.convnext(x) - # Now x is of shape (N, odim, ((T-3)//2 - 1)//2, ((idim-1)//2 - 1)//2) + # Now x is of shape (N, odim, (T-7)//2, (idim-3)//4) b, c, t, f = x.size() x = x.transpose(1, 2).reshape(b, t, c * f) - # now x: (N, ((T-1)//2 - 1))//2, out_width * layer3_channels)) + # now x: (N, (T-7)//2, out_width * layer3_channels)) x = self.out(x) - # Now x is of shape (N, ((T-1)//2 - 1))//2, odim) + # Now x is of shape (N, (T-7)//2, odim) x = self.out_whiten(x) x = self.out_norm(x) x = self.dropout(x) @@ -328,7 +331,7 @@ class Conv2dSubsampling(nn.Module): with warnings.catch_warnings(): warnings.simplefilter("ignore") x_lens = (x_lens - 7) // 2 - assert x.size(1) == x_lens.max().item() + assert x.size(1) == x_lens.max().item() , (x.size(1), x_lens.max()) return x, x_lens @@ -347,7 +350,7 @@ class Conv2dSubsampling(nn.Module): A tensor of shape (batch_size,) containing the number of frames in Returns: - - a tensor of shape (N, ((T-1)//2 - 1)//2, odim) + - a tensor of shape (N, (T-7)//2, odim) - output lengths, of shape (batch_size,) - updated cache """ @@ -383,7 +386,7 @@ class Conv2dSubsampling(nn.Module): assert self.convnext.padding[0] == 3 x_lens = (x_lens - 7) // 2 - 3 - assert x.size(1) == x_lens.max().item() + assert x.size(1) == x_lens.max().item(), (x.shape, x_lens.max()) return x, x_lens, cached_left_pad diff --git a/egs/librispeech/ASR/zipformer/test_scaling.py b/egs/librispeech/ASR/zipformer/test_scaling.py new file mode 100755 index 000000000..5c04291e7 --- /dev/null +++ b/egs/librispeech/ASR/zipformer/test_scaling.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 + +import matplotlib.pyplot as plt +import torch +from scaling import PiecewiseLinear, ScheduledFloat, SwooshL, SwooshR + + +def test_piecewise_linear(): + # An identity map in the range [0, 1]. 
+ # 1 - identity map in the range [1, 2] + # x1=0, y1=0 + # x2=1, y2=1 + # x3=2, y3=0 + pl = PiecewiseLinear((0, 0), (1, 1), (2, 0)) + assert pl(0.25) == 0.25, pl(0.25) + assert pl(0.625) == 0.625, pl(0.625) + assert pl(1.25) == 0.75, pl(1.25) + + assert pl(-10) == pl(0), pl(-10) # out of range + assert pl(10) == pl(2), pl(10) # out of range + + # multiplication + pl10 = pl * 10 + assert pl10(1) == 10 * pl(1) + assert pl10(0.5) == 10 * pl(0.5) + + +def test_scheduled_float(): + # Initial value is 0.2 and it decreases linearly towards 0 at 4000 + dropout = ScheduledFloat((0, 0.2), (4000, 0.0), default=0.0) + dropout.batch_count = 0 + assert float(dropout) == 0.2, (float(dropout), dropout.batch_count) + + dropout.batch_count = 1000 + assert abs(float(dropout) - 0.15) < 1e-5, (float(dropout), dropout.batch_count) + + dropout.batch_count = 2000 + assert float(dropout) == 0.1, (float(dropout), dropout.batch_count) + + dropout.batch_count = 3000 + assert abs(float(dropout) - 0.05) < 1e-5, (float(dropout), dropout.batch_count) + + dropout.batch_count = 4000 + assert float(dropout) == 0.0, (float(dropout), dropout.batch_count) + + dropout.batch_count = 5000 # out of range + assert float(dropout) == 0.0, (float(dropout), dropout.batch_count) + + +def test_swoosh(): + x1 = torch.linspace(start=-10, end=0, steps=100, dtype=torch.float32) + x2 = torch.linspace(start=0, end=10, steps=100, dtype=torch.float32) + x = torch.cat([x1, x2[1:]]) + + left = SwooshL()(x) + r = SwooshR()(x) + + relu = torch.nn.functional.relu(x) + print(left[x == 0], r[x == 0]) + plt.plot(x, left, "k") + plt.plot(x, r, "r") + plt.plot(x, relu, "b") + plt.axis([-10, 10, -1, 10]) # [xmin, xmax, ymin, ymax] + plt.legend( + [ + "SwooshL(x) = log(1 + exp(x-4)) - 0.08x - 0.035 ", + "SwooshR(x) = log(1 + exp(x-1)) - 0.08x - 0.313261687", + "ReLU(x) = max(0, x)", + ] + ) + plt.grid() + plt.savefig("swoosh.pdf") + + +def main(): + test_piecewise_linear() + test_scheduled_float() + test_swoosh() + + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/zipformer/test_subsampling.py b/egs/librispeech/ASR/zipformer/test_subsampling.py new file mode 100755 index 000000000..078227fb6 --- /dev/null +++ b/egs/librispeech/ASR/zipformer/test_subsampling.py @@ -0,0 +1,152 @@ +#!/usr/bin/env python3 + +import torch +from scaling import ScheduledFloat +from subsampling import Conv2dSubsampling + + +def test_conv2d_subsampling(): + layer1_channels = 8 + layer2_channels = 32 + layer3_channels = 128 + + out_channels = 192 + encoder_embed = Conv2dSubsampling( + in_channels=80, + out_channels=out_channels, + layer1_channels=layer1_channels, + layer2_channels=layer2_channels, + layer3_channels=layer3_channels, + dropout=ScheduledFloat((0.0, 0.3), (20000.0, 0.1)), + ) + N = 2 + T = 200 + num_features = 80 + x = torch.rand(N, T, num_features) + x_copy = x.clone() + + x = x.unsqueeze(1) # (N, 1, T, num_features) + + x = encoder_embed.conv[0](x) # conv2d, in 1, out 8, kernel 3, padding (0,1) + assert x.shape == (N, layer1_channels, T - 2, num_features) + # (2, 8, 198, 80) + + x = encoder_embed.conv[1](x) # scale grad + x = encoder_embed.conv[2](x) # balancer + x = encoder_embed.conv[3](x) # swooshR + + x = encoder_embed.conv[4](x) # conv2d, in 8, out 32, kernel 3, stride 2 + assert x.shape == ( + N, + layer2_channels, + ((T - 2) - 3) // 2 + 1, + (num_features - 3) // 2 + 1, + ) + # (2, 32, 98, 39) + + x = encoder_embed.conv[5](x) # balancer + x = encoder_embed.conv[6](x) # swooshR + + # conv2d: + # in 32, out 128, kernel 3, stride (1, 2) + x = 
encoder_embed.conv[7](x) + assert x.shape == ( + N, + layer3_channels, + (((T - 2) - 3) // 2 + 1) - 2, + (((num_features - 3) // 2 + 1) - 3) // 2 + 1, + ) + # (2, 128, 96, 19) + + x = encoder_embed.conv[8](x) # balancer + x = encoder_embed.conv[9](x) # swooshR + + # (((T - 2) - 3) // 2 + 1) - 2 + # = (T - 2) - 3) // 2 + 1 - 2 + # = ((T - 2) - 3) // 2 - 1 + # = (T - 2 - 3) // 2 - 1 + # = (T - 5) // 2 - 1 + # = (T - 7) // 2 + assert x.shape[2] == (x_copy.shape[1] - 7) // 2 + + # (((num_features - 3) // 2 + 1) - 3) // 2 + 1, + # = ((num_features - 3) // 2 + 1 - 3) // 2 + 1, + # = ((num_features - 3) // 2 - 2) // 2 + 1, + # = (num_features - 3 - 4) // 2 // 2 + 1, + # = (num_features - 7) // 2 // 2 + 1, + # = (num_features - 7) // 4 + 1, + # = (num_features - 3) // 4 + assert x.shape[3] == (x_copy.shape[2] - 3) // 4 + + assert x.shape == (N, layer3_channels, (T - 7) // 2, (num_features - 3) // 4) + + # Input shape to convnext is + # + # (N, layer3_channels, (T-7)//2, (num_features - 3)//4) + + # conv2d: in layer3_channels, out layer3_channels, groups layer3_channels + # kernel_size 7, padding 3 + x = encoder_embed.convnext.depthwise_conv(x) + assert x.shape == (N, layer3_channels, (T - 7) // 2, (num_features - 3) // 4) + + # conv2d: in layer3_channels, out hidden_ratio * layer3_channels, kernel_size 1 + x = encoder_embed.convnext.pointwise_conv1(x) + assert x.shape == (N, layer3_channels * 3, (T - 7) // 2, (num_features - 3) // 4) + + x = encoder_embed.convnext.hidden_balancer(x) # balancer + x = encoder_embed.convnext.activation(x) # swooshL + + # conv2d: in hidden_ratio * layer3_channels, out layer3_channels, kernel 1 + x = encoder_embed.convnext.pointwise_conv2(x) + assert x.shape == (N, layer3_channels, (T - 7) // 2, (num_features - 3) // 4) + + # bypass and layer drop, omitted here. 
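+    # (out_balancer is a Balancer from scaling.py; it leaves the forward value
+    # unchanged and only influences training through the backward pass, so the
+    # shape is the same before and after this call.)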
+ x = encoder_embed.convnext.out_balancer(x) + + # Note: the input and output shape of ConvNeXt are the same + + x = x.transpose(1, 2).reshape(N, (T - 7) // 2, -1) + assert x.shape == (N, (T - 7) // 2, layer3_channels * ((num_features - 3) // 4)) + + x = encoder_embed.out(x) + assert x.shape == (N, (T - 7) // 2, out_channels) + + x = encoder_embed.out_whiten(x) + x = encoder_embed.out_norm(x) + # final layer is dropout + + # test streaming forward + + subsampling_factor = 2 + cached_left_padding = encoder_embed.get_init_states(batch_size=N) + depthwise_conv_kernel_size = 7 + pad_size = (depthwise_conv_kernel_size - 1) // 2 + + assert cached_left_padding.shape == ( + N, + layer3_channels, + pad_size, + (num_features - 3) // 4, + ) + + chunk_size = 16 + right_padding = pad_size * subsampling_factor + T = chunk_size * subsampling_factor + 7 + right_padding + x = torch.rand(N, T, num_features) + x_lens = torch.tensor([T] * N) + y, y_lens, next_cached_left_padding = encoder_embed.streaming_forward( + x, x_lens, cached_left_padding + ) + + assert y.shape == (N, chunk_size, out_channels), y.shape + assert next_cached_left_padding.shape == cached_left_padding.shape + + assert y.shape[1] == y_lens[0] == y_lens[1] + + +def main(): + test_conv2d_subsampling() + + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/zipformer/zipformer.py b/egs/librispeech/ASR/zipformer/zipformer.py index 7d98dbeb1..b39af02b8 100644 --- a/egs/librispeech/ASR/zipformer/zipformer.py +++ b/egs/librispeech/ASR/zipformer/zipformer.py @@ -219,7 +219,7 @@ class Zipformer2(EncoderInterface): (num_frames0, batch_size, _encoder_dims0) = x.shape - assert self.encoder_dim[0] == _encoder_dims0 + assert self.encoder_dim[0] == _encoder_dims0, (self.encoder_dim[0], _encoder_dims0) feature_mask_dropout_prob = 0.125 @@ -334,7 +334,7 @@ class Zipformer2(EncoderInterface): x = self._get_full_dim_output(outputs) x = self.downsample_output(x) # class Downsample has this rounding behavior.. - assert self.output_downsampling_factor == 2 + assert self.output_downsampling_factor == 2, self.output_downsampling_factor if torch.jit.is_scripting() or torch.jit.is_tracing(): lengths = (x_lens + 1) // 2 else: From 80d922c1583b9b7fb7e9b47008302cdc74ef58b7 Mon Sep 17 00:00:00 2001 From: kobenaxie <572745565@qq.com> Date: Wed, 26 Jul 2023 16:54:42 +0800 Subject: [PATCH 21/30] Update preprocess_commonvoice.py to fix text normalization bug. 
 (#1181)
---
 egs/commonvoice/ASR/local/preprocess_commonvoice.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/egs/commonvoice/ASR/local/preprocess_commonvoice.py b/egs/commonvoice/ASR/local/preprocess_commonvoice.py
index c5ec14502..e60459765 100755
--- a/egs/commonvoice/ASR/local/preprocess_commonvoice.py
+++ b/egs/commonvoice/ASR/local/preprocess_commonvoice.py
@@ -45,7 +45,7 @@ def get_args():

 def normalize_text(utt: str) -> str:
     utt = re.sub(r"[{0}]+".format("-"), " ", utt)
-    return re.sub(r"[^a-zA-Z\s]", "", utt).upper()
+    return re.sub(r"[^a-zA-Z\s']", "", utt).upper()


 def preprocess_commonvoice(

From 625b33e9ad15961239ea77d12472428d8006085d Mon Sep 17 00:00:00 2001
From: marcoyang1998 <45973641+marcoyang1998@users.noreply.github.com>
Date: Thu, 27 Jul 2023 12:08:20 +0800
Subject: [PATCH 22/30] Update descriptions for different decoding methods
 with external LMs (#1185)

* add some descriptions

* minor updates
---
 .../decoding-with-langugage-models/index.rst  | 21 +++++++++++++++++++
 .../rescoring.rst                             | 14 ++++++++-----
 2 files changed, 30 insertions(+), 5 deletions(-)

diff --git a/docs/source/decoding-with-langugage-models/index.rst b/docs/source/decoding-with-langugage-models/index.rst
index 577ebbdfb..6e5e3a4d9 100644
--- a/docs/source/decoding-with-langugage-models/index.rst
+++ b/docs/source/decoding-with-langugage-models/index.rst
@@ -4,6 +4,27 @@ Decoding with language models
 This section describes how to use external langugage models
 during decoding to improve the WER of transducer models.

+The following decoding methods with external language models are available:
+
+
+.. list-table:: LM-rescoring-based methods vs shallow-fusion-based methods
+   :widths: 25 50
+   :header-rows: 1
+
+   * - Decoding method
+     - Description
+   * - ``modified_beam_search``
+     - Beam search (i.e. really n-best decoding, the "beam" is the value of n), similar to the original RNN-T paper. Note that this method does not use a language model.
+   * - ``modified_beam_search_lm_shallow_fusion``
+     - As ``modified_beam_search``, but interpolate RNN-T scores with language model scores, also known as shallow fusion.
+   * - ``modified_beam_search_LODR``
+     - As ``modified_beam_search_lm_shallow_fusion``, but subtract the score of a (BPE-symbol-level) bigram backoff language model used as an approximation to the internal language model of RNN-T.
+   * - ``modified_beam_search_lm_rescore``
+     - As ``modified_beam_search``, but rescore the n-best hypotheses with an external language model (e.g. RNNLM) and re-rank them.
+   * - ``modified_beam_search_lm_rescore_LODR``
+     - As ``modified_beam_search_lm_rescore``, but also subtract the score of a (BPE-symbol-level) bigram backoff language model during re-ranking.
+
+
 .. toctree::
    :maxdepth: 2

diff --git a/docs/source/decoding-with-langugage-models/rescoring.rst b/docs/source/decoding-with-langugage-models/rescoring.rst
index d71acc1e5..de7e700d0 100644
--- a/docs/source/decoding-with-langugage-models/rescoring.rst
+++ b/docs/source/decoding-with-langugage-models/rescoring.rst
@@ -4,7 +4,11 @@ LM rescoring for Transducer
 =================================

 LM rescoring is a commonly used approach to incorporate external LM information. Unlike shallow-fusion-based
+<<<<<<< HEAD
+methods (see :ref:`shallow_fusion`, :ref:`LODR`), rescoring is usually performed to re-rank the n-best hypotheses after beam search.
+======= methods (see :ref:`shallow-fusion`, :ref:`LODR`), rescoring is usually performed to re-rank the n-best hypotheses after beam search. +>>>>>>> 80d922c1583b9b7fb7e9b47008302cdc74ef58b7 Rescoring is usually more efficient than shallow fusion since less computation is performed on the external LM. In this tutorial, we will show you how to use external LM to rescore the n-best hypotheses decoded from neural transducer models in `icefall `__. @@ -225,23 +229,23 @@ Here, we benchmark the WERs and decoding speed of them: - beam=4 - beam=8 - beam=12 - * - `modified_beam_search` + * - ``modified_beam_search`` - 3.11/7.93; 132s - 3.1/7.95; 177s - 3.1/7.96; 210s - * - `modified_beam_search_lm_shallow_fusion` + * - ``modified_beam_search_lm_shallow_fusion`` - 2.77/7.08; 262s - 2.62/6.65; 352s - 2.58/6.65; 488s - * - LODR + * - ``modified_beam_search_LODR`` - 2.61/6.74; 400s - 2.45/6.38; 610s - 2.4/6.23; 870s - * - `modified_beam_search_lm_rescore` + * - ``modified_beam_search_lm_rescore`` - 2.93/7.6; 156s - 2.67/7.11; 203s - 2.59/6.86; 255s - * - `modified_beam_search_lm_rescore_LODR` + * - ``modified_beam_search_lm_rescore_LODR`` - 2.9/7.57; 160s - 2.63/7.04; 203s - 2.52/6.73; 263s From 3fb0a431704a18c9d04230b07a1d75b7ea159970 Mon Sep 17 00:00:00 2001 From: marcoyang1998 <45973641+marcoyang1998@users.noreply.github.com> Date: Thu, 27 Jul 2023 12:36:05 +0800 Subject: [PATCH 23/30] Fix conflict (#1187) Resolve conflict --- docs/source/decoding-with-langugage-models/rescoring.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/source/decoding-with-langugage-models/rescoring.rst b/docs/source/decoding-with-langugage-models/rescoring.rst index de7e700d0..ee2e2113c 100644 --- a/docs/source/decoding-with-langugage-models/rescoring.rst +++ b/docs/source/decoding-with-langugage-models/rescoring.rst @@ -4,11 +4,7 @@ LM rescoring for Transducer ================================= LM rescoring is a commonly used approach to incorporate external LM information. Unlike shallow-fusion-based -<<<<<<< HEAD methods (see :ref:`shallow_fusion`, :ref:`LODR`), rescoring is usually performed to re-rank the n-best hypotheses after beam search. -======= -methods (see :ref:`shallow-fusion`, :ref:`LODR`), rescoring is usually performed to re-rank the n-best hypotheses after beam search. ->>>>>>> 80d922c1583b9b7fb7e9b47008302cdc74ef58b7 Rescoring is usually more efficient than shallow fusion since less computation is performed on the external LM. In this tutorial, we will show you how to use external LM to rescore the n-best hypotheses decoded from neural transducer models in `icefall `__. From 19b942c958cba13a78757c9f7a287f8c88460bd0 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Thu, 27 Jul 2023 13:36:46 +0800 Subject: [PATCH 24/30] Update installation doc. (#1188) --- docs/source/conf.py | 5 + docs/source/installation/index.rst | 687 +++++++++++++++-------------- 2 files changed, 354 insertions(+), 338 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 0ff3f801c..bf231e3c1 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -90,4 +90,9 @@ rst_epilog = """ .. _musan: http://www.openslr.org/17/ .. _ONNX: https://github.com/onnx/onnx .. _onnxruntime: https://github.com/microsoft/onnxruntime +.. _torch: https://github.com/pytorch/pytorch +.. _torchaudio: https://github.com/pytorch/audio +.. _k2: https://github.com/k2-fsa/k2 +.. _lhotse: https://github.com/lhotse-speech/lhotse +.. 
_yesno: https://www.openslr.org/1/ """ diff --git a/docs/source/installation/index.rst b/docs/source/installation/index.rst index 738b24ab2..534b674f9 100644 --- a/docs/source/installation/index.rst +++ b/docs/source/installation/index.rst @@ -3,40 +3,23 @@ Installation ============ +.. hint:: + We have a colab notebook guiding you step by step to setup the environment. -``icefall`` depends on `k2 `_ and -`lhotse `_. + |yesno colab notebook| + + .. |yesno colab notebook| image:: https://colab.research.google.com/assets/colab-badge.svg + :target: https://colab.research.google.com/drive/1tIjjzaJc3IvGyKiMCDWO-TSnBgkcuN3B?usp=sharing + +`icefall`_ depends on `k2`_ and `lhotse`_. We recommend that you use the following steps to install the dependencies. - (0) Install CUDA toolkit and cuDNN -- (1) Install PyTorch and torchaudio -- (2) Install k2 -- (3) Install lhotse - -.. caution:: - - 99% users who have issues about the installation are using conda. - -.. caution:: - - 99% users who have issues about the installation are using conda. - -.. caution:: - - 99% users who have issues about the installation are using conda. - -.. hint:: - - We suggest that you use ``pip install`` to install PyTorch. - - You can use the following command to create a virutal environment in Python: - - .. code-block:: bash - - python3 -m venv ./my_env - source ./my_env/bin/activate +- (1) Install `torch`_ and `torchaudio`_ +- (2) Install `k2`_ +- (3) Install `lhotse`_ .. caution:: @@ -50,27 +33,20 @@ Please refer to to install CUDA and cuDNN. -(1) Install PyTorch and torchaudio ----------------------------------- +(1) Install torch and torchaudio +-------------------------------- -Please refer ``_ to install PyTorch -and torchaudio. - -.. hint:: - - You can also go to ``_ - to download pre-compiled wheels and install them. +Please refer ``_ to install `torch`_ and `torchaudio`_. .. caution:: Please install torch and torchaudio at the same time. - (2) Install k2 -------------- Please refer to ``_ -to install ``k2``. +to install `k2`_. .. caution:: @@ -78,21 +54,18 @@ to install ``k2``. .. note:: - We suggest that you install k2 from source by following - ``_ - or - ``_. + We suggest that you install k2 from pre-compiled wheels by following + ``_ .. hint:: - Please always install the latest version of k2. + Please always install the latest version of `k2`_. (3) Install lhotse ------------------ Please refer to ``_ -to install ``lhotse``. - +to install `lhotse`_. .. hint:: @@ -100,17 +73,16 @@ to install ``lhotse``. pip install git+https://github.com/lhotse-speech/lhotse - to install the latest version of lhotse. + to install the latest version of `lhotse`_. (4) Download icefall -------------------- -``icefall`` is a collection of Python scripts; what you need is to download it +`icefall`_ is a collection of Python scripts; what you need is to download it and set the environment variable ``PYTHONPATH`` to point to it. -Assume you want to place ``icefall`` in the folder ``/tmp``. The -following commands show you how to setup ``icefall``: - +Assume you want to place `icefall`_ in the folder ``/tmp``. The +following commands show you how to setup `icefall`_: .. code-block:: bash @@ -122,285 +94,334 @@ following commands show you how to setup ``icefall``: .. HINT:: - You can put several versions of ``icefall`` in the same virtual environment. - To switch among different versions of ``icefall``, just set ``PYTHONPATH`` + You can put several versions of `icefall`_ in the same virtual environment. 
+ To switch among different versions of `icefall`_, just set ``PYTHONPATH`` to point to the version you want. - Installation example -------------------- The following shows an example about setting up the environment. - (1) Create a virtual environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: bash - $ virtualenv -p python3.8 test-icefall + kuangfangjun:~$ virtualenv -p python3.8 test-icefall + created virtual environment CPython3.8.0.final.0-64 in 9422ms + creator CPython3Posix(dest=/star-fj/fangjun/test-icefall, clear=False, no_vcs_ignore=False, global=False) + seeder FromAppData(download=False, pip=bundle, setuptools=bundle, wheel=bundle, via=copy, app_data_dir=/star-fj/fangjun/.local/share/virtualenv) + added seed packages: pip==22.3.1, setuptools==65.6.3, wheel==0.38.4 + activators BashActivator,CShellActivator,FishActivator,NushellActivator,PowerShellActivator,PythonActivator - created virtual environment CPython3.8.6.final.0-64 in 1540ms - creator CPython3Posix(dest=/ceph-fj/fangjun/test-icefall, clear=False, no_vcs_ignore=False, global=False) - seeder FromAppData(download=False, pip=bundle, setuptools=bundle, wheel=bundle, via=copy, app_data_dir=/root/fangjun/.local/share/v - irtualenv) - added seed packages: pip==21.1.3, setuptools==57.4.0, wheel==0.36.2 - activators BashActivator,CShellActivator,FishActivator,PowerShellActivator,PythonActivator,XonshActivator + kuangfangjun:~$ source test-icefall/bin/activate + (test-icefall) kuangfangjun:~$ -(2) Activate your virtual environment -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +(2) Install CUDA toolkit and cuDNN +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You need to determine the version of CUDA toolkit to install. .. code-block:: bash - $ source test-icefall/bin/activate + (test-icefall) kuangfangjun:~$ nvidia-smi | head -n 4 -(3) Install k2 + Wed Jul 26 21:57:49 2023 + +-----------------------------------------------------------------------------+ + | NVIDIA-SMI 510.47.03 Driver Version: 510.47.03 CUDA Version: 11.6 | + |-------------------------------+----------------------+----------------------+ + +You can choose any CUDA version that is ``not`` greater than the version printed by ``nvidia-smi``. +In our case, we can choose any version ``<= 11.6``. + +We will use ``CUDA 11.6`` in this example. Please follow +``_ +to install CUDA toolkit and cuDNN if you have not done that before. + +After installing CUDA toolkit, you can use the following command to verify it: + +.. code-block:: bash + + (test-icefall) kuangfangjun:~$ nvcc --version + + nvcc: NVIDIA (R) Cuda compiler driver + Copyright (c) 2005-2019 NVIDIA Corporation + Built on Wed_Oct_23_19:24:38_PDT_2019 + Cuda compilation tools, release 10.2, V10.2.89 + +(3) Install torch and torchaudio +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Since we have selected CUDA toolkit ``11.6``, we have to install a version of `torch`_ +that is compiled against CUDA ``11.6``. We select ``torch 1.13.0+cu116`` in this +example. + +After selecting the version of `torch`_ to install, we need to also install +a compatible version of `torchaudio`_, which is ``0.13.0+cu116`` in our case. + +Please refer to ``_ +to select an appropriate version of `torchaudio`_ to install if you use a different +version of `torch`_. + +.. 
code-block:: bash + + (test-icefall) kuangfangjun:~$ pip install torch==1.13.0+cu116 torchaudio==0.13.0+cu116 -f https://download.pytorch.org/whl/torch_stable.html + + Looking in links: https://download.pytorch.org/whl/torch_stable.html + Collecting torch==1.13.0+cu116 + Downloading https://download.pytorch.org/whl/cu116/torch-1.13.0%2Bcu116-cp38-cp38-linux_x86_64.whl (1983.0 MB) + ________________________________________ 2.0/2.0 GB 764.4 kB/s eta 0:00:00 + Collecting torchaudio==0.13.0+cu116 + Downloading https://download.pytorch.org/whl/cu116/torchaudio-0.13.0%2Bcu116-cp38-cp38-linux_x86_64.whl (4.2 MB) + ________________________________________ 4.2/4.2 MB 1.3 MB/s eta 0:00:00 + Requirement already satisfied: typing-extensions in /star-fj/fangjun/test-icefall/lib/python3.8/site-packages (from torch==1.13.0+cu116) (4.7.1) + Installing collected packages: torch, torchaudio + Successfully installed torch-1.13.0+cu116 torchaudio-0.13.0+cu116 + +Verify that `torch`_ and `torchaudio`_ are successfully installed: + +.. code-block:: bash + + (test-icefall) kuangfangjun:~$ python3 -c "import torch; print(torch.__version__)" + + 1.13.0+cu116 + + (test-icefall) kuangfangjun:~$ python3 -c "import torchaudio; print(torchaudio.__version__)" + + 0.13.0+cu116 + +(4) Install k2 ~~~~~~~~~~~~~~ +We will install `k2`_ from pre-compiled wheels by following +``_ + .. code-block:: bash - $ pip install k2==1.4.dev20210822+cpu.torch1.9.0 -f https://k2-fsa.org/nightly/index.html + (test-icefall) kuangfangjun:~$ pip install k2==1.24.3.dev20230725+cuda11.6.torch1.13.0 -f https://k2-fsa.github.io/k2/cuda.html - Looking in links: https://k2-fsa.org/nightly/index.html - Collecting k2==1.4.dev20210822+cpu.torch1.9.0 - Downloading https://k2-fsa.org/nightly/whl/k2-1.4.dev20210822%2Bcpu.torch1.9.0-cp38-cp38-linux_x86_64.whl (1.6 MB) - |________________________________| 1.6 MB 185 kB/s + Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple + Looking in links: https://k2-fsa.github.io/k2/cuda.html + Collecting k2==1.24.3.dev20230725+cuda11.6.torch1.13.0 + Downloading https://huggingface.co/csukuangfj/k2/resolve/main/ubuntu-cuda/k2-1.24.3.dev20230725%2Bcuda11.6.torch1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (104.3 MB) + ________________________________________ 104.3/104.3 MB 5.1 MB/s eta 0:00:00 + Requirement already satisfied: torch==1.13.0 in /star-fj/fangjun/test-icefall/lib/python3.8/site-packages (from k2==1.24.3.dev20230725+cuda11.6.torch1.13.0) (1.13.0+cu116) Collecting graphviz - Downloading graphviz-0.17-py3-none-any.whl (18 kB) - Collecting torch==1.9.0 - Using cached torch-1.9.0-cp38-cp38-manylinux1_x86_64.whl (831.4 MB) - Collecting typing-extensions - Using cached typing_extensions-3.10.0.0-py3-none-any.whl (26 kB) - Installing collected packages: typing-extensions, torch, graphviz, k2 - Successfully installed graphviz-0.17 k2-1.4.dev20210822+cpu.torch1.9.0 torch-1.9.0 typing-extensions-3.10.0.0 + Using cached https://pypi.tuna.tsinghua.edu.cn/packages/de/5e/fcbb22c68208d39edff467809d06c9d81d7d27426460ebc598e55130c1aa/graphviz-0.20.1-py3-none-any.whl (47 kB) + Requirement already satisfied: typing-extensions in /star-fj/fangjun/test-icefall/lib/python3.8/site-packages (from torch==1.13.0->k2==1.24.3.dev20230725+cuda11.6.torch1.13.0) (4.7.1) + Installing collected packages: graphviz, k2 + Successfully installed graphviz-0.20.1 k2-1.24.3.dev20230725+cuda11.6.torch1.13.0 -.. WARNING:: +.. hint:: - We choose to install a CPU version of k2 for testing. 
You would probably want to install - a CUDA version of k2. + Please refer to ``_ for the available + pre-compiled wheels about `k2`_. +Verify that `k2`_ has been installed successfully: -(4) Install lhotse +.. code-block:: bash + + (test-icefall) kuangfangjun:~$ python3 -m k2.version + + Collecting environment information... + + k2 version: 1.24.3 + Build type: Release + Git SHA1: 4c05309499a08454997adf500b56dcc629e35ae5 + Git date: Tue Jul 25 16:23:36 2023 + Cuda used to build k2: 11.6 + cuDNN used to build k2: 8.3.2 + Python version used to build k2: 3.8 + OS used to build k2: CentOS Linux release 7.9.2009 (Core) + CMake version: 3.27.0 + GCC version: 9.3.1 + CMAKE_CUDA_FLAGS: -Wno-deprecated-gpu-targets -lineinfo --expt-extended-lambda -use_fast_math -Xptxas=-w --expt-extended-lambda -gencode arch=compute_35,code=sm_35 -lineinfo --expt-extended-lambda -use_fast_math -Xptxas=-w --expt-extended-lambda -gencode arch=compute_50,code=sm_50 -lineinfo --expt-extended-lambda -use_fast_math -Xptxas=-w --expt-extended-lambda -gencode arch=compute_60,code=sm_60 -lineinfo --expt-extended-lambda -use_fast_math -Xptxas=-w --expt-extended-lambda -gencode arch=compute_61,code=sm_61 -lineinfo --expt-extended-lambda -use_fast_math -Xptxas=-w --expt-extended-lambda -gencode arch=compute_70,code=sm_70 -lineinfo --expt-extended-lambda -use_fast_math -Xptxas=-w --expt-extended-lambda -gencode arch=compute_75,code=sm_75 -lineinfo --expt-extended-lambda -use_fast_math -Xptxas=-w --expt-extended-lambda -gencode arch=compute_80,code=sm_80 -lineinfo --expt-extended-lambda -use_fast_math -Xptxas=-w --expt-extended-lambda -gencode arch=compute_86,code=sm_86 -DONNX_NAMESPACE=onnx_c2 -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_52,code=sm_52 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_86,code=compute_86 -Xcudafe --diag_suppress=cc_clobber_ignored,--diag_suppress=integer_sign_change,--diag_suppress=useless_using_declaration,--diag_suppress=set_but_not_used,--diag_suppress=field_without_dll_interface,--diag_suppress=base_class_has_different_dll_interface,--diag_suppress=dll_interface_conflict_none_assumed,--diag_suppress=dll_interface_conflict_dllexport_assumed,--diag_suppress=implicit_return_from_non_void_function,--diag_suppress=unsigned_compare_with_zero,--diag_suppress=declared_but_not_referenced,--diag_suppress=bad_friend_decl --expt-relaxed-constexpr --expt-extended-lambda -D_GLIBCXX_USE_CXX11_ABI=0 --compiler-options -Wall --compiler-options -Wno-strict-overflow --compiler-options -Wno-unknown-pragmas + CMAKE_CXX_FLAGS: -D_GLIBCXX_USE_CXX11_ABI=0 -Wno-unused-variable -Wno-strict-overflow + PyTorch version used to build k2: 1.13.0+cu116 + PyTorch is using Cuda: 11.6 + NVTX enabled: True + With CUDA: True + Disable debug: True + Sync kernels : False + Disable checks: False + Max cpu memory allocate: 214748364800 bytes (or 200.0 GB) + k2 abort: False + __file__: /star-fj/fangjun/test-icefall/lib/python3.8/site-packages/k2/version/version.py + _k2.__file__: /star-fj/fangjun/test-icefall/lib/python3.8/site-packages/_k2.cpython-38-x86_64-linux-gnu.so + +(5) Install lhotse ~~~~~~~~~~~~~~~~~~ -.. code-block:: +.. 
code-block:: bash - $ pip install git+https://github.com/lhotse-speech/lhotse + (test-icefall) kuangfangjun:~$ pip install git+https://github.com/lhotse-speech/lhotse Collecting git+https://github.com/lhotse-speech/lhotse - Cloning https://github.com/lhotse-speech/lhotse to /tmp/pip-req-build-7b1b76ge - Running command git clone -q https://github.com/lhotse-speech/lhotse /tmp/pip-req-build-7b1b76ge - Collecting audioread>=2.1.9 - Using cached audioread-2.1.9-py3-none-any.whl - Collecting SoundFile>=0.10 - Using cached SoundFile-0.10.3.post1-py2.py3-none-any.whl (21 kB) - Collecting click>=7.1.1 - Using cached click-8.0.1-py3-none-any.whl (97 kB) + Cloning https://github.com/lhotse-speech/lhotse to /tmp/pip-req-build-vq12fd5i + Running command git clone --filter=blob:none --quiet https://github.com/lhotse-speech/lhotse /tmp/pip-req-build-vq12fd5i + Resolved https://github.com/lhotse-speech/lhotse to commit 7640d663469b22cd0b36f3246ee9b849cd25e3b7 + Installing build dependencies ... done + Getting requirements to build wheel ... done + Preparing metadata (pyproject.toml) ... done Collecting cytoolz>=0.10.1 - Using cached cytoolz-0.11.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.9 MB) - Collecting dataclasses - Using cached dataclasses-0.6-py3-none-any.whl (14 kB) - Collecting h5py>=2.10.0 - Downloading h5py-3.4.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (4.5 MB) - |________________________________| 4.5 MB 684 kB/s - Collecting intervaltree>=3.1.0 - Using cached intervaltree-3.1.0-py2.py3-none-any.whl - Collecting lilcom>=1.1.0 - Using cached lilcom-1.1.1-cp38-cp38-linux_x86_64.whl - Collecting numpy>=1.18.1 - Using cached numpy-1.21.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (15.8 MB) - Collecting packaging - Using cached packaging-21.0-py3-none-any.whl (40 kB) + Downloading https://pypi.tuna.tsinghua.edu.cn/packages/1e/3b/a7828d575aa17fb7acaf1ced49a3655aa36dad7e16eb7e6a2e4df0dda76f/cytoolz-0.12.2-cp38-cp38- + manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.0 MB) + ________________________________________ 2.0/2.0 MB 33.2 MB/s eta 0:00:00 Collecting pyyaml>=5.3.1 - Using cached PyYAML-5.4.1-cp38-cp38-manylinux1_x86_64.whl (662 kB) + Downloading https://pypi.tuna.tsinghua.edu.cn/packages/c8/6b/6600ac24725c7388255b2f5add93f91e58a5d7efaf4af244fdbcc11a541b/PyYAML-6.0.1-cp38-cp38-ma + nylinux_2_17_x86_64.manylinux2014_x86_64.whl (736 kB) + ________________________________________ 736.6/736.6 kB 38.6 MB/s eta 0:00:00 + Collecting dataclasses + Downloading https://pypi.tuna.tsinghua.edu.cn/packages/26/2f/1095cdc2868052dd1e64520f7c0d5c8c550ad297e944e641dbf1ffbb9a5d/dataclasses-0.6-py3-none- + any.whl (14 kB) + Requirement already satisfied: torchaudio in ./test-icefall/lib/python3.8/site-packages (from lhotse==1.16.0.dev0+git.7640d66.clean) (0.13.0+cu116) + Collecting lilcom>=1.1.0 + Downloading https://pypi.tuna.tsinghua.edu.cn/packages/a8/65/df0a69c52bd085ca1ad4e5c4c1a5c680e25f9477d8e49316c4ff1e5084a4/lilcom-1.7-cp38-cp38-many + linux_2_17_x86_64.manylinux2014_x86_64.whl (87 kB) + ________________________________________ 87.1/87.1 kB 8.7 MB/s eta 0:00:00 Collecting tqdm - Downloading tqdm-4.62.1-py2.py3-none-any.whl (76 kB) - |________________________________| 76 kB 2.7 MB/s - Collecting torchaudio==0.9.0 - Downloading torchaudio-0.9.0-cp38-cp38-manylinux1_x86_64.whl (1.9 MB) - |________________________________| 1.9 MB 73.1 MB/s - Requirement already satisfied: torch==1.9.0 in ./test-icefall/lib/python3.8/site-packages (from 
torchaudio==0.9.0->lhotse===0.8.0.dev - -2a1410b-clean) (1.9.0) - Requirement already satisfied: typing-extensions in ./test-icefall/lib/python3.8/site-packages (from torch==1.9.0->torchaudio==0.9.0- - >lhotse===0.8.0.dev-2a1410b-clean) (3.10.0.0) + Using cached https://pypi.tuna.tsinghua.edu.cn/packages/e6/02/a2cff6306177ae6bc73bc0665065de51dfb3b9db7373e122e2735faf0d97/tqdm-4.65.0-py3-none-any + .whl (77 kB) + Requirement already satisfied: numpy>=1.18.1 in ./test-icefall/lib/python3.8/site-packages (from lhotse==1.16.0.dev0+git.7640d66.clean) (1.24.4) + Collecting audioread>=2.1.9 + Using cached https://pypi.tuna.tsinghua.edu.cn/packages/5d/cb/82a002441902dccbe427406785db07af10182245ee639ea9f4d92907c923/audioread-3.0.0.tar.gz ( + 377 kB) + Preparing metadata (setup.py) ... done + Collecting tabulate>=0.8.1 + Using cached https://pypi.tuna.tsinghua.edu.cn/packages/40/44/4a5f08c96eb108af5cb50b41f76142f0afa346dfa99d5296fe7202a11854/tabulate-0.9.0-py3-none- + any.whl (35 kB) + Collecting click>=7.1.1 + Downloading https://pypi.tuna.tsinghua.edu.cn/packages/1a/70/e63223f8116931d365993d4a6b7ef653a4d920b41d03de7c59499962821f/click-8.1.6-py3-none-any. + whl (97 kB) + ________________________________________ 97.9/97.9 kB 8.4 MB/s eta 0:00:00 + Collecting packaging + Using cached https://pypi.tuna.tsinghua.edu.cn/packages/ab/c3/57f0601a2d4fe15de7a553c00adbc901425661bf048f2a22dfc500caf121/packaging-23.1-py3-none- + any.whl (48 kB) + Collecting intervaltree>=3.1.0 + Downloading https://pypi.tuna.tsinghua.edu.cn/packages/50/fb/396d568039d21344639db96d940d40eb62befe704ef849b27949ded5c3bb/intervaltree-3.1.0.tar.gz + (32 kB) + Preparing metadata (setup.py) ... done + Requirement already satisfied: torch in ./test-icefall/lib/python3.8/site-packages (from lhotse==1.16.0.dev0+git.7640d66.clean) (1.13.0+cu116) + Collecting SoundFile>=0.10 + Downloading https://pypi.tuna.tsinghua.edu.cn/packages/ad/bd/0602167a213d9184fc688b1086dc6d374b7ae8c33eccf169f9b50ce6568c/soundfile-0.12.1-py2.py3- + none-manylinux_2_17_x86_64.whl (1.3 MB) + ________________________________________ 1.3/1.3 MB 46.5 MB/s eta 0:00:00 Collecting toolz>=0.8.0 - Using cached toolz-0.11.1-py3-none-any.whl (55 kB) + Using cached https://pypi.tuna.tsinghua.edu.cn/packages/7f/5c/922a3508f5bda2892be3df86c74f9cf1e01217c2b1f8a0ac4841d903e3e9/toolz-0.12.0-py3-none-any.whl (55 kB) Collecting sortedcontainers<3.0,>=2.0 - Using cached sortedcontainers-2.4.0-py2.py3-none-any.whl (29 kB) + Using cached https://pypi.tuna.tsinghua.edu.cn/packages/32/46/9cb0e58b2deb7f82b84065f37f3bffeb12413f947f9388e4cac22c4621ce/sortedcontainers-2.4.0-py2.py3-none-any.whl (29 kB) Collecting cffi>=1.0 - Using cached cffi-1.14.6-cp38-cp38-manylinux1_x86_64.whl (411 kB) + Using cached https://pypi.tuna.tsinghua.edu.cn/packages/b7/8b/06f30caa03b5b3ac006de4f93478dbd0239e2a16566d81a106c322dc4f79/cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (442 kB) + Requirement already satisfied: typing-extensions in ./test-icefall/lib/python3.8/site-packages (from torch->lhotse==1.16.0.dev0+git.7640d66.clean) (4.7.1) Collecting pycparser - Using cached pycparser-2.20-py2.py3-none-any.whl (112 kB) - Collecting pyparsing>=2.0.2 - Using cached pyparsing-2.4.7-py2.py3-none-any.whl (67 kB) - Building wheels for collected packages: lhotse - Building wheel for lhotse (setup.py) ... 
done - Created wheel for lhotse: filename=lhotse-0.8.0.dev_2a1410b_clean-py3-none-any.whl size=342242 sha256=f683444afa4dc0881133206b4646a - 9d0f774224cc84000f55d0a67f6e4a37997 - Stored in directory: /tmp/pip-ephem-wheel-cache-ftu0qysz/wheels/7f/7a/8e/a0bf241336e2e3cb573e1e21e5600952d49f5162454f2e612f - WARNING: Built wheel for lhotse is invalid: Metadata 1.2 mandates PEP 440 version, but '0.8.0.dev-2a1410b-clean' is not - Failed to build lhotse - Installing collected packages: pycparser, toolz, sortedcontainers, pyparsing, numpy, cffi, tqdm, torchaudio, SoundFile, pyyaml, packa - ging, lilcom, intervaltree, h5py, dataclasses, cytoolz, click, audioread, lhotse - Running setup.py install for lhotse ... done - DEPRECATION: lhotse was installed using the legacy 'setup.py install' method, because a wheel could not be built for it. A possible - replacement is to fix the wheel build issue reported above. You can find discussion regarding this at https://github.com/pypa/pip/is - sues/8368. - Successfully installed SoundFile-0.10.3.post1 audioread-2.1.9 cffi-1.14.6 click-8.0.1 cytoolz-0.11.0 dataclasses-0.6 h5py-3.4.0 inter - valtree-3.1.0 lhotse-0.8.0.dev-2a1410b-clean lilcom-1.1.1 numpy-1.21.2 packaging-21.0 pycparser-2.20 pyparsing-2.4.7 pyyaml-5.4.1 sor - tedcontainers-2.4.0 toolz-0.11.1 torchaudio-0.9.0 tqdm-4.62.1 + Using cached https://pypi.tuna.tsinghua.edu.cn/packages/62/d5/5f610ebe421e85889f2e55e33b7f9a6795bd982198517d912eb1c76e1a53/pycparser-2.21-py2.py3-none-any.whl (118 kB) + Building wheels for collected packages: lhotse, audioread, intervaltree + Building wheel for lhotse (pyproject.toml) ... done + Created wheel for lhotse: filename=lhotse-1.16.0.dev0+git.7640d66.clean-py3-none-any.whl size=687627 sha256=cbf0a4d2d0b639b33b91637a4175bc251d6a021a069644ecb1a9f2b3a83d072a + Stored in directory: /tmp/pip-ephem-wheel-cache-wwtk90_m/wheels/7f/7a/8e/a0bf241336e2e3cb573e1e21e5600952d49f5162454f2e612f + Building wheel for audioread (setup.py) ... done + Created wheel for audioread: filename=audioread-3.0.0-py3-none-any.whl size=23704 sha256=5e2d3537c96ce9cf0f645a654c671163707bf8cb8d9e358d0e2b0939a85ff4c2 + Stored in directory: /star-fj/fangjun/.cache/pip/wheels/e2/c3/9c/f19ae5a03f8862d9f0776b0c0570f1fdd60a119d90954e3f39 + Building wheel for intervaltree (setup.py) ... done + Created wheel for intervaltree: filename=intervaltree-3.1.0-py2.py3-none-any.whl size=26098 sha256=2604170976cfffe0d2f678cb1a6e5b525f561cd50babe53d631a186734fec9f9 + Stored in directory: /star-fj/fangjun/.cache/pip/wheels/f3/ed/2b/c179ebfad4e15452d6baef59737f27beb9bfb442e0620f7271 + Successfully built lhotse audioread intervaltree + Installing collected packages: sortedcontainers, dataclasses, tqdm, toolz, tabulate, pyyaml, pycparser, packaging, lilcom, intervaltree, click, audioread, cytoolz, cffi, SoundFile, lhotse + Successfully installed SoundFile-0.12.1 audioread-3.0.0 cffi-1.15.1 click-8.1.6 cytoolz-0.12.2 dataclasses-0.6 intervaltree-3.1.0 lhotse-1.16.0.dev0+git.7640d66.clean lilcom-1.7 packaging-23.1 pycparser-2.21 pyyaml-6.0.1 sortedcontainers-2.4.0 tabulate-0.9.0 toolz-0.12.0 tqdm-4.65.0 -(5) Download icefall + +Verify that `lhotse`_ has been installed successfully: + +.. code-block:: bash + + (test-icefall) kuangfangjun:~$ python3 -c "import lhotse; print(lhotse.__version__)" + + 1.16.0.dev+git.7640d66.clean + +(6) Download icefall ~~~~~~~~~~~~~~~~~~~~ -.. code-block:: +.. 
code-block:: bash - $ cd /tmp - $ git clone https://github.com/k2-fsa/icefall + (test-icefall) kuangfangjun:~$ cd /tmp/ + + (test-icefall) kuangfangjun:tmp$ git clone https://github.com/k2-fsa/icefall Cloning into 'icefall'... - remote: Enumerating objects: 500, done. - remote: Counting objects: 100% (500/500), done. - remote: Compressing objects: 100% (308/308), done. - remote: Total 500 (delta 263), reused 307 (delta 102), pack-reused 0 - Receiving objects: 100% (500/500), 172.49 KiB | 385.00 KiB/s, done. - Resolving deltas: 100% (263/263), done. + remote: Enumerating objects: 12942, done. + remote: Counting objects: 100% (67/67), done. + remote: Compressing objects: 100% (56/56), done. + remote: Total 12942 (delta 17), reused 35 (delta 6), pack-reused 12875 + Receiving objects: 100% (12942/12942), 14.77 MiB | 9.29 MiB/s, done. + Resolving deltas: 100% (8835/8835), done. - $ cd icefall - $ pip install -r requirements.txt - - Collecting kaldilm - Downloading kaldilm-1.8.tar.gz (48 kB) - |________________________________| 48 kB 574 kB/s - Collecting kaldialign - Using cached kaldialign-0.2-cp38-cp38-linux_x86_64.whl - Collecting sentencepiece>=0.1.96 - Using cached sentencepiece-0.1.96-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB) - Collecting tensorboard - Using cached tensorboard-2.6.0-py3-none-any.whl (5.6 MB) - Requirement already satisfied: setuptools>=41.0.0 in /ceph-fj/fangjun/test-icefall/lib/python3.8/site-packages (from tensorboard->-r - requirements.txt (line 4)) (57.4.0) - Collecting absl-py>=0.4 - Using cached absl_py-0.13.0-py3-none-any.whl (132 kB) - Collecting google-auth-oauthlib<0.5,>=0.4.1 - Using cached google_auth_oauthlib-0.4.5-py2.py3-none-any.whl (18 kB) - Collecting grpcio>=1.24.3 - Using cached grpcio-1.39.0-cp38-cp38-manylinux2014_x86_64.whl (4.3 MB) - Requirement already satisfied: wheel>=0.26 in /ceph-fj/fangjun/test-icefall/lib/python3.8/site-packages (from tensorboard->-r require - ments.txt (line 4)) (0.36.2) - Requirement already satisfied: numpy>=1.12.0 in /ceph-fj/fangjun/test-icefall/lib/python3.8/site-packages (from tensorboard->-r requi - rements.txt (line 4)) (1.21.2) - Collecting protobuf>=3.6.0 - Using cached protobuf-3.17.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl (1.0 MB) - Collecting werkzeug>=0.11.15 - Using cached Werkzeug-2.0.1-py3-none-any.whl (288 kB) - Collecting tensorboard-data-server<0.7.0,>=0.6.0 - Using cached tensorboard_data_server-0.6.1-py3-none-manylinux2010_x86_64.whl (4.9 MB) - Collecting google-auth<2,>=1.6.3 - Downloading google_auth-1.35.0-py2.py3-none-any.whl (152 kB) - |________________________________| 152 kB 1.4 MB/s - Collecting requests<3,>=2.21.0 - Using cached requests-2.26.0-py2.py3-none-any.whl (62 kB) - Collecting tensorboard-plugin-wit>=1.6.0 - Using cached tensorboard_plugin_wit-1.8.0-py3-none-any.whl (781 kB) - Collecting markdown>=2.6.8 - Using cached Markdown-3.3.4-py3-none-any.whl (97 kB) - Collecting six - Using cached six-1.16.0-py2.py3-none-any.whl (11 kB) - Collecting cachetools<5.0,>=2.0.0 - Using cached cachetools-4.2.2-py3-none-any.whl (11 kB) - Collecting rsa<5,>=3.1.4 - Using cached rsa-4.7.2-py3-none-any.whl (34 kB) - Collecting pyasn1-modules>=0.2.1 - Using cached pyasn1_modules-0.2.8-py2.py3-none-any.whl (155 kB) - Collecting requests-oauthlib>=0.7.0 - Using cached requests_oauthlib-1.3.0-py2.py3-none-any.whl (23 kB) - Collecting pyasn1<0.5.0,>=0.4.6 - Using cached pyasn1-0.4.8-py2.py3-none-any.whl (77 kB) - Collecting urllib3<1.27,>=1.21.1 - Using cached 
urllib3-1.26.6-py2.py3-none-any.whl (138 kB) - Collecting certifi>=2017.4.17 - Using cached certifi-2021.5.30-py2.py3-none-any.whl (145 kB) - Collecting charset-normalizer~=2.0.0 - Using cached charset_normalizer-2.0.4-py3-none-any.whl (36 kB) - Collecting idna<4,>=2.5 - Using cached idna-3.2-py3-none-any.whl (59 kB) - Collecting oauthlib>=3.0.0 - Using cached oauthlib-3.1.1-py2.py3-none-any.whl (146 kB) - Building wheels for collected packages: kaldilm - Building wheel for kaldilm (setup.py) ... done - Created wheel for kaldilm: filename=kaldilm-1.8-cp38-cp38-linux_x86_64.whl size=897233 sha256=eccb906cafcd45bf9a7e1a1718e4534254bfb - f4c0d0cbc66eee6c88d68a63862 - Stored in directory: /root/fangjun/.cache/pip/wheels/85/7d/63/f2dd586369b8797cb36d213bf3a84a789eeb92db93d2e723c9 - Successfully built kaldilm - Installing collected packages: urllib3, pyasn1, idna, charset-normalizer, certifi, six, rsa, requests, pyasn1-modules, oauthlib, cach - etools, requests-oauthlib, google-auth, werkzeug, tensorboard-plugin-wit, tensorboard-data-server, protobuf, markdown, grpcio, google - -auth-oauthlib, absl-py, tensorboard, sentencepiece, kaldilm, kaldialign - Successfully installed absl-py-0.13.0 cachetools-4.2.2 certifi-2021.5.30 charset-normalizer-2.0.4 google-auth-1.35.0 google-auth-oaut - hlib-0.4.5 grpcio-1.39.0 idna-3.2 kaldialign-0.2 kaldilm-1.8 markdown-3.3.4 oauthlib-3.1.1 protobuf-3.17.3 pyasn1-0.4.8 pyasn1-module - s-0.2.8 requests-2.26.0 requests-oauthlib-1.3.0 rsa-4.7.2 sentencepiece-0.1.96 six-1.16.0 tensorboard-2.6.0 tensorboard-data-server-0 - .6.1 tensorboard-plugin-wit-1.8.0 urllib3-1.26.6 werkzeug-2.0.1 + (test-icefall) kuangfangjun:tmp$ cd icefall/ + (test-icefall) kuangfangjun:icefall$ pip install -r ./requirements.txt Test Your Installation ---------------------- To test that your installation is successful, let us run the `yesno recipe `_ -on CPU. +on ``CPU``. Data preparation ~~~~~~~~~~~~~~~~ .. code-block:: bash - $ export PYTHONPATH=/tmp/icefall:$PYTHONPATH - $ cd /tmp/icefall - $ cd egs/yesno/ASR - $ ./prepare.sh + (test-icefall) kuangfangjun:icefall$ export PYTHONPATH=/tmp/icefall:$PYTHONPATH + + (test-icefall) kuangfangjun:icefall$ cd /tmp/icefall + + (test-icefall) kuangfangjun:icefall$ cd egs/yesno/ASR + + (test-icefall) kuangfangjun:ASR$ ./prepare.sh + The log of running ``./prepare.sh`` is: .. code-block:: - 2023-05-12 17:55:21 (prepare.sh:27:main) dl_dir: /tmp/icefall/egs/yesno/ASR/download - 2023-05-12 17:55:21 (prepare.sh:30:main) Stage 0: Download data - /tmp/icefall/egs/yesno/ASR/download/waves_yesno.tar.gz: 100%|_______________________________________________________________| 4.70M/4.70M [06:54<00:00, 11.4kB/s] - 2023-05-12 18:02:19 (prepare.sh:39:main) Stage 1: Prepare yesno manifest - 2023-05-12 18:02:21 (prepare.sh:45:main) Stage 2: Compute fbank for yesno - 2023-05-12 18:02:23,199 INFO [compute_fbank_yesno.py:65] Processing train - Extracting and storing features: 100%|_______________________________________________________________| 90/90 [00:00<00:00, 212.60it/s] - 2023-05-12 18:02:23,640 INFO [compute_fbank_yesno.py:65] Processing test - Extracting and storing features: 100%|_______________________________________________________________| 30/30 [00:00<00:00, 304.53it/s] - 2023-05-12 18:02:24 (prepare.sh:51:main) Stage 3: Prepare lang - 2023-05-12 18:02:26 (prepare.sh:66:main) Stage 4: Prepare G - /project/kaldilm/csrc/arpa_file_parser.cc:void kaldilm::ArpaFileParser::Read(std::istream&):79 - [I] Reading \data\ section. 
- /project/kaldilm/csrc/arpa_file_parser.cc:void kaldilm::ArpaFileParser::Read(std::istream&):140 - [I] Reading \1-grams: section. - 2023-05-12 18:02:26 (prepare.sh:92:main) Stage 5: Compile HLG - 2023-05-12 18:02:28,581 INFO [compile_hlg.py:124] Processing data/lang_phone - 2023-05-12 18:02:28,582 INFO [lexicon.py:171] Converting L.pt to Linv.pt - 2023-05-12 18:02:28,609 INFO [compile_hlg.py:48] Building ctc_topo. max_token_id: 3 - 2023-05-12 18:02:28,610 INFO [compile_hlg.py:52] Loading G.fst.txt - 2023-05-12 18:02:28,611 INFO [compile_hlg.py:62] Intersecting L and G - 2023-05-12 18:02:28,613 INFO [compile_hlg.py:64] LG shape: (4, None) - 2023-05-12 18:02:28,613 INFO [compile_hlg.py:66] Connecting LG - 2023-05-12 18:02:28,614 INFO [compile_hlg.py:68] LG shape after k2.connect: (4, None) - 2023-05-12 18:02:28,614 INFO [compile_hlg.py:70] - 2023-05-12 18:02:28,614 INFO [compile_hlg.py:71] Determinizing LG - 2023-05-12 18:02:28,615 INFO [compile_hlg.py:74] - 2023-05-12 18:02:28,615 INFO [compile_hlg.py:76] Connecting LG after k2.determinize - 2023-05-12 18:02:28,615 INFO [compile_hlg.py:79] Removing disambiguation symbols on LG - 2023-05-12 18:02:28,616 INFO [compile_hlg.py:91] LG shape after k2.remove_epsilon: (6, None) - 2023-05-12 18:02:28,617 INFO [compile_hlg.py:96] Arc sorting LG - 2023-05-12 18:02:28,617 INFO [compile_hlg.py:99] Composing H and LG - 2023-05-12 18:02:28,619 INFO [compile_hlg.py:106] Connecting LG - 2023-05-12 18:02:28,619 INFO [compile_hlg.py:109] Arc sorting LG - 2023-05-12 18:02:28,619 INFO [compile_hlg.py:111] HLG.shape: (8, None) - 2023-05-12 18:02:28,619 INFO [compile_hlg.py:127] Saving HLG.pt to data/lang_phone - + 2023-07-27 12:41:39 (prepare.sh:27:main) dl_dir: /tmp/icefall/egs/yesno/ASR/download + 2023-07-27 12:41:39 (prepare.sh:30:main) Stage 0: Download data + /tmp/icefall/egs/yesno/ASR/download/waves_yesno.tar.gz: 100%|___________________________________________________| 4.70M/4.70M [00:00<00:00, 11.1MB/s] + 2023-07-27 12:41:46 (prepare.sh:39:main) Stage 1: Prepare yesno manifest + 2023-07-27 12:41:50 (prepare.sh:45:main) Stage 2: Compute fbank for yesno + 2023-07-27 12:41:55,718 INFO [compute_fbank_yesno.py:65] Processing train + Extracting and storing features: 100%|_______________________________________________________________________________| 90/90 [00:01<00:00, 87.82it/s] + 2023-07-27 12:41:56,778 INFO [compute_fbank_yesno.py:65] Processing test + Extracting and storing features: 100%|______________________________________________________________________________| 30/30 [00:00<00:00, 256.92it/s] + 2023-07-27 12:41:57 (prepare.sh:51:main) Stage 3: Prepare lang + 2023-07-27 12:42:02 (prepare.sh:66:main) Stage 4: Prepare G + /project/kaldilm/csrc/arpa_file_parser.cc:void kaldilm::ArpaFileParser::Read(std::istream&):79 + [I] Reading \data\ section. + /project/kaldilm/csrc/arpa_file_parser.cc:void kaldilm::ArpaFileParser::Read(std::istream&):140 + [I] Reading \1-grams: section. + 2023-07-27 12:42:02 (prepare.sh:92:main) Stage 5: Compile HLG + 2023-07-27 12:42:07,275 INFO [compile_hlg.py:124] Processing data/lang_phone + 2023-07-27 12:42:07,276 INFO [lexicon.py:171] Converting L.pt to Linv.pt + 2023-07-27 12:42:07,309 INFO [compile_hlg.py:48] Building ctc_topo. 
max_token_id: 3 + 2023-07-27 12:42:07,310 INFO [compile_hlg.py:52] Loading G.fst.txt + 2023-07-27 12:42:07,314 INFO [compile_hlg.py:62] Intersecting L and G + 2023-07-27 12:42:07,323 INFO [compile_hlg.py:64] LG shape: (4, None) + 2023-07-27 12:42:07,323 INFO [compile_hlg.py:66] Connecting LG + 2023-07-27 12:42:07,323 INFO [compile_hlg.py:68] LG shape after k2.connect: (4, None) + 2023-07-27 12:42:07,323 INFO [compile_hlg.py:70] + 2023-07-27 12:42:07,323 INFO [compile_hlg.py:71] Determinizing LG + 2023-07-27 12:42:07,341 INFO [compile_hlg.py:74] + 2023-07-27 12:42:07,341 INFO [compile_hlg.py:76] Connecting LG after k2.determinize + 2023-07-27 12:42:07,341 INFO [compile_hlg.py:79] Removing disambiguation symbols on LG + 2023-07-27 12:42:07,354 INFO [compile_hlg.py:91] LG shape after k2.remove_epsilon: (6, None) + 2023-07-27 12:42:07,445 INFO [compile_hlg.py:96] Arc sorting LG + 2023-07-27 12:42:07,445 INFO [compile_hlg.py:99] Composing H and LG + 2023-07-27 12:42:07,446 INFO [compile_hlg.py:106] Connecting LG + 2023-07-27 12:42:07,446 INFO [compile_hlg.py:109] Arc sorting LG + 2023-07-27 12:42:07,447 INFO [compile_hlg.py:111] HLG.shape: (8, None) + 2023-07-27 12:42:07,447 INFO [compile_hlg.py:127] Saving HLG.pt to data/lang_phone Training ~~~~~~~~ @@ -409,12 +430,13 @@ Now let us run the training part: .. code-block:: - $ export CUDA_VISIBLE_DEVICES="" - $ ./tdnn/train.py + (test-icefall) kuangfangjun:ASR$ export CUDA_VISIBLE_DEVICES="" + + (test-icefall) kuangfangjun:ASR$ ./tdnn/train.py .. CAUTION:: - We use ``export CUDA_VISIBLE_DEVICES=""`` so that ``icefall`` uses CPU + We use ``export CUDA_VISIBLE_DEVICES=""`` so that `icefall`_ uses CPU even if there are GPUs available. .. hint:: @@ -432,53 +454,52 @@ The training log is given below: .. code-block:: - 2023-05-12 18:04:59,759 INFO [train.py:481] Training started - 2023-05-12 18:04:59,759 INFO [train.py:482] {'exp_dir': PosixPath('tdnn/exp'), 'lang_dir': PosixPath('data/lang_phone'), 'lr': 0.01, 'feature_dim': 23, 'weight_decay': 1e-06, 'start_epoch': 0, - 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 10, 'reset_interval': 20, 'valid_interval': 10, 'beam_size': 10, - 'reduction': 'sum', 'use_double_scores': True, 'world_size': 1, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 15, 'seed': 42, 'feature_dir': PosixPath('data/fbank'), 'max_duration': 30.0, - 'bucketing_sampler': False, 'num_buckets': 10, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': False, 'return_cuts': True, 'num_workers': 2, - 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '3b7f09fa35e72589914f67089c0da9f196a92ca4', 'k2-git-date': 'Mon May 8 22:58:45 2023', - 'lhotse-version': '1.15.0.dev+git.6fcfced.clean', 'torch-version': '2.0.0+cu118', 'torch-cuda-available': False, 'torch-cuda-version': '11.8', 'python-version': '3.1', 'icefall-git-branch': 'master', - 'icefall-git-sha1': '30bde4b-clean', 'icefall-git-date': 'Thu May 11 17:37:47 2023', 'icefall-path': '/tmp/icefall', - 'k2-path': 'tmp/lib/python3.10/site-packages/k2-1.24.3.dev20230512+cuda11.8.torch2.0.0-py3.10-linux-x86_64.egg/k2/__init__.py', - 'lhotse-path': 'tmp/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'host', 'IP address': '0.0.0.0'}} - 2023-05-12 18:04:59,761 INFO [lexicon.py:168] Loading pre-compiled data/lang_phone/Linv.pt - 2023-05-12 18:04:59,764 INFO [train.py:495] device: cpu - 
2023-05-12 18:04:59,791 INFO [asr_datamodule.py:146] About to get train cuts - 2023-05-12 18:04:59,791 INFO [asr_datamodule.py:244] About to get train cuts - 2023-05-12 18:04:59,852 INFO [asr_datamodule.py:149] About to create train dataset - 2023-05-12 18:04:59,852 INFO [asr_datamodule.py:199] Using SingleCutSampler. - 2023-05-12 18:04:59,852 INFO [asr_datamodule.py:205] About to create train dataloader - 2023-05-12 18:04:59,853 INFO [asr_datamodule.py:218] About to get test cuts - 2023-05-12 18:04:59,853 INFO [asr_datamodule.py:252] About to get test cuts - 2023-05-12 18:04:59,986 INFO [train.py:422] Epoch 0, batch 0, loss[loss=1.065, over 2436.00 frames. ], tot_loss[loss=1.065, over 2436.00 frames. ], batch size: 4 - 2023-05-12 18:05:00,352 INFO [train.py:422] Epoch 0, batch 10, loss[loss=0.4561, over 2828.00 frames. ], tot_loss[loss=0.7076, over 22192.90 frames. ], batch size: 4 - 2023-05-12 18:05:00,691 INFO [train.py:444] Epoch 0, validation loss=0.9002, over 18067.00 frames. - 2023-05-12 18:05:00,996 INFO [train.py:422] Epoch 0, batch 20, loss[loss=0.2555, over 2695.00 frames. ], tot_loss[loss=0.484, over 34971.47 frames. ], batch size: 5 - 2023-05-12 18:05:01,217 INFO [train.py:444] Epoch 0, validation loss=0.4688, over 18067.00 frames. - 2023-05-12 18:05:01,251 INFO [checkpoint.py:75] Saving checkpoint to tdnn/exp/epoch-0.pt - 2023-05-12 18:05:01,389 INFO [train.py:422] Epoch 1, batch 0, loss[loss=0.2532, over 2436.00 frames. ], tot_loss[loss=0.2532, over 2436.00 frames. ], batch size: 4 - 2023-05-12 18:05:01,637 INFO [train.py:422] Epoch 1, batch 10, loss[loss=0.1139, over 2828.00 frames. ], tot_loss[loss=0.1592, over 22192.90 frames. ], batch size: 4 - 2023-05-12 18:05:01,859 INFO [train.py:444] Epoch 1, validation loss=0.1629, over 18067.00 frames. - 2023-05-12 18:05:02,094 INFO [train.py:422] Epoch 1, batch 20, loss[loss=0.0767, over 2695.00 frames. ], tot_loss[loss=0.118, over 34971.47 frames. ], batch size: 5 - 2023-05-12 18:05:02,350 INFO [train.py:444] Epoch 1, validation loss=0.06778, over 18067.00 frames. 
- 2023-05-12 18:05:02,395 INFO [checkpoint.py:75] Saving checkpoint to tdnn/exp/epoch-1.pt + 2023-07-27 12:50:51,936 INFO [train.py:481] Training started + 2023-07-27 12:50:51,936 INFO [train.py:482] {'exp_dir': PosixPath('tdnn/exp'), 'lang_dir': PosixPath('data/lang_phone'), 'lr': 0.01, 'feature_dim': 23, 'weight_decay': 1e-06, 'start_epoch': 0, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 10, 'reset_interval': 20, 'valid_interval': 10, 'beam_size': 10, 'reduction': 'sum', 'use_double_scores': True, 'world_size': 1, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 15, 'seed': 42, 'feature_dir': PosixPath('data/fbank'), 'max_duration': 30.0, 'bucketing_sampler': False, 'num_buckets': 10, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': False, 'return_cuts': True, 'num_workers': 2, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '4c05309499a08454997adf500b56dcc629e35ae5', 'k2-git-date': 'Tue Jul 25 16:23:36 2023', 'lhotse-version': '1.16.0.dev+git.7640d66.clean', 'torch-version': '1.13.0+cu116', 'torch-cuda-available': False, 'torch-cuda-version': '11.6', 'python-version': '3.8', 'icefall-git-branch': 'master', 'icefall-git-sha1': '3fb0a43-clean', 'icefall-git-date': 'Thu Jul 27 12:36:05 2023', 'icefall-path': '/tmp/icefall', 'k2-path': '/star-fj/fangjun/test-icefall/lib/python3.8/site-packages/k2/__init__.py', 'lhotse-path': '/star-fj/fangjun/test-icefall/lib/python3.8/site-packages/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-1-1220091118-57c4d55446-sph26', 'IP address': '10.177.77.20'}} + 2023-07-27 12:50:51,941 INFO [lexicon.py:168] Loading pre-compiled data/lang_phone/Linv.pt + 2023-07-27 12:50:51,949 INFO [train.py:495] device: cpu + 2023-07-27 12:50:51,965 INFO [asr_datamodule.py:146] About to get train cuts + 2023-07-27 12:50:51,965 INFO [asr_datamodule.py:244] About to get train cuts + 2023-07-27 12:50:51,967 INFO [asr_datamodule.py:149] About to create train dataset + 2023-07-27 12:50:51,967 INFO [asr_datamodule.py:199] Using SingleCutSampler. + 2023-07-27 12:50:51,967 INFO [asr_datamodule.py:205] About to create train dataloader + 2023-07-27 12:50:51,968 INFO [asr_datamodule.py:218] About to get test cuts + 2023-07-27 12:50:51,968 INFO [asr_datamodule.py:252] About to get test cuts + 2023-07-27 12:50:52,565 INFO [train.py:422] Epoch 0, batch 0, loss[loss=1.065, over 2436.00 frames. ], tot_loss[loss=1.065, over 2436.00 frames. ], batch size: 4 + 2023-07-27 12:50:53,681 INFO [train.py:422] Epoch 0, batch 10, loss[loss=0.4561, over 2828.00 frames. ], tot_loss[loss=0.7076, over 22192.90 frames.], batch size: 4 + 2023-07-27 12:50:54,167 INFO [train.py:444] Epoch 0, validation loss=0.9002, over 18067.00 frames. + 2023-07-27 12:50:55,011 INFO [train.py:422] Epoch 0, batch 20, loss[loss=0.2555, over 2695.00 frames. ], tot_loss[loss=0.484, over 34971.47 frames. ], batch size: 5 + 2023-07-27 12:50:55,331 INFO [train.py:444] Epoch 0, validation loss=0.4688, over 18067.00 frames. + 2023-07-27 12:50:55,368 INFO [checkpoint.py:75] Saving checkpoint to tdnn/exp/epoch-0.pt + 2023-07-27 12:50:55,633 INFO [train.py:422] Epoch 1, batch 0, loss[loss=0.2532, over 2436.00 frames. ], tot_loss[loss=0.2532, over 2436.00 frames. ], + batch size: 4 + 2023-07-27 12:50:56,242 INFO [train.py:422] Epoch 1, batch 10, loss[loss=0.1139, over 2828.00 frames. 
], tot_loss[loss=0.1592, over 22192.90 frames.], batch size: 4 + 2023-07-27 12:50:56,522 INFO [train.py:444] Epoch 1, validation loss=0.1627, over 18067.00 frames. + 2023-07-27 12:50:57,209 INFO [train.py:422] Epoch 1, batch 20, loss[loss=0.07055, over 2695.00 frames. ], tot_loss[loss=0.1175, over 34971.47 frames.], batch size: 5 + 2023-07-27 12:50:57,600 INFO [train.py:444] Epoch 1, validation loss=0.07091, over 18067.00 frames. + 2023-07-27 12:50:57,640 INFO [checkpoint.py:75] Saving checkpoint to tdnn/exp/epoch-1.pt + 2023-07-27 12:50:57,847 INFO [train.py:422] Epoch 2, batch 0, loss[loss=0.07731, over 2436.00 frames. ], tot_loss[loss=0.07731, over 2436.00 frames.], batch size: 4 + 2023-07-27 12:50:58,427 INFO [train.py:422] Epoch 2, batch 10, loss[loss=0.04391, over 2828.00 frames. ], tot_loss[loss=0.05341, over 22192.90 frames. ], batch size: 4 + 2023-07-27 12:50:58,884 INFO [train.py:444] Epoch 2, validation loss=0.04384, over 18067.00 frames. + 2023-07-27 12:50:59,387 INFO [train.py:422] Epoch 2, batch 20, loss[loss=0.03458, over 2695.00 frames. ], tot_loss[loss=0.04616, over 34971.47 frames. ], batch size: 5 + 2023-07-27 12:50:59,707 INFO [train.py:444] Epoch 2, validation loss=0.03379, over 18067.00 frames. + 2023-07-27 12:50:59,758 INFO [checkpoint.py:75] Saving checkpoint to tdnn/exp/epoch-2.pt - ... ... + ... ... - 2023-05-12 18:05:14,789 INFO [train.py:422] Epoch 13, batch 0, loss[loss=0.01056, over 2436.00 frames. ], tot_loss[loss=0.01056, over 2436.00 frames. ], batch size: 4 - 2023-05-12 18:05:15,016 INFO [train.py:422] Epoch 13, batch 10, loss[loss=0.009022, over 2828.00 frames. ], tot_loss[loss=0.009985, over 22192.90 frames. ], batch size: 4 - 2023-05-12 18:05:15,271 INFO [train.py:444] Epoch 13, validation loss=0.01088, over 18067.00 frames. - 2023-05-12 18:05:15,497 INFO [train.py:422] Epoch 13, batch 20, loss[loss=0.01174, over 2695.00 frames. ], tot_loss[loss=0.01077, over 34971.47 frames. ], batch size: 5 - 2023-05-12 18:05:15,747 INFO [train.py:444] Epoch 13, validation loss=0.01087, over 18067.00 frames. - 2023-05-12 18:05:15,783 INFO [checkpoint.py:75] Saving checkpoint to tdnn/exp/epoch-13.pt - 2023-05-12 18:05:15,921 INFO [train.py:422] Epoch 14, batch 0, loss[loss=0.01045, over 2436.00 frames. ], tot_loss[loss=0.01045, over 2436.00 frames. ], batch size: 4 - 2023-05-12 18:05:16,146 INFO [train.py:422] Epoch 14, batch 10, loss[loss=0.008957, over 2828.00 frames. ], tot_loss[loss=0.009903, over 22192.90 frames. ], batch size: 4 - 2023-05-12 18:05:16,374 INFO [train.py:444] Epoch 14, validation loss=0.01092, over 18067.00 frames. - 2023-05-12 18:05:16,598 INFO [train.py:422] Epoch 14, batch 20, loss[loss=0.01169, over 2695.00 frames. ], tot_loss[loss=0.01065, over 34971.47 frames. ], batch size: 5 - 2023-05-12 18:05:16,824 INFO [train.py:444] Epoch 14, validation loss=0.01077, over 18067.00 frames. - 2023-05-12 18:05:16,862 INFO [checkpoint.py:75] Saving checkpoint to tdnn/exp/epoch-14.pt - 2023-05-12 18:05:16,865 INFO [train.py:555] Done! + 2023-07-27 12:51:23,433 INFO [train.py:422] Epoch 13, batch 0, loss[loss=0.01054, over 2436.00 frames. ], tot_loss[loss=0.01054, over 2436.00 frames. ], batch size: 4 + 2023-07-27 12:51:23,980 INFO [train.py:422] Epoch 13, batch 10, loss[loss=0.009014, over 2828.00 frames. ], tot_loss[loss=0.009974, over 22192.90 frames. ], batch size: 4 + 2023-07-27 12:51:24,489 INFO [train.py:444] Epoch 13, validation loss=0.01085, over 18067.00 frames. 
+ 2023-07-27 12:51:25,258 INFO [train.py:422] Epoch 13, batch 20, loss[loss=0.01172, over 2695.00 frames. ], tot_loss[loss=0.01055, over 34971.47 frames. ], batch size: 5 + 2023-07-27 12:51:25,621 INFO [train.py:444] Epoch 13, validation loss=0.01074, over 18067.00 frames. + 2023-07-27 12:51:25,699 INFO [checkpoint.py:75] Saving checkpoint to tdnn/exp/epoch-13.pt + 2023-07-27 12:51:25,866 INFO [train.py:422] Epoch 14, batch 0, loss[loss=0.01044, over 2436.00 frames. ], tot_loss[loss=0.01044, over 2436.00 frames. ], batch size: 4 + 2023-07-27 12:51:26,844 INFO [train.py:422] Epoch 14, batch 10, loss[loss=0.008942, over 2828.00 frames. ], tot_loss[loss=0.01, over 22192.90 frames. ], batch size: 4 + 2023-07-27 12:51:27,221 INFO [train.py:444] Epoch 14, validation loss=0.01082, over 18067.00 frames. + 2023-07-27 12:51:27,970 INFO [train.py:422] Epoch 14, batch 20, loss[loss=0.01169, over 2695.00 frames. ], tot_loss[loss=0.01054, over 34971.47 frames. ], batch size: 5 + 2023-07-27 12:51:28,247 INFO [train.py:444] Epoch 14, validation loss=0.01073, over 18067.00 frames. + 2023-07-27 12:51:28,323 INFO [checkpoint.py:75] Saving checkpoint to tdnn/exp/epoch-14.pt + 2023-07-27 12:51:28,326 INFO [train.py:555] Done! Decoding ~~~~~~~~ @@ -487,42 +508,32 @@ Let us use the trained model to decode the test set: .. code-block:: - $ ./tdnn/decode.py + (test-icefall) kuangfangjun:ASR$ ./tdnn/decode.py -The decoding log is: + 2023-07-27 12:55:12,840 INFO [decode.py:263] Decoding started + 2023-07-27 12:55:12,840 INFO [decode.py:264] {'exp_dir': PosixPath('tdnn/exp'), 'lang_dir': PosixPath('data/lang_phone'), 'lm_dir': PosixPath('data/lm'), 'feature_dim': 23, 'search_beam': 20, 'output_beam': 8, 'min_active_states': 30, 'max_active_states': 10000, 'use_double_scores': True, 'epoch': 14, 'avg': 2, 'export': False, 'feature_dir': PosixPath('data/fbank'), 'max_duration': 30.0, 'bucketing_sampler': False, 'num_buckets': 10, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': False, 'return_cuts': True, 'num_workers': 2, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '4c05309499a08454997adf500b56dcc629e35ae5', 'k2-git-date': 'Tue Jul 25 16:23:36 2023', 'lhotse-version': '1.16.0.dev+git.7640d66.clean', 'torch-version': '1.13.0+cu116', 'torch-cuda-available': False, 'torch-cuda-version': '11.6', 'python-version': '3.8', 'icefall-git-branch': 'master', 'icefall-git-sha1': '3fb0a43-clean', 'icefall-git-date': 'Thu Jul 27 12:36:05 2023', 'icefall-path': '/tmp/icefall', 'k2-path': '/star-fj/fangjun/test-icefall/lib/python3.8/site-packages/k2/__init__.py', 'lhotse-path': '/star-fj/fangjun/test-icefall/lib/python3.8/site-packages/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-1-1220091118-57c4d55446-sph26', 'IP address': '10.177.77.20'}} + 2023-07-27 12:55:12,841 INFO [lexicon.py:168] Loading pre-compiled data/lang_phone/Linv.pt + 2023-07-27 12:55:12,855 INFO [decode.py:273] device: cpu + 2023-07-27 12:55:12,868 INFO [decode.py:291] averaging ['tdnn/exp/epoch-13.pt', 'tdnn/exp/epoch-14.pt'] + 2023-07-27 12:55:12,882 INFO [asr_datamodule.py:218] About to get test cuts + 2023-07-27 12:55:12,883 INFO [asr_datamodule.py:252] About to get test cuts + 2023-07-27 12:55:13,157 INFO [decode.py:204] batch 0/?, cuts processed until now is 4 + 2023-07-27 12:55:13,701 INFO [decode.py:241] The transcripts are stored in tdnn/exp/recogs-test_set.txt + 2023-07-27 12:55:13,702 INFO [utils.py:564] [test_set] %WER 0.42% [1 / 240, 0 
ins, 1 del, 0 sub ] + 2023-07-27 12:55:13,704 INFO [decode.py:249] Wrote detailed error stats to tdnn/exp/errs-test_set.txt + 2023-07-27 12:55:13,704 INFO [decode.py:316] Done! -.. code-block:: - 2023-05-12 18:08:30,482 INFO [decode.py:263] Decoding started - 2023-05-12 18:08:30,483 INFO [decode.py:264] {'exp_dir': PosixPath('tdnn/exp'), 'lang_dir': PosixPath('data/lang_phone'), 'lm_dir': PosixPath('data/lm'), 'feature_dim': 23, - 'search_beam': 20, 'output_beam': 8, 'min_active_states': 30, 'max_active_states': 10000, 'use_double_scores': True, 'epoch': 14, 'avg': 2, 'export': False, 'feature_dir': PosixPath('data/fbank'), - 'max_duration': 30.0, 'bucketing_sampler': False, 'num_buckets': 10, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': False, 'return_cuts': True, - 'num_workers': 2, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '3b7f09fa35e72589914f67089c0da9f196a92ca4', 'k2-git-date': 'Mon May 8 22:58:45 2023', - 'lhotse-version': '1.15.0.dev+git.6fcfced.clean', 'torch-version': '2.0.0+cu118', 'torch-cuda-available': False, 'torch-cuda-version': '11.8', 'python-version': '3.1', 'icefall-git-branch': 'master', - 'icefall-git-sha1': '30bde4b-clean', 'icefall-git-date': 'Thu May 11 17:37:47 2023', 'icefall-path': '/tmp/icefall', - 'k2-path': '/tmp/lib/python3.10/site-packages/k2-1.24.3.dev20230512+cuda11.8.torch2.0.0-py3.10-linux-x86_64.egg/k2/__init__.py', - 'lhotse-path': '/tmp/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'host', 'IP address': '0.0.0.0'}} - 2023-05-12 18:08:30,483 INFO [lexicon.py:168] Loading pre-compiled data/lang_phone/Linv.pt - 2023-05-12 18:08:30,487 INFO [decode.py:273] device: cpu - 2023-05-12 18:08:30,513 INFO [decode.py:291] averaging ['tdnn/exp/epoch-13.pt', 'tdnn/exp/epoch-14.pt'] - 2023-05-12 18:08:30,521 INFO [asr_datamodule.py:218] About to get test cuts - 2023-05-12 18:08:30,521 INFO [asr_datamodule.py:252] About to get test cuts - 2023-05-12 18:08:30,675 INFO [decode.py:204] batch 0/?, cuts processed until now is 4 - 2023-05-12 18:08:30,923 INFO [decode.py:241] The transcripts are stored in tdnn/exp/recogs-test_set.txt - 2023-05-12 18:08:30,924 INFO [utils.py:558] [test_set] %WER 0.42% [1 / 240, 0 ins, 1 del, 0 sub ] - 2023-05-12 18:08:30,925 INFO [decode.py:249] Wrote detailed error stats to tdnn/exp/errs-test_set.txt - 2023-05-12 18:08:30,925 INFO [decode.py:316] Done! - -**Congratulations!** You have successfully setup the environment and have run the first recipe in ``icefall``. +**Congratulations!** You have successfully set up the environment and have run the first recipe in `icefall`_. Have fun with ``icefall``! YouTube Video ------------- -We provide the following YouTube video showing how to install ``icefall``. +We provide the following YouTube video showing how to install `icefall`_. It also shows how to debug various problems that you may encounter while -using ``icefall``. +using `icefall`_. ..
note:: From 751bb6ff1a933c69a5ad4aebe8e24972f14dd691 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Fri, 28 Jul 2023 10:34:40 +0800 Subject: [PATCH 25/30] Add docker image for icefall (#1189) --- .github/workflows/build-docker-image.yml | 45 ++++++++++++++++ .github/workflows/run-docker-image.yml | 66 ++++++++++++++++++++++++ docker/README.md | 15 ++++++ docker/torch1.12.1-cuda11.3.dockerfile | 62 ++++++++++++++++++++++ docker/torch1.13.0-cuda11.6.dockerfile | 64 +++++++++++++++++++++++ docker/torch1.9.0-cuda10.2.dockerfile | 62 ++++++++++++++++++++++ docker/torch2.0.0-cuda11.7.dockerfile | 62 ++++++++++++++++++++++ 7 files changed, 376 insertions(+) create mode 100644 .github/workflows/build-docker-image.yml create mode 100644 .github/workflows/run-docker-image.yml create mode 100644 docker/torch1.12.1-cuda11.3.dockerfile create mode 100644 docker/torch1.13.0-cuda11.6.dockerfile create mode 100644 docker/torch1.9.0-cuda10.2.dockerfile create mode 100644 docker/torch2.0.0-cuda11.7.dockerfile diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml new file mode 100644 index 000000000..327f0ee45 --- /dev/null +++ b/.github/workflows/build-docker-image.yml @@ -0,0 +1,45 @@ +# see also +# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages +name: Build docker image +on: + workflow_dispatch: + +concurrency: + group: build_docker-${{ github.ref }} + cancel-in-progress: true + +jobs: + build-docker-image: + name: ${{ matrix.image }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + image: ["torch2.0.0-cuda11.7", "torch1.13.0-cuda11.6", "torch1.12.1-cuda11.3", "torch1.9.0-cuda10.2"] + + steps: + # refer to https://github.com/actions/checkout + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Rename + shell: bash + run: | + image=${{ matrix.image }} + mv -v ./docker/$image.dockerfile ./Dockerfile + + - name: Log in to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Build and push + uses: docker/build-push-action@v4 + with: + context: . 
+ file: ./Dockerfile + push: true + tags: k2fsa/icefall:${{ matrix.image }} diff --git a/.github/workflows/run-docker-image.yml b/.github/workflows/run-docker-image.yml new file mode 100644 index 000000000..d0ac11071 --- /dev/null +++ b/.github/workflows/run-docker-image.yml @@ -0,0 +1,66 @@ +name: Run docker image +on: + workflow_dispatch: + +concurrency: + group: run_docker_image-${{ github.ref }} + cancel-in-progress: true + +jobs: + run-docker-image: + name: ${{ matrix.image }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + image: ["torch2.0.0-cuda11.7", "torch1.13.0-cuda11.6", "torch1.12.1-cuda11.3", "torch1.9.0-cuda10.2"] + steps: + # refer to https://github.com/actions/checkout + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Run the build process with Docker + uses: addnab/docker-run-action@v3 + with: + image: k2fsa/icefall:${{ matrix.image }} + run: | + uname -a + cat /etc/*release + + nvcc --version + + which nvcc + cuda_dir=$(dirname $(which nvcc)) + echo "cuda_dir: $cuda_dir" + + find $cuda_dir -name libcuda.so* + echo "--------------------" + + find / -name libcuda.so* 2>/dev/null + + pushd /opt/conda/lib/stubs && ln -s libcuda.so libcuda.so.1 && popd + + export LD_LIBRARY_PATH=/opt/conda/lib/stubs:$LD_LIBRARY_PATH + echo "LD_LIBRARY_PATH $LD_LIBRARY_PATH" + + python3 --version + which python3 + + echo "----------torch----------" + python3 -m torch.utils.collect_env + + echo "----------k2----------" + python3 -c "import k2; print(k2.__file__)" + python3 -c "import k2; print(k2.__version__)" + python3 -m k2.version + + echo "----------lhotse----------" + python3 -c "import lhotse; print(lhotse.__file__)" + python3 -c "import lhotse; print(lhotse.__version__)" + + echo "----------kaldifeat----------" + python3 -c "import kaldifeat; print(kaldifeat.__file__)" + python3 -c "import kaldifeat; print(kaldifeat.__version__)" + diff --git a/docker/README.md b/docker/README.md index c14b9bf75..19959bfe6 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,5 +1,20 @@ # icefall dockerfile +## Download from dockerhub + +You can find pre-built docker image for icefall at the following address: + + + +Example usage: + +```bash +docker run --gpus all --rm -it k2fsa/icefall:torch1.13.0-cuda11.6 /bin/bash +``` + + +## Build from dockerfile + 2 sets of configuration are provided - (a) Ubuntu18.04-pytorch1.12.1-cuda11.3-cudnn8, and (b) Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8. If your NVIDIA driver supports CUDA Version: 11.3, please go for case (a) Ubuntu18.04-pytorch1.12.1-cuda11.3-cudnn8. 
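The README's `docker run` example above gives a bare interactive shell that discards all state on exit. A minimal sketch of one way to make downloaded data outlive the container, assuming a hypothetical host directory `/path/to/data` (the image tag is one of those published by the build workflow, and the dockerfiles below clone icefall into `/workspace/icefall`):

```bash
# Pull a pre-built image; pick the tag whose CUDA version your driver supports.
docker pull k2fsa/icefall:torch1.13.0-cuda11.6

# Start an interactive container with all GPUs visible and a host directory
# mounted at /workspace/data, so datasets survive the --rm cleanup.
docker run --gpus all --rm -it \
  -v /path/to/data:/workspace/data \
  k2fsa/icefall:torch1.13.0-cuda11.6 /bin/bash
```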
diff --git a/docker/torch1.12.1-cuda11.3.dockerfile b/docker/torch1.12.1-cuda11.3.dockerfile new file mode 100644 index 000000000..c5e252abb --- /dev/null +++ b/docker/torch1.12.1-cuda11.3.dockerfile @@ -0,0 +1,62 @@ +FROM pytorch/pytorch:1.12.1-cuda11.3-cudnn8-runtime + +ENV LC_ALL C.UTF-8 + +ARG DEBIAN_FRONTEND=noninteractive + +ARG K2_VERSION="1.24.3.dev20230725+cuda11.3.torch1.12.1" +ARG KALDIFEAT_VERSION="1.25.0.dev20230726+cuda11.3.torch1.12.1" +ARG TORCHAUDIO_VERSION="0.12.1+cu113" + +LABEL authors="Fangjun Kuang " +LABEL k2_version=${K2_VERSION} +LABEL kaldifeat_version=${KALDIFEAT_VERSION} +LABEL github_repo="https://github.com/k2-fsa/icefall" + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + curl \ + vim \ + libssl-dev \ + autoconf \ + automake \ + bzip2 \ + ca-certificates \ + ffmpeg \ + g++ \ + gfortran \ + git \ + libtool \ + make \ + patch \ + sox \ + subversion \ + unzip \ + valgrind \ + wget \ + zlib1g-dev \ + && rm -rf /var/lib/apt/lists/* + +# Install dependencies +RUN pip install --no-cache-dir \ + torchaudio==${TORCHAUDIO_VERSION} -f https://download.pytorch.org/whl/torch_stable.html \ + k2==${K2_VERSION} -f https://k2-fsa.github.io/k2/cuda.html \ + git+https://github.com/lhotse-speech/lhotse \ + kaldifeat==${KALDIFEAT_VERSION} -f https://csukuangfj.github.io/kaldifeat/cuda.html \ + \ + kaldi_native_io \ + kaldialign \ + kaldifst \ + kaldilm \ + sentencepiece>=0.1.96 \ + tensorboard \ + typeguard \ + dill + +RUN git clone https://github.com/k2-fsa/icefall /workspace/icefall && \ + cd /workspace/icefall && \ + pip install --no-cache-dir -r requirements.txt + +ENV PYTHONPATH /workspace/icefall:$PYTHONPATH + +WORKDIR /workspace/icefall diff --git a/docker/torch1.13.0-cuda11.6.dockerfile b/docker/torch1.13.0-cuda11.6.dockerfile new file mode 100644 index 000000000..bcbf8b599 --- /dev/null +++ b/docker/torch1.13.0-cuda11.6.dockerfile @@ -0,0 +1,64 @@ +FROM pytorch/pytorch:1.13.0-cuda11.6-cudnn8-runtime + +ENV LC_ALL C.UTF-8 + +ARG DEBIAN_FRONTEND=noninteractive + +ARG K2_VERSION="1.24.3.dev20230725+cuda11.6.torch1.13.0" +ARG KALDIFEAT_VERSION="1.25.0.dev20230726+cuda11.6.torch1.13.0" +ARG TORCHAUDIO_VERSION="0.13.0+cu116" + +LABEL authors="Fangjun Kuang " +LABEL k2_version=${K2_VERSION} +LABEL kaldifeat_version=${KALDIFEAT_VERSION} +LABEL github_repo="https://github.com/k2-fsa/icefall" + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + curl \ + vim \ + libssl-dev \ + autoconf \ + automake \ + bzip2 \ + ca-certificates \ + ffmpeg \ + g++ \ + gfortran \ + git \ + libtool \ + make \ + patch \ + sox \ + subversion \ + unzip \ + valgrind \ + wget \ + zlib1g-dev \ + && rm -rf /var/lib/apt/lists/* + +# Install dependencies +RUN pip install --no-cache-dir \ + torchaudio==${TORCHAUDIO_VERSION} -f https://download.pytorch.org/whl/torch_stable.html \ + k2==${K2_VERSION} -f https://k2-fsa.github.io/k2/cuda.html \ + git+https://github.com/lhotse-speech/lhotse \ + kaldifeat==${KALDIFEAT_VERSION} -f https://csukuangfj.github.io/kaldifeat/cuda.html \ + \ + kaldi_native_io \ + kaldialign \ + kaldifst \ + kaldilm \ + sentencepiece>=0.1.96 \ + tensorboard \ + typeguard \ + dill + +RUN git clone https://github.com/k2-fsa/icefall /workspace/icefall && \ + cd /workspace/icefall && \ + pip install --no-cache-dir -r requirements.txt + +ENV PYTHONPATH /workspace/icefall:$PYTHONPATH + +ENV LD_LIBRARY_PATH /opt/conda/lib/stubs:$LD_LIBRARY_PATH + +WORKDIR /workspace/icefall diff --git a/docker/torch1.9.0-cuda10.2.dockerfile 
b/docker/torch1.9.0-cuda10.2.dockerfile new file mode 100644 index 000000000..7553fcf86 --- /dev/null +++ b/docker/torch1.9.0-cuda10.2.dockerfile @@ -0,0 +1,62 @@ +FROM pytorch/pytorch:1.9.0-cuda10.2-cudnn7-runtime + +ENV LC_ALL C.UTF-8 + +ARG DEBIAN_FRONTEND=noninteractive + +ARG K2_VERSION="1.24.3.dev20230726+cuda10.2.torch1.9.0" +ARG KALDIFEAT_VERSION="1.25.0.dev20230726+cuda10.2.torch1.9.0" +ARG TORCHAUDIO_VERSION="0.9.0" + +LABEL authors="Fangjun Kuang " +LABEL k2_version=${K2_VERSION} +LABEL kaldifeat_version=${KALDIFEAT_VERSION} +LABEL github_repo="https://github.com/k2-fsa/icefall" + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + curl \ + vim \ + libssl-dev \ + autoconf \ + automake \ + bzip2 \ + ca-certificates \ + ffmpeg \ + g++ \ + gfortran \ + git \ + libtool \ + make \ + patch \ + sox \ + subversion \ + unzip \ + valgrind \ + wget \ + zlib1g-dev \ + && rm -rf /var/lib/apt/lists/* + +# Install dependencies +RUN pip install --no-cache-dir \ + torchaudio==${TORCHAUDIO_VERSION} -f https://download.pytorch.org/whl/torch_stable.html \ + k2==${K2_VERSION} -f https://k2-fsa.github.io/k2/cuda.html \ + kaldifeat==${KALDIFEAT_VERSION} -f https://csukuangfj.github.io/kaldifeat/cuda.html \ + git+https://github.com/lhotse-speech/lhotse \ + \ + kaldi_native_io \ + kaldialign \ + kaldifst \ + kaldilm \ + sentencepiece>=0.1.96 \ + tensorboard \ + typeguard \ + dill + +RUN git clone https://github.com/k2-fsa/icefall /workspace/icefall && \ + cd /workspace/icefall && \ + pip install --no-cache-dir -r requirements.txt + +ENV PYTHONPATH /workspace/icefall:$PYTHONPATH + +WORKDIR /workspace/icefall diff --git a/docker/torch2.0.0-cuda11.7.dockerfile b/docker/torch2.0.0-cuda11.7.dockerfile new file mode 100644 index 000000000..c11c0bd67 --- /dev/null +++ b/docker/torch2.0.0-cuda11.7.dockerfile @@ -0,0 +1,62 @@ +FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime + +ENV LC_ALL C.UTF-8 + +ARG DEBIAN_FRONTEND=noninteractive + +ARG K2_VERSION="1.24.3.dev20230718+cuda11.7.torch2.0.0" +ARG KALDIFEAT_VERSION="1.25.0.dev20230726+cuda11.7.torch2.0.0" +ARG TORCHAUDIO_VERSION="2.0.0+cu117" + +LABEL authors="Fangjun Kuang " +LABEL k2_version=${K2_VERSION} +LABEL kaldifeat_version=${KALDIFEAT_VERSION} +LABEL github_repo="https://github.com/k2-fsa/icefall" + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + curl \ + vim \ + libssl-dev \ + autoconf \ + automake \ + bzip2 \ + ca-certificates \ + ffmpeg \ + g++ \ + gfortran \ + git \ + libtool \ + make \ + patch \ + sox \ + subversion \ + unzip \ + valgrind \ + wget \ + zlib1g-dev \ + && rm -rf /var/lib/apt/lists/* + +# Install dependencies +RUN pip install --no-cache-dir \ + torchaudio==${TORCHAUDIO_VERSION} -f https://download.pytorch.org/whl/torch_stable.html \ + k2==${K2_VERSION} -f https://k2-fsa.github.io/k2/cuda.html \ + git+https://github.com/lhotse-speech/lhotse \ + kaldifeat==${KALDIFEAT_VERSION} -f https://csukuangfj.github.io/kaldifeat/cuda.html \ + \ + kaldi_native_io \ + kaldialign \ + kaldifst \ + kaldilm \ + sentencepiece>=0.1.96 \ + tensorboard \ + typeguard \ + dill + +RUN git clone https://github.com/k2-fsa/icefall /workspace/icefall && \ + cd /workspace/icefall && \ + pip install --no-cache-dir -r requirements.txt + +ENV PYTHONPATH /workspace/icefall:$PYTHONPATH + +WORKDIR /workspace/icefall From 375520d419826485a206115d66b1471934295081 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Fri, 28 Jul 2023 15:43:08 +0800 Subject: [PATCH 26/30] Run the yesno recipe with docker in GitHub actions (#1191) 
--- .github/workflows/run-docker-image.yml | 34 +++++++++++++++++++++++--- docker/torch1.12.1-cuda11.3.dockerfile | 12 +++++++-- docker/torch1.13.0-cuda11.6.dockerfile | 10 +++++++- docker/torch1.9.0-cuda10.2.dockerfile | 30 ++++++++++++++++++++--- docker/torch2.0.0-cuda11.7.dockerfile | 12 +++++++-- 5 files changed, 86 insertions(+), 12 deletions(-) diff --git a/.github/workflows/run-docker-image.yml b/.github/workflows/run-docker-image.yml index d0ac11071..12604a132 100644 --- a/.github/workflows/run-docker-image.yml +++ b/.github/workflows/run-docker-image.yml @@ -25,12 +25,23 @@ jobs: uses: addnab/docker-run-action@v3 with: image: k2fsa/icefall:${{ matrix.image }} + shell: bash run: | uname -a cat /etc/*release nvcc --version + # For torch1.9.0-cuda10.2 + export LD_LIBRARY_PATH=/usr/local/cuda-10.2/compat:$LD_LIBRARY_PATH + + # For torch1.12.1-cuda11.3 + export LD_LIBRARY_PATH=/usr/local/cuda-11.3/compat:$LD_LIBRARY_PATH + + # For torch2.0.0-cuda11.7 + export LD_LIBRARY_PATH=/usr/local/cuda-11.7/compat:$LD_LIBRARY_PATH + + which nvcc cuda_dir=$(dirname $(which nvcc)) echo "cuda_dir: $cuda_dir" @@ -40,20 +51,26 @@ jobs: find / -name libcuda.so* 2>/dev/null - pushd /opt/conda/lib/stubs && ln -s libcuda.so libcuda.so.1 && popd + # for torch1.13.0-cuda11.6 + if [ -e /opt/conda/lib/stubs/libcuda.so ]; then + cd /opt/conda/lib/stubs && ln -s libcuda.so libcuda.so.1 && cd - + export LD_LIBRARY_PATH=/opt/conda/lib/stubs:$LD_LIBRARY_PATH + fi - export LD_LIBRARY_PATH=/opt/conda/lib/stubs:$LD_LIBRARY_PATH - echo "LD_LIBRARY_PATH $LD_LIBRARY_PATH" + find / -name libcuda.so* 2>/dev/null + echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH" python3 --version which python3 + python3 -m pip list + echo "----------torch----------" python3 -m torch.utils.collect_env echo "----------k2----------" python3 -c "import k2; print(k2.__file__)" - python3 -c "import k2; print(k2.__version__)" + python3 -c "import k2; print(k2.__dev_version__)" python3 -m k2.version echo "----------lhotse----------" @@ -64,3 +81,12 @@ jobs: python3 -c "import kaldifeat; print(kaldifeat.__file__)" python3 -c "import kaldifeat; print(kaldifeat.__version__)" + echo "Test yesno recipe" + + cd egs/yesno/ASR + + ./prepare.sh + + ./tdnn/train.py + + ./tdnn/decode.py diff --git a/docker/torch1.12.1-cuda11.3.dockerfile b/docker/torch1.12.1-cuda11.3.dockerfile index c5e252abb..5338bdca7 100644 --- a/docker/torch1.12.1-cuda11.3.dockerfile +++ b/docker/torch1.12.1-cuda11.3.dockerfile @@ -1,4 +1,4 @@ -FROM pytorch/pytorch:1.12.1-cuda11.3-cudnn8-runtime +FROM pytorch/pytorch:1.12.1-cuda11.3-cudnn8-devel ENV LC_ALL C.UTF-8 @@ -51,7 +51,15 @@ RUN pip install --no-cache-dir \ sentencepiece>=0.1.96 \ tensorboard \ typeguard \ - dill + dill \ + onnx \ + onnxruntime \ + onnxmltools \ + multi_quantization \ + typeguard \ + numpy \ + pytest \ + graphviz RUN git clone https://github.com/k2-fsa/icefall /workspace/icefall && \ cd /workspace/icefall && \ diff --git a/docker/torch1.13.0-cuda11.6.dockerfile b/docker/torch1.13.0-cuda11.6.dockerfile index bcbf8b599..4d2f96c8e 100644 --- a/docker/torch1.13.0-cuda11.6.dockerfile +++ b/docker/torch1.13.0-cuda11.6.dockerfile @@ -51,7 +51,15 @@ RUN pip install --no-cache-dir \ sentencepiece>=0.1.96 \ tensorboard \ typeguard \ - dill + dill \ + onnx \ + onnxruntime \ + onnxmltools \ + multi_quantization \ + typeguard \ + numpy \ + pytest \ + graphviz RUN git clone https://github.com/k2-fsa/icefall /workspace/icefall && \ cd /workspace/icefall && \ diff --git a/docker/torch1.9.0-cuda10.2.dockerfile 
b/docker/torch1.9.0-cuda10.2.dockerfile index 7553fcf86..a7cef6dc8 100644 --- a/docker/torch1.9.0-cuda10.2.dockerfile +++ b/docker/torch1.9.0-cuda10.2.dockerfile @@ -1,4 +1,4 @@ -FROM pytorch/pytorch:1.9.0-cuda10.2-cudnn7-runtime +FROM pytorch/pytorch:1.9.0-cuda10.2-cudnn7-devel ENV LC_ALL C.UTF-8 @@ -13,6 +13,13 @@ LABEL k2_version=${K2_VERSION} LABEL kaldifeat_version=${KALDIFEAT_VERSION} LABEL github_repo="https://github.com/k2-fsa/icefall" +# see https://developer.nvidia.com/blog/updating-the-cuda-linux-gpg-repository-key/ + +RUN rm /etc/apt/sources.list.d/cuda.list && \ + rm /etc/apt/sources.list.d/nvidia-ml.list && \ + apt-key del 7fa2af80 + + RUN apt-get update && \ apt-get install -y --no-install-recommends \ curl \ @@ -37,8 +44,15 @@ RUN apt-get update && \ zlib1g-dev \ && rm -rf /var/lib/apt/lists/* +RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-keyring_1.0-1_all.deb && \ + dpkg -i cuda-keyring_1.0-1_all.deb && \ + rm -v cuda-keyring_1.0-1_all.deb && \ + apt-get update && \ + rm -rf /var/lib/apt/lists/* + # Install dependencies -RUN pip install --no-cache-dir \ +RUN pip uninstall -y tqdm && \ + pip install -U --no-cache-dir \ torchaudio==${TORCHAUDIO_VERSION} -f https://download.pytorch.org/whl/torch_stable.html \ k2==${K2_VERSION} -f https://k2-fsa.github.io/k2/cuda.html \ kaldifeat==${KALDIFEAT_VERSION} -f https://csukuangfj.github.io/kaldifeat/cuda.html \ @@ -51,7 +65,17 @@ RUN pip install --no-cache-dir \ sentencepiece>=0.1.96 \ tensorboard \ typeguard \ - dill + dill \ + onnx \ + onnxruntime \ + onnxmltools \ + multi_quantization \ + typeguard \ + numpy \ + pytest \ + graphviz \ + tqdm>=4.63.0 + RUN git clone https://github.com/k2-fsa/icefall /workspace/icefall && \ cd /workspace/icefall && \ diff --git a/docker/torch2.0.0-cuda11.7.dockerfile b/docker/torch2.0.0-cuda11.7.dockerfile index c11c0bd67..d91fbc24f 100644 --- a/docker/torch2.0.0-cuda11.7.dockerfile +++ b/docker/torch2.0.0-cuda11.7.dockerfile @@ -1,4 +1,4 @@ -FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime +FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-devel ENV LC_ALL C.UTF-8 @@ -51,7 +51,15 @@ RUN pip install --no-cache-dir \ sentencepiece>=0.1.96 \ tensorboard \ typeguard \ - dill + dill \ + onnx \ + onnxruntime \ + onnxmltools \ + multi_quantization \ + typeguard \ + numpy \ + pytest \ + graphviz RUN git clone https://github.com/k2-fsa/icefall /workspace/icefall && \ cd /workspace/icefall && \ From bcabaf896c0eadef1ed8d86907847c367e4bd14f Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Tue, 1 Aug 2023 12:28:34 +0800 Subject: [PATCH 27/30] Add doc describing how to run icefall within a docker container (#1194) --- docs/source/docker/img/docker-hub.png | Bin 0 -> 364778 bytes docs/source/docker/index.rst | 17 +++ docs/source/docker/intro.rst | 171 ++++++++++++++++++++++++++ docs/source/index.rst | 4 +- docs/source/installation/index.rst | 5 + 5 files changed, 196 insertions(+), 1 deletion(-) create mode 100644 docs/source/docker/img/docker-hub.png create mode 100644 docs/source/docker/index.rst create mode 100644 docs/source/docker/intro.rst diff --git a/docs/source/docker/img/docker-hub.png b/docs/source/docker/img/docker-hub.png new file mode 100644 index 0000000000000000000000000000000000000000..a9e7715b0b41d49cf6a2717d2f3f42c193269134 GIT binary patch literal 364778 zcmbSy1#n(Hu4p)4l7_iqW@cu_hMAcgW@c`~%*@P5lQhuK7i^fBndzmwd-v{rZ)fhm z@64RBk8R18EK9QN6QL+C0T25b76b$YUP@9_2?PXC2?7EI06=||bk+RO2LXXCw-gak zloAmkRCKgAv$Qb<0g;SIN`Y2PSj6x@e}6wDZ46c{3|@mcgosBGhRY?y6D1=B6^3C# 
[base85-encoded binary payload for docs/source/docker/img/docker-hub.png (364778 bytes) omitted]
zx4`^Wo+v9Ao()8aSQ_e70%w(isbSP}HGSE#F}w7qU7u(OQLb$)G=rtCee>xiu|CyhzOO`KKl*N+lx#_XvC8a~~?tuv~6+!r`!fxwb3T zdHhnV4IVe0O4*otIJUbCPn+r>8Q24LbM~mt-An^(@ zIOiDqP@jWR^&UV(4|%SIP2OCrX49|Dp0po#qZZ9jZ*W#aX=jqbODGPU#&PootG77> zqCD(nn1l=L`eD2?EG*NmYk;CSkd5(12P><6rOWW*GOy@zu4F9N1dB5jVratZIPbpz zC(D>oHEw&?AZ;-3wqI6xMX{DG~cGu%oh)BbwGNVNCCpiwuGa4Md?~t=2w%Q!98I4*>Ia0gpan9P?+DN ztt2NIAzvf;cKW_l@m-bZN#b5FcmIXSd(zXkOAe`Id?K>vs#@hY`z+DQy?uMoP2%ek zY@^a>8cyZb)edtrpa4=(`Dv$pc}ad)y6dPcLzF#jO!0m!7m>YZ4L zLeFb)B9tLm^kt`7{-dzkRFc~7(Cp=(_}eRLdbYtGHCI=QF+W)ReRK)YT%{^&{m@(B z+>&;@y)g?xspU|VG#aH3iG7`y$&*Bts@1MFGhI{cH!Azh+-5E%@FIpSjx=Y@4U|m;8 z61bam!{ET9DJDY?JiB&Z_|$oxIs@ZcW3ye!z~AcQ=GNqm-tT#lD3!HKG_R>%2L9;4 zH6~?9z{QSt`48 zpV!`RoDwaWm6q|yprRCkv{37LoH4qpsg2vki7#?N7dzgUxD+(qDVUJRfC&*o0Nd0M z?c%3L%)8_ymOF28ru3T^)bkDTOzhW=$8!afavXTR@rd|JEaBz2-ERAz z*X#69d2ZFT!=Pa3SmmBhz!cTreXk|3y{lf*tGd??a4X!cJD*%14Niio6kYRKFB9Y{ z^Q}C%CY&j%X*!xjy;32-2L*@#4pKIpH+(5jxoA`8eNCIxYUx^cbF^>Ki9a@=RJpQ< zpNf5skyYAhHQ3xo5c61<|(>e{{f5nqja~%?K!QL?_17Ui-mF z3Vfr~E=!Vpy$sEw)S6f`ZOPG3>40xGpWEPsQFRYo+k?*GSX5?ipJTg|>wsIoBtN^1Uhb)?ak)c{X_&lH}tL^T&Y$8!C3e`%>Kz+Lv#%ieS`euSYjJ!f`lOokOpNDN??@zlzg{ zKg>rjWv9>&!63ewbp0WTl?#saAxis&Ddya~JW#wY?2)Lh)Z+||@TP8aBE zkGJKA_=PI6$FPK;UiSguy^_stX9_&Cb=lXPDn z?>Zj&;p{ub>I!f6mDbk`BNI>jo`*&uC@J(15Rh2GGjA1i!0 z)VSxvXS5@4m(GvHyZu9~*tQOA<%3qK&c>^?QOmzNh>%)R^(-teIuhVMLrhdyT7 zcT@hN5Zxh7h+WA_#Cji9rRGgov__dOg8A}84NVuVYIzOg=c-N$RwMPvG$hg!W-_6FxaRT2lgIv zgK>GJwMh1pEnFooxKhs$MjH96W7Hy$rmNaLYju(V(s=xqTG#o;hacR!aRGb9inJl+^oHw(y#-PYmrc?G zRIcyV9GiR$?mYP5m5P>8v|(jb-C=NXumA|ru&9DAUajVG z``hpiD9`ppt$Ih7m>3d@FoNR=6lHzo(c3^+CG#Wmtml3Ej^Vzerq;VI=z35B8qF|C zYWQdcs;~_ZFK7@F&7%F6(#hTJyc+FSso>4xz!=2o3h*JFXN?wB5z~-gX!Q=9O1R6t zs>Z;=WS>skzp~KgHTLy%Nu{R|vbtLW6HFw!4~v z>`QmwZDnP{CPsC+QoEH!y1~;X3|wgVex&;X!l;tTM7#9Y_)gs-0qG0 zZ~ToHxf8k&E5%tT>Xv}WfsH-RNbdh&reybR!SY_W(>{AtA-ALgg@D!pq zE4PA((hm4QKqghQ z`4j3Q0I2IqI{6do=mDq;%RO++p)Qp)ml_J%%kE=$fd|x1mwb^Y{T7oJ5h3h<2 zRZO<5;^9iUFUdec|2P4Oza#cIwop9zi%b;PaD%L8vi_a0VA;z1Yb~>TR`9$9(%uTS zOHG4yz5HT*mgyVw@}SKzaj-NC!%Xa%#c7YWq4mC3v@h3b2EXiw?NOr{z;+c0eF&H&VpxA&h*M0Wp?2(p)kUr!3zHQi;3!B zxMz}3$XJj5dQWCr+^!xH^L)WJE-x0w7OfIgYj`}rp{}R#3Oq>7Y{e1I72_tq2wOZR zbgU1XPY9xuK8GMv5v2537~ubun9V%efz1R>(4Q1ei?%S=JoG7?a?&Qyu@y}+yv7P? zqrfb2uz_BC+IbCD-s*r^2YRJzoOQ< zauT4}HS2sN}3}c%g*SD%OZCxF-quM7Z;)1lG>A>t!6QFZ(u~&*@E&CcC zN}L&hf?lrsNO*z8N%mmqn*rD=yQ2PjXniblj(+l!sp3?Q^1gHgsCZU42v*}|nKw&jw9qq-U~gD5#F_Wh zeZYzLu%JZAh*Y(AC_#bF45y9B>(qJ7YYr`-qrib};cJ}ojC=glQX0X*(Dw!I@pKvX zPg&yo*`M@yvJ>pF2tRMlOyqNbVQnhcrKF%HkW25;t-s9Reyf08@F{wT6%pVPFr*%uem8Tbt-ru z(<0-0#J09;2oLmWekB0Uqac8*u}myMjjBqt3FPaAiqRC+yX$3ns4DWZWj5c1rbT!4 zq~nU!nEMN$Z6_nd-sUfTwnp@M3#-K0)wWU|-UnuW@J26u;C8>+KYePpeyGruGLO-8 zpfuW%_husvzR;xjYN?X6A`WrNmTg;?J9y14~o*SeSxid%0K_;nc9z{{M9`HPN2w}mzGLRW+mgX9l}T! 
zDU5}%8k*$Vs;Oq4Gm7=5h3t^FPgoi{F)y5p%3 zCq#=P3HMzB-xaEd7_Y<8-!H;17Se{=kH&wak{pkZkaqu(?o{v^35pPz{0X=J3X~A@ zZ#f600^~Yyyp6qMn}+n;2Dnra?wkCEhx&s0BO{>yWNcE zD-K0EwL%lXtAR-y4;9`j)#yqwOrr>Jt5JoAq@-KfWr^FzZ zW1Io~9qWBFQU58$tn8Xro#KiLRSlcl!c714NsZANuyWr^^f*C3<{g6j^@r*xQ`4ZD zhelh2t@tB2gL5mzt}AtBWn?6zbn^5&JheRQEY%xU(VOPv^TzV?`L8s#app0Bwo|>B zst#2(w530C8c60e7U|s?oU2dW9E3GzSEX4TMD~?5`to~^O391kux3k=R`T06f?i;n4k$&_k_)FN3`UK*qxSJXFQTNH(0 zuMEs9x9%7QIV!JfDnnDnR@^yTDH>rK=FEMfAgfBu-IVN(F!e^)+Y+Lrwlm*Py1r?> zgc9!UNoW2;7eU@_=N_drK{eKhC>VYT(0q^s{)8{iC9t2k7F`_2QX7C%3qLhj4nq#C zS8}n?j|L%m-Ze@$aM4k(>CSzt^o4*zC28A~)xCZ5;tzD=9Yx_SlXEIUDSA!JE|yh< zGj74-R%;YMS;+exnD6Y(*e|e4(ayW%dEZUHRLw;idC3z~ZSUMbP8rr#V zoZwVg=ABx|KPz{@N#-YZ9s%t9>#1sDu}s3mjb{dYjY?(P_xaWOe#rzx7$GT6&rLm@ zH=a#nTr9r;V_m4tLmvD&&e=7ODR!abjeYy=2+WE8QNe9%NQ_$QR{(KTpe}KB$$lGq z1jVCOE{PI>#02f4L{(UF0^yWytG3*ApVptwT;G?0YNhjz%bzPwG{8Oq>NwulUJB?% zg6BSZzv0G){G&FhBXe(r?g4Gs49RY`zvm?6_b)pwF|syr?+C;XX2wrGKpGZ1l%`T} zP?iC&REfBvWDcr7V=63ey=`RXWDo(!dZB!`&1;jDL;2O&hk*2z1UgX5Pc)frlk|(! z52R(A#ccD{IW91tyHPqAyZ4e`V|&)j5UW*{`|#9QWJQqJr6?Nu2k2=X)z~~C5(&Fz zawKG1A>^d9OB=!J7~g+mnx=jK>LxJ8e$vnWb$8&gwCQ}F_kyjG{6#1G;R1AW=RkJY zm4z>Y;sp<^;`2D_YCgh6Ad3^LMaky*qbrsI$ZAcWAK^ETzdrtL0z0U@Wl`#S{@PvP zBB*dmMPgGaVLSMFjY)KiJl{R1S!xMuH(b_k-kPm-62$i>@Ft5W8mA@)5yf2qt!@#pg&JFf1uv#o#Jk6!RFkm zDp${+uRZ3lZ#A$$!GK}da|(?`f7E(*oN;}qQGpF^o`;U+M}E@|(1`iop1b>V5wg0# zQs2@ENQnSlmEW5E^O?MXgaCW>epu~61mUkSiZ03K9HzU`4dThrvrSTuU zCko_*O%b?jbd@dvsC@|jdBl4TlXu9iN}W>FX-(({07u3h(M$_k58@% z2EcZ|Lzw8-LwUskKZ=s;6w>=AJ|Liu9~nX76-o)w?s_VN(jH_*qxSfpT~dIxScgMw z{;@Umk{j?3-{Ay?-**1@RlUS}bf0hRX$(Z5qZs5OCsgyt=Y4(bda5V%K>ZSds0k#j zlAOz>G3P%k910+U!W(pj42T5+iZTuACo9Q#f0=>O29ToSOr7<_JQr2mvF;LFH- zz$2j|bh&{n9|od5h`b%+(aY2U?R@nOl>Ec+V#t7HXZweD|B*5LshMABAc%Z#Ou$ULfcIOk zh?M~#NQjIHNWfZ$810vQ2(y9hII->JFGqiKj9)wgia>!n*q##{;;cKbnW4tf?{Bzr z35kuhkPWg#Y7a6B)$WScel&PO8bItT9fD-PO#;}_h#Kn@S^Z~|AcozWY^rE-u_*ij zLN5j`Gv=o*E}C}wM7jLo`2}oy&}Q)el5i$+kB$MUIDTf3-=ib}{RlGta@Gc5Vmsg--Qvd1b!5>wW6U&`^iokgsQ2)jm z5ZHJDHwU%?e_OjHB$;ZyT6Ks*u`Kqo_`*aF>>uc(2}!1)l3K2UQM&XAMC$Ax823As z{5zQO5>h{|wlm;2$v?ux|MOS>@#*tez^ubDf1CB6Zu-y8`}@gILceX}zdz$oI=*!Inev*$NN`*GpfJ(8w8ZxMyy>>;3WlIa0}4hJ#6RIg*KMW`xwC;6dD(y5~Alr!@U24>;DO85yX!|K~nfhJh#U!Bk>6&78X{8%b96W5mZ7Myw}Pj zRrR&;&8C5jQ>=cwZ8%$c4dEz{{}StXEgy+S|C02=vw+JrvNH2o`q?Ju%83#UHSt)A z*oDw+dt^~tNcC_N?lz$jt_@if-i?=6#BC`9m^AeNTzmX{B5%-R*~X}jI#qPBF-sP-88zh_Ix zJ&y$vbCe)7d0^Zy9lc-{f>*Xgd|w>3T3JODJ&CZucp}8K9(%au=xHdM0%HoB#4RA; zqe^1(|B{_#VE{?Y>}qV@_t&Gx`AinZ77=X?_PhG~bJZXfIDQ=h$Sy&^loQ%=M@-xs6F*JZH9v>;0?(vR%+=yx$?!QREoaE7jg??I@6%OjTTx1&{)|A*- z;37LQKos)j@KsolCITOK=`8#zX4pG$DVR@FSTO;lP;W#2-TL|`H+=h8j-US^rC=4S zauXjW#y*67G=-^BEn39_g~%XuO8!Tlg&%o_<&lye#1)VBlKf%`@NE2U`}u$2nH9(* zE6DkO^2|UWpWe1Hm~1dxVIbFFXOgd2sI=o4fX@ra>jPKLkME>naNsR6f>t2R~4m(Ac0+~E|&^0W!j+l)!4z;gc8Gytm@_WG$iBZ?dtFR5ha-sAQDY{^7=xk|&q)2)}yA9;gN zzq?3|dP!s&GWbUszcUUZX-m;3{%`Kg{?p}>ez}}#v=P~0Ga4Y2yFJtx;-LUxO+oNB3ld%Nh0?h zfMmmb2GE$)2irtHp`5Eb=*q@BogYWk@0Yl)J=G|UUzJBJVPY$epk0j=HUk{P= z;Z2zZ2aSd33Y{vY)EDmZEmihf*p65hW%9w-2CiJcSBM2f*2niOZERx%tV#;IS}~l1h_8- z?SmaFhwU3YVY=q)wiLf&LN!^aOgW)M4C?Jp{>hLpd*5tqsYb1)rHA%s4))^lNSg^FJXH35Aa63bDo~Vb zHbsA{Tw}gqU#!}!v~*>ha?7q`lP5W^#L=sWnS#zP{VmmgcdB=;+N``HYQ?9~aGyP2 zu3k?3lvuCx*<1OBkeIjf?4)yQF`<%vyVjvg(Ui(XguRzn!0tSqmK)`2%e7`}gt>!& zJPv^tZ4Z4o$vw{uyavZISFU9WlQ=^5(oTVZE zruM(Uc@~pbbpy9@iMq<$q)n-!>cNcRSNz4W5Bm^kLGOhHIWKyX_BDi&> zDb_2aVxjtdAu7jv5w&MNB_RtFyO8RwRt$o3)8C!!>zGM-#ke}u_vO{O6orM z9KtvsoqMCEmsY5@709-voYuF*hl?BU?$+LT7Ccu9az%uO(Y{KOI$qfFrPn!UPo(D& zn6g+{i851sOVY@k|0bS5S?k=~i=%}S^?QW*KLjr-4GDI;@i`@k 
zFhIySY_9FcZ#=}V%<$2|rRWs#*gJ0G){jt_JX`gKZN0trUvS#hpa8ukr{#XgVl12i~z3E&^i*ul*!nbl0a1HtT&uW8z+lxFSNsFWlA0ZW*|SQ<&o~=Bh(e?02k9 z?}(Zg8Q0-RBHF*Iuq<8VB~TW2zu4_BR@QtaIylD^*#itBr5G2wH%_sQ1=Dv>=^+rD z)N`yGEH?HWSUA%3jpu%?4?1yiGwgYn$ZFshGqzituKJ3OPqYH6ahm6Hp#Pv>pfV>k z>!e&nv~WWTUO^!m(HkA7bR|W=qve*>K6HWJu7*lIar;ZArOaHT-|24oml%p{3L*X$ z*eswu${vegORx2z7vVoN`A4Gln)*p)mGveUQr$V%k+J8j(cwAWJo}IQO@kQ@f;vuu z@KqgBkf2WTb}MlMrpq|(eVWD zJXQq1f!5R=K=kt0KLhc^=-cl-f>4w>1v?i55<~RjUC-CEZF9paE-V`Z_SKqKTd&@C zyI(b{eP75o(W|d9a;!zhnf1j4-?r65QfL&!aDLdGB$Y-kH|WPOGRTfOZ*|zuw14{@ znsR+oeQKR+2muG%*0$2Pm;P#FlZW3si_2kF#U*W?luApC=ko5*bFH*@eC@OQyhOjD zl+E|c)@g7w51DkNi@j;DMziaNC~Y4s&NzUFn?<^~j+cCO&_R%`tfeAozHUc%w9>G; zN}*A2-Of5okEeRdOT>Ih9mFw#?X@8yoX;{3s&A*8-pLoI1}ab~icXIzCmmtH3y6n87q}l*$RotPV^`9mkK{@JwJeL+gNhO=tKbiP zi{N}F1-xBte)!PCL9Tu@&lcM+nwHT z{Zi8pv}qLZoWrT^*S)#v?%PEzA#M-$`I^P5EyG6#HzWpM{R#T3w%NEmfT^)`a_3LK zD#|a;&`~J?+6Dud;JzMftp*2m`*xMFnn`K4IYvoL*47DMY4~ZaDFMkkRIF>RyQqOl z-#I0Y{Ws0D2N;R!#9`A#DyuJTfuaUQei&Wxhc`MJ){DB;xxC!PDw9eCkJaV~(m43W zG>V(Oa*2s1!Hw4ulHRc?#Uy0dUMmgwU#dw}R+jLXG`Vli$;oZ7KipIg6)W)7QfmN{ zrrC8HBZi9kDPtD|iS_zE6Q!C;X$^MdCew_YMnP8{O}De2>&b5m7vCY?X;IELk{);+ zGvvyPmC2>p((_36{`1~3kbJ!6@zMw86nOGys5u{2C>6ib37ng@HgX=#_^?rB@Z9Zb zJKq&MX1iV49N*6i)JjmHHbOh+YC3Hz>c+i4H*Zbm-6>Y(OO^!*UMPrD_J4my66Dy^=)Op-mH9vER|J18@T&+!WAKr ziSDX3=85uofM-*t(YzE*bAqCD$ec8r>a#%NUN2h5Z33s+4CSXDbGDxK-WFL{iQ0vr zCK60-2=?E2SON2kmx+_9f*Zz}dfM)ySz+F~TPhfnQ`hRa)2rvFv?$$ISo16*A(5q? zw}UDpeNqc?YB#G~GVE|lL1u`}H~Vt+yTxplZ>}B8C{izm?@shbgCyB@(gKH2CBi#* zdZ;yQo{N78HQCrGmaDN_$^;u5P+LaF(H7GLtc<+a%*!Mk(jW`3`Q{p?fL8Q!I%R+^ zK=3a~!QX#VK9->$g%ZnmzM3M!Pr^I5Zmz@3o!!&gI1hzL+Z4=;o!XtBF7r6z`pN*e+KXnUk4jBYc;GGGFHu&(O(LM_ z?3(o)W2&BI$JLHa%yhcwKpMO`x@oY!!!)Kdtg*hQ4u!BgWz-|(PT+VY!?y8(?feJT zgHG^d%4ok;=!c^d3~G$VtAyBam+DH4Y13^s8g7nFUzX+c?c-i$a6cq^N74IQfmljK zI|E7BO063QIx9_hOj`RXO~av7+LI01F`$hL(W6qz_)@*ttKR`wT01Yu4iym9R161u(gZSE%!1(zG`;V>7Iw5 z+huS3ac%>VY93AMUJ}RER_8+geOl)Ia<@_S?usqDL@~C?w2eN8;XR8?(U8f*J-OMW za@x9C=Bm@olN-_bxIvOie>`%VP(MY50hlgpE|1$;7J9x#3B?I(=0e3${v40U>y2FS zBBw^yMU@o0m6=j083(f^;WtOlnZaq}bvJd7Z|A-7OCvX9!YTzhX9L+WaCb?}eb@Xt z_L|mw)3}xkd$z0NRvuBFM#HfW6E1P?h!e8uF@s5rF=%0&g4mP84Tx^Zx6G+flvBAY z2?83|4MxtERCG_V98pki=PF*bhZZk@}k|wCgJ1tH3{fW&BYn8BWZk{{u2b0p9 z9Jut(R4J-XQ6Ey(s%ISd0fTXvabQH?{1g>edgAg*DMxiim6c|1J<+xG4z{v?ps?yx zDPcUmnoQUN*R^|(a)b7ju(QdnE>>wt7)H@R40f!z#Jn&%a5#E$eNSNb(p+ag9vE7u zYb}mxnC$CmlSt~9C!m&97Aw@B%S5Q(+j6ma90|M{v>)RDQ^g)m&JAc)PWN+iY3)-O zWC`6J+q6fXtpzM*CbLR1wFlYPv2N(YG6L1)Kef|<8dDYd4MKZRAf~3<6;mRsa}d>- zymD-OX3_W;m)y&Pn{<9l+k->fM#hOVMgpbX28}};OIv?sO^@=uIZI6E z?bzwHc<_Bh{% z{Nv?3I(pMn*4g*d(uXZ~IiSf;z-vZLi%wm)=rCDKzpB-%%52Z=6DM+5wWK)g1ln{~ zEBj8O411}l5j@Od9^d~6PhpC$ZWtODopw^KH6=|ADw78CiG19|z*JDBYVQ+PRN=ZiR-<>H!ga@K zh*5VNGSO9%Jw_vwP9z#ez}g5p<*b#+yIp_|&H~HRZm23YvsbT`EOB*?`8S2V|u7kr#V;O3EHrwwX=$?cXYN^bE^HjlodF(}l2Kbegw-2(JfF4C- zvi@Ka6a10ne8Et-_WIH{m|kLZUPh%SJwU!(R~)s+g;vRVdy8C~mrC-U#KCG_e28OJ zpIq{J4!9J<0b{G9agZ9#d}bj4gB`yrJ;mcTvOXwk>Hb5?W5P8W)WC|yq~eU$F8B;B zhLhen4~<_a=W6u}(ySJ{V_*J9%HgWw{M%Y9m`>jRM%YQyOunnABLU(K{S4RsOs4{btz;Yo89i5|8DmAs3O7yI zE?t~D)osW7}lI``E~XV->lRT-C_>&ernUEJh0@JI`8)e40TgRuj+=y^?_ ziL1IK;zg)+z2j|Vc=&|$8#hJ6Cm)sv-LOjL*yRF}yi zFE1l)^h%^thp8$~24%0`+a|FeiN5PRW=dX*n1671Kb8RnYgXUmZLGc{f@ zs`1Kt)QyoFn}wGupr<8Igld|85t&XM+OzX~J$z#xJ8$aGM^#Ihi*BxjnmT_=9w%)V z-%n+7ANPxw4N6zVjM#i=dUr><%I2yFgTK@hZj@4;*tR9c^1nmvho)*=nf;a19e-=y zq#w&WH?%fX9#SW|O?CM(FK`mbJPZF7snN>!Zk8+w=avJPoL<`FhrbZpX054G0dEW- zqN;*>7l;uYB58gH(LSfBlq94ZTk61b2cg*9`_13W6em0$(i**z-}-7{Y`mzGmF2K% zpAp_&e`GMhO$t#R^&y*-gKL#t{fywp5<#;g(+{C1`uEG5VJ?HWT2X#fk(-Gnu3Xic 
zKEqi98V(91+_~tKBs!+~AtP#)v3pe6l_@MM$pf*!K9U}*095d@C4x7D{P%%tnJ*2d z2Pv90oGx1N)10ri+Gu$BYi^hHvI~B?ZF`=TI$i|N(@W+x9_FJTgLd5)Rh5~>dN+k~ zC&kakH`9L_1Sfvm&B!o(6)(`->d$kjX?wxNyg$gH?6jC~$2?NS=xmjj*=l^1Q;F}l zYpk4BbZ6Gef|Gql&N;~MI`-HSL&0-C3N1)`!~6Lx@La@aNBExN9JtM}KwKe|B!4;M z9His>I|JTQvxkZB>~5DqU+)yp?6CaYlefwy@NZGZV!(&7Jo55XO0egiC} zJ&}6yMoyPX5K3R{i?Yd)6a8rs{S9Z*_K0<+YhBd&;w4<+>T5$oSQw?Of4`@(sVLt) zL2bBD9VAIQdMhHgjYv{j4Ynca#Tn~i!zf)msN|O`%ZAl z8P-FV1@CEgCbp-pjy&z;(6dAuGINNosV0c}M5xy46qY?Q{60KUQeo?j9Ry2JCUa>s zmDg0U;c;nf97rIQ3|>p{Ku=0eu}i>L_V47Y@INXON$|Mf59E#JkofjE5U2{~UQ+gk zIq#{bIZ3_<#aSSbuoLD&57u7h3!W73b*CkqcwPZJ&|gr@JPnW1$5Nu4g1iKX1JM4-WhlT-D`|-T;#4D-;>Ik;CltldTLsaBwQn$BsmV-RP>4?44 z-LTN7x#0HEO^!e#S`uPV<~5IIG@p4j{go*KVdPjdxH>BD%6Zpw;-sf+k9#jw(Pd6pg!6sc#b)lQbaq!QkrI(+y8;oU0bE*<_yXo!q96y;a~>d#*sg-q1`=IpYcxn10q}eP@M!$DK7WM7qi5Q0)O zS))3C?tbwzK*IW{a+hMO<6=?^84UkUy5-#dZ#Z0fF|)u}2xniVdCm zpz$*=Z~f?8`e`jFtbF}2ea%N$yGoA#ehsPoai0k-cT=&;D!(L!^3(kLXIFY8*wfAa zk_!EKh)Vv5We-96=L!4DaWZ%&*EALzuK2Q6Ow)MGIAVlvOuR=)QzMQyyD;jpIJW&v zRXL(vQ1IM)tHg>_gI0`; zOZ|DB9eQeqIP}pIa>7^gt0;>K`$%ifw^#LOK}wPWZ|jEsv4Gk4_87ydyoaI38dcTA zb~|17Q!Hy|^A%bpHhz8UUp0C+WdXbv%FAu`_?1q|`2@9(bPZ|BMaz-pXYoAML=DNq zp0^JdcZ}-}Xybks3{#a&{G2`4T~B32IJ|8#K0#Wg=7zNwnGw2tc$@;Gveh}TNk#PZ z5YEcU9HOF_$C#DZd86;=7n>v|DOV&WCt3>?cJ7mfP=|eV*UCOr)$6FFr5I61%DWaV zvqrsSxlg$!^%E(zF8w8FMnItaiQ(f95zs6Mcv|jTP(vpO)+u(GZ90&I@=uVuLY{Ch zP0V%^_c};jd~e_1$1;=pzc<@MVKIs?@XH^83H$RU^C?O(tkU;lm%Vq^l@yJP9!Y4w z31aFOd}mO+{=BcBy;NC=O*|hO&A-&eFQ#u`^r7lg5^ZFgSddqp3qfU`q2Ojg!Rw~b zy};VJ42S3AhE@LP4%6~y^@r1OpL`-;vD$UeKBeZEx>$D0Z@m~U^3zb2xyS3UbWpAP zG}U{I(WT?dnRi_TT2O6;^{qH^Tj_&KDjgH-TbL>_9q?2A>ZR{x%>_?nNPS6SUi)g_ zbiQSb`OL8BXkg@`t|}G|j*-^OG}fQTDhd-z)ZNTkc{>E(1cH`c@R#Zwd)Jj`IWFjZekZNuCEIezjwZ;n zD(^<_t27aRU$A{6H#+NKB_jJ#>)`tvJWcH@D7t4u6(i@?u7T_Hx%ug9D<+w`#FGpj zmLAyO{;||ROd@FBXbP>U%AM;GfEUEr!ji3XVseD7g}FT$*xppX^F6G6xvsW1TZHJ# zaPVUH_c5}vyivNV%PaiJsudgqbT9wZWLQORiGiK~1F^Ppy6z-d+(swY z*5`Bwz&%l;G@{W1ep+!Sbd5(8Wj$h;$c2kxL6yrA4pFBZ6RspARkdF*qy~BO{ANa> zwDD2lU=k}<0zUjWK}G_r-m74rui{m~%)~D2WmDsu^F5`dqW$L(Fj_)YLl4M#Gjiu% zT)s{JPi4}8@+(@O0ulbFc;Xo3IrvyWkQjyu9znfr%$k{g>5Ff5l)+lzrD_YQ_aTCK zmI61TNA~uTmv*sXRf7&1mh9AS;6TlT#kEOV+26@qZ`N#RbC;Ny?n73uT$3axFD%DF zOBx-12MgI_nP0Eu&4nm}+1iMsmEoV{?#u@dAVX83c;~ z6D@0CLW!R3VB2mH+-<;9=9Q_t`lYq{8>w4~m=8@hUobS+=kSCP)!JZ#@(%@kz%U3? 
zp^DPE&3Qn>e^POU(ZExtb*_f6bEd50rjpOHe5%xWvF?^|&T(@clnY_|@#mpH3)t!= zaN%<^fsp8sB291#wjgx4gc@j->`hQBsRY5;g`A97l%Rv;>jqguvaWQH#v(1KS15Q_ z4Fo~PN}rZlrSF#7)0cc^60U#yYDYL(HqYT|r4;Bv)Y^OF>=pRSy(Xat@gN>1cbd0p zZ~4a-voaDOok^#oE!Kd76}#w;EBgU~yvD^a;Sv4}qODeL1`%`U#rV@%ff^KJG8tgS z<|l)Ki60$~zHhb|Zo}~yiR3VF+3gpx?=yl(?3}YL2fVJzY#y>oJHweKH;tadGQUJK zH*09aB1>pEJ>@qG%I>{DzEOL685}R3zF0O_eLbleR{VKk$S}y?*TO(QWH7s}+J~2S z);L(H>?LzPwPem=R`T;|HGq=pNvH}j?q$uIyx*cm*OA7jppXZ`J`Rs>3))e)4m>WL z(@>)+rK-b!zE?NOtQp&&mDg3k>IvGFtY^g*nG}8;WjGIUSR8xm?KZ~$*qObQy6U`| zW;1S+Z~S!;XDi2>%8S-Y-F+Qk+b!Ye87_Fgq|Qq2t`dLV!tBah8*GY|_yz_;2g!SO z6D%)t*&+O9%&mwNpUGrC$8Waji3k-e?&$lnvSpKscIVIg7o-2Nm*}bJNni{NJ9RP6 z1&2Q3I^k;M+Nl>uxL7#*UI5nAPDQTVvP$tX?6;wpUb;>$^^;)9H~yBv!aL#bSuI1E zbzNJ3>j`s*Kzt3IMi2H!XLC&bu~pdA@5}`+77Tku1>Bt1Lc3O6^0w>uH_f7(cX<6* zZh66XpXk(jfY~=3Vx5D{P6ILB-D^s`$HpGa_7BE>?0fBQS6yfAxDjMva9jS_-Ua`S zv3>ZoLC#|qNt)$t^8o$8AV8gMb5)#-q793y&Oqo{TOl<)pb`(~vN0kZr0wi2-_)CPuzIMZ`eV-MU#$Z#Zn1*F0IovPy(hPd4|?wJ6rY-nv50gU7>OS_Sxw6X zO=uIWnkrP&-{2(W+&4@`iwH$ z{Bi}UZ3-2$4|91gA<@)gSmEL3IvGH(TGiDm-30T$d_$_z0}0HzSdpUc=S=qd^>Y~@ zU2TFUY$fcwYU1oB>M)(WG@w29rrk~d-*;^9seW>q99Ac6pK3NasUBaU(dF%CgG8vm zuJe0ZY(Ds;gF7N<&b?g~%^?Yf8jRF@>=aW7y3IPVFs`yQbW8P!r^qbp$Q`~kHYCUf zj2wUe28cdzlxVWQF~6k>@rsWXZc*dXhT~Elsnpoy5_cY}PO5H|yxc4>Kx@!7^xJJAur+$+Y@vn{n(Xh6Zdex*P&$xj2QCZ@|{WIncewS_MWn zNcw*CJ;3rheCcmbz-%lVu1#Xnx)SQ#X2$B`{*FVD+rTXotgPXrmd9; z4=Yr`!9895RJ45>JJ4XE@$;zg$P6|bSw6)WX@+S}VBsoz|Lb}8_;Ke(=}jwjduS??weU_ZLQ*wW_ z9@u9DA_Ppm6)6dF*v|1$WQ4=;k2I`N&a0w_iGXUbmPz-h*ro7%pcqL}a^m7Dr5KHS zRW~mb;R{4Cz`VY5HQ{1a26J*C`7c*m?g8XeB~QId^xalo;1B~D=?%O~8@CV=>RGlc zn{efuFU}$i!c|o*=|D>2!P%YIPrAH^txi7)t^LvYn&&;6Eri{&J3E~gDU(whVacPs z(m*ai(Q(EOi~a!D`#K%`z;qIFWlIYJZwTt69)g>EMXlY7_u~)jIrTnyI!<^{Bca8c z z#(mb$T`$IH2-t_W3=A=hyb)akr6KZ@tDYo8cf+BfhxMr8Y56#kd6_2G{$Sn|u3D7u z==9T%h&BQA`%hz~?bRD;`DP0Wag^hXB0p9-h@e#bIh;>tn^A^-&Br^+IjR*!*xG2a zcd&fxWxgu#U)eP91&Zf^FzV*~*Kcdgl{X#e!ZzL!{r7%>?;B41OFmw@tSK4-Eyjb& zIC0wv)StW~kIOzjr%PvT&)=Or>8@+;clLeMOg%GGpZ@*1o>Q}*=zWw|6Gx3#uSct< ztFfpY=g#2K7zBs74Na}~%>!V(oyT|2C*f>mYUW?anDMUz8qzgKQ}Rp!@t40E4z%}? 
zzFNFh)+Tpq8UmFhMQZ>nb{&9~r)zzGB>qzN`$O_C%}=LxCitE?&T900Zy(3O_}y)B zA3N|7>ILQ*hz5Sm=%2qR?$_8=b944Ymi^!NM;6O{&Nagr<$n9-Kf6}D5t?hi86`1B zt&YD{l>Vx<@BH`gp=>k>gbYRL7bxS+`S40{&|P(o!N}OzdgIx74q(d8rAMqJQhbtG za{+Ehrp$?wAb|B>_3UIz3A{XdTtSxi_h=jKQi?OEmjayRSFd*iH$-X#Z?Qr`UFZ|^ zKAmz%NIz840J>CUUi+8H%q#P{d)-kpnb1Fb9`X`Aaz$V}Wd-lU1$nY#uZLM}-LHC5 zrgBwP2yhe*{+6-6yUXXl;5Sf31unULiQ+oN%vC()@39lV=f^Yp z7{sX-DWUlyVai!%{_LDc#8J9g7SIhRiWA|y_fq9J{aaI5Dcm(Gs(ETNFvP$9VX*5Q zRV_}TsD6f3kYUV}%?jGY<8FckB7xs;BFp zn4|?h3^wEbd0pS2!Ukcdr78yW4d}*+8?c@WvqLFau{C4|dqPH8v$_?)U+i0ZRb8cF70Vkw z4sD1@Z#uoujhWRRHcf=DODf4Rxwq0E20Jm*1yO}8Y9kcFm##T?k z*Lcg%PoZiX+bDo}k3$+gN~)L%NB*b9Igqdx9gAdx83GWKw7b8Ar6vo@s~Pd>J!=%* zS@Y-;ds-m*ImyhjEDyF*KP@tkoNu35!m;M7(_?Q@*ES(dD@p;PYob{LAO3!v5Lk=eSGhL`Z#oo9{iC z?6tW-MbEK5OSfX*ZmoOA5w-dK6Cv1(jtzTCv)-)NCELtfayJa3v;(P6kU6_@kzYZbH>gvs5P!e zsXmbvC`JNN@1?Rw=3nMfCSx*N0*mDSFfq@jYu01wshkI|aN~1eqp#hW-O`MZ-@OpB zwFmevOibtWd2c4U>9&QJB|N-`iE6s}atJ}lr!$VV{A%UV0Noa_U+`Wv!KLaugsa`( z+CRfv>kBn`?GcYi3t7u7>B;E+j3ZuZ;OT2n65BqhQeOjb{{^rFG_kngU%aLvNzEUMuQK&pnuP5DD zEbBt+t6eTXa9Ev1<&@Ig4X~j(?l*n<=KnK=LjeO!D90r{lkdKG4fok5?%C~`$PVX` zprwfUUWK9>C6%%sbGmPq?J-?ZB9KS0Km-8<{h4wq)mhJN*|(0X-0^^G;3zvIgsoJG zsmjE!Vz$mnlqQQn(g8-zCq?+V_M0*@0{uK5Yp^^> zp&-!sdNWj43VZh@cdO9i-_?BkXF?6e7Pp36OKnJoR@U>!W&M`d&F{lqCL5_tOq70I zPsu71FKzuA;s?}lLe;AhS2n_o-7+mmJWmh7DJ-pN^NhhXzFgFMVHlcxGs_0i?n1nP z{$7xIxZOIr**?l=g6O3YkG;t3ll%)6%~eNC6>c~>n65e}_p5S)W#6NDvBdyDuVr;V;Ll#n&&X5q28-KhK^ z{vI?9k_>Z`OIl9%K{kvTly4xs6dux6v~R7a^tSdM>mq65p7k<^V;9LyR!Bq#MyLGt z4wjf@-Rxb8?HYZj4B)J{E9!4&0B6TCqKVhV_{J< zj(ZvQoNS#$H^;M5&xvfi>cW-dYPCDmZGzx;xVvEh2#N3lpd`l!l;kYv?Z?XXYUJ)> z&Z!H=f^MAZ8r7Cs;~9UL$gvi+Y}$Uw{&H2i>J@38{bBP(YLm<;f-=8$Q^2#$%dumq zT8W2O(zJ#b22JcA&aTsVR$O||A}qPju~bN_N*26~i1T!C;3KrpmX~gJ{umH*r+A~L za=no$6jPZ%#OV8u*y#o#Po)XmzLCUv>evy`kF!WRZ1^07Qp(d40#A}AJE6o%x>&1r zZSso{Yoc6Ph@0$J4}EVj9_H3$wa4`3|9Cs->Bq(ji3wqO%65K?pq|QUc?qqNatD@* z?^lcJZ*?>HW3o&p4^$-4TguP(>S&`ZA{TFZPWtQWCvpQ(ua3rwZ)I4+S|Y5EP_FyX z>lm*OlaPXQ`qfTRJN8!W+Z~xT9&8R-2V{Bvr+{3jJFStuGL{!qCvzWr|6y)`*GjUd zwn&1UMfS?Dzl7J)MzG5VmDA|;(Bn$}EhsYCcfCvN8*)Mtf5D>FkKb=!ZnkvNb-vT3 zL;K0>O{vYy-adHa12*{91zg)L0r&Q(<391vPkW!N->)W~d;F(szP~$ULQ`g_Ft{cq z{yrvUt#r)_XxFY9cj`cN@pUt}LqHt;g?vz0k6z)kA08GK4*J(H*R7jMK?79&cU z92R`Is}5WSnBKRdc6@*A0J--t;r#Q~N9@>Guq$0%h0TEAyE~7H#CYgHkj-UYxGO8G zo&2ki)(9{StzxCVzkghG^6vLOwKnrb-j5VDwdCi*?sk9s0^PLINN7%MGQ35(@7XxR z@ay5~3qa4x;T^wN>;y`;l`D|Vb#!Yz{fn4t;nx^p6T>c<#mkarMdRZ#*p9L5lZS`3 zbrLppuF#5x7V?5ihJ3|nBIujZf&5~@IdQF~@bHEZQM>08#*7JJ(GtOtQEEp{BdVi* z&T=Lv!+U7GnY{FiZC_R01CRT!Q_Rd$t=|9bx*;uYFEpR#RoeeeubDgPDS?FrJ#<*L zyj%l_w?j35?-v201tgAbZm8aXPipIj#hAF|M>b&t{+R}fLj3_jQ{Zw<89D1Y^7|#! 
z<5l0E%%RKPA8!m5-y4^ADlXG7hhz&cc!!o-508y6ugKk>+ae~5U!NHw%~}wOkDC1+ z^NUqNfchmO?)?R*^Wb%?@Eph5amvP2;k#V~OKPAVMfy_4yM;R`t98F3T-4t!f73tG z6bHJuJFi+uKOdBmB>uE6z3Q6Z#egPqnP^6g=D#nu(N}=WtX#>*ImUk55_>TfyhWI2 zw=Mjzg3Bna^X;tc_6)mI7GQEl4WYa>D28!M4YhWM@(Ku87O|XpB;gchOQEX2Cjb161S$!?L%~p%m z@j<19hT|66`Q+`6-@|79Sax4jXe@AvpE4h>A=N}YGw_DF)N}dGPIpy=7v2K*Y(pns z8@-P_975l;XO&6y!cX_yd7(MIXmtj=?1)~buT2?Hijb_yEQ6FP;h1vNQiR`{atKcP zQKOCML{+c86#WqlD5pb`6TFltnM3{9ff&CX2N4OFPsQB<@j6641eokjOP}4d6ME$vDZHw8c)-;dqMeyp55z=)Je`IChG>pK77`TXj z>2${#UlnkZG6ZfsFB{W_?>d#cdc8=t7>ER}?!I=?#$^PyHu=dv_y>U?HqOk_ujTY2 zbvvxoZBJz$hKE$7PJ_*8*dEN-M;cg*+Jc*at;bA^z#SH(yV~E2*wXD|p-7L1r^4=( zd>>d6%+lMkr(lKrH@6cux3P{W&J1p&+oC^M(fZ)>zMx-=OKTWmn9&Paa`{5+4dc0o z`&BPHISh)BcY$WUYb>CbsEwOnb%XRcf!1q-6WSm_{?yx7TV-)3JT4M=5ZB(%II*2D z;=^Q+t$YV=SWHiT|Dw7Ro)$C!TtsqbXYYC!f9{STG;eAyMZ^}V)e-bm5}ok)G!qED zK6kJ=Sr;VybME14;(&>l(bqDwYcW`#WX6{XKW+dYXT9_CC!M43ln||fv9;d2!M;sM zQ70W;1tkrA@TMv|m*I3>-&fUW7+9Nz?h)O&gQfp73qUCM#IxcjnA0|aK|dbK(S|4H zY8QVo>;C{|?bi?uAMd6)d|%4MD0Agf1Iq9 zrxo^289{bya+tIRsoSwU&^y63@L?Z5U;OEXQzy)4wkYFo9#>0A+}x7 zya%~>=h{_6bw`Rwj-7>MG)=*yHwJYXpp$5wP>Wk#yipm#N`EOCYp%g|UTn{mKFNH^ zQ9)G&n^`Iie3|v~W`DF9x`~j0*Dqz?&HKOug&6ErzClphHk-qCs(qZNOdTP#S$A7o z=gUNlVRcK<@S9zNrWcrRorG16J5}HV#mugI^Mo+Vru%La$U^r_s5`D(4`~X1lt$is zYa{3VJq^3;?h6Vf;S5pw%SaF#yymcF^l=Oou8vyW0zTw@(q)G_MGWIjt0-tFKwyu; z{)&G(O=3$z!Vd9~RRi-xE|&)LXp+NZYZhy9X~yyk-r+MRY0h4}^5D&1C;r<%q^>&k zku`-UHjw1)xA)9*2Emir#dUT@5gu7FJX)voABpfL*_AqT4Dh!22J`IHZqwB&AhSCxVzry zbRwY{)0^!xY#G-LGP@B=jcA_GedKsE3Qs13?@iYE@-^R3svEziv%TiB`{fMVb1|G- z6D=Eyqs;Cq#?jvgf^Z+ugWkF~>h=pCxT&Ye$jr3(rS{O$c~_d@+sKuC#zN{j{cj2* z3cQ`f&hDQ@vdAO9Azy1>*ZV?_uBqB-u3M3YI}g=4*@`bMhoo}jE986E;gN+^LNFB7 zHUIRHNDMmha4ILK(^_|qv-lDy1p4a`=Pbb5Ai11e8W%8n>fHCe0P;Z?{+y3Y#!!ZD zB3w%5h-w(ihAZORX+iHWM9^4P(<~&zGSK6|vJJLisblSFd)W+jfRhvCh~y~nFmo%8wzQSLXnbVJjlsE3o-(9HJKN5T#? z$RFxN55lrfMk}JhzwI<;O*&%j@p?;XPAnO|^&Ov-0~=;|6Qa8zyXKEG&siN5q!&E( zc+kiPQ9fc*t#txaQv9nOW=1b!s_xd3hAi^-2ywP8s@HL*7UG0<{D)3sn5Zs0z?gpk zjj^ud7pho!rPuKIUjhi~UpGy2m~RC_JSH4E@l*45ddViIm+J6L^N^WI(qUnk0MX^3 zCRV4h-TYfL%U3Gn{{|%h(;oJK{Pr_;GXvs`{fue4p~1yO_wLou*44cQyg~Qtk%Cmc zK40ry4E4~4rF`?iwX2Y)M}3CRY$oVn)ktQEkwG(azgb$!>;{CHp@MgBv0D#aXzYwZ z4AfXmt~Jc;V3tkEdH7<_hKBou{$F{GpZVnXDU`}iLNi-C&&&Ev(^h7-*nsP|3L7Zi z7(NfbR~vcIx;pPqGT~{Y4h!R~sE;W=`QfNcHiUjJ_H;i$kj zh{OW!?cT$Q!{m3PGb5A1opCJ^!OffZ=GjS-64YEv&4cm<@YHvCQ^Nv-9SEc5>XWyt z*Kc`;RDb>GW>oZXJq&st{NBx;{pxEI_Hf`*1EOj!(N_4qdi%ut1?_3Ps)O`~edWgE zJKm3NMC#aqx`$;qcNCt+TRTCXqh>~2!BxGtS#kSxS=WdyD`yOWi!BoM^{jr#dopRz zq}EQ$)bmAp$5$-6enoZ;uPQ785%I|#c2Y|zJf!#<4Bz*09oFseR;oMsQt9j22dIV5 zs$uTf2Pr5XkuBNB&>#NyaHyDwRuGma8(A}#D5m8YZBS%ryY?%+ThdbcmDj3s$)S8) zQB)|j(O#$C#`amUfizKvT7P-D#}YSYY^=4T;)2tI2v&~IA~81ke=|@x3A_O}=S^G` zQ6C4w+;PdHUhe%a0tu|LY00j=7X}K*A8?$Vj%=rqOLLnnPvAA>E6O%|V2r6<{o~u1cYDtZF39^-Wc2I|i{wU204no#~<=e=*Rs5>~zG3~7_Z$VfI^TQIMv0=S&0LTlu@SuXsa31ky6C z5DU*OIilYntc6GzRCh!x#z0urI=}x*)-%IjC520~ZZT!(>;KD{9`Cf-b6WrH8Qo9` zG@%E7fshaPILLp}q5pdOnrquAJxF7>OIgqHHZQfU0Z~L-gZt>^`;+NnvChHoHFSd> zqv8S|Y$GoTR9uh{GELC-a1;+$Zfw6ByN@5UYMsFg#buT2E%oRK7A_7!++Bc40lSFAeA11Ee#f{|gvB7(`IqZzwIt6pj$lf3s-V}t9pw1ipcB&QM=hwD6@NeSk zUq6RBSO>j9)Lfc9-Vi8TE!gRZgku8|*rcTiy^MFX1#Y#eA_%mrj4RpP(L6~B!Rh&1S~t`AV9vUjT(4t+=?X*-(`m=^&*-0f?)@%e}hf|$4u z?7Wz~LrYW8{|0qJ0JV1qtG%Nn?6gOSW2+i-!9F7+FDY$jSwx5+tCyC{N5K*zJ0q*p zVRij>z}Rc9NE7g|v|~g67wPViM1CeM0RdMrwm+?d=KVQirzRy#H+%z&|7ez?r3Z~FCYOG{B%5kbF*luv6NUx)Z2Sqp zvHKXo{DRVTQ1PRaH2kbiG8$@@Emu!2^<2E135|avh5Eiw82>-ww*RxgG-Cg2>ja&e zffyNT>Q^zL$7;PdRIqpncpTJglF=uhew;BcHi0i)V{YN6>YrB$GCW@8$=|Dlx+xV? 
zq#sYUgNlS4Y8JE2(WJrOCno*!My$%4#z!Y3T0`PH?mm3L%@JVHcb-v80D86-@h5wH z@TT$PxwHD;uDYJmaM=>k@-pnWcBy)!g)=N4S(8`2Y(g|Fs9mcGdxf<-AF#*-tk^U` zWSb{nkq&I*g*v(P0-1jJCy-yfQk-$T0XIHzI(<%pD?w?NOL4zlFlbTY3cwygoSo_+j!@R|uxYCl|lXA9G;! z==fS?^}Vjai}7M7T4!QdcDajdmwLG0*hCos%VSOJ_=n{^r2&>l8})m6QT=CTA-KD439++Xy-RFTO`#}c_=-sof*Ou`vz3^=BOa0^G%o2}ZH7o7+vl2niaXUxZ zNe+U@y3KZTATTb3o$0SQDwC&){S3VFEyN&x4C4OEITm}@5g{enndLb7$9L`lp8z<} zYwj5GL`X&A8XC2~lpn_FQ2nK+w%rYP%BSHz4Oe7XCRgkxBv(-twOxZ+U3wvb9f18Q zaCulE-?icwz++Xjb6Hp4=P8C& zzRLD!?*I7>>@BVFC({hjI1kuQn7G8$bqTnDEslAuUmRzwD`Z+cI;)N5lI%aUhRkGKuClg8dej~)Kf->a$Y23SYc z`Ivq{MRhJEO{Y2C?8z$(0XJ>{kJZ6c+?0TD8^&P71~{`cG~72vz5ZJ=^?yNm)=lRa zMg_lNByCWeZI=TK6@wVVk4Tsi+ai;Kk`@l#O0q{>BOXAfFmh8r)F_i4jL})%xtAY3 zE^O)pe8uwLX)M5+8E_SG-VTk*4ixJGUz3q}BS&;a^-3u=6ag{(HUwb5w%m3nsPQuZ zAz{=tBR9?hdT`CPg#8OT0y*{P|N4Wgc<|MxDwevIl?!McaKj@RrdJAsCOat z$Q9QjT)sCnM5-9rRpzCKP?bS{AteA!Izkngiq54TydZ;-F7wh?!$0`d6M^ZY&{E7mYaRpqHttT4y{AUSh@LWLlP)>&EQi54JE|zj<&8I2qO7Yt*DQuXdw3hCBWk_s4KQ#=U_Q zr-SM_H!Z+tHa$c~dn$9+L^l;W6ugm=jBWe-&#zwm`+x0^@u2_ zYq06zEqWd>MvHGkh4z3kLcG+Q2lJwTX|?Y}{Q)^APW}QP9}D$gKsUJ%H#hM_vCn(C z5L!^Pl&9)?#!ftd9#RDwEaK7+~xlx7r%M%gl5g9y&K+JtLhwQ zy1pg+bM-M`rJ%reBv)0$L^1RC_FTkx?)4!4e@uCBElZoXpXW77Q+!%Dsf zkZ!GSw)1lTPirCf1GPWdOTZ%Yx=MP@*l(|`X+gE3Y;QzsGYB|Ri z{+~YrkmP^1>yIAl|04Q7*7YBp90wEs!O8!xIGHpUAPyQig1aQ=lkvx~VCC@3B)o%X z?PyBHQWtW*ZmSDaIpMubq4+H*@(RLsO5;ck2SAb+%2EwP`(d_7RJJ}4^;p@rg_$1W z@ojvOLiZ+K&*1T4BB|njV;`b5q`nRLk;n6gBV>1m@XnDwW7R_0R_Bd9$K5fsY{pu4 zK)!h#DyN{zf0jn(38d|HScNtmjW!grZs(gG9<5kGT~oc6 zzF{<^SMMqamtDMaTg5Dx+v6r)|7f{^vW#EIjN}ntZnQqb<@A7&?7fVvb#MlfrVGhd zC6T@u5KNoP`3ED$(6&$ym@Q*h;^>f5MLf=~@ROI4>?;j`fZbLI<%izxQw_TyF;=WE zNM(h&G~WBSto%0Z$&-+!hh0ZeU0Zte-`x&1`-AfCzh>*xi2ptX(;{% zO-2*J9^>w~yCY8Yz%QM)Q1TmvQ;*JQCY!A|!VgCCL|+9Obe2A)U2{!4_}i|-$cumo z>y((RJHPEcb&6NN?vNlNa&P2Srt}Ly9RcDke{!wq@6?4+{}TuGSNmhyYhS${U!X1c z*k@2}%?5k@eBi<1H%-61-Ct>xe$#bUBRg%G*{ckPs<=1A87M7vCf}YKl{;9`_1!5;180aDBmyCCyIH;nd=INP&MD)5ICS=|z9X8{ z()}tz33QPl@$=VwuWC!}1PLkVWckH&PC$N7d7~#-uH0}X;0%xcBPZCM@biyebxEw0 zy+J$|qm!Ui&qU;(3`f@QFMXj4e_xM7ueoE4Cie%wY|W}&*3+aZm~ER#CF2?>?i(|_ z)M}26R~~IUTvFhR%6O8oD|b_g&wrcd zGWz6NEwLUmFHhZ5DC_h}e#3GCA<1=>(cCg9|CeEyT_KdG#LS>Y^8+(imKU^@8dt{Gna1w!SUeF3-^C^ z%IAHWlU@Z7(-8VvOq_%Mn$p_|Rnz@Gd_uSH%Ym{c2LJW9dCo+MhCOvSOOZb*Kg_e? zVEl+E_mS;lvDnZ}?=8uhk9SUM`1VDoKKo%S>oICyg`axMz;nNwk9V)_I2B;O-`SRR z_0vg6hEP%fP48LuqREgPf;?p-5?Ztz@&NUU%d0<=ry^aNZNA~``#oR>^~KX$@_Q1k zpIv$ATK_zaw&3q*gZPc!i#stnGFjgHzN7)D$i(5C1XMGn_175Vhdp2m%3n8gPV_jg zN5HsY>w#=S9qpW|?8j$wH(gAwo_a(Sf$gO;?;Js1$O8ywjP9 ztyD6zb3=koa$41hP5yR;{NY|V9JT@w-vF5bzS}$?YX>)P2`&$Zots5GU>|q|Gw244 z&OFY<B8nzn~O$xg@jXxX#~M*RgF0r*kNWHcl0{X*e9l|rP3dYI`* zcN;eVG=35?mnZp}qRgp%X8xW>$=SLUOA(OVW>h3IqZzBLU18;jP1!yBZdr1&Gm<*g zDtB+4C6M=U>P#4XHyEC~`Q$IVx(WNMpWm>^76?6(P3#s+r5?_+r5%jT=gr{ZR5Cn& z3ZZ}vfu8gu<(n4rE1Au`1%4fXknPyl`4MH{&sUrf`$G!*>R6mqa5)wytJ-fI*dFcz z&!Uo#2wYyXQ5ZVY-R~6gT<})6e8ZA60q~54k6V3KsjHKeLP>06x5UJ23vqufiyw`D z7e+~pyXg)bsBRyMK^ubfJ?%6KeDo9DfIWMEj-EFSs4H7W>k=O$4%Q3n?Nw;Jl!~~$ z;p_r>&x+haPh+)#vSUntmM_hqQrxEJOAeL~w_i!)>jnzsx6bK@{1tXgQg2xujTNVW zmK=AMI9Tual4~amyXziw>h|*x-cx2m5fG zDcn!w2%$MdP6VEqhTeu!iU(||2zf&i@B@ZUv;y#BU9>aYviPyce39L4gJ+8X!@%p# zsl7C<0;+bg4py=ix?c zcRL&DMr_rfp)ILhILn8y%$Sa52nu z=3~f=gfW)4+Xv7k-4RuGjY~%Zrbl{3EAG!9L?mQp@YDk_3sH9J1Vir9>;8sKaXvKB zcV1dAAyyN)gD?79E+9@6>EnT=)|%&59NrREtXZRfPfPpogP7qo{0Q$n@CEq$mBCr-!kn%7n-N|! 
zGNm|}4~arDrB>9yFULAQliiC14#}VUrjHkU<%gyH(n7@%wh2A#gxw<4zTA(c%q)%=AXsgy#UIzKBEh-ITj#QI< z+E33K>?I#qMSbLEoL{`zyeoaU-fY!L4R`d-Ok|w{;*FgA?&YEeO5=>kl8ul6@~BxS znVOjzqi}nmE-J@&pu5qhul!?fL&;dY2*rLwgt{q$4hBL`bSX4`N#+tcfUu0|TPb_* zEYG+%1LM~jR()P^!5J30I(!hL`2c|{8$!jU90R`FUSYCfT?i& ze0OqYpL*0lg{~?lpE;c+cCiRvz~$0*_3g_=C4;rfQHP_0rQ>q4R4TqY)GzuaT>)w? z5#f`p*_yTKu0PP=Oj#pR9Ht63h=Z9CQh8FIN|G%N(-{ub&dEc-I7mGoWujAUGu3`c zikaxnR6m_*$FX$kuNh5j#@ZMfKN-DcBE2(gm1d*EC7@k3Gim86sWH}EW$7pI-*mCx z5{8>*`o&@wquZ()#VWHVYPTu{0-xf8iMwIEoov^*{kD4t7(QB~R-Yg4?!K3(d)|_F zZTFj`Db4x24OQ|_GN;p- zo$Vbmx93>N`5GIfVxx+#Md|;;-djdxwY6=-ij;(abfbuXpn$ZrbW5Ys-3`($oziuI zbV+w3rNBia-O}9+-@M%Sb3gmp?&rO~U*GuNG4>ezK#+BGQL6y~fR2AX;M?&s0P9Hz2GO&*)?=QjGW1;n5u+`LE#Q6NhWSiai% z(7ZWw+H|_j(EJuD{YFU8govF+i>gl?D=lJ$ki`{hsxdsRmn!Ln{8UUCi=IV1sB_my zHS|t|0IfIQ;;We0DKe1^>ybDCV?`c9z||Uy8*4hLq#Pq{#lsqyl7V{0l&Sa=wx&b3 z!x39jLFKo-b-mks4z;{Mzks@~+rqcFg0*e-&s{|nWnfL0?>1fb?MaRe#Cz~wI)RF| zS5;#Y!V;qO!vQ6y%rNZp`QpuL8~KeD*Zbqf^|vSQ7&D|Ei)AMyUnUXRUXlpl%lY~A zUCV{*PLjE7$ssg|-Z65$UBK|*(52Gf^|*fNK6g3GuKVud;b|7_8RuvMOUVAV%TP1} zLLv8-Hbwy)y*%WE(fie7f})fmCalw!)^kK--VU>8WL^63D(I5d7R1pEp>&$XiU?;I zJ*l>8;V7ygFKrr{%Rxp9Lqea-bFGAaGY>yB=Jut4;so5OrJ1YYBXVd=NYFj2R%yN% z73`siC0~vo0tpWCh{UnwfYt5wscdkeUq(^rI!EB40Q`2}Du*=&pU)KjUyCVzLm0x| zSZqhDMFmMWz4_+2*NSAQGM@^Z$Y@lKaqa#S8x}OtUAX-WJ0kKngC#dEEvjtMd$;9a zJB-NL1yRnno|~MKQ?5~H@_>+Xxd~~lZApspM*$9xm^?Gd&h%2Nq_3l zB5{O?d;EN5LN7|%dC=%&Q}ed&!^L`zimYB&O@(=zH?4vB2u^7H6R_$QY5g}%-Da_U zL1?Oh7sF_5frsMBhv^T5u0b)WelbOMiOC~bp};cUS%EWf3ew564chFjPt6;pRNG)c zCUXj*TP5(wJcBxP8mEyu%TyYkOUOztSrh;f6|+gRN7?QfACBPhcwSTt=Ze&VqXgZQ zGGGDzD0D>srER*HL)}=js4`(SBpBv{9MuO(Xa_omHKcFVqDo`(00%tJs~h z0-M)rsYo%fxUrWeS4WY_0wDKwn$|uqfzo3;#k!MFso1lN#I`p?_gQyV$|MAQoK%Kr zV07|jRQr|R;IMS`U>c-Ao>B7X9sYV`xT_wk<)ucYtMHp9LdDTH{0t`)} zma5j_5gTG=f`<^6=adMA?MFsJ;q#%ne9hO}mIaqR92o5tXP2;yI{FKr89BDKEa9PW z%JnxDB4sEVu?`=RJP)rLjN=Sx%!PU5yU z$F;7jt{%=uM?+F@%85i#ZjcK9m@_Z#m>&G5y^n8E!^ecOIwu8+!vcy<6mA@JRq5&7 z<7JCU1J5UcSwXnMI2HrmZic7jRa|K|-h+0t2vf?pNDzoFNnD`ztW7+vi^pi@U}!i@ z*)8mAGlv)~$<*(^f=EpE!tT#(9;+Bt8w;6jv^$4LZK2V6u6)?{~CYt=tU(RZR}!xqLE+;ly`w; z!R9drC>1U_fpz2pwrzbb%(kcbDsQr8B@mBk@Wuk|WXN%lIltGk{UAIYmFttk_sfzy zeN)>N=yImL(TV+6oVSh|7^Kf#I#_vLKu(~L!C#N%UNp+#jwam`L_VH$_mS~iEZ+%1B{JNys>x! zVhc@BwyEi=z0O;Owq;NYaZ9uDthU$Ye3*-e5`WR4ycTEK3`Z zhW*<71|{1|M84n>mVDZfWeIs;5*=K2aZ=qpQ%t$-8FSdjRm^8TUO@Y}g#!F~Qlc8w zbweR_V+x(?#3I$@C})I%?3h7432|9WHcHe=vi`2KV#ai?XG~2~yA#z7qWXIsuEepp zl}f5?)_e5nMd%WP9Au3J?Ki^3d{7*oi_E1WCv&qt>{Ni;RiCV0f{gf;u1#H-2nMT9 z(Pt)GV0te*S<^+*fb&?V;pI&^tybGG2$AOB^YkjDf~vhTW0?GuO%O4_WKVH6R-1eBR9Sz6dNH{&%faau99qjx0zXQDcj~bfU+GgWem7PqQvWu#uo`Q`wnIJZ=7;I7T>X5?1Oz?h?j+9{V zE?}vSv}DK?K{;90FQr(AOFlyVsEUI|cwSO%Y>@$uIcoyW+A<1`R)HsYK6C90^~P+8 zOPK1ALh8jx{*BNZC9Zh7FSI+HI<;7yBTdZ`UD{IQxKGLf=lPIFW{4-=ASt z16s`MS}hWX;`?Vz2LMlp$oXWL9y@;Gawi}?!dI_h_m~c$9vTkM^MT-meiWq?Or-Y9 zV-Im?=}x15OX;!kk>WsP$zH;r#74}cc94;&u#{dSD&ohSYY-2}V=It9#ao*2F5~xH zEHijaXhwU+@OHG5KSftf(DT|)Or?nuXvEI!gj}D_d90-;@;s~zk2!n9$AfSk=W!m_ zSl-X;vSuR{{~U&3QTrHa8Q%!^i?7>D%DE4swx{LPs_3;yOy1mkz@c^0H>$gcD^Pm9 z&z{@me-!P;QXxtSVM*X;)>;PeYCQdLM6G<=Y00KaH=clD9(@LNfI*^Vi-av<8!vk? 
z1JqQqcMh$D;OJd+MXyTJLCk2CXo`6@@szCsQhiFn||2i0$&~v|lS}EN;X_@$lmEtc$PdSsZe#80`+b zf+a1M6)CNElstvaQPZf*dXwD<(@X<{^l7FsC#{zLoAB4h5Q7^B2D~T|Y)hA^sf**v ziD6k+nMU0TP2GeaDSm@oA|vM|W-u3(be9f`CeB$t#N%E?zX#O5Yc;OvDC^oS7&1J0 z*M0lkjhr5n=UgblrA{R4@I6|Ff384lBKx$J;O((P#)d4sc=mFok9=o}>F|#iiY?u_ zrd=gVPpVpZT%X++{4J?G1xckHqZSpB^l*4I(-jQI%XoSV+6W0ujhvPj(`c#_02aPH zEC$qzQEab$MXgJQ<wPSa0S&{8Sv1E9)8ahUGoOUDgXB1!p@*OOH&W^B)ZV{wkcSS2d4Sqf!8Nx#blN4AJd zFUPxI;c=DamkKgN?$M-fI%h_LSP-6sAZsa)MtzX?f?9$cZCIUeBSla%J;Sw&%>t$e z_mZ8Z14NP>-T_c|bJ8^T|Xyk&(sq$u=#sKLYakW)DD>8#ZECu?lekP|aEBbZx5fUgsLN zV!H1`RqQS-s8T-$_OZ5sMp4AH4aYsvBye##OUxd3$16um2;;`nYzdQ;Xfu zLb=AWzce!gq?D9;XF-9M;I~diwjf|HA>C7k!Ra^Q(fb)IAhW+9tmK63T!B`K0z#P( zqvVw^{>VDg00G-QW%+x8wih-1>v_%7o-u1D9@BM`!%4E!*&-j~4Hb3Yb1xI1zu(pu zW)!LyU|KovU~nH~$1NGki9ZuGA#nRq4q;`JZGb$6ut@7;ht}^`>_6bz8mI@Y1I^>Y zhr^5Fy#^+1D8d76C9t`f1K}n6hF27Dul42ynSYS*-P2m2=8XHe%>o*elo-_Ml>EZIgvugS-GK5=X^wA0 zR7T=%=zdU`+`2dF3xc}7WLS`6Nlgns6oa1;-sQ4>FlAEk>K7!@w?0qf8)JoHWT=PSX{ z&a)<(_)m`;%xU|sziachtvOL{D8RdlIAxL90MPNv+hQ*9#@$SnOW_EupP^$3LM2~e z(G(cc-JME$x#A}wX(H`85%^<9wZmxhLwq4_JeU;_50)rY|HIO=Z*!pILz1azIDhQmV zGMOnH^%D$9Fp-$q0FXp;1|phMfB`;Rd-j|U@+mA0Z`}d*mgUo8-EA^Hi&WE<9FEuw z?VaY^%VtBV0Fk|tFbxustTk5J7;&y|=1rh@atoX#*Qp(RFW6v_<76JVZLFc0I2Iev zX6z|k?jK`NB05s3dJ4K4q`|}qL;Dw3t9%}vuIsN}Z4nVMukIMq>jKXMnxfy`&qGY# zgwNLKG-psJ0k>10K+Vf_S}ok^VTmX!I$-E*KAJYRC&VvVXBq0Zv4&3Dy$hi1qZZt( z%OT|H0Nah4W`}SiuxpuQ%8A8^A?S&~Oz=t`g8rXda5H5%x6S-bR;2arrxl*f!t$ZD z4WfrjwqctFEfVl|ge_m>Rp+QXOl08~LlU0M$lq{nWJbwsklGChH2x%-!aEh)o)vid z7UE=q9e{)?Kf%7MopdUR6~J1w({;pmy)MhF+6BT_FR-1gi8cw_+u^FL&n{ek*K$=;5r{S1M@e#QnNB26oFGK=;o zLsDKxli|q9rigNV$(vP{Om0!qYNQACnp)uy>1hH6_TkpZ3L?U(c zVSBrARu+TqnSRyI8j%rL1J7;MaeE*aAl*EuwZ>9ZA5!Txl;t~9 zWLTrB$YBh>^-g%p8W>Idks;p@M6(jCVFKeX-xL~8aQ+HW`t5WZk6G{OO%@Am6ub8N z*W?62LUrnwlDT)Lc?_&m5=nTFw(B;F8b!Gmv6wg7OHx8qo`2LWhlp5dq0l=IC(W|Z zZ+_bc?uYgeh7_4wVWdiYjrcb+?O)#vxB)gjDY~jLn{Q`$ilyrkvFMK#U^naj=c+Px z%VYvHQQ06C87#>cHqQibSa2my@fMbD2T+FPx?_edcWE}@rKj+M$k`X4Gm$$EDz7Ua zqK>Jaq?286ZeWaw-03AH+_s^c%UwgYq zuJiW_>_wMOOIV(1E$3#0WQkY`9Spouxlgs8gpvC=@;?x9w%BmL=F*H{`o~ z+YBLbKU-j45#!-%FL*X7o5*hH3VL=b&nbzbrg)x0*|oBq+z9Q=1CRemo~2AR?ora{ zsRjWF0EOhu#C226%Xf2BkP7nH#Zk|74$~8Pyu+N$H6T0D8&#iL8e$!qi{+T)yg+d5 zoU`Ud0UdLv(iuJ+QIHKCeIV6$eedlG_sgPSx#IK%F7FqMwnxcqG{U>@n0Pdrqq>Vp zJ>w6uUJ>4%u&t$*J_iG|M`3LAFw)QlIkuc-BJCHC5vE9Q37ynL(0WYrV?x{4SBYVe_TC3t$SpwzSNsH;4>D2Xdy}oU$ms6YcS4 z7m+;Ia>De`p69?4v}6TcPA9beN%Nf^*N7z2GJ;9oql>)=f9}keA&5}N zP$6a*?s$C=I0mUu_&Em>`isK~7Y(C7uqTAauE3VJfP91~Kr}CgALL%bGMWC{skxh~ zl*S8p&^uN!|~bj+%DecHc|8wA>?O+`Aww&JP-s7rv?aAq+8)7#q|EeFgRpe7b~C z{weL^ZxVNduoPLgD*0*mXu6sE$e|-gB%={K({qYRx2ulAdLsp}VIm@tw9b;F4D##T zMC!1G=$wfHF=oxfCcAMS<6iQvKR{I0Gf^!mc;PCAWCgODJ%5!ctko6p;${9Vd+w}C6rCub2Llrgd&@Apj!Qga9QEI#L2TuFARKXxQB1V_ zWNY4A54bIgn*l1e4v6K z@nvnYbF6ip55>oibd5vEe~jay>q4&v6+~Igx(Y}aMHkLKvmLPCrmK96PDEd1>Q58d zXDMDVhXYQN#iKB+=oZ!>Qbw;Dyz{V;rah`PPc0p7ibqjsX$R2b-$JpE528}9^o}!a zd+}xOi?-QylefXT2pbXEzj9JikE{J_#3dt~a23PMb@HTSN{S zh^RBzV~~Z!{03`=WUwO203xP%IDNhIVK>EYNPLtYs~59ziTg02e`_^-nI2#f@LgFK z*j{pjj5z3VZ5y&&TFVDL47;g58D2+#GpS=g8%i2rt2@$#V}*P92#ROKwq;S;f!Ao% zUJp2k1%w$E?WQ^~V$RWKOUY7+Qz0nl8MfmDqxDI~miK$_wECHzRfZ-+p9p{p>*&VyaSa7_TZa z(o+#;{t)Prf)heIR)uTbBi|)VB|AzCD6mC!i6fx zqb*Cr`7t8{T)DiJ%JgZcJ=17$O?Jl>bfl20t*?^FVOfY$)Fo+ZGwirO$3I5Z{SfJ=9+Sx#py)j?ABpuWzx%=-{_HhUw-7fae$l4&uj3o(M1*bLu=dIkb({~4Z^HDZNx zC`$R7CAn+$=oBC}PRc&~KT}Sk;gmlO=W3699MKwc0BkGXvFb4WcAJbyhUdj^{ZI)@ z9!_E1h2M{MdZ$uLEwA{7k?Ctc9=ovk-V`+>(ccDVK)(P$!{jSrK(m@uRP2 z$ZrfjS_$_%lB3;`T$)UZC2d2tFq=K>Ja!&1{RmnsN4+>+vc`l^myZP1s}PsI4Rxw2 
z<3Ic}JtbV1!>rTlO&w(mcac8SbPB|o8n4rzk$m!f);!OE{>hB1c`x715?^Pod(fR2 z9LC74$6{H8Psmf70WtpE>bbQWFjhkHyHr2-IO73n0awj|SsWio z#wC9vP>dL`+(Cx`Tm!pOR#2TQ60MnSIZ~%?kX94p7)`FzevOXnBoLYq# z#k-Ex&nh*F(8MgfMk;JK0L?+_@uXwws^vxy&qLB0fTlnsAJJ0)iRbhg5rl4s-f8xE z@G2L^4(l)*LiSo4q;WnmK?)XLQe$~`yJg5n9C>*nKDa5ug2L{jp6c?F{v+_H+x`RnkdFK{>u5yYkM zGxhmEZH?(Eot1($4E(a}NJ;I{LmN0R6Mm4k(m*|ZIAqZzV3h6SXKfCi0`QMI3L5;f zvR6R@uj*%e%lQ3!SB^?m2W$1ru%HVqbS$@{py;W%wI)%Q{42k78Bt7$WQKI=N&#Ag z!mC5RTie~M!~R`iEwSk?k=e6g=Tks2urVt623#n(;&>59HYGu(n?^9j_pyc**3s+! zv~A0&kVj-21JMtIT>4?hoO$Q@SH2=^>*s(5FX^Yh4XiZOrEsNDHrkvy6d33TFwiF2 z9p>NC;Uu^Zf#lacSZU>ll zP>7bb!{gei(UoJHgnqDQK8W#ml0!_x(HB&fWAnw+-z62Euwv3B(^L-OKhJcKW4prF zzayz++0&WDX(=|L?=06OlS@PFA9SF^pmYOc6Vaq@6!D5SJ|iykd%?aTI$@GiAyVqoZ#pyyt=M1r zB}`ar_Yw^+^H@@c9w*vc-9`=sfQ9_uStgW2h=LB z<>Gqy>o}k9gz*-MnoFHaByBV`pjFUzleGn%Pp5{yb05p;xHeY{f84=Yi`JfW_(JqL za8pO^eLBfgaX_zHnsc_6dEtXsu(=%cY|aj2h0Oia5G%%C4MWQqM%C@1JM~`&)1U>D z*=XAq)f*e1J*_D$C&;qQ&6ia;-#$kEJd-NGY zsrA`@cc5cT3z(su6+oyL`uBLI);e;Hzve4_{XZ@2U;NfuP(qrt{u*(uMWE%o3+IE- z$9H?Bv)g$3!W|^IXvqt(5t04UaJNDfJwgtj$Uimtrt{!-7Iqaa=qUfnS}SA(mof<$HzkEjY+R)r-_AcJiYI5=VtLV1@-3#!@T=JzM*y!r=uifddzO($>$Ny__fBHLmR&Yc1fq%%hnBaDu{MIP= zjrt3}0vpSY?fEt0Fb5YM{#W4UuZsrC5+g8*Jyq;4A%i?6lBxeiGUHD-h8bTB9`(By zzee6&QQz?X$v%F+rC&d3k%vvQ#Hs$;-7IBmGlsv~=D(l(zrWFHf<5TkUl025Isf_p za_K)WQBoQ<_pS45+CA=FM``1q@A|JCgFmkK|NVnWMX~Z%xq0lyl5FF}e6#JP@IveK zzfbS)b^v~oRsgnQ;}aD2TvMOLgDgXvMoXCV?^pEio%JgEB~8HgOH3kP3naI`pFMHy z|GmNg%Qtn;fDOHv__gnO-uG3-ANs*5kNo?p|I0UP`LN@!_iKha2j!_2l{f*4svOdP zcxeAuEUZOc=6u4q5yvdjPZ&(Wdo2ALfG3vra89Z29oH23y4kNF8@zA)D`ZnjO63om zZ?1U|a}S5uXF5%_tvFQVNFVU9-#*!7{LRt-{tVJkVb5B=7xt{@K6dMo|A#GvT_Fm2 zdKr{ax&|0@%r5fcUc}qlX1%%A&~@Xs;VYkPbfU3jzUR6Fu2AFk!;%K$iyCc<8|xnS zls})HzdtGixG*Oq{)cXg#K$u7zyEK4_A6f2uygzQm;EfEbld%B@4&C}%LgU=lIEbp z?RNj$L-0ShsuLCZR%$w5u@}@Q3FaLQc|G{AXc_>u> zoRr&tPRc*->;JR^|HPDkVhYd?`~PX6_b5*TT7xSv)d@Napb(foW-7Cv@SoGMk|Wq8 zi~cXIC_beuWaZkcAUcHnCz!u+gnqkLnk_7^eEdfZVe%tJMME7XoTRf|`GbDPq1rwO z{DfBe<$o8mf6nRtM;2rE8dhb6{K|0@%r+aVpGg{73% zzfwxs%8m43hA+GpjiUy}(f|2x`O_3&t62P%9jBVaz$&5Nm@R+Gt^W9MJ0;i+m;G_W z2-qc`o}m55eDQCJr~iC=14^)^eg9)=xKf}+!T6u|^Ka^{|CnU+tHA7lDhZY`^C>aj zr4#?1r19JH`3nl7FoQit)+s^*oW*)bjN3@^Q4g4<^^yY4DL$EukYro5*EXh}sI>oC584&Z9P z)q8Jh`!`r@2)-kbR8VLDs!u!puq#xLgLmCs%(+nv=z9r1;LPMWoA!ab|xQ3rp9WbFR zuX?+Uyc|5hF&%C(7I|1x4&MiKvKDcR9fhs8O1to?fsA0`=uXq+P6v!SYkHN!w0K|E z@pYv-)LuA31%3=o(zLKtq!eiI6Gokas&EmAuO%_{{U&A5Qh}Nbr1?7lF`6$z8>TeQ zHs^Y%v-zd^g~POU*P^kk%ezvbY>tuqfhioPq%2;tt7>Kt)J!*gE{1T`9Y;5}hoaBk ziJ=bVr%MM%pOh^pohipYd?s0@e=G4sDAw#_0Oqsz(Pcsfib3+U(aM8GZgW>F9=u&{ zq0F}ZjeXZ6!-uX#W1Bm@$v|Tw;gs`qn8%jaR{gDX{C3ox`w{$%cTtodt9?b0`T8Sn z;j@--p83>={I|CFx%$n;@=4Ve_$=aC!eMlo-KGl?DesgN1)dvxzwbbUKnFAdtM|!D zwOZkp_VuYdHvq+O zOyP@Sp8N*TZc;)(!P)z{>X#Cy=|0hWC^QySZ732(l8~z!OOm5SC0SA?_mJZ5-+VE` z%VQNHm!aT3%f+G;$=w*m2wxq+Dg5@02^tDdqxwBGqtEo|-G|>EfPVyryjE=$AHR4> zXj7o2|NVe(w8ql?4VS|X9uA>6Dn7Tex}$}g(nT~j^U&;Pqn}zR$|u`XhC0!_H)l&G zr#nBp;AWK}9yd!AN!>@^5Xc|>kOc`71!*ZN$!xhhCSM}v8H$GC!?%|w+oPAe&8hz{mU`ul z>zLlO6voxd#I16@T|GC_;B<)7t*yBguN%gsJ+eP4NAX?gSh@ThkO9lStnCToF`;wpPV9F@7DeFlAUxObk& z-ua5+YvsLw9wr=&m#28k+sf5b+C{cmTN+E|dUL)CaFiFHX4E1b)&?HE@r98{^NZB$ z^9`Vhc|h?-SilnjzZh5w-xD<&Rn8~lPV#^AyeYiYUbir)8js{gCce~{7#VF7+{aZ> z`|N(cjDM;M-t9Tko4~BpA_;Z{Ib0Fn zu8}~g8};kWMOkVRJYD_imQouZY4H#9ar!v&SHc8ok+MS}ATf1t>xEEh;0Cy~RL$@nTX*JcuUGe5~H(1!4$WpKA<${v?eB23U@@O$!AIAaZS!_~S!F)OcHBkok&7+EApj#qe^(5AYXJP2$>9og zWx0ym>7bq7|K3|FRe^CQ-_QQlgxsd6x~4x0<$!OK1MgUt6D6@0iqFR3C{`_f-TGdp zPJ3X1ha?2No~8gCM4WuS+y>LJ7stS=llEU-tYzp{RqJ*=sNS7VELvC==3eDH{~5J( 
z;&FR(y|OIU+x1vDDAT?Uyej=!2)X0Z$M1}OoSyEwyjg8-ZGPx0F4iDy)g981wxV>J zPL%0qV>sq9Z6%|%`P#!)J#R(D-zuT^^XrQU|1S*KG}Xw;4@l15yD{n%zAJanP;*|Z zc;lt3IM!#5I|;eCR%5m8M}48zVg3QfH_FzM7R65<>fv;>MH2 z^UoWcm=moSrBfF5(&%K|2$PVVM?!zHqL5qbDJyY+OS ztBbCHH_K;Lu2iP?J#*s%bpgqx+F}NWJAdA>Z4e-VQW$=!S*k+@!*{)E{R&Rui zqJ1F6tDGSk9@fnlO^8!SB5}dZ{x^<2f?tL@B1j8?K3gUwpNQRJOQ%(b4Kmj8Zon@W zzF!xJ``c6QL0G2pPPO=jvU z)jMYVYLH0U=f4>A#xtQXtSx+eHJ+2|p1-%ntt9By@RQx;#EcRRD||6aB1YbPvVw`h zBt3i|t-nIkd7X+??;GZl{oC%>LJB!Dq{r>pvhh}@pDc4Qzt7j!j6biQPA7Amzq=H8 zl@ZSSG99zzBRg4zJ;=ZEfs}D4jz1=fKDA7;5%<{@kRMpl-#A$ANwWP~E*p^o=07*U zsbKRr`B9jMd2-!`!KaAK5bhel$H)MpZ~mSud9praF@dogxbtueWT%Gl0M3zeL)mL@ zD_1O>l}>OX%p3)MsSp=>D#tWTHR919Ci9Pb8xHA!O-M~(jvoU8Rz>UVLM)6Sq#7k4 zXqQifA7eE1w$*d3>+pF{}We*?(R<0alvCCB>i%XqEbYsF1U&CkzI zy8_ib;*Ua};VK5(_BkY}W=@eaFK|^_-ri-$AU-xxQIaOQx)kiPWhCCwtJFf^6WPBv zaYEts=H=PvAad1xw2jTlO4EY#!&R4{g_UQK_z$k)eGiDu142nSU#>_8MQamD-Qla+ zrzGc#)vj^61_Ka2s5$Y$OkdH>Em)=l7CHwkYwkL)efGba@V86#ju8w>rKbM7c>HYv zULLpbP8U#yG;K%lHu&m$esgtH_mx-rQVJ4Tj2C%*+Qli1Y5+TwXVs?!EWz?35)b8KrchY%h?J%x|sz1OH} zy7WD%Ks$6eC%g{CQMCpC=$ShEg4GGmufA8gz~5a<25o|WE_Dn*WLga?@JX^Rtzfba7B4p=;2A;)Ft4zi{q<^nHY$0UwKcx zLp1WftkgfOBHqSF3*nK=Q<(&+xa3HjS*2H+6B4NO z4KEIU+*XRaaXY6^*1CkRawDU|ThjUa=mTRYRlqvvV!Mhsa8WA_pPB->c&N#2Lv2dd zQ(1_ag&KE46X{jjq5p%g7{eeGpG(-V6T(?GUsu)2eJZOVYyj`H(Tg)qgtuCHz}hf3 z|CT0FXHJv4;FMPNyUACx)W3;4VifGyVxlE*mgo)y6b*Mi;(s<1%uOa@HE|T8;CQeA zuOV|7Ot;vWi-)s9O!lONpxOD@yOb}qu?6?&$hw) z>sW%19e5!KZIsADNL2zOcZ&(D&JpNNm?r#Ab&)YYu_7~CG+#T&T00^M(2xWWnlYgX z;b1;JYh!9v_y*kWj`2NHK}=;&hNAu|;H%!XG6$Huw+6YPF+?mb3wUr1$%foT(=gwA zncL7nW1cn*Bos}!*Tu->-oU3;B-gWY{g?kXhWkv2}C5HQ7cF! z3j}q(u(l{}?!JIWSJ>PUQxKg2!vKZ(TBJGsDexvKiDNKqIPrEGB(ETSL#6;h4qoDq z^p%ZMy`;=`)q>UNTaHQdgQ=+dTu(JMlnyc%H@s~(@^lYYQO#_6er%hp?J=30Lfm%t z;ks5-)L&I8i>{MDnV2biS8@Q@tofkOZSC+RAFHayJ>?HUzw+!1e|d$ByE)hFF%s^< z`P4W<3xQosI*Bdjv6KRZCp&SrRDzDlydMhR+ENFH^yhUob)2AUiUo{mnIO8pEw}*h zn5~x&w~+omdOe8<)9SeWGy&9RbN0BE(H4y6tr7Z#JN9fSx})h-`U3FxKFukqs4wrY z-3ccqePtK%?5QyEN$^Kx+^~)Vkx(K+ITXCjNA&R^U3!o48VgyZUFW_soB5>P*X;4v zK4GL>&(u;q%;7bp>KwM8NfY%Gt}!&Iv`|R390xewd5lhfLYW+<3YMf@|0tgGa;qGA zOmT%Sg#4Z00;70wPYARe>mg4v`Rfpuo!TyDHe?mwxQX^XZenlLsdB`xM2{NY zL_QYA;nrHBVn0%E2x_ZG^kv|vhF$bg>Hv9zDhd|LYg(YAuWJG_e0gXlb+VQB{!CJ^&zI!KP9he(rWWMd*@N}5?kL)lsoIiRZ9%AVl zrIOp$Sk5&Kg^2IdkKv8-w|_MC{6HMw3MPY*n8SeJ>6aXcW{LNnGJ!2qVL?k`Pj~jGLFpG(6&Iy-ADYGa!Vhf3Dl&_RR+tn4++iYjOybkA)YUW&|;%%Si z*BcE!wg)9zZ3koa**-}pCerBb4+NamHy?9!-sCK0J8zAb`r#|C#e38wL}OUI4ClK! 
zQ%ppsM~%xTNCFd{2?Uvrbm^+4I-OAcV7%wO=v=eh0fsfk!XcVW)BMe6i@{}^*){8( z&?fT$*&u?FK(s^s$qM7AJ1-PZH-CI#X0Nd7qCG^Gl5dRstVM<8ME17V)G;-YHE;XdjYB4fiEvYZ#`B*Hmz8@jch zBdb6SKG#tQFarD|%$G?0PQy$AHd}_S#Z4Fa;AIa<+|mNv@U0jP{%sULTMqT_$H4L> zm)E`@Hh(~evsq4nK82Gn{@%dd1)q3uk=0R5wX?=rZjjS&0M>tm$x~rRQsj?jZMII_ zsrWu)gpR4DP`pAd3oVTcO_GiV!2t3p%nRf(U1ANjkMR-^Qb93znJNJi?*VZg0?XTS z8DxEfJZi zK(o9}UM+yTuS>Mg+0iEUhVbXY(|hdHKt8KkPOIw~esr_Pt@~2q!;LL;Vm5eO>%%=t z($$tw+`fcS#IYm?Gx^+;YrbiPZNXQINS7p5NG zuI=!~X8;_8|!EFF)+4VTTF7Yn7C!^2cr% zFk~3=0D7fz{gzoTgzoI&JVGoc!i;L6AS=P|yMBB>&j*K$*6#tSN!eZlng+9;TN{I_ zSIOOL!82ABNd!#?YpN|!vt;qqk@A?r`KxL-tH8K^uuSX=^^JWi zpZ)e4M8dBhcn-c%?mP-8oBt?GpRl_L!cuQ=k~|Va6D!U*5r3Ikz zeWpK*I1fd6PY{D0p{fg84L`cEimp>igU;P*q6pLa-V#Q}Cnay(++8GeqUfPzehROo zm-Ktox;0p>=vdqdnL0Jvtx&%?Q>ka>Po4!6rvV(Zwp4d>ByR}|gDRhV(-BykF{-6& zT9cbCW!GX>z-?e@JG#kB#79z~@C~g_((9HaoJL!c#+y#QcwPWcXN7dXl%U}tQ)e#- zo(V(E>1ml=XNY%%aqYJVABBwEZTj=iZ9qd%sGQpg|R(RXXq?m3N|Y zqsyr>2H#bQ3tE5RBgp-i++!WVMvT6>UJ-)8nRrnr`k@ndFGR~bX0c+~5qV!*9{=R^H)YANKWWKaoH6_Objy;PYuZUq7c+mI#qaE0L9x7vu; zir#Um+H1`4oK;kGs$N~BQk)m2@1f@gNzxghH&Cl`VAKLX0k_JFF1d ztvJ(bog#q9a8N@IyHUOO#x3}KI*#Y-+MSO#vM8kPmkD{}a&LC^xa(lLYD2Tkr>Z)8 zf&)JN**LQfwiDTYxBhe!q>TM6RzQz*AW!656D-)1{Hk>YAi zr0;9ZX^P3^wDHbRWBPD1hY7o?&j)@7;YGGtS`w9%4iRA^~7|sWBv=pFy ztqPv_>}nIGg|KybylKdN4+hs;!DsK``L5>b?A+vq~CA5&>W_;N_l&jvRJ zg1ijFerNhEaOqNswhD#pTIPt48IY|@WdEq@#KK^~NajZP;OE};`)6iu##bH-%*quc4y7q z(IzYotw>=gg)`7rF5miewa>@UcqGprrrQwpl`jdd$T6h}Y8yv3sW^Htg)Kt{1yV3rnnkF+-^-)-KDpc>}E1$*PF#;qlat z<&VEnJbg{CQGsF42X8BYhf{|FM|CNUTVoYnjlIRj_~B7V8M?LQHRKJ&E7BUoRFtcO&};jqiIh99_Z@0gq{JEBLfn7F@-uQ`$ScM)HlW7 zKa@gn9B*A=VBgWH3jY(#AZ87>m;%PgFXQ<}r(r&5lw7JRq%7;B;6oYSHT z$e-@agys-@FIV8c?Q~+2{a45hgn>ZX|2pML;FP;EZ;Jxd60Wv4dU%nVq&Sf!H>*2_ zlXJQ528EJ0DKoZ6b=jc0E-f-&Ew8XXb%yN|R-u*`@v_-zNKM!a^}5V2MzVhQ8iM9U zP;>}O1F@-{r)4FY_|WN81-^Zj<~Q zg9}KmLY*7V(@!64E7Tc`0PM#-0oGvq2oyAYv`!6hj`a=l(E?mnU*qdj+v30 z2MqwcBquJB0o2bTPrQdHT2b)k_ODcYSsk+x@vwaktYr>T$5yR5~ zn6~n(WZ=Q$N8GFY4PL8pm}6Qp3-P37Jit!kRpR4x_8TxjND+ANIE$e@9)LwT0-Aok zP04~o0Evcj-u<4lzyHq|dykU(TTRSN6_~VZ?ibjv@XLvh7F#&3cJHl;(ENf1sejAeVSQ|F zDB)qbgNRhZc=^7dzc|nU_H{4!*2fnf%@oXp4iJe8bxxdqf0n*+=jK>9_03ylqYhQ( zW^=ozo3q^Uw#w0b?Ci=HzgT>VkQ<}fPkuWb&tG&AA?I?#CiIRrtwM9J#DS>EwX$TZ zd$CQy)Gl2gZ`NLM#pf*E{3=^rG$OWiFw*apUR8DVj@Eqw7n-%TIyL2T(;#i-@{OGc zG~HC)8;Z=Z5=hfoN~4&3>1tYF_^J%ij6 zCVHlx5GD23*H&5FE4Nx;UvIf+`3K5WM}2=xTy9BQDgSv;(7|bW5z(`5BT5{wFS-w3 z#kPH>?46rbyT!ABZf2{XW}n-;-zElj}ky=EE~W6dUgg z#;w*28!hUq4JFtIxdji1MV!8|iT0Xu+|*B@Q9VvFgn9!bBRzKS`Px8=kr(fNS*NEXlbo3Onk_M)&415-pKdGsGe<7iryv&zmw_k9R; zbljP-@Y(_vRNw*4^b@^Pq`Gze`xZoTqh^o5;~pfNS7@QfrntkHmeYveT(Io2JpO0e z7y+wy4|ie87^{<0YYIb37{!w)<`0{`C9f5aITZ$Ud?zb2by>FQUv5ebpI6*fLT$Y= zqW2O}chd8~L5w0E+#WL<`GrmaT=;~27-e36K3$@Ub;LLQl9L%;J`bHHyl~Iz^rkJ& zj@Ep0D`*mD9z8JhzJ!KTqRA$nz{~u-wZKu8_S)A}U-B4a$zO7U@<@IUb+K%uop1%W zACU39%R-hwQGIRauI7-9aAWB7dhKv6`+W8Ok(;aMZ!S7{aiSO$L)@rDd9I7Z58wav zV8GQfFn8);?uJIT7O$GXLt=IycU23_y7?`Bp0;MamR$TO!6Dog$9e@Jn$=>f*bzhT z_Oo`aqv52x`kOc+S`?h7y!CWB5LJ`3iTexsR^G#Ec}w?p^e@=R#l6Cj0Mn&{;fWsF#PGIALF;&cRTGm(qM~Re^gC%S#;5miP?zmUrVXq zfOAVD)|=g24B|@UHh>S@@}W!v8$y8rTa~W2dt4$4-yl1^y!;FJL{b;evqk8xD(OKE%c(T zh3Mc2MqmgznJ$zlzR;2y6>caWW%;2W=x~&N{ZLr6B0ZKuq;zho_f`&VrkhH)n5k6_ zyYIs;iL|8`TD>Up!|U;DP<$m}v&ZkW+!ZBeFLzkm&Ze!w6xcpW(=Xk%Zet^6-wZK! 
z7s$=wwVDuhckGA`pcr&Ic;uqxkxu5K3pB@G2LEdhvrdOH#m@v-@e%LQpoeu7=_~vQ z1`oRyB)(?8CG)IrI!T%;wpAyeU0;)@w`#@YVX*^A6tUn- z?1nXMdeddMWvI;aQe80K*!>ZSFH>v?0g+5C%3A!cj=cNt=7CvJFZm)OhO%bx}hNRY)f2E)}dtvj!CV?g* z37a8n%fs5Zl-^{Hd?Oy1K z;DcWF##|weft-t}RPnR-6xokM$LSc(W66xP`?!evUD}jRIeudbbpQD5&`_0Yxh;@o z<%2$E`NxO*`(C%|>?IW+g)ZFJcb$8}l1`v$opFH%hMjd-={hyJvuDU`660fa_ftRMNkmBFFgQ093TrRXJ9?}Wrz+mN#C!!Y(l7CJ0O*p1 zCl4mCu)2Dh>8>vcW;NYNPM19A_+wJt``J$zgxHC*P7=oQt;9-yaE);r5wJLLU;l0W z2h}yhTeY3ZRk!)p8f=ZD*8NrSxIIs>yuX$mu9uTb?>=gSQY+p1lUu)lkBV)r{;fS; z{pqUSV#qz$Tq@0Xcvnu#eC741e2q_)Gw*SafS!UluE!N0Ye`(&Ie}7I?>oK7J0=|| z;x{qy@Y4PgDZGM6cqMV=-cjEv%|PG?^Y>8N%MJRi-%5pF^%)9)EW zMtyjuanFYfSm}w3njga0z;TReYedx%teH&$OHwWcZd8vX?oG~lV!RZw?2I&}m^MT2 zgswy8q_pfsvPs&I744w>8GkV(IYc2Eqf_`e%sGb*r&o^CYs>3mo1Hu|_M>uw zxB>yo_d@2zYu})C7~s(>clhK&SW&o7?VZbA{6yuA^kbG$1Rd_Iul3pilhWa9=ex%+ zsm^JeC+z{x?EF%s6nVVMIjVUNH`n--?FV$SyL%!&GJ*~4< z%~SNol6En00`?&jn1tIq%$#}^rG4=z(S;&{$F%|Q`n-+t=F!0w2{kJ>LTffS9~AOT za*B2ja4WSjXne7Ivhz^Z@5=vNKIUlY){qx@tk);*n-OZ*?3fdjISy;T_Hp)uX>~r< zdE>X3#Mo^Y4lnx(RlnF)9TvJ)2t1&GyYfvm`oTm}wQOQdQc_v%hXLYmG zq?f)SwKFC4vYVfM0b0PsKBjBh27zp}17)@giZ>RDlOMWfoy9y?dboEh4V%66m&};n z*ye};|DMpS#j}f9^XCLJ6xocKcdu%ZT=F%ZBtNK+Hb-R*N zh(~B)NS~pe=uAT5bHvrk?KbvX2t#`1`?;zGrb?;xokL1xqlgK=srd-CyS&{w0`BHUpHDF)#KcD7i($DLgaq>*#s?-XUjd?l|XV14f9y zqoEhlSl~jdH$lGhNHrHF{+OHX)=P|9_199mX<|Mf4eNc%e2-D&G&MJ-wU6L68G9~8 zjbM_h!Qdqg{*A#R#nIZ=mw&XchHaUsx3wFtY5e9RY>}BcRjbnWeg3lJuEV(O4}MKr z+`ps7+b?ndlNx={(fvH#ip8xgwZlN8cM@N5j!~k-&GH&IvC1;yBt?-QZF;jN-T$no&}<&;^1|<#ow<^2y$^MkI-!6e zAP^}ilsI`5{iVq9Rq3 zq;FfRx~-*1spgp1Iqftoc0fFgaPQVpVzV1A=5hTM1$_oDMHv}yfZeSy&5#7TQ~Wv% zC!f2!R9CQ&R`^-n%mANO0q0xO7WnAZKbjfFz*6Fmyb`YZdt}hFxBVhk1Wzl%;kYv+?a?2wgRI z6cNCMs}6x(yF=t8{qyp<(Cbkmn{^mIdw1gZ0^h?eI(4+^hMunx9f{XCdZVIvww#T- zY5AUhp9c9Aty(Fkei)i77j%&U*TB4c1(7A$#09q`FLjr*(F>e*|G!wCWR}}Ls$G>EP+Em5i+Pk#NT6p-~ULg$! 
zQVEtU_Jrd>K9q0Vu3L_2xkA8I{>trZ3|byq*hW_mn;LE3kU6kw-9AeZ_~!o-M*r?9 z4HeE19D&I38eMXG#8u-4o~3N5)zOk}KUrkQs|Cx$UvsrGm>bMaId}|2@u_uz{qOXTOgrz9*UG*X-py1#cRLjv73Ltygv)Ol*vYvn-zWIK}Jt;%i z>3f$But>-oZX%-4kZ&44j6RXRsE$$NsZO5J&=RF0V~kH!UrUZI88QiO-}z_Rm2 zKF=~RIq3NHba`K*)g%kw@+L4?0`BCN4}XOv%><)LnRIZ&9|V7|0%+z3ttD^&rkb4eqGDh+2Bt)e$(l2 z8VU>h{r>UA-eh0@CAM9Ldv&49iaKnlx3|NYv0seTRY^?t ziMi~_*q4K*ojIF)V&c{4-LdTU&Px-bco#0K_z1Z_Xe#p#kX?Jj1_B5>|a%cgH6=TIyCytj%ik`4cz!pZ_V`RmNQ*C$xl^S zC?smbBjX&3jkE{!%{+qoH=b zbgt1p>F7QAo7Yj++FaF@E-w``6`x~PLj zCJ*oI4jI(j1UJ2uCbu0=on-4;5lu~>KyBYMg4?xmqH7q;62C8WG zA8Ld)9Md+;RPFe+_@Z?+o*|S*ufUA*DLO<)Je5mLrorrFc$7TQu2#iH?@;56SYEt?fU5*n;=l6kzm6RrMZT zKJ38-;vHM4m2Po7|EiK#nGLt>b4izU${$_nPn&bbQ93uo%hQC*@7{2f5V8D;&5kRJ zX~j?sm)=d#`^hj|3DE+DveIgUosowdoO@Y-n&L|{<^cgT_HysxbBS#SK)AzFMbm}q(H5hjBp z;v5@02XM+DFTZzR&)G!11j&3eBhv|Bb5UW@KjMi0{l@*v&bjiM6wN(SHU8Nrq zkiOFK2}{XIuN-aa7YECa+jAl#o$VpiZnbT2wtVs-q`EMm;Uo^)Q5Vr)ARJaRxw4L&04`z8~-Hj7= zZnGRKRUyBs+7P~Qk-FhRu@jrN2tq|M`g&_;8Ax2de*9duyq^9g&+Yhy_a@(Eg{PBM zm8j;h?0j`>cCfEq-^xX87{8Xg>~)V#JO5F|`qhr5^!OKSS%fz+WEl+4{dH;b zzqpGULj`zKTWY+2SNuCHpboGO)>n@GFn|0-hHEfo?Lk7iLc;ST7G;O3Xr70i`nl>3 zYIhlQm%grFE^=OR*gf&fi$C}y3;w$O-K04VeNmTJb8H{%Z+u4UB+Z$x`w>i$8-q^C zQhU|?cz5u@PgPtyv>&s$Nh9%PLg7nqK0<_C1Ud-hdxi<7{Sz38XET_}H^k^o3@4I0-s1kh2 z-D!@oPrt>#11%5{V-6J{p5VF)sP;3H2!fZ%>)8&qmWLI`AsEg&X^N_SRUJ-_eq}u< zGWdKiGlXgci@Vw^hJdTI=HXmN3^y9>jT-Eo;VFFucc<{sWy^NB{Q{A<(O1XU1yWlKP{iIzkwKlTzxfy}0{YAI` z!CY&&P$HQM44I{t9ujl0&_j6ho(Io0 zqZ`k@siXC}Y`Uenk4!kuGisG>HBqCb=w9JMHA2pTYbZY6iLaz9iHfP|T`j!}EFyph^Lql=4^T%mT?jmYj!ztLIou5ZaDr?S98q<$`kmZ7R$Wn%-eE(!6uZ}Bx87wIM zG8)1~c3A^Nw|~K@x`_0mhI{<^&1U3h@Hsx+TOH>0X2JFM6fSPT2s{DDf(E#GBuS}C zcu;S_1M;!$ExU^h(c%VNhDHJ8)@KoNg%+dV>eHYvUIx^TutMAm4<#v0$Pfm?6H^)D zH!N4NF4DUn<$Zx0klg8D?adu7mE>E&+n~-Ow!y@rG1}zAI4iID0K_-APoI9x_&}tE zy1v9HLy54G6vXx*nZKk3N={!m*xYsp?$ap??$yG1bv11e2?FTyHGYi}%ZH=^kAcK{ zdhlau9&>8|g#T-49gaq5PZIbQ<6jQrF=~0p(Rtb6^Er7rgKXi?xl4TFsl_~v=!or#d%~*e+{qwTCuta1}J&+6jrOEl=z*o2U_Vkm~U0J1p zRu+n#@1r8!6{24!l%5@STuT3Fd#QP!|BYt7w)$0#TKq4)pYRp!I_4w`MFVDm8RKnNSc!)k#s1)ubsrm>Y1kgPCB{@GVpa&sAB zc%!aicr)F3N4degc`_{+qJz`SaLyx8h4+kUB zu49kYv--y7$pr`V3%iW8TbLtWT90a8o%N3nbrF5Pd7DjIs=yG>s0j8VTC=S`#T4HEn3&@_54o-GE^p;`+TZq^6Aj# z=dTCBr!PJu&VemYRf+nmsvLcsT>jYgY?y(Z8UEEIGM3cVW<(6Ku|bW8TCb16mVPsI zj-eL(Ge%~a*aQ?AY#LdB=@emp=E|%4$-EGuRo7y2SQ%Z@3-(q)fdQ83&>d zv&o`okxvJ6;>D#R4WVz#KRPG%3ar||-CZu#3s~B-($lL3{O}R1o z8L1g9)W4#^_U+dd`gm>49NnwsmY2HA@BUVI1it(2gZcNRh$IHid$e?B>W_}Ibb+E4 zKgV%!U2Hj7x%p!?zhzjvW54nCoJLU*OT>s-Y})qxu_LcgkU?t4RVMYA!03;n&LPjc zZ3WF!*wNKmAQWlE52{|4xx7a_sY#aajJ`2mQLmAUB6vla2xnUsz=q)d9ACe|G?D?= z)0T99zDc``?7+8AI^?3USkCEqS!2NOV|R1>M1BvnKof}I5G^RUV0rU(Z<(!G3HlZ^ z^AfhaMPZS`==M4XDTYSrQ&U`^i!$bqcx*A>2(a=ZyTMNd=FBN#JgKti4d?S`hhIA0&t zHGEAB6~6GlR^9zCpWqY{k!Zk#A=H~>l9Y$5qd(HEgd*47sd7CWxHy}Z$<&@KhBx)I zSGeI0)f#!KR`0C(R#NjvWqZ~ScG;S{3dT_B<}f$aN99CJ*nEUIF^*dsOma;FoY;ay)|XN*A1<~`i@LhI?e zC?$iV>9-5uWY^d_Krkc7{i zM(?f+eoT@Le6gJu-WB!tdsrNQ^rr3kvuT|c_-g}n_feDYKKiSfWDWCE1v4b zX8#;dBkjj+vP`E%Yu~@|g8rj0&1XSFAhQ>KU$TnC_?li=e@LspQ6!;1w@2*nmZHVy z&jSfUku%i(RuTQL^-$`R`fqHf^O?AV zcnPtX2UOR;vqMat#{KHiCvlO)$Rvg0ppIje9tFt-i+|^CpXwof{4M+agHmh52efEZ zOJifAvOgxAVkO{38G4lb#79c35;|RI z#>D(EkyT575M2Z74;)>tU>_luvB(8nG6Q-m-sK=eNX0tdjYP2!dy%2LI!X%GfXxr> zQP<2uUI2=t0kELPLRVa}a9^>7&W-2aO;9X20>jyHd63d=4S_4W+w6H{8kT7~F`#zp4n1SE4F+|@cM z2DE|R_D}ISLY9sD{}L_z6MT5>iuXu57xnrIPXW&KhLE)M39iS*;H!Fbm`0 zt6P1$xo=uGpb}P~jL(x~KCBGZkWg{sn~m~c!QXM@UZsR28^V6_>{q;Xgws~KU*2^H zrjGKjgh`Tl0QXq`3$6Mx1BZE+M1qF!!@2ASMI+jkFM@w%YZrX0HCr38pD(Z+e}R** 
[base85-encoded GIT binary patch payload omitted]
zC`LLux5VXU8MMcUSAc_~JKU|MW<^<^N(fZ-vj9+LW_sLCp0on-`gpepCf+YnQW&p$a?jv5Rnd)&?wi+W&yAUbv~7UM?pmosHWhIauKd<3aCP)JLQ*TT-5}vd z`~4Moq8!lX)mXuZsg)&XG;yuV*Eyd2)w5Q?H(|A*(s;hNb2~h)!!&{V_lFjP0q7Uu z<_bg&0d?{wkWWNRph;p)gc_6rG^A-*m4om>L?B;`U{U@I7eC2$49CqXLh>=x{XZPd z5d4RMYwY}LZMfDQEHOVA9yGMI3i7@OA~oQOEcuv5ok9x?|-DE?+j6a2?;f=&muMb zPj$j`%S2ZJlCHl|qih&Btv-kCPY&y8m>JcZf){Y6jCI0!Ge=ZXb1#UZHNzSjHFJ8pk3cJ=ur-BfZAV#( zp>e}xoAYQgQU&JctZNkCFX5Mml6)JWeT3);rnaZKz{6_HEg1N*;t~jNj7=?|)2of+ zVD{ONv|-F#5Dkeavwm9*jTD}&*^-KHc(x|pc>H2*9~93;5{&Y6VWUgayDU;kpMwtc zjO-KH<}c=h5|y-l7V*qh=Pb+K6yg;%HkP&KW7GG}gGzAQH?6_cUDOyR zk7~L%>^?b7T*{4l(EJ^ab#m9*dL8}t(r(!@30bn$EUU;^_u;YnQaoe(KKO2fU7cQ{ z0lT!j;<`Ny4d~qF1d-Bq$l?aKfLUUT`@OkG+k~}RH_j*rUZe*S+(jcvamMsK9Dc^n z26_Z?-=1eRc6sw@g|%XOAq7ipy4AFj`aSqJbKQixVDhN+Ds=^kpV z_B8Q0w^XhpCW3fp-p&#pA5rxfmwyqqg@o4v}WGS zYWVfdaa0+5J1a8zy-o9oBDEV~O%B?~wJ^QDZvr-2Ct_>|DLIz@2Vcw*`<)NY9JB!m z&qOlfQC=bCZdo=A$R$3~rm{EWd{b$5(x?9{F0PBIz#~m`9)<0ea1J^0$Mjl$oAx1Y zICef|=AEtg^JrFiSnYn386D{{kH|(Qp|qEGRDDQHnXG%JlcN>La69vAS3DD8bXci6(v4Oju^3zHH<6FUkGU;)mITqjGD$ zbY{^g46906De&W1cy+Lcj~e}ngKaR@zt=nNCfEQpoEqjn683R+&czzM5WV?hqE?B6 zBtj8I`>>L@5@p@NR3gJA0#PNGNn&5( znCAVgFY@swq+DiFE8Rd(+EgGll8P77T35C)Q!k7p~(kO4nw{&o34-9q@) z&?y3!g_FrQ>r*?HjaUsRpQWZJ;iLro_0>AV&cLWF1B;i7)s=P)y~9L6$+?yW0>UZ6*-VctgH1`rle7_7&&X}>l*Pscqt%vTGBKcre*Km^cP>=;>kXS%Ygef9P~;Lc?&eTt zUG?D$ZVExvAv|lCj$cD$Px!++0^LJbTup&$xyM+p@fvZbkV0vDZ}$-i-5lW=2_#Zf zo=NP1l9;)D8vMcZ#`LdD4$bm5`l+hgSVl+zp1l{-++#mU>*wCs-}$DPynGf@v~l%d z-pMN?&&kKmZ0oVOoK9fmEk9Pe_ShYiQZ~aF)GjC_%fZJp{hg`S@|y?0SRU-Gv}wR~ zGF%gOK10Swaz1=<-4iXzK6(0gPXu&E!sG+U6^!4nccmFJH-WBP6ZVP6Uug`St$}Qq zEG(1{LPj-dR>pGFDpn}E6IPHx&64N@N#2P>l=od`aDL`1!STx7@Lwbcnb1l}g;%UsYco0B29iunPN<&@E#i%+7Rmc~^I`8&1Ojc`@ z20dgZy$9LJ+w6+F906BA)pMmdJVCrIN|J=|Mn^-;=sA@A zH8uHi2#H2YCqdq>v}}`XO7_b~ZMecXJWDy2Pt}(FF%^;TV(V6Z?r`VU+AY?j$`=Lh zmYTXl&uw!Y!<7y%?a2dw?x}|xSAY?tP1jQg)^GxqOvcK)Gs$Zwn$Rs`kEwL#ce)vq zu%`&I9*255 z0?1`Onv+#TwWJpvN=$54T$vVTm@ghhs1Z~TLSaXoOosfM$ia(yB$!-U)`tAEwoV^k zM@adWm6#E5AyN?fArB@KtQZ1`3Qb1>tojs``Ne+3@^ER zk}MZ-yL}{!7AQaQ@*BQ0+;N#M5qJ(mF-{P51MEvvp6qYfZ^JAX=D; zCz1DNnbI1XoMQLmm+z>0k)kAc`jF`GM5r&qRVq4!9ES&#O>;mV!WRf2XWm_sA}xBu zAEx!JSan(62lFPyv0?qe`5tt8N`OD&7rI530p?qyq)<=Ly0VgcLj=Ds^`MlbLJf;+ z+XpVuzssCQ#Zp-yV!vI(w$jxQnX@M#|+{U8L z%#>5oqB~~mdjK9KrtH+-6fpdNPU)pxBT6BD(yI;6bogsYI>`||NjnFgsq&8=U$XGp zOEvgAS?5$~pqZR>A&Y^ zTDe&FeB|S!5Tyo{i^UG9o{JzQY98vlDs8wlgm7+vn3@%2NRRALBP!BjOUlZpCzNp{ zGHKeh;@e0E_b}(OvrFmKs&oI^)LY~N#w{O*g?%hY@J&j03r1KEo)5j`ju@c*yEyb0vv8w)Q8z#a8FNIZXvJdxl4TG|CxaF5j zF>{Sq1TH}<{vW~fo)~*$l-!(WSvi~@SWPGx1bdA(&&ZfF)LHd#_jN$GicRZrE0b8%eg4GcS?B;=hR*Ow5ty%qWV+OYMz2^6oGD1xIQ z_*}ZVMeez`~NUpkeF^oD2g3YT3 zQVWwOrcGidQ}~K%6r_Njh9#g&Bw2ae+$#|V`owEqKI3Ti8(=o{wa{2DMJpuNGipISr*^A)8W#c2 zV~psCN^__$z;1uPYl_{ylc$Xbdt~;xqzyx1{@|;!q8vom$uC`JhjWxg#S8yP&cO5w zc6(!a=G|CP3<7*p*1ki|h=o3PRyCjPFA0A})SQqmI~68VS90@yE)zCOv?ibk=!RPz z)hsGc-=C)Gm&P0vjedw9BDkYvN8Of#4sPy+5yfDFNV4 zb<;nq1QT>-_7Z#U40$iMhjvwDU2LK+-ro^^(%civl(nWt@?i|j`V8!VZ-&B7tYC(M z^Bfz$3pzFp(HF*<<2C|62=)56NN5H=w?S%c(jOl~#juo#c<%k*<mKo&hZ#d+Dqw+X`0lv%TZ{YNVSzj{y;m<3Eq;f9|C}UU#d5f8b2w z;V1MKyq8ZcoSxM(K)POan^V(wT}LTvk;0|bb^r`Ojf_FBuZz5SClprGa$#Qv&}T{A z-bq+4{DF0f3&3Wc(+@Oh$xviPXTW(&%11RzNgX{qsDDirocbs}M7^=v-)pl$W%Pet zn`bcH)Dx@r_&zXcXq*97bQCYu8Hgr%XhyCSps=Edt4j-X zb!>xDLAipj3pw^aCfs;>5893XQJMTR7XFL~XOdEg)l6r`yc$a>xE@&m-W+#yH5mVr zmiVl2Sz8F0)uBBbV@+u=W7FR?+|`f4L3Wd8NxQ(7=l!Fu>du;M=XKRl3s z1rg+u7%UfTs)jVZ@^uO4^R}pmp0QTkyr`lU#diMp zq7-KTcTvF71fKkNQT~V&x-~iSzqA1U8#VvfT>srl|4WMb|7uZO6-vuJkr%+9=Kt66 z3YjB3ZfH51HuMFeo&)Rs@bYnU!W;bDJy!Us0{oh_th@g1`YL+wn 
zFZc{t%DrUNADL|aeOT(s{T~j?e-zsTW?%jq5c~wzt{rJK)<#N$S3QZ1J5uHGCosd$&+49UR1Zw0_b+|2?HKxo!6)| z*I(7|AK5w3@II)44P6V2wf^;kl!MOH5{crab)it8 zCk_68*42OK{XlKhrRIA`M)N<8jDX)aqmO^wj0pcN*{2v`0)IoZ+xc03ub}Zjhb1El zOf^d2&i~fP16xB8b)(VN%AmF3eNy?^6X1`8b4#r=-1LCdxp%^uBj}P_fMIKv%RsgS z5YcLZEysi90Nj3y-gm6N?0Z0^r5M%G==02}0QmRAjBd5)9bnB@hjJ~A&X+zYz#!d< zHaoXKqGpdVXG`=~wFcntUsvgQs|MjEIZpOJNrqw2AIo}0p$M_a5Ozs(-Ykd1nZl{_ z&9*r)d&~(SnI2@l{l)a45XJ@ONR^3}36w_QURd+Tr+@mNpPoV#HOBGI{0?A8`>4*% zrdK+_f37PbSXa--M07<>hf`f(cxhq>B=fM^<5H=rfu1uAfGBz9kg*=Of!$pXJLyZEZB*4#)$iV2JTeiy^pfvP-0CtD!PBHWAWLfIAzO=dhJlF=%vq?bLxdJ!w zpN}3KrNbrjJ;85@KMtHvz&G0bY`bT~vhkUjfqxQ5_f0`cvf)Ifko96Pfk6B0*aWK&$><))n`xa z?s@ z*#OLbZ&0^~vt1@#j<(9VV_Pbi)^EaFU#vM9+9-vP`4rCxS>#hPY zZrvf|$qDRLLd~q|r>~&0=m2)))MO(d^5@uMueb@TJ`BQsr8-cl0(w z7FkPuX$5FEk=jVMMub}lm+%ir@NUD^zqdnLMN!ot&6Jwjak&3+I1Gh2sAK5_b^&`X z1<5Fsp(tgH0GMxfV>c-238|L`i4ZkLEw{tEqN9Nrd0s65OX1l)M@jxfB{wd!*4?-6 zl9nX+3f9WX5ZT(E65FxK%8%cMn^`~s@5hqV1AWFQFGu+y}FEqlKx-|Jm9Gx%780Zjtb6(*I%-X0rWCANf6By6SjpvRQAzBz^}(Txr8{ zzmrR$Mrbb)_x%xfjwm@yE)Wn`W6yC?K!lZu0>jqm+@vRk2IWtCbK*aoH{myOR{(S~ zS0eJEHL#q_NKiy%nLGF9O0D|>T#$N+>a16%D*8AO@DE150Z7)bkn6q5Zwm&5c>tQ z%N4UE%K;=(z1fxLxy+4mkxH?neN*m!>X|6Fc8sQTO#PJ%C7GJgE&B{Gv2Suc+rsOI zwugGWJO+jw87uH1iL2qoy>JOp8OCQ4f4WzzaTzo4IDenov5mc*j<}aoK48-9 zFWzx{{;Q_O<5={c@i{^4KbuHGW-1m99*V; z@GI0#c7-qdvi`!sf;p*z5xybKYAO6 zd^l2jD#|&8z-Ix&>P(gz2~^PHm{sBC=)NSw6bJm>4I86c(!S(QNe{5E=k59ts755P z`k~H-%PZ*BIrOJcb_;P5F^7z~a`L9Y=6O4L^RN^zw}`+>?Lu#E1vq7sFW#_Attn^p z0F{9Z28vb8xU9CMy>N~m+a8e6*9B(*(nYPIej2soW~-x*CXKNb+cXGcUYedwO}YmK z0!AB;pxjD{h%R5YWweEKua{} zV&$zJaphuVfzP)V&Kdu!i;Pkh9cxf2Y-2K;z)P-9OC+crYR%cXM{e6`5`CK9Cugbjb zeC!LDE##$923``Ne{K!0gZetKQ?N@ICnp1ws|@}!#onVTsl^bI`n4NvD$v@t3Og@J z-(}H|+_=77M?n71fOu}Ta#QU03x=)e!0z{N%ME|WK@Yfv|CzeYbJpu}FKgawiF*D= z9he5?(a3DlmfoIRFwfQA-RA?%`oV@auvPutE;m{(zocCNhU%O- zy*-xOQy>?&-3TKsSv?0BkS#i)Z^;W*`Fj9!SW*F~!|qAIkx9^h>c&)L{poM)mgEhD zVLLDeeUyhx$d}A*^e_j)6JBCR{`hrmvcK|A_>AsVPC;1)phxu z(no3jglBFW_%TZX_NMli`pvtE$N314A26XZJ0!-3#w(QNMs+NJY%_i7TfKUeG1cZ)}3$!phl2PQ3D^4T*^I zi@p|foX;95Pk$J?|Fr72Rc*Qz^!*l8IWxG9kc};A5(#j}{s_1W{Sp17_@q*Hv(7vy z?DXA&hI1P8J^iGRk(%5a3qYi2p2I}N59Zj-E+T>uT zf6y>MG~4;F8WGBH8|u&X%RNkeO~v-ieS`0mgrY5KLpkkpt81Lw4z~eLP?E3(DQv>6 z@$AID*ke@pq%p%3{iW%68LrCJhudLcDY1?QeULkAE5taqS6Ga*_H|id<&NLi2y)-t zSIbckw~Odmk%Cc|cFdTaFdru32ZsVx{fbcowA6s5#0J zg@39cCdFg(g|Os@DBNJG`T|PTPz<}BA!g2>--rJj3Bgb-j2pBquypfn&^wq>273v? zRsGx*YBj!)g~UL!Q`=AYe&qD5Cizm_=v1E&*2dl_v~p>mRQ#x1K=x%gei#M(XE^^7 zpoI;P>04mpcj=-rv#c=$D0cv4iV+oeqDOt#4|Cb1 z*7yws%ThY5UR2|pc2QL$VCF$-kSyVnLAvQVEu#`?vT-Vr2E>W^^~|w23@;|-{4&N` zl9?;&{F{>Wqs!Pffnow(c(6G?uj?tTWwt!GLE8eo%r?w3LK?U+a4Og~Zt0f>F=qG> zJ?WI2q(d77DjtGs=eL0BZ9zprg zqei$?uC}N&^jN|oqZV=JLn(`!2=qRIRHc#rQmv1YsC<*y#r~M8fb6Nn6*F<`s^mj# zIva6d>r&BRr97q-(2-)m`%Sh50KSsY4YK}H-ZWl@O!7uIPd_~1VshX# z5JSz>C7&OSNm9^1zRUuZT6un;s9eu-=Ph%wkB`n-T=k=#6eV}5hLPTpb$~RKBmgFAfymhw|`q86ti8g+!qBOzy zXlJ+1x~=cioIba~+C((v(F%LC-}K3@zuBbNRv4Ff*5%Km7cw**Wu4BSe^Wx%A3h5o zD;+M=V^**}2dGgKt0g^?@`4rytwmN2#G6mX4+JQZG*YNOK&3uuv_N4o2RqIL?>x90e*7ayXlOw8Np^!ZX2(Br|eFXBel)&A?vylaP5k`Lja4!v%#@<0!rjD?XBu6>X zJj&&5>jM+qbcunpXV=guKdhypPT2utbo}qD7a=@CgHInX_%~+($N{qiS{3PGsi0t| z#t+QvY)(MTMw6LM}B>7skv+-s)x+logd*<<(4E>Bi$)!u~6hsfp+^Mv3?x%`sO?u&o4dTzLl;`LX3VeB^~M;Q?2c30p=_nsx>WTK{-)Y`-kF8#;| z#}YwWJRj$`|7o-oxVg(T^#@3zJl8dpKRGCowDqB&^^XkXPHkq{x6HBiO!wYTCcx1? 
zp%6!|;7KQGaPEW<&69hcDBm|pn@^JQy7}Y^>B$;+LpZ$@)6i>sffmzIYBQJvn&$b4 z!K*q_^}JqI>{~Ly5yKJFQk~=^3(HE)jCaUT{#QuozD)z=cJ@{SP}@-@(0HMFd;#oI zGhC>?dp}@3Zw5MZ30o{%x3;)1NM9#vS;MR&iQlCpRD)EPnY@$vfHrwJOgu%SRV_<_ znEjEA*AQ0%RO-@GnZesYJ2ypnnl(=Hag^q($j60#O11J-9r%qbK8F=qJTe2_sb5h5 zX)5OLL3f=_`LT6q0(=3#(%e{h_6jj zs6}T`pQMh(^)f_Tevi28K<`{QC=n#G;NE7N==n$Id;=2 z%DX_9IYII9%ElH1^Jz#m#ERc9)(U-zp;aR2)B~lVVYA6#cAA90u(+EvFQCECCK#1n z-3~fH!*1`{_(joTj07|J8T#MdqDts82x7liMXy`@^9J-{BxT8y+|fv}M+EI}l3}WC zr*j$~5oULl=yOFG-%#Uh$BzP)Gqvp7QI$G0@w_)!w6BL*SJ^w$aevAd=B*h}%0QL_ zjGux08f=mEW`Yj9vz)~yLIID$OlJf;v&kyk+9$2I8tC8aIar@M5a;CcR=!Y z;W72jrbrLTWWqQB=SGH3v~7lS#Z0@g75iFgV%5x-qsq?T%l!*?mxsb_^XFX~mAQq9 z1)DQT9xw!HiHkXZ&yi=n0@Mq+JD+?2rYuffg`@d!hPv*RYRC`O`$HsDmczt~&dnFR z$h!qv_fWrSUSMtF@0p7xM?Oby9v`LQ>G(EVUNtHgxl$L>+hG#{IP}J>wG93tv_B%p zT1Oe{-951p>{=d?c)`X~&S~u~!r*sEW;$#jH0p}(k|H^aUmbmIQ)Lc0=g`Iic0yNt zUit+CVSx^Yfe4udawy-6qvtIy{a-7=RyI*$!kqIS3%IFya0eclS3Kz052`Mu{p^i* z)p6;Tn{C|oI#ZqMC%IRuDQzyQ1vNCWiO)mf{--q5c@S3kc(gE$ImJwEaY1X0HDQZ3 zyKDc^Hw>B{h)gubp_ECS!PAeHkU$-bSmmbYT~MZV3xweV@fFu}8L}dS(vMO1;;XSW z3OnzpAH{d`-;WTD=GpVL z;Z`W16xo!6Si7vI6-(veB;;A@(v1(|%}%rJJG!00bnX{BnrW<|E46~e76FD`GtgS_~r%nT8gI^idsf&KP7a$qb zZHKSM^*|ruj$DsW;0WQHMXaTv_E5b1r|l`h&+Z7`k&5E)11C?yq|J~f`rmZMRNDmP za*i|iGnt;i6~5z}pR|X*T2p1CHInJeWLwS@uMTKG2r z2GValNqOF;N#6@lmrENs%+??AG6;);2BHN0>2P@7x$)$$nng`T`*oR**ctk|7mGe( zC6Ax}U_;!N?Y$M*>1n!*4mIfO3@@7#1q5k%-$Go_%UX45SbnF zoG9f>*J<&iHX~|G%P3G5e>zkCoV#$3%~bm6*b>F2ER+VGG#uE5b0OZ{tCR$V+; zDAe3U47@Mc&^!>RPa2FeTz>UA`y)c6v-qYf@2tTiIg5yW)?8&%P#`H&=AEVdKa{;? zSXE!Q{x7wuO-pwxAZ$YE?(R~+4G4&I$tESGLt0w8Te?#^r8}g%`?v6Wp65B|{NJ4a zb@760qkF}iYt1po829H^#fP;HzOT>7w}zLL~3hZ#6sVE zoY=X}IEgGeq(^9HINe)xz94=m$Gp0?#hgND2r&`66`J8mXHl;0?=+=fBY+uy+JTb) zOT|iZ;h9A;+i@YNVr}i~H95|gw+DvVOBe35aUpMt6_fZ_FqO>y;6%dV)SPH~%koyy zDx?xi$?UFD9j=ZaCzD@v#wC~}oyxj&Z@;-0>T|Go`}MnLvLD@jpfZS8>w3VE;yzA! zH8cz;c*tphM@ejq)WlgozTpH-O>g;+s@v&K<}BF)ExgXRvbAf<;wi@*6!8E=!LRh;heHj=umqjAwnEoDZD_ z)4jgm0R+XD``;Hedq~+wA{tO4sw7jm zW(_Fe}`Fgz7`q*2L>r zQhrTMqAcZ{D!N~-{pd?;Sp6QOaa%$dsUWJ}m0MEt*)sLQ&N?R4-Lxd6%_x0l*uJ zKGF+KUGdA4;u78(Xr`Bn@wey7m8qSx&?us7JzUx9#P5whu_18!H~1L^4g` z@TE(wvnlIs^|6}XPLDhwHrA%T$v+=t#VXR%x{(@Son(g!dQdL8nHE41xhpr zxJJl3SA)k+Qht7SLmD$=7z0|Vb%)*>d%>4N8xt;EKaZBKy96VnnQ}z2ZGP^1nHw|H zysc=%G)%ksNn@kEpFBguppjTX2e3Kg0}p)xDhd-TQ1#l)hsC!dbmUahU)6Y%pINn#J@XfY8mpwLcIagF>TknY2G2Sp8&&aTTEf znPC$bJ!=AXVD}!0Rm$F81u-N?z~>XK?RQ+joNB6 zVywPmhulX^d>&+hJt&utpg;{!xWX)1Cw6d1P!KVms5{;@JWv;kc1T-zT-g@+T3z5- z-pvJ6F1z+%Y{OhPpo!lwT>L|ny)|C#%Mux3?+3(t98*79d+KoTRu(%NfD`b7MYh?> zziXc^JBf`Cd*g$D;MSLVv9yCo0l<+PG2Tq4SLz-qMBo0h?YBR>4_%P6#TjCSiCKbn{SyYKrme%m?|VvGE}Zi6bu)gD#kOrmX=Pc6CSRin7q|BarXGL#G z^raB3UYob1Kv@f4+Xr8ZBoA#uyW2u0?bHLE?J^EEd zualS~3kWi1_*G2vBfre#XO)+%+QL4uJgLn+SUo^30u~gt%(Tg>Y;y9qttt^4x=iUy z>I>HdKMY3Inr1~gAR{*=0J04ZfL^tmW>oZcq4_B@1bTCk%~r>R#kTYIGktjB;aS=)Xf!sw zh5lakAoiw5a|&$M9&(bvfT#&U|E%Jw#9_?TY_! 
z40rt{a5NRkXNIWf0nY8B?Nb2tqt7kO+p60}L)f7k$Ik@!_javgQazCTlSY_ep^rj8 zqpWi0y!YZc8VK)c9J{+pYu=`U>+sRb+Z^i(3=^$mg2H+LQgr+=%e~fX%xr6q*q)J6 zt;i92v-u68)O?_leVKKmlmk(D_4Y*=~Tn$FV=F;(V4vXo(AwlzhhX{*{@;lE24Ow)8u= zpY2#U8VMy7xnCXA$d79x1*>O>3-GL$E%|aMTKii9JiR_UemMEM@a-EJ@J27A_OPQu zl9f6j_9Bn1+kUejppLcn@Jh<enuZfT_$RSF^nK!K^q9bYCz1jW8OGXK)En_R-Yw&k z<27bFCq4XWJ?ewiYLv5Pt55?VVp1gTU(-+Abc)4x)APZ1 z5jU*RBiW)Q15!ZIH}FSU5d*c7LlpB;(=rbPMFM9%5QNS0h{n7Lt2vl9wRK5{;IHPo zX<22PV1~zj{=Twm#giibuK^$DsdAQ{U|PyS6fLlSpnZjSWGuR01o7CDY0>>jmH zh2iW{4}rX~A;V;3)@`D*@#F2d?+m_(0;Q15u(PLxYjafkN{m>oH81sKsk%$yq|tzh zz|Ps4z|&)6-6L-jRukxHf1bOK5XwI zvb!gU_O`MfyR0+;wT+zQZNP73b{~PAYA}>U;zeVm1vKgEjN3uf7e>}m#Jckmfxc13 zsL`xN2nZL3rHI#N#x|jp8@ZMw3V9A5$;|PW-&<=@mU(5Ik{?`KyqzlW)0`#2Do^eo zedjYc(c6N7#`f}Xoz{cwdYr$81^o!cOlJyG(lKP0Hn^U3+0?a;!LRqHt6REvd1W|* zI~|Eeh^9Y?V3Z=`PLIABr2tfb$ynE&2s;3+2Rl=6#Uo-f-SmTl0dO{&w-(1~nQawH`} z_g(kB3^8_+VR{E%n&+Cco?DMMJKcrD@H)eJXUU^^&1yPvd=>ebBynEDf3xA z&(}BKJ@EwCsi@=BTl|F-e`jS>=>Z7qFjXv$<^@2@$*G;d@n_nCO!r*#-OXVg6*b~H zAp-HDA9flx+iOc^q07>-Y~`qC`A+y-Lsy+2O+bg;GrJm zYLjCtWjwCSUP?mIqPz08dz(9cwtnv8dH6yMc$^PG1Sq;=%-?>lcc%v^!&@ zz5%iuvgBe{{SEI)r08|b-#w1ouYLz9%fnQRIBE}GF}#KlXc5kK7R$#AF?kFC^7pbc zvMdhOX1y;OhpQ-nnu!8i7C9o={38t&4dyO_@i%&A4Xa ziR{uP7M-9@CKT-*Kc*XuQQ0KG#_G7)eL--q_I|YSrQ|5)E@TMwv*S5zcSq;%$6W!`NT9?{h|B< zMv$!+UA*R5VG4=J_+!fHMHm3$#I|xt8l=V2wOwCg$lARv*W!+~ca>&Yw9?XszEV1+L4%@`0xfq}^D5MFMgfoBC^yMe_ajPCCHU&&BO zFob0jgbaC5`3#!cj^;^db;ONS#WiL}h=(<1c^c_Jj~bR*TVp^W5SfixGe0v|<$hEx zTT%sL?3Ap-sF;9D3D3>wp*RV>j3s;D@T}g$@^`LdAHJ5ZrT~e-y~O9My0-U9Z$CN+ zm9r;iO9M)>YT1od%97~5RJ@`mw8YNCP%DfiL4lf9n6u^eEwA9v=D3X2$b$_3@3=|a1>e+aZj*7gLCU!*WH)*Ct{GkFIT zlo~cuNKm!{0=ZMo)D~l|pPJ^lJr~ph>rRvUZ@HxOxx)oRmtuzX42hLX#B05C!wMoA zX-5tqnDL++l#`Ev@#fw5b!kUrI1yOP6vGVK04{{}2jTa#la|gpM7C zimsTeC~R50!LVAr=tGz*(rSfrpukV^{8|jittuGNDieD&(7P#5Gc5gZj?uN@A>r+& zZKa#y07sr?{Ys}CN?(judGG~Q@#{}=0xyOXQ4lW|%y!x^z@&1L3J!s2ASOthg<-`% zl@N&k-vpWNuTL^CVoHHYh);svyRQlm#r)dvCX&L?i2Wq2&3bV>)+tYX%>MC=6x;QM zO`j@0Htu#UpP@X-mx=9YN^N-f0*j?>`_BZ6%!!GQH29p1BoJxI;z?N;F%Aqt zjSk*Hn0*N4Qe>+$pXPV%0?mZ85uA_h3+gU~#|*XJaCX{@)$^pCxh0(jxlH~fOf*ac zK!idT1jO>&TK)hO`Bp#2Ge=7`@yqR6IoyJSB_X($j@EbC^b4{QbWYMOk?;u zS=(UjDP#F|akIsOoHlZEp5 zlq5R2dTK_TvD_UBKvA@Ot{Cc9j*V!Oa_CO`@f=)q`3A(2>3*hfRkoR<^srp?t zZzN741GOeSN6>0JZ<7_K{R$rz6UO#AIoL;21D&} zyT#5x(^v^5Zo`hHn9^!BSl3!UEso>ZP|Tf`>?!2m{ocGQZ(9fSjBRAZ-6o=FVf$$M z>-Y_FioiuIF}cMO9@Yw{pw(^|^5Tauc~qZ?!g`kR<}#85LIU3diPeq}R7yUU2v2MD zxw-a%xH)m-+Oj(qJ(zUBFu>)|^#lsTSZz(~ls+|Hk{f2D84c}{^~^A%+J-ye8U%l+ zTq2g@UVat^EFO4@Vt?|$ymZFyS#<5++nQf)Z8qN?U~!&N+yae72%^W2TTd-sV9-+U zQ9~f8H{t1iz6Xic^u?2CIG5O9q~OK{{RKNWw$C*09Wp z&)fBFc8oUw?rBJp?Q-<`k70KQE4N#`@9NMiYdZW@fdamra&Gj--ZQ*2_^u*JzTkTh zpN^}Fb`$j5pLC!^ma9Cos;esSqQHj_Y5S)&tqyTFmF}Eq_Wbn_L0d2%mbn?Dv(i66 zTTMi4H#x_4aG)M>fk_bZW+A9RM%=243c<1b>eW=7Wmy|&!y|ze-A^UlVeLV&zuI=C{os0h zY4BCk=E*Sg7b;p67K}F% zC#L>aQ!FM2H#J@e-)=YDERpmJ>6Cc*o6uy0_VL;&$jlz*CZ}bC%VJ(*qY$^JPR&o_d^k}G))8|OWNiG+W5Q%f+cHh7CUBd$ z1T#~u&tFMia*2=4HSZKKkA(Vj>GZuIoGuj@L!~|@f&q^Auez@SL%5R)+>)DdzV1&W zXqJ%Gc^E0bkM<2Cp&WNx=c3XT|MKYWT*(19Lgjx=VHeN#ujapwhix6 z?01C-jMWQ$>Y3`!-!e*QbG=ey^g8?nZ5%iQvkQ(rdiBpD@)q~5=SHmtmj*XOx_d2K zEV9Rrp31_XS|)EzR*KsXOVGNqWHar!V~q#iAEk&`PC#L->)10IRP5MV;mbRUbts)^ ztH7&p*S^bYm(#pjDJ zzfHVj+TL=d>gz`Z6I;x~*1{U5_)PQjruL9D*octyc&P)F!@b~XQ)Nf1+|m`2ED%9)d$!IuT{^!8Fkyj-;+{aMu?F}=S6#o-FGSarc-fGSYc(x9 zC+s~m4Jbd79t&lkaOMrRo&pZbZ~1dZt8yUYQS=a<@JKl4ldrH=?W3b|dG;$gU+`PrH4|Wk3L=n8diX^jucy}BU{uyo zNenW+SE8iz_|xNqAHF8|HiJl_w%m*fJYl{?59}rJ?^(uj9s_E_KBqlPLuai`@4&^p zOLeu8JuSEqZdTItU?;}wWqiHUwSB=yDWLQXM0q2VD1$u}#vs)`L#HF^m%sV^;(51~ 
zW(2HzOAwzSv@6S-uk-Y4{WL@8aEJkgR-UozVR~slF8K+tHe=qoXr~XLFl#ctr1Wq< z!1ol_kxzT3N+yOsq`Y==H^fNZlZpz&v&k9m3bx&tVvk=hR1YoiA8t<;7=e&`XJ2mBY2E>a;B5*q3)-jv6F*R zHbp!w!ROB4-5jH+r!GxP#DeE`b_Uy*IWE7Z+isE+;jtu~;8m&GBfz|__ULGA?+YZUM;0bI(qyzbZMRG)uqo_PYDXAFo$ zcVbYViqw57&Z=L?v>RUF!K+exjZC5RTI)b z9!Zdit#>5K$==iJ7#?YwbI)8Pj*`U>E-6Cw5Q!$@MUh7}h2eaeoG86zfoJ&}UJ+EG zHKk=SMFuOr8w_uU)jtk*dn^(K0_{0P^aX~nOG{aTG$5nZg%2-j8??leK2ZW55M&T9u)_*1_jREEDt1Kq@4=lCyrR6~qIY6G0>-0uB@1EwuirPz|C@2Ex6CB2 z#ilCjUP6?1u3*Z-PPl5O|NG=LI6zf-dz(V*(d!W>ddnI7%RI8Y_h7Bh>wB-ekpC2cWo)mMSeAa#|k%VLM#*e}MG9F#VL7zkGKdre1yY$9cynX(R%0jIjntJ>iXa--It|&y{H4FGepn6LYBvKH2HHD<6Mx zA1=Q$BQx~@!f58NZhZyH?wIq#u#x4O`3B8*ZHNL#;lxX8wo~iRN+elmNHxkb917>2 zd_mM3x3*kY7GzCmFc@g4165ohYVThHuqaTf{knGu59F)aT?y*$^l9oz3DEuA2K3?d zc#;UWU^{-?mM@v!GE!L8 zTKYKo#45g-$#Q2?c{p|m^9bal{~9#xee=pLX7$ALM|Nyg>8I}oomX!SVvO@Mld;ZY zAM-S>OhE@SiMnLbuas4#W=C0Vhc_0RACT?`639%%_z>#K0 z=uY`ip^RSKyz@MDfp^i)G9FfJQMh>B5*M(wtetph6|K6cWJd!k}jXL?jj0a^y)-lQH3BM&ym(;ej0oOA;4_}gw<%aH?ZgxWY;`gV(=0&=5?VfPzfM>7RM^aC6s0q#1d#=SiTr<3J)Z2M=7+_x(qK$EDiP zUaIqJr$giQo(6<=4b*8!MK(}P(cLukcTC7@oBn7C+em@`2ByJ_J5YB~vnvM57!lul z2Oro5}#9Y5DIH!WMrR?E4s)+P} z?#(*!J`csAfU2=wvXb;BBWf zmFDWBP2rcdt(M9}N%J;U_T(Y6Rv@C>A2O<8CC(&MOp2#+lWBk(=)UFq%PplZ!Z1|` zv>2w7Epn?qIs`$)i-E&Zho}n~MF=t*w3{RV>}yq)>+W0R&!7?${Z`g*uI|(rH*B_1 zD$I-kHgp!@BzTu-Q=TMr`{*DZ-O1{q>Yx%gdA4bx+e3m4!{6w*XN!lmSd6`v!z)kB0h zb{325so}8_t6}8ftiBqh7wYsnK^U*Vd{aooPDd&m5MflPg66G?$(i2(AhDb-j^7U5 z;IlvGOmyBUP+`6^9Ik<2#N-J= zA%(KCoH<{67-l=`o5!+b@E@UmO$YV6QXEP>4g8$lb8+PvTgEerq=OuEX8{Eg9L6ty zahdoK`ItTPxHj;4c0P!crfddH4!Fhbuo-Wyv!irK!O0)!7NHDvBOcY)`~scWluY_X zL=GSMousyT0&CB|X~*$5FO}(XGxTNEwvoPOwdjtpc^kYa@>FTT7rj`sq*_h$Pr^p&~{EP#;V`!_txmR z*`k{r=!2PQbL|(^>0@<*1ADqUo&>Cov;t=bD6uO;Ck~q_Ur>T4h5ek$2x8MDLk`g7 zS{vG3UH*~JF><9|`hC4zJlI@xBMg}N64KGj2B4N%>e#rjBRm1U`2$4dWP0=AmJ+3J zURoGS;0S7K%z1qZBuV^#{-C%OI2)ntxG`22etVH?y-V+XQ_Pns_yF`2Y{^)TeU^XQ zfd(u^j=0&M3f1@vSk%rla>i%&2j&6I_(g+t;kkn2bVuo?vj4=2a=2-1U zUqlkeP^q=~51pAMn_Tya?KRKH6VAf)r2r%WI-UG?C;5FHeGpLZEGE-N$+iLk;FT6_ zX#LzMfeh;qz)mG3AwuG)nc5oAGVo{P->jUCE3>`VFm97;VKcKjah}gd1D-4|Oc&<2@8o$AT@FM@QU1vqKIqHGE9L zvyl@a!>^Qwc8VQHgnA=QEmsEk6Ixlo-b|iuwL))saY9fJSlL8biya18m`#Uqa^-$? 
z^Gy=D7Yd-BP%t{TI_-AmZdkbq9h0p6b^3I_e2fQxiyPO5XW`EQ%M&Fv>PQ-TnDBew-Ag}|r(Qb`T~e)L5O zFx_&fiK*rg+vBB^I9^fYWSJPS-M{{b!Kufo-K3D|9u0*b6(vgymIdS-X(!o9@n1m9 z4seqE)^p$CI?x-%#sz>7S0(!s0~i6^!If@4xRvZCmx5ffMg;Q`6-Hp_1YVEFp*?&y zG*9{D{@rL+4?lF)p<<;-4S-|(H_Jw^p9$lSMq(!EG^)0!FA{Ud1a{(p9;*oj>Mo(z z?wTJTK>T3kmz+1xp0L%yA)cjAuePQ6=23_Yp5sO#9*Pv-dWLhg4xS-0UZ0FLR&&l8 zxQiUE?z8oWsk=~Bgd<3i6_XCUoztv*iCqbipKk-yyw855@7Tn8dAk+@##O_Yc!A7z zys3Ql2Bpma)AaMgO75aS!mHeqjnK-*j&jSd;cW=Anj^Q7dWUQ06`xF}p49vtijyLx ziCsVcBwEDvKp{3Hv#D5{g+I$&kXIkM%BsAFzRBX6M;=p%bK)`Ci*|pM*nth(0&F)f z0^h5b+DlZKZXq{n01aGHrKAz3W8=nBp<0!=c6i(^&IIV&%gsGWs`wT4mVqrGKC zhbxfC8+bP8&p9kmnZ(F>?%>@k5n};ZK@Z;R)hX}OmPBSp~NdGU^tJ$K@MGz1ah9-OMy?8<# zW;dCx``dz^RB=%Fp8)Kl9GYG2qc9aAta~>11CS(J=)g_MxvC0n#Hd&Fc5}WhG#voc z)aDD01S~p`mnpzT(W11en%^P7smESazJp2t$Tj=txJ&@Mrb4R@3~rX;HR55I)JALi z$(}Q52&oPN7e*l3x*X}6ev9G6c89bDNNZ5#14N&~WlUFuWZlT4-SJt7g9$I4ep<4{ zyDmz{$fEspwFB4Afn5oKy4pR`-%%nc?#rD``X;r7Ows>@xN^$?40W4zX}tTK<3R0a zGWI^DM*v{WR;1v@(7>kXGSKyy`>qTZT}g)q*GA|p1iJv59ED@xp#|PF7a1s8`mXwF zlwB$X>&sQgf)GOwKyn`oo^t744#<1wZBI2FwSpYM0))N5+Dn$-0^<{*qHwF~l&pyB zUcH=ekSaat?nf=x2m?1(O`%Nv^g}X{L1NAtyv1uVl#rVCmv^203@*l>1$mL`=&RVT;{o7C&PvKp zoM1{dg%9iheON_#4{F5h?MG{W&VWU2 zNo#eWo0=q-W_8m{iO!{Ce;p;y`tne7Oz@y~t^Tm2Q_`!TJ~a-{JKygWS%^+CQyGi* zWO+|Cs=NWbe)opiO*F9j979zDt|??l>VWl8?DuLTy*5 zCO^^!TsT>CE)2Gx?p(+mcbv~i-8YaG^%`p@pjb{5&WuNunF+o5f+`Eb`EiCPeI0t^ zo(!;1TJPvvPhP+%cFl-WujTvA{=N^!0T2to63EH@^ZJ()k%89NZGC?l&ersJD2Rbr zgtjwP<0^d4EL>cr73yo6%7NXcpfhM~xXgl~*hSYlXB;8e=MF(BV<x^Nwi1^dfrx3!X5ej`=^WaXrTKKKNi;FzlQ|%>BrhlBE>EDTp}ZZwhqe z>$BnarX3t3%D3$mN%tVr6B@5B@ep`UIrgpJ#H>z5HJS8~1t0t&);Bq$gpoXchwd6u z&1p`Py6RqKEJXq@Utr$9@_26&t6AS&oecFy)KwEteH$3y{N{WiCS0&cLG+N^n8u48 zuOoAZ=J0i39mKUSa=^xyD3cfI=AhByg-10@Zf2Ek4nii|tpjkHS;zMscCOOv{8FFp z!@X;mpq8;Zey}Hx`rkh`JUc(w2{o$!GR8f=hMZO{{T!N0#R8cx>S}_j*_He9d!GRv zHjF)W{aXs|h%8T9PR8usiegNLXK*fL86~z$s~Z7k@9zXm8fi8~^HD{+%^b0!k&Uoi zu9qn~EAeixfmw9Wa>YBuZda%CARirYRa{b9cRIxHd}_E|R`0b*;PO^8bzc0_)O3#n zg}06{92+Mfy49;8lS?WdlThezKIup5BS42UeDRIs7;uF6X?+8nk;ksibUW_~Jr9Mp zD574?2bB^%2M@qY;a~moPN_owjm`=2{sb(XI%R75{5jY0fIe)(oc1j>t2vloCa+DN z9U%>7Dzi{Uldj{u;MBl2Frcx5^cEg_mBWFR`)X7}q1}ZYn4b*W`2s+1)lC+miz!wP zt7X!kA16TR2&Pqha`4lH;5`|3)Qcg2<+}f*S+z}ml z(dbrV!`bl%#?OdF!~6QvQCM%*L`@`^dA}h#`C8=HfFA+@Ie0cecWhAhnnXvbaut4E)v(tjpRBYur zd%;-saT{a{;(_=A>BfPgKA}!)anr!2x-Q0`7#Tz#raz>X&hdq0Xe)#wnLggHVxKl6jq(6 zkV_O)>zz0mdUA=`&TQp5iI43Z4oLuEgwB}l7^mb2GnQnyH&gbbo6r%U5~XeMaOms2 zr8sP8WE(9hiwHsoR21kemCs45gM1C^cbdwXULB_0A|CN|dJv#<E9Ic9`SmI$T z9qBPB3axk4Vy{siC&(S7uJI~Riqa6S?s~SK9$s&zEh%0C9&YX?>`$DD1H0{7O;%iE z-KSpW+^VEAEKC5gQzBihH(&3eKoi{K#Y1^R3G>eoJ_&bj%zURL@w3h=fR`blYn8k^ zMuNs>k-_*&`2T3(lCw_G1La>vNv{h#F11HRGM;ny}KU z!`jo5lU+GxA^nCw_ZPX&_a#@qqa?cov4nN*YyzA|c8=nIu(I8Ew!BQekIBWiU~4cs z_kQJx$~G&hGv093tL!;IO6y!3aLCUM z$=6l^c7`1eh@`%y>wS9#FMa9qw=9(Qq?24ox{4ohh@@@6yC*c%@yfzs+iS~mpk@x7 zQ%x-(?+splTYvlR&Sd0ljmy;o3)EA=>gSzZAwWhMswa6grf!dcP%uO1gPcZ#Vh{`| zY(}yA?4m%F{IcD$h=wRXm^Pdz0|h}}68ppRHljpD-$vi^DirJH{w4Fm_PsHhfiX58 z($0$TBXJ@+W*@f&S#v+{i<8+7Woj@JzTt&2PN=OUHhk#`dX@m^ zYi6x+{0R)VgW{Ks9^4f|NGKY~ll@BIPl7SpY$-r_XqX1vo_3F)kd(LHKq^dhK)Zi1U_&aqKpWqKN6ZE5{e?eL$tG40&g)d0NGL6`#ex96g*YT>-wo*&+cp;OHp+)>CJf}x5*X` zQq4d>v8$)Vo6cb5wv;QLN=E}A+CL2yIfAL7sraIw?tQ22lh~3Kfd+b2YNMpSl~XnT zZHHp$iyWucspb@VnIu9E2mTH8-KTy-G4PFV<_Id|(maleZ_HxleKZ9mLyCwn0xEma zWg>y@=Sh|oycnQde*aDn0JAm6BE92E zXgJ`gS;Dk}h1s><174MqI-bsB><>KvF^CB+NEydwdBp@9OG#aw{E>ozX2k{jkRz~F zy?h@~yS=}7$HU6Pui>{5)&2e#Z(=4Av%^{YO%?Fi{?`u_^q1+KQT3PUU2pimcwU@o 
zB7l0PzYwyECr;Y`eIpJrboh*P=}M{`0Os{HFhr=PGJ=y<7^tp8IiE7r-f)Q$7&_jewMzxqz$D)kS!UhRLA;RYB-I!V#Y>;U2b!LaLOWgk7p&QS0MIRUGl z&Jz~UDIm5*-?j~DBi&;d!NUuc4pB$w0}#BwIx>w!e=;?*0|-|5K=-Ehf!W{eloUJ) zuMSAd5TFh*4+-5`-t$}35T)o&`kH(hYaPoAd@FSJ1MBr$k~d+8kBZP{>}kMkD;No# zz){&b?Dgb}+jPx-D<`bvmG?TBc!%;Zoj#=%p1v<&5>ZXG4;pTo-OKV<}w7;}BjWcbWZ5=L_SzAKzYXG^o5=2W<) ze|}|uZ(whrd1l7s(oKlvaL}!$@zUddaY@kZ&%pQ3fL3HSUDUToJOI;aa^q1Y@P}c? zSAU$`>)k61Z+p~6V6($ugF*5`z3(r zVUmwLu@tdmfwnR`22p7Xh>L~`pucGsG~XKeA-bMlW&*JABF+z&T3+wb+&=SWptl7s zb_U3nlQ-sU8`wQuZzf6P6}PQR(l1h;EoNMrE>J$F*-Q7u=Ff_m}_kEgDJBd}2T0Wefas z#HSCvjS#Vm0O}ila@HUv7Pe2UjL*w|pVP-{L<+Y(_HmR+`+dL@w~Uz6^tqyt?Xt+z z4j!%K%UT$><=%98HNXye+wgfGEys}h-~adTiF;Q8-=0M;!J>sJ|?N8;cwd=6O+6&y;wetE&nxvfK$N76Df<5jMK7> zdhxoCj^0u0A@b;tKTj6&WU6>}D#is4O&vTX0Djz^S@U<68!hn=Knc0-mm{ZPE98P ziCKCHGN1Zrn@C>f!p-k+xafq5;}grra|QQzBE5Y2Zg_=%ro8n3obvw~>wjM~cHV1X zKK^7dNV#cFy-pG2cl`9ia(O4E$a(7X@0T!YmW@jG%KemYoE>q8wQcs3onhu>F~I04<-I!SNKbajxJjZV+z)c5P0 zTdLFU%0jG0YK+PC&>Ns8V^ww4!}yeQ)A>u2Go@m}iw&kCI_`-9A6*B;U*lJPk8q(hz_TGoFa+793U<8T$KQ~Jark>M1vKu4cD07u=pFzcB!!3wLu+$Z>X zu5D02m(fD8#9j0_zGnP1-8RRh>G^183rQ?&V`9G0{2G4<5T4Y@;IA>^3BAa`fk& zH>(;wZsnbA)@WGF=0x553jD>h z*m(V`28kx4v1H|PNJ<2g+4yVz8B`aaD0@Wa)tef#?wLvotu0`%{Ef>#m(q7!|Gdo6 zFQ#q_zPN5CWSA8%Rj9zh`!hXY^G6qJY3k=4jG}qr82pXL%*>J=UqI}B z-H;PjM0rvgxf!m|^NOE(cT_mPqF6~H!xFE;tejWezxt1rjukfvUhMwdDXt$A@3{Ld z-uaw^oQ*ufs6CN1jm%=Iz9w93E^q&KQW7JreuM$wOK&DQRk}TLvj=LO)Yu^jmrPMC zG6q^i?LG}*a!N8lrghvrWHZwyYn0O?%fUd^GOep!FPamoq4xBA&JlZ^5MuwqU!X`K z={$w}>Q>nvzDUf3tor=q^9copo9W9KZZs{X^RlS3qOtP(jEi{Vn^|6G&lRKM85Auv zZ&Tu@u#e+uN%HxxD2(m@v?PVmvf5Bd!xFG(81DII3|RTDM`!z|H(7OHWBi+kp5__( zv|khn3+KPM3cusR;bRDET@E>R@JZO#&Ihz-tz^ z6hQ<*kcCLMAfPnT-O?dl0@ASvK^o}>0qHL3mhSFaNHCo%ZaSzTplMF9hf+)*ae)Yp!)ya1B`^X!xILWx?w3d{6 zqQayj5U-syJH6cK*8Ne+bbxA{N|H>q<`l-XTZB*LYh%aoti;f9dwbE5_tZxDm(k;P zT8WQs><}CiL~ToCleZWrh=w!Kj1HO}5uJ4bZ`dEBNk-dBvt2*{Ww3W+ukGT$R zmKW7R>dwlZ+r1ePJXrB_kzdY%Dsv3ieAnOrq?d9yuJx*Yr5}lA2I~ea57cIHT+6E zK2VuWg22`z?ybe%TncZiPa1Cd=|)t2X)Pw%OH25YvMVqJXrtM{6K8|Grkl$*=s#-2yH?tb$QAWn9?ip5jD_A=d;0U-vH@ zaLzVS(oM$Sq`X*`;3@bLe_6P@0rSEgG2XV1Mc0js&?B_aKTz+&FX(e99ThRICu-y6 za_Y*-$I@ckQ;w5yY#y*2(Dur<;I^BnXxdRUcH`iq#VZgiu<`c09f3_pqb#olHYStG z5F+chOyoO#ApX0|r2L2P^!1Y8=uV>5`ZUJgzFr|7zCX8~sg+YU!gJCc$6b31+cd`wUZIq?9EKj4GKzfctt;+59%DB-%2 z@zS1QH;XG?$<>3eww?H-bR>o_m}__Hg(#6k9BQNQ~!g@4qKCHOii>{t)L z8Z2)5@%^9Wk-jt$D|rFOKF}mB{c&3x5#NZ}PVZ|MjvsBzq)8Q9Fvi;^B+lHD%4ovT z?!(~oVcT}0s&tP+Ka|9-QW8tEMlK?)7viBGnB+3sRdU_@YEDDC_zujhyabd7^&THw z&*c(dRj~#cYYo|GviB*BC`yh5^##QinD^G)$_XQOna@7yp&I4+8t#WmSkEt~Ky6_& zt+Z5j(ON`gzm$z1*F4t^I_0*Z!E(vR>k1M+){{g8g>pTa~HA^b2vjo9N8{e z+6$uWx_A4LK8tLT(DGA>A*l;e9bqti&aj*2343}AyLz@WrEG)4Da+J`?4f;t{haGlp zHvLks6|~<*{rue)^kZ!wZdpxMg)lw~izZX;K4mO&R8am>5*+l%p_ont?arCUY%PB9 z@Sb#f)O%rFDCB&B{K9qVS>$@KI3%mH-gPo@O7m)cf}pu)LwsSXa!**K(yZ#_xP{OM z?|MNB!G5?GTh=mIC!;=pj)-Ua>aQEQ;=FoHHxb&sEe+Zwn?#cA1*6bWI!4val@>fT zTMXrs#vQVYp}{ML3GJC8KC=oJsUky;bnnr|bYE?2EAtNs3kPi&6dX=wim_K{|Iq*iLiXiO-#D9YKh) zuS*SIgNv$^_QcUlt8#_K`Jl1G0R!1icUAUX()MLYL{1!A#rxGo6{^{IM^{Lm$CTh~ z!5XQh0Z6=r+q#BjWw45F_sf(0Fco~KjeI?2K;7oXQPsNGFo9lwHl>8~58FNb!1tbm zsJ;_p@jS4XE}k850)+~`cV?1IQ|TMeT63M0q=U|;F(+d81p~5xs+0gn>ZB|CPvA_J z|DR@CKhcgUOu-J{zNR@B=UK|{G@lKeUxSP0y7?Ps9+w;>`O$M;hPLj$!^%t#dzrmE-0N_9ZDZetZjZ_q zjE2rS;v>ifoq8JILWZf?pg4D#HhP^;AAQ(n6MjE(50q;58ZOq*y(RmgHB+-@iIG+b z76W;6{FM(f z+^M`f%uC4tRhKAb1Z2bMsSYqI1rF;;)hVn&0<%|ZePueA)rm$vHoxAu~b`-Pbk>`3i zK0ls*Jm5;5(|6p^?cC9vOcWIK!n0WZF$yc?STNcl81(p{eZ6v5=lTaV@K9K`c!cz%{`?a*L~NA*OMna_|9|8PQe1I zE#GbK&~>BOJ-li45m;ZiFjSojqo$01&7mVz62YWgGb~7GG34k@kYjMY!tE7Jj=6>e 
zHCRKCv8wtVtHpm9t9@CK{z@MA3;8amRnPn>89vn_ZPMi|S96UdguALc@~-ee>^!pQ z*m155zprgT1+VoYuAD!xerK7WT$1K>a<}W-&4WWZMoe@%n`la- zQpt>J9h2I3a*|wdd(8Ya5vIR>loj~hb6FfYmLg53Eu~i7Tqli!9LY%x51h`5L09@| z-rlks9mWiL7sZq;>WvzR8;``da=s$tUiLfgssA+Y8KObNcj?VlNNUZJ1jyMr&SD;& zSYesJ;DQ#zP7WEKpedJsIMit{ew;m6VKJf6QFLWU!jPx$;o@4Lg=mW_DG2k6dZfQT z#TYw)WovuwPB+uYx1~Ksy0nV8I^SawfZe}WXacpW{q|KIO&Q|q9Fb%@ zd3k>5!!tvT?(wkpvvzary^UkocVMjob=2$-@ZICWZPVlURfo2zvW_dr;85@%T#9C;trSH zZ0(Ee@;@IPj@7R7nJDGr2&slcm7>Q;KFo4~#o(*fahEF=o9KtK`lRX$j2c#~CeG=B zdi_R%&3)U6@6mNtgM~q_Vqu8r`TZ8u?GaP!3pho)AWQqv*(kMYH~zWn_T;8GqY!7G z8vBYGMNEL-UlR+@4kWy782h`N@-7rEVKM9}ua^8bn*Ki_TIEks00-+xprIIG<&LM7 zya+cUu44_2F|eU`XJ)kW24++Fp|gGG9X{jSMT-#hy7i95S|@y}EE(bd>gJ?|=W=c3 z=kZFtn-+Ci3I867Fv=&7rdRC%e4Co{#ltD+J9E%RvrQM`+o~!D3AIQ2X6fx4Y@S>( zp z&y{N z5>rvFmD;d}Cl>IfWM%T5h)*BN3|dKkFX9H@_jOtp5YNt$K>~T$KhMR}oYBtGbG*E^aCI^#d${plYclJajSx zqLW<{s|P8MCg5e(_;o*x?c1Mk^)1Z%5FT`qnm4t*sH|R3SrovWme87+CctkKlad!J zNn`D-77bnJjbr)}{ya13W}evzb9>z@$Ruw)swt$zvFR(`lWyD29Vc`d7FDK2l4BDFO41N`nrY&sQg(TmiqC(RFDn)X|k;&g;ZPFW>V|)g#UCPA(4zRcdoH z{J@(#LcLXa3)n5;3y3u;_yMM66s{S{b1~>7Y0HXL{JtW8};nh@Y;pa;P`X9@68<3o=+VnfmZEA`-R6nd=2Wpq3JzL!|Wj52QR+p zdEo`%-i2e8)Y&(2SH(faR1uNBj#7Wh8d+Yin%ul@5x=WRa&H4JI#=UhyGxc-Q%_yD) z4}L3d8SAjLjbQ3ZzY9j6CDH#T;cVhBdEzk03s zuk@nZeu%iwqio|TI%$Xz#Gt2!QO2S-_Y86Odv)PYZ94L56B^35T~jm*Qv~>CN1HVn z;&O@sZp$z;-#Ti&WG+dy8YtKH3PesKUe$H6)8ZYLaG8{kz4&`lIIY_})W3d$)yMRr z@!3sq;hO!5-!0mAItholN;RusJ8^2LRt80Cw*(_f#8j375nLdRY5os6*`{Qd(-iK- zjM7<6u_FyEM23aB$Lf+-ip>_mmREurBw_Ol7+82RO3@i8X3JE7-B*=Ev@vgygSJn| zJB$bOcc_fX{?UZB#~%(o>$K-iP>M+$1Kph&3gd_KRrdMa3?7&0cVotkdNWtwL$|!) zv~>F6LO9Mmt`iB2fo3}7cY=OT(*Aej@Bj5o77Nr(Y@V-qlXQ#SxeIB>Eh`6DD%}Hy zq~7Iv3;daG^^lMK-s|7#Hst@1Za@7sO|;(n^UVASkXXcmx@@OZ_D?`7+3O!6B>z8| zScB;N)!US7A;Zwg`#TI6VAbzG8LIxjF#I`N{=XguBasl&|Jnuc{|%Sn|DB9KDINbX z44xC>@25(1Pqy-lXS%}4EPA47CUO<2wr48xF`vB|Vuv{`Pt=U=;DKxA&y?}!TURoK z&0q3;=ou~hzX??IM__nufTjDJ+aM+IpFl)m=nt|`ZoNUi8`oiC4UeniU8KcnZ8F|} zy~TgwhzUqaOBSWn+24L2j%*$Se%B?p!Iq%E1001xKSW8~3_Q+ybqMEj-+sP3jphF9 z^WD*KWU~KTKf>&-hoyGe9q(bMi0=*dBstXMZ1!#Qyi^LVk4q3Q&#ZJ`wNv8^OuM9~p?$ z-+}P_?*aDv5v${FI;jn{XeOR9h?M>?S8aW=nVqsWLE?V8I8m(AG-qY>_lF3c=U5>4 zi|XN7kbe<~Zh??Y+dluDOKJZzmwHYJ{#*{_s|2Ly(Gb3Cr$oSwn+G&3-SrTTH3AK= zjKE@Vt_Ey$QJHQFc-(pb6!??ub01qL{`kc6x32*n%M(J7vY#Xfw)_5z{o?793_i;a z>2D5CviN`AMcIQPI18Mmk+4%oPP*cb9 zTx|851+K9-)4-R)LaWveor~RcgnWCVF!2f)F#kz!6bX4|I>>z}rCJ3x{LD;fSk1fK zDxpH2!Gv&r%E(f1^5-}mb_n+BDyE_eD`$EDA@*hphzkXQgYmUW5N+-sDA*?Cp8Lz4 zvpdse7VUT{54@H{o+vt0f^A7Z2Ka6+!?ceptri-0>dC$N3z5eI&vff=aYYt3_kI^P z*)NcVjj1Azoc*-}e7$b)4Vp|NNtD+uSl8zAsKBkv3JQGDEi$8(zk@?sC~QGQ z1g11&6yFfzNTJKymv3y>`bxgrt`D35`_L)Irb`kAv_74VHsoCn{KG|G$fijXBZgM0bQs~zxOSIMV~_^jaM=al_I zHp;-)aO{be-LP*Ujd=s={ZQ@$vG?EPzutg_2R%JiC6sv;?+p)Dx)-arpUncRcF zx0d3w6z9XWY2aFq4+5^W5i7i#o(}3cUe1hfeuFB2$Om{Voii-}8?x=u}fc2OOJD z`gZ>-hpiOc`#VL9>WliS8{o~^o0+>NRj&ekQWKN_i*uaxV<)lW)S@5YNap3VIqc5( zKwG{%ePx5Uk+d7?ajC!O$YzXPpcF}I$c6{vE-f5m^DlnoEz|?#tA+Dd+zCM3y{9J^ z`o3p3o&M+;OlW+sd&hR3%kH~{iu!Df>13*xW@cnwtES$Ac+QCPqfJFl!TVYo(#<_8 zI&OHVo)%NGi?WB_1EcgsEX=H zOLYAcuV`R4b4e!dFDM1`*eWJMPM~hn+sx3EBjKG_l(@>dgO{0rU?A;;A)EYX%o3$N$?)~ zCk>$~(~5}RMH~C_3D%EiCQbRM6h=&;I$5_$S6r_L_Q6IW)85fuRxUT?yV<1%lxB&F zZUqSH$RnqlD+E$c&FZYRwb0~&ow>m|{^JQ|1jlaq=nhz|i59&z&6NiC&pn%``3L_u8ypq{n&r!d|zZ6(FZv_>K6%mr$>WRxHInf`SjKC5im6` zz5T~sZyT_6gYjZh)+3>|Z;kpJA!ystaAi!o!bmBZ9*gJuz87AKT>y^S4&cMBqf&Kq z5`t;e>7w5WFo@;*5*Lwg6{5eJJ&*%gZ4di{2b@*Zd zyI|#U?oL(9Yb{``K{+gc^d*u$^;xroB1y_;EZ#RxmueqM+jl?`sC+ETJzVRsOW3B4 z#e4dV7|6mvZq67Y$a`p#*@_$^N&WeCHV?DL50Xhv=m^Q;sE4VUaCQc^H}=M#&M 
zr4rhZzzHz#_yGgL&|k?<8%&-^sO+(5iHe8o)~iI(smi;fn;*2Ztxhb&*&Vzg8~oJ0 zYbnLybC5Bg;+pKUMO`6vk;y_>3y-1wY+o4piF=hcBW30#u4Q!?jcdu>jjsEVEu2{u zm+Me{RJ#8Cuj5>Kjh%@tBRjIZgB>4q6!$drE3+xq-jW3Aey<3g^mePwRG>a4y7=OQPf}m z+59;&KOSI(GuWPB+t5f+!-@L#WN`{Hs4ugyKezf~-ba+eE`Ck5*=_6lA;ELCsytfLcl8Lz&t`pfmZlEte`}h;wBRhc&!+tG%1zdLXWd#;yeoW=7XKnooNvi1VY@ zJV>Z#>&wLJKCsMygpNXW*zN5Cd95!+#$;V1=V%r14EYG}S~vm49l`-zpx)9~pwP1< zd?VKtr87vlI}1GMm8b$vyVAe3#L`{{coPGYSs;XR01}vlJ&WK92?SLWWL~no0V9Q_u#l6)+8AP$X&<#@E7~VIWVCAvs+K`aPP`v|0?!=u-xBP(+YE8U7L=ZP$aW_wm!W5uub z;oHU8h7lg4S<-`TO`D^}1_w7reR1*P*Zp5P=oB}sCJLtGeXorMlOx3sUmFj+jyJmu zz647cJS^rdT@Yt|DZaca(R|U-6LV#P9Szn7^`3Ogv394254TFBdanelwDDkXVvsuw zUhO%P0rg4m%7N2^<1eqn#w1DbozTKHYhbq$_B#f@yms(qssE$3I5wBr)N*S-`qk`* zGU+T(DyKqjHU^LqX5sDOla8xjA$CocMn?6+FUo+B#n9EjvZEE-fIG{!gXn>+eblSv zrBJz|*G^H9^-HV}2zAvwtV^D?>9veeeJdphp3$(~(i0A+9N)A}TD8Ss`cx^aTgBuW z-Q}>`xFMkf9Qcn7>;HJQjA{N9?tgHxck1QbU`zj1lre{@7z?r*##p1N z93q;uJ$C*>uhf7dR%QFt^;h$~r%8OZB7vo&z8mU89BZUG5|;RMpvIbz{t4XUx}8MT zI=Jo;&dFQ-z%q^2_JBQ_c@M!Ei}?gAwX;IV%)8)03F_v`wz|kd`<#p=_8eW;hm_zN zu-nnjAf2!<>e0lBY}UWZ*Ctl~`C_CG^vlWCp4P+lE)Gyp#pX*`ZsIVPfK6h=A*?%k z|KUzapZZgtKD7tL*_3Ii309anQv6$5JWzWLWC6#<%5#j4c7vTT=E-w#@vN3y~L@&Tf(m zK!=E`AJU6NppLmLdOY{djF5z= zzkrx;g^$7{0coDmM4U`n9;Uzqi!NBEaJ!~ZWhj-Pr`q)?d>C-LuVF%)s?sRDFOqts z?f}aDd#PgXjTv!mJa5L#SGufD5yRFLRnh{t9ep&j6AXx{zJ3%iIijmf;^oYgfhq7U z@xHElpE<@(IoU3PiBMlgsSD_S7o1!i^ZbS7C(g#Sq4Xia=C)4CzY)<{7_2k=L{7R-{&N~q7t=&yv&>9czxIBn$>f7L; zf7AoaA-`H<(bU+xEYKV5(J;jt0c@jdgL~XoGKpiWK6H>t`aZhy5e%1WneAQY*BH}_ z3JaGi1?>i7J#3QzRPve7^|P`;UX6W9nC&1>&E>grl-MU-BMDEcBWS?>*A91)Dujik0zNfi(zPiwpWpL{jl+w(Q`>EYPbl zCqbWfiFxlPN3r14iFzjON!OAn>6xC09-Dji-sUh|7(2tmt_2vLYLxj$UrJv#U;5DH=vjyPx7t^un=y6EzsoLY`JS+q^w3VQ+c z%Zr^}V#lDr8{}DD=HM`&C~#R7+sHbug;ZeTByK{dc*+cxI13$_%Q8K`maiK$zh4_5Bw+y@ciWUxn)GKzH zhe(@(r8>S~=zWdpC0;4HK`er>Xt15zXtBh65pAWUJU&XOKcl$RlirN z>1s916%38&!9guOXXWGkSQ+gGVKk49BK{d?US)+{xg!{cgOM=(1&B!w_b?{k^RYssmI z@^UXcHVg_;mF~o#0W3DNe0ed5(^AE^T*BN#2KJ11GhHz{GM5c6(Usen*iSdrX|p3a z_o#s06d-m(Jg)-ZjWMehIiuBw!Iy%V9x9?g==}0j(b8drjoA|rxDF(g#l|k{7Osj0 zUX{yrhI)tWb8Ii(+S&={GYHmXGm>g(=@hnIGjzWfYmZ8GA{kYTQJZtbnhs4i{zWOM@TaAjA{P^>^< zFoSa?yWt${8B`<%wW80&;r@PlQ34Ss)Sz9 z^G#By_&D2-blT88E&tYMdO|h*biU5f3UPGtuEAo|I&fB5ti%TRm99+SmS4JqWkGZxiAp*w+9>{ma8^S} zw(-5Nh19-l?0TB(;V*%ji(XmxgGkPNo;F!VE7DMb(tZ&X94-9el~ihsUAxe}*A=hY z9kHC{^4rLJ#;?VM$gmd9C?BcK=~OtcYA6gTcO8DM>~DRv)&tn@8$9F(G3)WJusqPs z?e(t4etVQC`y4}@kW3(#T(;oe-oKLcEfnNb5D%4(;b5mYMvfvzu`o$VU-vSMaqQza zm$9x#L{vO$*3EhPWS)dCSrsTv#hC+mH`wwD!c1AkpH zAqB=bDYMj^iVrVM=aM_a1Bh}&@CzJ0rCz;1tl0!mh@F0VE)s~9AMJONOwJ0S=i)Ys zchWSyqA7eY9DKHaYVNJqNW=OL_mWzx@jZn!nTEU8zN6%mf}BHH%p|k0`m?n}+Ye*m z2r$9u&_W7}%SlnpPF1S!`g{p9$yB1QbES8Yp&MkVn#0LT7{uBROYvK%Aj57a+{ziK zG7$TwN8Gq&5;&+D%&Kf`(U@yXktM&bHZs)1pt!C1Ly5@(jl(_Hy_v z0eWeopD!IB*l)F4^pYeI_bM2Fbd`+`Bl^s5ixU=`?ljPmD^R)S;TK&Oj3d-$BN6b3 zfX`>=2x{?y|8f0ob>4t1~Y=dtS-s#r7DP8hyAUtlt;o)xRjrYB0)j@7IgT zu!*HJQ$8|wy*9li$SjF5cyI^zyzm=rmS zWP&%j^^H9VwSf!M_|;o&cRJ>I^9sK9JuVmHkX^!i#CCEGY0LZs&KYza%W8;foGKlA z3Wm+->M3aGNbhWPPgr$kSlCN7BaQe_fe48FOXZHFWW^x>=mLJ2yL7of@k8L^L;tIb&!; zi$cP|p$pHZ+<+J(NU6UXOP3IHf&qA1~m!8EvtTYNTnHm@iv~t*hqUXtc+naL* z%a4&U$|X2NJ}ZRWo8we!qBcj$zmL%OkQLX1uTh<;9ja`hvfr;W)-$H$vO4Uv%mJ=C z#Eveu^HiAXfL%TpYK^WRtpj6Y9bW+37f0E3D@z&}RZ3pOB7CIu!cGgQkBg|7Zb@Vt zyJW^Z3oZzs6CXAC5M6h^l~08~nHWk|IL;Bxie4I}gJQvx7!;rUSwf}nzEr6zy&r!C zde>|Kr|By3^M~|9HMs*T0;I19^Y}I?#7SQUsnx4&G}J0c1}AYnb#b^48p7%G6Am*^L?L+P{pWNiTKN`8*j z4BI%?fV&k%9yizB?Z<18sW9>fZ7lKB{WB^Y~C25*x3((S)r8lE++XXm9bGRn#Xtx9Lnu%)nj&_nyC&~+VF zqm})GW>$^{;k-KISG+b2{rL*XuTK-}2r&(vMI)|`viS-Xi|#A+32wxNnw375dDC3K 
zUpR2Sf`wJ8x0g0vzc^NZo2jaY=t;j1#$r|73pO};SE1xUwOq4nc@lCb$=beCWd2qk z-`Ps!8Zp_LWqzS-3a_qKjlI)n>8|Z0nzi(yttgERzq&p)M_sEu-FDX~xm5Dp7-xS} zBuhu&DX+qO{bK6ULA`!^{qU6QQL1yg{}~$5;UiYOSVj8Q^X+0Qkd2FK{O}GtM!mXS z?NVw~V!gKO6{((iG`&s?jlnRj|2#5hZ_#|jYc?~N7_g~o0Sr!SSdp0DS&s2x$=kK` zOq<@wSG+boj15O4B_^gf+fu1FS>3MI+UrTL+x${^Sw^@E_%+{6C|$UK zsZGM`@A1%EW-miWs0sxLrKP51A4iPZFSaUXeIkF!BZmZA&jhzrlo$ zWeEG*8y&U_t9*@Tkx(mm`CR-Z4#VOWuY};~1gPpLx8Vd27i4NrTwi)QoAm#BxQ-I` z{k80ZGen`2J43_?K^MaeqdVGNB-Oi5c_<@qi}f;VTNu7m>$jnyu(nv+Qa;ombZpLL z0D00?pU%-&hmUg@Vq_~zb^?~f`xtiL9PPmu^;+Eo5Tsaqgvqk!9X8==ET-veL}h%n zoz>ZdqjhB`959l@*FWz0PL0a)S=9Q~ggvjC@kv@ERc*1FpIt(@Fuv6@3gtO@iSRdL zoMJ|thpD#Gqta_ad8^wsrSt&4{oZL{^Zas^$wQ4Df-fEKH-^4BK2ZXCeZ0Qyr9rt+ z<*mjH$9mUM+qpYA014#%2K9!j-R~ajNQVliT5THCMbCU)I<^KZz<8)3#x4n@r$9T& zMY(Pr&z6ZfH3Td`W8iMp+8~5IwF}N-Zg^}09fPE~4MQdC2Wv{W=3+HV^vnMu|S}7*$IF`ppw7Wnnp^gHLlerUnnJ|76EN2r&S7(9dF0%duZ_ z_{81)x_zT$ln2^psN`BISd(v-FLvZ>hhJi7BCB>E|vncXSK zdQu`D`W;^|J@h+64u{2-U5`#rf4z3UL?vl{)hs$K{lEh(Dmeg5K9)#I>47WhWNwP| zCNLsfyUvoi;(UD5c`Ov32A`OE^DI_-lzoF)GMhgtsai+j~^7^^ybD;7LsDoVT>-vdYsa&LeyuMF>h+h zZHR~%Hm`(C1LGaJy)XH0&IW@hXlm={IG51K+EJN0)ocCmYO)b`(|<^(+tt6V_&T3Q zEzJSpUg}zLDk9Ie#w~w6vNC|>bDcW|sD=A7BbxJ>b2d<8Eyg5GXM7fC++Q+Dn=zQ#Y7FZPOeSD2->G5n__8tG#y9u9fHo4jTfC!TLb1-*l8S)3KRL9^E@$M# z+{ADOA0q|Zh`dpd`Z7Jal#sa4Q79NUbo=N!p*)!f`h?+iybOd8EFJa3^O^HEjD2Nc zB4qnrW{p&i4#lPTi}n{>vn-s*d$*D<06xnQ&j6zXe{$IE2UoK#L{=#o%M~p-)DBZr z_MSQ;7WoD7~!d^tHCtU5neXh%8 zH%MpL&Rsx#q&pXycDAr zcxJ@_vEGAZ9%@*;I>4y_gDl_m`o`3X`l*@%8$JQ`TU7*^CmDb8X;@r+bB4c=V&L-r|R}Q zh=>|G&0J%}gi}AUj;DkkS=c!n6d-~6dEjx~Fq-pOYtXc2H&%?L=?|K-sNm!tgrGD;|IpXXfb=qb3A@5<26IJ#JLaX|0~}t*P<~lU)QH z(0UbIoPz4?Ryp4-B*grx)eoy!QR(}#67f%;56Ep$hB;*_Yp~a?R`#e=_k?hJ@(uT_ zpC4xPUC}Hjqp>7$+V@z5_ApR+e4k^zupJSn@~NeGKy3&}>DEo{EGhc#&b4qFEy1O@ zOV>+FC@T5RNKIOkM6y2UnjINapaKjSML<9(5KGPYj(`q~ zuj^@N(RTHe#XxL$H;@h&rap)O;{<9nLp>PVgen=Cyd*_dGlRTT6_|e+&S_XS?6eD@ zh|}Y4E5E7yQ|yLdi?;shdGARtB!;S$BO0eOiasN{@QXl%nZJTW^rLN4!_z6^za0V$ zKu366qR_mlP?s@|lh1+)i>fYuEW_$;fh|gL_y{bk^yD29>J~i&J2hK%<4me1pryaZ8%R7 zT{&+Ubm{vol&Zrgb#?ncMg%EW;18Psh^j2X4@bd?aq_Q!NegS`XLXT1u2)vWc_!2x zn1w_CzF+jYgTU+9_p9Kf_48z}h8Ko`xl^^|W!QqCCvH!I1`rD2@U)vY)Ex|F8s9}> z@c9H21hdKMUn*PAqHn!yJd8Q;4r|gWB~x|gd0=+EH3xSp2F+;KXK`GAr>PRn^A#x$ zFFY*y#94P|Qh&V%Snx>tyaH!*rH26Y*L2QoynS$As*dkKiZE1=CRK_V48lU&FClC6 z9*0|(AznOK3>wB9d@~PkW8Muw+;gU-HG&XQJ8DN0wuPtgoqb_k^HFk^Kv!Pk5!q!v zXJ;}15gzl!z9%ad_hdGO+cAjQ;KiaiOjXB{c9hWRVwBkocewZ2vzOx76fAFM{Zo z7Kn|kw`^+`tkq^rzh9~<+o)Z;Uc(I_g^{aFy*E63@2>2uoo&?VtyrXU&&tpAr`vYF z$}X)TuFd!S-{^_6Y4r`lTPLrq^H}tZ;4gdH+fMEZ*dp4k-^JB#t8Pz~>ifSfn5qpd z)#%eJuuS6Ob!m4c*A&|-`r0qL?cKx(N zM7458B^!f9~W8>W8DtR;Y*C}al?KvFxVid`_wU4 zXgyrLGV{Fs79#BV8skL2kEx*rSpeVy_#}Qx?yse)?R7Ymv#Z=Ls!O;}MA)F{Vp;PV z)0T;_hUG|;(N3qI;(eAl`iF{f<+Zf_kD8)i2;{SihSNn8{hp%t&e3^o&Qk-&@6r0l zH1rc?QiJ6JYfCG?9ymVAb2e6E41iVW)xdHsATOR*F%E#~H*uCLOGDGv8GgU8>|(+8={O!O<#$G492V;*3T9dHt3wq<%w z(UiH&wyf*f;#};A-@vz#9AO1;tpj$9>I|RiI+@V|Ziu$F9V4QDF ztHICy=z+T|%Xd}20S$#E9$jag(BlUuLQ8{$%cnQjb-p~jfR%M#I>cq646Dz!ES!Dt zy*eE4KG`Y(Tc?&@pJ@4G%qZc~;G-O((^qc1sFav=s2nCt`Vbn-sd2+g{Py>j0&G=k zBsUF`B|yR*{j3?J0IkAf3Z6Zkn$df_Zli!DrEPf-v03Z5VE{dz|7MV2qXM+k@*KQo zy>pKxO}xj-hiVqd)LZV=aB|EYdY$Ez%uz)jL_>NvJeIVHPOnZN4x~m#92K| zbYmR(R@I<VTUNWw`d7fndXP*-A5Ys9NR&8@hw~Ty&Z_(jMaESymwaNI zF48q^qgKuUxgbp68n9$iU^X0QtC9M)c;lw~R&uV_b5RS4Rdw8-67RDwY_A(ccZqT4 zyUA}wVZSsbTuEzY{0>95Mh~vUE8Rg}_HVNixjp{zY_-Cehu?`TtDDIQyQ^0GC-lhH zSaC#x%~;%AJDsl>Riq9&h^*p75=>TPm`Q!x9vq;g-zBjB))p?bI#F73U%C8-u#{TF z>{b9wsI4ZqKpiODQwY_#we7Lfb4cGbg@e^Cjp$?|<3DL-xx0r4_c~@n0F$gFkyC{= 
zg2i<2#T(WLQ(8qbSsQO9mZr*PeQByPzoTxE)XPT`<7Nhu$}Ea3!DKs}|-19K2kLA|`#5xBc zwDgwg7QvIs8beLc0J{Lb<~-g=3u458KRB4mJ`_wI9-GFMCd*sRq%!?{7n3D8D@MF8 z|IymPOu7tl1E2U<9)d~@Oc#2Oc4on?%|^s|IYyIZoGc~qB)X7gd_Vmxj-C7`AKKWb zPu%_+?gel==fWbDlzzfTq)lx-kYR_BiWTm2I`AL zpgyE#@}GHVPrjWn&Nh-M89Vf-5Z;$@Puf|RdTpN{BC6!Uim5zwtlAZkEOlh*0SKt5 z)`^X0E9i0yIXL;V9%?gJ!@MN6eaxw<&Yy>@oMWlm%EI0Y!?4?Ql5NO0 zqMVdqPmbKkLh%ozp45nyKVDcqM0{Sb`1}JjXt>_@XEdB8BUB(stt% z%-?_g)7mPVFv$W=U{0wlrXQg*txB=kv+!zul-q1kxy|&ZtAXNCqN`@)Uqr5`QG*CY z-i21a#Y7?WNqbm)dDq8g+eD%{a856OUllW)koYSfEps>dfH`aMi7;hjrT|Lr@LO$$ zv1-!c5W?_f=D^ie+0_HK5Z)0^whuk?<(zkWE!v8*-V0+YV9=xvK+t9Eq}nC3ItbYa zOjj~1)zYFaheruXa}T9jDA`bR(-ko*^|D2n*fNBB@e53jc*2 zVp7djCexVXl{RNkrLhm$svSX-BPOd6ny_cIR^smDcBnDM%@@h(5v~!5d)!Fo%o^{< zzfJM&t}V}f))tL)v*UtLUE-d3b06n0e&Hl4v+CIH#2AtOe8nQwPD8OS4{4ciHFxM{ z-rAG553pWM{15itGAhcp`~SW~3_wJO(m_CF$RU(eTDrSSK#&fRp+lquq`ONxhVBqV zQo36Z1cs89evX0by6g8}zjfWudfq&1{olA2zL=TwJdbl9``CMbKVRpF8RE2+x==ac z%%GBcWHj`@&It--H^rS~ga>!%g9d^M5BFq1XA+=K%8ixEIGqh3BCFbTLH9;W0JQc2 z9vs7CIN{yQu-k4r`L#{VT&-C{RQ~X(avELYWJ#;72t1kd17%;-XLwwliTIP$5=vhA zC1lbyZ%jDIL2Q?A1&s!G_HotEbQq#OMJ*8(sNO)fz`UhaDYe3X3sa?b?N3_0q%l#W zvJywp2k$M%DYxav2r1HVA%0Ue2F%y!50)LLTe16n4AapL<1SAGhiYukC)ijjW!kbl zdrDQKE1IgArvuJaUg!M;C91P2_b}7i9)%!B;1EVJsrB#W>}URdpgP{MR5$Tyw*YA+6PvX1b%t#(FxL*Q zF6c=EBM5bnnVn`H&SA&&)0*c|#CYJC;is1`*nItL@H?$`rIT-9)D(GS&Z~#}Uj4vKYgSnOn$bl8G~hQ*p)^F^47sw1m1&J;ve=$0 z{iE-yJT{^9gZp9zDc3!IKVpi`-9CbNvrDL2l1hU{`zt})z1&a_sQ74zGLY76KWzC0ZiVSLdBG^C{;(X`4>RgY@eNDL z5dX}ePh-D78A2Lc?v8<=sT8FZYu2K^R@Z4opzwi~?Nb-fgAbb<;1K?HFDJ@pwGBc& z=>M?iP>|Ck6=-3z_27A)o&xS>jg>CCnopgw6w669oI|BZ?TZ-!?I<)7ug;Y7s1aQ~ z6(GG+qv{TZu%27VIGs_84n=D+uH*`)0JDvuH?{!nfY~p`Y#!l7g@O%=ZCFm%c;BW% zbkcs5Ysh(}y@~Ba9!OEX*Fdughv1tn6eleK9mRGhe5cfR7D~&A#{enK{i2?EK!9k; z(Fs&*eb}dkxjRkSUsLC^d`uHJ<4Cz26)oYX0X3_+P5G|R_}qaM6`@IJ!rB@d9_(|LQ%_MDH&+3F2wi#1a zRSrCiQJ*4>VAI55bK>(yKA9#5qTGue0^G0KDmcs8?g??T-4pCp_iKF#Mo{0!ryp3m za;=P$%=Smh&9*~XA@x-R-@Y}oi}LFTf+Nn_ld6Z<%$P4`Z6PXd&p<$43_>xVuQOtwgW3;tZy*3{1Tubp>%B9y#<9KaS88Rm4_V zC>F#mgF4da$=*^WILPZOgEaXIaL~deDWeGT+ew0^nsW7oar>WU+6XQxYXWq`(7lXS zWj3hroPQ4mo6qXlsv0pf+rUek4$9OQRO~jiiBNN7_0PB`7@RdnjNtsF@l+|j4+w6e z%m@^&Mrxr<2@D>nM>46&&zQ26k3As`41!zANNqhc{*2XIVBLt5Mugij;^N&Dv7aN0L|B5QAY+zV%;=A5e%`=oiS!f z4V1}XpmYb1lev*A)1o@0bZqrk+gIa~v{7P%&gq=9dqGr+zRrS)b*N_wqG3|0nA@{F zU$DOa}>8CaZ9H?oxp0_dWi&&*8%gS8~_N+G4ux5#3_gSp@;s8_r@Ln%u-eEUYanZV-ep_<9~QD$`XL&q!fnu2v?of$}@Dgh;w zhJ=$E19f8|xwa@ob`=RhN`CEBCGqeYtGTgSpYhZX<C1psq@_`9@#6 zb9bGQ3^Ff9kZp8T1aRfEvzIl(3qR$pB&~qPn(S^4JQF8!G^h*iL!; zC|@L5vgQ4Yl&6@xQv)o8V_ZQZ-d&Q(QkK6fzxiaEN@*_uv0hxzyQFJQe5#_g+~k>C z_Xce>)pa_KRA~2c^EOQoGOM)$NldVNZD4)*U@dFQdloDd2w%-5QVG`;oe$H;AogY& zN_kLTF8*;q$-)u`-q>o%@jkM%ecf)$m)+&WC0HzS5ypmTDyUn&vA#a{b|hEY@XxL9 zPId_TR8?fdQ!%oFfkR1=*8#Wp>5&<-vm<@@5sfFcwZTOcg@i- zU!_*4;6QE$x=8v$2j^8xcoi;W^?aVV@#efNLZxeU;W~-=9aR=U8S%gKVP4UXV_`1i zDOrL)f%f3nZ5{ev&uvDEyABOwDs$by;lUQ$W*>3py z{kjr}IIaf@qGsMNyp}k-hqO$MxPB?i$c6w-hI)tzi5;+gXv6<^ItJSI0YF7{gwuJ4 z`|Voq8({|1XY8-}Q707vUy5R-u@1*7?*i z({QIxEw5?~KZiSmM8F(~#7s)_$KKp56T)Ds-wo@i9aWU&F~sB9l^bgTo(?=8I5pg1Jw%-1K#=|@UbJDktS%6N{avoAXYDWmE~_hw#@rM8DCYK~MSn9Snpb=IknUEn9|BXX>;SbQ79`zDP`JD3?Dt-DX4F$*M6we$X_ z^FP}DVgbO{`ULS9%Hj+`%E1Cr1IkX%8=tDMwk!64f|ln@%Qy>VtD=VIecDk8WT{gy z)9QsV$$Zc^Jj^=;r(^@H2O!h31Ct9s9r8_%}Jn{`l1n)B`aNvPIy1yi9T+%r7&~mqrmqJ-z?EkqO9Mt@rw~+3q z@G4fOk#Y97Kc#99W}GehQeKRt3i`$rd{=tp2eK!|F zDMTlrX8C%c7Fvidu8h<>A23Ti#KL?C%pQPj`t9;N9pK0KK6|a|M-U4Zr-o-odMB93 zCEgGRek!<+WxW~DlOy@=?pkA&KCiyE)Y+30UA}hVX5j zr5q5|bF7}F+|dI=S|PN%cU90N47wkFx*5zmpDfA1A9221dG;#>7aRiaugluEz(&hvA0vz@yhgIo6l--N?4q#wQibAcc 
z94&MP0t5~bI(Wgu^2 z0E0E`_8VFx9F_^v3D5eeq`}lVm))&L9@{L9KzPS)HBn8`gT8iu~?&yW7w%l?m#0Z+FaJiZL(dGwkVM*Rf3{S3z{MfgR{gH;M` zq#=I^n~?i^=$ntv{@ca*&uO(k*Bbgg20w=)=)Zl%{*pla^Aj)3TRJ~b{NLO3FMde> z_cr~lfc^iw+r)4yM|KfJ~~L=$)^WiBl|;+W=rfRxxOOfd@qv|juZLbwkFk^M|&iGnMYGVJw{!_ zl0QkG195jI9v>IpMa*JDZxA@#m)|&b_usss{;Apc>vmuGvnYwwf$|4undXpAqyFiX z&r93hXuAVR{I`ueMvOD=l*b)-wO(Jpw0*0XcH5D63u8YeX_AQNJz3+v2wX!k=q&u# zC2Hi2=)co!{$pnYA^HN>d;bl3bJMt|K%u67F9ObSm z|0#0e2%2KTalXjwlU$}mn(D`FD&o3Rs{8K&sBM|FQeqQ)<(=DH^<#H^DV$EW|Nbhg&sp3QXk)lzPOi)p@K^% z!P4rZPLV*;krvoyc~=xUE*{hZ+MGg=`r`et4uJ;f8ulb0zcBtUSjawuo4{WY3g2sZ ztVX;3i;cah)3tHaBEE2RK+xgLv$|rP%4`gkS5SfxK`V35X8S|&{+stS3&fN5MJ%`1 zP{O2JA73R}FUDtd6&%zIBeZO1g=*mx0*A%;#kYhpOFI^$XJ&>0Rj5>P zyVae+(-X${0u$mheOJvq<@;}+@>QrTVOrz3Rjex?7stgFM`jo&97eE2H0Kjex!z7 zkZ+$oyQzZlzZtu!@PZV9!c6R<|KO+;d~y=^cY)*f*FBo1ZL_}(*;#v0d2YRhyt&To z4?+eO@bl@lRSYY6S-NLVny~~DXbd;z4c(vxf@GTewY#PH#v-Y1Nu(2PzBd#O59*l* zeP*~`@YS!+aaU-$+8%sMIP81zg)BKNk^G9q?$G9WKs} zc%oQM5)c2yLGpjz6FRghf=LTWsqSJSvC)_?o|M*=cGs-xq8F>GzTX_3cQHt~*Km?E zXO9ZjlC$66e0{>}dCW0}C7@wgmt8l93U-o{ zj|)irZs1PM9qRhTGL_VBkLFzS+>$RR3wL?Awby-jSb%FccXPyBi1^H$=Xhmg_z+zA zjo#%%T%+A1qnDG&!gFSWok1c-mw~rS2gT(YNYqj=V)don3TpCB{?NhQluJj+c42S{ zK2jHX_S_$j{%#;8%KzwKnhWs%_A{awv0o}ItlXhP-Mu|0PC4t=J)&p!L&xnW!d+{k z?kDXH2pxs_{V@FevX|)^eQ#2C+_q(xt_f$#@qKZdriibb4LOk9mMPIF-Bf2;`ndqY z>R8S(Vrx+<;V^LhY)Q7n@N9H=no;rK>w$C`*UO|(ZDKc}`gWmf@aYM*O6uNK{IGEo zcM>z%y5q!y*dqPkTO<>9@%eK`>Bqz)qiqCD60UMtG8apl#s_Svw=ym^=PylwuqO7u zvh*duxmE$Xl}V#E-s(tGJ?=;sN=x5El+|b@*-%gtzQ1PBwmCe`bZ*=Hm}}X;_jL1j zqYajdTVH+Tsmt;0kvvJs)ge1J5V6Wfql{g{6LbwLKk5qWsLxcTS=sHq;cziIKeZL! zq^`z|{1uph<->cw=8S*ctoB#8y6^TA;vCd1&7nO)d6YV4U%oDDaN8@|BEP6c>#9M#u zYd$)|eGdFe*o0zg$>7$HIp(u6hNnmtukUYJ4MEKz=cP9D-P5MGW(ftSy?AO}ejQcxzN>KC|FIRY76NbF z@v@oSbe_yyE#FD6%TQ(?E8sGjHI-$OZ<3h$()r!2v8b|*KD*c= zu7gr-yvh5py2<+(Yk!FAt+``N$5Yp~H|_THHU{uxeN4FFUu-ftDs(`3c;OU+hptWj zy@2vwS6K68c|2b2Kql`vu`Z3O zrWn7z9i|kjD{S{&Wqs}am(iWZtZ!!{Zrr=C&biK*0>Oxd@=RSFlI>-IM#7HoWMj#B z9Z#(Yq3HTlzneS1YNL7D&PjUk!L`F*_+h9539xfMIpwrqocCZ{{e{*H0PT-!_cX$q1cYE-J>XP<)geMJuJjcvMVW{bG|JjTf4osmJ(U z#;)v~jCudQ|FiSa%18CpAVh@n%^woFnH!XI!gU+Uvwzh}{6*2bF!-wRz}jofZsT3lIUOcGbUW z-oz7io1e`aj=SugUInp-{0izq9`2!qPKnzueS&WT&J+LtdM;K%gE9v`a1em!tN9E` zEvn?q(@)mk)ftH=TVWOLO0A)ur_2Igg6sR~(xbn)4g+zL-xlpan4KoV)?`Gl!AqBz z$|;b?X{|@=+G+`_!IOQ={l071OC5{ss?kziWSnnbt&V4SC!c@z;|Z$Yl8mqY#WmMz za*QZWx7Th%jy48t#2ee!Ymqi&gEe-v9zXeGX}{xV;|P|eh!7@6{PP!GAMiQK&^ybZ zl%0N*c49W=Sku+X@k2i3K1S&jmXpi_-d3HXh1H@7Sk3)60=@Y+qN{Wl5BOH}MNg}o zj-xp%sH;!TPj_oi=!~@3I&Bf%Fs!4($_Em&{gLHE_O~+%t0*rr%{|js>m%7jHpZusGUnTr zAIF4tO0so@cOV6Izvj3}76oQ^Q4`JklO+WO6ZxDEdh?OdN7h^c4eEuTX7NtCL;8=m~F^q`Httef4~ zaFX+dblz|tPm1v=#?qoe9!s7ek%`Xanb*#f!3^!U-A;InvW;A$4YoQa?Ancy7SpwX z<;G{)XH~ix-aAq|n|$Wx)0|36NE@dVquFG$&F8YrX3vmxrwIo{Z*;!q*e}fe%!(c& zz&w;#odb5QPVZzQFYh86RDECSa?42nyITgXc+tR&_jvWs?2U@AA_Y4eUXQO$FV)3q z{X+UtH3<);y^pNMiF(ZwDKxlTDMV*9#om*2Y^)QgJo{E7Y6omBH!LO_TDPB+T6frJ zS`2TFo8Ir2v;bS)^SLgDo&bNLN8(gSx)&j7dy`3fdbmtXn)~J!UxHTj%s7Kt#P?Uc zge$}=qui9WiqgEd=WPx{Nzh@D+VG&OEZJy^w zm**PHzZ;AmVW~7nZSHxNuXEJem*8?CllFYl5D@t~K06QX9|G50mxb<&6A@nIK7p|{ zEg*1?<{J_DY4avmwfEGU)wwQYR~D>?PjJ2o zTsx_yHu&^JYV5>|984 z5<9JFM(b3Mg{=&Ak>AV~J(YHgg5h;li<$WByDK2=*C;+Roh=PNK$$z&%1P;wm%DVX zS*_I<=cJ=r z-Bizz6pBl3Br1yut=HhAyqsUN*4MnKj{qsZn8CU^TEBZqvup9~Bl`$sBu*vE_I3l6 zEPYA(nN?ZfiN4#O6vJ61$PC%lj1c7e#&wfz3eu0<*f$26skqn&{lAARoy9T4kNfmY zb}o$15`BG^^RY$;xFQu|DPGnXhN93Rq53i;ToL-a83nx>t%Yo^;u0i3LSf`KNE5Ho zg(tR!RjpS#YS!+y@0A8ravf8|{}8~_Lr8+$czQd-_9A!8=0V>yyUUv<2%NY7)=dKr z;M-x9VO;K`T}02V=ma;(tdMTJvqY|n>!F4RZf4_sUuwoR3i7^Oso&lj-H(rN$XEa4 
zEkXc%bE0mB+-25!75&X0F2DJe-+%GVoQB9AyS1dm5}arTWC<2aL-B1!dihnNJcDTW zdgEA9>lGt*$|f91_PczYBH$~>Kh@&dEzpfrcHPf^kUX>2{V`>-kNt1WJ2eyV@wDrpjY1np zoU@AVR2}`*sggt2D{rM=KIzo|*jWQFsotQ|tGsc<65Izb3$}j2FD$X`3#FYxbs~0bB8vNRvQH6ysuX z{8g3x0|W5_EE5x@yZI5V0YiI&;Ez-5ES1Z51KlU`x$G0ofeZZCLZC;h@OPlWuLSKy zqNyB4?V7QWM}pXy6jv^m_&=_}qvj#{Xp{>s0kolx&}}HUzuHh2p3Gk^o9EV`dH%0S zE&q>4Mb>iyB%HR!{am&H>!$%8gujfez<>PuMKENQ!5a4e;^Uv_HDi*no2$*=MuTnH zEhbpeXb8JgG!Phco_l(B08{#(x9QG1{tWbk4g}`t|K*}`xak5E3U)xmJq{%Q=4dYx z&>B2WMl{m#=eJn5!$-ak^g;jB@W++UN&mQw7d2}*F8U221#30~ zG;2@Mvk82EHEXfaU1GgID@~wF{I5l}l&w4W*i0i_!6@QLmMpoZQ6ggPKltApRX@xZ z+C%qu2W5{FXFwO-98ed{x&H_MI|Kay%RtZdzg>)E7%$xpGwK1Z4|IIF%|;6oXN%QK zU#t7xCS3y}HzX-yNS@tt>KS0VFP+rz983+`0YY%=pYU_JV9eay+_gMpL&->i=JBy5 zCA%Tey;cA#m!^stCo^Y&qob3jM1cEM4twHyWUE={P>fg|>BjLlQqeT~3sn1qC%W%|V&YY%QJHz=wB3%on~yuL z|K}GsNDYv6+x`VWiLwtQ8m!qCuU>(UM;xY(%a-slV1O@Q^TZ)$+=Wk8SeSv?nR;!2 zcc}oX<$YRopcuJ^j1m97J*Cqm8) z5elEIbV&3_`F{PPBIkPpgo`>cZ9a6Y|3^2x8GZh`ciED-4JIV`=WAD-Sz0HTs|PJc z=C4(J3;ft+)SX~iLue#YrziD0v_(b3)FR0s-@Z1jk&u?ed zESW@=c4iPiYL}9n>#Swqgtx`?Jc@ioAKD5?@;|p*8}q63Fu#lW@HQ>Wdg~boV*6Y1$!omMifEkW2Ve0@z)$ze zBCnUEnia*`qpPpX*satB3QxorJp?a6;$U_qluz)gwx&!|CvYaH|Ko*#S1<4Jx6L;J zL=1IVVP&=F04sOgzqpsrMD<~nuT!0Xq*-uYNG+Wlv(W-7s z4&3=fC(9}VWqi6ioXObrZ|Ovxo4EbjnqqZ z55-C8gaRat!sfKZyaCp5FxSSjV=%6DQk$H)9iwE4bAXKrU!;te) z`m)hYCn9ZCX;U9+@aAYGYw{`{_jkDbaW5J~W=lx5P}oxGAW%;Ko?jfiED>H^;Ge&- z<$fc(cWPJgl`fUQPv!PnwPTDlk1JT61%MOOkbs_-wgW8648~GkY#LwPMSyyTfhvFpTLHkqjUNMjyD{`NLmh~Gfct)dl90MmH%!{%qK(F7p*F9g(r*)9OdKUSxy$<7fH zNAtGpN29<5*kr92-$IC>R3oAXt*SnIFj!sCW9VlAGj6^pa7Mjru?prpt6Fb7KclkW zfe53>O2Mwlq}%UW3U<)5vvke{EPk@g-3QH3B9A_-K-~;IQsRp181fj{?zvV;b@khKHmZgutWxeYn6wId+Vrjd0&?Kk30D; z_CUsQINEr&NKCVT49L*y;3e3@P?%vw?pz}`OP59R;D|40Tzza*4wS{om{dNL< z@%IqNW_fyRbujbqB@SK5mGJ@$@iyP}t@|wJLrt}S^?z(bP$8~^Bk>d=k*zfzF3W3D z3)o87Ubq( zWENzoI=cxGb^yy+1TKFPz&>?Y!v_d!rAFnI4DM|Ze#KUac-b6RRh2n*WEL*-*v*mu zqout?r+8<$o%=j@II6u8O(e&jw%>_ts%aJHPqHB#khQpntr~BTm1q}7o|4|gfV~-l+v|(IsCsvt&x^us(5s zRD6ub$+^KfJG|0YpjkVw0rOG!Q`_A!4MY^7BdgxfmXK1K4n!M z7H%xwd*53?sfyF_`#7K;NX@hmK$((Pw10y9v;sXjnaok3QNI0N)6pNDEpKD1EzB+X z!wY|5=s89Y9#$OwjGs(-;ptLE;K;6shLb}9g=GGevZ`Cn>U1f~- z7RKHS{?XLBA04oxaNhGb`ohmh8#+mT7*evDtL|RdXQ|}1ZFC3;i-9t=x z4b#aU(tU6A7w{x7I{K+-RMRq4W8YGFI$?&~BfD8%U_bT7*W^2Y7;R$`+XNc4IVAR& z7h=`58r_^MHr7Aw_Cddu zx^q}}JV!6=PUL}>|ty0jgg(cqq7nBu)+Fu~h}4AT{Ej#Q9N z^<}L3j&Z)8XEhPXKG9y8m>npnUjs&Mj~);Egu}4=-)bzCULRY2jzm+*xA~7jk~PEH zPHkGVdAecWxD&8D)kCPEut|(Iv2ho5*dI4CD zk||^B7MFRC0BhgzQMCi=tO$tO$G|4#uR44WRL!0~y!QHTKEWR?sO#ussOPg==9rg5 z1L)4VM_9$qGmA58bF@QB)nWSi1oP-Sx!Yzxg~-~>D_nEaSeqRIuBifK+~<1v%>*(c zf!k=+yt#L)5E~=7=R9`6;3Eb~KJ1XmeROX`jLaKTjM>%eh83Rar?si-Qmghp-*U_w zikVWyY@=GGA}VZaIjM>in2|5Zr%gW|uUomttI2}W=)Db7C0B5UtB)oK>`|up0EOLt z_Wd0cu^3Mqtt*_KI3x#Gq}UAN+K5rJxD%26NKrpcLF$oPYcTnyXbrqYE-dGfQkw)- zFdSu&8@N)X<+?ZAATQR21V@wx3KW>_-fI&h6?%h~7rQ2crB=zuzO@&pnw8bVfMCpd z1n(GN56iGn^0!o^1wOmlAnv<6G=1Rg3X>xJ3}NW7E$}lcrb-?7BpxocB*$o16V=S- z2K<7INazC!JZ;gZSd1%v%C@} z*=pnsGMjha4Aw@?kPly}0EymjnQ#YacOJ_spbBg1Mb=$E0pGs+9v@Ldsov;zx`v+L z_EVeB4AOPva|XoeA9*KroYf23Qkw=&$#p;D%v8j(f-Dg&tH5h?C^R#4^=4t%a#4N* z)^bhg6T#W5<+x9s=DdlWJVXcBvR7rV5)mlCz)d}Hnx0+iO-}wGcyPn@s)#UA99~l8 z?kSar_|#A_gM5Wl8Y{y5h9Y$_=EEa)X=X$meAnWaP;3O%XvxHyEREzXL?fu5LS$V9 zCBNq!zh?C4Z}!GHZ4l>XD^{k)dy1#`%$88LAllJ zOmZ6}pdDz>AyF%u$}{|L$B}M(aB|M)`Kr#5LJyo;SG9%HBC1wSU#ts50+vq&bu2th zVmFu|o(m-tPRYZ6;7^-T4oEXQI);A`eLo2stXJA3g!H#aJ0g;C^wKHCBuuT)J8T^d z#|l|G^J0_oBR@eB!Mdc(XTjinojwCvXcgv;+s=+Al%Y@zgkZ`z-E8Rraz&NHTaoUv z!t#(3y(PW7EL{HhpqmlbB2C^v5E5bNw000Ks!E_)C6ZtwB*mymJp`!Hny(OdtcKL? 
z#fDa14?L0kqCS+S9|k;SIe~WGZF7=(%|&4T>KGanv2KWqH9ZFuX7CDX%cG%*-a2{Zs`b z#YF%!&|u)Ecr4VO@)qqdtg7FbG*C%`8OswSs4Ij$A83&^0*OYrl3OYvEP18M zb6Z2jxd+4ygU;ccCc}R8ibi>LAH`474kFQhL-2(?tSP?0qmgB+SkAh=nKH0x9UWXj z6p6RfDo$_DKsfwN76=70Ee30oIKNWH3^@NdJ(x}I4uh%1vYAC8Wl6=Lsf;hWz%agR zR+5MzJR)shdNURjfwU{=-S|^d@AN$TMBN_%C5HNf;#b24?-F zw+rcjeSmbN@gjq1@7sgOj$JV61-59m>x%ra5eRP&$;wJ`))W z!{Rz0nt6l;imjOx9MRrPE6?+teXbj~+?W=Fm4dUuQSM%YH2@y^cK;G!^Wy=yaklg( z7U*)im7XI9?x6FDZeWE}m@L6y zdmr9kAY{HBk>(uj<`u_An;Kiu*{bu3j3A&J?F{NxS5NHDb{-j}!2&@^U#C zI-ove-&Jl@R4y?DcX*q}f8U<(a%XLMN5j!Y~8SIC5xWx(Ls@ zFO+A<;jY%KDZxjPD}*IrEN19;+I48G$lctlR2>WVDL#fMwkLhZrJ%L2jTM3CY$-%B z71l$2+2Uj~eI9MmeqERrN`t(KAifv)TR!~*G+?+rT?E1=v+_H4j%MOcqDoO%MJT;R zx&kCi0Ta#{%vK^9*ik+2pLZK^(uwnN>W|d~rmP%2xj;Y8gW(WDD@r(ljgn64ZsiBkmU~TA3Xj%Fi`>T3a`|6tt5;hQmvlWN28`?W}fWT ztN77-=qcm&_y#O0Ld@}Or_>qn3Z5Q=}Jlx2_)uM(Um zv?!g(+MT&4rK1dcAHH%%G$Gl1)c2ZV(+rYbZZg=442u!kOoSAPW0DXS(s|5?vWVf|n$Qa&IL++Ut;Y)qCmeb(oHezL?j zbD9msV>OiqfKMVQ*3i4u>(9SV;}Q{uFMI0XTIyj+iKXYXupBLqWRrUP__a}~LfK@p z!v*)UryeeU7)foFcbxw)3b5(-EYh!sJRFj*uBKm$<~RX5`yD6Z3INk3U9weu+k!>C;|~55lKSQiE!vJefT?O+ZIzDQskqL@9G*U z{ay^kk_vdY<9l*eyce!7%}&?WxiwEE|FMNC4m6VGUs$==+R=#4Yt`GMc$D8ygW4}e zzCN~xKYrixDR6@;7OBTB&JCJgwpRtLPTgKYlN(u^2QEa#v z{S_X|WUC})?2qmv$ml+XB)M-NQEbA%q(Bl_n2;!xIy}6cej`$~i&CPWhyIK)&F<#3 z>TV!?uf?3}3r7P97*(Q?sD9It>8R~^5RWi}}Oy|%R_KC*6+xW8KK-XPP_j3J_ z_yOtkTiDUyaUg(GrVy2Xw-C81zUvmzA}BynpWHET0hdR%RL?j^$WBt}Knc?Ax2i(q zOyc4|-*5dxBj0O})3pgEi7p}2n@S;{FGtDbOD$^N4=es@|ND>$y>W0~ZXD@e=!C5k z;7L8-L;u*ji}bl3sO1;WSvd42jeMsl z@)*_2ZacomVq84!{Kgk3G{oCJG-o$ql4UrukQ5Ng?wP8h{H6$iR zLF_mE7#2+&NE5wG7d`B)E1YN!y+-X3Cz6lizNHgrV{xMkT@7!+%J!TG$AFzQd_U+A zmRjdcZ9gbsl5p<}o?rvi2b{pIt)m#^19a>MHfKBOr9U*O?uspA`thBoo9tOq+n*ga zUlkb?br%(-SaqZd#%UtZmskR4A4k3pL(Z3!WI@hP6>n6FaI7ASZbnqs{qnZ2*3!|n z8Sb*V84>@kE>Ua<;Az1jZc7JC$(J*HFX=vBtbF!ab~H@;>Rdc+64MkMcGkC(LJ;6T z8@53Si39U~+-1v?R59FFmusaD?nU06W?!$xL_pOZr3P`=wFp{y*>v)BSMp6XK0K1o zvKz23sG#<`HC!*;+os!ZE#!S&qutEAVQ}{}Ym@&LhSP0{?37i3(GMH=%-WhY_Aso@ zZTSFy&YkkB?UT9JMcQFCbbHQd8*JCjFlR!$Msc!N)TraY?%kjn(Qg)xo8PA}E;HO}*qjGo>hCi`OgEB;jhIT@$}Qm<}_% zQMj|8{YEm~lT}}1!%MJD$%TZ_Ouf*7%YGxlf?FXDEISFduthT^|IMtQs@H+V$|rHH zPeI&)Qv=H*ec91vvhrv<%a%*Qs&J%waFRz zaBBaU(@Z=3t$hIM`nE1grKUZjv?LW$iTX|H5{z?214KKFDgDFPBxz5motPBH1S644 z>dUqPI4bW$U?vNQF%ju6&br{tU-STan!mU8QGWVvTI4R+G=l=9WM?_3K`p7Tjf^9pvz$7YCdT z!LeIdPXyE+dVd9FVXY*Q1rvUZPHXb8XeyrM-7|PErAYz%ak}$IA^szs0bbwZS1y_$ zyMu9J>&fnt5^$%r?wGlO;{QRa)UF$H^T{nx->%sw_$ea*am7PD&sr@dkcC5}JMxm~ zlR1LHtgKUzq*{hWs4BrU1?w~6Om;F0t1w+k)O|GznYp8zb4Lp=gys{f#6)^c9d-Ku zfX8B5IQeu<7S7zaDL47uB78E)uSCRALF-+_iOEnPG;UEwg#stp z{mhYkM)b`@KslH~6o}CWxUE+r7zD9UGbO93v_Rq1N17Cvo<&qiyt6J(6O|?{DiiC0 z6~&CUgHoANk4VV`E8XG*astRxaIs(m{8BMXc=8-obh=zxVgZJZX>3t$#Cj>yB8 z+-;1&iNBZD`z(C&08AD{+?oSD+FhNhq0rpWU1qk5C48(1Ev)+zRChR2R~3uWLR(QG zDn>CUnS?jzE|Byja;Y0o zxTc(8Ayo`9z%b($>ywAk#;{L;vV&{Eb&2hHG2GNX$0~I$-C+2O##a1h0PK2o9dFLZ z?~!R1lNCC0S%&v8=P_-0SMS5G_!de!_6(QZ3-Ip%|4}CcK!}7`xv(N^&-h)b5f+_Z zC+%eH!*r`Xt)&t#O-P~8*Brgr>wL5eZ^Bj@(nI*SwLZ!5G$|pyEFg+Eu86Ctep5Q& zbX34LMDbE@sz6;}ikfjka7f$o8I*paHIvzGFYl$dGFv7n=+3$R{1h&;t^WmjpMpic z|IL6aM_5_MiBOTkv0z|U-#*D$bupkajLsg)~U$QXjDpQ&RvP-*eY=${ARd!pIkXRMXr3YT?NjTh7}aetH(&ctWYVzh;9F$w|NO$uCIpkST22 zCu0)_k=9y|yD%{Wng=s58YQDVZqr2&2gQKTYM0*(kD=32#0{_SxQbnAq*>x*DiI=c zI^@o4PtWVqR<$sk9r+bRiz?u^KFCeL6rs~sm3->hI>IxgX1O6D1fApzEZt|AcyI`& zn0a&;h=$E>@MM7kk>{;lVatlW!vga$4!l3($*y44F03f;4=KtLo<0~%@%?g}>)X;X z@T7K)A?3qAjs~IX3KIn01^>{zyF6(N>fO8VhvcO0ycE9C?S+V=gN1A`JfaV&d*OXY z7hA93;Dbx1bJ`5gqKVKk(m5<2ICIWO%HPY5M1CxiX(pdW!ZPs{vGlLsesE9a*o1Mj zKpqbc-QAm~s?UpQg+494dr&}(8j$};|HR;D^G1vAt 
(base85-encoded binary patch data for docs/source/docker/img/docker-hub.png omitted)

literal 0
HcmV?d00001

diff --git a/docs/source/docker/index.rst b/docs/source/docker/index.rst
new file mode 100644
index 000000000..2c92a4cbc
--- /dev/null
+++ b/docs/source/docker/index.rst
@@ -0,0 +1,17 @@
+.. _icefall_docker:
+
+Docker
+======
+
+This section describes how to use pre-built docker images to run `icefall`_.
+
+.. hint::
+
+   If you only have CPUs available, you can still use the pre-built docker
+   images.
+
+.. toctree::
+   :maxdepth: 2
+
+   ./intro.rst
+
diff --git a/docs/source/docker/intro.rst b/docs/source/docker/intro.rst
new file mode 100644
index 000000000..b09247d85
--- /dev/null
+++ b/docs/source/docker/intro.rst
@@ -0,0 +1,171 @@
+Introduction
+=============
+
+We have pre-built docker images hosted at the following address:
+
+  `<https://hub.docker.com/r/k2fsa/icefall>`_
+
+.. figure:: img/docker-hub.png
+   :width: 600
+   :align: center
+
+You can find the ``Dockerfile`` at `<https://github.com/k2-fsa/icefall/tree/master/docker>`_.
+
+We describe the following items in this section:
+
+ - How to view available tags
+ - How to download pre-built docker images
+ - How to run the `yesno`_ recipe within a docker container on ``CPU``
+
+View available tags
+===================
+
+You can use the following command to view available tags:
+
+.. code-block:: bash
+
+   curl -s 'https://registry.hub.docker.com/v2/repositories/k2fsa/icefall/tags/'|jq '."results"[]["name"]'
+
+which will give you something like below:
+
+.. code-block:: bash
+
+   "torch2.0.0-cuda11.7"
+   "torch1.12.1-cuda11.3"
+   "torch1.9.0-cuda10.2"
+   "torch1.13.0-cuda11.6"
+
+.. hint::
+
+   Available tags will be updated when there are new releases of `torch`_.
+
+Please select an appropriate combination of `torch`_ and CUDA.
+
+Download a docker image
+=======================
+
+Suppose that you select the tag ``torch1.13.0-cuda11.6``, you can use
+the following command to download it:
+
+.. code-block:: bash
+
+   sudo docker image pull k2fsa/icefall:torch1.13.0-cuda11.6
+
+Run a docker image with GPU
+===========================
+
+.. code-block:: bash
+
+   sudo docker run --gpus all --rm -it k2fsa/icefall:torch1.13.0-cuda11.6 /bin/bash
+
+Run a docker image with CPU
+===========================
+
+.. code-block:: bash
+
+   sudo docker run --rm -it k2fsa/icefall:torch1.13.0-cuda11.6 /bin/bash
+
+Run yesno within a docker container
+===================================
+
+After starting the container, the following interface is presented:
+
+.. code-block:: bash
+
+   root@60c947eac59c:/workspace/icefall#
+
+It shows the current user is ``root`` and the current working directory
+is ``/workspace/icefall``.
+
+Update the code
+---------------
+
+Please first run:
+
+.. code-block:: bash
+
+   root@60c947eac59c:/workspace/icefall# git pull
+
+so that your local copy contains the latest code.
+
+Data preparation
+----------------
+
+Now we can use
+
+.. code-block:: bash
+
+   root@60c947eac59c:/workspace/icefall# cd egs/yesno/ASR/
+
+to switch to the ``yesno`` recipe and run
+
+.. code-block:: bash
+
+   root@60c947eac59c:/workspace/icefall/egs/yesno/ASR# ./prepare.sh
+
+.. hint::
+
+   If you are running without GPU, it may report the following error:
+
+   .. code-block:: bash
+
+      File "/opt/conda/lib/python3.9/site-packages/k2/__init__.py", line 23, in <module>
+        from _k2 import DeterminizeWeightPushingType
+      ImportError: libcuda.so.1: cannot open shared object file: No such file or directory
+
+   We can use the following command to fix it:
+
+   .. code-block:: bash
+
+      root@60c947eac59c:/workspace/icefall/egs/yesno/ASR# ln -s /opt/conda/lib/stubs/libcuda.so /opt/conda/lib/stubs/libcuda.so.1
+
+The logs of running ``./prepare.sh`` are listed below:
+
+.. literalinclude:: ./log/log-preparation.txt
+
+Training
+--------
+
+After preparing the data, we can start training with the following command:
+
+.. code-block:: bash
+
+   root@60c947eac59c:/workspace/icefall/egs/yesno/ASR# ./tdnn/train.py
+
+All of the training logs are given below:
+
+.. hint::
+
+   It is running on CPU and it takes only 16 seconds for this run.
+
+.. literalinclude:: ./log/log-train-2023-08-01-01-55-27
+
+
+Decoding
+--------
+
+After training, we can decode the trained model with
+
+.. code-block:: bash
+
+   root@60c947eac59c:/workspace/icefall/egs/yesno/ASR# ./tdnn/decode.py
+
+The decoding logs are given below:
+
+.. code-block:: bash
+
+   2023-08-01 02:06:22,400 INFO [decode.py:263] Decoding started
+   2023-08-01 02:06:22,400 INFO [decode.py:264] {'exp_dir': PosixPath('tdnn/exp'), 'lang_dir': PosixPath('data/lang_phone'), 'lm_dir': PosixPath('data/lm'), 'feature_dim': 23, 'search_beam': 20, 'output_beam': 8, 'min_active_states': 30, 'max_active_states': 10000, 'use_double_scores': True, 'epoch': 14, 'avg': 2, 'export': False, 'feature_dir': PosixPath('data/fbank'), 'max_duration': 30.0, 'bucketing_sampler': False, 'num_buckets': 10, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': False, 'return_cuts': True, 'num_workers': 2, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '4c05309499a08454997adf500b56dcc629e35ae5', 'k2-git-date': 'Tue Jul 25 16:23:36 2023', 'lhotse-version': '1.16.0.dev+git.7640d663.clean', 'torch-version': '1.13.0', 'torch-cuda-available': False, 'torch-cuda-version': '11.6', 'python-version': '3.9', 'icefall-git-branch': 'master', 'icefall-git-sha1': '375520d-clean', 'icefall-git-date': 'Fri Jul 28 07:43:08 2023', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.9/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.9/site-packages/lhotse/__init__.py', 'hostname': '60c947eac59c', 'IP address': '172.17.0.2'}}
+   2023-08-01 02:06:22,401 INFO [lexicon.py:168] Loading pre-compiled data/lang_phone/Linv.pt
+   2023-08-01 02:06:22,403 INFO [decode.py:273] device: cpu
+   2023-08-01 02:06:22,406 INFO [decode.py:291] averaging ['tdnn/exp/epoch-13.pt', 'tdnn/exp/epoch-14.pt']
+   2023-08-01 02:06:22,424 INFO [asr_datamodule.py:218] About to get test cuts
+   2023-08-01 02:06:22,425 INFO [asr_datamodule.py:252] About to get test cuts
+   2023-08-01 02:06:22,504 INFO [decode.py:204] batch 0/?, cuts processed until now is 4
+   [W NNPACK.cpp:53] Could not initialize NNPACK! Reason: Unsupported hardware.
+   2023-08-01 02:06:22,687 INFO [decode.py:241] The transcripts are stored in tdnn/exp/recogs-test_set.txt
+   2023-08-01 02:06:22,688 INFO [utils.py:564] [test_set] %WER 0.42% [1 / 240, 0 ins, 1 del, 0 sub ]
+   2023-08-01 02:06:22,690 INFO [decode.py:249] Wrote detailed error stats to tdnn/exp/errs-test_set.txt
+   2023-08-01 02:06:22,690 INFO [decode.py:316] Done!
+
+Congratulations! You have successfully finished running `icefall`_ within a docker container.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index a7d365a15..0fa8fdd1c 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -21,9 +21,11 @@ speech recognition recipes using `k2 <https://github.com/k2-fsa/k2>`_.
    :caption: Contents:
 
    installation/index
+   docker/index
    faqs
    model-export/index
+
 
 .. toctree::
    :maxdepth: 3
@@ -38,4 +40,4 @@ speech recognition recipes using `k2 <https://github.com/k2-fsa/k2>`_.
 .. toctree::
    :maxdepth: 2
 
-   decoding-with-langugage-models/index
\ No newline at end of file
+   decoding-with-langugage-models/index
diff --git a/docs/source/installation/index.rst b/docs/source/installation/index.rst
index 534b674f9..5a034ef5b 100644
--- a/docs/source/installation/index.rst
+++ b/docs/source/installation/index.rst
@@ -3,6 +3,11 @@
 Installation
 ============
 
+.. hint::
+
+   We also provide :ref:`icefall_docker` support, which has already set up
+   the environment for you.
+
 .. hint::
 
    We have a colab notebook guiding you step by step to setup the environment.

From 1ee251c8b385f6dcf06da40b1760b76496b0d812 Mon Sep 17 00:00:00 2001
From: marcoyang1998 <45973641+marcoyang1998@users.noreply.github.com>
Date: Thu, 3 Aug 2023 15:50:35 +0800
Subject: [PATCH 28/30] Decode zipformer with external LMs (#1193)

* update some documentation

* support decoding with LMs in zipformer recipe

* update RESULTS.md
---
 .../decoding-with-langugage-models/LODR.rst   | 54 ++---
 .../rescoring.rst                             |  6 +-
 .../shallow-fusion.rst                        |  4 +-
 egs/librispeech/ASR/RESULTS.md                |  7 +
 .../decode.py                                 |  7 +
 egs/librispeech/ASR/zipformer/decode.py       | 216 ++++++++++++++++--
 6 files changed, 238 insertions(+), 56 deletions(-)

diff --git a/docs/source/decoding-with-langugage-models/LODR.rst b/docs/source/decoding-with-langugage-models/LODR.rst
index 7ffa0c128..b6625ee1d 100644
--- a/docs/source/decoding-with-langugage-models/LODR.rst
+++ b/docs/source/decoding-with-langugage-models/LODR.rst
@@ -4,59 +4,59 @@ LODR for RNN Transducer
 =======================
 
-As a type of E2E model, neural transducers are usually considered as having an internal 
-language model, which learns the language level information on the training corpus. 
-In real-life scenario, there is often a mismatch between the training corpus and the target corpus space. 
+As a type of E2E model, neural transducers are usually considered as having an internal
+language model, which learns the language-level information on the training corpus.
+In real-life scenarios, there is often a mismatch between the training corpus and the target corpus space.
 This mismatch can be a problem when decoding for neural transducer models with language models as its internal
 language can act "against" the external LM. In this tutorial, we show how to use
 `Low-order Density Ratio `_ to alleviate this effect to further improve the performance
-of langugae model integration. 
+of language model integration.
 
 .. note::
 
-    This tutorial is based on the recipe 
-    `pruned_transducer_stateless7_streaming <https://github.com/k2-fsa/icefall/tree/master/egs/librispeech/ASR/pruned_transducer_stateless7_streaming>`_,
-    which is a streaming transducer model trained on `LibriSpeech`_. 
+    This tutorial is based on the recipe
+    `pruned_transducer_stateless7_streaming <https://github.com/k2-fsa/icefall/tree/master/egs/librispeech/ASR/pruned_transducer_stateless7_streaming>`_,
+    which is a streaming transducer model trained on `LibriSpeech`_.
     However, you can easily apply LODR to other recipes.
     If you encounter any problems, please open an issue here `icefall <https://github.com/k2-fsa/icefall/issues>`__.
 
 .. note::
 
-    For simplicity, the training and testing corpus in this tutorial are the same (`LibriSpeech`_). However, 
-    you can change the testing set to any other domains (e.g `GigaSpeech`_) and prepare the language models 
+    For simplicity, the training and testing corpus in this tutorial are the same (`LibriSpeech`_). However,
+    you can change the testing set to any other domains (e.g., `GigaSpeech`_) and prepare the language models
     using that corpus.
 
-First, let's have a look at some background information. As the predecessor of LODR, Density Ratio (DR) is first proposed `here `_ 
+First, let's have a look at some background information. As the predecessor of LODR, Density Ratio (DR) is first proposed `here `_
 to address the language information mismatch between the training corpus (source domain)
 and the testing corpus (target domain). Assuming that the source domain and the test domain
 are acoustically similar, DR derives the following formular for decoding with Bayes' theorem:
 
 .. math::
 
-    \text{score}\left(y_u|\mathit{x},y\right) = 
-    \log p\left(y_u|\mathit{x},y_{1:u-1}\right) + 
-    \lambda_1 \log p_{\text{Target LM}}\left(y_u|\mathit{x},y_{1:u-1}\right) - 
+    \text{score}\left(y_u|\mathit{x},y\right) =
+    \log p\left(y_u|\mathit{x},y_{1:u-1}\right) +
+    \lambda_1 \log p_{\text{Target LM}}\left(y_u|\mathit{x},y_{1:u-1}\right) -
     \lambda_2 \log p_{\text{Source LM}}\left(y_u|\mathit{x},y_{1:u-1}\right)
 
-where :math:`\lambda_1` and :math:`\lambda_2` are the weights of LM scores for target domain and source domain respectively. 
-Here, the source domain LM is trained on the training corpus. The only difference in the above formular compared to 
+where :math:`\lambda_1` and :math:`\lambda_2` are the weights of LM scores for target domain and source domain respectively.
+Here, the source domain LM is trained on the training corpus. The only difference in the above formula compared to
 shallow fusion is the subtraction of the source domain LM.
 
-Some works treat the predictor and the joiner of the neural transducer as its internal LM. However, the LM is 
+Some works treat the predictor and the joiner of the neural transducer as its internal LM. However, the LM is
 considered to be weak and can only capture low-level language information. Therefore, `LODR `__ proposed to use
 a low-order n-gram LM as an approximation of the ILM of the neural transducer. This leads to the following formula
 during decoding for transducer model:
 
 .. math::
 
-    \text{score}\left(y_u|\mathit{x},y\right) = 
-    \log p_{rnnt}\left(y_u|\mathit{x},y_{1:u-1}\right) + 
-    \lambda_1 \log p_{\text{Target LM}}\left(y_u|\mathit{x},y_{1:u-1}\right) - 
+    \text{score}\left(y_u|\mathit{x},y\right) =
+    \log p_{rnnt}\left(y_u|\mathit{x},y_{1:u-1}\right) +
+    \lambda_1 \log p_{\text{Target LM}}\left(y_u|\mathit{x},y_{1:u-1}\right) -
     \lambda_2 \log p_{\text{bi-gram}}\left(y_u|\mathit{x},y_{1:u-1}\right)
 
-In LODR, an additional bi-gram LM estimated on the source domain (e.g training corpus) is required. Comared to DR, 
+In LODR, an additional bi-gram LM estimated on the source domain (e.g., training corpus) is required. Compared to DR,
 the only difference lies in the choice of source domain LM. According to the original `paper `_,
 LODR achieves similar performance compared DR in both intra-domain and cross-domain settings.
 As a bi-gram is much faster to evaluate, LODR is usually much faster.
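To make the LODR scoring rule above concrete, here is a minimal sketch of how the three log-scores could be combined for a single candidate token during beam search. It is not part of the patch and not the actual icefall implementation; the function name, argument names, and default weights are all hypothetical, with ``lambda1`` and ``lambda2`` playing the roles of :math:`\lambda_1` and :math:`\lambda_2`:

.. code-block:: python

    import math

    def lodr_token_score(
        log_p_rnnt: float,       # transducer log-probability of the candidate token
        log_p_target_lm: float,  # external (target-domain) LM log-probability
        log_p_bigram: float,     # source-domain bi-gram log-probability (ILM proxy)
        lambda1: float = 0.42,   # external LM weight (illustrative value only)
        lambda2: float = 0.24,   # bi-gram subtraction weight (illustrative value only)
    ) -> float:
        """Combine the three scores following the LODR formula above."""
        return log_p_rnnt + lambda1 * log_p_target_lm - lambda2 * log_p_bigram

    # A token that both the external LM and the bi-gram rate as likely:
    # subtracting the bi-gram term damps the doubly counted language information.
    print(lodr_token_score(math.log(0.6), math.log(0.5), math.log(0.4)))

Note the sign convention: the bi-gram term enters with a negative weight, which is why the decoding results later in this patch series pass a negative ``--LODR-scale``.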
@@ -85,7 +85,7 @@ To test the model, let's have a look at the decoding results **without** using L
         --avg 1 \
         --use-averaged-model False \
         --exp-dir $exp_dir \
-        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model
+        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
         --max-duration 600 \
         --decode-chunk-len 32 \
         --decoding-method modified_beam_search
@@ -99,17 +99,17 @@ The following WERs are achieved on test-clean and test-other:
     $ For test-other, WER of different settings are:
     $ beam_size_4 7.93 best for test-other
 
-Then, we download the external language model and bi-gram LM that are necessary for LODR. 
+Then, we download the external language model and bi-gram LM that are necessary for LODR.
 Note that the bi-gram is estimated on the LibriSpeech 960 hours' text.
 
 .. code-block:: bash
 
     $ # download the external LM
-    $ GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/ezerhouni/icefall-librispeech-rnn-lm 
+    $ GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/ezerhouni/icefall-librispeech-rnn-lm
     $ # create a symbolic link so that the checkpoint can be loaded
     $ pushd icefall-librispeech-rnn-lm/exp
     $ git lfs pull --include "pretrained.pt"
-    $ ln -s pretrained.pt epoch-99.pt 
+    $ ln -s pretrained.pt epoch-99.pt
     $ popd
     $
     $ # download the bi-gram
@@ -122,7 +122,7 @@ Note that the bi-gram is estimated on the LibriSpeech 960 hours' text.
 Then, we perform LODR decoding by setting ``--decoding-method`` to ``modified_beam_search_lm_LODR``:
 
 .. code-block:: bash
-    
+
     $ exp_dir=./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/exp
     $ lm_dir=./icefall-librispeech-rnn-lm/exp
     $ lm_scale=0.42
@@ -135,8 +135,8 @@ Then, we perform LODR decoding by setting ``--decoding-method`` to ``modified_be
         --exp-dir $exp_dir \
         --max-duration 600 \
         --decode-chunk-len 32 \
-        --decoding-method modified_beam_search_lm_LODR \
-        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model
+        --decoding-method modified_beam_search_LODR \
+        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
         --use-shallow-fusion 1 \
         --lm-type rnn \
         --lm-exp-dir $lm_dir \
@@ -181,4 +181,4 @@ indeed **further improves** the WER. We can do even better if we increase ``--be
      - 6.38
    * - 12
      - 2.4
-     - 6.23
\ No newline at end of file
+     - 6.23
diff --git a/docs/source/decoding-with-langugage-models/rescoring.rst b/docs/source/decoding-with-langugage-models/rescoring.rst
index ee2e2113c..02eba9129 100644
--- a/docs/source/decoding-with-langugage-models/rescoring.rst
+++ b/docs/source/decoding-with-langugage-models/rescoring.rst
@@ -48,7 +48,7 @@ As usual, we first test the model's performance without external LM. This can be
         --avg 1 \
         --use-averaged-model False \
         --exp-dir $exp_dir \
-        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model
+        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
         --max-duration 600 \
         --decode-chunk-len 32 \
         --decoding-method modified_beam_search
@@ -101,7 +101,7 @@ is set to `False`.
         --max-duration 600 \
         --decode-chunk-len 32 \
         --decoding-method modified_beam_search_lm_rescore \
-        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model
+        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
         --use-shallow-fusion 0 \
         --lm-type rnn \
         --lm-exp-dir $lm_dir \
@@ -173,7 +173,7 @@ Then we can performn LM rescoring + LODR by changing the decoding method to `mod
         --max-duration 600 \
         --decode-chunk-len 32 \
         --decoding-method modified_beam_search_lm_rescore_LODR \
-        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model
+        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
         --use-shallow-fusion 0 \
         --lm-type rnn \
         --lm-exp-dir $lm_dir \
diff --git a/docs/source/decoding-with-langugage-models/shallow-fusion.rst b/docs/source/decoding-with-langugage-models/shallow-fusion.rst
index 0d2837372..f15e3f1d9 100644
--- a/docs/source/decoding-with-langugage-models/shallow-fusion.rst
+++ b/docs/source/decoding-with-langugage-models/shallow-fusion.rst
@@ -46,7 +46,7 @@ To test the model, let's have a look at the decoding results without using LM. T
         --avg 1 \
         --use-averaged-model False \
         --exp-dir $exp_dir \
-        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model
+        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
         --max-duration 600 \
         --decode-chunk-len 32 \
         --decoding-method modified_beam_search
@@ -95,7 +95,7 @@ To use shallow fusion for decoding, we can execute the following command:
         --max-duration 600 \
         --decode-chunk-len 32 \
         --decoding-method modified_beam_search_lm_shallow_fusion \
-        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model
+        --bpe-model ./icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29/data/lang_bpe_500/bpe.model \
         --use-shallow-fusion 1 \
         --lm-type rnn \
         --lm-exp-dir $lm_dir \
diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md
index 1b8e690bd..b945f43fd 100644
--- a/egs/librispeech/ASR/RESULTS.md
+++ b/egs/librispeech/ASR/RESULTS.md
@@ -90,6 +90,11 @@ You can use <https://github.com/k2-fsa/sherpa> to deploy it.
 | greedy_search | 2.23 | 4.96 | --epoch 40 --avg 16 |
 | modified_beam_search | 2.21 | 4.91 | --epoch 40 --avg 16 |
 | fast_beam_search | 2.24 | 4.93 | --epoch 40 --avg 16 |
+| modified_beam_search_shallow_fusion | 2.01 | 4.37 | --epoch 40 --avg 16 --beam-size 12 --lm-scale 0.3 |
+| modified_beam_search_LODR | 1.94 | 4.17 | --epoch 40 --avg 16 --beam-size 12 --lm-scale 0.52 --LODR-scale -0.26 |
+| modified_beam_search_rescore | 2.04 | 4.39 | --epoch 40 --avg 16 --beam-size 12 |
+| modified_beam_search_rescore_LODR | 2.01 | 4.33 | --epoch 40 --avg 16 --beam-size 12 |
+
 
 The training command is:
 
 ```bash
@@ -119,6 +124,8 @@ for m in greedy_search modified_beam_search fast_beam_search; do
 done
 ```
 
+To decode with external language models, please refer to the documentation [here](https://k2-fsa.github.io/icefall/decoding-with-langugage-models/index.html).
+
 ##### small-scaled model, number of model parameters: 23285615, i.e., 23.3 M
 
 The tensorboard log can be found at
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless7_streaming/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless7_streaming/decode.py
index 3444f8193..02029c108 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless7_streaming/decode.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless7_streaming/decode.py
@@ -396,6 +396,12 @@ def decode_one_batch(
         The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used
         only when --decoding_method is fast_beam_search, fast_beam_search_nbest,
         fast_beam_search_nbest_oracle, and fast_beam_search_nbest_LG.
+      LM:
+        A neural network language model.
+      ngram_lm:
+        An n-gram language model.
+      ngram_lm_scale:
+        The scale for the n-gram language model.
     Returns:
       Return the decoding result. See above description for the format of
       the returned dict.
@@ -907,6 +913,7 @@ def main():
         ngram_file_name = str(params.lang_dir / f"{params.tokens_ngram}gram.arpa")
         logging.info(f"lm filename: {ngram_file_name}")
         ngram_lm = kenlm.Model(ngram_file_name)
+        ngram_lm_scale = None  # use a list to search
 
     elif params.decoding_method == "modified_beam_search_LODR":
         lm_filename = f"{params.tokens_ngram}gram.fst.txt"
diff --git a/egs/librispeech/ASR/zipformer/decode.py b/egs/librispeech/ASR/zipformer/decode.py
index 93680602e..2cc157e7a 100755
--- a/egs/librispeech/ASR/zipformer/decode.py
+++ b/egs/librispeech/ASR/zipformer/decode.py
@@ -115,9 +115,14 @@ from beam_search import (
     greedy_search,
     greedy_search_batch,
     modified_beam_search,
+    modified_beam_search_lm_rescore,
+    modified_beam_search_lm_rescore_LODR,
+    modified_beam_search_lm_shallow_fusion,
+    modified_beam_search_LODR,
 )
-from train import add_model_arguments, get_params, get_model
+from train import add_model_arguments, get_model, get_params
 
+from icefall import LmScorer, NgramLm
 from icefall.checkpoint import (
     average_checkpoints,
     average_checkpoints_with_averaged_model,
@@ -273,8 +278,7 @@ def get_parser():
         "--context-size",
         type=int,
         default=2,
-        help="The context size in the decoder. 1 means bigram; "
-        "2 means tri-gram",
+        help="The context size in the decoder. 1 means bigram; " "2 means tri-gram",
     )
     parser.add_argument(
         "--max-sym-per-frame",
@@ -302,6 +306,47 @@ def get_parser():
           fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""",
     )
 
+    parser.add_argument(
+        "--use-shallow-fusion",
+        type=str2bool,
+        default=False,
+        help="""Use neural network LM for shallow fusion.
+        If you want to use LODR, you will also need to set this to true
+        """,
+    )
+
+    parser.add_argument(
+        "--lm-type",
+        type=str,
+        default="rnn",
+        help="Type of NN lm",
+        choices=["rnn", "transformer"],
+    )
+
+    parser.add_argument(
+        "--lm-scale",
+        type=float,
+        default=0.3,
+        help="""The scale of the neural network LM
+        Used only when `--use-shallow-fusion` is set to True.
+        """,
+    )
+
+    parser.add_argument(
+        "--tokens-ngram",
+        type=int,
+        default=2,
+        help="""The order of the ngram lm.
+        """,
+    )
+
+    parser.add_argument(
+        "--backoff-id",
+        type=int,
+        default=500,
+        help="ID of the backoff symbol in the ngram LM",
+    )
+
     add_model_arguments(parser)
 
     return parser
@@ -314,6 +359,9 @@ def decode_one_batch(
     batch: dict,
     word_table: Optional[k2.SymbolTable] = None,
     decoding_graph: Optional[k2.Fsa] = None,
+    LM: Optional[LmScorer] = None,
+    ngram_lm=None,
+    ngram_lm_scale: float = 0.0,
 ) -> Dict[str, List[List[str]]]:
     """Decode one batch and return the result in a dict.
 
     The dict has the following format:
@@ -342,6 +390,12 @@ def decode_one_batch(
         The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used
         only when --decoding_method is fast_beam_search, fast_beam_search_nbest,
         fast_beam_search_nbest_oracle, and fast_beam_search_nbest_LG.
+      LM:
+        A neural network language model.
+      ngram_lm:
+        An n-gram language model.
+      ngram_lm_scale:
+        The scale for the n-gram language model.
     Returns:
       Return the decoding result. See above description for the format of
       the returned dict.
@@ -425,10 +479,7 @@ def decode_one_batch(
         )
         for hyp in sp.decode(hyp_tokens):
             hyps.append(hyp.split())
-    elif (
-        params.decoding_method == "greedy_search"
-        and params.max_sym_per_frame == 1
-    ):
+    elif params.decoding_method == "greedy_search" and params.max_sym_per_frame == 1:
         hyp_tokens = greedy_search_batch(
             model=model,
             encoder_out=encoder_out,
@@ -445,6 +496,50 @@ def decode_one_batch(
         )
         for hyp in sp.decode(hyp_tokens):
             hyps.append(hyp.split())
+    elif params.decoding_method == "modified_beam_search_lm_shallow_fusion":
+        hyp_tokens = modified_beam_search_lm_shallow_fusion(
+            model=model,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam_size,
+            LM=LM,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyps.append(hyp.split())
+    elif params.decoding_method == "modified_beam_search_LODR":
+        hyp_tokens = modified_beam_search_LODR(
+            model=model,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam_size,
+            LODR_lm=ngram_lm,
+            LODR_lm_scale=ngram_lm_scale,
+            LM=LM,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyps.append(hyp.split())
+    elif params.decoding_method == "modified_beam_search_lm_rescore":
+        lm_scale_list = [0.01 * i for i in range(10, 50)]
+        ans_dict = modified_beam_search_lm_rescore(
+            model=model,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam_size,
+            LM=LM,
+            lm_scale_list=lm_scale_list,
+        )
+    elif params.decoding_method == "modified_beam_search_lm_rescore_LODR":
+        lm_scale_list = [0.02 * i for i in range(2, 30)]
+        ans_dict = modified_beam_search_lm_rescore_LODR(
+            model=model,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam_size,
+            LM=LM,
+            LODR_lm=ngram_lm,
+            sp=sp,
+            lm_scale_list=lm_scale_list,
+        )
     else:
         batch_size = encoder_out.size(0)
 
@@ -483,6 +578,16 @@ def decode_one_batch(
             key += f"_ngram_lm_scale_{params.ngram_lm_scale}"
 
         return {key: hyps}
+    elif params.decoding_method in (
+        "modified_beam_search_lm_rescore",
+        "modified_beam_search_lm_rescore_LODR",
+    ):
+        ans = dict()
+        assert ans_dict is not None
+        for key, hyps in ans_dict.items():
+            hyps = [sp.decode(hyp).split() for hyp in hyps]
+            ans[f"beam_size_{params.beam_size}_{key}"] = hyps
+        return ans
     else:
         return {f"beam_size_{params.beam_size}": hyps}
 
@@ -494,6 +599,9 @@ def decode_dataset(
     sp: spm.SentencePieceProcessor,
     word_table: Optional[k2.SymbolTable] = None,
     decoding_graph: Optional[k2.Fsa] = None,
+    LM: Optional[LmScorer] = None,
+    ngram_lm=None,
+    ngram_lm_scale: float = 0.0,
 ) -> Dict[str, List[Tuple[str, List[str], List[str]]]]:
     """Decode dataset.
@@ -543,6 +651,9 @@ def decode_dataset( decoding_graph=decoding_graph, word_table=word_table, batch=batch, + LM=LM, + ngram_lm=ngram_lm, + ngram_lm_scale=ngram_lm_scale, ) for name, hyps in hyps_dict.items(): @@ -559,9 +670,7 @@ def decode_dataset( if batch_idx % log_interval == 0: batch_str = f"{batch_idx}/{num_batches}" - logging.info( - f"batch {batch_str}, cuts processed until now is {num_cuts}" - ) + logging.info(f"batch {batch_str}, cuts processed until now is {num_cuts}") return results @@ -594,8 +703,7 @@ def save_results( test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) errs_info = ( - params.res_dir - / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + params.res_dir / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" ) with open(errs_info, "w") as f: print("settings\tWER", file=f) @@ -614,6 +722,7 @@ def save_results( def main(): parser = get_parser() LibriSpeechAsrDataModule.add_arguments(parser) + LmScorer.add_arguments(parser) args = parser.parse_args() args.exp_dir = Path(args.exp_dir) @@ -628,6 +737,10 @@ def main(): "fast_beam_search_nbest_LG", "fast_beam_search_nbest_oracle", "modified_beam_search", + "modified_beam_search_LODR", + "modified_beam_search_lm_shallow_fusion", + "modified_beam_search_lm_rescore", + "modified_beam_search_lm_rescore_LODR", ) params.res_dir = params.exp_dir / params.decoding_method @@ -656,13 +769,19 @@ def main(): if "LG" in params.decoding_method: params.suffix += f"-ngram-lm-scale-{params.ngram_lm_scale}" elif "beam_search" in params.decoding_method: - params.suffix += ( - f"-{params.decoding_method}-beam-size-{params.beam_size}" - ) + params.suffix += f"-{params.decoding_method}-beam-size-{params.beam_size}" else: params.suffix += f"-context-{params.context_size}" params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}" + if params.use_shallow_fusion: + params.suffix += f"-{params.lm_type}-lm-scale-{params.lm_scale}" + + if "LODR" in params.decoding_method: + params.suffix += ( + f"-LODR-{params.tokens_ngram}gram-scale-{params.ngram_lm_scale}" + ) + if params.use_averaged_model: params.suffix += "-use-averaged-model" @@ -690,9 +809,9 @@ def main(): if not params.use_averaged_model: if params.iter > 0: - filenames = find_checkpoints( - params.exp_dir, iteration=-params.iter - )[: params.avg] + filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[ + : params.avg + ] if len(filenames) == 0: raise ValueError( f"No checkpoints found for" @@ -719,9 +838,9 @@ def main(): model.load_state_dict(average_checkpoints(filenames, device=device)) else: if params.iter > 0: - filenames = find_checkpoints( - params.exp_dir, iteration=-params.iter - )[: params.avg + 1] + filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[ + : params.avg + 1 + ] if len(filenames) == 0: raise ValueError( f"No checkpoints found for" @@ -768,6 +887,54 @@ def main(): model.to(device) model.eval() + # only load the neural network LM if required + if params.use_shallow_fusion or params.decoding_method in ( + "modified_beam_search_lm_rescore", + "modified_beam_search_lm_rescore_LODR", + "modified_beam_search_lm_shallow_fusion", + "modified_beam_search_LODR", + ): + LM = LmScorer( + lm_type=params.lm_type, + params=params, + device=device, + lm_scale=params.lm_scale, + ) + LM.to(device) + LM.eval() + else: + LM = None + + # only load N-gram LM when needed + if params.decoding_method == "modified_beam_search_lm_rescore_LODR": + try: + import kenlm + except ImportError: + print("Please install kenlm first. 
You can use") + print(" pip install https://github.com/kpu/kenlm/archive/master.zip") + print("to install it") + import sys + + sys.exit(-1) + ngram_file_name = str(params.lang_dir / f"{params.tokens_ngram}gram.arpa") + logging.info(f"lm filename: {ngram_file_name}") + ngram_lm = kenlm.Model(ngram_file_name) + ngram_lm_scale = None # use a list to search + + elif params.decoding_method == "modified_beam_search_LODR": + lm_filename = f"{params.tokens_ngram}gram.fst.txt" + logging.info(f"Loading token level lm: {lm_filename}") + ngram_lm = NgramLm( + str(params.lang_dir / lm_filename), + backoff_id=params.backoff_id, + is_binary=False, + ) + logging.info(f"num states: {ngram_lm.lm.num_states}") + ngram_lm_scale = params.ngram_lm_scale + else: + ngram_lm = None + ngram_lm_scale = None + if "fast_beam_search" in params.decoding_method: if params.decoding_method == "fast_beam_search_nbest_LG": lexicon = Lexicon(params.lang_dir) @@ -780,9 +947,7 @@ def main(): decoding_graph.scores *= params.ngram_lm_scale else: word_table = None - decoding_graph = k2.trivial_graph( - params.vocab_size - 1, device=device - ) + decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device) else: decoding_graph = None word_table = None @@ -811,6 +976,9 @@ def main(): sp=sp, word_table=word_table, decoding_graph=decoding_graph, + LM=LM, + ngram_lm=ngram_lm, + ngram_lm_scale=ngram_lm_scale, ) save_results( From 00256a766921dd34a267012b0e2b8ff7d538f0e6 Mon Sep 17 00:00:00 2001 From: Yifan Yang <64255737+yfyeung@users.noreply.github.com> Date: Wed, 9 Aug 2023 09:40:58 +0800 Subject: [PATCH 29/30] Fix decode_stream.py (#1208) * FIx decode_stream.py * Update decode_stream.py --- egs/librispeech/ASR/zipformer/decode_stream.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/egs/librispeech/ASR/zipformer/decode_stream.py b/egs/librispeech/ASR/zipformer/decode_stream.py index 946db275c..d6918bf32 100644 --- a/egs/librispeech/ASR/zipformer/decode_stream.py +++ b/egs/librispeech/ASR/zipformer/decode_stream.py @@ -79,12 +79,12 @@ class DecodeStream(object): self.pad_length = 7 + 2 * 3 if params.decoding_method == "greedy_search": - self.hyp = [params.blank_id] * params.context_size + self.hyp = [-1] * (params.context_size - 1) + [params.blank_id] elif params.decoding_method == "modified_beam_search": self.hyps = HypothesisList() self.hyps.add( Hypothesis( - ys=[params.blank_id] * params.context_size, + ys=[-1] * (params.context_size - 1) + [params.blank_id], log_prob=torch.zeros(1, dtype=torch.float32, device=device), ) ) From 74806b744b81620d06645c27f5a2dda307e58322 Mon Sep 17 00:00:00 2001 From: zr_jin Date: Thu, 10 Aug 2023 20:56:02 +0800 Subject: [PATCH 30/30] disable speed perturbation by default (#1176) * disable speed perturbation by default * minor fixes * minor updates * updated bash scripts to incorporate with the `speed-perturb` arg * minor fixes 1. changed the naming scheme from `speed-perturb` to `perturb-speed` to align with the librispeech recipe >> https://github.com/k2-fsa/icefall/blob/00256a766921dd34a267012b0e2b8ff7d538f0e6/egs/librispeech/ASR/local/compute_fbank_librispeech.py#L65 2. 
changed arg type for `perturb-speed` to str2bool --- .../local/compute_fbank_aidatatang_200zh.py | 18 ++++++++--- egs/aidatatang_200zh/ASR/prepare.sh | 2 +- .../local/compute_fbank_aidatatang_200zh.py | 18 ++++++++--- .../ASR/local/compute_fbank_aishell.py | 18 ++++++++--- egs/aishell/ASR/prepare.sh | 2 +- egs/aishell/ASR/prepare_aidatatang_200zh.sh | 2 +- .../ASR/local/compute_fbank_aishell2.py | 17 +++++++--- egs/aishell2/ASR/prepare.sh | 2 +- .../ASR/local/compute_fbank_aishell4.py | 18 ++++++++--- egs/aishell4/ASR/prepare.sh | 2 +- .../ASR/local/compute_fbank_alimeeting.py | 17 +++++++--- egs/alimeeting/ASR/prepare.sh | 2 +- .../ASR_v2/local/compute_fbank_alimeeting.py | 32 ++++++++++++++++--- egs/alimeeting/ASR_v2/prepare.sh | 2 +- .../ASR/local/preprocess_wenetspeech.py | 20 ++++++++++-- egs/wenetspeech/ASR/prepare.sh | 2 +- 16 files changed, 132 insertions(+), 42 deletions(-) diff --git a/egs/aidatatang_200zh/ASR/local/compute_fbank_aidatatang_200zh.py b/egs/aidatatang_200zh/ASR/local/compute_fbank_aidatatang_200zh.py index 387c14acf..9caacb78b 100755 --- a/egs/aidatatang_200zh/ASR/local/compute_fbank_aidatatang_200zh.py +++ b/egs/aidatatang_200zh/ASR/local/compute_fbank_aidatatang_200zh.py @@ -32,7 +32,7 @@ import torch from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter from lhotse.recipes.utils import read_manifests_if_cached -from icefall.utils import get_executor +from icefall.utils import get_executor, str2bool # Torch's multithreaded behavior needs to be disabled or # it wastes a lot of CPU and slow things down. @@ -42,7 +42,7 @@ torch.set_num_threads(1) torch.set_num_interop_threads(1) -def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80): +def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80, perturb_speed: bool = False): src_dir = Path("data/manifests/aidatatang_200zh") output_dir = Path("data/fbank") num_jobs = min(15, os.cpu_count()) @@ -85,7 +85,8 @@ def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80): recordings=m["recordings"], supervisions=m["supervisions"], ) - if "train" in partition: + if "train" in partition and perturb_speed: + logging.info(f"Doing speed perturb") cut_set = ( cut_set + cut_set.perturb_speed(0.9) + cut_set.perturb_speed(1.1) ) @@ -109,7 +110,12 @@ def get_args(): default=80, help="""The number of mel bins for Fbank""", ) - + parser.add_argument( + "--perturb-speed", + type=str2bool, + default=False, + help="Enable 0.9 and 1.1 speed perturbation for data augmentation. Default: False.", + ) return parser.parse_args() @@ -119,4 +125,6 @@ if __name__ == "__main__": logging.basicConfig(format=formatter, level=logging.INFO) args = get_args() - compute_fbank_aidatatang_200zh(num_mel_bins=args.num_mel_bins) + compute_fbank_aidatatang_200zh( + num_mel_bins=args.num_mel_bins, perturb_speed=args.perturb_speed + ) diff --git a/egs/aidatatang_200zh/ASR/prepare.sh b/egs/aidatatang_200zh/ASR/prepare.sh index 46ecd5769..2eb0b3718 100755 --- a/egs/aidatatang_200zh/ASR/prepare.sh +++ b/egs/aidatatang_200zh/ASR/prepare.sh @@ -77,7 +77,7 @@ if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then log "Stage 4: Compute fbank for aidatatang_200zh" if [ ! 
-f data/fbank/.aidatatang_200zh.done ]; then mkdir -p data/fbank - ./local/compute_fbank_aidatatang_200zh.py + ./local/compute_fbank_aidatatang_200zh.py --perturb-speed True touch data/fbank/.aidatatang_200zh.done fi fi diff --git a/egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py b/egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py index 037971927..6a9bb4f42 100755 --- a/egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py +++ b/egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py @@ -32,7 +32,7 @@ import torch from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter from lhotse.recipes.utils import read_manifests_if_cached -from icefall.utils import get_executor +from icefall.utils import get_executor, str2bool # Torch's multithreaded behavior needs to be disabled or # it wastes a lot of CPU and slow things down. @@ -42,7 +42,7 @@ torch.set_num_threads(1) torch.set_num_interop_threads(1) -def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80): +def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80, perturb_speed: bool = False): src_dir = Path("data/manifests") output_dir = Path("data/fbank") num_jobs = min(15, os.cpu_count()) @@ -85,7 +85,8 @@ def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80): recordings=m["recordings"], supervisions=m["supervisions"], ) - if "train" in partition: + if "train" in partition and perturb_speed: + logging.info(f"Doing speed perturb") cut_set = ( cut_set + cut_set.perturb_speed(0.9) + cut_set.perturb_speed(1.1) ) @@ -109,7 +110,12 @@ def get_args(): default=80, help="""The number of mel bins for Fbank""", ) - + parser.add_argument( + "--perturb-speed", + type=str2bool, + default=False, + help="Enable 0.9 and 1.1 speed perturbation for data augmentation. Default: False.", + ) return parser.parse_args() @@ -119,4 +125,6 @@ if __name__ == "__main__": logging.basicConfig(format=formatter, level=logging.INFO) args = get_args() - compute_fbank_aidatatang_200zh(num_mel_bins=args.num_mel_bins) + compute_fbank_aidatatang_200zh( + num_mel_bins=args.num_mel_bins, perturb_speed=args.perturb_speed + ) diff --git a/egs/aishell/ASR/local/compute_fbank_aishell.py b/egs/aishell/ASR/local/compute_fbank_aishell.py index 115ca1031..c7000da1c 100755 --- a/egs/aishell/ASR/local/compute_fbank_aishell.py +++ b/egs/aishell/ASR/local/compute_fbank_aishell.py @@ -32,7 +32,7 @@ import torch from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter from lhotse.recipes.utils import read_manifests_if_cached -from icefall.utils import get_executor +from icefall.utils import get_executor, str2bool # Torch's multithreaded behavior needs to be disabled or # it wastes a lot of CPU and slow things down. 
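# Note on the flag parsing used throughout this patch: the new
# `--perturb-speed` options accept the string values "True"/"False" on the
# command line (e.g. `./local/compute_fbank_aishell.py --perturb-speed True`)
# because their argparse type is icefall's str2bool. Below is a minimal
# sketch of that Kaldi-style parser; the shipped version lives in
# icefall/utils.py and may differ in detail.
import argparse

def str2bool(v):
    """Interpret strings such as "true"/"false" as booleans for argparse."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if v.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")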
@@ -42,7 +42,7 @@ torch.set_num_threads(1) torch.set_num_interop_threads(1) -def compute_fbank_aishell(num_mel_bins: int = 80): +def compute_fbank_aishell(num_mel_bins: int = 80, perturb_speed: bool = False): src_dir = Path("data/manifests") output_dir = Path("data/fbank") num_jobs = min(15, os.cpu_count()) @@ -81,7 +81,8 @@ def compute_fbank_aishell(num_mel_bins: int = 80): recordings=m["recordings"], supervisions=m["supervisions"], ) - if "train" in partition: + if "train" in partition and perturb_speed: + logging.info(f"Doing speed perturb") cut_set = ( cut_set + cut_set.perturb_speed(0.9) + cut_set.perturb_speed(1.1) ) @@ -104,7 +105,12 @@ def get_args(): default=80, help="""The number of mel bins for Fbank""", ) - + parser.add_argument( + "--perturb-speed", + type=str2bool, + default=False, + help="Enable 0.9 and 1.1 speed perturbation for data augmentation. Default: False.", + ) return parser.parse_args() @@ -114,4 +120,6 @@ if __name__ == "__main__": logging.basicConfig(format=formatter, level=logging.INFO) args = get_args() - compute_fbank_aishell(num_mel_bins=args.num_mel_bins) + compute_fbank_aishell( + num_mel_bins=args.num_mel_bins, perturb_speed=args.perturb_speed + ) diff --git a/egs/aishell/ASR/prepare.sh b/egs/aishell/ASR/prepare.sh index b763d72c1..ff8e1301d 100755 --- a/egs/aishell/ASR/prepare.sh +++ b/egs/aishell/ASR/prepare.sh @@ -114,7 +114,7 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then log "Stage 3: Compute fbank for aishell" if [ ! -f data/fbank/.aishell.done ]; then mkdir -p data/fbank - ./local/compute_fbank_aishell.py + ./local/compute_fbank_aishell.py --perturb-speed True touch data/fbank/.aishell.done fi fi diff --git a/egs/aishell/ASR/prepare_aidatatang_200zh.sh b/egs/aishell/ASR/prepare_aidatatang_200zh.sh index f1d4d18a7..ec89450df 100755 --- a/egs/aishell/ASR/prepare_aidatatang_200zh.sh +++ b/egs/aishell/ASR/prepare_aidatatang_200zh.sh @@ -53,7 +53,7 @@ if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then log "Stage 2: Process aidatatang_200zh" if [ ! -f data/fbank/.aidatatang_200zh_fbank.done ]; then mkdir -p data/fbank - ./local/compute_fbank_aidatatang_200zh.py + ./local/compute_fbank_aidatatang_200zh.py --perturb-speed True touch data/fbank/.aidatatang_200zh_fbank.done fi fi diff --git a/egs/aishell2/ASR/local/compute_fbank_aishell2.py b/egs/aishell2/ASR/local/compute_fbank_aishell2.py index ec0c584ca..1fb1621ff 100755 --- a/egs/aishell2/ASR/local/compute_fbank_aishell2.py +++ b/egs/aishell2/ASR/local/compute_fbank_aishell2.py @@ -32,7 +32,7 @@ import torch from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter from lhotse.recipes.utils import read_manifests_if_cached -from icefall.utils import get_executor +from icefall.utils import get_executor, str2bool # Torch's multithreaded behavior needs to be disabled or # it wastes a lot of CPU and slow things down. 
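# The same guard is repeated in every compute_fbank script touched by this
# patch. A self-contained sketch of the idiom, assuming a lhotse CutSet;
# `maybe_perturb_speed` is a hypothetical helper, not part of these recipes.
from lhotse import CutSet

def maybe_perturb_speed(cut_set: CutSet, perturb_speed: bool = False) -> CutSet:
    """Optionally add 0.9x and 1.1x speed copies of every cut."""
    if perturb_speed:
        # A factor of 0.9 slows the audio down (duration / 0.9) and 1.1
        # speeds it up; together with the originals the training set ends
        # up with three times as many cuts.
        cut_set = cut_set + cut_set.perturb_speed(0.9) + cut_set.perturb_speed(1.1)
    return cut_set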
@@ -42,7 +42,7 @@ torch.set_num_threads(1) torch.set_num_interop_threads(1) -def compute_fbank_aishell2(num_mel_bins: int = 80): +def compute_fbank_aishell2(num_mel_bins: int = 80, perturb_speed: bool = False): src_dir = Path("data/manifests") output_dir = Path("data/fbank") num_jobs = min(15, os.cpu_count()) @@ -81,7 +81,8 @@ def compute_fbank_aishell2(num_mel_bins: int = 80): recordings=m["recordings"], supervisions=m["supervisions"], ) - if "train" in partition: + if "train" in partition and perturb_speed: + logging.info(f"Doing speed perturb") cut_set = ( cut_set + cut_set.perturb_speed(0.9) + cut_set.perturb_speed(1.1) ) @@ -104,6 +105,12 @@ def get_args(): default=80, help="""The number of mel bins for Fbank""", ) + parser.add_argument( + "--perturb-speed", + type=str2bool, + default=False, + help="Enable 0.9 and 1.1 speed perturbation for data augmentation. Default: False.", + ) return parser.parse_args() @@ -114,4 +121,6 @@ if __name__ == "__main__": logging.basicConfig(format=formatter, level=logging.INFO) args = get_args() - compute_fbank_aishell2(num_mel_bins=args.num_mel_bins) + compute_fbank_aishell2( + num_mel_bins=args.num_mel_bins, perturb_speed=args.perturb_speed + ) diff --git a/egs/aishell2/ASR/prepare.sh b/egs/aishell2/ASR/prepare.sh index 3e8e840ab..42631c864 100755 --- a/egs/aishell2/ASR/prepare.sh +++ b/egs/aishell2/ASR/prepare.sh @@ -101,7 +101,7 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then log "Stage 3: Compute fbank for aishell2" if [ ! -f data/fbank/.aishell2.done ]; then mkdir -p data/fbank - ./local/compute_fbank_aishell2.py + ./local/compute_fbank_aishell2.py --perturb-speed True touch data/fbank/.aishell2.done fi fi diff --git a/egs/aishell4/ASR/local/compute_fbank_aishell4.py b/egs/aishell4/ASR/local/compute_fbank_aishell4.py index 400c406f0..f19163988 100755 --- a/egs/aishell4/ASR/local/compute_fbank_aishell4.py +++ b/egs/aishell4/ASR/local/compute_fbank_aishell4.py @@ -32,7 +32,7 @@ import torch from lhotse import ChunkedLilcomHdf5Writer, CutSet, Fbank, FbankConfig from lhotse.recipes.utils import read_manifests_if_cached -from icefall.utils import get_executor +from icefall.utils import get_executor, str2bool # Torch's multithreaded behavior needs to be disabled or # it wastes a lot of CPU and slow things down. @@ -42,7 +42,7 @@ torch.set_num_threads(1) torch.set_num_interop_threads(1) -def compute_fbank_aishell4(num_mel_bins: int = 80): +def compute_fbank_aishell4(num_mel_bins: int = 80, perturb_speed: bool = False): src_dir = Path("data/manifests/aishell4") output_dir = Path("data/fbank") num_jobs = min(15, os.cpu_count()) @@ -83,10 +83,12 @@ def compute_fbank_aishell4(num_mel_bins: int = 80): recordings=m["recordings"], supervisions=m["supervisions"], ) - if "train" in partition: + if "train" in partition and perturb_speed: + logging.info(f"Doing speed perturb") cut_set = ( cut_set + cut_set.perturb_speed(0.9) + cut_set.perturb_speed(1.1) ) + cut_set = cut_set.compute_and_store_features( extractor=extractor, storage_path=f"{output_dir}/{prefix}_feats_{partition}", @@ -113,6 +115,12 @@ def get_args(): default=80, help="""The number of mel bins for Fbank""", ) + parser.add_argument( + "--perturb-speed", + type=str2bool, + default=False, + help="Enable 0.9 and 1.1 speed perturbation for data augmentation. 
Default: False.", + ) return parser.parse_args() @@ -123,4 +131,6 @@ if __name__ == "__main__": logging.basicConfig(format=formatter, level=logging.INFO) args = get_args() - compute_fbank_aishell4(num_mel_bins=args.num_mel_bins) + compute_fbank_aishell4( + num_mel_bins=args.num_mel_bins, perturb_speed=args.perturb_speed + ) diff --git a/egs/aishell4/ASR/prepare.sh b/egs/aishell4/ASR/prepare.sh index cb2b73a3e..1b1ec0005 100755 --- a/egs/aishell4/ASR/prepare.sh +++ b/egs/aishell4/ASR/prepare.sh @@ -107,7 +107,7 @@ if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then log "Stage 5: Compute fbank for aishell4" if [ ! -f data/fbank/.aishell4.done ]; then mkdir -p data/fbank - ./local/compute_fbank_aishell4.py + ./local/compute_fbank_aishell4.py --perturb-speed True touch data/fbank/.aishell4.done fi fi diff --git a/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py b/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py index 96115a230..f8c10648a 100755 --- a/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py +++ b/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py @@ -32,7 +32,7 @@ import torch from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter from lhotse.recipes.utils import read_manifests_if_cached -from icefall.utils import get_executor +from icefall.utils import get_executor, str2bool # Torch's multithreaded behavior needs to be disabled or # it wastes a lot of CPU and slow things down. @@ -42,7 +42,7 @@ torch.set_num_threads(1) torch.set_num_interop_threads(1) -def compute_fbank_alimeeting(num_mel_bins: int = 80): +def compute_fbank_alimeeting(num_mel_bins: int = 80, perturb_speed: bool = False): src_dir = Path("data/manifests/alimeeting") output_dir = Path("data/fbank") num_jobs = min(15, os.cpu_count()) @@ -82,7 +82,8 @@ def compute_fbank_alimeeting(num_mel_bins: int = 80): recordings=m["recordings"], supervisions=m["supervisions"], ) - if "train" in partition: + if "train" in partition and perturb_speed: + logging.info(f"Doing speed perturb") cut_set = ( cut_set + cut_set.perturb_speed(0.9) + cut_set.perturb_speed(1.1) ) @@ -114,6 +115,12 @@ def get_args(): default=80, help="""The number of mel bins for Fbank""", ) + parser.add_argument( + "--perturb-speed", + type=str2bool, + default=False, + help="Enable 0.9 and 1.1 speed perturbation for data augmentation. Default: False.", + ) return parser.parse_args() @@ -124,4 +131,6 @@ if __name__ == "__main__": logging.basicConfig(format=formatter, level=logging.INFO) args = get_args() - compute_fbank_alimeeting(num_mel_bins=args.num_mel_bins) + compute_fbank_alimeeting( + num_mel_bins=args.num_mel_bins, perturb_speed=args.perturb_speed + ) diff --git a/egs/alimeeting/ASR/prepare.sh b/egs/alimeeting/ASR/prepare.sh index 604cc92c6..1709733c7 100755 --- a/egs/alimeeting/ASR/prepare.sh +++ b/egs/alimeeting/ASR/prepare.sh @@ -97,7 +97,7 @@ if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then log "Stage 5: Compute fbank for alimeeting" if [ ! -f data/fbank/.alimeeting.done ]; then mkdir -p data/fbank - ./local/compute_fbank_alimeeting.py + ./local/compute_fbank_alimeeting.py --perturb-speed True touch data/fbank/.alimeeting.done fi fi diff --git a/egs/alimeeting/ASR_v2/local/compute_fbank_alimeeting.py b/egs/alimeeting/ASR_v2/local/compute_fbank_alimeeting.py index c6aa2ab36..833d11c72 100755 --- a/egs/alimeeting/ASR_v2/local/compute_fbank_alimeeting.py +++ b/egs/alimeeting/ASR_v2/local/compute_fbank_alimeeting.py @@ -25,6 +25,7 @@ It looks for manifests in the directory data/manifests. 
The generated fbank features are saved in data/fbank. """ +import argparse import logging from pathlib import Path @@ -39,6 +40,8 @@ from lhotse.features.kaldifeat import ( ) from lhotse.recipes.utils import read_manifests_if_cached +from icefall.utils import str2bool + # Torch's multithreaded behavior needs to be disabled or # it wastes a lot of CPU and slow things down. # Do this outside of main() in case it needs to take effect @@ -48,7 +51,7 @@ torch.set_num_interop_threads(1) torch.multiprocessing.set_sharing_strategy("file_system") -def compute_fbank_ami(): +def compute_fbank_ami(perturb_speed: bool = False): src_dir = Path("data/manifests") output_dir = Path("data/fbank") @@ -84,8 +87,12 @@ def compute_fbank_ami(): suffix="jsonl.gz", ) - def _extract_feats(cuts: CutSet, storage_path: Path, manifest_path: Path) -> None: - cuts = cuts + cuts.perturb_speed(0.9) + cuts.perturb_speed(1.1) + def _extract_feats( + cuts: CutSet, storage_path: Path, manifest_path: Path, speed_perturb: bool + ) -> None: + if speed_perturb: + logging.info(f"Doing speed perturb") + cuts = cuts + cuts.perturb_speed(0.9) + cuts.perturb_speed(1.1) _ = cuts.compute_and_store_features_batch( extractor=extractor, storage_path=storage_path, @@ -109,6 +116,7 @@ def compute_fbank_ami(): cuts_ihm, output_dir / "feats_train_ihm", src_dir / "cuts_train_ihm.jsonl.gz", + perturb_speed, ) logging.info("Processing train split IHM + reverberated IHM") @@ -117,6 +125,7 @@ def compute_fbank_ami(): cuts_ihm_rvb, output_dir / "feats_train_ihm_rvb", src_dir / "cuts_train_ihm_rvb.jsonl.gz", + perturb_speed, ) logging.info("Processing train split SDM") @@ -129,6 +138,7 @@ def compute_fbank_ami(): cuts_sdm, output_dir / "feats_train_sdm", src_dir / "cuts_train_sdm.jsonl.gz", + perturb_speed, ) logging.info("Processing train split GSS") @@ -141,6 +151,7 @@ def compute_fbank_ami(): cuts_gss, output_dir / "feats_train_gss", src_dir / "cuts_train_gss.jsonl.gz", + perturb_speed, ) logging.info("Preparing test cuts: IHM, SDM, GSS (optional)") @@ -186,8 +197,21 @@ def compute_fbank_ami(): ) +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--perturb-speed", + type=str2bool, + default=False, + help="Enable 0.9 and 1.1 speed perturbation for data augmentation. 
Default: False.", + ) + return parser.parse_args() + + if __name__ == "__main__": formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" logging.basicConfig(format=formatter, level=logging.INFO) - compute_fbank_ami() + args = get_args() + + compute_fbank_ami(perturb_speed=args.perturb_speed) diff --git a/egs/alimeeting/ASR_v2/prepare.sh b/egs/alimeeting/ASR_v2/prepare.sh index 76a108771..1098840f8 100755 --- a/egs/alimeeting/ASR_v2/prepare.sh +++ b/egs/alimeeting/ASR_v2/prepare.sh @@ -85,7 +85,7 @@ fi if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then log "Stage 5: Compute fbank for alimeeting" mkdir -p data/fbank - python local/compute_fbank_alimeeting.py + python local/compute_fbank_alimeeting.py --perturb-speed True log "Combine features from train splits" lhotse combine data/manifests/cuts_train_{ihm,ihm_rvb,sdm,gss}.jsonl.gz - | shuf |\ gzip -c > data/manifests/cuts_train_all.jsonl.gz diff --git a/egs/wenetspeech/ASR/local/preprocess_wenetspeech.py b/egs/wenetspeech/ASR/local/preprocess_wenetspeech.py index 93ce750f8..5de3c23a9 100755 --- a/egs/wenetspeech/ASR/local/preprocess_wenetspeech.py +++ b/egs/wenetspeech/ASR/local/preprocess_wenetspeech.py @@ -16,6 +16,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import argparse import logging import re from pathlib import Path @@ -24,6 +25,7 @@ from lhotse import CutSet, SupervisionSegment from lhotse.recipes.utils import read_manifests_if_cached from icefall import setup_logger +from icefall.utils import str2bool # Similar text filtering and normalization procedure as in: # https://github.com/SpeechColab/WenetSpeech/blob/main/toolkits/kaldi/wenetspeech_data_prep.sh @@ -45,7 +47,7 @@ def has_no_oov( return oov_pattern.search(sup.text) is None -def preprocess_wenet_speech(): +def preprocess_wenet_speech(perturb_speed: bool = False): src_dir = Path("data/manifests") output_dir = Path("data/fbank") output_dir.mkdir(exist_ok=True) @@ -110,7 +112,7 @@ def preprocess_wenet_speech(): ) # Run data augmentation that needs to be done in the # time domain. - if partition not in ["DEV", "TEST_NET", "TEST_MEETING"]: + if partition not in ["DEV", "TEST_NET", "TEST_MEETING"] and perturb_speed: logging.info( f"Speed perturb for {partition} with factors 0.9 and 1.1 " "(Perturbing may take 8 minutes and saving may take 20 minutes)" @@ -120,10 +122,22 @@ def preprocess_wenet_speech(): cut_set.to_file(raw_cuts_path) +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--perturb-speed", + type=str2bool, + default=False, + help="Enable 0.9 and 1.1 speed perturbation for data augmentation. Default: False.", + ) + return parser.parse_args() + + def main(): setup_logger(log_filename="./log-preprocess-wenetspeech") - preprocess_wenet_speech() + args = get_args() + preprocess_wenet_speech(perturb_speed=args.perturb_speed) logging.info("Done") diff --git a/egs/wenetspeech/ASR/prepare.sh b/egs/wenetspeech/ASR/prepare.sh index f7b521794..097a59a5f 100755 --- a/egs/wenetspeech/ASR/prepare.sh +++ b/egs/wenetspeech/ASR/prepare.sh @@ -91,7 +91,7 @@ fi if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then log "Stage 3: Preprocess WenetSpeech manifest" if [ ! -f data/fbank/.preprocess_complete ]; then - python3 ./local/preprocess_wenetspeech.py + python3 ./local/preprocess_wenetspeech.py --perturb-speed True touch data/fbank/.preprocess_complete fi fi