From 0967cf5b384f7c48722c011cead6b5f529b1fe3f Mon Sep 17 00:00:00 2001
From: Duo Ma <39255927+shanguanma@users.noreply.github.com>
Date: Thu, 25 Aug 2022 10:54:21 +0800
Subject: [PATCH 01/15] fixed no cut_id error in decode_dataset (#549)

* fixed import when quantization is None

Signed-off-by: shanguanma

* fixed no cut_id error in decode_dataset

Signed-off-by: shanguanma

* fixed more than one "#"

Signed-off-by: shanguanma

* fixed code style

Signed-off-by: shanguanma

Signed-off-by: shanguanma
Co-authored-by: shanguanma
---
 .../ASR/pruned_transducer_stateless6/hubert_decode.py | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/hubert_decode.py b/egs/librispeech/ASR/pruned_transducer_stateless6/hubert_decode.py
index 10b0e5edc..49b557814 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless6/hubert_decode.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/hubert_decode.py
@@ -81,18 +81,17 @@ def decode_dataset(
     results = defaultdict(list)

     for batch_idx, batch in enumerate(dl):
-
+        # hyps is a list; every element is the decoding result of a sentence.
         hyps = hubert_model.ctc_greedy_search(batch)

         texts = batch["supervisions"]["text"]
-        assert len(hyps) == len(texts)
+        cut_ids = [cut.id for cut in batch["supervisions"]["cut"]]
         this_batch = []
-
-        for hyp_text, ref_text in zip(hyps, texts):
+        assert len(hyps) == len(texts)
+        for cut_id, hyp_text, ref_text in zip(cut_ids, hyps, texts):
             ref_words = ref_text.split()
             hyp_words = hyp_text.split()
-            this_batch.append((ref_words, hyp_words))
-
+            this_batch.append((cut_id, ref_words, hyp_words))
         results["ctc_greedy_search"].extend(this_batch)

         num_cuts += len(texts)

From 1e31fbcd7def2e52a3edd7c4ead02c4b884ebfe0 Mon Sep 17 00:00:00 2001
From: marcoyang1998 <45973641+marcoyang1998@users.noreply.github.com>
Date: Thu, 25 Aug 2022 12:12:50 +0800
Subject: [PATCH 02/15] Add clamping operation in Eve optimizer for all scalar
 weights (#550)

Clamp all scalar weights to avoid unstable training in some scenarios.
The clamping range is set to (-10, 2). Note that this change may cause
unexpected effects if you resume training from a model that was trained
without clamping.
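The effect on a scalar parameter can be illustrated with the following
minimal sketch (plain PyTorch, mirroring the new code in optim.py; the
parameter value here is made up):

    import torch

    p = torch.nn.Parameter(torch.tensor(5.0))  # a scalar weight

    # ... after the usual Eve update of p ...

    # Constrain scalar weights to a safe range so that a single
    # runaway scalar cannot destabilize training.
    with torch.no_grad():
        if p.numel() == 1:
            p.clamp_(min=-10, max=2)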
--- egs/librispeech/ASR/pruned_transducer_stateless2/optim.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/optim.py b/egs/librispeech/ASR/pruned_transducer_stateless2/optim.py index 432bf8220..041a81f45 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/optim.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/optim.py @@ -164,6 +164,10 @@ class Eve(Optimizer): p.mul_(1 - (weight_decay * is_above_target_rms)) p.addcdiv_(exp_avg, denom, value=-step_size) + # Constrain the range of scalar weights + if p.numel() == 1: + p.clamp_(min=-10, max=2) + return loss From 2636a3dd58301191af0a5bca06088544e91c450c Mon Sep 17 00:00:00 2001 From: rickychanhoyin Date: Sat, 27 Aug 2022 17:23:45 +0800 Subject: [PATCH 03/15] minor changes for correct path names && import module text2segments.py (#552) * Update asr_datamodule.py minor file names correction * minor changes for correct path names && import module text2segments.py --- egs/alimeeting/ASR/local/compute_fbank_alimeeting.py | 2 +- egs/alimeeting/ASR/local/text2segments.py | 2 ++ egs/alimeeting/ASR/prepare.sh | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py b/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py index 2ff473c60..eb3acbbea 100755 --- a/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py +++ b/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py @@ -43,7 +43,7 @@ torch.set_num_interop_threads(1) def compute_fbank_alimeeting(num_mel_bins: int = 80): - src_dir = Path("data/manifests") + src_dir = Path("data/manifests/alimeeting") output_dir = Path("data/fbank") num_jobs = min(15, os.cpu_count()) diff --git a/egs/alimeeting/ASR/local/text2segments.py b/egs/alimeeting/ASR/local/text2segments.py index 3df727c67..7c1019aa8 100644 --- a/egs/alimeeting/ASR/local/text2segments.py +++ b/egs/alimeeting/ASR/local/text2segments.py @@ -30,9 +30,11 @@ with word segmenting: import argparse +import paddle import jieba from tqdm import tqdm +paddle.enable_static() jieba.enable_paddle() diff --git a/egs/alimeeting/ASR/prepare.sh b/egs/alimeeting/ASR/prepare.sh index eb2ac697d..17224bb68 100755 --- a/egs/alimeeting/ASR/prepare.sh +++ b/egs/alimeeting/ASR/prepare.sh @@ -107,7 +107,7 @@ if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then # Prepare text. # Note: in Linux, you can install jq with the following command: # wget -O jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 - gunzip -c data/manifests/alimeeting/supervisions_train.jsonl.gz \ + gunzip -c data/manifests/alimeeting/alimeeting_supervisions_train.jsonl.gz \ | jq ".text" | sed 's/"//g' \ | ./local/text2token.py -t "char" > $lang_char_dir/text From 235eb0746ff4b75a362c6eda11e47e5c57130519 Mon Sep 17 00:00:00 2001 From: kobenaxie <572745565@qq.com> Date: Sat, 27 Aug 2022 17:26:21 +0800 Subject: [PATCH 04/15] fix scaling converter test for decoder(predictor). 
(#553) --- .../ASR/pruned_transducer_stateless3/test_scaling_converter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/test_scaling_converter.py b/egs/librispeech/ASR/pruned_transducer_stateless3/test_scaling_converter.py index a9feea83c..2e131158f 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/test_scaling_converter.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/test_scaling_converter.py @@ -181,7 +181,7 @@ def test_convert_scaled_to_non_scaled(): y = torch.randint(low=1, high=vocab_size - 1, size=(N, U)) d1 = model.decoder(y) - d2 = model.decoder(y) + d2 = converted_model.decoder(y) assert torch.allclose(d1, d2) From d68b8e91202d02924f7df6e5c843d03425657f33 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Sun, 28 Aug 2022 11:17:38 +0800 Subject: [PATCH 05/15] Disable CUDA_LAUNCH_BLOCKING in wenetspeech recipes. (#554) * Disable CUDA_LAUNCH_BLOCKING in wenetspeech recipes. * minor fixes --- .../ASR/local/preprocess_wenetspeech.py | 25 ++++++++++++------- .../ASR/pruned_transducer_stateless2/train.py | 14 ++++------- .../ASR/pruned_transducer_stateless5/train.py | 5 +--- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/egs/wenetspeech/ASR/local/preprocess_wenetspeech.py b/egs/wenetspeech/ASR/local/preprocess_wenetspeech.py index 64733eb15..f4c71230b 100755 --- a/egs/wenetspeech/ASR/local/preprocess_wenetspeech.py +++ b/egs/wenetspeech/ASR/local/preprocess_wenetspeech.py @@ -23,6 +23,8 @@ from pathlib import Path from lhotse import CutSet, SupervisionSegment from lhotse.recipes.utils import read_manifests_if_cached +from icefall import setup_logger + # Similar text filtering and normalization procedure as in: # https://github.com/SpeechColab/WenetSpeech/blob/main/toolkits/kaldi/wenetspeech_data_prep.sh @@ -48,13 +50,17 @@ def preprocess_wenet_speech(): output_dir = Path("data/fbank") output_dir.mkdir(exist_ok=True) + # Note: By default, we preprocess all sub-parts. + # You can delete those that you don't need. + # For instance, if you don't want to use the L subpart, just remove + # the line below containing "L" dataset_parts = ( - "L", - "M", - "S", "DEV", "TEST_NET", "TEST_MEETING", + "S", + "M", + "L", ) logging.info("Loading manifest (may take 10 minutes)") @@ -81,10 +87,13 @@ def preprocess_wenet_speech(): logging.info(f"Normalizing text in {partition}") for sup in m["supervisions"]: text = str(sup.text) - logging.info(f"Original text: {text}") + orig_text = text sup.text = normalize_text(sup.text) text = str(sup.text) - logging.info(f"Normalize text: {text}") + if len(orig_text) != len(text): + logging.info( + f"\nOriginal text vs normalized text:\n{orig_text}\n{text}" + ) # Create long-recording cut manifests. 
logging.info(f"Processing {partition}") @@ -109,12 +118,10 @@ def preprocess_wenet_speech(): def main(): - formatter = ( - "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" - ) - logging.basicConfig(format=formatter, level=logging.INFO) + setup_logger(log_filename="./log-preprocess-wenetspeech") preprocess_wenet_speech() + logging.info("Done") if __name__ == "__main__": diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py b/egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py index 5208dbefe..d3cc7c9c9 100644 --- a/egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py @@ -81,7 +81,6 @@ For training with the S subset: import argparse import logging -import os import warnings from pathlib import Path from shutil import copyfile @@ -120,8 +119,6 @@ LRSchedulerType = Union[ torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler ] -os.environ["CUDA_LAUNCH_BLOCKING"] = "1" - def get_parser(): parser = argparse.ArgumentParser( @@ -162,7 +159,7 @@ def get_parser(): default=0, help="""Resume training from from this epoch. If it is positive, it will load checkpoint from - transducer_stateless2/exp/epoch-{start_epoch-1}.pt + pruned_transducer_stateless2/exp/epoch-{start_epoch-1}.pt """, ) @@ -361,8 +358,8 @@ def get_params() -> AttributeDict: "best_valid_loss": float("inf"), "best_train_epoch": -1, "best_valid_epoch": -1, - "batch_idx_train": 10, - "log_interval": 1, + "batch_idx_train": 0, + "log_interval": 50, "reset_interval": 200, # parameters for conformer "feature_dim": 80, @@ -545,7 +542,7 @@ def compute_loss( warmup: float = 1.0, ) -> Tuple[Tensor, MetricsTracker]: """ - Compute CTC loss given the model and its inputs. + Compute RNN-T loss given the model and its inputs. Args: params: Parameters for training. See :func:`get_params`. @@ -573,7 +570,7 @@ def compute_loss( texts = batch["supervisions"]["text"] y = graph_compiler.texts_to_ids(texts) - if type(y) == list: + if isinstance(y, list): y = k2.RaggedTensor(y).to(device) else: y = y.to(device) @@ -697,7 +694,6 @@ def train_one_epoch( tot_loss = MetricsTracker() for batch_idx, batch in enumerate(train_dl): - params.batch_idx_train += 1 batch_size = len(batch["supervisions"]["text"]) diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py index 5a5925d55..2052e9da7 100755 --- a/egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py @@ -61,7 +61,6 @@ export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" import argparse import copy import logging -import os import warnings from pathlib import Path from shutil import copyfile @@ -103,8 +102,6 @@ LRSchedulerType = Union[ torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler ] -os.environ["CUDA_LAUNCH_BLOCKING"] = "1" - def add_model_arguments(parser: argparse.ArgumentParser): parser.add_argument( @@ -684,7 +681,7 @@ def compute_loss( texts = batch["supervisions"]["text"] y = graph_compiler.texts_to_ids(texts) - if type(y) == list: + if isinstance(y, list): y = k2.RaggedTensor(y).to(device) else: y = y.to(device) From e18fa78c3a010fac4e6d3e83bdcff28197df04dc Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Sun, 28 Aug 2022 11:50:11 +0800 Subject: [PATCH 06/15] Check that read_manifests_if_cached returns a non-empty dict. 
(#555) --- .../ASR/local/compute_fbank_aidatatang_200zh.py | 7 +++++++ egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py | 7 +++++++ egs/aishell/ASR/local/compute_fbank_aishell.py | 7 +++++++ egs/aishell2/ASR/local/compute_fbank_aishell2.py | 7 +++++++ egs/aishell4/ASR/local/compute_fbank_aishell4.py | 7 +++++++ egs/alimeeting/ASR/local/compute_fbank_alimeeting.py | 7 +++++++ egs/gigaspeech/ASR/local/preprocess_gigaspeech.py | 7 +++++++ egs/librispeech/ASR/local/compute_fbank_librispeech.py | 7 +++++++ egs/librispeech/ASR/local/compute_fbank_musan.py | 2 ++ egs/librispeech/ASR/local/preprocess_gigaspeech.py | 7 +++++++ egs/spgispeech/ASR/local/compute_fbank_musan.py | 7 +++++++ egs/tal_csasr/ASR/local/compute_fbank_tal_csasr.py | 7 +++++++ egs/tedlium3/ASR/local/compute_fbank_tedlium.py | 7 +++++++ egs/timit/ASR/local/compute_fbank_timit.py | 7 +++++++ egs/wenetspeech/ASR/local/preprocess_wenetspeech.py | 7 +++++++ egs/yesno/ASR/local/compute_fbank_yesno.py | 7 +++++++ 16 files changed, 107 insertions(+) diff --git a/egs/aidatatang_200zh/ASR/local/compute_fbank_aidatatang_200zh.py b/egs/aidatatang_200zh/ASR/local/compute_fbank_aidatatang_200zh.py index 0b54fcb9a..fb2751c0f 100755 --- a/egs/aidatatang_200zh/ASR/local/compute_fbank_aidatatang_200zh.py +++ b/egs/aidatatang_200zh/ASR/local/compute_fbank_aidatatang_200zh.py @@ -62,6 +62,13 @@ def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) with get_executor() as ex: # Initialize the executor only once. diff --git a/egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py b/egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py index 8cdfad71f..42700a972 100755 --- a/egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py +++ b/egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py @@ -62,6 +62,13 @@ def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) with get_executor() as ex: # Initialize the executor only once. diff --git a/egs/aishell/ASR/local/compute_fbank_aishell.py b/egs/aishell/ASR/local/compute_fbank_aishell.py index e27e35ec5..deab6c809 100755 --- a/egs/aishell/ASR/local/compute_fbank_aishell.py +++ b/egs/aishell/ASR/local/compute_fbank_aishell.py @@ -62,6 +62,13 @@ def compute_fbank_aishell(num_mel_bins: int = 80): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) with get_executor() as ex: # Initialize the executor only once. 
diff --git a/egs/aishell2/ASR/local/compute_fbank_aishell2.py b/egs/aishell2/ASR/local/compute_fbank_aishell2.py index 7bc969a1a..d8d3622bd 100755 --- a/egs/aishell2/ASR/local/compute_fbank_aishell2.py +++ b/egs/aishell2/ASR/local/compute_fbank_aishell2.py @@ -62,6 +62,13 @@ def compute_fbank_aishell2(num_mel_bins: int = 80): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) with get_executor() as ex: # Initialize the executor only once. diff --git a/egs/aishell4/ASR/local/compute_fbank_aishell4.py b/egs/aishell4/ASR/local/compute_fbank_aishell4.py index 09f885636..3f50d9e3e 100755 --- a/egs/aishell4/ASR/local/compute_fbank_aishell4.py +++ b/egs/aishell4/ASR/local/compute_fbank_aishell4.py @@ -63,6 +63,13 @@ def compute_fbank_aishell4(num_mel_bins: int = 80): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) with get_executor() as ex: # Initialize the executor only once. diff --git a/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py b/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py index eb3acbbea..af926aa53 100755 --- a/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py +++ b/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py @@ -63,6 +63,13 @@ def compute_fbank_alimeeting(num_mel_bins: int = 80): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) with get_executor() as ex: # Initialize the executor only once. diff --git a/egs/gigaspeech/ASR/local/preprocess_gigaspeech.py b/egs/gigaspeech/ASR/local/preprocess_gigaspeech.py index 0cec82ad5..48d10a157 100755 --- a/egs/gigaspeech/ASR/local/preprocess_gigaspeech.py +++ b/egs/gigaspeech/ASR/local/preprocess_gigaspeech.py @@ -62,6 +62,13 @@ def preprocess_giga_speech(): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + for partition, m in manifests.items(): logging.info(f"Processing {partition}") raw_cuts_path = output_dir / f"cuts_{partition}_raw.jsonl.gz" diff --git a/egs/librispeech/ASR/local/compute_fbank_librispeech.py b/egs/librispeech/ASR/local/compute_fbank_librispeech.py index 642d9fd32..f3e15e039 100755 --- a/egs/librispeech/ASR/local/compute_fbank_librispeech.py +++ b/egs/librispeech/ASR/local/compute_fbank_librispeech.py @@ -66,6 +66,13 @@ def compute_fbank_librispeech(): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) with get_executor() as ex: # Initialize the executor only once. 
diff --git a/egs/librispeech/ASR/local/compute_fbank_musan.py b/egs/librispeech/ASR/local/compute_fbank_musan.py index fef372129..056da29e5 100755 --- a/egs/librispeech/ASR/local/compute_fbank_musan.py +++ b/egs/librispeech/ASR/local/compute_fbank_musan.py @@ -65,6 +65,8 @@ def compute_fbank_musan(): assert len(manifests) == len(dataset_parts), ( len(manifests), len(dataset_parts), + list(manifests.keys()), + dataset_parts, ) musan_cuts_path = output_dir / "musan_cuts.jsonl.gz" diff --git a/egs/librispeech/ASR/local/preprocess_gigaspeech.py b/egs/librispeech/ASR/local/preprocess_gigaspeech.py index 0f4ae820b..077f23039 100644 --- a/egs/librispeech/ASR/local/preprocess_gigaspeech.py +++ b/egs/librispeech/ASR/local/preprocess_gigaspeech.py @@ -68,6 +68,13 @@ def preprocess_giga_speech(): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + for partition, m in manifests.items(): logging.info(f"Processing {partition}") raw_cuts_path = output_dir / f"{prefix}_cuts_{partition}_raw.{suffix}" diff --git a/egs/spgispeech/ASR/local/compute_fbank_musan.py b/egs/spgispeech/ASR/local/compute_fbank_musan.py index 70372af2b..6cb8b65ae 100755 --- a/egs/spgispeech/ASR/local/compute_fbank_musan.py +++ b/egs/spgispeech/ASR/local/compute_fbank_musan.py @@ -69,6 +69,13 @@ def compute_fbank_musan(): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + musan_cuts_path = src_dir / "cuts_musan.jsonl.gz" if musan_cuts_path.is_file(): diff --git a/egs/tal_csasr/ASR/local/compute_fbank_tal_csasr.py b/egs/tal_csasr/ASR/local/compute_fbank_tal_csasr.py index 367e098f7..4582609ac 100755 --- a/egs/tal_csasr/ASR/local/compute_fbank_tal_csasr.py +++ b/egs/tal_csasr/ASR/local/compute_fbank_tal_csasr.py @@ -62,6 +62,13 @@ def compute_fbank_tal_csasr(num_mel_bins: int = 80): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) with get_executor() as ex: # Initialize the executor only once. diff --git a/egs/tedlium3/ASR/local/compute_fbank_tedlium.py b/egs/tedlium3/ASR/local/compute_fbank_tedlium.py index e324b5025..327962a79 100755 --- a/egs/tedlium3/ASR/local/compute_fbank_tedlium.py +++ b/egs/tedlium3/ASR/local/compute_fbank_tedlium.py @@ -62,6 +62,13 @@ def compute_fbank_tedlium(): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) with get_executor() as ex: # Initialize the executor only once. diff --git a/egs/timit/ASR/local/compute_fbank_timit.py b/egs/timit/ASR/local/compute_fbank_timit.py index 094769c8c..f25786a0c 100644 --- a/egs/timit/ASR/local/compute_fbank_timit.py +++ b/egs/timit/ASR/local/compute_fbank_timit.py @@ -63,6 +63,13 @@ def compute_fbank_timit(): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) with get_executor() as ex: # Initialize the executor only once. 
diff --git a/egs/wenetspeech/ASR/local/preprocess_wenetspeech.py b/egs/wenetspeech/ASR/local/preprocess_wenetspeech.py index f4c71230b..817969c47 100755 --- a/egs/wenetspeech/ASR/local/preprocess_wenetspeech.py +++ b/egs/wenetspeech/ASR/local/preprocess_wenetspeech.py @@ -72,6 +72,13 @@ def preprocess_wenet_speech(): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + for partition, m in manifests.items(): logging.info(f"Processing {partition}") raw_cuts_path = output_dir / f"cuts_{partition}_raw.jsonl.gz" diff --git a/egs/yesno/ASR/local/compute_fbank_yesno.py b/egs/yesno/ASR/local/compute_fbank_yesno.py index fb48b6f8e..9a4e8a36f 100755 --- a/egs/yesno/ASR/local/compute_fbank_yesno.py +++ b/egs/yesno/ASR/local/compute_fbank_yesno.py @@ -47,6 +47,13 @@ def compute_fbank_yesno(): ) assert manifests is not None + assert len(manifests) == len(dataset_parts), ( + len(manifests), + len(dataset_parts), + list(manifests.keys()), + dataset_parts, + ) + extractor = Fbank( FbankConfig(sampling_rate=8000, num_mel_bins=num_mel_bins) ) From 9e24642faf985e50425b78caef5517c87c0c7efc Mon Sep 17 00:00:00 2001 From: shcxlee <113081290+shcxlee@users.noreply.github.com> Date: Fri, 9 Sep 2022 21:32:49 -0500 Subject: [PATCH 07/15] Modified prepare_transcripts.py and preprare_lexicon.py of tedlium3 recipe (#567) --- egs/tedlium3/ASR/local/prepare_lexicon.py | 23 ++++++++----------- egs/tedlium3/ASR/local/prepare_transcripts.py | 15 +++++------- 2 files changed, 16 insertions(+), 22 deletions(-) diff --git a/egs/tedlium3/ASR/local/prepare_lexicon.py b/egs/tedlium3/ASR/local/prepare_lexicon.py index 59377b5aa..35dd332e8 100755 --- a/egs/tedlium3/ASR/local/prepare_lexicon.py +++ b/egs/tedlium3/ASR/local/prepare_lexicon.py @@ -23,8 +23,8 @@ consisting of supervisions_train.json and does the following: 1. Generate lexicon_words.txt. """ +import lhotse import argparse -import json import logging from pathlib import Path @@ -60,20 +60,17 @@ def prepare_lexicon(manifests_dir: str, lang_dir: str): """ words = set() - supervisions_train = Path(manifests_dir) / "supervisions_train.json" lexicon = Path(lang_dir) / "lexicon_words.txt" + sups = lhotse.load_manifest( + f"{manifests_dir}/tedlium_supervisions_train.jsonl.gz" + ) + for s in sups: + # list the words units and filter the empty item + words_list = list(filter(None, s.text.split())) - logging.info(f"Loading {supervisions_train}!") - with open(supervisions_train, "r") as load_f: - load_dicts = json.load(load_f) - for load_dict in load_dicts: - text = load_dict["text"] - # list the words units and filter the empty item - words_list = list(filter(None, text.split())) - - for word in words_list: - if word not in words and word != "": - words.add(word) + for word in words_list: + if word not in words and word != "": + words.add(word) with open(lexicon, "w") as f: for word in sorted(words): diff --git a/egs/tedlium3/ASR/local/prepare_transcripts.py b/egs/tedlium3/ASR/local/prepare_transcripts.py index 416264ea0..1039ac5bb 100755 --- a/egs/tedlium3/ASR/local/prepare_transcripts.py +++ b/egs/tedlium3/ASR/local/prepare_transcripts.py @@ -23,8 +23,8 @@ consisting of supervisions_train.json and does the following: 1. Generate train.text. 
""" +import lhotse import argparse -import json import logging from pathlib import Path @@ -60,15 +60,12 @@ def prepare_transcripts(manifests_dir: str, lang_dir: str): """ texts = [] - supervisions_train = Path(manifests_dir) / "supervisions_train.json" train_text = Path(lang_dir) / "train.text" - - logging.info(f"Loading {supervisions_train}!") - with open(supervisions_train, "r") as load_f: - load_dicts = json.load(load_f) - for load_dict in load_dicts: - text = load_dict["text"] - texts.append(text) + sups = lhotse.load_manifest( + f"{manifests_dir}/tedlium_supervisions_train.jsonl.gz" + ) + for s in sups: + texts.append(s.text) with open(train_text, "w") as f: for text in texts: From 145c44f71095f174a6d994a479074f105a4fb6ad Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Tue, 13 Sep 2022 10:59:27 +0800 Subject: [PATCH 08/15] Use modified ctc topo when vocab size is > 500 (#568) --- egs/librispeech/ASR/conformer_ctc/pretrained.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/egs/librispeech/ASR/conformer_ctc/pretrained.py b/egs/librispeech/ASR/conformer_ctc/pretrained.py index 28724e1eb..a2c0a5486 100755 --- a/egs/librispeech/ASR/conformer_ctc/pretrained.py +++ b/egs/librispeech/ASR/conformer_ctc/pretrained.py @@ -322,7 +322,7 @@ def main(): H = k2.ctc_topo( max_token=max_token_id, - modified=False, + modified=params.num_classes > 500, device=device, ) From 97b3fc53aa2c8606e923631fc2f3967855259d35 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Fri, 16 Sep 2022 18:40:25 +0800 Subject: [PATCH 09/15] Add LSTM for the multi-dataset setup. (#558) * Add LSTM for the multi-dataset setup. * Add results * fix style issues * add missing file --- .flake8 | 8 + egs/librispeech/ASR/README.md | 1 + egs/librispeech/ASR/RESULTS.md | 70 + .../ASR/lstm_transducer_stateless/lstm.py | 2 +- .../lstm_transducer_stateless2/__init__.py | 0 .../asr_datamodule.py | 1 + .../lstm_transducer_stateless2/beam_search.py | 1 + .../ASR/lstm_transducer_stateless2/decode.py | 827 +++++++++++ .../ASR/lstm_transducer_stateless2/decoder.py | 1 + .../encoder_interface.py | 1 + .../ASR/lstm_transducer_stateless2/export.py | 400 ++++++ .../lstm_transducer_stateless2/gigaspeech.py | 1 + .../jit_pretrained.py | 323 +++++ .../ASR/lstm_transducer_stateless2/joiner.py | 1 + .../lstm_transducer_stateless2/librispeech.py | 1 + .../ASR/lstm_transducer_stateless2/lstm.py | 1 + .../ASR/lstm_transducer_stateless2/model.py | 241 ++++ .../ASR/lstm_transducer_stateless2/optim.py | 1 + .../lstm_transducer_stateless2/pretrained.py | 355 +++++ .../ASR/lstm_transducer_stateless2/scaling.py | 1 + .../scaling_converter.py | 1 + .../ASR/lstm_transducer_stateless2/train.py | 1273 +++++++++++++++++ .../scaling_converter.py | 4 +- .../ASR/pruned_transducer_stateless3/train.py | 2 +- 24 files changed, 3513 insertions(+), 4 deletions(-) create mode 100644 egs/librispeech/ASR/lstm_transducer_stateless2/__init__.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless2/asr_datamodule.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless2/beam_search.py create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless2/decode.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless2/decoder.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless2/encoder_interface.py create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless2/export.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless2/gigaspeech.py create mode 100755 
egs/librispeech/ASR/lstm_transducer_stateless2/jit_pretrained.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless2/joiner.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless2/librispeech.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless2/lstm.py create mode 100644 egs/librispeech/ASR/lstm_transducer_stateless2/model.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless2/optim.py create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless2/pretrained.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless2/scaling.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless2/scaling_converter.py create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless2/train.py diff --git a/.flake8 b/.flake8 index 67c6c164d..22cd63b3d 100644 --- a/.flake8 +++ b/.flake8 @@ -22,3 +22,11 @@ exclude = **/data/**, icefall/shared/make_kn_lm.py, icefall/__init__.py + +ignore = + # E203 white space before ":" + E203, + # W503 line break before binary operator + W503, + # E226 missing whitespace around arithmetic operator + E226, diff --git a/egs/librispeech/ASR/README.md b/egs/librispeech/ASR/README.md index f590bc837..570d1ba1f 100644 --- a/egs/librispeech/ASR/README.md +++ b/egs/librispeech/ASR/README.md @@ -26,6 +26,7 @@ The following table lists the differences among them. | `conv_emformer_transducer_stateless` | ConvEmformer | Embedding + Conv1d | Using ConvEmformer for streaming ASR + mechanisms in reworked model | | `conv_emformer_transducer_stateless2` | ConvEmformer | Embedding + Conv1d | Using ConvEmformer with simplified memory for streaming ASR + mechanisms in reworked model | | `lstm_transducer_stateless` | LSTM | Embedding + Conv1d | Using LSTM with mechanisms in reworked model | +| `lstm_transducer_stateless2` | LSTM | Embedding + Conv1d | Using LSTM with mechanisms in reworked model + gigaspeech (multi-dataset setup) | The decoder in `transducer_stateless` is modified from the paper [Rnn-Transducer with Stateless Prediction Network](https://ieeexplore.ieee.org/document/9054419/). diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md index 1c6a350f0..8a27b4b63 100644 --- a/egs/librispeech/ASR/RESULTS.md +++ b/egs/librispeech/ASR/RESULTS.md @@ -1,5 +1,75 @@ ## Results +#### LibriSpeech BPE training results (Pruned Stateless LSTM RNN-T + multi-dataset) + +[lstm_transducer_stateless2](./lstm_transducer_stateless2) + +See for more details. + + +The WERs are: + +| | test-clean | test-other | comment | +|-------------------------------------|------------|------------|-------------------------| +| greedy search (max sym per frame 1) | 2.78 | 7.36 | --iter 468000 --avg 16 | +| modified_beam_search | 2.73 | 7.15 | --iter 468000 --avg 16 | +| fast_beam_search | 2.76 | 7.31 | --iter 468000 --avg 16 | +| greedy search (max sym per frame 1) | 2.77 | 7.35 | --iter 472000 --avg 18 | +| modified_beam_search | 2.75 | 7.08 | --iter 472000 --avg 18 | +| fast_beam_search | 2.77 | 7.29 | --iter 472000 --avg 18 | + +The training command is: + +```bash +#!/usr/bin/env bash + +export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" + +./lstm_transducer_stateless2/train.py \ + --world-size 8 \ + --num-epochs 35 \ + --start-epoch 1 \ + --full-libri 1 \ + --exp-dir lstm_transducer_stateless2/exp \ + --max-duration 500 \ + --use-fp16 0 \ + --lr-epochs 10 \ + --num-workers 2 \ + --giga-prob 0.9 +``` +**Note**: It was killed manually after getting `epoch-18.pt`. Also, we resumed +training after getting `epoch-9.pt`. 
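+
+Resuming after `epoch-9.pt` presumably amounts to rerunning the same
+training command with `--start-epoch 10`, which loads `epoch-9.pt`
+before continuing (a sketch, not the exact command that was used):
+
+```bash
+./lstm_transducer_stateless2/train.py \
+  --world-size 8 \
+  --num-epochs 35 \
+  --start-epoch 10 \
+  --full-libri 1 \
+  --exp-dir lstm_transducer_stateless2/exp \
+  --max-duration 500 \
+  --use-fp16 0 \
+  --lr-epochs 10 \
+  --num-workers 2 \
+  --giga-prob 0.9
+```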
+ +The tensorboard log can be found at + + +The decoding command is +```bash +for m in greedy_search fast_beam_search modified_beam_search; do + for iter in 472000; do + for avg in 8 10 12 14 16 18; do + ./lstm_transducer_stateless2/decode.py \ + --iter $iter \ + --avg $avg \ + --exp-dir lstm_transducer_stateless2/exp \ + --max-duration 600 \ + --num-encoder-layers 12 \ + --rnn-hidden-size 1024 \ + --decoding-method $m \ + --use-averaged-model True \ + --beam 4 \ + --max-contexts 4 \ + --max-states 8 \ + --beam-size 4 + done + done +done +``` + +Pretrained models, training logs, decoding logs, and decoding results +are available at + + #### LibriSpeech BPE training results (Pruned Stateless LSTM RNN-T) [lstm_transducer_stateless](./lstm_transducer_stateless) diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/lstm.py b/egs/librispeech/ASR/lstm_transducer_stateless/lstm.py index 6ce966b13..0d268ab07 100644 --- a/egs/librispeech/ASR/lstm_transducer_stateless/lstm.py +++ b/egs/librispeech/ASR/lstm_transducer_stateless/lstm.py @@ -773,7 +773,7 @@ class RandomCombine(nn.Module): """ logprobs = ( torch.randn(num_frames, self.num_inputs, dtype=dtype, device=device) - * self.stddev + * self.stddev # noqa ) logprobs[:, -1] += self.final_log_weight return logprobs.softmax(dim=1) diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/__init__.py b/egs/librispeech/ASR/lstm_transducer_stateless2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/asr_datamodule.py b/egs/librispeech/ASR/lstm_transducer_stateless2/asr_datamodule.py new file mode 120000 index 000000000..3ba9ada4f --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/asr_datamodule.py @@ -0,0 +1 @@ +../pruned_transducer_stateless3/asr_datamodule.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/beam_search.py b/egs/librispeech/ASR/lstm_transducer_stateless2/beam_search.py new file mode 120000 index 000000000..8554e44cc --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/beam_search.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/beam_search.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/decode.py b/egs/librispeech/ASR/lstm_transducer_stateless2/decode.py new file mode 100755 index 000000000..21ae563cb --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/decode.py @@ -0,0 +1,827 @@ +#!/usr/bin/env python3 +# +# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Usage: +(1) greedy search +./lstm_transducer_stateless2/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir ./lstm_transducer_stateless2/exp \ + --max-duration 600 \ + --decoding-method greedy_search + +(2) beam search (not recommended) +./lstm_transducer_stateless2/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir ./lstm_transducer_stateless2/exp \ + --max-duration 600 \ + --decoding-method beam_search \ + --beam-size 4 + +(3) modified beam search +./lstm_transducer_stateless2/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir ./lstm_transducer_stateless2/exp \ + --max-duration 600 \ + --decoding-method modified_beam_search \ + --beam-size 4 + +(4) fast beam search (one best) +./lstm_transducer_stateless2/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir ./lstm_transducer_stateless2/exp \ + --max-duration 600 \ + --decoding-method fast_beam_search \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 + +(5) fast beam search (nbest) +./lstm_transducer_stateless2/decode.py \ + --epoch 30 \ + --avg 15 \ + --exp-dir ./pruned_transducer_stateless3/exp \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 + +(6) fast beam search (nbest oracle WER) +./lstm_transducer_stateless2/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir ./lstm_transducer_stateless2/exp \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest_oracle \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 + +(7) fast beam search (with LG) +./lstm_transducer_stateless2/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir ./lstm_transducer_stateless2/exp \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest_LG \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 +""" + + +import argparse +import logging +import math +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import AsrDataModule +from beam_search import ( + beam_search, + fast_beam_search_nbest, + fast_beam_search_nbest_LG, + fast_beam_search_nbest_oracle, + fast_beam_search_one_best, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from librispeech import LibriSpeech +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.lexicon import Lexicon +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + +LOG_EPS = math.log(1e-10) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=30, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 1. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. 
Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="lstm_transducer_stateless2/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--lang-dir", + type=Path, + default="data/lang_bpe_500", + help="The lang dir containing word table and LG graph", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + - fast_beam_search + - fast_beam_search_nbest + - fast_beam_search_nbest_oracle + - fast_beam_search_nbest_LG + If you use fast_beam_search_nbest_LG, you have to specify + `--lang-dir`, which should contain `LG.pt`. + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An integer indicating how many candidates we will keep for each + frame. Used only when --decoding-method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=20.0, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. + Used only when --decoding-method is fast_beam_search, + fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle + """, + ) + + parser.add_argument( + "--ngram-lm-scale", + type=float, + default=0.01, + help=""" + Used only when --decoding_method is fast_beam_search_nbest_LG. + It specifies the scale for n-gram LM scores. + """, + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=8, + help="""Used only when --decoding-method is + fast_beam_search, fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=64, + help="""Used only when --decoding-method is + fast_beam_search, fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", + ) + + parser.add_argument( + "--num-paths", + type=int, + default=200, + help="""Number of paths for nbest decoding. + Used only when the decoding method is fast_beam_search_nbest, + fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--nbest-scale", + type=float, + default=0.5, + help="""Scale applied to lattice scores when computing nbest paths. 
+ Used only when the decoding method is fast_beam_search_nbest, + fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""", + ) + + add_model_arguments(parser) + + return parser + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + batch: dict, + word_table: Optional[k2.SymbolTable] = None, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if greedy_search is used, it would be "greedy_search" + If beam search with a beam size of 7 is used, it would be + "beam_7" + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + word_table: + The word symbol table. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or LG, Used + only when --decoding_method is fast_beam_search, fast_beam_search_nbest, + fast_beam_search_nbest_oracle, and fast_beam_search_nbest_LG. + Returns: + Return the decoding result. See above description for the format of + the returned dict. + """ + device = next(model.parameters()).device + feature = batch["inputs"] + assert feature.ndim == 3 + + feature = feature.to(device) + # at entry, feature is (N, T, C) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + # tail padding here to alleviate the tail deletion problem + num_tail_padded_frames = 35 + feature = torch.nn.functional.pad( + feature, + (0, 0, 0, num_tail_padded_frames), + mode="constant", + value=LOG_EPS, + ) + feature_lens += num_tail_padded_frames + + encoder_out, encoder_out_lens, _ = model.encoder( + x=feature, x_lens=feature_lens + ) + + hyps = [] + + if params.decoding_method == "fast_beam_search": + hyp_tokens = fast_beam_search_one_best( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + elif params.decoding_method == "fast_beam_search_nbest_LG": + hyp_tokens = fast_beam_search_nbest_LG( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + nbest_scale=params.nbest_scale, + ) + for hyp in hyp_tokens: + hyps.append([word_table[i] for i in hyp]) + elif params.decoding_method == "fast_beam_search_nbest": + hyp_tokens = fast_beam_search_nbest( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + nbest_scale=params.nbest_scale, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + elif params.decoding_method == "fast_beam_search_nbest_oracle": + hyp_tokens = fast_beam_search_nbest_oracle( + model=model, + decoding_graph=decoding_graph, + 
encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + ref_texts=sp.encode(supervisions["text"]), + nbest_scale=params.nbest_scale, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + elif ( + params.decoding_method == "greedy_search" + and params.max_sym_per_frame == 1 + ): + hyp_tokens = greedy_search_batch( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + elif params.decoding_method == "modified_beam_search": + hyp_tokens = modified_beam_search( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam_size, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + else: + batch_size = encoder_out.size(0) + + for i in range(batch_size): + # fmt: off + encoder_out_i = encoder_out[i:i + 1, :encoder_out_lens[i]] + # fmt: on + if params.decoding_method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.decoding_method == "beam_search": + hyp = beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + hyps.append(sp.decode(hyp).split()) + + if params.decoding_method == "greedy_search": + return {"greedy_search": hyps} + elif "fast_beam_search" in params.decoding_method: + key = f"beam_{params.beam}_" + key += f"max_contexts_{params.max_contexts}_" + key += f"max_states_{params.max_states}" + if "nbest" in params.decoding_method: + key += f"_num_paths_{params.num_paths}_" + key += f"nbest_scale_{params.nbest_scale}" + if "LG" in params.decoding_method: + key += f"_ngram_lm_scale_{params.ngram_lm_scale}" + + return {key: hyps} + else: + return {f"beam_size_{params.beam_size}": hyps} + + +def decode_dataset( + dl: torch.utils.data.DataLoader, + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + word_table: Optional[k2.SymbolTable] = None, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[Tuple[List[str], List[str]]]]: + """Decode dataset. + + Args: + dl: + PyTorch's dataloader containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + word_table: + The word symbol table. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or LG, Used + only when --decoding_method is fast_beam_search, fast_beam_search_nbest, + fast_beam_search_nbest_oracle, and fast_beam_search_nbest_LG. + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" 
+ + if params.decoding_method == "greedy_search": + log_interval = 50 + else: + log_interval = 20 + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] + + hyps_dict = decode_one_batch( + params=params, + model=model, + sp=sp, + decoding_graph=decoding_graph, + word_table=word_table, + batch=batch, + ) + + for name, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + ref_words = ref_text.split() + this_batch.append((cut_id, ref_words, hyp_words)) + + results[name].extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % log_interval == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info( + f"batch {batch_str}, cuts processed until now is {num_cuts}" + ) + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[int], List[int]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + results = sorted(results) + store_transcripts(filename=recog_path, texts=results) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. + errs_filename = ( + params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results, enable_log=True + ) + test_set_wers[key] = wer + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = ( + params.res_dir + / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_info, "w") as f: + print("settings\tWER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, WER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + AsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + assert params.decoding_method in ( + "greedy_search", + "beam_search", + "fast_beam_search", + "fast_beam_search_nbest", + "fast_beam_search_nbest_LG", + "fast_beam_search_nbest_oracle", + "modified_beam_search", + ) + params.res_dir = params.exp_dir / params.decoding_method + + if params.iter > 0: + params.suffix = f"iter-{params.iter}-avg-{params.avg}" + else: + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + + if "fast_beam_search" in params.decoding_method: + params.suffix += f"-beam-{params.beam}" + params.suffix += f"-max-contexts-{params.max_contexts}" + params.suffix += f"-max-states-{params.max_states}" + if "nbest" in params.decoding_method: + params.suffix += f"-nbest-scale-{params.nbest_scale}" + params.suffix += f"-num-paths-{params.num_paths}" + if "LG" in params.decoding_method: + params.suffix += f"-ngram-lm-scale-{params.ngram_lm_scale}" + elif "beam_search" in params.decoding_method: + params.suffix += ( + f"-{params.decoding_method}-beam-size-{params.beam_size}" + ) + else: + 
params.suffix += f"-context-{params.context_size}" + params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}" + + if params.use_averaged_model: + params.suffix += "-use-averaged-model" + + setup_logger(f"{params.res_dir}/log-decode-{params.suffix}") + logging.info("Decoding started") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"Device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # and are defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.unk_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params, enable_giga=False) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict( + average_checkpoints(filenames, device=device), strict=False + ) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict( + average_checkpoints(filenames, device=device), + strict=False, + ) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ), + strict=False, + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ), + strict=False, + ) + + model.to(device) + model.eval() + + if "fast_beam_search" in params.decoding_method: + if params.decoding_method == "fast_beam_search_nbest_LG": + lexicon = Lexicon(params.lang_dir) + word_table = lexicon.word_table + lg_filename = params.lang_dir / "LG.pt" + logging.info(f"Loading {lg_filename}") + decoding_graph = k2.Fsa.from_dict( + torch.load(lg_filename, map_location=device) + ) + 
decoding_graph.scores *= params.ngram_lm_scale + else: + word_table = None + decoding_graph = k2.trivial_graph( + params.vocab_size - 1, device=device + ) + else: + decoding_graph = None + word_table = None + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + # we need cut ids to display recognition results. + args.return_cuts = True + asr_datamodule = AsrDataModule(args) + librispeech = LibriSpeech(manifest_dir=args.manifest_dir) + + test_clean_cuts = librispeech.test_clean_cuts() + test_other_cuts = librispeech.test_other_cuts() + + test_clean_dl = asr_datamodule.test_dataloaders(test_clean_cuts) + test_other_dl = asr_datamodule.test_dataloaders(test_other_cuts) + + test_sets = ["test-clean", "test-other"] + test_dl = [test_clean_dl, test_other_dl] + + for test_set, test_dl in zip(test_sets, test_dl): + results_dict = decode_dataset( + dl=test_dl, + params=params, + model=model, + sp=sp, + word_table=word_table, + decoding_graph=decoding_graph, + ) + + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/decoder.py b/egs/librispeech/ASR/lstm_transducer_stateless2/decoder.py new file mode 120000 index 000000000..0793c5709 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/decoder.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/decoder.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/encoder_interface.py b/egs/librispeech/ASR/lstm_transducer_stateless2/encoder_interface.py new file mode 120000 index 000000000..aa5d0217a --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/encoder_interface.py @@ -0,0 +1 @@ +../transducer_stateless/encoder_interface.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/export.py b/egs/librispeech/ASR/lstm_transducer_stateless2/export.py new file mode 100755 index 000000000..a1ed6b3b1 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/export.py @@ -0,0 +1,400 @@ +#!/usr/bin/env python3 +# flake8: noqa +# +# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang, Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script converts several saved checkpoints +# to a single one using model averaging. +""" + +Usage: + +(1) Export to torchscript model using torch.jit.trace() + +./lstm_transducer_stateless2/export.py \ + --exp-dir ./lstm_transducer_stateless2/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 35 \ + --avg 10 \ + --jit-trace 1 + +It will generate 3 files: `encoder_jit_trace.pt`, +`decoder_jit_trace.pt`, and `joiner_jit_trace.pt`. 
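+
+The traced models can later be loaded with torch.jit.load (a sketch;
+see ./jit_pretrained.py for the full usage):
+
+    import torch
+
+    encoder = torch.jit.load("encoder_jit_trace.pt")
+    decoder = torch.jit.load("decoder_jit_trace.pt")
+    joiner = torch.jit.load("joiner_jit_trace.pt")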
+ +(2) Export `model.state_dict()` + +./lstm_transducer_stateless2/export.py \ + --exp-dir ./lstm_transducer_stateless2/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 35 \ + --avg 10 + +It will generate a file `pretrained.pt` in the given `exp_dir`. You can later +load it by `icefall.checkpoint.load_checkpoint()`. + +To use the generated file with `lstm_transducer_stateless2/decode.py`, +you can do: + + cd /path/to/exp_dir + ln -s pretrained.pt epoch-9999.pt + + cd /path/to/egs/librispeech/ASR + ./lstm_transducer_stateless2/decode.py \ + --exp-dir ./lstm_transducer_stateless2/exp \ + --epoch 9999 \ + --avg 1 \ + --max-duration 600 \ + --decoding-method greedy_search \ + --bpe-model data/lang_bpe_500/bpe.model + +Check ./pretrained.py for its usage. + +Note: If you don't want to train a model from scratch, we have +provided one for you. You can get it at + +https://huggingface.co/csukuangfj/icefall-asr-librispeech-lstm-transducer-stateless2-2022-09-03 + +with the following commands: + + sudo apt-get install git-lfs + git lfs install + git clone https://huggingface.co/csukuangfj/icefall-asr-librispeech-lstm-transducer-stateless2-2022-09-03 + # You will find the pre-trained models in icefall-asr-librispeech-lstm-transducer-stateless2-2022-09-03/exp +""" + +import argparse +import logging +from pathlib import Path + +import sentencepiece as spm +import torch +import torch.nn as nn +from scaling_converter import convert_scaled_to_non_scaled +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.utils import str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=28, + help="""It specifies the checkpoint to use for averaging. + Note: Epoch counts from 0. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless3/exp", + help="""It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--jit-trace", + type=str2bool, + default=False, + help="""True to save a model after applying torch.jit.trace. 
+ It will generate 3 files: + - encoder_jit_trace.pt + - decoder_jit_trace.pt + - joiner_jit_trace.pt + + Check ./jit_pretrained.py for how to use them. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + add_model_arguments(parser) + + return parser + + +def export_encoder_model_jit_trace( + encoder_model: nn.Module, + encoder_filename: str, +) -> None: + """Export the given encoder model with torch.jit.trace() + + Note: The warmup argument is fixed to 1. + + Args: + encoder_model: + The input encoder model + encoder_filename: + The filename to save the exported model. + """ + x = torch.zeros(1, 100, 80, dtype=torch.float32) + x_lens = torch.tensor([100], dtype=torch.int64) + states = encoder_model.get_init_states() + + traced_model = torch.jit.trace(encoder_model, (x, x_lens, states)) + traced_model.save(encoder_filename) + logging.info(f"Saved to {encoder_filename}") + + +def export_decoder_model_jit_trace( + decoder_model: nn.Module, + decoder_filename: str, +) -> None: + """Export the given decoder model with torch.jit.trace() + + Note: The argument need_pad is fixed to False. + + Args: + decoder_model: + The input decoder model + decoder_filename: + The filename to save the exported model. + """ + y = torch.zeros(10, decoder_model.context_size, dtype=torch.int64) + need_pad = torch.tensor([False]) + + traced_model = torch.jit.trace(decoder_model, (y, need_pad)) + traced_model.save(decoder_filename) + logging.info(f"Saved to {decoder_filename}") + + +def export_joiner_model_jit_trace( + joiner_model: nn.Module, + joiner_filename: str, +) -> None: + """Export the given joiner model with torch.jit.trace() + + Note: The argument project_input is fixed to True. A user should not + project the encoder_out/decoder_out by himself/herself. The exported joiner + will do that for the user. + + Args: + joiner_model: + The input joiner model + joiner_filename: + The filename to save the exported model. 
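+
+      Example (a sketch; 512 is a placeholder for the actual projection
+      input dims, which are read from the model below):
+
+        joiner = torch.jit.load("joiner_jit_trace.pt")
+        logit = joiner(torch.rand(1, 512), torch.rand(1, 512))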
+
+    """
+    encoder_out_dim = joiner_model.encoder_proj.weight.shape[1]
+    decoder_out_dim = joiner_model.decoder_proj.weight.shape[1]
+    encoder_out = torch.rand(1, encoder_out_dim, dtype=torch.float32)
+    decoder_out = torch.rand(1, decoder_out_dim, dtype=torch.float32)
+
+    traced_model = torch.jit.trace(joiner_model, (encoder_out, decoder_out))
+    traced_model.save(joiner_filename)
+    logging.info(f"Saved to {joiner_filename}")
+
+
+@torch.no_grad()
+def main():
+    args = get_parser().parse_args()
+    args.exp_dir = Path(args.exp_dir)
+
+    params = get_params()
+    params.update(vars(args))
+
+    device = torch.device("cpu")
+    if torch.cuda.is_available():
+        device = torch.device("cuda", 0)
+
+    logging.info(f"device: {device}")
+
+    sp = spm.SentencePieceProcessor()
+    sp.load(params.bpe_model)
+
+    # <blk> is defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+    params.vocab_size = sp.get_piece_size()
+
+    logging.info(params)
+
+    logging.info("About to create model")
+    model = get_transducer_model(params, enable_giga=False)
+
+    num_param = sum([p.numel() for p in model.parameters()])
+    logging.info(f"Number of model parameters: {num_param}")
+
+    if not params.use_averaged_model:
+        if params.iter > 0:
+            filenames = find_checkpoints(
+                params.exp_dir, iteration=-params.iter
+            )[: params.avg]
+            if len(filenames) == 0:
+                raise ValueError(
+                    f"No checkpoints found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            elif len(filenames) < params.avg:
+                raise ValueError(
+                    f"Not enough checkpoints ({len(filenames)}) found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            logging.info(f"averaging {filenames}")
+            model.to(device)
+            model.load_state_dict(
+                average_checkpoints(filenames, device=device),
+                strict=False,
+            )
+        elif params.avg == 1:
+            load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+        else:
+            start = params.epoch - params.avg + 1
+            filenames = []
+            for i in range(start, params.epoch + 1):
+                if i >= 1:
+                    filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+            logging.info(f"averaging {filenames}")
+            model.to(device)
+            model.load_state_dict(
+                average_checkpoints(filenames, device=device),
+                strict=False,
+            )
+    else:
+        if params.iter > 0:
+            filenames = find_checkpoints(
+                params.exp_dir, iteration=-params.iter
+            )[: params.avg + 1]
+            if len(filenames) == 0:
+                raise ValueError(
+                    f"No checkpoints found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            elif len(filenames) < params.avg + 1:
+                raise ValueError(
+                    f"Not enough checkpoints ({len(filenames)}) found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            filename_start = filenames[-1]
+            filename_end = filenames[0]
+            logging.info(
+                "Calculating the averaged model over iteration checkpoints"
+                f" from {filename_start} (excluded) to {filename_end}"
+            )
+            model.to(device)
+            model.load_state_dict(
+                average_checkpoints_with_averaged_model(
+                    filename_start=filename_start,
+                    filename_end=filename_end,
+                    device=device,
+                ),
+                strict=False,
+            )
+        else:
+            assert params.avg > 0, params.avg
+            start = params.epoch - params.avg
+            assert start >= 1, start
+            filename_start = f"{params.exp_dir}/epoch-{start}.pt"
+            filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt"
+            logging.info(
+                f"Calculating the averaged model over epoch range from "
+                f"{start} (excluded) to {params.epoch}"
+            )
+            model.to(device)
+            model.load_state_dict(
+                average_checkpoints_with_averaged_model(
+                    filename_start=filename_start,
+                    filename_end=filename_end,
+                    device=device,
+                ),
+                strict=False,
+            )
+
+    model.to("cpu")
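+    # The export below always runs on CPU; whoever loads the saved
+    # artifacts can move them to GPU afterwards.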
model.eval() + + if params.jit_trace is True: + convert_scaled_to_non_scaled(model, inplace=True) + logging.info("Using torch.jit.trace()") + encoder_filename = params.exp_dir / "encoder_jit_trace.pt" + export_encoder_model_jit_trace(model.encoder, encoder_filename) + + decoder_filename = params.exp_dir / "decoder_jit_trace.pt" + export_decoder_model_jit_trace(model.decoder, decoder_filename) + + joiner_filename = params.exp_dir / "joiner_jit_trace.pt" + export_joiner_model_jit_trace(model.joiner, joiner_filename) + else: + logging.info("Not using torchscript") + # Save it using a format so that it can be loaded + # by :func:`load_checkpoint` + filename = params.exp_dir / "pretrained.pt" + torch.save({"model": model.state_dict()}, str(filename)) + logging.info(f"Saved to {filename}") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/gigaspeech.py b/egs/librispeech/ASR/lstm_transducer_stateless2/gigaspeech.py new file mode 120000 index 000000000..5242c652a --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/gigaspeech.py @@ -0,0 +1 @@ +../pruned_transducer_stateless3/gigaspeech.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/jit_pretrained.py b/egs/librispeech/ASR/lstm_transducer_stateless2/jit_pretrained.py new file mode 100755 index 000000000..da184b76f --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/jit_pretrained.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python3 +# flake8: noqa +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang, Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This script loads torchscript models, either exported by `torch.jit.trace()` +or by `torch.jit.script()`, and uses them to decode waves. 
+You can use the following command to get the exported models: + +./lstm_transducer_stateless2/export.py \ + --exp-dir ./lstm_transducer_stateless2/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 \ + --jit-trace 1 + +Usage of this script: + +./lstm_transducer_stateless2/jit_pretrained.py \ + --encoder-model-filename ./lstm_transducer_stateless2/exp/encoder_jit_trace.pt \ + --decoder-model-filename ./lstm_transducer_stateless2/exp/decoder_jit_trace.pt \ + --joiner-model-filename ./lstm_transducer_stateless2/exp/joiner_jit_trace.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + /path/to/foo.wav \ + /path/to/bar.wav +""" + +import argparse +import logging +import math +from typing import List + +import kaldifeat +import sentencepiece as spm +import torch +import torchaudio +from torch.nn.utils.rnn import pad_sequence + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--encoder-model-filename", + type=str, + required=True, + help="Path to the encoder torchscript model. ", + ) + + parser.add_argument( + "--decoder-model-filename", + type=str, + required=True, + help="Path to the decoder torchscript model. ", + ) + + parser.add_argument( + "--joiner-model-filename", + type=str, + required=True, + help="Path to the joiner torchscript model. ", + ) + + parser.add_argument( + "--bpe-model", + type=str, + help="""Path to bpe.model.""", + ) + + parser.add_argument( + "sound_files", + type=str, + nargs="+", + help="The input sound file(s) to transcribe. " + "Supported formats are those supported by torchaudio.load(). " + "For example, wav and flac are supported. " + "The sample rate has to be 16kHz.", + ) + + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + help="The sample rate of the input sound file", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="Context size of the decoder model", + ) + + return parser + + +def read_sound_files( + filenames: List[str], expected_sample_rate: float +) -> List[torch.Tensor]: + """Read a list of sound files into a list 1-D float32 torch tensors. + Args: + filenames: + A list of sound filenames. + expected_sample_rate: + The expected sample rate of the sound files. + Returns: + Return a list of 1-D float32 torch tensors. + """ + ans = [] + for f in filenames: + wave, sample_rate = torchaudio.load(f) + assert sample_rate == expected_sample_rate, ( + f"expected sample rate: {expected_sample_rate}. " + f"Given: {sample_rate}" + ) + # We use only the first channel + ans.append(wave[0]) + return ans + + +def greedy_search( + decoder: torch.jit.ScriptModule, + joiner: torch.jit.ScriptModule, + encoder_out: torch.Tensor, + encoder_out_lens: torch.Tensor, + context_size: int, +) -> List[List[int]]: + """Greedy search in batch mode. It hardcodes --max-sym-per-frame=1. + Args: + decoder: + The decoder model. + joiner: + The joiner model. + encoder_out: + A 3-D tensor of shape (N, T, C) + encoder_out_lens: + A 1-D tensor of shape (N,). + context_size: + The context size of the decoder model. + Returns: + Return the decoded results for each utterance. 
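+      Each element is the list of decoded token IDs for one utterance, with
+      results restored to the original (unsorted) batch order, e.g.,
+      [[23, 7, 105], [51, 9]] for a batch of two utterances (hypothetical
+      IDs).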
+ """ + assert encoder_out.ndim == 3 + assert encoder_out.size(0) >= 1, encoder_out.size(0) + + packed_encoder_out = torch.nn.utils.rnn.pack_padded_sequence( + input=encoder_out, + lengths=encoder_out_lens.cpu(), + batch_first=True, + enforce_sorted=False, + ) + + device = encoder_out.device + blank_id = 0 # hard-code to 0 + + batch_size_list = packed_encoder_out.batch_sizes.tolist() + N = encoder_out.size(0) + + assert torch.all(encoder_out_lens > 0), encoder_out_lens + assert N == batch_size_list[0], (N, batch_size_list) + + hyps = [[blank_id] * context_size for _ in range(N)] + + decoder_input = torch.tensor( + hyps, + device=device, + dtype=torch.int64, + ) # (N, context_size) + + decoder_out = decoder( + decoder_input, + need_pad=torch.tensor([False]), + ).squeeze(1) + + offset = 0 + for batch_size in batch_size_list: + start = offset + end = offset + batch_size + current_encoder_out = packed_encoder_out.data[start:end] + current_encoder_out = current_encoder_out + # current_encoder_out's shape: (batch_size, encoder_out_dim) + offset = end + + decoder_out = decoder_out[:batch_size] + + logits = joiner( + current_encoder_out, + decoder_out, + ) + # logits'shape (batch_size, vocab_size) + + assert logits.ndim == 2, logits.shape + y = logits.argmax(dim=1).tolist() + emitted = False + for i, v in enumerate(y): + if v != blank_id: + hyps[i].append(v) + emitted = True + if emitted: + # update decoder output + decoder_input = [h[-context_size:] for h in hyps[:batch_size]] + decoder_input = torch.tensor( + decoder_input, + device=device, + dtype=torch.int64, + ) + decoder_out = decoder( + decoder_input, + need_pad=torch.tensor([False]), + ) + decoder_out = decoder_out.squeeze(1) + + sorted_ans = [h[context_size:] for h in hyps] + ans = [] + unsorted_indices = packed_encoder_out.unsorted_indices.tolist() + for i in range(N): + ans.append(sorted_ans[unsorted_indices[i]]) + + return ans + + +@torch.no_grad() +def main(): + parser = get_parser() + args = parser.parse_args() + logging.info(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + encoder = torch.jit.load(args.encoder_model_filename) + decoder = torch.jit.load(args.decoder_model_filename) + joiner = torch.jit.load(args.joiner_model_filename) + + encoder.eval() + decoder.eval() + joiner.eval() + + encoder.to(device) + decoder.to(device) + joiner.to(device) + + sp = spm.SentencePieceProcessor() + sp.load(args.bpe_model) + + logging.info("Constructing Fbank computer") + opts = kaldifeat.FbankOptions() + opts.device = device + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = args.sample_rate + opts.mel_opts.num_bins = 80 + + fbank = kaldifeat.Fbank(opts) + + logging.info(f"Reading sound files: {args.sound_files}") + waves = read_sound_files( + filenames=args.sound_files, + expected_sample_rate=args.sample_rate, + ) + waves = [w.to(device) for w in waves] + + logging.info("Decoding started") + features = fbank(waves) + feature_lengths = [f.size(0) for f in features] + + features = pad_sequence( + features, + batch_first=True, + padding_value=math.log(1e-10), + ) + + feature_lengths = torch.tensor(feature_lengths, device=device) + + states = encoder.get_init_states(batch_size=features.size(0), device=device) + + encoder_out, encoder_out_lens, _ = encoder( + x=features, + x_lens=feature_lengths, + states=states, + ) + + hyps = greedy_search( + decoder=decoder, + joiner=joiner, + 
encoder_out=encoder_out,
+        encoder_out_lens=encoder_out_lens,
+        context_size=args.context_size,
+    )
+    s = "\n"
+    for filename, hyp in zip(args.sound_files, hyps):
+        words = sp.decode(hyp)
+        s += f"{filename}:\n{words}\n\n"
+    logging.info(s)
+
+    logging.info("Decoding Done")
+
+
+if __name__ == "__main__":
+    formatter = (
+        "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+    )
+
+    logging.basicConfig(format=formatter, level=logging.INFO)
+    main()
diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/joiner.py b/egs/librispeech/ASR/lstm_transducer_stateless2/joiner.py
new file mode 120000
index 000000000..815fd4bb6
--- /dev/null
+++ b/egs/librispeech/ASR/lstm_transducer_stateless2/joiner.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless2/joiner.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/librispeech.py b/egs/librispeech/ASR/lstm_transducer_stateless2/librispeech.py
new file mode 120000
index 000000000..b76723bf5
--- /dev/null
+++ b/egs/librispeech/ASR/lstm_transducer_stateless2/librispeech.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless3/librispeech.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/lstm.py b/egs/librispeech/ASR/lstm_transducer_stateless2/lstm.py
new file mode 120000
index 000000000..a94e35f63
--- /dev/null
+++ b/egs/librispeech/ASR/lstm_transducer_stateless2/lstm.py
@@ -0,0 +1 @@
+../lstm_transducer_stateless/lstm.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/model.py b/egs/librispeech/ASR/lstm_transducer_stateless2/model.py
new file mode 100644
index 000000000..b0fb6ab89
--- /dev/null
+++ b/egs/librispeech/ASR/lstm_transducer_stateless2/model.py
@@ -0,0 +1,241 @@
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, Wei Kang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Optional, Tuple
+
+import k2
+import torch
+import torch.nn as nn
+from encoder_interface import EncoderInterface
+from scaling import ScaledLinear
+
+from icefall.utils import add_sos
+
+
+class Transducer(nn.Module):
+    """It implements https://arxiv.org/pdf/1211.3711.pdf
+    "Sequence Transduction with Recurrent Neural Networks"
+    """
+
+    def __init__(
+        self,
+        encoder: EncoderInterface,
+        decoder: nn.Module,
+        joiner: nn.Module,
+        encoder_dim: int,
+        decoder_dim: int,
+        joiner_dim: int,
+        vocab_size: int,
+        decoder_giga: Optional[nn.Module] = None,
+        joiner_giga: Optional[nn.Module] = None,
+    ):
+        """
+        Args:
+          encoder:
+            It is the transcription network in the paper. It accepts
+            two inputs: `x` of (N, T, encoder_dim) and `x_lens` of shape (N,).
+            It returns two tensors: `logits` of shape (N, T, encoder_dim) and
+            `logit_lens` of shape (N,).
+          decoder:
+            It is the prediction network in the paper. Its input shape
+            is (N, U) and its output shape is (N, U, decoder_dim).
+            It should contain one attribute: `blank_id`.
+ joiner: + It has two inputs with shapes: (N, T, encoder_dim) and + (N, U, decoder_dim). + Its output shape is (N, T, U, vocab_size). Note that its output + contains unnormalized probs, i.e., not processed by log-softmax. + encoder_dim: + Output dimension of the encoder network. + decoder_dim: + Output dimension of the decoder network. + joiner_dim: + Input dimension of the joiner network. + vocab_size: + Output dimension of the joiner network. + decoder_giga: + Optional. The decoder network for the GigaSpeech dataset. + joiner_giga: + Optional. The joiner network for the GigaSpeech dataset. + """ + super().__init__() + + assert isinstance(encoder, EncoderInterface), type(encoder) + assert hasattr(decoder, "blank_id") + + self.encoder = encoder + self.decoder = decoder + self.joiner = joiner + + self.decoder_giga = decoder_giga + self.joiner_giga = joiner_giga + + self.simple_am_proj = ScaledLinear( + encoder_dim, vocab_size, initial_speed=0.5 + ) + self.simple_lm_proj = ScaledLinear(decoder_dim, vocab_size) + + if decoder_giga is not None: + self.simple_am_proj_giga = ScaledLinear( + encoder_dim, vocab_size, initial_speed=0.5 + ) + self.simple_lm_proj_giga = ScaledLinear(decoder_dim, vocab_size) + + def forward( + self, + x: torch.Tensor, + x_lens: torch.Tensor, + y: k2.RaggedTensor, + libri: bool = True, + prune_range: int = 5, + am_scale: float = 0.0, + lm_scale: float = 0.0, + warmup: float = 1.0, + reduction: str = "sum", + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Args: + x: + A 3-D tensor of shape (N, T, C). + x_lens: + A 1-D tensor of shape (N,). It contains the number of frames in `x` + before padding. + y: + A ragged tensor with 2 axes [utt][label]. It contains labels of each + utterance. + libri: + True to use the decoder and joiner for the LibriSpeech dataset. + False to use the decoder and joiner for the GigaSpeech dataset. + prune_range: + The prune range for rnnt loss, it means how many symbols(context) + we are considering for each frame to compute the loss. + am_scale: + The scale to smooth the loss with am (output of encoder network) + part + lm_scale: + The scale to smooth the loss with lm (output of predictor network) + part + warmup: + A value warmup >= 0 that determines which modules are active, values + warmup > 1 "are fully warmed up" and all modules will be active. + reduction: + "sum" to sum the losses over all utterances in the batch. + "none" to return the loss in a 1-D tensor for each utterance + in the batch. + Returns: + Return the transducer loss. 
+ + Note: + Regarding am_scale & lm_scale, it will make the loss-function one of + the form: + lm_scale * lm_probs + am_scale * am_probs + + (1-lm_scale-am_scale) * combined_probs + """ + assert reduction in ("sum", "none"), reduction + assert x.ndim == 3, x.shape + assert x_lens.ndim == 1, x_lens.shape + assert y.num_axes == 2, y.num_axes + + assert x.size(0) == x_lens.size(0) == y.dim0 + + encoder_out, x_lens, _ = self.encoder(x, x_lens, warmup=warmup) + assert torch.all(x_lens > 0) + + if libri: + decoder = self.decoder + simple_lm_proj = self.simple_lm_proj + simple_am_proj = self.simple_am_proj + joiner = self.joiner + else: + decoder = self.decoder_giga + simple_lm_proj = self.simple_lm_proj_giga + simple_am_proj = self.simple_am_proj_giga + joiner = self.joiner_giga + + # Now for the decoder, i.e., the prediction network + row_splits = y.shape.row_splits(1) + y_lens = row_splits[1:] - row_splits[:-1] + + blank_id = decoder.blank_id + sos_y = add_sos(y, sos_id=blank_id) + + # sos_y_padded: [B, S + 1], start with SOS. + sos_y_padded = sos_y.pad(mode="constant", padding_value=blank_id) + + # decoder_out: [B, S + 1, decoder_dim] + decoder_out = decoder(sos_y_padded) + + # Note: y does not start with SOS + # y_padded : [B, S] + y_padded = y.pad(mode="constant", padding_value=0) + + y_padded = y_padded.to(torch.int64) + boundary = torch.zeros( + (x.size(0), 4), dtype=torch.int64, device=x.device + ) + boundary[:, 2] = y_lens + boundary[:, 3] = x_lens + + lm = simple_lm_proj(decoder_out) + am = simple_am_proj(encoder_out) + + with torch.cuda.amp.autocast(enabled=False): + simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed( + lm=lm.float(), + am=am.float(), + symbols=y_padded, + termination_symbol=blank_id, + lm_only_scale=lm_scale, + am_only_scale=am_scale, + boundary=boundary, + reduction=reduction, + return_grad=True, + ) + + # ranges : [B, T, prune_range] + ranges = k2.get_rnnt_prune_ranges( + px_grad=px_grad, + py_grad=py_grad, + boundary=boundary, + s_range=prune_range, + ) + + # am_pruned : [B, T, prune_range, encoder_dim] + # lm_pruned : [B, T, prune_range, decoder_dim] + am_pruned, lm_pruned = k2.do_rnnt_pruning( + am=joiner.encoder_proj(encoder_out), + lm=joiner.decoder_proj(decoder_out), + ranges=ranges, + ) + + # logits : [B, T, prune_range, vocab_size] + + # project_input=False since we applied the decoder's input projections + # prior to do_rnnt_pruning (this is an optimization for speed). + logits = joiner(am_pruned, lm_pruned, project_input=False) + + with torch.cuda.amp.autocast(enabled=False): + pruned_loss = k2.rnnt_loss_pruned( + logits=logits.float(), + symbols=y_padded, + ranges=ranges, + termination_symbol=blank_id, + boundary=boundary, + reduction=reduction, + ) + + return (simple_loss, pruned_loss) diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/optim.py b/egs/librispeech/ASR/lstm_transducer_stateless2/optim.py new file mode 120000 index 000000000..e2deb4492 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/optim.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/optim.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/pretrained.py b/egs/librispeech/ASR/lstm_transducer_stateless2/pretrained.py new file mode 100755 index 000000000..bef0ad760 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/pretrained.py @@ -0,0 +1,355 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. 
(authors: Fangjun Kuang, Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: + +(1) greedy search +./lstm_transducer_stateless2/pretrained.py \ + --checkpoint ./lstm_transducer_stateless2/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method greedy_search \ + /path/to/foo.wav \ + /path/to/bar.wav + +(2) beam search +./lstm_transducer_stateless2/pretrained.py \ + --checkpoint ./lstm_transducer_stateless2/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +(3) modified beam search +./lstm_transducer_stateless2/pretrained.py \ + --checkpoint ./lstm_transducer_stateless2/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method modified_beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +(4) fast beam search +./lstm_transducer_stateless2/pretrained.py \ + --checkpoint ./lstm_transducer_stateless2/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method fast_beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +You can also use `./lstm_transducer_stateless2/exp/epoch-xx.pt`. + +Note: ./lstm_transducer_stateless2/exp/pretrained.pt is generated by +./lstm_transducer_stateless2/export.py + +You can find pretrained models by visiting +https://huggingface.co/csukuangfj/icefall-asr-librispeech-lstm-transducer-stateless2-2022-09-03/tree/main/exp +""" + + +import argparse +import logging +import math +from typing import List + +import k2 +import kaldifeat +import sentencepiece as spm +import torch +import torchaudio +from beam_search import ( + beam_search, + fast_beam_search_one_best, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from torch.nn.utils.rnn import pad_sequence +from train import add_model_arguments, get_params, get_transducer_model + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--checkpoint", + type=str, + required=True, + help="Path to the checkpoint. " + "The checkpoint is assumed to be saved by " + "icefall.checkpoint.save_checkpoint().", + ) + + parser.add_argument( + "--bpe-model", + type=str, + help="""Path to bpe.model.""", + ) + + parser.add_argument( + "--method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + - fast_beam_search + """, + ) + + parser.add_argument( + "sound_files", + type=str, + nargs="+", + help="The input sound file(s) to transcribe. " + "Supported formats are those supported by torchaudio.load(). " + "For example, wav and flac are supported. 
" + "The sample rate has to be 16kHz.", + ) + + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + help="The sample rate of the input sound file", + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An integer indicating how many candidates we will keep for each + frame. Used only when --method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=4, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. + Used only when --method is fast_beam_search""", + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=4, + help="""Used only when --method is fast_beam_search""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=8, + help="""Used only when --method is fast_beam_search""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. Used only when + --method is greedy_search. + """, + ) + + add_model_arguments(parser) + + return parser + + +def read_sound_files( + filenames: List[str], expected_sample_rate: float +) -> List[torch.Tensor]: + """Read a list of sound files into a list 1-D float32 torch tensors. + Args: + filenames: + A list of sound filenames. + expected_sample_rate: + The expected sample rate of the sound files. + Returns: + Return a list of 1-D float32 torch tensors. + """ + ans = [] + for f in filenames: + wave, sample_rate = torchaudio.load(f) + assert sample_rate == expected_sample_rate, ( + f"expected sample rate: {expected_sample_rate}. 
" + f"Given: {sample_rate}" + ) + # We use only the first channel + ans.append(wave[0]) + return ans + + +@torch.no_grad() +def main(): + parser = get_parser() + args = parser.parse_args() + + params = get_params() + + params.update(vars(args)) + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.unk_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(f"{params}") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + logging.info("Creating model") + model = get_transducer_model(params, enable_giga=False) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + checkpoint = torch.load(args.checkpoint, map_location="cpu") + model.load_state_dict(checkpoint["model"], strict=False) + model.to(device) + model.eval() + model.device = device + + logging.info("Constructing Fbank computer") + opts = kaldifeat.FbankOptions() + opts.device = device + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = params.sample_rate + opts.mel_opts.num_bins = params.feature_dim + + fbank = kaldifeat.Fbank(opts) + + logging.info(f"Reading sound files: {params.sound_files}") + waves = read_sound_files( + filenames=params.sound_files, expected_sample_rate=params.sample_rate + ) + waves = [w.to(device) for w in waves] + + logging.info("Decoding started") + features = fbank(waves) + feature_lengths = [f.size(0) for f in features] + + features = pad_sequence( + features, batch_first=True, padding_value=math.log(1e-10) + ) + + feature_lengths = torch.tensor(feature_lengths, device=device) + + encoder_out, encoder_out_lens, _ = model.encoder( + x=features, x_lens=feature_lengths + ) + + num_waves = encoder_out.size(0) + hyps = [] + msg = f"Using {params.method}" + if params.method == "beam_search": + msg += f" with beam size {params.beam_size}" + logging.info(msg) + + if params.method == "fast_beam_search": + decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device) + hyp_tokens = fast_beam_search_one_best( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + elif params.method == "modified_beam_search": + hyp_tokens = modified_beam_search( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam_size, + ) + + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + elif params.method == "greedy_search" and params.max_sym_per_frame == 1: + hyp_tokens = greedy_search_batch( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + else: + for i in range(num_waves): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.method == "beam_search": + hyp = beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + else: + raise ValueError(f"Unsupported method: {params.method}") + + 
hyps.append(sp.decode(hyp).split()) + + s = "\n" + for filename, hyp in zip(params.sound_files, hyps): + words = " ".join(hyp) + s += f"{filename}:\n{words}\n\n" + logging.info(s) + + logging.info("Decoding Done") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/scaling.py b/egs/librispeech/ASR/lstm_transducer_stateless2/scaling.py new file mode 120000 index 000000000..09d802cc4 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/scaling.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/scaling.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/scaling_converter.py b/egs/librispeech/ASR/lstm_transducer_stateless2/scaling_converter.py new file mode 120000 index 000000000..3b667058d --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/scaling_converter.py @@ -0,0 +1 @@ +../pruned_transducer_stateless3/scaling_converter.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless2/train.py b/egs/librispeech/ASR/lstm_transducer_stateless2/train.py new file mode 100755 index 000000000..b3e50c52b --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless2/train.py @@ -0,0 +1,1273 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang, +# Mingshuang Luo,) +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""
+Usage:
+
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+
+cd egs/librispeech/ASR/
+./prepare.sh
+./prepare_giga_speech.sh
+
+./lstm_transducer_stateless2/train.py \
+  --world-size 4 \
+  --num-epochs 30 \
+  --start-epoch 1 \
+  --exp-dir lstm_transducer_stateless2/exp \
+  --full-libri 1 \
+  --max-duration 300
+
+# For mixed precision training:
+
+./lstm_transducer_stateless2/train.py \
+  --world-size 4 \
+  --num-epochs 30 \
+  --start-epoch 1 \
+  --use-fp16 1 \
+  --exp-dir lstm_transducer_stateless2/exp \
+  --full-libri 1 \
+  --max-duration 550
+"""
+
+import argparse
+import copy
+import logging
+import random
+import warnings
+from pathlib import Path
+from shutil import copyfile
+from typing import Any, Dict, Optional, Tuple, Union
+
+import k2
+import optim
+import sentencepiece as spm
+import torch
+import torch.multiprocessing as mp
+import torch.nn as nn
+from asr_datamodule import AsrDataModule
+from decoder import Decoder
+from gigaspeech import GigaSpeech
+from joiner import Joiner
+from lhotse import CutSet, load_manifest
+from lhotse.cut import Cut
+from lhotse.dataset.sampling.base import CutSampler
+from lhotse.utils import fix_random_seed
+from librispeech import LibriSpeech
+from lstm import RNN
+from model import Transducer
+from optim import Eden, Eve
+from torch import Tensor
+from torch.cuda.amp import GradScaler
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.utils.tensorboard import SummaryWriter
+
+from icefall import diagnostics
+from icefall.checkpoint import load_checkpoint, remove_checkpoints
+from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
+from icefall.checkpoint import (
+    save_checkpoint_with_global_batch_idx,
+    update_averaged_model,
+)
+from icefall.dist import cleanup_dist, setup_dist
+from icefall.env import get_env_info
+from icefall.utils import (
+    AttributeDict,
+    MetricsTracker,
+    display_and_save_batch,
+    setup_logger,
+    str2bool,
+)
+
+LRSchedulerType = Union[
+    torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler
+]
+
+
+def add_model_arguments(parser: argparse.ArgumentParser):
+    parser.add_argument(
+        "--num-encoder-layers",
+        type=int,
+        default=12,
+        help="Number of RNN encoder layers.",
+    )
+
+    parser.add_argument(
+        "--encoder-dim",
+        type=int,
+        default=512,
+        help="Encoder output dimension.",
+    )
+
+    parser.add_argument(
+        "--rnn-hidden-size",
+        type=int,
+        default=1024,
+        help="Hidden dim for LSTM layers.",
+    )
+
+    parser.add_argument(
+        "--aux-layer-period",
+        type=int,
+        default=0,
+        help="""Period of auxiliary layers used for random combination during
+        training. If set to 0, the random combiner is not used (default).
+        You can set a positive integer to use the random combiner, e.g., 3.
+        """,
+    )
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
+
+    parser.add_argument(
+        "--world-size",
+        type=int,
+        default=1,
+        help="Number of GPUs for DDP training.",
+    )
+
+    parser.add_argument(
+        "--master-port",
+        type=int,
+        default=12354,
+        help="Master port to use for DDP training.",
+    )
+
+    parser.add_argument(
+        "--tensorboard",
+        type=str2bool,
+        default=True,
+        help="Should various information be logged in tensorboard.",
+    )
+
+    parser.add_argument(
+        "--full-libri",
+        type=str2bool,
+        default=True,
+        help="When enabled, use 960h LibriSpeech. 
" + "Otherwise, use 100h subset.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=35, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=1, + help="""Resume training from this epoch. It should be positive. + If larger than 1, it will load checkpoint from + exp-dir/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--start-batch", + type=int, + default=0, + help="""If positive, --start-epoch is ignored and + it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="lstm_transducer_stateless2/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--initial-lr", + type=float, + default=0.003, + help="""The initial learning rate. This value should not need to be + changed.""", + ) + + parser.add_argument( + "--lr-batches", + type=float, + default=5000, + help="""Number of steps that affects how rapidly the learning rate decreases. + We suggest not to change this.""", + ) + + parser.add_argument( + "--lr-epochs", + type=float, + default=10, + help="""Number of epochs that affects how rapidly the learning rate decreases. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--prune-range", + type=int, + default=5, + help="The prune range for rnnt loss, it means how many symbols(context)" + "we are using to compute the loss", + ) + + parser.add_argument( + "--lm-scale", + type=float, + default=0.25, + help="The scale to smooth the loss with lm " + "(output of prediction network) part.", + ) + + parser.add_argument( + "--am-scale", + type=float, + default=0.0, + help="The scale to smooth the loss with am (output of encoder network)" + "part.", + ) + + parser.add_argument( + "--simple-loss-scale", + type=float, + default=0.5, + help="To get pruning ranges, we will calculate a simple version" + "loss(joiner is just addition), this simple loss also uses for" + "training (as a regularization item). We will scale the simple loss" + "with this parameter before adding to the final loss.", + ) + + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + + parser.add_argument( + "--print-diagnostics", + type=str2bool, + default=False, + help="Accumulate stats on activations, print them and exit.", + ) + + parser.add_argument( + "--save-every-n", + type=int, + default=2000, + help="""Save checkpoint after processing this number of batches" + periodically. We save checkpoint to exp-dir/ whenever + params.batch_idx_train % save_every_n == 0. The checkpoint filename + has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt' + Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the + end of each epoch where `xxx` is the epoch number counting from 0. + """, + ) + + parser.add_argument( + "--keep-last-k", + type=int, + default=20, + help="""Only keep this number of checkpoints on disk. + For instance, if it is 3, there are only 3 checkpoints + in the exp-dir with filenames `checkpoint-xxx.pt`. + It does not affect checkpoints with name `epoch-xxx.pt`. 
+ """, + ) + + parser.add_argument( + "--average-period", + type=int, + default=100, + help="""Update the averaged model, namely `model_avg`, after processing + this number of batches. `model_avg` is a separate version of model, + in which each floating-point parameter is the average of all the + parameters from the start of training. Each time we take the average, + we do: `model_avg = model * (average_period / batch_idx_train) + + model_avg * ((batch_idx_train - average_period) / batch_idx_train)`. + """, + ) + + parser.add_argument( + "--use-fp16", + type=str2bool, + default=False, + help="Whether to use half precision training.", + ) + + parser.add_argument( + "--giga-prob", + type=float, + default=0.5, + help="The probability to select a batch from the GigaSpeech dataset", + ) + + add_model_arguments(parser) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - valid_interval: Run validation if batch_idx % valid_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. + + - warm_step: The warm_step for Noam optimizer. 
+ """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 3000, # For the 100h subset, use 800 + # parameters for conformer + "feature_dim": 80, + "subsampling_factor": 4, + "dim_feedforward": 2048, + # parameters for decoder + "decoder_dim": 512, + # parameters for joiner + "joiner_dim": 512, + # parameters for Noam + "model_warm_step": 3000, # arg given to model, not for lrate + "env_info": get_env_info(), + } + ) + + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + encoder = RNN( + num_features=params.feature_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.encoder_dim, + rnn_hidden_size=params.rnn_hidden_size, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + aux_layer_period=params.aux_layer_period, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + decoder_dim=params.decoder_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + encoder_dim=params.encoder_dim, + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return joiner + + +def get_transducer_model( + params: AttributeDict, + enable_giga: bool = True, +) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + if enable_giga: + logging.info("Use giga") + decoder_giga = get_decoder_model(params) + joiner_giga = get_joiner_model(params) + else: + logging.info("Disable giga") + decoder_giga = None + joiner_giga = None + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + decoder_giga=decoder_giga, + joiner_giga=joiner_giga, + encoder_dim=params.encoder_dim, + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + model_avg: nn.Module = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, +) -> Optional[Dict[str, Any]]: + """Load checkpoint from file. + + If params.start_batch is positive, it will load the checkpoint from + `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if + params.start_epoch is larger than 1, it will load the checkpoint from + `params.start_epoch - 1`. + + Apart from loading state dict for `model` and `optimizer` it also updates + `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer that we are using. + scheduler: + The scheduler that we are using. + Returns: + Return a dict containing previously saved training info. + """ + if params.start_batch > 0: + filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt" + elif params.start_epoch > 1: + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + else: + return None + + assert filename.is_file(), f"{filename} does not exist!" 
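+    # load_checkpoint() restores the state dicts in place and returns the
+    # remaining entries of the checkpoint (e.g. training statistics),
+    # which are copied into `params` below.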
+ + saved_params = load_checkpoint( + filename, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + if params.start_batch > 0: + if "cur_epoch" in saved_params: + params["start_epoch"] = saved_params["cur_epoch"] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: Union[nn.Module, DDP], + model_avg: Optional[nn.Module] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, + sampler: Optional[CutSampler] = None, + scaler: Optional[GradScaler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer used in the training. + sampler: + The sampler for the training dataset. + scaler: + The scaler used for mix precision training. + """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=sampler, + scaler=scaler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def is_libri(c: Cut) -> bool: + """Return True if this cut is from the LibriSpeech dataset. + + Note: + During data preparation, we set the custom field in + the supervision segment of GigaSpeech to dict(origin='giga') + See ../local/preprocess_gigaspeech.py. + """ + return c.supervisions[0].custom is None + + +def compute_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + sp: spm.SentencePieceProcessor, + batch: dict, + is_training: bool, + warmup: float = 1.0, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute RNN-T loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. + warmup: a floating point value which increases throughout training; + values >= 1.0 are fully warmed up and have all modules present. 
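+    Returns:
+      Return a tuple of (loss, MetricsTracker). `loss` is the weighted sum of
+      the simple and pruned RNN-T losses, summed over the utterances in the
+      batch whose losses are finite.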
+ """ + device = ( + model.device + if isinstance(model, DDP) + else next(model.parameters()).device + ) + feature = batch["inputs"] + # at entry, feature is (N, T, C) + assert feature.ndim == 3 + feature = feature.to(device) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + libri = is_libri(supervisions["cut"][0]) + + texts = batch["supervisions"]["text"] + y = sp.encode(texts, out_type=int) + y = k2.RaggedTensor(y).to(device) + + with torch.set_grad_enabled(is_training): + simple_loss, pruned_loss = model( + x=feature, + x_lens=feature_lens, + y=y, + libri=libri, + prune_range=params.prune_range, + am_scale=params.am_scale, + lm_scale=params.lm_scale, + warmup=warmup, + reduction="none", + ) + simple_loss_is_finite = torch.isfinite(simple_loss) + pruned_loss_is_finite = torch.isfinite(pruned_loss) + is_finite = simple_loss_is_finite & pruned_loss_is_finite + if not torch.all(is_finite): + logging.info( + "Not all losses are finite!\n" + f"simple_loss: {simple_loss}\n" + f"pruned_loss: {pruned_loss}" + ) + display_and_save_batch(batch, params=params, sp=sp) + simple_loss = simple_loss[simple_loss_is_finite] + pruned_loss = pruned_loss[pruned_loss_is_finite] + + # If either all simple_loss or pruned_loss is inf or nan, + # we stop the training process by raising an exception + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) + + simple_loss = simple_loss.sum() + pruned_loss = pruned_loss.sum() + # after the main warmup step, we keep pruned_loss_scale small + # for the same amount of time (model_warm_step), to avoid + # overwhelming the simple_loss and causing it to diverge, + # in case it had not fully learned the alignment yet. + pruned_loss_scale = ( + 0.0 + if warmup < 1.0 + else (0.1 if warmup > 1.0 and warmup < 2.0 else 1.0) + ) + loss = ( + params.simple_loss_scale * simple_loss + + pruned_loss_scale * pruned_loss + ) + + assert loss.requires_grad == is_training + + info = MetricsTracker() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + # info["frames"] is an approximate number for two reasons: + # (1) The acutal subsampling factor is ((lens - 1) // 2 - 1) // 2 + # (2) If some utterances in the batch lead to inf/nan loss, they + # are filtered out. + info["frames"] = ( + (feature_lens // params.subsampling_factor).sum().item() + ) + + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + + # Note: We use reduction=sum while computing the loss. 
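+    # Hence info["loss"] is a sum over utterances; MetricsTracker divides
+    # by info["frames"] when reporting the per-frame loss in the logs.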
+ info["loss"] = loss.detach().cpu().item() + info["simple_loss"] = simple_loss.detach().cpu().item() + info["pruned_loss"] = pruned_loss.detach().cpu().item() + + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + sp: spm.SentencePieceProcessor, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + model: Union[nn.Module, DDP], + optimizer: torch.optim.Optimizer, + scheduler: LRSchedulerType, + sp: spm.SentencePieceProcessor, + train_dl: torch.utils.data.DataLoader, + giga_train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + rng: random.Random, + scaler: GradScaler, + model_avg: Optional[nn.Module] = None, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, + rank: int = 0, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + scheduler: + The learning rate scheduler, we call step() every step. + train_dl: + Dataloader for the training dataset. + giga_train_dl: + Dataloader for the GigaSpeech training dataset. + valid_dl: + Dataloader for the validation dataset. + rng: + For selecting which dataset to use. + scaler: + The scaler used for mix precision training. + model_avg: + The stored model averaged from the start of training. + tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. 
+ """ + model.train() + + libri_tot_loss = MetricsTracker() + giga_tot_loss = MetricsTracker() + tot_loss = MetricsTracker() + + # index 0: for LibriSpeech + # index 1: for GigaSpeech + # This sets the probabilities for choosing which datasets + dl_weights = [1 - params.giga_prob, params.giga_prob] + + iter_libri = iter(train_dl) + iter_giga = iter(giga_train_dl) + + batch_idx = 0 + + while True: + idx = rng.choices((0, 1), weights=dl_weights, k=1)[0] + dl = iter_libri if idx == 0 else iter_giga + + try: + batch = next(dl) + except StopIteration: + name = "libri" if idx == 0 else "giga" + logging.info(f"{name} reaches end of dataloader") + break + + batch_idx += 1 + + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + + libri = is_libri(batch["supervisions"]["cut"][0]) + + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + warmup=(params.batch_idx_train / params.model_warm_step), + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + if libri: + libri_tot_loss = ( + libri_tot_loss * (1 - 1 / params.reset_interval) + ) + loss_info + prefix = "libri" # for logging only + else: + giga_tot_loss = ( + giga_tot_loss * (1 - 1 / params.reset_interval) + ) + loss_info + prefix = "giga" + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. + scaler.scale(loss).backward() + scheduler.step_batch(params.batch_idx_train) + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + except: # noqa + display_and_save_batch(batch, params=params, sp=sp) + raise + + if params.print_diagnostics and batch_idx == 30: + return + + if ( + rank == 0 + and params.batch_idx_train > 0 + and params.batch_idx_train % params.average_period == 0 + ): + update_averaged_model( + params=params, + model_cur=model, + model_avg=model_avg, + ) + + if ( + params.batch_idx_train > 0 + and params.batch_idx_train % params.save_every_n == 0 + ): + save_checkpoint_with_global_batch_idx( + out_dir=params.exp_dir, + global_batch_idx=params.batch_idx_train, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + remove_checkpoints( + out_dir=params.exp_dir, + topk=params.keep_last_k, + rank=rank, + ) + + if batch_idx % params.log_interval == 0: + cur_lr = scheduler.get_last_lr()[0] + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, {prefix}_loss[{loss_info}], " + f"tot_loss[{tot_loss}], " + f"libri_tot_loss[{libri_tot_loss}], " + f"giga_tot_loss[{giga_tot_loss}], " + f"batch size: {batch_size}, " + f"lr: {cur_lr:.2e}" + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, + f"train/current_{prefix}_", + params.batch_idx_train, + ) + tot_loss.write_summary( + tb_writer, "train/tot_", params.batch_idx_train + ) + libri_tot_loss.write_summary( + tb_writer, "train/libri_tot_", params.batch_idx_train + ) + giga_tot_loss.write_summary( + tb_writer, "train/giga_tot_", params.batch_idx_train + ) + + if batch_idx > 0 and batch_idx % params.valid_interval == 0: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + sp=sp, + valid_dl=valid_dl, + world_size=world_size, + ) + 
model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def filter_short_and_long_utterances(cuts: CutSet) -> CutSet: + def remove_short_and_long_utt(c: Cut): + # Keep only utterances with duration between 1 second and 20 seconds + # + # Caution: There is a reason to select 20.0 here. Please see + # ../local/display_manifest_statistics.py + # + # You should use ../local/display_manifest_statistics.py to get + # an utterance duration distribution for your dataset to select + # the threshold + return 1.0 <= c.duration <= 20.0 + + cuts = cuts.filter(remove_short_and_long_utt) + + return cuts + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. + args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + if params.full_libri is False: + params.valid_interval = 800 + + fix_random_seed(params.seed) + rng = random.Random(params.seed) + if world_size > 1: + setup_dist(rank, world_size, params.master_port) + + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info("Training started") + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", rank) + logging.info(f"Device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + assert params.save_every_n >= params.average_period + model_avg: Optional[nn.Module] = None + if rank == 0: + # model_avg is only used with rank 0 + model_avg = copy.deepcopy(model) + + assert params.start_epoch > 0, params.start_epoch + checkpoints = load_checkpoint_if_available( + params=params, model=model, model_avg=model_avg + ) + + model.to(device) + if world_size > 1: + logging.info("Using DDP") + model = DDP(model, device_ids=[rank], find_unused_parameters=True) + + optimizer = Eve(model.parameters(), lr=params.initial_lr) + + scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs) + + if checkpoints and "optimizer" in checkpoints: + logging.info("Loading optimizer state dict") + optimizer.load_state_dict(checkpoints["optimizer"]) + + if ( + checkpoints + and "scheduler" in checkpoints + and checkpoints["scheduler"] is not None + ): + logging.info("Loading scheduler state dict") + scheduler.load_state_dict(checkpoints["scheduler"]) + + # # overwrite it + # scheduler.base_lrs = [params.initial_lr for _ in scheduler.base_lrs] + # print(scheduler.base_lrs) + + if params.print_diagnostics: + diagnostic = diagnostics.attach_diagnostics(model) + + librispeech = 
LibriSpeech(manifest_dir=args.manifest_dir) + + train_cuts = librispeech.train_clean_100_cuts() + if params.full_libri: + train_cuts += librispeech.train_clean_360_cuts() + train_cuts += librispeech.train_other_500_cuts() + + train_cuts = filter_short_and_long_utterances(train_cuts) + + gigaspeech = GigaSpeech(manifest_dir=args.manifest_dir) + # XL 10k hours + # L 2.5k hours + # M 1k hours + # S 250 hours + # XS 10 hours + # DEV 12 hours + # Test 40 hours + if params.full_libri: + logging.info("Using the XL subset of GigaSpeech (10k hours)") + train_giga_cuts = gigaspeech.train_XL_cuts() + else: + logging.info("Using the S subset of GigaSpeech (250 hours)") + train_giga_cuts = gigaspeech.train_S_cuts() + + train_giga_cuts = filter_short_and_long_utterances(train_giga_cuts) + train_giga_cuts = train_giga_cuts.repeat(times=None) + + if args.enable_musan: + cuts_musan = load_manifest( + Path(args.manifest_dir) / "musan_cuts.jsonl.gz" + ) + else: + cuts_musan = None + + asr_datamodule = AsrDataModule(args) + + train_dl = asr_datamodule.train_dataloaders( + train_cuts, + on_the_fly_feats=False, + cuts_musan=cuts_musan, + ) + + giga_train_dl = asr_datamodule.train_dataloaders( + train_giga_cuts, + on_the_fly_feats=False, + cuts_musan=cuts_musan, + ) + + valid_cuts = librispeech.dev_clean_cuts() + valid_cuts += librispeech.dev_other_cuts() + valid_dl = asr_datamodule.valid_dataloaders(valid_cuts) + + # It's time consuming to include `giga_train_dl` here + # for dl in [train_dl, giga_train_dl]: + for dl in [train_dl]: + if ( + params.start_batch <= 0 + and params.start_epoch == 1 + and not params.print_diagnostics + and False + ): + scan_pessimistic_batches_for_oom( + model=model, + train_dl=dl, + optimizer=optimizer, + sp=sp, + params=params, + warmup=0.0 if params.start_epoch == 0 else 1.0, + ) + else: + logging.info("Skip scan_pessimistic_batches_for_oom") + + scaler = GradScaler(enabled=params.use_fp16) + if checkpoints and "grad_scaler" in checkpoints: + logging.info("Loading grad scaler state dict") + scaler.load_state_dict(checkpoints["grad_scaler"]) + + for epoch in range(params.start_epoch, params.num_epochs + 1): + scheduler.step_epoch(epoch - 1) + fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sp=sp, + train_dl=train_dl, + giga_train_dl=giga_train_dl, + valid_dl=valid_dl, + rng=rng, + scaler=scaler, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + if params.print_diagnostics: + diagnostic.print_diagnostics() + break + + save_checkpoint( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def scan_pessimistic_batches_for_oom( + model: Union[nn.Module, DDP], + train_dl: torch.utils.data.DataLoader, + optimizer: torch.optim.Optimizer, + sp: spm.SentencePieceProcessor, + params: AttributeDict, + warmup: float, +): + from lhotse.dataset import find_pessimistic_batches + + logging.info( + "Sanity check -- see if any of the batches in epoch 1 would cause OOM." 
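+        # find_pessimistic_batches() returns, for each criterion (e.g.
+        # largest total duration), the most demanding batch the sampler can
+        # produce; if these batches fit in memory, the rest likely will.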
+ ) + batches, crit_values = find_pessimistic_batches(train_dl.sampler) + for criterion, cuts in batches.items(): + batch = train_dl.dataset[cuts] + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, _ = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + warmup=warmup, + ) + loss.backward() + optimizer.step() + optimizer.zero_grad() + except RuntimeError as e: + if "CUDA out of memory" in str(e): + logging.error( + "Your GPU ran out of memory with the current " + "max_duration setting. We recommend decreasing " + "max_duration and trying again.\n" + f"Failing criterion: {criterion} " + f"(={crit_values[criterion]}) ..." + ) + raise + + +def main(): + parser = get_parser() + AsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + assert 0 <= args.giga_prob < 1, args.giga_prob + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py b/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py index bb54c77a6..428f35796 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py @@ -60,7 +60,7 @@ def scaled_linear_to_linear(scaled_linear: ScaledLinear) -> nn.Linear: linear = torch.nn.Linear( in_features=scaled_linear.in_features, out_features=scaled_linear.out_features, - bias=True, # otherwise, it throws errors when converting to PNNX format. + bias=True, # otherwise, it throws errors when converting to PNNX format # device=weight.device, # Pytorch version before v1.9.0 does not has # this argument. Comment out for now, we will # see if it will raise error for versions @@ -206,7 +206,7 @@ def scaled_lstm_to_lstm(scaled_lstm: ScaledLSTM) -> nn.LSTM: return lstm -# Copied from https://pytorch.org/docs/1.9.0/_modules/torch/nn/modules/module.html#Module.get_submodule +# Copied from https://pytorch.org/docs/1.9.0/_modules/torch/nn/modules/module.html#Module.get_submodule # noqa # get_submodule was added to nn.Module at v1.9.0 def get_submodule(model, target): if target == "": diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py index c2ef28c7b..723b03e15 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py @@ -39,7 +39,7 @@ cd egs/librispeech/ASR/ --world-size 4 \ --num-epochs 30 \ --start-epoch 0 \ - --use_fp16 1 \ + --use-fp16 1 \ --exp-dir pruned_transducer_stateless3/exp \ --full-libri 1 \ --max-duration 550 From 436942211cf9bef5b55a9772914b929e6048f852 Mon Sep 17 00:00:00 2001 From: Teo Wen Shen <36886809+teowenshen@users.noreply.github.com> Date: Tue, 20 Sep 2022 10:52:24 +0800 Subject: [PATCH 10/15] Adding Dockerfile for Ubuntu18.04-pytorch1.12.1-cuda11.3-cudnn8 (#572) * Changed Dockerfile * Update Dockerfile * Dockerfile * Update README.md * Add Dockerfiles * Update README.md Removed misleading CUDA version, as the Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8 Dockerfile can only support CUDA versions >11.0. 
---
 docker/README.md                                   | 110 ++++++++++++++++--
 .../Dockerfile                                     |  72 ++++++++++++
 .../Dockerfile                                     |  63 +++++-----
 3 files changed, 203 insertions(+), 42 deletions(-)
 create mode 100644 docker/Ubuntu18.04-pytorch1.12.1-cuda11.3-cudnn8/Dockerfile

diff --git a/docker/README.md b/docker/README.md
index 0c8cb0ed9..0a39b7a49 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,24 +1,114 @@
 # icefall dockerfile
 
-We provide a dockerfile for some users, the configuration of dockerfile is : Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8-python3.8. You can use the dockerfile by following the steps:
+Two configurations are provided - (a) Ubuntu18.04-pytorch1.12.1-cuda11.3-cudnn8, and (b) Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8.
 
-## Building images locally
+If your NVIDIA driver supports CUDA 11.3, please go for case (a) Ubuntu18.04-pytorch1.12.1-cuda11.3-cudnn8.
+
+Otherwise, since the older PyTorch images are not updated with the [apt-key rotation by NVIDIA](https://developer.nvidia.com/blog/updating-the-cuda-linux-gpg-repository-key), you have to go for case (b) Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8. Ensure that your NVIDIA driver supports at least CUDA 11.0.
+
+You can check the highest CUDA version that your NVIDIA driver supports with the `nvidia-smi` command, as shown below. In this example, the highest supported CUDA version is 11.0, i.e. case (b) Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8.
 
 ```bash
-cd docker/Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8
-docker build -t icefall/pytorch1.7.1:latest -f ./Dockerfile ./
+$ nvidia-smi
+Tue Sep 20 00:26:13 2022
++-----------------------------------------------------------------------------+
+| NVIDIA-SMI 450.119.03   Driver Version: 450.119.03   CUDA Version: 11.0     |
+|-------------------------------+----------------------+----------------------+
+| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
+|                               |                      |               MIG M. |
+|===============================+======================+======================|
+|   0  TITAN RTX           On   | 00000000:03:00.0 Off |                  N/A |
+| 41%   31C    P8     4W / 280W |     16MiB / 24219MiB |      0%      Default |
+|                               |                      |                  N/A |
++-------------------------------+----------------------+----------------------+
+|   1  TITAN RTX           On   | 00000000:04:00.0 Off |                  N/A |
+| 41%   30C    P8    11W / 280W |      6MiB / 24220MiB |      0%      Default |
+|                               |                      |                  N/A |
++-------------------------------+----------------------+----------------------+
+
++-----------------------------------------------------------------------------+
+| Processes:                                                                  |
+|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
+|        ID   ID                                                   Usage      |
+|=============================================================================|
+|    0   N/A  N/A      2085      G   /usr/lib/xorg/Xorg                  9MiB |
+|    0   N/A  N/A      2240      G   /usr/bin/gnome-shell                4MiB |
+|    1   N/A  N/A      2085      G   /usr/lib/xorg/Xorg                  4MiB |
++-----------------------------------------------------------------------------+
+
 ```
 
-## Using built images
-Sample usage of the GPU based images:
+## Building images locally
+If your environment requires a proxy to access the Internet, remember to add that information to the Dockerfile directly.
+For most cases, you can uncomment these lines in the Dockerfile and fill in your proxy details.
+
+```dockerfile
+ENV http_proxy=http://aaa.bb.cc.net:8080 \
+    https_proxy=http://aaa.bb.cc.net:8080
+```
+
+Then, proceed with these commands.
+
+### If you are case (a), i.e. your NVIDIA driver supports CUDA version >= 11.3:
+
+```bash
+cd docker/Ubuntu18.04-pytorch1.12.1-cuda11.3-cudnn8
+docker build -t icefall/pytorch1.12.1 .
+```
+
+### If you are case (b), i.e. your NVIDIA driver can only support CUDA versions 11.0 <= x < 11.3:
+```bash
+cd docker/Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8
+docker build -t icefall/pytorch1.7.1 .
+```
+
+## Running your built local image
+Sample usage of the GPU based images. These commands are written with case (a) in mind, so please make the necessary changes to your image name if you are case (b).
 
 Note: use [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) to run the GPU images.
 
 ```bash
-docker run -it --runtime=nvidia --name=icefall_username --gpus all icefall/pytorch1.7.1:latest
+docker run -it --runtime=nvidia --shm-size=2gb --name=icefall --gpus all icefall/pytorch1.12.1
 ```
 
-Sample usage of the CPU based images:
+### Tips:
+1. Since your data and models are most probably on the host machine rather than inside the container, you must use the -v flag to mount them. Do this by specifying `-v {/path/in/host/machine}:{/path/in/docker}` (Docker expects the host path first).
+
+2. Also, if your environment requires a proxy, this would be a good time to add it in too: `-e http_proxy=http://aaa.bb.cc.net:8080 -e https_proxy=http://aaa.bb.cc.net:8080`.
+
+Overall, your docker run command should look like this.
 
 ```bash
-docker run -it icefall/pytorch1.7.1:latest /bin/bash
-```
\ No newline at end of file
+docker run -it --runtime=nvidia --shm-size=2gb --name=icefall --gpus all -v {/path/in/host/machine}:{/path/in/docker} -e http_proxy=http://aaa.bb.cc.net:8080 -e https_proxy=http://aaa.bb.cc.net:8080 icefall/pytorch1.12.1
+```
+
+You can explore more docker run options [here](https://docs.docker.com/engine/reference/commandline/run/) to suit your environment.
+
+### Linking to icefall in your host machine
+
+If you already have icefall downloaded onto your host machine, you can use that repository instead so that changes in your code are visible inside and outside of the container.
+
+Note: Remember to set the -v flag above during the first run of the container, as that is the only way for your container to access your host machine.
+Warning: Check that the icefall in your host machine is visible from within your container before proceeding to the commands below.
+
+Use these commands once you are inside the container.
+
+```bash
+rm -r /workspace/icefall
+ln -s {/path/in/docker/to/icefall} /workspace/icefall
+```
+
+## Starting another session in the same running container
+```bash
+docker exec -it icefall /bin/bash
+```
+
+## Restarting a killed container that has been run before
+```bash +docker start -ai icefall +``` + +## Sample usage of the CPU based images: +```bash +docker run -it icefall /bin/bash +``` diff --git a/docker/Ubuntu18.04-pytorch1.12.1-cuda11.3-cudnn8/Dockerfile b/docker/Ubuntu18.04-pytorch1.12.1-cuda11.3-cudnn8/Dockerfile new file mode 100644 index 000000000..db4dda864 --- /dev/null +++ b/docker/Ubuntu18.04-pytorch1.12.1-cuda11.3-cudnn8/Dockerfile @@ -0,0 +1,72 @@ +FROM pytorch/pytorch:1.12.1-cuda11.3-cudnn8-devel + +# ENV http_proxy=http://aaa.bbb.cc.net:8080 \ +# https_proxy=http://aaa.bbb.cc.net:8080 + +# install normal source +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + g++ \ + make \ + automake \ + autoconf \ + bzip2 \ + unzip \ + wget \ + sox \ + libtool \ + git \ + subversion \ + zlib1g-dev \ + gfortran \ + ca-certificates \ + patch \ + ffmpeg \ + valgrind \ + libssl-dev \ + vim \ + curl + +# cmake +RUN wget -P /opt https://cmake.org/files/v3.18/cmake-3.18.0.tar.gz && \ + cd /opt && \ + tar -zxvf cmake-3.18.0.tar.gz && \ + cd cmake-3.18.0 && \ + ./bootstrap && \ + make && \ + make install && \ + rm -rf cmake-3.18.0.tar.gz && \ + find /opt/cmake-3.18.0 -type f \( -name "*.o" -o -name "*.la" -o -name "*.a" \) -exec rm {} \; && \ + cd - + +# flac +RUN wget -P /opt https://downloads.xiph.org/releases/flac/flac-1.3.2.tar.xz && \ + cd /opt && \ + xz -d flac-1.3.2.tar.xz && \ + tar -xvf flac-1.3.2.tar && \ + cd flac-1.3.2 && \ + ./configure && \ + make && make install && \ + rm -rf flac-1.3.2.tar && \ + find /opt/flac-1.3.2 -type f \( -name "*.o" -o -name "*.la" -o -name "*.a" \) -exec rm {} \; && \ + cd - + +RUN pip install kaldiio graphviz && \ + conda install -y -c pytorch torchaudio + +#install k2 from source +RUN git clone https://github.com/k2-fsa/k2.git /opt/k2 && \ + cd /opt/k2 && \ + python3 setup.py install && \ + cd - + +# install lhotse +RUN pip install git+https://github.com/lhotse-speech/lhotse + +RUN git clone https://github.com/k2-fsa/icefall /workspace/icefall && \ + cd /workspace/icefall && \ + pip install -r requirements.txt + +ENV PYTHONPATH /workspace/icefall:$PYTHONPATH + +WORKDIR /workspace/icefall \ No newline at end of file diff --git a/docker/Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8/Dockerfile b/docker/Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8/Dockerfile index 746c2c4f3..7a14a00ad 100644 --- a/docker/Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8/Dockerfile +++ b/docker/Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8/Dockerfile @@ -1,7 +1,13 @@ FROM pytorch/pytorch:1.7.1-cuda11.0-cudnn8-devel -# install normal source +# ENV http_proxy=http://aaa.bbb.cc.net:8080 \ +# https_proxy=http://aaa.bbb.cc.net:8080 +RUN rm /etc/apt/sources.list.d/cuda.list && \ + rm /etc/apt/sources.list.d/nvidia-ml.list && \ + apt-key del 7fa2af80 + +# install normal source RUN apt-get update && \ apt-get install -y --no-install-recommends \ g++ \ @@ -21,20 +27,25 @@ RUN apt-get update && \ patch \ ffmpeg \ valgrind \ - libssl-dev \ - vim && \ - rm -rf /var/lib/apt/lists/* + libssl-dev \ + vim \ + curl - -RUN mv /opt/conda/lib/libcufft.so.10 /opt/libcufft.so.10.bak && \ +# Add new keys and reupdate +RUN curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub | apt-key add - && \ + curl -fsSL https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub | apt-key add - && \ + echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/cuda.list && \ + echo "deb 
https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list && \ + rm -rf /var/lib/apt/lists/* && \ + mv /opt/conda/lib/libcufft.so.10 /opt/libcufft.so.10.bak && \ mv /opt/conda/lib/libcurand.so.10 /opt/libcurand.so.10.bak && \ mv /opt/conda/lib/libcublas.so.11 /opt/libcublas.so.11.bak && \ mv /opt/conda/lib/libnvrtc.so.11.0 /opt/libnvrtc.so.11.1.bak && \ - mv /opt/conda/lib/libnvToolsExt.so.1 /opt/libnvToolsExt.so.1.bak && \ - mv /opt/conda/lib/libcudart.so.11.0 /opt/libcudart.so.11.0.bak + # mv /opt/conda/lib/libnvToolsExt.so.1 /opt/libnvToolsExt.so.1.bak && \ + mv /opt/conda/lib/libcudart.so.11.0 /opt/libcudart.so.11.0.bak && \ + apt-get update && apt-get -y upgrade # cmake - RUN wget -P /opt https://cmake.org/files/v3.18/cmake-3.18.0.tar.gz && \ cd /opt && \ tar -zxvf cmake-3.18.0.tar.gz && \ @@ -45,11 +56,7 @@ RUN wget -P /opt https://cmake.org/files/v3.18/cmake-3.18.0.tar.gz && \ rm -rf cmake-3.18.0.tar.gz && \ find /opt/cmake-3.18.0 -type f \( -name "*.o" -o -name "*.la" -o -name "*.a" \) -exec rm {} \; && \ cd - - -#kaldiio - -RUN pip install kaldiio - + # flac RUN wget -P /opt https://downloads.xiph.org/releases/flac/flac-1.3.2.tar.xz && \ cd /opt && \ @@ -62,15 +69,8 @@ RUN wget -P /opt https://downloads.xiph.org/releases/flac/flac-1.3.2.tar.xz && find /opt/flac-1.3.2 -type f \( -name "*.o" -o -name "*.la" -o -name "*.a" \) -exec rm {} \; && \ cd - -# graphviz -RUN pip install graphviz - -# kaldifeat -RUN git clone https://github.com/csukuangfj/kaldifeat.git /opt/kaldifeat && \ - cd /opt/kaldifeat && \ - python setup.py install && \ - cd - - +RUN pip install kaldiio graphviz && \ + conda install -y -c pytorch torchaudio=0.7.1 #install k2 from source RUN git clone https://github.com/k2-fsa/k2.git /opt/k2 && \ @@ -79,14 +79,13 @@ RUN git clone https://github.com/k2-fsa/k2.git /opt/k2 && \ cd - # install lhotse -RUN pip install torchaudio==0.7.2 -RUN pip install git+https://github.com/lhotse-speech/lhotse -#RUN pip install lhotse +RUN pip install git+https://github.com/lhotse-speech/lhotse -# install icefall -RUN git clone https://github.com/k2-fsa/icefall && \ - cd icefall && \ - pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple - -ENV PYTHONPATH /workspace/icefall:$PYTHONPATH +RUN git clone https://github.com/k2-fsa/icefall /workspace/icefall && \ + cd /workspace/icefall && \ + pip install -r requirements.txt + +ENV PYTHONPATH /workspace/icefall:$PYTHONPATH + +WORKDIR /workspace/icefall From 099cd3a215a4f840bf6312b62aaa4693af31fc51 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Tue, 20 Sep 2022 22:52:49 +0800 Subject: [PATCH 11/15] support exporting to ncnn format via PNNX (#571) --- ...-lstm-transducer-stateless2-2022-09-03.yml | 160 +++++ ...-lstm-transducer-stateless2-2022-09-03.yml | 136 ++++ .gitignore | 2 + ...speech-lstm-transducer-tensorboard-log.png | Bin 0 -> 423059 bytes docs/source/recipes/librispeech/index.rst | 1 + .../lstm_pruned_stateless_transducer.rst | 625 ++++++++++++++++++ .../ASR/lstm_transducer_stateless/lstm.py | 41 +- .../ASR/lstm_transducer_stateless2/export.py | 29 +- .../lstm_transducer_stateless2/ncnn-decode.py | 295 +++++++++ .../streaming-ncnn-decode.py | 353 ++++++++++ .../ASR/lstm_transducer_stateless2/train.py | 3 + .../scaling_converter.py | 36 + 12 files changed, 1674 insertions(+), 7 deletions(-) create mode 100755 .github/scripts/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml create mode 100644 
.github/workflows/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml create mode 100644 docs/source/recipes/librispeech/images/librispeech-lstm-transducer-tensorboard-log.png create mode 100644 docs/source/recipes/librispeech/lstm_pruned_stateless_transducer.rst create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless2/ncnn-decode.py create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless2/streaming-ncnn-decode.py diff --git a/.github/scripts/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml b/.github/scripts/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml new file mode 100755 index 000000000..19d606682 --- /dev/null +++ b/.github/scripts/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml @@ -0,0 +1,160 @@ +#!/usr/bin/env bash + +log() { + # This function is from espnet + local fname=${BASH_SOURCE[1]##*/} + echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*" +} + +cd egs/librispeech/ASR + +repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-lstm-transducer-stateless2-2022-09-03 + +log "Downloading pre-trained model from $repo_url" +git lfs install +git clone $repo_url +repo=$(basename $repo_url) + +log "Display test files" +tree $repo/ +soxi $repo/test_wavs/*.wav +ls -lh $repo/test_wavs/*.wav + +pushd $repo/exp +ln -s pretrained-iter-468000-avg-16.pt pretrained.pt +ln -s pretrained-iter-468000-avg-16.pt epoch-99.pt +popd + +log "Install ncnn and pnnx" + +# We are using a modified ncnn here. Will try to merge it to the official repo +# of ncnn +git clone https://github.com/csukuangfj/ncnn +pushd ncnn +git submodule init +git submodule update python/pybind11 +python3 setup.py bdist_wheel +ls -lh dist/ +pip install dist/*.whl +cd tools/pnnx +mkdir build +cd build +cmake .. 
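+# Build only the pnnx target with 4 parallel jobs; the resulting binary is
+# used below to convert the exported TorchScript models to ncnn format.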
+make -j4 pnnx + +./src/pnnx || echo "pass" + +popd + +log "Test exporting to pnnx format" + +./lstm_transducer_stateless2/export.py \ + --exp-dir $repo/exp \ + --bpe-model $repo/data/lang_bpe_500/bpe.model \ + --epoch 99 \ + --avg 1 \ + --use-averaged-model 0 \ + --pnnx 1 + +./ncnn/tools/pnnx/build/src/pnnx $repo/exp/encoder_jit_trace-pnnx.pt +./ncnn/tools/pnnx/build/src/pnnx $repo/exp/decoder_jit_trace-pnnx.pt +./ncnn/tools/pnnx/build/src/pnnx $repo/exp/joiner_jit_trace-pnnx.pt + +./lstm_transducer_stateless2/ncnn-decode.py \ + --bpe-model-filename $repo/data/lang_bpe_500/bpe.model \ + --encoder-param-filename $repo/exp/encoder_jit_trace-pnnx.ncnn.param \ + --encoder-bin-filename $repo/exp/encoder_jit_trace-pnnx.ncnn.bin \ + --decoder-param-filename $repo/exp/decoder_jit_trace-pnnx.ncnn.param \ + --decoder-bin-filename $repo/exp/decoder_jit_trace-pnnx.ncnn.bin \ + --joiner-param-filename $repo/exp/joiner_jit_trace-pnnx.ncnn.param \ + --joiner-bin-filename $repo/exp/joiner_jit_trace-pnnx.ncnn.bin \ + $repo/test_wavs/1089-134686-0001.wav + +./lstm_transducer_stateless2/streaming-ncnn-decode.py \ + --bpe-model-filename $repo/data/lang_bpe_500/bpe.model \ + --encoder-param-filename $repo/exp/encoder_jit_trace-pnnx.ncnn.param \ + --encoder-bin-filename $repo/exp/encoder_jit_trace-pnnx.ncnn.bin \ + --decoder-param-filename $repo/exp/decoder_jit_trace-pnnx.ncnn.param \ + --decoder-bin-filename $repo/exp/decoder_jit_trace-pnnx.ncnn.bin \ + --joiner-param-filename $repo/exp/joiner_jit_trace-pnnx.ncnn.param \ + --joiner-bin-filename $repo/exp/joiner_jit_trace-pnnx.ncnn.bin \ + $repo/test_wavs/1089-134686-0001.wav + + + +log "Test exporting with torch.jit.trace()" + +./lstm_transducer_stateless2/export.py \ + --exp-dir $repo/exp \ + --bpe-model $repo/data/lang_bpe_500/bpe.model \ + --epoch 99 \ + --avg 1 \ + --use-averaged-model 0 \ + --jit-trace 1 + +log "Decode with models exported by torch.jit.trace()" + +./lstm_transducer_stateless2/jit_pretrained.py \ + --bpe-model $repo/data/lang_bpe_500/bpe.model \ + --encoder-model-filename $repo/exp/encoder_jit_trace.pt \ + --decoder-model-filename $repo/exp/decoder_jit_trace.pt \ + --joiner-model-filename $repo/exp/joiner_jit_trace.pt \ + $repo/test_wavs/1089-134686-0001.wav \ + $repo/test_wavs/1221-135766-0001.wav \ + $repo/test_wavs/1221-135766-0002.wav + +for sym in 1 2 3; do + log "Greedy search with --max-sym-per-frame $sym" + + ./lstm_transducer_stateless2/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame $sym \ + --checkpoint $repo/exp/pretrained.pt \ + --bpe-model $repo/data/lang_bpe_500/bpe.model \ + $repo/test_wavs/1089-134686-0001.wav \ + $repo/test_wavs/1221-135766-0001.wav \ + $repo/test_wavs/1221-135766-0002.wav +done + +for method in modified_beam_search beam_search fast_beam_search; do + log "$method" + + ./lstm_transducer_stateless2/pretrained.py \ + --method $method \ + --beam-size 4 \ + --checkpoint $repo/exp/pretrained.pt \ + --bpe-model $repo/data/lang_bpe_500/bpe.model \ + $repo/test_wavs/1089-134686-0001.wav \ + $repo/test_wavs/1221-135766-0001.wav \ + $repo/test_wavs/1221-135766-0002.wav +done + +echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}" +echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}" +if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"ncnn" ]]; then + mkdir -p lstm_transducer_stateless2/exp + ln -s $PWD/$repo/exp/pretrained.pt lstm_transducer_stateless2/exp/epoch-999.pt + ln -s $PWD/$repo/data/lang_bpe_500 data/ + + ls -lh data + ls -lh lstm_transducer_stateless2/exp 
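+  # epoch-999.pt is a symlink to the downloaded pretrained.pt, so decode.py
+  # can load it via --epoch 999 --avg 1 without checkpoint averaging.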
+ + log "Decoding test-clean and test-other" + + # use a small value for decoding with CPU + max_duration=100 + + for method in greedy_search fast_beam_search modified_beam_search; do + log "Decoding with $method" + + ./lstm_transducer_stateless2/decode.py \ + --decoding-method $method \ + --epoch 999 \ + --avg 1 \ + --use-averaged-model 0 \ + --max-duration $max_duration \ + --exp-dir lstm_transducer_stateless2/exp + done + + rm lstm_transducer_stateless2/exp/*.pt +fi diff --git a/.github/workflows/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml b/.github/workflows/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml new file mode 100644 index 000000000..a3aa0c0b8 --- /dev/null +++ b/.github/workflows/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml @@ -0,0 +1,136 @@ +name: run-librispeech-lstm-transducer-2022-09-03 + +on: + push: + branches: + - master + pull_request: + types: [labeled] + + schedule: + # minute (0-59) + # hour (0-23) + # day of the month (1-31) + # month (1-12) + # day of the week (0-6) + # nightly build at 15:50 UTC time every day + - cron: "50 15 * * *" + +jobs: + run_librispeech_pruned_transducer_stateless3_2022_05_13: + if: github.event.label.name == 'ncnn' || github.event_name == 'push' || github.event_name == 'schedule' + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-18.04] + python-version: [3.8] + + fail-fast: false + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: '**/requirements-ci.txt' + + - name: Install Python dependencies + run: | + grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install + pip uninstall -y protobuf + pip install --no-binary protobuf protobuf + + - name: Cache kaldifeat + id: my-cache + uses: actions/cache@v2 + with: + path: | + ~/tmp/kaldifeat + key: cache-tmp-${{ matrix.python-version }} + + - name: Install kaldifeat + if: steps.my-cache.outputs.cache-hit != 'true' + shell: bash + run: | + .github/scripts/install-kaldifeat.sh + + - name: Cache LibriSpeech test-clean and test-other datasets + id: libri-test-clean-and-test-other-data + uses: actions/cache@v2 + with: + path: | + ~/tmp/download + key: cache-libri-test-clean-and-test-other + + - name: Download LibriSpeech test-clean and test-other + if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true' + shell: bash + run: | + .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh + + - name: Prepare manifests for LibriSpeech test-clean and test-other + shell: bash + run: | + .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh + + - name: Cache LibriSpeech test-clean and test-other fbank features + id: libri-test-clean-and-test-other-fbank + uses: actions/cache@v2 + with: + path: | + ~/tmp/fbank-libri + key: cache-libri-fbank-test-clean-and-test-other-v2 + + - name: Compute fbank for LibriSpeech test-clean and test-other + if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true' + shell: bash + run: | + .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh + + - name: Inference with pre-trained model + shell: bash + env: + GITHUB_EVENT_NAME: ${{ github.event_name }} + GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }} + run: | + mkdir -p egs/librispeech/ASR/data + ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank + ls -lh 
egs/librispeech/ASR/data/*
+
+        sudo apt-get -qq install git-lfs tree sox
+        export PYTHONPATH=$PWD:$PYTHONPATH
+        export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
+        export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH
+
+        .github/scripts/run-librispeech-lstm-transducer-stateless2-2022-09-03.yml
+
+    - name: Display decoding results for lstm_transducer_stateless2
+      if: github.event_name == 'schedule' || github.event.label.name == 'ncnn'
+      shell: bash
+      run: |
+        cd egs/librispeech/ASR
+        tree lstm_transducer_stateless2/exp
+        cd lstm_transducer_stateless2/exp
+        echo "===greedy search==="
+        find greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
+        find greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2
+
+        echo "===fast_beam_search==="
+        find fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
+        find fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2
+
+        echo "===modified beam search==="
+        find modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
+        find modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2
+
+    - name: Upload decoding results for lstm_transducer_stateless2
+      uses: actions/upload-artifact@v2
+      if: github.event_name == 'schedule' || github.event.label.name == 'ncnn'
+      with:
+        name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-18.04-cpu-lstm_transducer_stateless2-2022-09-03
+        path: egs/librispeech/ASR/lstm_transducer_stateless2/exp/
diff --git a/.gitignore b/.gitignore
index 1dbf8f395..406deff6a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,3 +11,5 @@ log
 *.bak
 *-bak
 *bak.py
+*.param
+*.bin
diff --git a/docs/source/recipes/librispeech/images/librispeech-lstm-transducer-tensorboard-log.png b/docs/source/recipes/librispeech/images/librispeech-lstm-transducer-tensorboard-log.png
new file mode 100644
index 0000000000000000000000000000000000000000..cc475a45f7b7b25948b1235034b924c67bf0a73b
GIT binary patch
literal 423059
[423059 bytes of base85-encoded binary data for librispeech-lstm-transducer-tensorboard-log.png (the TensorBoard training-log screenshot) omitted]
zyg+#(M^G>Wv>b*-e|W-Jg^}xCKb1z13UcY%Wz3^fgW)B;M>&I~&z#gdj2DXoL?Toq zQZxb2-0fNL`@n(c)F&Jtm^WyVe-PhV%S*M}GOoZ_WQD2zRdl$$%+zmFj!+BscB|j@ zE-5RQ@D8glaF1M7$z5TW*g8LvG zT^VC~YcbLpguQ^|7<|}i2}{jy#6!r`kjC9%EV|c_Mt}EJ8azeq*b^F z4)@3xN}N$n8MGnQ$P)796hFDuv*CCIxbV_?t}eDqG=-fZf=Ee1YUTsq-4C-=k9gF6 zsa{#IG>#`-VRb5Ix=o-!2 zNg>v!tbq+p{Q0emaPA-M$_gm-f}d9U#4ZBf?iF|?57vHw=PBHf8y&xf*8%={h5J$m z16yxF$c$hWW@jN*L)hjQwKM0mt<`yPqYKCR1kWf5e+AORuAx2aK zh8l&9qh0S$5jK+I!Q9uVBcK)eEgIRt+dOW|p={6RMR3Gv32=~g1VMOiJ>bp1=?kF; z%q$b|@yW5Nf`9x=SkH8Jx&-vTJ7fBXLe>LIfKiLQV!-quDY;afL~YYjh_~KCq9V}a z?FIKY?rdq+_djjNr(aI0eiK7E+HMFK?=CZfyzbOK z)IgjB+9k3M#!4N|dGuF*G)@`0sb(g!juWRk_*>l^c#i>IJ-%%Op~n6r8fZVIXmJ8% zPmY?%7&{?{$;FW5++*J%qerjqLww)btV(2D&5>D})wa`Js?T9B5@HLO)bwlA`$>a- zk1^2vsxOZ!5K+Zh&j{Q$()@C_OuqRc->Pl{b>8dsBGjNUw3XPYLG1SFUl-xaoHiX( z+eI+2Am^=fgOk^B*}sgYvSx_e2Z`PoYsM{4vw)?KVm^-cN)}yeovY3jO(g=YW=uN3 z$JWc({c^CBI?CaeG%qt{^409^vQ-U%NfNp#<(WO%-`)jP@T2w&#<07wGC+`z(GQO9wB@(X+t+#>h%2Ob7mE9bq z@f-2Hjwpzxlq&}zr?ITDa{&yf=Y)7c$8p0>6$moCT`&@_pIGSm0%*u2HuZN z<^(=@HbBSrSNw^sX->yH4w=&W)|+2S?SHvuG5~zw)a)ws zKm1V19AnQ$I#2!ATA1)Hb{6d7)#TuFyndd_`L!9h50;6t)8R2?rzd2bSKfl*aP)P^ zF;4v9*M}Qf^NG?NM+>&-2l6%&jmho}KcTDNjL%Br0@j8W&4LJ3U0=#)$cujTWmBA9 z)KD(1)a2~|7?!VtJLZ#)&TZ$CFEGGO{ zdtzr2eTU)jN8e{p-^P2ll@*qM@#DFn1o0%@&?WtHdpKwH<9u~XJob`V^Qj7FH_L^~ zIWC?L)z=NF{J_1=`*r>^@Sj8Uc?H<3%MkD?gDYl)Y76F?KE^Uc5?aWefN4-bpyUO@m$Z#JTg0V9hs=O+#70`LrvS9hyO0=+}E2|&jhctwg3Vw zyueenfc9w*L}d6>&$yk^;a{Gl+^PYDuB4l|WQtM>A_&4+oSK66<`(eB9B>W9+_THr zyx{C2gi2pkA{4I#(IdsS*I$|MwNf$lgkMF^3ekD;!9#~U?mLPtzy=)n<)%@xbkt{R z$i?gjnv@b&;Lk+1kG!VjpOstEg^fHuO(vgCBfk#(h& zle`smC{xI2#KFN1IB%nsUs1dQA9k9Tom2kHs6GLYy=zuQ>$u_!O`7U^=*cjeZ%kaM7C34!4)wG{i$vP zCpk_>0(VcEiG=~0`*8szH2LJ$24KY23@%1-mX7aJA6wa^(CX=SFcuT{A{pZo)dZJ` zDghfUa0T~w_bA6^4##YYtQOJ?-5)=s^S-)N^iiNm(jlQ^p8D7nR^{9X+8RG1wN*9@ zdz||M34~OPJ(o62rq1Z9f>=S3J4et-!y!Nv}G9iy_GmD4Lx_@w1Hx=tgW)e-iSA$=-4ZIa!exHe93-(K&# zk+*P>0w@`k+u@NCSwqF(aBUR07)h(-D;f{C#p?+ttSvGN@Q?)Z5E@*`=*e+Ed#Fw+**ucvj zIn1MO?xODO6%2d$Px%T)nRO^nRSVctRAb`fR`Xu#6?NO25lIt`a*5)qWP1j6?giz5 zhHKO_B+9RJ%qr(`uaB!{GT&W-zH%uJd&(PE*8wzr#fJ;ll^&jeE{H~wuA&YwT7L7@9D5~d?JePHqWo7)9#^6B zYOFMx6Z@UnOtL4n=ks~IpzgaLuF4A8&mBf1{{-aq!rw1D{S3zQL{6oPZN1YaZ@uqA z*J@nlH+>4vkoan|Os%e(Zg6Py7UGhdS!0^lwfZ{qHW0<*y;WQM?a@)+brd?ILr&R0 zjcY*uI6qc=4$4v5&Cvd`sV|J~@aF&Jp^dpwSEoCUveKc%`3&K`&$C>));Vnj#{qX= z_DnX4&xNI@-e+Bx0iHOu348wcD;8=g`J8N%;|99yoS*dRCG>vs6B0LUi%_gP4q@xC z=C$(Q8~S>?#jCDZXWooW#jDKzqqHrMfI*8&xXXM7)pIwP_Yd&*pD1zHr0FX3U(fzg zll*;PMGBheq2^TVZk4YsGZ8IkH)T^P?O@oI=wxrJm#%lOFX3i19OPR}F1nZ1_Wxl} zWXH2kN3NgsOG3r{+T-7M&G0+kigvcsfRVWe)h4^ypB4?m)U{PLH7Spy7}G&EBP_Ma z*~t{;6SM%t79kD!1PRsXh(>Cg%L1=oEz4*Qu5kB5H!e^2(EF#BAH}!?^Hc#V+$vc+BM?I9)|E%^*#Gm z$7q&N+67i|v58isJU<^O^<^6mogbm|t-Wn@3WS_DSWl!Q;@VQ@2Jw@2-(vFpO?aJf z>2wEl^Xfm|CwRJg=>QSCYhmDgZQIujQ=c{rWrJgj+Wjn>+;=I=p9FGTx^7Et1pMQ( ze-+fd!3$*BEDpx}NrUAONXLtSzLV~@#Lk$QV`FsOHXD9{Me5#-4q^B$7raF17Ohgc zbXbQf3M4s1S;w%qaCtT}yesZ??B<;kRax`B7fTs*;U2;;L8)NHMDs9;LHfNB9a*<( zVm=W@YQj`kA6S=#!?5PN-szUkLPsEMP<;i2$DM(O@qf(N*TK7;9xnI|emZ5{HbjzF zjDf?%#KT*3oj3OS)-dR_Qu*fd`Sp4K`lR$BjGwwQpflKWH_vPR^R$&J8#1ZM9PuJ7 z-H8RdM0bzM*IaECwF+8h*YVq){1uKFp7Ix&Ojv&P;-q0}1QrMM<$Lp*HKowQhAxMO zimAu#zcsU~wt&|ZxALp-N%3Jiap^>Ubgw^1c7M=cnI|wcGqA~k9Wbfw%i2mCU{r#x z+qD;=4Z3IO=q{wW`rJbtLdg{gyDp{0oBEpcDArRXX019T;LjL4;&GJl><_F&Sl#(= zyio;i|I%&SnNhhlXzDyaYkP9e;Btl zlll0f(?AWYdkq;$aNl{b@+DQ1?O8ZEGrEm#NmH=z^P9kgUklo-WOVHQAA4^dR7bb& z4PwEa00BZExD(th5HvvWjk|7KgL?=B*8o9-yKUSxSa5eX1b6qZIWphenmgybXTCq@ zR&`NS(X3wGkNqB9r2;`1W{vP4(>762T^vVXOHOo-(Kr{)gXQsPdaF(Kejc_)W5IGv 
zvVB`@AzR!t$K~@47<#G;k&+7LDj3(W>KJxCHx&4l7R%U;O!7lYFDN7LiRx;Z1kBNG zWYme&23SuOK@A*xyL7k58;5)BqjF%HW@3__@;7njBa@9YwATXz0}-jh0=%J|+z3Lc z2f}VdA3#vdaq6n~)5~s%yHG8wjwZMOq9+615}4(}#FPDpohR>xnQ8$*c=YVOgRaU+ z7CS!Qv%LK>I6arZo#}1060z#ti9-9C0y;mhb@m9!W=ieDpRRAa16-VqU@kZiwJ51B zkGz3*(=>bsgvOlWhe(-A_PdTRp29$5Z~FP(%1S&8p*&9k%)5huNTTZ~_ZQ9KnR0(*UlGxuT*D(sxNW=CWOMDWDD~*w1%uw7ZecAPNJ-QP&kgH|r}=CvghH2u8X(_r-&Heuo+v@QmjYh-X>Q}fy@58d3i_RY(_N1GVnI>=!rQ)00QiErAV+&VmtRlsCw}ujjj35dVMjex?IGqRwp0=vRMd{>HZYG$o zm^8a4$OfphSn)G?;w2-W4f&m%i+rIVD&WBVtf4&N#?|MBYCkOjC4O}0=TnG zZ`ZBdPntHXR73De3lP0GEcA_m5f`eB@a8JP{BK?Blp3nK=51*Gf;JC^u1yXz_bVed zQ`S64;TPX)g1H!lTh*)xylks;T$0=7&BlHAI3~1cba-SWhg?+p7gZP=U5uqLro#+kVu6!!_LxHh@_Xtt6eVR{owv$Sua+44cvSWM{lI_|VL^$C*d zI2jW%_@?P-*G5Y7ds=j*GpZP|*b1K8(ZYWLB$~h}K5vY`Jbrs$; z=r?;$xG+h&n%bUel8zZSTeM@)Z;2(3Vo?5V-HJD(qVP?8rCtC+euhH|kq%Gjg(9G4 zUU62~rhKAd8rg5$W8vq3w59ZButo{qe{H@wrIT?nbqcX_KsQk>~bKX^<*(b^Rjvk!=7S0#5e`%ULn zavRa&m>org?>Nrtp^;Yz^ib|uQP%|zK>>`>H~?bT!;(N6^Nc0_$$z{q_x?ExV2m>I8sfI3 zG_rKz&ZEAB?FAh8^9TNr7LvV=2q;VVnEW)?#-t6%%;Q8}^|2U)CTK2#d-c{(ouTAY zOaA*L4d*X4K(vNj5?kO=&@bv9lvlWhnfD+^YflhobQ;C@ei_2LK_0;_ue3(7#tY6LBjm~BG30~ z@bCjykVD?M7FbvI5SRhkDKR{fPzBo|X)-RWcqXl8Q0th27MLC>(DdCN<~I&ozaC=e zcFIhjRJ+>blB>b*mY2!ZW&^YgKi=mHz?tbAe;Pro!+aEDP3>jDbIFtXn{S2avK@Bj z+=DGPP5!PvR7?S3C?NQk)$yu-)#ELjn_^k=Rm;_!cJE!c6^-#AnZ|tpSim;Wp*QC- z44<0aWk5BV%db2bAE}HC;~Eq3u?aWQhe-`2 zQcf6_T?{-O~qhC5q9W8t*tc664NrHy=f{behMHSD1ivP~!HMMLZF#=R6z z+mzPwP}8>x#4F@ocGvw^q@PRFWesy;bHk+Z=b8rTV%}-zIi`M z*CUzdJDS6=na84kAR;P4F`4(h!{QNQayBYh_~snX{ZFC!kb}LiP3_H}T~xUv@iZ=O z-Gz@Cd?PQZPXBIU%PIHCi<&M|)N!bM&OG-kLrVCpj0S0ne; zzycnt^*jl`@)g|y$5pHJ6#mQKC~aA(Pp)web0Id!licHCnNu2MhOOGiLWT>O*EU%g zS~;YXwiRu#BuF!&dk@OY(K{_SfeJqY+6R_XKj*J6?P4j-qwiqrIOkO$rrlkw__on9 z^zyB{v5CHLluKehd*zq$unhR2$!y}xhRdYYL7`OvI@oWQzbriYgwXm(0*79_v7GmH zZPU$8#}Yiw-0H=c!9T=4!K9yB>0-j4YiL`7MrF_$_wHU;qlC7;81En_ogq?lS&0)n zaPbu0(OP$dV-)B1n_IS$sj0WqchXIFztucyq{X#exYvn=L5UVsd@E`wzn9>sX<*bP zaMTQy_py=3*Y1)SN~_Ut*Qx+X(e|ae-tIh_q}49f6)5H>kgD6qX~lnc-<2r5W#E3q ziDxn2M%FyK=of2(gQ_=Ql?@4b8?6eHXx5+PvEN1_QB^B?kxU;R6`2l4acpdHuteuo zf!drjNC~r3rtDf|m)1)r)I!jO6l4;#N^eXmnM##sL>OQOhKG|#rMCJ|z((&;V~%{v z;*;7~<2DoKlWZi2#+arug~IGiK%zuC-|72oxKt5e)++o)Db*p|_v7+7=aWJi4lbhG zDW292Q#DW-rJyMK9o@yvr!~T=LeTPAdA@XN)}zB}xB{hgY6y+iCK%Fv0Zsi`GF zc#{r?fN3qr^j#p^I&_;k=zr03)5Eo3R~PYd5|i5(c>p{Sbb$7)?j|7dpg-==(Id5> z_tZi6{aZ}iEQ1!aPiFzi9#t~L7-&`LK~c{% zh}zUAz7phFq1wBrSk^)u&-!_im}+-n9=dSZX-026(xSFgwujI8*M`1mtuD~A(A^Uw)96OMFYLLU%M?<^3&akq($7=00kr5 z@}5WfsVgiF@?;*{8LrjCeC`6yJt+l!qG5ZNJ@UpgYq*GMDWbb zN+x%-`3j#`%t_{7xm(GS?O59*9bzA)%)24UljkL~o62@VX{&J8wI(b3E?Es7HiGaJ zN5%To2dQmI?*sK7@BiU=se%G;oiE*K@TTk6k&>cFzdDOce!TXFW?I7W=XpV}TS~%}hR1^i&dPGZ&kh^aJSF*!=+k8n5 zBj3sK(ba@VH3o5E2R$6*rLH4>Iw`55paYUcA3%YptJx!{y2txxXyA4j_zs}UU`&R8)5{NSeT3YVH4oCq!JXf$JUy-j{ZqXw`e^L4=?-fC!jb4A5?XS@~i(ZK#ge(if#(d@iM z>h=n@N(TEFDf{JK*|;z1&l(cz7*fNKJJP-?BSdYpQQYikt{E-yo}F^fCg0p_3uLT$ zl6h@F!BPf`IItcw8Q%j43s ztA)q(xEU}6>FWwM19{27m{L$$Evs7eOF@o_>1oV zKPg(&j(jyt=F!z@FR1zQhqj{I3)4L^=R+gQ)L}w7z6i+}? ziWp3%E&Jp)3E4C%#gKFk#@hx~1EI9-h`C^8#siVp^=|LMT;AMEI+9C1Z6*YWb)f+R z2SfE`1T)UGN*FnBZ|7LaeybL9aaiTN|3Kq?zTg%DIW?(cgIDFmnGhCzJl6D5T4l!E1L!xy9^+yEGhnGe}{BbX4owYAw*1F{bKVT0&9sh2fPlWBvA> zot@|tVrS=8B*)7M3npS^&7?gEcFdz}JKND*R61WCN zOy!GS{EA}(pT`J7x>NIaQxIK{E}+e_H?l_&ALNQmhO_3XhCRG3^c<%H1U> z5|#s|0VQND9I}FK1jG>0i4ZwTiiRvn^LHlk!1RK~{jt=;+wBtO7SYk&5l%BPA_+FW z^A-13BG(nbF|d8iQxlqNO$QWGZ+r^sY%~G}&=@#0J(@%;h+M-nYx>vcRiU^wS8wuycHQwzVPcIzBTJm@GV_?r(*Y5PfRSu=hD}}?b4KKQ%g?|U~ zBv1i{m_tDq*kH_o^p_sh6^PlyU%`zO_czUGEpy3*Z|vIJVXa8>_7F5buXQax;6(`? 
zYQ7gsd*~zvAnMESKc&ozNeqZK5ia^s(NG>?im5|tIAz(kqlhBJI$V?x2qnQa_rI;@ zM^t$ozBT@+G>f#k{}n^&a=G@~v9V}RS1fADE@`Ada|r3k_mi1``l|}8o99VOE=%^H zR5zr{u3_v&jJMax-=()k?4pEwC$ZG|Y}+1=dijT|fguWg_#+6&Vyj)#X&nbIjnzA@ zTRjgsrq%E7E*E@7poylF1(5qDASYF->)xZ5UQCgSOd7vs(RU6eZ795-Dl0;Yd`+M=L9LErIan*>u$nfWM+jQO*i_!`=y_^q?cJ;r|+?2I>H|jAtiu(n1J~(RW3M#J{!^j9b|19 zB6%siIjI6I!JWN-HCmO3YfuSKx`X^E7AHR``x|H#T={eTUCgT;7H@uNx^B}LrPbc) z3oMfW$I4*{_17iW6`|j#iAzXlk9Et$cr6XnuKPZ2p+rmeU_TfK7zsG;MWDlx{b$d6 zWl%mO?Ih}q@2L6Adb&OrKloagY)&H0JH2c#rh0@=3Q2T5n2q<5b^!M9!#8e*Bdl@q@xD{FnN&E;OQ2| zd~eg+AuFU3e}i`eiOIi~LbWf?$@2GvZW1fcO`*Dm5qqfH(c+V_$;P?Ec~Mnf8yQi^ zOm%N{E|QAZb$Vv+>v(vLt%XYNh^?P)nnemB#s01usnp%UJVhU9Jkyr8XfAbqm-OY+ z48l zBWHT8B=?nAk5+p=)nccYKB!p1oF1UZZMQ@5=_D92bL_oJQ!3!Y^^kI=7p z>GqP%(-TJ^7}ziE9}LDY&>uQl(n-P>L?ea&Sop@e+CklXSG=#1?|t@IY_l%;UYv{^ z)d(SS?34vlyM3|Cx)&T6Ko~J9NwT%W*lQBX(rJ zhZXEU!d|zQ#skavG*D9HV!jFkr%n9ImDaWhoZKaYy--uB*$(!>2}L2(lFzpOBfLvq=JTA>9xBmlbax+5n{*)VmCmKVK7M~ER=Z;8 z=~k!N=ZSm(TO%i{5uZ#{-Blx0mw;KY*7V}Q8EncPQ-K)Vu+CAR8uW{ksh$OTEGVi? zsU(`Qz{uK`t?ndpwS31d)GdNk7(5P?l+4lDC2woD)9fM#I_sEEQfhiGO>-^D)mwg8 zfuA>j^gGH6oC**Fc`jt6F93BERHJyvpFns`vK96ZZZ3?wSl5_AQ+A+K2~qy@2yLEJ z+uMmL?>hOd-h59|z%TB2yYxbesa$4(yn5t1P4!+8X#)Peq>q1z%Lt`%v(jD=FxV0i z-*kw76QS^tzaBk@b>2%(bFbF7pdimP0?jp{K2IRj%3i3XHcRgXyP{XCL|$>_8X~@@ zU4vxn>w2-`&rd%!H&0i{2Gvwm@>eh`xk=|;uFbmG=`vR^@Aj)I6f5tu_2XVw&SCc} zu|*2Gep)Vz?BY3FxE)rVH<}Zkf?g3AV*G9@F{vE7y(CTM-qB1x*`#HES1!{VP8ZLF z|HNEq9V^_!$L>|oSUUJS_B})o-eGEz2>*1>Ew{>E2~xsFr(OF-n+e`2HMcoO;ZB;E z%6N2s+;>pOl*3Y#>l1BUHT=5JG;-@1Jccs8{2 zgz+=IfDd!Ev~-!f$x7mm)43jZz>)0ji<_`1m*QM4WZH5vy`b07BBKgTi!Zxgdf#vt zTYANLrFeK#-$~5V+OmhW=%(RW?JC>qctJ^YN%3(DHec~c=8gL~8@;Apta2sx)a%kt zyzhoOM6hW(ui`t5#yJ zBZ%?&Nm#Z)^_Nx{@`;doWugL7--`S3^eux=H--)d{2DK4g1u2AQZuZ*Wu!15yZJ9U zvKa>$bEM_zFo^_t3$^jJxk1BfI40uaEe^#v+)kr;N57Ppc*0PPrc6!qO0r1nrpHC& z*&S9s;JnR-{n{}u>$1l!&4*6J&MSN3VA;6)$jr)mRdRBFJJS|f&5oy$<%faZ$fV(* zny}0hh=dhNB5_9BH}ny&Chbohs=>s1YoCxG%tXlLBLLzrRpZu>>74r4b9aU76wHyZ z&|e4fpuj(_f?j>nE8~bhn_|aU%*@1@UwH4vtBCyl*o%&dX%u$#J3cw%n@+!|ht<%| zH*Md>$MJ8ybPUUQ5I^7w5iY^Xv;j%rlSbfwp{k6<`}?oEiWsAo68btHj?YHWCKs!P z1)Q<#>>Mg9;n*aYvjtDC@!_VblC zSb)^s9D%Lfok^$S)1QYDn}|k4F#*?7h@{KiN(yQB7a)T23z?U&{$t@NY`+gfdk`AF zbEXR?2o__YDlv|KTyopOJ9_0uacwZ;MU%z#u^~$E&LebfnPG;Tetv@Q)`u)l-FN?C zQ*m#l_iVNXA7W=-Q48&yXMXfjs#PMhKggfHJ8r9fFEyK`Pyhu0NllWvu5VFpy8o_T zb#ZzhX{o0<8)S3+QS`2|F;B~!eT!t~XT|L+uI@wm17y1$x-V*&_lm)Gc6J;fSnls% zP|~tb6{PsTLRSN7KGJFf$)j?)*1@I}u}_{tr2Cx%?MEjpWywsAATAQyimcbZohL%5_%s$Rd3br&?>g5{pyIl6ln*9p=<5n$3*e&` z@reaTfkuhc63|_bhN~YWdCvR?IerF&_>=u!d`v1WJf?{Hm-NhOjXSJk*S9ODPt6o^ zwoPKhvWcHh9uu;eYt@y8y<>L(CpBL)c_EFtY{xd6c#znJYB7-7D)dibd1C($<(U0H zTAEIx>6de&5^--ilBT3x0vj>LYi=9N;7-ZyvSF?irDM|KXu%gF@!~vhsNhknfXJph0AIYV57F0tIcSj)duRwd@|5MWaDw^9L9}aCx;}bJsUjK#m-->IT%zV zEi~SH>;5#^Fp*Vg-Bq5{l2=GX&Dn(z(LKCGuU*iy?K%@kH*jYTNFs`@EE~(4^LBwI+ugGCm zw3!&gu5KQ1F@4;kRqB2ioHP$AM-f=5H17K)hP2PRw^Nn>MP-C{0hw`J$rt_Cvi;K3 z4jqGy>E!fOYR7sF{;vROKLlxVzjoRTABqvdi|{$8xy__t)G4?sS54LL6?uaU?eMt? 
zFo^i&O_%&Cf7tpSxwC?^B458Eizn2x&m#Knf)pm8OmpVS0*n4q&-dYC8Qtvb_9{WJ z;U>2I3f3!5;irHz6E8xt<^r+>A1;RnZNOuk zNDc*lVl}}`vkx-vb*uUM%{2pB!SJk7rz@FH?($D@>GgVtPU3w*J_RC~b z!oA5+-#irDSX|IM;_72r+ANqWK`_0sE%C>ZLkZy_6@xiWgH10|21N||=W8_uY4v31 z+i^N3hQui}Vw05%qyk>b5#L|7kcj@VI8~jbqIw#9!(kGArUbb+>1Mz0g5}ig$60yU z1An`|FlAK!Npb#xU&i|K+*0}C%#FSN7KxrNx=4mz2C#uqX{T`Te5`{BuaEW(s3+)Jm0%&TE9}c3PfE=~Cl6Y_?50!%Aq%*IZY!>x6Y$i1?7oH!=8sH?+ymYHru-BJ zuyC*h2?nL`?^8%?7u3y{At{)9g8D9AO0&)Hb z08jGAwZmfxl25c|m@&P45L>cy=>y`zZp{n_hTHKyjHxU2a4Lv4JR} zt$r)e1DY&kUMqv>^kAOhALoSz9O)B4}$I+d023YB%~|Y&>K&EDfP} zxxrr82XvMT`#;BDtgcp`{O;c|Pb<|-r-1v{+Jq^7Q?y5k+|xw~U4FlDmADPW5|*x2 zcauRD%8D>x7!X+s@Pl~7%bG^=@6yd#KqHjigS;Zn)GWBVxk^DVL+|Yb!AxP#HSrYO z_8U^3AZf$^B4nEiXZYT+($(JOeg;Uiu$7G0Q1t6Lr!)$pS=vUwZsw-qnYTqUQ_QL_ z4dva{0vC^m(A{-)ZRd^`d$fPOgP_0Uat^IPoFSM|NlL)SS8lLwon*S#+x_E3&vlk3 ztzr|LIf(h-dI4d`ECf&cG@)Ne#!xp{4I>gnTBU?hIc>k9*K%W|EZX7u{2gaHfMH8fuWgUD@lW*dcKDjsaTK52E|OkCG>okDn)|Db7oAr+vr z$*y-!h}(X(CG!1$vR%1w*fa>#fi5#E#&)LIx@*qs7J44G zYMTvxrw5l*=KXNJEj(bMWuolnbX)c@Upx%OqaB#N=aJ#zG}=`*O&Z_M6TS< zA|tkSUb8oipFx!lFQrLsmN6HSrQ420QfJ^Y$Md+EKYdC@V0w(eJate619pScFd z()5#_uveIiBBj4D{+vGr6k@|^}^!p%D>B;m%Z zQeRLJ81S5!+s`3eM;A(JXFvEgVq4Q4l|xHs<0 zUWH|r_;EBNh0DmNXr?X@a(??&zc=QEGy^v-A)FP;@Za7nxuD~&r||g-stqisO|s~C zyS-)wFMpPb!ke0nJ1D5n%MZ2MckfaTg=E&0RO#ih6eT_lEBVX+Yz$E*6dT5#wrCUX z#labRNR6=!;`(souKYudHMLxmsqM8odEjlg&W{GudFXi%_53Ihut*z97ZNaZi9-Ps ztAeU$f&s6wFC2(v@`;vt+)KIN#PuxcfuQM}-I5Vgo4dD#vbw&-1(Gj#B0R7}nG#}% z;y$j~sk=s+Y~jl)7y`~K!6Kvj*G+=?o;{+Xdh6QimUm}pK85@6vn)&e4GxBT(qt|f z%nR~IBV;wCiL6S^Pp`ObvQ0ft5fYNh90LZ-UZXf`z+LR(Rcqlo)GlTWtBCVzXD zdS%**)vXNE*2fUP){>f!w^-Y(lv0tZ-@Z_qw)^=P63)UqMT=oITa>UrFB>gdPW>s9 zBIq?_eY7;tXCguHDPC3aM$)^j2E-tvzE<*)9^@-dWWSoVdqvHmxog|f*0P?G&QRpQ zifFMtA zeqhT|QpA~lbrO#=vW;R=>#`OkOos50^8#t(;?U2Qyh!YU{XCm%W^%G%-MPUB2J>Af zaowqp*w?0mveZs5+RZ~O?Yij2hyH;o_ki&^@5j^Fo`r`;QxM;ybARoGnnD%SUIfLN z2M&8$)S&C1MEJj`@!$M1>Hvp5DGGYNchnO(2nBp^KC2SM_2_YReh>~!)%=4ZKLa6!qi6kUg2%q{9-#wu9h^hyyb*D!*{1V#MgK706-)cpFcj^ z0Z}!V2+jf-R>Wsi_-`)%OT1sx4vU=u_m_n2KY*Ps#li%0EuB_l2whRS3% z;rY?Ceg&_>$NUZBJZp`Aagn>gCGQF$WE0QaN$=*qLju^!jq2|psMO^&hzlr_jqM0T zLP7;cFS4RL)OAfi+~DS(vF?o%RU{uR zAOIIlGRL3f&)_i(@js{C|Mexx^VYAj_r1&0#+Bf>d{LhjaKCdj@J&b&X8Y%$L0lEU z2#}Q`N9P{kbL?)|hLq;V{?6bY;2tNh?~Cf`enjj+>S=Hfm|lqM(^udhI8N%f!QHk* zATKLacfeqtIj>>tyZyJ@C}D!DP*o9C8Xo8zfyRm^VP{t4$-S$)BewxshTKnNr8KbD z%b;HmU1u9pf6GIAjh7I4BAOx}9uG1FxmlRJK}5E~PadA9&XPor><#j8LNPFl@24iG zWdg&jJv@I!qgzh<{%>{h-!SMiV=BSzI~~$>q$i6AFv^{tIqp0bn(GNqryx+^WmeFz z7bV12$N}}f;u<;YWee3;eGLnTjq*pV9GYJLkH;s&FwIQzaGr0dNb9*-Sh2^_8l(Oj z)|8g@^ZqS^b5V=HH#gU-EN>^knvMnVh4yn5qv?2>k)Yx5P%?+<@oER$H?HL5r1DL7pfCY?gt+H0~MwYGH@-yFhN`ptCl>C zrZ3}Ir1y`O4S98o5J9YewX1C{IA9|j{HaZs^@8&QlrS@j?pC>ooy35o`Ktwrsu~XK zr+;T*^=_8dt9OG zvuj-SN5}Q^GdcU$$CV_ji~)6WE&(@u%l1j594@u7F&*SRo%}B)=x@R`E=kVHqv7aO zwGf`1cL2lm6)@G>xS;cOneoM0K4>nHW&^u*r_j=Av!`tCdJtD2B1Y`}z1N6T1|wqS zo~xzeD4=4b<^dJsRmJ|WIeQNDQkjP2x(uF{ha^B1lPm;CO{o|%?f?LQ=Qow&fe)N| zJcHKF{qI-_;HC-&>~>xwTZPDtf=t9|t7jOYE(G_FqRVA^+Vp?aTngmwtL2Wy2Xnzd z2}A1_T$fmwrQHOqh3HD3KHKW0E@#mEoxR+Irp|5kn$jcLtu=LGMeS%|2$e7}A_A!4 z|DA-xpqxv>*TX#=-px|krF&)9bhqfjqYDxoC#RY}S|J=9<5tYRu3)!*^Ta-N{&+vD zAVnm$$sUQ|A>jX|SJKkTDz>+m#<(AGufq1P7ziPMn37QwGqaSF&XSUk)#(S}eV0K_ z2Bb28x0nIb&6O~r7d_vz;tDc=7n5He#mE_%g{Lc$>B{}aRMJtY;~-tMr*w8VaZ|Sh zHOjy!><-C=nBF86=3Q;|x$+MnGbB0u_cH(gZoAUN{W?>Sbtc}1$w(356b(@Qrwdq8 z0QwvudI!5F=Vv&wnknwTlC=NEYWZ{G09ps`m-AO6KtzIy*irsHq5q4n7fpFiJHVRo zORxd<;k{aN(7zX`|6;Sq%9VJFT04w&JrqK|*`s{R8)Mbp5gSD2j& z91Zxv!c+0^Xs~mzuP*ez?!*rYDy7U?GEb`HRS7zyRX4NNKW_BR0F>+s{j5MM31RIU 
zb;-tYq=dL#w!i^{*Z(PLX8uOoz5f&1e(p?B99fY?d}$(Q>-wbAgvMWZ(C>;?>gu8Kt(@p)+(PQSF$8g?!X{gZG~i z4nPPju&7{)<6MA2sc4=99Dn`t*OPpfEW&u;NA&mqkS9Heo}a7p3HTvu^dCypwkZO@ zE%svc0Lz5R{1;CD7f9{zf67dHCRpc-|9RoRK~KGE~^~?I+f0>M%lQ(PTBDG&@dQ0NB!!XDIZS?V36&EVz)S*sK!$ z`p~SOC8JV#wrvb_&t#AhM08k%KUfZrT;1<9Nk1HqFH2-?$ zcoU5vZ9t7%p$=vr^iAv}RNR|Pc}CN$z-GzW{vs3?O~NbSJ*AHsVj|+cvFNw3-9Ck$ z`90IKf*qOD++;92jSR=qDn7dp>tK@M|J(ArnodJ&agqC{#{XOyh3WNEJ)hUuPb4=h zE`TZglKvBw0v$!g8|IY(HfCs_ z_7?>=fn3yZn86|0JraTGW@ko05NJH&h@1Z4&dG*+2=1kpz=Te?o8qD zAF@Q}zITA@78LBeq?7_?gs$_0jb|I;s5#nZ8C!kUuBy(Le>U5c{T1fxeh%KHBh@_z zU`PSx;ho`+si2{(i+X7*D8~LC_r(Vw;Dc$YzW2wAXL(jBf4v9X3WPX;4}d^l;$To> zRD6Ix4f;47aiRQhQ>}bEum2$>)tWQuEx^Ypuw54a9+hq90k`ht5q_Bh(pcNh&r5&( z$I_(0(mMD@kKzNgOSU`>=+BK`$gd;;nf*5;6Xl@>ZslotS3&>;2=W2i^@{zF2E`@o zGksW4RMq&TfM||yn^A=n5!>wIc5Z~D(RS=vn0ix4NLZtM1~n;6J)_nYd(p9zT~BT( z|B!8|%mani6PuW9N}G)t^b@~Rc2vJJU2L6|td19Sh@8BC$7ewHlL_!DnNV0{$iIu4 zW%$R&BrmZMI;Dsdue`G?D~!(D9bbiKt6f51cf6A}%Xl2GH|k)pDMUZ&2@miYFS&YE zQpe`Z;ve2j;mvJw!n4%yJ#CzWs=A_$lV?)aNjCv+<)WB~??r5~K9#)Mb?KB#JzVl_ zQ^FrJ%Od(cd(C2HSfV-iSpU6Gl{$(|?rQuOWg;5h)20NNt9QA3{cdwO!*vj0<5a8S zb0Y0I(>{5h%)_&NMLmr?a{q3G>t)%Y5YJ~M^0#YVDpLa$%+BtzXlK(;+~~i9WTkVO z#+mS2O%$c|0rEt;!lfkcr1JCYLjKP_3D04~iMsho1D?R71e;4P9;FX?L4T^eGd=e`e({yng7{9;0Spuk;^3mz;)Hp(aiP=32} zL(}EiS+F&ZXfAHYL8m-&}Jsw=jeJU@zh!qE^T=YYgZ*MX5 zbg~n$hZVA!Y%V|Wg>WaodMnnMQlyv>Kdzx&o=>Lo`}iW~sY65Nauot~o%($bN)&OF z)p^ac4#)!DNT4=VW+@N-tGcA~xm!B#yl~vF+}J4$Niq*3Dmk`~b;>vW#^XA`+1CV! zOK3bo(x^NF5>-fL^mH@vKeI!=rI^%5eQr4` zbML$u8(rbs!NSj5Z!AH=`4^(FVDBqlsUA)E;q-YTGMS4R))zSg(Tfmnjw>Gm(AGvO zr##nY-hHoWwc5~Kq|Y@kSuSkud3V7*n}E#Yt!Hmh8JPv6UOm&Ar6}FFTe(vU=V!BZ zH%sx$QI{NxCg^@%{L>Y;C6o4s%|b%>zD_?$b@fKX@_-K6*VhidJZem8f1Gj+Pok9X zCPGr0A(U`4I5X8PS+>hF$`^BSHdW?3c1*rQpZkd#vP}LEc-*~B>}~bI#?g;>xkJ>kSA6MBw_a!wYs)in~4$qt`iUG)4+6NJsOXfmeu+-`*RJQgU7 zda%7n3pR&2G?$si*@#!I?^z4jVtjAhuS1{VRaedPc}kzGnM_VUVmnrC#pdDiruwR^ z2j?A8_UTV4IIJ&Mj=)Rrc)DQV^Wdj)u70`8Qu5v6UCH#Muc&zQa>uvKU2VpkJYB5* z>iBop4J|n1<-zA{h4-3+Fa1}jeKN#g-xJq_R9y(5WpjHe{tQGs0xxP`+-i3~9L3kn z+F}>EcwBxeYq5HsnVsEX0>^kKcHW`1aV+ThgL2Lj!0WrvHn*j;Z3FPQo280rsJpKR z1*z3PX8}wNKkBavAcgM-z0vtrrBUwmEmYg?&e}^SBVvW2dibL!8;b)fiQ2qvQR+ou zKhDK1g@c)oe(4=%tN>65$vpGwg!hWSg~!SzKsK;-p)+6*>!`Jg$5w%EpkLP*kACs~ zQ+fS(QaO9yDTS8${Mr6h(wIu!9hrTy-gi)+*Kj;ftO72>;<XOF864)l}Qi`ew>5#OBiv%(pYR7y} zlO1w7)z{+Yz|>lojTaTmu6q&?CpQaM=@&l8#=vXOnN1X759H>#x6qs2j7465|Ahfh@CgC)pHDdgxy zc*-6_&SMj8?&UXM1fSGR8?PT5whoRaKQK6+7^j|EM&&ZNtSRXi+aOpuh8<5iw+Y?e z%kHU9dX7AN)A34IQ(ek;bEJCiAQ=$tL#wTFhlJCnk>!u8%1K3E&4nJDyWVxwj}>d> zz5zLA8cxHgh1WB}dR0L#7s`+_D(FlK`{!X_uN;QiJN5oIm8J%W$l&&(LY+QrvliwH zj^)v#q;9;U*fbT6+g3tv=7?{tv5l7eZ>uYh4geu%-bl{?DB*UV`DU! 
z+_ir&Xj0axi{lo%Z$ipA<)A;XaxPPSWuTp`T0hgct_1hmd=;#gv5t~{E!@9sF^@WN zobj;_oRs&TUpTa61T4P>Qy3jJ6UG;N00kRdR!m(2lX1eZr_gU6;+9vr)>Deqe`S-n z^)QGd1Ge^k`#X;Ws^}@V@96gzHv9$+3Y}+mpwl=e-}{nJS+FG*7=ot+#%hd8VlLGp zarh&8FZ+oVmbXhySI68uda};EPD<}y_(*>abv9kKvrTG-)Nv{8V3U|_!yH#suN+If zp!?iU8nWF!$mA4WAz~}kIX?TDM*I%UEh7-RXyDs-=aF*+YImma(gsl;A?fkHR{XK$+cEhoG*~^_`>zB$2Ig|?lyYBYF0M4*z};6)qj*5cT@f3JwUr+8WgD~_LdP_ zir@~x2fLycCvw-R%woYfHjF!EwSNOo;>@o zEOA78r8}vrU1(E=w%E1fM0ab468NZwjdClI9h~}_{ATaKY`~_71z;Ba*KX);KZmPN z9*_`5DgfSy5!QZ~mroh@*vwb_?r&le=I9-RX`^QRVm3J@^s{e4@@4I^;t1H;iv(`x z{+@gv70>FeciA$@2al-PPp9YZHbJisye}8nJ#QV`*LaGg`7fJ_m19`DxkBL%Nft$r zEdp2nuC;_MT4WY}Pp#b3W?i>GJ@yKi^`L!TscR8gG|o}R`kM9DKG=X=)56aC{)#2C z>q?z{e(WlLPj|Ie)BM%>-&g3KPf(R zJ{5S&H}1MlJ)ZBV$eFN$Y$~DcwSjGN_rd)f}Jrbm(y7OmY%kwcZm?;t&=S4-+oO>M9%>2XFN)CJ%?tyNQ}C z6TQ@qunPA36U_&84wu2*wv=@75?c(pJaDpBU35r-HX@KpE8p32$UY}alXk*T4E{gqylbHiB7`5={?Jv2+;^gv%m=*7tlH1Q zixpP6BXdr`QJVDvvG|_X5P&gkt?wHdEo4F}5IKzBzPA?-#zem{|J8U`c{mM!B(L$= zZz~=`u)aBA-d*LJK4}(BOqz3OZJrY{ijru>{w)OAOx)8<;Nmu+)hk!4+`WUbwyvao zJ_L*pGp$1`Hzk;ZT4wFpPWl>)_{RyBj!|9*{=cGJoPQTc! z>@YbYx%UQ`V;x~z^FHT5=09{dkkh^?UUPJYhDQ#aB1xPVir#uXxKmJpc#yEZG!nlL z3;x{qr5>UB+OVHN?nt1f%^IdJ%^w%y&`?p{ic+*>Wvw^-Ki%Ox-e>A@la+fBr zm8&~d-4^BoDg772PbrtQrC3p0`wv*K8BJXilghA5@FB4i_ zy-CWYKOHj~IjgXYYODWS@$iry+kYTx7N>MtsH zfcopV_zdW>1YxxzdKn8Oebi{U)9tYekNgXLxg3cq=XG}J?(=VIX)+w5)Ws|^6Kg%> zjzwnn&xqAcR^hPac)qU$h1~}PMfV30Uc35pA@Q`ct30e{gm0rx+b(*dUwLlkN2P9D zw`_!mx24beb0jj%Txd!-pTOK8hN( zTSaZP)!waLTkTy{Ge*^n*_KcgReMFzqQu@awYMN7X4D=*>>$XS=llNN_w)NR|J?Z` z_jMiDaUSP!9VhrkzajXvOyC8nd)uXfFug+pzQbicc=K{HnC(aCmr{pV9=;cc+0y*s zKZ-(+^X4h>N_;JhTVV5?U>O)m4iW5$g%Wvi^qho6W0TTt2&2_42ZgNTYm%q(~@a%u*$cPFkB$Yc$Jli7{#%5pVN8X&@rtDCu5qBEv;C&M=&GZ6u zp5Oji{Ljk!U$^1(;kHnB_GjB$jyb@>tK+dLQQz+p!nYbw)w7Tf7sWp5x}S-ODx3c_ zPa4Gw#;UB@G%#GRB-nj_>OiJOlZIDPFxmbdSKV)4@^!J(d32`y#fY}?W$16z1U6flo=d*WL&j~-oESsTxp{|T@!K%gLIW933BH1JjCCaIG zxGi~oGtXAPGSAQR0Vb|%ks@g$Gh+n{<|{uUP&k?=q`Oyvg&U0I08N zze@S5%1+hYD4=`cJj-?M&crKA!l=$y>g?1}bZEid&k6oKHE>zyk*-=h62B3ANE`Ok z&H=d<4A*zQ`>(o-^37Z8C6;BQ_)}4&wfq^hzkxwcvHOnT;Jb3y!GGmTt?S3cUe}NO z_Sx#VUP)SQW<1^2mpA$h(Fx*tmS^iH& z`Tll3i_l@K7ZqTo7S$5n!=n-xj0++;=-CvwnBK(vx`PcCY1Wo+&W-tLPjVaL5K^WpEsH?q_EpN4V-GE>Vukk_o}y>O;CkHEGdJ3 z3Op|E9GR)5n40?-c=k_N<%$*io4-%|bxtd5fg!^*3~5&f!O`YR!SoEK?YN}oN14r< zIFB1<*w26k;pWj2Ts);7@`1>hhM!|noy8a8%y#!2EnuYNL+B!J zT9(kRQ4b5M8c*KE2&bx@zdG>~vf8q@a$L0*DOH$36_-D>;tmOI^qQ$egrs8kxtA6X z+%r0SQt+aR0Shd%+hh(D7Mo?fxi=I&o%eq;Lf;nK$* z@8@@fQpw|4t9!rkT|@wa5O4mi6Zj_WRpB?mW1J7a|6YeUo~SZtg0;vZqUpSe_#t!e zeMDHi-d!VALOHtDu(StpBV-{2zM%fRbNuS4IQpnk6q=I28}yd;{v)&PK+uAj-$-w( z{8Ml2%AA``gsi7-8%%iVYE~wfO)&6}y1m68Unu^idC-q< zAgq$P{B%iEmLBrmB2zB<1uUqTAf zyvC#VgOXsG=9f_Gm0>Cl9_FV~f{S~9o4qUFrL(Sr*{^c+H`AX_UiR-k+`mbUtL8au ziV7TWl@DfUd+$;I>rLKpG~rr}hvhAIXVGHgv#5XsIX507t)MVT&hWsSb7KL@^DL0d zUJMl==Uje?xy}4(<)>{5+5nG3YgX0J32!#o!0YCoV=%HYYMlWfhn`!uiadaBk{M13 zK}ZD+pthox%44HDvX4v{IZJ`y8-fDZ9#R#)io0 zd4Z1RF0CKtxFcgBMi84P6$Lv%FWvQodT|etll)6K#yG9GEelM6fK@nWwd+3qTj0*T2e zsOgN@i=i(nlOCUGxVTN?+ak`;zZYUx#ZfXFWU#L(T`sdpimmoU_=XoO3$KZYA5p6~ zF(iGd$hhW8gVkHPd8X8GbGVEW4=o?v@s?IIkxUUzDgOS*F4y+c4?gdd*ze*8?-e_E zegiRbzwzvyc=0))$`9k9KVc|YDd9`~GCL)`u9^9s(^+nii7Mh%it~TmGmpl1nR%!ZKuBk?jf8% zeGazxKyoUGtj2UN-VOYU~XI)pn+Y+a6Fqm zeToqsGIQ??x6}qh)l&MDPa~rw-}`@Cicn)_e~?)7SDXH*K9vY9dN;9M*5W~QuMkoC z^FKr!OwxWP7RJ?GN*X4SiCDFQ-MW(L(e_p`qvR}#SV2T@c*?Ge5s7FD2(i2_Zuk2B zL|lN<+(Q2_ndD15#f`IOnZP1Ek$JBgK!1HIB)sBoC_I;`^8DXBK|EVYOMSUzgSUaQUKrI7=3Fki2+(uwpGDO0mx+3<`19YHe(4c0IEBSdz^$1xe^uGQ z0|E+_jj84j+6Lwb4~qmlRV`D?d<3}wpkd=!Nc)A50T)~I*Oy&SI{?}pV=o{{w)vh( 
zY3&4cxF*4g{LGfFP4j!KULk_yp6bPd%ZQl~NWL^jTefjGmT(_Q{#w899dp*tGaY9p zBgoj6@J*$Hwtb77{>PQ#eK+)PN{@P}(}^fFJ0TT;4zQ2ra}@#pEg%!SfW z`X$CPzJG~0{?fy2sId@V65`0-f6keLN>=imRKJY~wNa&WTLn@ap`nK!Zi6$ud| zS3bau6bEK_g*|Qm?Eft49Y;Gs+W!4w&74PQ98T7C)16KD3}{tt_j7IQYW<@8SMD@W zu#8*2Q#m3Km3U=yLQZ7M`11UkhE$~XRQ|3w*7M^pPAHnQj%wLR?qzT5gpSldtk?&y z;;VVgdfs752v=AINwA6&Kg?{0r@u#2DC${6EraJ#bIX zn;!Yt=)T#lbAtG?-r<)&5{!Gwy$hr#$k69{y>f$*i%l(C%gI;GSm9-B;hAw;gEL#| zjLy97$8ZfehTY;H)sJVY0JoX@6jadY^TnrITmMU9CiMJ%ro8Z`M!c)) z{6+UCJ36}mm+_k5?`Ftx(~a^cz8~S6IoYC}_D{2#{|NEAD(Yw6sZpDBuZi8~cP36aP#!l**nlWV?+i494A5?D&Th8ig=7KBJ z=}w1mxSUpghobFujruu8%U$N*-p#m&Tik3BEYI@$z3x?+B4phwX$mu#4!OYn3H>s< zCvHL(aaQWG|1UP=tlZ_E)uIpi5KG|CVk1B|9KWWKbEdXMD)xGX*xz^FTQ7w{~CQ_P8PB z=9UhNK8;Ok<|{Zap_*PA*zx?{JvID-{?-+vnG+VvK>#Fm&=aD&R954EioD>DDi?T$ zS(~eDNFMVm-J0iwg*#Blb=`>PdK7Q$9v7MESu`Q(>2$5AvhSZa-+fC|`7#}aWp0~m z-HY;7U%5-!V&-B6qDpKsdm3JGeU_8D%;}T-vitFa!kAc72AfCBy}f_NemI(j%@!Uc z2uQ5<7eWtGC*pJp--<4jR0Y&GNx!JRvl6Ggp1HiPxPPs8+e4S3XsAw9(h#>yZCMoQ z&+Ty6CpiC)_I=%d7AC4a(gj;MrJ5brn)g_U|II%v-TIg8Zi@g|R{5~^={!^D;$2iH zy@<_f%gsmf(#3|io0F>f%0jYY&7#{)fgu~scUVMfoty|9vg6auRK^VVbU#VGSGiLZ ze*-ZMLm`<}_i@|Ndxy`DM(;C8bUg@YvlZV+h!USDLfp+eRfV~GKCKKoSMe2mLi;%f z?}AbIds;et6A@6Cv!o}b$o}z8+?{}XFV%l@e5QLy@w$?a|BJ2Qg?ZIdNCyW)@8=G7 z^%udK&*DBykUzacqejF|`YdN(2)`wb0ndroiDTq32Oe5d%UdUNdEzePxw0pDSL2s? zTSl6#y4~XgP^Gfh~>6#o*5;)ju3RxQX!Q0JZRAO>eb%Kh)7AjGexOV06b9369X*g>KZcscKuZ8kN z2?URkXG1E3H3AB=t2J2klpdI*zj^zU4A<2$F-ECE?Z5OvV{GDn_!32!mG?luT9o9m zwgeJY66%^G=Wts{^B+^fu%NV8Cwzz87Ds{&#&(m+P-GWnXvxb&kYef ziUU45Dro28GM6FDdWgA)Za&YcnYBbmAv+b;Ew(!r- z3HhvLZ#~{A@ZsV{3;pZ7MZXT%XSGWoGC;lNUqq{l!0bs6YNgJj^c29VKbq(cK{Qa#%7=@zu%H3gQ{%$2=OD?l=8bzim2*xeHk?(G>Z&TBeYT{5%;%R0F1UHT z6)V7mM=BOzcd0JznKK;d4NBZq80maC|3}DM9sAp%Uj-Sy&OPbg0~qtg$fmt~(+Pe* zKik=o1H8CQce7O``oORs7{`{b2~Hfe1?*U7N1iA9l-9C?zW z;%(_)X-MXpVxZl_Fx~GV>kPQee98jOz^#$a%XwF|4DV$ohlHw$;;X-l&{Kx;lTO!O zgONIHtY!g8-y*GYNC@4@+?E^N)hg|%q!kl(Dctou6PwaW63&*8Ix}$E=(2Ado9~rJ zS5VP%QCBIyAq-v8$b<#YkaWK_vg!uk#vFWkTREclek7nuD=P3cyZ=+jH}g+vczsAd z1A>d8*cp6r=`%TSdvqUgI%!apBRMZsZ@LvK10Tb`+XIx>fCFcTRyH}YSX*i9l9E0? zwq3hNz#JnIL#D^N0Wc}lsJ2%S1v^E!yH zC5}q5FUmC0+@gqbW(*aPMQXGeJ)`dM>+1H1MEY%FOE|EdlQX-g+p_zy zZ3?;Ibv%AzYb41o1#8_wUS^X*qKW*8unyXLR3q?a46VAfX20Yzs*{{Plmh$QbfY^o zE9YJPO_8CIBqx}d&RM0v_Z?YQ%_@l|V#Bkr#=jBd-6q8#KRo7#6mr@494ALjc^TT? 
zjz;+Iu?ATyU}$%BtxttJAX#%At4CG)%wbPVwhDCFBI%EXVR2+u$B3j8y}{}^65%9F zodE3i1sBB9?^@On8E95?`!y(`(n-nsCR;lqpa>O+EnO44E&(_zGN^kjzC05VEpGyg`7dFudBxJ39?7%1=Us)U z*Y$+uC6fb}IHkDDn;ad#{wNYol0;ZGF2~knM7h4JQDHHag~hYR2g|+ePCVsU{) zFE2kF%dit=H-%COBj#@BVf`3yKm_h)yW^Ez&^vl>;{5SbMA3#r2-gRAM{A8|BSTi{ z8h+36$Rd>z4{DEYHJif}D66rfOn8JKMy^oH&AvYi^UV4=&kH4?1`Wjow9MYW@kbmC z^qW?SVfcex&IyE|$mMWR70N`;RoiZM5xkqlbHL6`4k%O1AErSJs#17|qcG@YregJbeBt)PbO^59+KCt%1Tf{$)~@VbGH(O9vM02|{z(W+IrmS& zR{JrAe?O#yB2UwK2f?l(-M$%H=_QJn@ZkJ=vB$K8RD$=*9w8-JQUIg+6jdHpvmoy< zyhk-ISGVvjNJ-HBtlgNQBIUz>2>JA%Pe(MFWZl`(QskN;(@tWaJDH*DSD3Nea3)4= z?hk$Uet!Vg6^J?=PA=GBs+2dKg=H6B`LU0XJv3>o)>z0oDofE7td05Z_4mNn1~XQ) z++8%98{vQ(;PiNwvK-V{wN$2KQAAItvNsrSqY7|(6PmS;8lD1V-fl~n8 zv94QIw|^S@hoJ>nKA$LVqP!WrP+{%Gr!P0C=44+ZR&v zUtI_@;%IoBNC6VsO)#Z(5Kj%LTSs>xv`wtwt4ZZapz`PwSy@XQKamE<&+oC8X5icL zL2`pxuU}iUT2qfi6#0|$l-SBh0av%-d8DPfhwrX?^;j$-1z78oJ7K=$DDXAYkMRRZ z#?Yyqo*-A6m?84$Kn#m8b5%8uPBm9xs{ky>KtfZ)Q=W#Ym0>F=14Rdt@woqC78?p87s0Aw{YVjVSBr;s`ujD zkRvJU_sbU!Hkv3=!+nM|Ill%X0WbV_qUhvmgfh0h8=#mM^XNs+QA}HB?8i zoV!cMaoDD3(TCiPmhv7I&3Bee#br1HMvYZ7U@BmF-_z)5aG|#^e~3;T0cRx@-x9j~ z@TB$!4%R3XA~n2Q#%smNJ@twd;O7aOM=x=5?*}Hw{u6xA4q3e{`qRN~=fkN#y-*`& ze||Viq}GDmV@r9xFpzofK@OO>QG6 zRPO@qkWNB&Q+Bzt0;ck|fuB=FbxrAlRaqVO0$(tZm1hrts})gn()O-5c2% z<`Tf)DU3x&R;#S9YpaXrPoA}ByaT0?!j|>`zq^PpYGA*!Yd2f zINzCDQ$~x-mQG=)Ko29=!+vWyuRh?6{S71plwH=DF-*9C6a$NNs$|rzDGZgNJ{_ap11^P7cH=A z?kdF52Qwry{+!v$%RIa_Gy6X^x^GxB}!<0rv<*jV6>|=_zhbz{Lf0oC6BB_>BFImLK#0+e{31>*N zkr}VQ`iWb0v^{CWwIVtMpjAi;zZ~lB?jt{ur1H!M4tj8@kk&?X(2F)Caj8zF` zcZ~|{k2QEAVI5WiI@u zSl<$!cJ-7OBSACy&N984r`*O4taMe`ac+$u@^x@d9-2e`+jP-`n3M1Luclwaw(y2u zrw1wNs^zURyydI6JCUmAfA5*k3)$(sG4~qVD58p~PQ26%?NK1xXLUB+-%c;p`20h3 zI@ydhsak`Lo5Nd@XO>hC{8eIJVZ^Fwd1Nf4h}2F)gY@?(Q<7vWkKT)?&V)?K^#aq% zp_J#9LpMc}$TU&$3~ST0M3I@_liDoM%jha4@UGnS*|z838fXq<%Fn06B93yB_ebYX zf{>j9N3nU6ioXWBpPX5hyk?5yG#YePS&2=rxVU{7ptPJJY_{B>-Tet4yYPf;RA|0 zZqIu%ao$feS7E%gPQf#FvIp^~wjX8f2TgYkIzZp@?s_2Qj-$oiut>QH%(GT7hPxuK zt;h-K`b~a{WMZ7=88d>%t5O`NsUL+-EsD;%6Itz(6u$7O>EoeZP2aBH5ufJ!?42o+E z$Z=ldN+Z|pswDb~sgI=rP-P!fCp7P|X5Y?Pw41fiy^eb)x!X6AoJSDcASAETw zx{b%$3(`#_(YBQbdMP4H8j?Cg&@vR+nlvC9xZGKsWsmPG*r_SBEJ#(`LniZCE3^S@ zP+(Y;Uai6Upxe-m=KipCIkb4D9Rc;*o`jgq7o6W9Iwo=}y(!4VhwD;hL+k%aGG;lsmCF{}m6 zFX&H~)BSX*&Ez~*Ge6;GG$q^#T+wwy*#>)0QQW4NoB?^=abjuO&p2D{&0M4K!a2(3XaMOgJIJ=R(OyCms zign9@4vtm(nlJ#1Wlx^s7$m933?goGbO&GWa6Y=mj#X~$uKx|EC+icw8*u|dcbqLu zw$k1(9&G(t7Wvxi>WRCyU&P2Q9#x~|u+_0%&XG234tnn`j@86pCeL!}W|o{1lp!I5 zv>eWgYGw(ZfiGp22N zrwA$?=4yRTLHh7mm1VGe{JQA->W>NWFT2|0fKzt=IcXt510-#@v12hUJz_9NXFYsb zjoMPm5|`aj;SP*9Xh$Uu?KOW{bgH^QtsI3C*73kzEE}8n<|kvz-(8-!M>%_{CKGcW ztG@<>By<{*mQOtW{N;YmIohK-Hn;nl3pisT)vvB(gdUnoT8FMu#8M&LjW{ZDUph#Hf=;LwPd)9PREc;ij~IlgDpaYdfAm%7@ZPV2uE=>`S^mcm zHQyA2`f@sw5*jHEy23^1wO?|kwRtOhkMaTi+hK}+CZ~;i(|4cu*k829=~{!`uydRO zSj(2-i0ksIINQx~PM7hn{DiVe#WSCf%6IPCkJJh%_fN58bnp-}NqGyR z*7YjgR^Ua-`O?k14o!%tFP7{+^Mcb3IV`G{R;NXxfY$tx+WCrWzmWAA$H7L%oDMcU z@gwIGGkJ|d3VbC4h^<=RIpVei9g7sn9Hc?Gnu5BgzIZA>mxt)1BCihc`>FH+AQtA8 za7I0(03>#|VQs0iyf0mfD$tS#_NoJW-A|kpIKMj2#$=+YnD>+>dfWB|qUm(Atl*|l z<=lY%Dtl9jJ3H;71hAg>@(w$xbca*IuD4Zng1v%snSSA6p28mWliK$zWKEB9fd0u$ z#^AzrS&hbV4PfB1yPXt4rkpRFP>u;@Kkt7S7yek!LJa%EV4xf#R`t#@uiYWUk1hF! 
z>v_5U7YvJ=m~9=~XzwIoBCa~kY%MrNZ1NL9V#gmI1*epALmslkYWvx2ut<24-4#Fl(no|} zFbzlw=jxF0Rcepxo%!rRB$pRKm{YO{rTvU1VQ%ft-$7XlVjDn=o;0+B| zg_9vFLHi*N38=NrCx=HDEP8gHZF@jr!dJwS>2-G~mj(V0j(0sj2stEF7Il(%J+44> z0(bI^KA$7@&%Z!9)ll-4S0DC%@t?Z$h~ak0l^;P#8WnDSF=%5;OUL<4O2?BPc!#fq zzk)OO#NXOfLV}KLlHow|ex&Kic zA3ATWuZWwDz)_XOxfh=A-!50Wh0ww?DHeS{p>KLJit2AA#}`M(6ja7pbqn4=W94As zw%u*X!%=owgeg~LX#4_j7+ej)$=0v3B4w~{=i~Cb#44zCE;?pA+;&t`LxJhXY91Mm zPnFz*dOQS)Q_Vu$WtqzQ3z&0_?R0*XRvBnb_kp+NY*IF%+CKMKsdjS}W;tM|Zfem~ z-Ct%FRuDt!&pHT5Y!Wnz*ElaKoXnkt2I1BuglZ)rR{kX^Gd7xkz1-C=4?k@)Y&e00rqPNbh;h^-D!xU z8cQ3i6%d{CpfzDhbB-Qs1nC3px214@XTr~1v(G=LSjap#POmQBZ^5;-RN>#gr3DCI z<^m~O`I0lhzqtw8WzJcz0m|xcwr+H%mG6;@XO!DUc5VxtdC>#+gJ%BNyF==#boHb^ z-I5VFZW#2`W0&^Z`BOHp1w%HjOwYNCP=7 zu2<{_-e#CWuYV-ew{?jmmK6L@Ke#ZzU9~-`EhP2t<>UIIo7X(j>GNWBZB6AQ^jFeK zZKX2U=u+m?G=-gjQICG@H8ka)t=e76;1=e?j(Kt0ZWzK{xUrrA+M1wZp} zJ*g8){h_8+EBHxU=CNY=z5wj9`s^!(M*I{@gXKn#f`7lOnoF_fkLm3dzwP9Sn^rle z8L{wsF+qY6N_q1V3!3OjIPMr)`=oYf?WlMaJxg73 z7U1y{>tNHJovh7O?AmZhtGD@bJg^ybrt@v#vW<;C7+X<7tHvU7t5{K8?F^+|qs}39 z7)ysqvJc6xIww6WojnuBA65;Fz)3+T57eU_V&1fR1lzH!91B17=Cou8zWx^E($FCS z!i+lvNrUWmZ{hbG%Sad2)SuAE!a|208qJ+{@SnEN0O32yhT`m<;vy1xvXPHZb-lt6 zsN{i0!G{G01~g`r#CPxaO9XL_YAqw($pDns@BE-smBp}S4#MT}qfiYTA)%s+_6c&D~=KHwK^UEO$;v8yY@Fm}`Utd4bfB_N^$FmlmB`0su0= zo^(+-JQ{^rr|=m|L$JT)SPzDP1BZime+d$VL!$dH-Y5_kr&c=?zffHWmAqZNb8DD3mzcN!wE9_8S-_T1qM@7if*?7a#=n-0SEDNJS)AC0a5nL=AB&;e znXa@q5wc5XOXqof`*@J&civ>wa{fBwh5SpFg6r+E_iYE&Vbj1o_S{Pdb-jEJ@RX^HWq54Qi_U@sbH6YoNwDcs}FE7|6TR zWKkHfL$ z$v;+P#f=;iG$ z1%H3Lm_y7XV4#c|!o}%soKDUz#9`RH3fvB`b^ZoLC)3?Op8bsTm=Q5x;-txPVJ(^N z;zM^}V>(wU@tPN$vU1Ote(rdmCzg>y>QP)TJkv zmqN^y#wU)9<+;)WM7=fZgnmXf#oh;mvK;41hUMgOY@z@u_B>S)c?@p_M^e6v6uA+2 z8|l#}kg)MhgP`1iri#?eE^rJjqe=Z?CdX1aMy6_ z_?fVL5Ajg#AoC{`c(GhQ4NEG%U5Lzp&)}VrOF8iM0}Sc0vl!MtKMB>fP6)_X&yNdt z0j)ebRzRpFe8Eeqv3eAH*VWm^i)aPaOrFzi)>&QjH)khsC3jYtG9oPaN_+GZF4zsoccIx1<5WKc%sCCL95oWNd0jFYN) zj=_Fm8`zNg&M@U~Nb~(Vlz2M{Uu?KML8XO~Hi?k}R9IV;A##|aBSQ1kIeV)*{h>j= zI-%(k+)e#b6_BC15}Px2*Rn0Pu)~xVaq-+~5f^65+#^BvLu<5@x?~2je~%<<77H>; zjwJ)Edm7rYZH&!W`@xiY59aJ-P8Qg{J{0EHUG{6E8u44+Z$HuuKMQ(%V4jGA#j3PsZD`NlAB}o+0U-uNy5aBSu-ic%dW~muSOQ^6fvnl z|8Gv_wz4~wlD_Gqhe;E*<6{?L)}zq3 zInR@x10rfmxDo0L73U%z&|wOt_j(Phm0L~uI_-fOLk7&1{q`d`jF`93Gw+PGBMbQT zD6$!$IObbC_qV618C&QuzSwLUpXdQN2S^kVYwYv#p( zrILCXhp`2gzgF1Nb*ez6&DQbXp#tROlHP&e081UK)g1eZnj6K+7%k-S)G)T|6BO)F z@Pi*5&@I=WLWzY}DYv`-MmN1T1}7ZH2W=doy#)!$#9hh5v}45TJn9y8e?67`;bV$G z#FyFk3k_2pfp!i>HOi0ZbDlWpahr^Nmc~y!d!Q)^kAZZCRQlUCrs&& z1@61hUhaw-U0O+D|H4`c4Xgk75!>p&=C|_0{5lr!Ftn^x92XEy-t@a(DO5NJ2eT~# z`g|Gl8XYmPe#^3q|60rAYkwm((;{48YPWsIURcqTFMeY=+%~sD|E!O#MHp;em_3j! 
zXrRo3$eMCO-7B+9fA6GF@MDwp;u*3LUOK?yX>6CSfNPhUra^3H?q=tf?qu8euastF zUuuT;G${CDo2^=&WoW4szqg84{t$LeWD8_54;}Jf3P}7u%cK1QH8dZnj2M%^2-(p| zALfTF#Msj7TusadE3 zeKkvC#pn8naG5w#WBLrHqU)7>AAwnxHZtuAD|;+`lopXfyU z_}GW-rY9Cf$rq|DBcVpftHqnQBKOk9A19Oz-m)_W#Ek%SFv}^*`lfCpjUtlXtZ9uD zRu&DE36vCqOFoeCGyg6hYiSABE$r~1{MZi=+Yq@xR%J;riQg|GU&<>JL04f82x&G; zY>OX@_Sc%%OA((qNH(FL<%dOunVRV1={mLV$a~ZLvkt^(OS$+mwc;ZlSiQmR8qS$8C#?iXEDrJpBACC?5miV#;vbjBnc&mYSgwPsfH z@ISsundsbj)TVJqhmEIaB$jWC~SLuHTLx{P8T z**8PZqqc-$LI#1yC1*!E=q zqgMF}|AW+zFS3Y>8>^jDO-=$xS`ka5MXeE4*O8#9WEsa^6`z+57}0R290H7rbw9R7 zou+13loooMAVj-#koUZ$YG9Q$hJ}I-dM9wKPes*rzN${#4i`NxHWg(LEr zuVJ1(irS+xhw@(YoANxz?qh!T$CSJVVyNuK*`X&)M0x*T7C<{XR6aXFVQ}{Nx!|I~ zOo^e}QpOX&@&sF;XIt2!8RFr#LUqL7k+lMX=yIzs5EaNsQPsm+dD zuCHx}2+kXfS)Qp8SdUO)QnvW`k0p(+uLFcuxseNgFWFEi9H*Y>NHBr{y)SA(13_uaeiC8V>mBX^&V=RS zao(v@2zYLEB;)L-SE=CZcN%K-7wy*SGm+b>Cc%muV@lmM-n=~Vh(zLY_Mxj#4=czcnwrhk?_A| zF=M#JAyXd&oOJ5EJCc(svrLrC*0(Dnse|{PwK4Cp4XrCsCu&MGN#i6vR5sk)fZXX? zZp8#;$K{^2y8kK7Qt2b;NBqi;mrHfw~S>D>g?0&yfIDV4T(VOWn4t&L(qu1ee8@eFT~2v&krB&6DmZ;OKwME=qDIjPELxaOiSU{cf#n z2f=zKjBV0`2C9nOu9Rw|dT^Q+cHL`pRdVd(R*u+aL#Br!_i1Jls}vwR!Dk4cF(lx@ zXIIhWbr%iA)~t(4d8W{m=5wLx6fET~+TclGNQp61MWYL5jWNf(E9o9=Xs8W4z`S^$ zCgmCA8OQW1PO^O79Vg%k*`v}B71x<_9lO|5R-RAo5P~PP{~CPcpEd*TZ5i<$S<5?h zTLl~bC3HQMi5wE5hJ7h7qLxNBHE=TT*x$A31}Th$Z%KVsz+RCD^PYa&1(uwmMM3}h zVuMueV@! z^(UGYXS_~dyg^TI@Pk0#oZGJeUh`&c8aT=KTUnLZbnkwgg;32aGC*-d^EszTZTPY5 z!)$Q5^6J0%8yu6d;+7hEM0qzmKg_k>3!fk1#Tu)jI4?CG9G7#cL7~vNT;XqFwG9{T zSZZJk)3Z@UUg9^_Eu9M)4oY@+I}e$pPkTFz8ps?Z*Um}?*k_`VN~^6RGIAF}N|P1J zEog!v<{~DH>16;%mbpgBd64Q$AqfoAvD{zHCuqk`rEfEU8>&SQ7Jj^ll_$Xj*=hTj zRK92NpQg;V^UadX0T$+#ESC>QZO0|v@x;XZaCc6~mgQ{D1cK1DV?GOU$V|%`r!f|k zcTy&gs8-;Ic7i0YE}FM(Ur`A;+|M6cMAjRs{baa5*$N7UyG;i{BD{RysFCNjlum z)>d4D@|fS09e*zheq0QDAFq6h4odxgJpFR%qzSN?d!y`J(1$983|^(9*3H02tjZ&P zWHeeqF*P^kJY*@F-_$XkPm8OM_o$jXaS@DTA_@nBWoUO=vSxPK#uWK}&(_+x z;C1T zdJ;E~@9MR7EA5Fg&Qq&E(Se`svkZ#B{W!WGSBN;IDRd7>lvz4BO|6KjQqdW8ea3ei z%3A#bC$72YFc~NUyR#*rrZgk#L4Pu(0OHbHFKF$*S_n~5b}sgdRuGJN1o`PagUyk1 zS%CA!l!N9J#BVvXWn{%DKz;P5{yfUBCa4pvPrZab^IFp@R9`}#2(tIqP_EDL^pY-2 zb>e)yT04qH=w7>xapyDp3W59$)GRoSHU&=>H!9HQC#zeju#~L^Q3dNCf$y+F|b}B*i=tg{6q`9R0w3vV1_gQX@g>1O7E~$&>~${bpEw_As&hBO|9h zCR(XttCIoM6eDMDvHewqE1)X-kV648h22);mJ;_xHpyU|RG^Mrsl#=>ia%etxF_FG zV8t<)CWP}5TCpWub7;lM_EbgW1sOd(*{_th$y~|x)lLm8IS%p~K(oeb4UE69Jr?S- zEfMR)O*34#DWvE`4qFCr2aVcueeOACt5E^Sm@MNbDtJzq$X3B2n- z0jp>)veFO#pM9D1KHxtE6+|$gqewAf-u?Pj(gwjHi9<=^Wn2XGfxM=O|DUw>&z)b? zv)dra^R|M^9h^pCv|ElM%-=o~!ffSZz>~9C0v&E|F$rIzRJ?UL=@h#P;ihBMCHw=d zdjC1}u2P*f_EVbsL&Tn`TELIro^0X@9aqCJS2IRlL1&Rip$)w}ybv)Xy@gKR9Nq$3 z4#DH8Dd0K#)ygBsvaQ_vb5tmo!iwd4g%x>wIjBVMcyyX3SOJh*=ilzS^dbW-ck&d? 
z%X(urcRq~w)iYQY;1_S-B|?T>hKxDlc7yWVW1|s3GpR=vFE)B^zRKkKzb|}jYc5WC z1-@By`+5w=W$?8pkNxLVu>iaB@O@``kvrIwbwrrRJfR_?z<29f9Di6Ng2`HlrSX!^ zJI7TkMMtk1xlk<1>mA;!Pc8;T02r+( ztPc_S77F`}iRsVn@R!ROz0y@@LXb(mk}>7*6W0(y$@+jS>i}&hKm7VWAkzAJl9|SV zaQEpcIyS#!i9#XVzECmTSsV>98dlZ_P>B&|W7E>)}xpCPKy#T35`ZxPsZ!)ix z#nx-wHerquWef^_(-jU|3%!gN$&gHY^A@#I)IE|YU=XLC6-i=_Q1&%EHFyQ40Uc%72@rhhzU;ilu+@T(?8Xyg>}HEd3r1`kjt9S;jCuh)!J za7eIGfLVWdz4*vp(Zo732OBNYSGWUit9-8*Xe(RG$c+8D9j3?LSNJWBvrXVr*i~3@ zcC|A?3Db14`Dk+9=)T0s^YvxDDdPW!z4s1la@*EN6~O|cNRcH~q<5rh=paq0(z}S% zP^E?{(#3#C?^Sy5C4{Clr9(oG^b#OaL+5_D*4pQ{@7a4V&-v%x``q=1JUp1>n{$pa z-!aBJO7=E0JR1~eo+6Ee*}|3Ll#;J8-T=Q7xhOi$#(PNJ{8+%JIm+_Mj3-P%0&zZI z>m&G(P}tE$vl{%{G%!Awnyp>7%cqZKE;;2efyNAq zt@td+DCqU;vlJzK^K3@V09JxgLIKnM3Li|mtz=SS=5fpIeDUM%(mdn_OM96ge(-9$ zf1zse>ATddrRt90ov+(8&&@rFm+ho<+MU?N@}fTEUDTI|E1UFIK~zFT!Xf?q+eD4~ zN5h#-!!HVZj`cnH#Yd`pq~1(o*a!$Uzpi=b7HQIcrMpNbT*4utD;p#LuM2N}lkUyx zfb|Hyx2AioXZ*5RH_i7|0Qfu2vFu^+pnZEr-HQva>&Nag)smhoHFd~h#5CYIaB};^ zg<8+77drVr^PTr7IesN#%w7%@X)+W*_OuO#V}(0)j(mJ8(C41K9ul)oZJ!PKZXJdu z3HmnX!Oz4`$M&YT{39DEr@poK*ItIjoVCG|;G0M1T4oYenWR*ZP_ShRWiiULeR;Y? zyd-3Dd_WM@>(!o69!CuUs?_GZ?F=Ixo8`S!W2w?kwp{*fG4)+qh~eqo@&4H8M|oJG zG>0qaFfO(Jjz|7LHo@8`n)nq|VJRjdq%(2f5M@K_Gn01kqjH-Z(|6OGCr2{>CJGKf zmIA-@?N2E)FX-N`XHADtD1E)QKHvI|RtpwpIWo3H+uRLe&;3%kKjRXk@6y^ifA}pR zCPD<~TRb_pmd&Oa%3Vq1!Ve?9!|JSRc^WaZn0lbL=*O>nTrhYNHj@IgLJ~8_0M+n1 zWxe7@jSfi8t4Ck#_88NY7Cu=3x@MW(4{L8u&GUhN(Z-yCx7``=#}qkDlp3KyDSHLC z8MO2d!`z_gL`994e5-LxxZ7dSPh~Imu^61)CAv^)nWG?QPZj zQMoZO7Vu6fzUibf&QPp#YOPHpn9*3OG|IKM4=86hqPwFJfsKA(g6iQz3FkN&(wUA} ztzo`ye|vD7Gv?_6;}Y8rF`szisQgRs>TkQEKY}&tp>BfHbYdf0@u4QZJUOO{C3_a|-L5W@G}NR=pnx9t z`VSgGdro4|fs>f1N1fIRnP~3v`nej4iX30_ohwx*`!Yj^x(Z9Wyz|s#lS$uY+K9k1 zViwEKKEJg+?foF+zds!z5TV)sPEV6TQCoROsB8UqSc@c=b$QMI9v0U#{1Qp)Bgj;h zUGwl*llDEv8z?&&Rv6IkUE9EoB=6E)vg$~xp;LK6tm3Ke2^3=Y9Z;#sRZBewD4TEO z`HC9`&*jO_lOuyuQiZvlnwD6(ZHH?;)JWg<6Xt)r%%pDRVd&QRJc8^bi3MQT9buQB~;ZACPANamx z7-Cn)!b49r$1^3h@(iSumNmN{tvn_Dlo*jWW8Kfp$t|rzZxRCAQB2Z3>;=A`m-|HH zgleQQ8R*63l-Wl?E%9^A=DlMg=Cvb$kr*OHyw>-w0j)e*sbYi#hR&PZW`d!lMr?T*8>{w4a^EGN~>aBhpu zJ>Er~T|r{w(wKbu1#QbK!PCpWN*{QH*<$Z8sO6q&G&}ldE zmYR5UguBR!XX^_Uvg$C*0HDt5t@I<#%-^-`8MwbEe6*oxlQp3@<^$1K1z~p6+q-r3 z`65okuUr@bZr&I%BFzEs)}-lyp5v?r=b38iAH7eT_LV2BW?odlIyCS#&T2Q0OpqtZ zjDz34``WpQF2yiaEm^=OsyFxQAA#NX#0@d_=beO{&BA>>$kU6 zx=9joDYlaW;hvKWU=m01#?T4;uj%`1rItiR5C5?uUf`Ut_di7C2d1rf~ll->M z-jt)CzV|r!oVjub+}ze}lv{jd_aUgi=Ptrh7-0Vm7gjs$oeX{B0!_!o2QM$g9j>0V zH=EOa-rueq+%4x$QoX;z|CHlff>q6c0D*c2_~_gv7_#wL&;t5&e=g!o@a$4974*K| zmVefqi(c*Iv{a04uEdKN@ax@H5{?4){D_Vu-A62;bg;d%dD-J1jY(-P-gm9dKLsYY z=7?Q(k-ebb^7t|;YS9Kjd)!3G!Y8%h5)sAf%`%Gz9M5A{WZFJL?4yZfRp>p>g&Ywr zS-xQ)>_TsPHz><%IV1Mjn@k>6ih7QyBx~=yotm})zbi~cMdoV*YPo@m)S$0$xN((X zZ}8i@E~yKU0X5z6bSTK!G%IdTO?OTKLhzwjp!9JHqTaA~cgtypQu$q&dSxGtV~_D=U9b3To7L}y8?8TU}nFvZs#e4_~fR@YYWo1solaC{KWe14NY@xjkDR1gwwV01u`dCf~vnFPb^0a^IuYm?|Z1Zk2PoJ_q|eu!nNUGe5-`x)JvNaA*FwIy_VgchyL>w`2GQ z@6M-lvzLK!pGSN6`UVPoTcP{eAeQR4pToEJmx|9(zK$u)=j8xXoFVZ5=prVU3p~9;YfpTIP;Bc1 zbVZkBLnR7gce7q5^CN03_%_LwBHl9;lGIwJvnpdyjH6eN$`kVAxS$LMEsBp z(J0rqwF0|$1DtrLf_~xAdqI9fFh!cL<_Qjc1W;05B@A;rlb6fBr}P62%nkX)tN~5! 
zWPY|jHS-_<diGciaqlt*lyE9neGpr1z=e>o_%R=Z+OB^g7k^;1cr3$ zlMWS^7jIvl(k8|SM+@mvqtKl)pr`9ZEV5dKt66%+Jh>}GZVl{ z{7!1tX^LKzsNfV>OONw;$@OeG)H8MVCzbO+dZQrt&-BJU+?=;%vQ*|&yM{uxTLh*I z_)&bjE7be5pJAvz)C?}U@Z|D-hvlKevabO)(7xK93Ou)+5h8^yqFf1&Gh+;*X&Bs_ zI{4zQ#-f$47BFCugQOpb#K)Xrj)Q`kqFRcu# zF`)NWzh@!$Z28C7tBsb18G#bCc2@H7R_uLZ268O<;<+Z-$){cwPCoAAqWhf7Tk_7MJf2`=RiGis zQbuP|&#u899ed?B3o;Ufj#XfO;#Nc2IOY^#Io&E}?XM zddF=nq|cA$bEN{i{QYaY$BlYDv`|$VB);A{^Z9j zErq^|cZjj|-Y?7qi>ya?6-Z}N z1g1P+P8afa-#0Q2Dw=l~uQfuGFDkomvcLaWtJ!?yTTULJIxrWfoy6vq)K?=vKf6Fv&n6cw)#~1FLSoww}CikO(>V)|G5W;u~#Ww!|=_d`+6^S1hZywU!>8C)a)7ISb2xW$) zKfx}P==T+jCojlto8&rp5(kV9r&?3qQxP2n0zj?GKw-7z7@){*5+mdo9KfK94YEet zc6}x8{05@ZO4-RM=mm;5Wn*e}^E$LBn>N4DLMSzY9r}&As~v=z``C$>BT7j=x82`x zwp>``Dq*3&Q{4^%dtjQlOjj*qrIczgu0=QS$f4#-*$j2j5@49ogQ3~(-KjPB{E4M8 z=||Zd*V6h0r}8%sb6F!p6jBkSueA`K=g^YleWxtv;#)mcEON7luod2dmNTGcCGWj^ zZhI>b8QtB<;>oQH}x3I3+#1 zqI$$;MtE$cjgqYco1XoI&pb#%KeJ~}*V-M#6LyV5W|eE+*i*qQ^_q&0pV^3$j=&71 zY@w+OE()1`dV;!b6t-?hg52RVdoYV4SdC@+ljUBE&p4rF26Gdo4~jx-;)s`7fngGA zGq0wOH8CA8WpG($;Q*b;H2djW-?%{BsY>1WH|Ha-jgwQAxv~KnEc?x^JgB>g-S~z= za^t-=id6lCVE48JAkEvhEh98zI-Bgtar340Gf&2EQGuQxWlSD!i1gumCWyIROJl`07B@jD`ieg!W-6gMCP0DexVH&z2b0g_ggc#x>3)sg$F3L>~ukSGMn~=;%Z> zC%7vJ_&8>;g}O8q8ZO@>YkkLk7a#0pG~;ZD(#!pzbn0e-ZMAM{=(~ZQUu`_34CPS) znqGi}GZ!?F#T2%(AkepMLE~BiPCgyUdvZ=K?(FS*G#EdGZ#G(S%xrUM_O5BHnVJ09 zuu~J{jWUE6AIpa{?UfH4PIJivL%zt(uN|KqC?S2330o%}u@b=4hKK){+VDgt3;NA{ zwpFdux=zwNexq)iJ@V^5kFgcxLxS69WLBi%(d?+>sCHI3<4?qzSju!uMJL`BmF|A0 z%N*Pm+x5wLV&q2E_c1;jT-n73h}Tdap#3(|fkKeGpPE23*F`5NmaI>n4PO6D`KkNf z04V0ib2)u$jcLL0HVT&-%WQ|Qd~UT71)b14#+D3++ZgK2AT4oX4C2*#q3jiI{*DY* z+;1dH#gepG>7=bTq%3{8lo~X)u-#)k|Jr96MwZBHi36lu64XnOoXCN&88Gdgb@vYQ z+X1%-9uB)4LmXp?x8on4_o8Z*rPMugIDlSoSHP_#5^C~o`qsC_mlZ)WTcB(7enhun ztq3dZ!XKoywY~HB>kyf+wUr4p_Q^M+Rh2i?Z32)RGX@`W+t`ZB>2!d1-;Jzm_ z#Uf~LwJp_wkIiQ*#N0)?2jP{b{U9;>Dh(g&O*-QcP=$sRa==`{sWSE1%cR^|94u3&Y ztU#g=btpv>#Qxr1RcC3E06?As8M2A+8DF3ga@wFtY`U+$nD_xD)GkJ%1Q`&o&bX`C zCAdB|057?10W1;N~&lrEFCa2>ms zUsF=Q?weI+rnKbpM6m8Uj-qZ4 zu|r8Rk~@fyV%Q#wNS@C#Hq!8+QN`xM-5Dfo(dlFVbx1(DX|_dfJlS4$^ya!@7LOBC zLy5UeMCn?{X89z>3OULZdR!RGdNXame*3IUS=W@qaRXULZnd~C8eZ%7xIYx#4X|h? 
zCgK(!A<_zfDrAC?42rnHL^^Cx;5Kg%zpqqD`g<$u*%Wj>A(J zR%zTMn-)Nm4kDo(qb(~PG43+zGWc+b(;~mrE-Mkp z4cv&p} zJ*jH{R(vdO+adN~EOZ_5sTcbe7)$nA?(3eKZ42n;?j04{iX>a- zqzF0;QhPb54Vx|fKAN#eP3{flji|ZW{Cqo*sXc!dqn7c30*R4I%LeS&-BFoWpU5s$Fa zEM+R(i7OaL6|6-bd~sVk@DSkiQR3>Rxo2$~-u4;~nN-@044-BT6lfK+!f8M$B~{AE zWWSIKbhdb*^l0Biu%I2JGMdk8X{7PaHE*OS3Fpq`&9?pGYZlt$~GUZbcUY8D7fvGQpr?Ei_*2t0Kn%2!vNirB8d`-~Qq*db)L-IVa zi+-+&|Fm0>XVI`>rw`79@T-&x#RKewOJ~{`j4pkC?&iakvD8(!H@LJOD8!B?CTWb} zPT7vp%v*+{sJbV#X=Q7;nLERqx*+agrS&Sww6uLs`DRfU?xB{qv&zC98T7H{MA-*1#V@pVN36R6zSm>p_?Jdp>5%h67KUK4(PZ3FB3 zML3_kofJp?-J3wKM8WfWC5|(E#q*WJeaFFB%|$%g>ekK7WqjAcIPO0?3ZSYtfsO(* zpvl!%W8I4cna5Xc6bAF3asXO|;ry=OBJ{sy@j^*$)CToVAsiCSi7L()aIqw<%7*lAJ zq2-I=GXmyk{2+%d$}M$Ud>wpOn7tb*G&|eTlM^UVx$mXDaEGOcN#)XT#YTzU%&ajb z-`Yb1A;sLuluT6s7P-S=opv4<;Ud1!&iSy-}=L` z`+dyJ@5lE0#2MzO77a3lNP^-)Ac;FK^p2JfK6uvcWaEo_z z$21vSTb=|VPdh6g^y3tBKf14GGdfNiGARn26`M^}`H{5L$OFsoTgTJu zrz(mZK;P|xE6`G`{49DEz8JT(x03xNHBBu(;4nDtCD)NZ5y)q@OXF=~wYKl5!MEnF zYBBqBcdOJgSv+7E!7&=fr1Gw`+nqlD^DbxF@?F^Ja_H34>q*{1>w}*{@A6&wKhEvH z|Jsrxb;+bR=XLhD%5{nRN|#KOBtC~de_Qmh8^#M%ADvv)uNfcjojTpP+hYMtk@3+m z?`redopSkvZrEFYGUbATcatDA^zz2g8}F{c+KmwKdiZ#&a|+rQ2t*+B5ddqL_&;%@pP|euln1KA}4?L^Nj|db9LnDGJzqQ&&_EkTtq-e z3}FflZ9HT@Qj^vwwJJJgz9!mk~i=vG7>VC?>IXoOK&MAIDmR{BDYE2xo6z zKccN4o;bKHw6kcx6{fV}RI0yqNOjr6NUQe!P zdEqOG{Em4)f5d$Pj{O6CW4Cz)u3USqwsO@wIk&JI3ek z5k6T)@8*i2YEX+;&zvHn?pui<21e#6^f+?NkR&d}GyJ|_lGgR_S2ql(_-{W(2OXjH zn)PNqO1iE0%5MHXJmvS??0>t(m#^D!;1VHhION{B0_S3Ov0Ea}2T!}%`4ZX^(?bQK zARC4C5AgooVf~XfF8~-2btT9KIJTaP52BiTkQoi*1t9VR!yn1vf3bjn;EW%u3~&#k zLCNbkaqp{MyBr{Jx#$8X1BtK=@A&`^Ve<>92~6IM=~BLxZo3ftgqy*MZs; z%f(Rhw-2s^rGCr)l{F~61%$6O$5RXq=r-lDx1+u^cXEgwL6x0Nk z@s=Lwmw6*e{1*cFxsjIZz@w7!iQcs2JOu#2Vc`w}mq-3DH1+rA{zmtvEJ*z_4$+v# z4Pd|FB5jG6t}6Uqe*Jq1D;IH(6=pERAUy|5>$G1kx>5#PCzwwXl{0E`G2)NWn zp^OJeTAlHtzjqrD@^8N)LW5h zvPv<4l)iTXm5?t!1pLJ&e@c!AfLOv@9KcZJ6JKa1^a&_Ch1t7Dfpvtg zlV3;_U;|wUl{U9TeP^eBd;p7kc4u!oNz2J~f^26I5zU`_!Qj`!NfZWIthyAkf=SHQ z3d_d=ZC=K~D1Vrl3B2d1p#rf3S+#1o)hsf7bx~-6Y@hv8j;>4gyOHE#rzin*)~_7q zCBIGo=8Yf=+ts@>XE%^luB;Lls$xTod)2}gtpz&>D{Zo7hZcD`Z zz=HD&idE!>#s`E$t?8Rkn%vGB=&0F|ie6mD~XW?Lug~91VWZm41 zf-TR@>DW!A339h!CWY zDz1a^B{Hx7g2MmEzOyoZ%x7mAMlLg{t&@jX6sosMIe$%vzM57gSZ;E$VKX8ffHJTDcDrt!j==2 z?p_iGUJZ!#&*R>!DFqgv&LvIaUzT&p@*VK}zr;n72RtwbU|i>J)AYbnzgs&WxiIpP zt<+vQFK_~W%qcB^9Q#4zbLC0HOce(_2XC-<0LF zT$cI!eg7ntzt81ggWBKc^7py?{{vr&uHoo=t%dKEkqKV*3%zsDPwtwigIDyI4s}^8 z;H#;BSG`}vdMWh51*jmd0FEO1GH+5m5AX|#tUTU7LCt?xs2Bc#dz#ad0esFMU=A$i zIv8=~mEc1Y;1{O%AMpPI;#;mt11_#w|7=GFuo!fxtSahw+=4Bi3i{fu|WPf3a z{0M_2IEo%)r<|tO$14q;oGQUKHt#9N26))myPtIia;g<8)^XTVFXn#zwA}9E4JuXC z^3h5ywW!EZ=@0~F#pIFywWa&>By8|;iQ5t%J1sqFqX?CN5q(y{qgTwGJuvgptMbyS zFAIllDDsOp`BWcTZ{*h))fK%5wH?X-F|X|}&f@n2R^;;2(Q%rAEI$qq7%%%u^d(wN z&K2GDN5|7&oW<|YyWJSldu#!I=ni-nv7Xo9bw&%*hYp7S+wE*Npt*#6&%BsTluS)cIW%tI(Lt;PhPii! 
zdHON3b>EiBSjkJsa|%aiia-@>jJJRNl1sdJef@J=V)OYa#;)}( zo@Stc3CYk)5^+o}8)eS$4j`lyuDDc{xY5_TqK6POb^++AvHfAWz(Or|AqV77sQ8tu zs&b(RI}yGorcHnjd|@-ulP~Daje`X2EiA|ExqQmqi&ZHtJ>*^MDv0}|Ea|^)#vi)$ zub$)AO$P{{#{IirA`Mu|Q9fR4_ym>~1c9$YzP6osAa?f5i|6A$p-Z$7rBSKoqmf+d zVEB8GwXOf+|77`#cR~UMp2cy==tQ+w-&6zc!2*8-KoR|Ur~UyI{|@>56z*TF)9Z(1 zfWYH7zk{9fPFK(`ZSoJW!v!o2z67T3i8-UrM3jg2OD@qZ+*;x~}jcR0rFa|a(y4^)@w=q$zmbBcPwCp-&a;^4NH?a^H zQN-*menErcnuWaV$;*;ZXZ;mk_>XHPXo>SRj@wQAY%5}V=+m=#R}aH44KC`cNqh$ZJe z2%kUZ!LR#CFZ>w>P$-Pp`C-2>q)4~UAw&4=V162*Z!eltj79l2ANOd(i5Huf1WFxC zJ*Uwf(YnU>fPH@8`d^s%GD+eO%!%OACd7r@RlMd%>vw>l`ZnmcJczY*TP-N;!p*0xbKti1W=%B>JwF~?brIY1#CYNuX@N$W)6L<#8s z6iUT}EX^+v?|)p=v4I-~j$g<&+X%Rwhg4X;se5{Qjxuo@HjGSrW2e;;fFH=}ni|+ujrk(ml?T$BU-PpBDoY}RCLeP)5K9fxIr5%2=H2SJ6yuptx^*v=bqG3widT&v8 z+j~XYRAfv(-DxSoLUxWi2FSfV|Eds2_l`9yJb5YdFS(ijf)e0T7@w|bU#w}lX52i9 z<>rVxCHt15oSa-$18jV~to_y?3i#$fvG1SqHU-1aKN%NFad?9^FeBV+UE8R9{D&wXb zn-9HSS(x`nnhO$w~3r?zS5iw4E5+>ZHZUmM9=xzH-)% z?evS=+fLS0^YuMy7d>M&TJpO_Q+AVg6Z9jN#y>dz4%*! zNafT7qKtSVmj$Z__H=)}q>EKEvZl*{3?Yj#CAOW^ZppTFvq5;(>hv>~3zSM%uHC9q z5**J~7FEwtbexS3Bp{zSJI2@xua`-u@S?nSpeb%M5W7?3vu$$p(5J8@WDNr0%68 z)az(9ccY|n^L2z3g_u_zWh9esBrQ}nBA{X6<1|9PFmAF7LYG;W zL61p?L@m2N;QE(8Bi{b6UD0>1}sc2Dylw_C~e!!|rn8Td&yGPMgnn zKr0vHjAsDFNvymDmDmB`NGJFfb&+Ga(x9cG9+M&M4hlR~0?}uL_AFUCo?-LUFRjHcL*5Y)FJQaojQsx;|za zQLO$YQzmRZfJC{h-f0;D;kNx!znlsd_1t&1XP3|V-v2q2WV!-J;=N836S+R^Phsdp z#moGHB!HLnNPPzcUfW6QE3y+JFy9$byS8S1A?o7t4vN8tw`fGhtgWrrg_;kc8vpOxU5K3L&N z$4+X)aax~)*1kvcH-&sZ-;o>^f`P_*wRVQN z<@QZ`v6SPoS)(T>%i^`|w}d&^_&jzkDIdSiuC^H;DuHc3#UA9W^p1V|Q|0$JCMN+U zwgH@_uqJSg<=_J$-rfdui9Y(FIt8OePc$o{(n3XD|MW>&$Fl_p;ZJBxW!k>`OUQU% zwE~CMdpFV$Fl2%NE2i(cQN7V}<#rW~s7L%Z>2QuxWp1iR4}c2uH1kzQYlV$>?B-^Z zlI$9~jeHIlL9KBMm)WVD_Qx$6 z`|Ap2Okp}duU|z~4DYY){}PS~E^u-EO@F?R7jSTY^?iQ0*AcJY-2AD!K>(HYYu8=k z{)bDgi8nvcNj(G*eL&;gFL(k!UxG_~UnlA^?@#gG1L(eAyV95ZBye!Syb?h_O`5bi z>)T($%lky~Z(b-}?fR+XuK74%--oH|xzi-~Zy8D0MynhAUf5YZK0m`4D z;r};n-bcw7Q+1AlE^8Y4PQ3!QfREK*h9^UGou0>Ph{5yhaCu|WzS)KBY>vFSOi5^( zUvZI9^=pLqnPtByHW{pdf_oQxAMY~`6!h4&0g;UjruiH$Ob%l1wJL~4bMAW2``;ET z1DK#LkjvUD;G?{q6*%;&x@bIB=xV0`Zw@_}CAUk%_n#aK2}Xiw@3N(Fi?nLM%hR{5 z9<}i&6wGtta>CP_Lof9vS(L)I-=SAhMIaR}>tlmj#@;UI>Bd^L#`AIa!`w8V3G@C` zLksb};)7Kc-?fRn*Zm?p@4=NzLQ@XX`erej{7F{N^+%PJz(wUj#f_T{2{l;*#D0MP zG!bX)W7T}NS89v$#u{#4b&?@-2jcN{@$AxHwW!|UXFjK;Z{-h7JL;O)zDHHl8)93|Pf_R91qE!A z0GX8Kxd5!yI?mvAXdxyujH|+Kx&co#Gn9mRVr4MPwrLMh$yqzaYcUP@@J`5t%_#Ac zAO07f-lk6pxCD$Wd0cDN$4=|H{?PNg-?gdwfyb<0@&`PnAC}ZsfjzD2$>!l1{hRa= zA}@h>whDNl5a?%Y*W!0&dV5fg)>iy@ygw5g{~z58Hr@1fe*u8TNk!|R%#&=_fzYDJ ziEBRNxAPq1+*|A}kF0k;8brzR-{ZIZKrnfL?FXSbl6cMD8Gk$RXt{jt$vN*yvQwX< zEusU6Im;*IV&7fFycZEZ5OFum5#9*O<1?OI9UHx8)UD2V4b1<*?{-i%7}=*+WjXjN z?yZF%(h=;_g3C2;zm23_tJzFi&8o%}#2o!r#Mq;KC; zP2gz>_rz{~nE=jkZN?4YHR@l>Q8@K_PQ7R3QEC?e{TWm zF32%x--rgXeBlpnKrOWc>eTGb@RXr zXj@=~wmb&lPFymsZRGwY8SH+Hf-F-vE~k1b(!bfLMfq&iPI-tQg>ND$TGyGy4?|hw zw*aDN3|QR?4h1Ff3~Ivv}^Y1c}0h-!| z0{HaVDTgE~B7>u~NF&d2d!L;o(3caI#~$=FMS~SIpDHHp8m$B|&a;8!6G&rU>KHnJ z{^GX`deJ*{1Hr$-AW+!`r`L~oaFlj_nD+5;gOtC3)iqT>c_!dEB)Z~fngPa-pp1KIJ`Aa>tNQLAOj4g1~xtr0oCITUU-JoNv zzTM+SpdxF^G;wH6J86`zw8pX#KoyGCME>Fv^bG_Ml=ub)_+mF3F*O(5fGBUbxCiTr z5rvVd(+z~>=O}>A(FNGDY5L4L$&R{r;zLUJl#3T_Y`fsxy1k&7Z@-$Q}0v=H~IV$MS6i+ufX5z6^*ORqSG7FaU9A~<%0ABR zvqdlofZ%J_*ZpbYDACww%Zd>vgY&*x297e5z}}Ryl&h?UJA{f6g%gJZ(zom7S`<@7 z#y8|TizEF=3!V#q^jZ6?@j@YDW5Ohu+Iln>589D&OF`*^JoYS#)jz<{UYHEW`Hpe+ zQrOFhTpQB2N8|lI=99LyhSZeJ#XW0xOlg5inO7A(Y=w0-)&Wrmq*$s)4(Xm%L%wG_ zv~G5{MSe)QDbFvSo&ZMVLLVyuQuQ%Ou`!T$QM!Dk+VT7p>n1b058T(j6m?PSgpGEKt)$b1j2x{h$omfH%UC*wuc4ofkhMAcr4>KC}I<3wH%SVJf*9Q!dw00@Y 
zaLt5uVSQ(dGaA8J@QjauoXeRGzZbpr+~a0a+656d+@%3sU0atM%~C_^xi zYIr@_rN5a-x-z`Nwx!Uyi}5|5Owq2^L>~9Bm#*VH_P81TmcG0DcoaLXWS6@L+|cd8 za;qViy!oMMs&d&hW=%CGuY}*X@B0@kcCd? zDyF=CA{+b+E4?x78{`akX$~>&@3wU@Tr1c17(F$lIZKT^S!eVYXfR%JD)Fo<;fCr} zpi3GjK6-2#~dnYC+B@5|Bx8KH&zPSG`^BFMDRD3iC`no(Txbp(Tqj_kh*8vp#; zHi5>gGTD@=3%4L@&@_G|Ki>W+<>`I65&%M1Zar$jyFQ{&3*_$_<(_{*@9wO5odVT? zuK=348n87JcuJ&g@>iw34i5@_-^+TXq@jSkP?2%7kDZz%u8VmAiE>KdngcRB+(J}L zCJOYj6flv;0IxOmx=^dwAUIcF=cMz3V`EP9szCqPMbA-h;QLGgj2qX~EDBqSUvc0N zYz$KtHIB~Saf%<3b`qT>K?`*jl1UX++POeG~Z zZ5lVfPF7irtkZiDdOQ}my$mI(I-bU%suX1F-a{bf!oOkefSUE(%o zVT7ync1{+7oz}<7yB};;CSw}s0Au8cOb{G-JS^IS%5^rp^&3w*;#6vp^fRTPO+0`} z+ImHMkh_==CSOOOZ4h`7PWj{!E= zwz5}nr|`KkS&!Na^)v-Tlr`9kg7;O(s<0LoR@pBmd=?ANg|qJ&-aS8Izz`mN>*2Q@ z+CuBhFDs|HP=ibQi~GPQsZm;`h+M--AId<&YFhCx;#o&EVhF?#$LHK2(6q$dD+&*Wm)A^dHgI?cn_#p)F$)PT;{WxL>gxN$4=z>;7Mra5gL0SXiN7~V!q zF)c~|fn_TFjUlt0tSc|h`7y_(<1ERdeDZ*eycsIVxfc|YBO<;nv82?&;6gR%{LZb7`D;~2RQth;!n?Q05`Z?3}2#j)T zLwg%ytt({}h8$Nl7x{ZW_SJfkeH)Tj_Au@&*buOOJ5fBCK9?oLAlzSlTWJfdGRdl# zj((DY!LJtqQuLcJcK56|Z_~D2aE@Ofb8&QMlwoO({rMfLEVPxb|zc2 zy5aPkMCoe!T(BQfLcDP|oeh^LB)?+V&k|vBtMK$etzF%r6-FO^GTSKjs(D)JPpW}! z{tbQ3wrd`P%Q|VI-Zk@d_nJx_LZo={(^pb622=Ux-j%pPUwl%)8hCe{X797C`2K$J*YxEE4&q19{^3 zONR=}I#@^X)tOJ8iD}N9Xt8v`HP3Ix=iQ$EVthyImd}%%Q`S7cL@?w;j_@<~8%o2C z4e%bHb&(ehS{z)IcFYd`%nlLd)U3c*rZPR#sK{8hu^Jnh8Ay4=aggkO!+OQnDYX@t z^$7PgW}yg0*xZUk&bQeTz-&KW2By=v5`ruxoj+1p# zP=>4PrxdLH2zP7CFKWB)K@|gv;I3czsFyXg;*EM(&^N``FQQ&JMT$Al2(HAry%nxc z9ULyFF0znb_U<-&zO<&`mI8GUU=75l3JyZ<;ZGa*p6=Osmnb`k&OD|6%JPbs4VbxU zg9m;c=s!ZVF&uG}H4t(8cD}R*brwZrM=+;BJNbD=xS1h2e%t5Lu)5ru;g#!-D$U*# zg>O|UKFLJUY*cx?d===<45QJErRr5!mYFOx^w5aLVc%k%VJ)s%tz2OCg)+!R{6MDS zh=G>7!79i?DPepZmXL*9uap)soh-S|l}BVJTTwOYJQ9~LyF8%<>b1@|$zKevFymXS z7_53ks5qu^v{Ez|x83sy(DQP>f1YY2pchTWzv#h{EcI$R%@X3$GOkS~&4739FF>Q@1kJ4oU!#Gf> zhq&0GY6gvQu4+IU>Cupz%yz@*@sZ8iOiq9LTMvq9dG(V-rK?`Ma4C_qEgk&!FSOVk ztdkOjm0X17(;4~`IB9%j63)wusv?a*oJde`w^N#{{Xme-OEa^?tR^%}oxdFcmcI&FiW7?e zNn$Wv(!WZ9zuOq)cU1E1OiTWlYOyg_mW=N|mP=@)OMC(BLkXFYs@(S|6|sA#X!l~I zJQYR0bc#C#9z(ITmf7io^!+u<_YsQUQ4z>lVL7)+TOn-SZB%(a(HoHQyqR}r27-_p zG;)geb(vu$ieBf~Iqum@6wo2Te1Iae2Bk+NbAR~Ln%zHx%cc&l-0c1yZr|L)c0U^Y zgbeDDe#@sme_lHZbTGo2Cn3ozpN_!QC36&*L`%Rkqz$5u-i9*&RAnscGFe*<{p%E|JnScKyt>FWBF(yFkKxURm$+6#E zo*8ZkKP+mcfF@^Odh+`DauW_~qBHf1M~I4tG5vKb+{jmQbyy_=!~#A+o*k|7fQ1Ad zCDb*8)tI^LHezAz60!zhNU|uaWwZQPh-R5nNDTFl)(cO&6_%C!=#=RE1+dd@X_Su{ z##0Z@I{zqp;nTkUMfgDC>4b%lk=$!|d@F@_-(1Az_VIoM@lXYhg%~V)@zD^@Ww>1+ z#jET$DJ8eXSz9vi*f?`UMZOBbx0w60Q`g!2|(@&xS@l8+d$;zx-0A; z;-cseUP`m4}RQX)WqQ zkE%`p(9vNz@VmtS7kh6V6m{4CjT>N~2ndKQ0xF7xfC>vrBd{pl-AFE7(g=u@OG$+#Ic<$db&&)H=%>4&4Fu0%3`<(ZA=j;7C$j$m&ry}n? 