From 2baa7e3631c2deeefbb09c10c463a9533274ef37 Mon Sep 17 00:00:00 2001 From: dohe0342 Date: Mon, 30 Jan 2023 16:22:59 +0900 Subject: [PATCH] from local --- egs/librispeech/ASR/local/decode.py | 879 ++++++++++++++++++ .../.conformer.py.swp | Bin 94208 -> 0 bytes 2 files changed, 879 insertions(+) create mode 100755 egs/librispeech/ASR/local/decode.py delete mode 100644 egs/librispeech/ASR/pruned_transducer_stateless5/.conformer.py.swp diff --git a/egs/librispeech/ASR/local/decode.py b/egs/librispeech/ASR/local/decode.py new file mode 100755 index 000000000..691b49e0a --- /dev/null +++ b/egs/librispeech/ASR/local/decode.py @@ -0,0 +1,879 @@ +#!/usr/bin/env python3 +# +# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +(0) for d2v-T decoding +for method in greedy_search modified_beam_search fast_beam_search; do + ./pruned_transducer_stateless_d2v_v2/decode.py \ + --input-strategy AudioSamples \ + --enable-spec-aug False \ + --additional-block True \ + --model-name epoc.pt \ + --exp-dir ./pruned_transducer_stateless_d2v_v2/960h_sweep_v3_388 \ + --max-duration 400 \ + --decoding-method $method \ + --max-sym-per-frame 1 \ + --encoder-type d2v \ + --encoder-dim 768 \ + --decoder-dim 768 \ + --joiner-dim 768 +done +""" + + +import argparse +import logging +import math +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule +from beam_search import ( + beam_search, + fast_beam_search_nbest, + fast_beam_search_nbest_LG, + fast_beam_search_nbest_oracle, + fast_beam_search_one_best, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from train import add_model_arguments, add_rep_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.lexicon import Lexicon +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + +LOG_EPS = math.log(1e-10) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + "--model-name", + type=str, + default="", + help="""It specifies the model file name to use for decoding.""", + ) + + parser.add_argument( + "--epoch", + type=int, + default=30, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 1. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. 
+ You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=9, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless7_ctc/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--lang-dir", + type=Path, + default="data/lang_bpe_500", + help="The lang dir containing word table and LG graph", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + - fast_beam_search + - fast_beam_search_nbest + - fast_beam_search_nbest_oracle + - fast_beam_search_nbest_LG + If you use fast_beam_search_nbest_LG, you have to specify + `--lang-dir`, which should contain `LG.pt`. + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An integer indicating how many candidates we will keep for each + frame. Used only when --decoding-method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=20.0, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. + Used only when --decoding-method is fast_beam_search, + fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle + """, + ) + + parser.add_argument( + "--ngram-lm-scale", + type=float, + default=0.01, + help=""" + Used only when --decoding_method is fast_beam_search_nbest_LG. + It specifies the scale for n-gram LM scores. + """, + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=8, + help="""Used only when --decoding-method is + fast_beam_search, fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=64, + help="""Used only when --decoding-method is + fast_beam_search, fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; 2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", + ) + + parser.add_argument( + "--num-paths", + type=int, + default=200, + help="""Number of paths for nbest decoding. + Used only when the decoding method is fast_beam_search_nbest, + fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--nbest-scale", + type=float, + default=0.5, + help="""Scale applied to lattice scores when computing nbest paths. 
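+        A smaller value flattens the lattice scores before the n-best
+        paths are sampled, which typically yields more diverse paths.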
+        Used only when the decoding method is fast_beam_search_nbest,
+        fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""",
+    )
+
+    parser.add_argument(
+        "--simulate-streaming",
+        type=str2bool,
+        default=False,
+        help="""Whether to simulate streaming in decoding; this is a good way
+        to test a streaming model.
+        """,
+    )
+
+    parser.add_argument(
+        "--decode-chunk-size",
+        type=int,
+        default=16,
+        help="The chunk size for decoding (in frames after subsampling)",
+    )
+
+    parser.add_argument(
+        "--left-context",
+        type=int,
+        default=64,
+        help="How much left context can be seen during decoding "
+        "(in frames after subsampling)",
+    )
+
+    add_model_arguments(parser)
+    add_rep_arguments(parser)
+
+    return parser
+
+
+def decode_one_batch(
+    params: AttributeDict,
+    model: nn.Module,
+    sp: spm.SentencePieceProcessor,
+    batch: dict,
+    word_table: Optional[k2.SymbolTable] = None,
+    decoding_graph: Optional[k2.Fsa] = None,
+) -> Dict[str, List[List[str]]]:
+    """Decode one batch and return the result in a dict. The dict has the
+    following format:
+
+    - key: It indicates the setting used for decoding. For example,
+      if greedy_search is used, it would be "greedy_search".
+      If beam search with a beam size of 7 is used, it would be "beam_7".
+    - value: It contains the decoding result. `len(value)` equals the
+      batch size. `value[i]` is the decoding result for the i-th
+      utterance in the given batch. For example, a two-utterance batch
+      decoded with greedy search might yield (hypothetical)
+      {"greedy_search": [["HELLO", "WORLD"], ["HI", "THERE"]]}.
+
+    Args:
+      params:
+        It's the return value of :func:`get_params`.
+      model:
+        The neural model.
+      sp:
+        The BPE model.
+      batch:
+        It is the return value from iterating
+        `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation
+        for the format of the `batch`.
+      word_table:
+        The word symbol table.
+      decoding_graph:
+        The decoding graph. Can be either a `k2.trivial_graph` or HLG. Used
+        only when --decoding-method is fast_beam_search, fast_beam_search_nbest,
+        fast_beam_search_nbest_oracle, or fast_beam_search_nbest_LG.
+    Returns:
+      Return the decoding result. See above description for the format of
+      the returned dict.
+    """
+    device = next(model.parameters()).device
+    feature = batch["inputs"]
+    assert feature.ndim in (2, 3)
+
+    feature = feature.to(device)
+    # At entry, feature is (N, T, C) for pre-computed features, or
+    # (N, S) raw audio samples when --input-strategy is AudioSamples.
+
+    supervisions = batch["supervisions"]
+    if feature.ndim == 2:
+        # Raw audio input: take the number of samples from the recording
+        # metadata of each cut (mixed cuts store it on their first track).
+        feature_lens = []
+        for supervision in supervisions["cut"]:
+            try:
+                feature_lens.append(supervision.tracks[0].cut.recording.num_samples)
+            except AttributeError:
+                feature_lens.append(supervision.recording.num_samples)
+        feature_lens = torch.tensor(feature_lens).to(device)
+    elif feature.ndim == 3:
+        # Pre-computed features: the lengths come with the supervisions.
+        feature_lens = supervisions["num_frames"].to(device)
+
+    if params.simulate_streaming:
+        feature_lens += params.left_context
+        feature = torch.nn.functional.pad(
+            feature,
+            pad=(0, 0, 0, params.left_context),
+            value=LOG_EPS,
+        )
+        encoder_out, encoder_out_lens, _ = model.encoder.streaming_forward(
+            x=feature,
+            x_lens=feature_lens,
+            chunk_size=params.decode_chunk_size,
+            left_context=params.left_context,
+            simulate_streaming=True,
+        )
+    else:
+        encoder_out, encoder_out_lens = model.encoder(x=feature, x_lens=feature_lens)
+
+    hyps = []
+
+    if params.decoding_method == "fast_beam_search":
+        hyp_tokens = fast_beam_search_one_best(
+            model=model,
+            decoding_graph=decoding_graph,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam,
+            max_contexts=params.max_contexts,
+            max_states=params.max_states,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyps.append(hyp.split())
+    elif params.decoding_method == "fast_beam_search_nbest_LG":
+        hyp_tokens = fast_beam_search_nbest_LG(
+            model=model,
+            decoding_graph=decoding_graph,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam,
+            max_contexts=params.max_contexts,
+            max_states=params.max_states,
+            num_paths=params.num_paths,
+            nbest_scale=params.nbest_scale,
+        )
+        for hyp in hyp_tokens:
+            hyps.append([word_table[i] for i in hyp])
+    elif params.decoding_method == "fast_beam_search_nbest":
+        hyp_tokens = fast_beam_search_nbest(
+            model=model,
+            decoding_graph=decoding_graph,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam,
+            max_contexts=params.max_contexts,
+            max_states=params.max_states,
+            num_paths=params.num_paths,
+            nbest_scale=params.nbest_scale,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyps.append(hyp.split())
+    elif params.decoding_method == "fast_beam_search_nbest_oracle":
+        hyp_tokens = fast_beam_search_nbest_oracle(
+            model=model,
+            decoding_graph=decoding_graph,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam,
+            max_contexts=params.max_contexts,
+            max_states=params.max_states,
+            num_paths=params.num_paths,
+            ref_texts=sp.encode(supervisions["text"]),
+            nbest_scale=params.nbest_scale,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyps.append(hyp.split())
+    elif params.decoding_method == "greedy_search" and params.max_sym_per_frame == 1:
+        hyp_tokens = greedy_search_batch(
+            model=model,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyps.append(hyp.split())
+    elif params.decoding_method == "modified_beam_search":
+        hyp_tokens = modified_beam_search(
+            model=model,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam_size,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyps.append(hyp.split())
+    else:
+        batch_size = encoder_out.size(0)
+
+        for i in range(batch_size):
+            # fmt: off
+            encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]]
+            # fmt: on
+            if params.decoding_method == "greedy_search":
+                hyp = greedy_search(
+                    model=model,
+                    encoder_out=encoder_out_i,
+                    max_sym_per_frame=params.max_sym_per_frame,
+                )
+            elif params.decoding_method == "beam_search":
+                hyp = beam_search(
+                    model=model,
+                    encoder_out=encoder_out_i,
+                    beam=params.beam_size,
+                )
+            else:
+                raise ValueError(
+                    f"Unsupported decoding method: {params.decoding_method}"
+                )
+            hyps.append(sp.decode(hyp).split())
+
+    if params.decoding_method == "greedy_search":
+        return {"greedy_search": hyps}
+    elif "fast_beam_search" in params.decoding_method:
+        key = f"beam_{params.beam}_"
+        key += f"max_contexts_{params.max_contexts}_"
+        key += f"max_states_{params.max_states}"
+        if "nbest" in params.decoding_method:
+            key += f"_num_paths_{params.num_paths}_"
+            key += f"nbest_scale_{params.nbest_scale}"
+            if "LG" in params.decoding_method:
+                key += f"_ngram_lm_scale_{params.ngram_lm_scale}"
+
+        return {key: hyps}
+    else:
+        return {f"beam_size_{params.beam_size}": hyps}
+
+
+def decode_dataset(
+    dl: torch.utils.data.DataLoader,
+    params: AttributeDict,
+    model: nn.Module,
+    sp: spm.SentencePieceProcessor,
+    word_table: Optional[k2.SymbolTable] = None,
+    decoding_graph: Optional[k2.Fsa] = None,
+) -> Dict[str, List[Tuple[str, List[str], List[str]]]]:
+    """Decode dataset.
+
+    Args:
+      dl:
+        PyTorch's dataloader containing the dataset to decode.
+      params:
+        It is returned by :func:`get_params`.
+      model:
+        The neural model.
+      sp:
+        The BPE model.
+      word_table:
+        The word symbol table.
+      decoding_graph:
+        The decoding graph. Can be either a `k2.trivial_graph` or HLG. Used
+        only when --decoding-method is fast_beam_search, fast_beam_search_nbest,
+        fast_beam_search_nbest_oracle, or fast_beam_search_nbest_LG.
+    Returns:
+      Return a dict, whose key may be "greedy_search" if greedy search
+      is used, or it may be "beam_7" if a beam size of 7 is used.
+      Its value is a list of tuples. Each tuple contains three elements:
+      the cut id, the reference transcript, and the predicted result.
+    """
+    num_cuts = 0
+
+    try:
+        num_batches = len(dl)
+    except TypeError:
+        num_batches = "?"
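+    # Note: len(dl) raises TypeError when the sampler behind the dataloader
+    # does not define __len__, e.g. with lazily evaluated CutSets whose
+    # number of batches is unknown in advance.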
+ + if params.decoding_method == "greedy_search": + log_interval = 50 + else: + log_interval = 20 + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] + + hyps_dict = decode_one_batch( + params=params, + model=model, + sp=sp, + decoding_graph=decoding_graph, + word_table=word_table, + batch=batch, + ) + + for name, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + ref_words = ref_text.split() + this_batch.append((cut_id, ref_words, hyp_words)) + + results[name].extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % log_interval == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info(f"batch {batch_str}, cuts processed until now is {num_cuts}") + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[str, List[str], List[str]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + results = sorted(results) + store_transcripts(filename=recog_path, texts=results) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. + errs_filename = ( + params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results, enable_log=True + ) + test_set_wers[key] = wer + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = ( + params.res_dir / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_info, "w") as f: + print("settings\tWER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, WER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + LibriSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + assert params.decoding_method in ( + "greedy_search", + "beam_search", + "fast_beam_search", + "fast_beam_search_nbest", + "fast_beam_search_nbest_LG", + "fast_beam_search_nbest_oracle", + "modified_beam_search", + ) + params.res_dir = params.exp_dir / params.decoding_method + + if params.iter > 0: + params.suffix = f"iter-{params.iter}-avg-{params.avg}" + else: + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + + if params.simulate_streaming: + params.suffix += f"-streaming-chunk-size-{params.decode_chunk_size}" + params.suffix += f"-left-context-{params.left_context}" + + if "fast_beam_search" in params.decoding_method: + params.suffix += f"-beam-{params.beam}" + params.suffix += f"-max-contexts-{params.max_contexts}" + params.suffix += f"-max-states-{params.max_states}" + if "nbest" in params.decoding_method: + params.suffix += f"-nbest-scale-{params.nbest_scale}" + params.suffix += f"-num-paths-{params.num_paths}" + if "LG" in params.decoding_method: + params.suffix += 
f"-ngram-lm-scale-{params.ngram_lm_scale}" + elif "beam_search" in params.decoding_method: + params.suffix += f"-{params.decoding_method}-beam-size-{params.beam_size}" + else: + params.suffix += f"-context-{params.context_size}" + params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}" + + if params.use_averaged_model: + params.suffix += "-use-averaged-model" + + setup_logger(f"{params.res_dir}/log-decode-{params.suffix}") + logging.info("Decoding started") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"Device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # and are defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.unk_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + if params.simulate_streaming: + assert ( + params.causal_convolution + ), "Decoding in streaming requires causal convolution" + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if params.model_name: + load_checkpoint(f"{params.exp_dir}/{params.model_name}", model) + else: + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[ + : params.avg + ] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[ + : params.avg + 1 + ] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.to(device) + model.eval() + + if "fast_beam_search" in params.decoding_method: 
+        if params.decoding_method == "fast_beam_search_nbest_LG":
+            lexicon = Lexicon(params.lang_dir)
+            word_table = lexicon.word_table
+            lg_filename = params.lang_dir / "LG.pt"
+            logging.info(f"Loading {lg_filename}")
+            decoding_graph = k2.Fsa.from_dict(
+                torch.load(lg_filename, map_location=device)
+            )
+            decoding_graph.scores *= params.ngram_lm_scale
+        else:
+            word_table = None
+            decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device)
+    else:
+        decoding_graph = None
+        word_table = None
+
+    num_param = sum([p.numel() for p in model.parameters()])
+    logging.info(f"Number of model parameters: {num_param}")
+
+    # We need cut ids to display recognition results.
+    args.return_cuts = True
+    librispeech = LibriSpeechAsrDataModule(args)
+
+    # Alternative cut selections, kept for reference:
+    # test_clean_cuts = librispeech.test_clean_cuts(option='male')
+    # test_other_cuts = librispeech.test_other_cuts(option='male')
+
+    # option = 'big'
+    option = '4446'
+    test_clean_cuts = librispeech.vox_cuts(option=option)
+    # test_clean_cuts = librispeech.test_clean_user(option=option)
+    # test_other_cuts = librispeech.test_other_user(option=option)
+
+    test_clean_dl = librispeech.test_dataloaders(test_clean_cuts)
+    # test_other_dl = librispeech.test_dataloaders(test_other_cuts)
+
+    test_sets = ["test-clean_sampling"]
+    # test_sets = ["test-clean_sampling", "test-other_sampling"]
+    test_dl = [test_clean_dl]
+    # test_dl = [test_clean_dl, test_other_dl]
+
+    for test_set, test_dl in zip(test_sets, test_dl):
+        results_dict = decode_dataset(
+            dl=test_dl,
+            params=params,
+            model=model,
+            sp=sp,
+            word_table=word_table,
+            decoding_graph=decoding_graph,
+        )
+        # Assumes --decoding-method greedy_search; other methods store their
+        # results under a different key (see decode_one_batch).
+        results = results_dict["greedy_search"]
+        # jsons = open(f"{params.manifest_dir}/userlibri/{test_set}/{option}.jsonl", 'r').readlines()
+        # new_jsons = open(f"{params.manifest_dir}/userlibri/{test_set}/{option}_p.jsonl", 'w')
+
+        # Each result is a (cut_id, ref_words, hyp_words) tuple; keep only
+        # the hypothesis, keyed by cut id.
+        res_dict = {}
+        for res in results:
+            res_dict[res[0]] = " ".join(res[2])
+
+        res_dict = sorted(res_dict.items(), key=lambda x: x[0])
+
+        for k, v in res_dict:
+            print(k, v)
+        # Early exit: this script currently only prints the hypotheses.
+        exit()
+
+        # Disabled block: writes greedy pseudo labels back into the jsonl
+        # manifest. It depends on the commented-out `jsons`/`new_jsons`
+        # file handles above, so enabling it requires restoring those too.
+        if 0:
+            for line in jsons:
+                splited = line.split()
+                utt_id = splited[1][1:-2]
+                text_idx = splited.index('"text":')
+
+                pseudo = f'"greedy pseudo text": "{res_dict[utt_id]}",'
+                # splited.insert(text_idx, pseudo)
+                splited.insert(len(splited) - 2, pseudo)
+                new_line = " ".join(splited)
+                new_line += "\n"
+
+                new_jsons.write(new_line)
+
+    '''
+    for test_set, test_dl in zip(test_sets, test_dl):
+        results_dict = decode_dataset(
+            dl=test_dl,
+            params=params,
+            model=model,
+            sp=sp,
+            word_table=word_table,
+            decoding_graph=decoding_graph,
+        )
+
+        save_results(
+            params=params,
+            test_set_name=test_set,
+            results_dict=results_dict,
+        )
+    '''
+
+    '''
+    test_clean_cuts, test_clean_sets = librispeech.test_clean_cuts(option='user')
+    test_other_cuts, test_other_sets = librispeech.test_other_cuts(option='user')
+
+    test_clean_dl = [librispeech.test_dataloaders(user) for user in test_clean_cuts]
+    test_other_dl = [librispeech.test_dataloaders(user) for user in test_other_cuts]
+
+    test_sets = [test_clean_sets, test_other_sets]
+    test_dl = [test_clean_dl, test_other_dl]
+
+    for sets, dls in zip(test_sets, test_dl):
+        print(len(sets), len(dls))
+        for test_set, test_dl in zip(sets, dls):
+            results_dict = decode_dataset(
+                dl=test_dl,
+                params=params,
+                model=model,
+                sp=sp,
+                word_table=word_table,
+                decoding_graph=decoding_graph,
+            )
+
+            save_results(
+                params=params,
+                test_set_name=test_set,
+                results_dict=results_dict,
+            )
+    '''
+    logging.info("Done!")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/.conformer.py.swp b/egs/librispeech/ASR/pruned_transducer_stateless5/.conformer.py.swp
deleted file mode 100644
index 95f7227d030e37191f31b9e72d6ddf1728e1cfa6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 94208
[94208 bytes of base85-encoded binary data for the deleted Vim swap file omitted]